pull/2/head: cailun, 1 year ago (commit b6a8bde35d, parent 8291d5800a)

@@ -0,0 +1,436 @@
# Generated by Huawei LiteOS Kconfig Tool
#
# Compiler
#
LOSCFG_COMPILER_GCC=y
# LOSCFG_COMPILER_HIMIX_32 is not set
LOSCFG_COMPILER_ARM_NONE_EABI=y
#
# Compiler Options
#
# LOSCFG_COMPILER_OPTIMIZE_NONE is not set
LOSCFG_COMPILER_OPTIMIZE_SPEED=y
# LOSCFG_COMPILER_OPTIMIZE_SIZE is not set
# end of Compiler Options
# end of Compiler
#
# Targets
#
LOSCFG_FAMILY_STM32=y
# LOSCFG_FAMILY_QEMU is not set
# LOSCFG_FAMILY_GD is not set
# LOSCFG_FAMILY_CSKY is not set
# LOSCFG_FAMILY_SIFIVE is not set
LOSCFG_PLATFORM="Cloud_STM32F429IGTx_FIRE"
LOSCFG_PLATFORM_STM32F429IGTX=y
# LOSCFG_PLATFORM_STM32F072_Nucleo is not set
# LOSCFG_PLATFORM_STM32F103_FIRE_ARBITRARY is not set
# LOSCFG_PLATFORM_STM32F407_ATK_EXPLORER is not set
# LOSCFG_PLATFORM_STM32F769IDISCOVERY is not set
# LOSCFG_PLATFORM_STM32L4R9I_DISCOVERY is not set
# LOSCFG_PLATFORM_STM32L073_NUCLEO is not set
# LOSCFG_PLATFORM_STM32L431_BearPi is not set
# LOSCFG_PLATFORM_STM32L476_NB476 is not set
# LOSCFG_PLATFORM_STM32L496_NUCLEO is not set
# LOSCFG_PLATFORM_STM32L552_NUCLEO is not set
LOSCFG_HW_SD_CARD=y
LOSCFG_DRIVER_HAL_LIB=y
LOSCFG_HW_SPI_FLASH=y
LOSCFG_USING_BOARD_LD=y
LOSCFG_ARCH_ARM_AARCH32=y
LOSCFG_ARCH_ARM_CORTEX_M=y
LOSCFG_ARCH_ARM_V7M=y
LOSCFG_ARCH_ARM_VER="armv7-m"
LOSCFG_ARCH_FPU_VFP_V4=y
LOSCFG_ARCH_FPU_VFP_D16=y
LOSCFG_ARCH_FPU="fpv4-sp-d16"
LOSCFG_ARCH_CORTEX_M4=y
LOSCFG_ARCH_CPU="cortex-m4"
LOSCFG_ARCH_FPU_ENABLE=y
LOSCFG_APC_ENABLE=y
# LOSCFG_FPB_ENABLE is not set
# end of Targets
#
# Kernel
#
# LOSCFG_KERNEL_SMP is not set
#
# Basic Config
#
LOSCFG_SCHED_SQ=y
#
# Task
#
LOSCFG_BASE_CORE_TIMESLICE=y
LOSCFG_BASE_CORE_TIMESLICE_TIMEOUT=2
LOSCFG_OBSOLETE_API=y
LOSCFG_BASE_CORE_TSK_MONITOR=y
# LOSCFG_TASK_STATIC_ALLOCATION is not set
LOSCFG_BASE_CORE_TSK_LIMIT=16
LOSCFG_BASE_CORE_TSK_MIN_STACK_SIZE=1024
LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE=1536
LOSCFG_BASE_CORE_TSK_SWTMR_STACK_SIZE=1536
LOSCFG_BASE_CORE_TSK_IDLE_STACK_SIZE=2048
LOSCFG_BASE_CORE_TSK_DEFAULT_PRIO=10
LOSCFG_BASE_CORE_TICK_PER_SECOND=1000
# end of Task
LOSCFG_BASE_CORE_USE_MULTI_LIST=y
# LOSCFG_BASE_CORE_USE_SINGLE_LIST is not set
# end of Basic Config
#
# Memory Management
#
# LOSCFG_KERNEL_MEM_BESTFIT is not set
LOSCFG_KERNEL_MEM_BESTFIT_LITTLE=y
# LOSCFG_KERNEL_MEM_SLAB_EXTENTION is not set
LOSCFG_MEM_TASK_STAT=y
LOSCFG_KERNEL_MEMBOX=y
LOSCFG_KERNEL_MEMBOX_STATIC=y
# LOSCFG_KERNEL_MEMBOX_DYNAMIC is not set
# end of Memory Management
#
# Interrupt Management
#
LOSCFG_SHARED_IRQ=y
LOSCFG_PLATFORM_HWI_LIMIT=256
LOSCFG_HWI_PRIO_LIMIT=32
# end of Interrupt Management
#
# Exception Management
#
# LOSCFG_ARCH_EXC_SIMPLE_INFO is not set
# LOSCFG_SHELL_EXCINFO_DUMP is not set
# end of Exception Management
LOSCFG_BASE_CORE_SWTMR=y
LOSCFG_BASE_CORE_SWTMR_LIMIT=16
# LOSCFG_BASE_CORE_SWTMR_IN_ISR is not set
LOSCFG_BASE_IPC_QUEUE=y
# LOSCFG_QUEUE_STATIC_ALLOCATION is not set
LOSCFG_BASE_IPC_QUEUE_LIMIT=10
LOSCFG_BASE_IPC_EVENT=y
LOSCFG_BASE_IPC_MUX=y
LOSCFG_MUTEX_WAITMODE_PRIO=y
# LOSCFG_MUTEX_WAITMODE_FIFO is not set
LOSCFG_BASE_IPC_MUX_LIMIT=20
LOSCFG_BASE_IPC_SEM=y
LOSCFG_BASE_IPC_SEM_LIMIT=20
# LOSCFG_KERNEL_NX is not set
# LOSCFG_KERNEL_RINGBUF is not set
LOSCFG_KERNEL_EXTKERNEL=y
LOSCFG_KERNEL_CPPSUPPORT=y
# LOSCFG_KERNEL_CPUP is not set
LOSCFG_KERNEL_LOWPOWER=y
#
# Low Power Management Configure
#
LOSCFG_KERNEL_TICKLESS=y
# LOSCFG_KERNEL_POWER_MGR is not set
# end of Low Power Management Configure
# LOSCFG_KERNEL_TRACE is not set
# end of Kernel
#
# Lib
#
LOSCFG_LIB_LIBC=y
LOSCFG_COMPAT_POSIX=y
LOSCFG_LIB_LIBM=y
LOSCFG_LIB_STDIO=y
LOSCFG_SUPPORT_LONG_DOUBLE=y
LOSCFG_LIB_ZLIB=y
# LOSCFG_LIB_CPP_EXTEND is not set
# end of Lib
#
# Compat
#
LOSCFG_COMPAT_CMSIS=y
CMSIS_OS_VER=1
# end of Compat
#
# Components
#
#
# Ai
#
# LOSCFG_COMPONENTS_AI is not set
# end of Ai
#
# Connectivity
#
# LOSCFG_COMPONENTS_CONNECTIVITY is not set
# end of Connectivity
#
# FileSystem
#
# LOSCFG_COMPONENTS_FS_FATFS is not set
# LOSCFG_COMPONENTS_FS_LITTLEFS is not set
# LOSCFG_COMPONENTS_FS_RAMFS is not set
# LOSCFG_COMPONENTS_FS_SPIFFS is not set
# end of FileSystem
#
# Graphical User Interface
#
# end of Graphical User Interface
#
# Language
#
# LOSCFG_COMPONENTS_LUA is not set
# end of Language
#
# Lib
#
# LOSCFG_COMPONENTS_LIB_CJSON is not set
# end of Lib
#
# Log
#
# LOSCFG_COMPONENTS_LOG is not set
# end of Log
#
# Media
#
# LOSCFG_COMPONENTS_LIBPNG is not set
# LOSCFG_COMPONENTS_OPENEXIF is not set
# LOSCFG_COMPONENTS_OPUS is not set
# LOSCFG_COMPONENTS_UPNP is not set
# end of Media
#
# Network
#
# LOSCFG_COMPONENTS_NET_AT is not set
# LOSCFG_COMPONENTS_NET_LWIP is not set
# LOSCFG_COMPONENTS_NET_SAL is not set
# LOSCFG_COMPONENTS_NET_IFCONFIG is not set
# LOSCFG_COMPONENTS_IPERF is not set
# LOSCFG_COMPONENTS_PCAP is not set
# LOSCFG_COMPONENTS_NET_PING is not set
# LOSCFG_COMPONENTS_TFTP_SERVER is not set
# end of Network
#
# Security
#
# LOSCFG_COMPONENTS_SECURITY is not set
# LOSCFG_COMPONENTS_OPENSSL is not set
# end of Security
#
# Sensorhub
#
# LOSCFG_COMPONENTS_SENSORHUB is not set
# end of Sensorhub
#
# Utility
#
# LOSCFG_COMPONENTS_BIDIREFERENCE is not set
# LOSCFG_COMPONENTS_CURL is not set
# LOSCFG_COMPONENTS_FASTLZ is not set
# LOSCFG_COMPONENTS_FREETYPE is not set
# LOSCFG_COMPONENTS_HARFBUZZ is not set
# LOSCFG_COMPONENTS_ICONV is not set
# LOSCFG_COMPONENTS_INIPARSER is not set
# LOSCFG_COMPONENTS_JSON_C is not set
# LOSCFG_COMPONENTS_JSONCPP is not set
# LOSCFG_COMPONENTS_LIBXML2 is not set
# LOSCFG_COMPONENTS_SQLITE is not set
# LOSCFG_COMPONENTS_THTTPD is not set
# LOSCFG_COMPONENTS_TINYXML2 is not set
# end of Utility
# end of Components
#
# Demos
#
#
# Agent Tiny Lwm2m Demo
#
# LOSCFG_DEMOS_AGENT_TINY_LWM2M is not set
# end of Agent Tiny Lwm2m Demo
#
# Agent Tiny Mqtt Demo
#
# LOSCFG_DEMOS_AGENT_TINY_MQTT is not set
# end of Agent Tiny Mqtt Demo
#
# Ai Demo
#
# LOSCFG_DEMOS_AI is not set
# end of Ai Demo
#
# Drivers Demo
#
# LOSCFG_DEMO_CAN is not set
# end of Drivers Demo
#
# Dtls Server Demo
#
# LOSCFG_DEMOS_DTLS_SERVER is not set
# end of Dtls Server Demo
#
# FileSystem Demo
#
# LOSCFG_DEMOS_FS_FAT is not set
# LOSCFG_DEMOS_FS_LITTLEFS is not set
# LOSCFG_DEMOS_FS_RAM is not set
# LOSCFG_DEMOS_FS_SPIF is not set
# end of FileSystem Demo
#
# Graphical User Interface Demo
#
# end of Graphical User Interface Demo
#
# Ipv6 Client Demo
#
# LOSCFG_DEMOS_IPV6_CLIENT is not set
# end of Ipv6 Client Demo
#
# Kernel Demo
#
# LOSCFG_DEMOS_KERNEL is not set
# end of Kernel Demo
#
# Language Demo
#
# LOSCFG_DEMOS_LUA is not set
# end of Language Demo
#
# LMS Demo
#
# end of LMS Demo
#
# Media Demo
#
# LOSCFG_DEMOS_LIBPNG is not set
# LOSCFG_DEMOS_OPENEXIF is not set
# LOSCFG_DEMOS_OPUS is not set
# LOSCFG_DEMOS_UPNP is not set
# end of Media Demo
#
# NB-IoT Demo Without Atiny
#
# LOSCFG_DEMOS_NBIOT_WITHOUT_ATINY is not set
# end of NB-IoT Demo Without Atiny
#
# Security Demo
#
# LOSCFG_DEMOS_OPENSSL is not set
# end of Security Demo
#
# Sensorhub Demo
#
#
# only support Cloud_STM32F429IGTx_FIRE
#
# LOSCFG_DEMOS_SENSORHUB is not set
# end of Sensorhub Demo
#
# Trace Demo
#
# LOSCFG_DEMOS_TRACE is not set
# end of Trace Demo
#
# Utility Demo
#
# LOSCFG_DEMOS_BIDIREFERENCE is not set
# LOSCFG_DEMOS_CURL is not set
# LOSCFG_DEMOS_FASTLZ is not set
# LOSCFG_DEMOS_FREETYPE is not set
# LOSCFG_DEMOS_HARFBUZZ is not set
# LOSCFG_DEMOS_ICONV is not set
# LOSCFG_DEMOS_INIPARSER is not set
# LOSCFG_DEMOS_JSON_C is not set
# LOSCFG_DEMOS_JSONCPP is not set
# LOSCFG_DEMOS_LIBXML2 is not set
# LOSCFG_DEMOS_SQLITE is not set
# LOSCFG_DEMOS_TINYXML2 is not set
# end of Utility Demo
# end of Demos
#
# Debug
#
LOSCFG_COMPILE_DEBUG=y
LOSCFG_PLATFORM_ADAPT=y
LOSCFG_BACKTRACE=y
# LOSCFG_ENABLE_MAGICKEY is not set
# LOSCFG_THUMB is not set
LOSCFG_DEBUG_VERSION=y
# LOSCFG_DEBUG_KERNEL is not set
LOSCFG_SHELL=y
LOSCFG_SHELL_UART=y
LOSCFG_SHELL_EXTENDED_CMDS=y
# LOSCFG_SHELL_DMESG is not set
# LOSCFG_MEM_DEBUG is not set
LOSCFG_SERIAL_OUTPUT_ENABLE=y
#
# TestSuite or AppInit
#
LOSCFG_PLATFORM_OSAPPINIT=y
# LOSCFG_TEST is not set
# LOSCFG_TESTSUIT_SHELL is not set
# LOSCFG_TEST_MANUAL_TEST is not set
# LOSCFG_FUZZ_DT is not set
# end of TestSuite or AppInit
# end of Debug
#
# Driver
#
# LOSCFG_DRIVERS_BASE is not set
LOSCFG_CORTEX_M_NVIC=y
LOSCFG_CORTEX_M_SYSTICK=y
LOSCFG_DRIVERS_SIMPLE_UART=y
# end of Driver
#
# Stack Smashing Protector (SSP) Compiler Feature
#
# LOSCFG_CC_NO_STACKPROTECTOR is not set
# LOSCFG_CC_STACKPROTECTOR is not set
LOSCFG_CC_STACKPROTECTOR_STRONG=y
# LOSCFG_CC_STACKPROTECTOR_ALL is not set
# end of Stack Smashing Protector (SSP) Compiler Feature

@@ -0,0 +1,13 @@
### Issue Description
### Steps to Reproduce
### Error Message

@@ -0,0 +1,13 @@
### How was this issue caused?
### Steps to Reproduce
### Error Message

@@ -0,0 +1,15 @@
### Issues Related
### Problem Description
### Change Description
### User Case

@@ -0,0 +1,19 @@
### Related Issues
### Reason (purpose, problem solved, etc.)
### Description (what was done, what was changed)
### Test Cases (added, modified, potentially affected features)
### Automated Tests (comment out if not needed; keeping the defaults is recommended)
ci_with_api_test
ci_with_ui_test

src/.gitignore vendored

@@ -0,0 +1,2 @@
/.vscode
/out

@@ -0,0 +1,24 @@
* Copyright (c) <2013-2020>, <Huawei Technologies Co., Ltd>
* All rights reserved.
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,112 @@
LITEOSTOPDIR = $(CURDIR)
export LITEOSTOPDIR
-include $(LITEOSTOPDIR)/config.mk
RM = rm -rf
MAKE = make
__LIBS = libs
LITEOS_TARGET = Huawei_LiteOS
LITEOS_LIBS_TARGET = libs_target
.PHONY: all lib clean cleanall $(LITEOS_TARGET) debug release help
all: $(OUT) $(BUILD) $(LITEOS_TARGET)
lib: $(OUT) $(BUILD) $(LITEOS_LIBS_TARGET)
debug:
$(HIDE)echo "=============== make a debug version ==============="
$(HIDE) $(MAKE) all
release:
ifneq ($(PLATFORM),)
	$(HIDE)echo "=============== make a release version for platform $(PLATFORM) ==============="
	$(HIDE)$(SCRIPTS_PATH)/mklibversion.sh $(PLATFORM)
else
	$(HIDE)echo "=============== make a release version for all platforms ==============="
	$(HIDE)$(SCRIPTS_PATH)/mklibversion.sh
endif
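# Typical invocations (illustrative; the targets are the ones defined in this file):
#   make                          - build the Huawei_LiteOS image for the configured platform
#   make lib                      - build only the static libraries
#   make release PLATFORM=<name>  - package release libraries for a single platform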
#-----need move when make version-----#
##### make lib #####
$(__LIBS): $(OUT) $(CXX_INCLUDE)
$(LITEOS_TARGET): $(__LIBS) LITEOS_BUILD
$(OUT): $(LITEOS_MENUCONFIG_H)
$(HIDE)mkdir -p $(OUT)/lib
$(BUILD):
$(HIDE)mkdir -p $(BUILD)
$(LITEOS_LIBS_TARGET): $(__LIBS)
$(HIDE)for dir in $(LIB_SUBDIRS); \
do $(MAKE) -C $$dir all || exit 1; \
done
$(HIDE)echo "=============== make lib done ==============="
include tools/menuconfig/Makefile.kconfig
$(LITEOS_MENUCONFIG_H):
ifneq ($(LITEOS_PLATFORM_MENUCONFIG_H), $(wildcard $(LITEOS_PLATFORM_MENUCONFIG_H)))
$(HIDE)+make savemenuconfig
endif
LITEOS_BUILD: $(LITEOS_MENUCONFIG_H)
LITEOS_BUILD:
$(HIDE)echo $(LOSCFG_ENTRY_SRC)
$(HIDE)for dir in $(LITEOS_SUBDIRS); \
do $(MAKE) -C $$dir all || \
if [ "$$?" != "0" ]; then \
echo "########################################################################################################"; \
echo "######## LiteOS build failed! ########"; \
echo "########################################################################################################"; \
exit 1; \
fi;\
done
$(LITEOS_TARGET):
ifeq ($(OS), Linux)
$(call update_from_baselib_file)
endif
ifeq ($(LD), $(CC))
$(LD) $(LITEOS_LDFLAGS) $(LITEOS_TABLES_LDFLAGS) $(LITEOS_DYNLDFLAGS) -Wl,-Map=$(OUT)/$@.map -o $(OUT)/$@.elf -Wl,--start-group $(LITEOS_BASELIB) -Wl,--end-group
else
$(LD) $(LITEOS_LDFLAGS) $(LITEOS_TABLES_LDFLAGS) $(LITEOS_DYNLDFLAGS) -Map=$(OUT)/$@.map -o $(OUT)/$@.elf --start-group $(LITEOS_BASELIB) --end-group
endif
ifeq ($(LOSCFG_FAMILY_RASPBERRY), y)
ifeq ($(LOSCFG_ARCH_ARM_AARCH64), y)
$(OBJCOPY) -O binary $(OUT)/$@.elf $(OUT)/kernel8.img
else
$(OBJCOPY) -O binary $(OUT)/$@.elf $(OUT)/kernel7.img
endif
endif
ifeq ($(LOSCFG_PLATFORM_ESP32)$(LOSCFG_PLATFORM_ESP32_QEMU), y)
esptool.py --chip esp32 elf2image --flash_mode dio --flash_freq 80m --flash_size 4MB -o $(OUT)/$@.bin $(OUT)/$@.elf
else ifeq ($(LOSCFG_PLATFORM_ESP8266), y)
esptool.py --chip esp8266 elf2image --flash_mode dio --flash_freq 40m --flash_size 2MB --version=3 -o $(OUT)/$@.bin $(OUT)/$@.elf
else
$(OBJCOPY) -O binary $(OUT)/$@.elf $(OUT)/$@.bin
endif
$(OBJDUMP) -t $(OUT)/$@.elf |sort >$(OUT)/$@.sym.sorted
$(OBJDUMP) -d $(OUT)/$@.elf >$(OUT)/$@.asm
$(SIZE) $(OUT)/$@.elf
$(HIDE)echo "########################################################################################################"
$(HIDE)echo "######## LiteOS build successfully! ########"
$(HIDE)echo "########################################################################################################"
clean:
$(HIDE)for dir in $(LITEOS_SUBDIRS); \
do $(MAKE) -C $$dir clean || exit 1; \
done
$(HIDE)$(RM) .config.old
$(HIDE)$(RM) $(__OBJS) $(LITEOS_TARGET) $(OUT) $(BUILD) $(LITEOS_MENUCONFIG_H) *.bak *~
$(HIDE)$(RM) -rf $(LITEOS_PLATFORM_MENUCONFIG_H)
$(HIDE)echo "clean $(LITEOS_PLATFORM) finish"
cleanall:
$(HIDE)rm -rf $(LITEOSTOPDIR)/out
$(HIDE)echo "clean all"

File diff suppressed because it is too large.

@@ -0,0 +1,124 @@
[![star](https://gitee.com/LiteOS/LiteOS/badge/star.svg?theme=gvp)](https://gitee.com/LiteOS/LiteOS/stargazers)
[![fork](https://gitee.com/LiteOS/LiteOS/badge/fork.svg?theme=gvp)](https://gitee.com/LiteOS/LiteOS/members)
[View English](README_EN.md)
## Huawei LiteOS Overview
Huawei LiteOS is a lightweight operating system based on a real-time kernel, developed by Huawei for the IoT field. This project is part of the source code of the <a href="https://www.huaweicloud.com/product/liteos.html" target="_blank">Huawei IoT operating system [Huawei LiteOS]</a>. The current basic kernel consists of an untailorable minimal kernel and other tailorable modules. The minimal kernel covers task management, memory management, exception management, the system clock, and interrupt management. Tailorable modules include semaphores, mutexes, queue management, event management, software timers, and more. Beyond the basic kernel, Huawei LiteOS also provides an enhanced kernel, including C++ support, low power consumption, and maintenance and test modules. The low-power part supports the tickless mechanism and run-stop hibernation and wakeup, which can greatly reduce system power consumption. The maintenance and test part provides CPU usage statistics, trace event tracking, a shell command line, and more.
Huawei LiteOS also provides device-cloud synergy capabilities and integrates the full set of IoT interconnection protocol stacks, including LwM2M, CoAP, mbedtls, and LwIP. On top of LwM2M, it provides the AgentTiny module: users only need to focus on their own applications instead of LwM2M implementation details, and can implement a secure and reliable connection to the cloud platform simply and quickly by using the APIs encapsulated by AgentTiny.
Since its release to the open-source community, Huawei LiteOS has enabled partners around the NB-IoT market in terms of technology, ecosystem, solutions, and commercial support, building an open-source IoT ecosystem. So far it has aggregated 50+ MCU and solution partners and jointly launched a batch of open-source development kits and industry solutions, helping many industry customers quickly roll out IoT terminals and services. These customers cover meter reading, parking, street lamps, environmental protection, bicycle sharing, logistics, and many other industries. LiteOS provides developers with a "one-stop" complete software platform that effectively lowers the development threshold and shortens the development cycle.
For a more detailed introduction to Huawei LiteOS, see [LiteOS Product Introduction](./doc/LiteOS_Introduction.md).
## Huawei LiteOS Documentation Center (Chinese)
<a href="https://support.huaweicloud.com/LiteOS/index.html" target="_blank">Huawei LiteOS official documentation center</a>
## LiteOS Code Walkthrough
[LiteOS Kernel Source Code Directory Description](./doc/LiteOS_Code_Info.md)
This document describes the LiteOS kernel source code in detail. Through it, readers can understand the source code structure of LiteOS and the role of LiteOS's main() function.
## LiteOS Build Introduction and Development Tools
[LiteOS Build Introduction and Development Tools](./doc/LiteOS_Build_and_IDE.md)
This document introduces the LiteOS build framework and how to build LiteOS on Linux and Windows.
## LiteOS Quick Start
[LiteOS Quick Start](./doc/LiteOS_Quick_Start.md)
This document describes how to quickly get LiteOS running on development boards and on the QEMU emulator.
## LiteOS Porting Guide
[LiteOS Porting Guide](./doc/LiteOS_Porting_Guide.md)
Based on the STM32 chip platform, this document describes in detail how to quickly port LiteOS.
## LiteOS Kernel Development Guide
[LiteOS Kernel Development Guide](./doc/LiteOS_Kernel_Developer_Guide.md)
This document explains in detail the development of each LiteOS module and its implementation principles. Readers can learn how to use each module from this document.
## LiteOS Component Development Guide
<a href="https://gitee.com/LiteOS/LiteOS_Components/tree/master" target="_blank">LiteOS Component Development Guide</a>
This document describes in detail the composition of LiteOS components and the complete development process. Readers can learn about the LiteOS component framework and development workflow from this document.
## LiteOS Maintenance and Test Guide
[LiteOS Maintenance and Test Guide](./doc/LiteOS_Maintenance_Guide.md)
This document describes in detail the maintenance and test capabilities of LiteOS, including memory debugging methods, IPC communication debugging methods, trace, scheduling statistics, CPU usage obtaining, and more.
## LiteOS Shell
[Shell Tutorial](./shell/README_CN.md)
This document explains in detail how to customize user-defined shell commands on LiteOS and how to execute the built-in shell commands.
## LiteOS Standard Library
[LiteOS Standard Library](./doc/LiteOS_Standard_Library.md)
This document lists the POSIX, CMSIS, and other interfaces supported by LiteOS.
## LiteOS Demos
The [LiteOS demos](./demos) directory contains the demos of each LiteOS module and their documents. The documents describe in detail the functions of the demos and how to run them.
## LiteOS API Reference
For the <a href="https://www.huawei.com/minisite/liteos/cn/api/index.html" target="_blank">API reference</a>, visit the LiteOS official website.
## Must-Read for LiteOS Code Contribution
[LiteOS Code & Document Contribution Guide](./doc/LiteOS_Contribute_Guide.md)
This document covers the LiteOS programming specifications, documentation writing specifications, the rules for filling in information when committing code and documents, and how to commit to the LiteOS repository.
## Hardware Supported by LiteOS
* The LiteOS open-source project currently supports chip architectures such as ARM Cortex-M0, Cortex-M3, Cortex-M4, Cortex-M7, and Cortex-A.
* Together with mainstream MCU vendors in the industry, LiteOS has been adapted to a variety of general-purpose MCU development kits through developer activities.
## Open-Source License
* Follows the BSD-3 open-source license.
* [Huawei LiteOS IPR Policy](./doc/LiteOS_Contribute_Guide.md#协议)
## Contact Us
* Technical support
You are welcome to <a href="https://gitee.com/LiteOS/LiteOS/issues" target="_blank">submit an issue</a> to discuss the questions you care about, or to start a conversation on the <a href="https://bbs.huaweicloud.com/forum/forum-729-1.html" target="_blank">LiteOS forum</a>.
You can also send your questions to LiteOSSupport@huawei.com.
* Technical cooperation
If you are interested in cooperation and want to join the Huawei LiteOS ecosystem partners, send an email to LiteOSSupport@huawei.com or visit the <a href="http://www.huawei.com/liteos" target="_blank">LiteOS official website</a> for more details.

@@ -0,0 +1,99 @@
[![star](https://gitee.com/LiteOS/LiteOS/badge/star.svg?theme=gvp)](https://gitee.com/LiteOS/LiteOS/stargazers)
[![fork](https://gitee.com/LiteOS/LiteOS/badge/fork.svg?theme=gvp)](https://gitee.com/LiteOS/LiteOS/members)
[中文](README.md)
## Introduction to Huawei LiteOS
Huawei LiteOS is a lightweight operating system based on a real-time kernel, developed by Huawei for the IoT. The existing basic kernel includes an untailorable ultra-small kernel and some tailorable modules. The ultra-small kernel covers task management, memory management, interrupt management, error handling, and the system clock. Tailorable modules include semaphore, mutex lock, queue management, event management, and software timer. In addition to the basic kernel functions, Huawei LiteOS also provides enhanced kernel functions, including C++ support, low power consumption, and the maintenance and test module. Low-power mechanisms such as tickless and run-stop hibernation and wakeup can be used to greatly reduce system power consumption. The maintenance and test module can be used to obtain CPU usage, trace events, and run shell commands.
Huawei LiteOS provides device-cloud synergy capabilities and integrates the full set of IoT interconnection protocol stacks including LwM2M, CoAP, Mbed TLS, and LwIP. In addition, Huawei LiteOS provides the AgentTiny module based on LwM2M. You only need to focus on your own applications instead of LwM2M implementation details. You can directly use the AgentTiny-encapsulated APIs to implement secure and reliable connections with the cloud platforms.
Since its release to the open-source community, Huawei LiteOS has enabled partners in the NB-IoT market in terms of technologies, ecosystems, solutions, and commercial support to build an open-source IoT ecosystem. Currently, Huawei has aggregated more than 50 MCU and solution partners to jointly launch a series of open-source development kits and industry solutions, helping industry customers quickly launch IoT terminals and services. These customers span various industries, such as meter reading, parking, street lamps, environmental protection, bicycle sharing, and logistics. Moreover, Huawei LiteOS provides developers with a one-stop software platform, lowering development requirements and improving development efficiency.
For details about Huawei LiteOS, see [LiteOS Product Introduction](./doc/LiteOS_Introduction_en/README_EN.md).
## LiteOS Code Overview
[LiteOS Kernel Source Code Directory Description](./doc/LiteOS_Code_Info_en.md)
This document describes the source code of the LiteOS kernel. Through this document, you can understand the source code structure and the main() function of LiteOS.
## LiteOS Build and Development Tools
[LiteOS Build and Development Tools](./doc/LiteOS_Build_and_IDE_en/README_EN.md)
This document describes the LiteOS build framework and how to build LiteOS on Linux and Windows.
## LiteOS Quick Start
[LiteOS Quick Start](./doc/LiteOS_Quick_Start_en/README_EN.md)
This document describes how to quickly run LiteOS on the development board and QEMU emulator.
## LiteOS Porting Guide
[LiteOS Porting Guide](./doc/LiteOS_Porting_Guide_en/README_EN.md)
This document describes how to quickly port LiteOS based on the STM32 chip platform.
## LiteOS Kernel Development Guide
[LiteOS Kernel Development Guide](./doc/LiteOS_Kernel_Developer_Guide_en/README_EN.md)
This document describes the development and implementation principles of each LiteOS module. Through this document, you can understand how to use each module.
## LiteOS Maintenance and Test Guide
[LiteOS Maintenance and Test Guide](./doc/LiteOS_Maintenance_Guide_en/README_EN.md)
This document describes the maintenance and test capabilities of LiteOS, including the memory commissioning method, IPC communication commissioning method, event tracing, scheduling statistics, and CPU usage obtaining.
## LiteOS Shell
[Shell Tutorial](./shell/doc_en/README_EN.md)
This document describes how to customize shell commands on LiteOS and how to execute built-in shell commands.
## LiteOS Standard Library
[LiteOS Standard Library](./doc/LiteOS_Standard_Library.md)
This document lists the POSIX and CMSIS APIs supported by LiteOS.
## LiteOS Demos
The [LiteOS demos](./demos) directory contains demos and documents of each module provided by LiteOS. This document describes functions and running methods of demos.
## LiteOS API Reference
For details about <a href="https://www.huawei.com/minisite/liteos/cn/api/index.html" target="_blank">APIs</a>, visit the LiteOS official website.
## LiteOS Code Contribution Guide
[LiteOS Code & Document Contribution Guide](./doc/LiteOS_Contribute_Guide_en.md)
This document describes the LiteOS programming specifications, document writing specifications, and specifications for filling in information when committing code and documents to the LiteOS repository.
## Hardware Supported by LiteOS
* Currently, the LiteOS open-source project supports chip architectures such as ARM Cortex-M0, Cortex-M3, Cortex-M4, Cortex-M7 and Cortex-A.
* LiteOS works with mainstream MCU vendors in the industry and has been adapted to multiple common MCU development kits through developer activities.
## Open-Source Protocol
* Comply with the BSD-3 open-source license agreement.
* [Huawei LiteOS IPR Policy](./doc/LiteOS_Contribute_Guide_en.md#protocols)
## Contacting Us
* Technical support
<a href="https://gitee.com/LiteOS/LiteOS/issues" target="_blank">Commit an issue</a> that you are concerned about.
You can also send issues to LiteOSSupport@huawei.com.
* Technical cooperation
If you are interested in being Huawei LiteOS ecosystem partners, send an email to LiteOSSupport@huawei.com, or visit the <a href="https://www.huawei.com/minisite/liteos/en/index.html" target="_blank">Huawei LiteOS official website</a> for more details.

@@ -0,0 +1,55 @@
# Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved.
osource "arch/arm/Kconfig"
osource "arch/arm64/Kconfig"
osource "arch/xtensa/Kconfig"
osource "arch/riscv/Kconfig"
osource "arch/csky/Kconfig"
config LOSCFG_ARCH_FPU_ENABLE
bool "Enable Floating Pointer Unit"
default y
depends on LOSCFG_ARCH_CORTEX_M4 || LOSCFG_ARCH_CORTEX_M7 || LOSCFG_ARCH_ARM_AARCH32 || \
LOSCFG_ARCH_ARM_AARCH64 || LOSCFG_ARCH_RISCV_RV32IMC || LOSCFG_ARCH_XTENSA_LX6
help
This option enables hardware floating point (FPU) support in the system.
config LOSCFG_ARCH_SECURE_MONITOR_MODE
bool "Run On Secure Monitor Mode"
default n
depends on LOSCFG_ARCH_ARM_AARCH64
help
This option will make the system run on EL3.
config LOSCFG_APC_ENABLE
bool "Enable Access Permission Control"
default y
help
This option enables access permission control.
config LOSCFG_FPB_ENABLE
bool "Enable Flash Patch"
default n
depends on LOSCFG_ARCH_ARM_CORTEX_M || LOSCFG_ARCH_RISCV_RV32IMC
help
Answer Y to enable Flash Patch support in LiteOS.
config RISCV_FPB_SET_PCO
bool "Enable patch offset bigger than 1M"
default y
depends on LOSCFG_ARCH_RISCV && LOSCFG_FPB_ENABLE
help
Enable patch offsets larger than 1 MB by setting the PCO (patch code branch offset) bit.
config LOSCFG_LIB_CONFIGURABLE
bool
default n
help
Answer Y to make the LiteOS base kernel library configurable.
config LOSCFG_MULTI_BINARIES
bool
default n
select LOSCFG_LIB_CONFIGURABLE
help
Answer Y to enable LiteOS support for multiple binaries.
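# Example (illustrative; LOSCFG_PLATFORM_MYBOARD is a hypothetical symbol):
# board and core options normally pull in the features above via `select`,
# e.g.
#
# config LOSCFG_PLATFORM_MYBOARD
#     bool "My Board"
#     select LOSCFG_ARCH_CORTEX_M4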

@@ -0,0 +1,192 @@
# Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved.
#
# ARM 32-bit(Aarch32) implementations
#
config LOSCFG_ARCH_ARM_AARCH32
bool
select LOSCFG_LIB_LIBC
help
32-bit ARM architecture implementations.
It covers not only ARMv7-A but also ARMv7-R, ARMv7-M, and other 32-bit profiles.
config LOSCFG_ARCH_ARM_CORTEX_A
bool
select LOSCFG_ARCH_ARM_AARCH32
config LOSCFG_ARCH_ARM_CORTEX_R
bool
select LOSCFG_ARCH_ARM_AARCH32
config LOSCFG_ARCH_ARM_CORTEX_M
bool
select LOSCFG_ARCH_ARM_AARCH32
#
# Architecture Versions
#
config LOSCFG_ARCH_ARM_V7A
bool
select LOSCFG_ARCH_ARM_CORTEX_A
config LOSCFG_ARCH_ARM_V8A_AARCH32
bool
select LOSCFG_ARCH_ARM_CORTEX_A
help
ARMv8-A supports both AArch32 and AArch64. This option is for ARMv8-A cores running
in AArch32 mode.
config LOSCFG_ARCH_ARM_V7R
bool
select LOSCFG_ARCH_ARM_CORTEX_R
config LOSCFG_ARCH_ARM_V6M
bool
select LOSCFG_ARCH_ARM_CORTEX_M
config LOSCFG_ARCH_ARM_V7M
bool
select LOSCFG_ARCH_ARM_CORTEX_M
config LOSCFG_ARCH_ARM_V8M
bool
select LOSCFG_ARCH_ARM_CORTEX_M
config LOSCFG_ARCH_ARM_V8R
bool
select LOSCFG_ARCH_ARM_CORTEX_R
config LOSCFG_ARCH_ARM_VER
string
default "armv6-m" if LOSCFG_ARCH_ARM_V6M
default "armv7-a" if LOSCFG_ARCH_ARM_V7A
default "armv7-r" if LOSCFG_ARCH_ARM_V7R
default "armv7-m" if LOSCFG_ARCH_ARM_V7M
default "armv8-a" if LOSCFG_ARCH_ARM_V8A_AARCH32
default "armv8-r" if LOSCFG_ARCH_ARM_V8R
default "armv8-m.main" if LOSCFG_ARCH_ARM_V8M
#
# VFP Hardware
#
config LOSCFG_ARCH_FPU_VFP_V3
bool
help
An optional extension to the Arm, Thumb, and ThumbEE instruction sets in the ARMv7-A and ARMv7-R profiles.
VFPv3U is a variant of VFPv3 that supports the trapping of floating-point exceptions to support code.
config LOSCFG_ARCH_FPU_VFP_V4
bool
help
An optional extension to the Arm, Thumb, and ThumbEE instruction sets in the ARMv7-A and ARMv7-R profiles.
VFPv4U is a variant of VFPv4 that supports the trapping of floating-point exceptions to support code.
VFPv4 and VFPv4U add both the Half-precision Extension and the fused multiply-add instructions to the features of VFPv3.
config LOSCFG_ARCH_FPU_VFP_V5
bool
help
An optional extension to the Arm, Thumb, and ThumbEE instruction sets in the ARMv7-A, ARMv7-R, and ARMv7-M profiles.
Addition of double-precision operand support for existing data processing instructions in FPv4-SP-D16-M.
16 double-precision registers. This is the same as for FPv4 and there are no additional registers.
Software-enable control for single-precision and double-precision support using CPACR.
Double-precision and single-precision support, when both are implemented, cannot be enabled independently of one another.
Identical load/store instruction support to FPv4 extensions that already includes support for 64-bit data types.
config LOSCFG_ARCH_FPU_VFP_D16
bool
depends on LOSCFG_ARCH_ARM_AARCH32
help
VFP unit implemented with 16 doubleword registers (16 x 64-bit).
config LOSCFG_ARCH_FPU_VFP_D32
bool
depends on LOSCFG_ARCH_ARM_AARCH32
help
VFP unit implemented with 32 doubleword registers (32 x 64-bit).
config LOSCFG_ARCH_FPU_VFP_NEON
bool
help
Advanced SIMD extension (NEON) support.
config LOSCFG_ARCH_FPU
string
default "vfpv3" if LOSCFG_ARCH_FPU_VFP_V3 && LOSCFG_ARCH_FPU_VFP_D32
default "vfpv3-d16" if LOSCFG_ARCH_FPU_VFP_V3 && LOSCFG_ARCH_FPU_VFP_D16
default "neon-vfpv4" if LOSCFG_ARCH_FPU_VFP_V4 && LOSCFG_ARCH_FPU_VFP_D32 && LOSCFG_ARCH_FPU_VFP_NEON
default "vfpv4" if LOSCFG_ARCH_FPU_VFP_V4 && LOSCFG_ARCH_FPU_VFP_D32
default "fpv4-sp-d16" if LOSCFG_ARCH_FPU_VFP_V4 && LOSCFG_ARCH_FPU_VFP_D16 && LOSCFG_ARCH_ARM_V7M
default "vfpv4-d16" if LOSCFG_ARCH_FPU_VFP_V4 && LOSCFG_ARCH_FPU_VFP_D16
default "fpv5-sp-d16" if LOSCFG_ARCH_FPU_VFP_V5 && LOSCFG_ARCH_FPU_VFP_D16
#
# Supported Processor Cores
#
config LOSCFG_ARCH_CORTEX_M0
bool
select LOSCFG_ARCH_ARM_V6M
select LOSCFG_ARCH_ARM_AARCH32
config LOSCFG_ARCH_CORTEX_M0_PLUS
bool
select LOSCFG_ARCH_ARM_V6M
select LOSCFG_ARCH_ARM_AARCH32
config LOSCFG_ARCH_CORTEX_M3
bool
select LOSCFG_ARCH_ARM_V7M
select LOSCFG_ARCH_ARM_AARCH32
select LOSCFG_ARCH_FPU_VFP_V3
select LOSCFG_ARCH_FPU_VFP_D16
config LOSCFG_ARCH_CORTEX_M4
bool
select LOSCFG_ARCH_ARM_V7M
select LOSCFG_ARCH_ARM_AARCH32
select LOSCFG_ARCH_FPU_VFP_V4
select LOSCFG_ARCH_FPU_VFP_D16
config LOSCFG_ARCH_CORTEX_M33
bool
select LOSCFG_ARCH_ARM_V8M
select LOSCFG_ARCH_ARM_AARCH32
config LOSCFG_ARCH_CORTEX_M7
bool
select LOSCFG_ARCH_ARM_V7M
select LOSCFG_ARCH_ARM_AARCH32
select LOSCFG_ARCH_FPU_VFP_V5
select LOSCFG_ARCH_FPU_VFP_D16
config LOSCFG_ARCH_CORTEX_A7
bool
select LOSCFG_ARCH_ARM_V7A
select LOSCFG_ARCH_FPU_VFP_V4
select LOSCFG_ARCH_FPU_VFP_D32
select LOSCFG_ARCH_FPU_VFP_NEON
config LOSCFG_ARCH_CORTEX_A9
bool
select LOSCFG_ARCH_ARM_V7A
select LOSCFG_ARCH_FPU_VFP_V3
select LOSCFG_ARCH_FPU_VFP_D16
config LOSCFG_ARCH_CORTEX_A53_AARCH32
bool
select LOSCFG_ARCH_ARM_V8A_AARCH32
select LOSCFG_ARCH_FPU_VFP_V4
select LOSCFG_ARCH_FPU_VFP_D32
select LOSCFG_ARCH_FPU_VFP_NEON
config LOSCFG_ARCH_CPU
string
default "cortex-m0" if LOSCFG_ARCH_CORTEX_M0
default "cortex-m0plus" if LOSCFG_ARCH_CORTEX_M0_PLUS
default "cortex-m3" if LOSCFG_ARCH_CORTEX_M3
default "cortex-m4" if LOSCFG_ARCH_CORTEX_M4
default "cortex-m7" if LOSCFG_ARCH_CORTEX_M7
default "cortex-m33" if LOSCFG_ARCH_CORTEX_M33
default "cortex-a7" if LOSCFG_ARCH_CORTEX_A7
default "cortex-a9" if LOSCFG_ARCH_CORTEX_A9
default "cortex-a53" if LOSCFG_ARCH_CORTEX_A53_AARCH32

@@ -0,0 +1,34 @@
include $(LITEOSTOPDIR)/config.mk
MODULE_NAME := $(LOSCFG_ARCH_CPU)
LOCAL_SRCS_y := $(wildcard src/canary.c) $(wildcard src/cpu.c) \
$(wildcard src/fault.c) $(wildcard src/mmu.c) \
$(wildcard src/task.c) $(wildcard src/*.S)
LOCAL_INCLUDE := \
-I $(LITEOSTOPDIR)/kernel/extended/include
ifeq ($(LITEOS_ARM_ARCH), -march=armv7-a)
LOCAL_SRCS_y += $(wildcard src/armv7/*.S)
else ifeq ($(LITEOS_ARM_ARCH), -march=armv7-r)
LOCAL_SRCS_y += $(wildcard src/armv7/cache.S)
endif
ifneq ($(LOSCFG_APC_ENABLE), y)
LOCAL_SRCS_y := $(filter-out src/mmu.c, $(LOCAL_SRCS_y))
endif
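# mmu.c is only needed when access permission control (LOSCFG_APC_ENABLE) is
# built in; the filter-out above keeps it out of non-APC builds.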
ifeq ($(LOSCFG_KERNEL_PERF), y)
LOCAL_SRCS_y += $(wildcard src/pmu/armv7_pmu.c)
LOCAL_INCLUDE += -I $(LITEOSTOPDIR)/kernel/extended/src/pmu \
-I $(LITEOSTOPDIR)/kernel/extended/perf
endif
LOCAL_SRCS = $(LOCAL_SRCS_y)
LOCAL_FLAGS := $(LOCAL_INCLUDE) $(LITEOS_GCOV_OPTS)
ifeq ($(LOSCFG_GDB), y)
LOCAL_FLAGS += $(AS_OBJS_LIBC_FLAGS)
endif
include $(MODULE)

@@ -0,0 +1,60 @@
# strip quotation marks from the configuration values
LOSCFG_ARCH_CPU_STRIP := $(subst $\",,$(LOSCFG_ARCH_CPU))
LOSCFG_ARCH_CPU = $(LOSCFG_ARCH_CPU_STRIP)
LOSCFG_ARCH_FPU_STRIP := $(subst $\",,$(LOSCFG_ARCH_FPU))
LOSCFG_ARCH_FPU = $(LOSCFG_ARCH_FPU_STRIP)
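# e.g. .config provides LOSCFG_ARCH_CPU="cortex-a7" (with quotes); after the
# subst above it is used as plain cortex-a7 in -mcpu and library names.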
LITEOS_BASELIB += -l$(LOSCFG_ARCH_CPU)
LIB_SUBDIRS += arch/arm/cortex_a_r
# FPU compile options
ifeq ($(LOSCFG_ARCH_FPU_ENABLE), y)
LITEOS_FLOAT_OPTS := -mfloat-abi=softfp
LITEOS_FPU_OPTS := -mfpu=$(LOSCFG_ARCH_FPU)
else
LITEOS_FLOAT_OPTS := -mfloat-abi=soft
endif
# CPU compile options
LITEOS_CPU_OPTS := -mcpu=$(LOSCFG_ARCH_CPU)$(EXTENSION)
# the gcc libc folder name is composed of the core and the fpu
# for example, cortex-a7 with softfp abi and neon vfp4 is: a7_softfp_neon_vfp4
LITEOS_GCCLIB := $(subst cortex-,,$(LOSCFG_ARCH_CPU))_softfp_$(LOSCFG_ARCH_FPU)
ifeq ($(wildcard $(GCC_GCCLIB_PATH)/$(LITEOS_GCCLIB)),)
LITEOS_GCCLIB :=
endif
LITEOS_CORE_COPTS = $(LITEOS_CPU_OPTS) $(LITEOS_FLOAT_OPTS) $(LITEOS_FPU_OPTS)
LITEOS_INTERWORK += $(LITEOS_CORE_COPTS)
LITEOS_NODEBUG += $(LITEOS_CORE_COPTS)
LITEOS_ASOPTS += $(LITEOS_CPU_OPTS)
LITEOS_CXXOPTS_BASE += $(LITEOS_CORE_COPTS)
LITEOS_COPTS_BASE += -mno-unaligned-access -mthumb-interwork
ifeq ($(LOSCFG_THUMB), y)
LITEOS_COPTS_EXTRA_INTERWORK += -mthumb
LITEOS_CMACRO += -DLOSCFG_INTERWORK_THUMB
endif
ARCH_INCLUDE := -I $(LITEOSTOPDIR)/arch/arm/cortex_a_r/include \
-I $(LITEOSTOPDIR)/arch/arm/cortex_a_r/include/arch \
-I $(LITEOSTOPDIR)/arch/arm/cortex_a_r/src/include
LITEOS_PLATFORM_INCLUDE += $(ARCH_INCLUDE)
LITEOS_CXXINCLUDE += $(ARCH_INCLUDE)
# extra definition for other module
LITEOS_CPU_TYPE = $(LOSCFG_ARCH_CPU)
LITEOS_ARM_ARCH := -march=$(subst $\",,$(LOSCFG_ARCH_ARM_VER))
# linux style macros
LINUX_ARCH_$(LOSCFG_ARCH_ARM_V7A) = -D__LINUX_ARM_ARCH__=7
LINUX_ARCH_$(LOSCFG_ARCH_ARM_V7R) = -D__LINUX_ARM_ARCH__=7
LINUX_ARCH_$(LOSCFG_ARCH_ARM_V8A) = -D__LINUX_ARM_ARCH__=8
LINUX_ARCH_$(LOSCFG_ARCH_ARM_V8R) = -D__LINUX_ARM_ARCH__=8
AS_OBJS_LIBC_FLAGS += $(LINUX_ARCH_y)

@@ -0,0 +1,75 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Aarch32 Assembly Defines and Macros HeadFile
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARCH_ASM_H
#define _ARCH_ASM_H
#include "arch/regs.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#define FUNCTION(x) \
    .global x;      \
    .text;          \
    .code 32;       \
x:
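/*
 * Usage sketch (illustrative; OsExampleEntry is a hypothetical routine name):
 *
 *     FUNCTION(OsExampleEntry)
 *         mov r0, #0
 *         bx  lr
 *
 * expands to the .global/.text/.code 32 directives followed by the
 * OsExampleEntry: label.
 */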
/*
* Used to set current cpu's exception stack pointer.
* The stack distribution is as follows:
* ------------------------------------------
* stackTop | cpu n | ... | cpu 1 | cpu 0 | stackBottom
* ------------------------------------------
* | stackSize | ... | stackSize | stackSize |
*/
.macro EXC_SP_SET stackBottom, stackSize, reg0, reg1
mrc p15, 0, \reg0, c0, c0, 5
and \reg0, \reg0, #MPIDR_CPUID_MASK /* get cpu id */
mov \reg1, #\stackSize
mul \reg1, \reg1, \reg0 /* calculate current cpu stack offset */
#ifdef LOSCFG_LIB_CONFIGURABLE
ldr \reg0, =\stackBottom
ldr \reg0, [\reg0]
#else
ldr \reg0, =\stackBottom
#endif
sub \reg0, \reg0, \reg1 /* calculate current cpu stack bottom */
mov sp, \reg0 /* set sp */
.endm
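/*
 * Equivalent computation in C form (sketch only):
 *     cpuid = MPIDR & MPIDR_CPUID_MASK;
 *     sp    = stackBottom - cpuid * stackSize;
 * so cpu 0's stack sits just below stackBottom and higher cpu ids are placed
 * further toward stackTop, matching the diagram above.
 */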
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_ASM_H */

@@ -0,0 +1,355 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Aarch32 Atomic HeadFile
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARCH_ATOMIC_H
#define _ARCH_ATOMIC_H
#include "los_typedef.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
return *(volatile INT32 *)v;
}
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
*(volatile INT32 *)v = setVal;
}
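/*
 * All read-modify-write helpers below follow the same LDREX/STREX pattern:
 * LDREX loads the value and opens an exclusive monitor on the address, the
 * modified value is written back with STREX, which succeeds (returns 0) only
 * if no other observer touched the location in between; otherwise the loop
 * retries. __builtin_expect marks the retry path as unlikely.
 */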
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
{
INT32 val;
UINT32 status;
do {
__asm__ __volatile__("ldrex %1, [%2]\n"
"add %1, %1, %3\n"
"strex %0, %1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(addVal)
: "cc");
} while (__builtin_expect(status != 0, 0));
return val;
}
STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
{
INT32 val;
UINT32 status;
do {
__asm__ __volatile__("ldrex %1, [%2]\n"
"sub %1, %1, %3\n"
"strex %0, %1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(subVal)
: "cc");
} while (__builtin_expect(status != 0, 0));
return val;
}
STATIC INLINE VOID ArchAtomicInc(Atomic *v)
{
INT32 val;
UINT32 status;
do {
__asm__ __volatile__("ldrex %0, [%3]\n"
"add %0, %0, #1\n"
"strex %1, %0, [%3]"
: "=&r"(val), "=&r"(status), "+m"(*v)
: "r"(v)
: "cc");
} while (__builtin_expect(status != 0, 0));
}
STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
{
INT32 val;
UINT32 status;
do {
__asm__ __volatile__("ldrex %0, [%3]\n"
"add %0, %0, #1\n"
"strex %1, %0, [%3]"
: "=&r"(val), "=&r"(status), "+m"(*v)
: "r"(v)
: "cc");
} while (__builtin_expect(status != 0, 0));
return val;
}
STATIC INLINE VOID ArchAtomicDec(Atomic *v)
{
INT32 val;
UINT32 status;
do {
__asm__ __volatile__("ldrex %0, [%3]\n"
"sub %0, %0, #1\n"
"strex %1, %0, [%3]"
: "=&r"(val), "=&r"(status), "+m"(*v)
: "r"(v)
: "cc");
} while (__builtin_expect(status != 0, 0));
}
STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
{
INT32 val;
UINT32 status;
do {
__asm__ __volatile__("ldrex %0, [%3]\n"
"sub %0, %0, #1\n"
"strex %1, %0, [%3]"
: "=&r"(val), "=&r"(status), "+m"(*v)
: "r"(v)
: "cc");
} while (__builtin_expect(status != 0, 0));
return val;
}
STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
{
INT64 val;
do {
__asm__ __volatile__("ldrexd %0, %H0, [%1]"
: "=&r"(val)
: "r"(v)
: "cc");
} while (0);
return val;
}
STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
{
INT64 tmp;
UINT32 status;
do {
__asm__ __volatile__("ldrexd %1, %H1, [%2]\n"
"strexd %0, %3, %H3, [%2]"
: "=&r"(status), "=&r"(tmp)
: "r"(v), "r"(setVal)
: "cc");
} while (__builtin_expect(status != 0, 0));
}
STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
{
INT64 val;
UINT32 status;
do {
__asm__ __volatile__("ldrexd %1, %H1, [%2]\n"
"adds %Q1, %Q1, %Q3\n"
"adc %R1, %R1, %R3\n"
"strexd %0, %1, %H1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(addVal)
: "cc");
} while (__builtin_expect(status != 0, 0));
return val;
}
STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
{
INT64 val;
UINT32 status;
do {
__asm__ __volatile__("ldrexd %1, %H1, [%2]\n"
"subs %Q1, %Q1, %Q3\n"
"sbc %R1, %R1, %R3\n"
"strexd %0, %1, %H1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(subVal)
: "cc");
} while (__builtin_expect(status != 0, 0));
return val;
}
STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
{
INT64 val;
UINT32 status;
do {
__asm__ __volatile__("ldrexd %0, %H0, [%3]\n"
"adds %Q0, %Q0, #1\n"
"adc %R0, %R0, #0\n"
"strexd %1, %0, %H0, [%3]"
: "=&r"(val), "=&r"(status), "+m"(*v)
: "r"(v)
: "cc");
} while (__builtin_expect(status != 0, 0));
}
STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
{
INT64 val;
UINT32 status;
do {
__asm__ __volatile__("ldrexd %0, %H0, [%3]\n"
"adds %Q0, %Q0, #1\n"
"adc %R0, %R0, #0\n"
"strexd %1, %0, %H0, [%3]"
: "=&r"(val), "=&r"(status), "+m"(*v)
: "r"(v)
: "cc");
} while (__builtin_expect(status != 0, 0));
return val;
}
STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
{
INT64 val;
UINT32 status;
do {
__asm__ __volatile__("ldrexd %0, %H0, [%3]\n"
"subs %Q0, %Q0, #1\n"
"sbc %R0, %R0, #0\n"
"strexd %1, %0, %H0, [%3]"
: "=&r"(val), "=&r"(status), "+m"(*v)
: "r"(v)
: "cc");
} while (__builtin_expect(status != 0, 0));
}
STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
{
INT64 val;
UINT32 status;
do {
__asm__ __volatile__("ldrexd %0, %H0, [%3]\n"
"subs %Q0, %Q0, #1\n"
"sbc %R0, %R0, #0\n"
"strexd %1, %0, %H0, [%3]"
: "=&r"(val), "=&r"(status), "+m"(*v)
: "r"(v)
: "cc");
} while (__builtin_expect(status != 0, 0));
return val;
}
STATIC INLINE INT32 ArchAtomicXchg32bits(Atomic *v, INT32 val)
{
INT32 prevVal;
UINT32 status;
do {
__asm__ __volatile__("ldrex %0, [%3]\n"
"strex %1, %4, [%3]"
: "=&r"(prevVal), "=&r"(status), "+m"(*v)
: "r"(v), "r"(val)
: "cc");
} while (__builtin_expect(status != 0, 0));
return prevVal;
}
STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
{
INT64 prevVal;
UINT32 status;
do {
__asm__ __volatile__("ldrexd %0, %H0, [%3]\n"
"strexd %1, %4, %H4, [%3]"
: "=&r"(prevVal), "=&r"(status), "+m"(*v)
: "r"(v), "r"(val)
: "cc");
} while (__builtin_expect(status != 0, 0));
return prevVal;
}
STATIC INLINE BOOL ArchAtomicCmpXchg32bits(Atomic *v, INT32 val, INT32 oldVal)
{
INT32 prevVal;
UINT32 status;
do {
__asm__ __volatile__("ldrex %0, [%3]\n"
"mov %1, #0\n"
"teq %0, %4\n"
"strexeq %1, %5, [%3]"
: "=&r"(prevVal), "=&r"(status), "+m"(*v)
: "r"(v), "r"(oldVal), "r"(val)
: "cc");
} while (__builtin_expect(status != 0, 0));
return prevVal != oldVal;
}
STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
{
INT64 prevVal;
UINT32 status;
do {
__asm__ __volatile__("ldrexd %0, %H0, [%3]\n"
"mov %1, #0\n"
"teq %0, %4\n"
"teqeq %H0, %H4\n"
"strexdeq %1, %5, %H5, [%3]"
: "=&r"(prevVal), "=&r"(status), "+m"(*v)
: "r"(v), "r"(oldVal), "r"(val)
: "cc");
} while (__builtin_expect(status != 0, 0));
return prevVal != oldVal;
}
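/*
 * Note on the return value (usage sketch; the lock variable is illustrative):
 * both CmpXchg helpers return TRUE when the exchange FAILED (the observed
 * value differed from oldVal), so a retry loop reads naturally:
 *
 *     Atomic lock = 0;
 *     while (ArchAtomicCmpXchg32bits(&lock, 1, 0)) {
 *         // value was not 0; spin until we swap 0 -> 1
 *     }
 */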
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_ATOMIC_H */

@@ -0,0 +1,61 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
* Description: Barrier HeadFile
* Author: Huawei LiteOS Team
* Create: 2020-01-14
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARCH_BARRIER_H
#define _ARCH_BARRIER_H
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#define SEV() __asm__ volatile("sev" : : : "memory")
#define WFE() __asm__ volatile("wfe" : : : "memory");
#define WFI() __asm__ volatile("wfi" : : : "memory");
#define DSB() __asm__ volatile("dsb" : : : "memory")
#define DMB() __asm__ volatile("dmb" : : : "memory")
#define ISB() __asm__ volatile("isb" : : : "memory")
#define BARRIER() __asm__ volatile("":::"memory")
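/*
 * Quick reference: DMB only orders memory accesses against each other; DSB
 * additionally waits until outstanding accesses complete; ISB flushes the
 * pipeline so that later instructions see prior context changes. BARRIER()
 * is a compiler-only barrier and emits no instruction.
 */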
/* Old Style APIs */
#define sev SEV
#define wfe WFE
#define wfi WFI
#define dsb DSB
#define dmb DMB
#define isb ISB
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_BARRIER_H */

@@ -0,0 +1,64 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
* Description: Cache operations HeadFile
* Author: Huawei LiteOS Team
* Create: 2020-01-14
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARCH_CACHE_H
#define _ARCH_CACHE_H
#include "los_typedef.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
extern VOID ArchDCacheCleanByAddr(UINTPTR start, UINTPTR end);
extern VOID ArchDCacheInvByAddr(UINTPTR start, UINTPTR end);
/* Old Style APIs */
STATIC INLINE VOID flush_icache(VOID)
{
/*
* Use ICIALLUIS instead of ICIALLU. ICIALLUIS operates on all processors in the Inner
* shareable domain of the processor that performs the operation.
*/
__asm__ __volatile__ ("mcr p15, 0, %0, c7, c1, 0" : : "r" (0) : "memory");
}
STATIC INLINE VOID flush_dcache(UINTPTR start, UINTPTR end)
{
ArchDCacheCleanByAddr(start, end);
}
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_CACHE_H */

@@ -0,0 +1,79 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
* Description: Canary Defines Headfile
* Author: Huawei LiteOS Team
* Create: 2020-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
/**
* @defgroup canary
* @ingroup kernel
*/
#ifndef _ARCH_CANARY_H
#define _ARCH_CANARY_H
#include "los_typedef.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#ifdef __GNUC__
extern UINTPTR __stack_chk_guard;
/**
 * @ingroup canary
* @brief Stack protector canaries value init.
*
* @par Description:
 * This API is used to initialize the canary value __stack_chk_guard when the stack
 * protector compiling option -fstack-protector-strong or -fstack-protector-all is enabled.
*
* @attention
* <ul>
 * <li>This API is a weak function. We recommend replacing it with an implementation
 * that derives the __stack_chk_guard value from a true random number generator.</li>
* </ul>
*
* @param None.
*
* @retval None.
* @par Dependency:
 * <ul><li>arch/canary.h: the header file that contains the API declaration.</li></ul>
* @see none
* @since Huawei LiteOS V200R005C00
*/
extern VOID ArchStackGuardInit(VOID);
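/*
 * Override sketch (illustrative; MyTrngRead32 is a hypothetical TRNG driver):
 *
 *     VOID ArchStackGuardInit(VOID)
 *     {
 *         __stack_chk_guard = (UINTPTR)MyTrngRead32();
 *     }
 */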
#endif
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_CANARY_H */

@@ -0,0 +1,90 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2018-2020. All rights reserved.
* Description: CPU Operations HeadFile
* Author: Huawei LiteOS Team
* Create: 2018-08-21
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
/**
* @defgroup cpu
* @ingroup kernel
*/
#ifndef _ARCH_CPU_H
#define _ARCH_CPU_H
#include "los_typedef.h"
#include "arch/regs.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
typedef struct {
const UINT32 partNo;
const CHAR *cpuName;
} CpuVendor;
extern UINT64 g_cpuMap[];
#define CPU_MAP_GET(cpuid) g_cpuMap[(cpuid)]
#define CPU_MAP_SET(cpuid, hwid) (g_cpuMap[(cpuid)] = (hwid))
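/*
 * g_cpuMap maps logical cpu ids to hardware affinity (MPIDR) values; e.g.
 * CPU_MAP_SET(0, OsHwIDGet()) would record the booting CPU's hardware id
 * (illustrative usage).
 */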
extern const CHAR *ArchCpuInfo(VOID);
STATIC INLINE UINT32 ArchCurrCpuid(VOID)
{
#ifdef LOSCFG_KERNEL_SMP
return ARM_SYSREG_READ(MPIDR) & MPIDR_CPUID_MASK;
#else
return 0;
#endif
}
STATIC INLINE UINT64 OsHwIDGet(VOID)
{
return ARM_SYSREG_READ(MPIDR);
}
STATIC INLINE UINT32 OsMainIDGet(VOID)
{
return ARM_SYSREG_READ(MIDR);
}
STATIC INLINE UINT32 ArchSPGet(VOID)
{
UINT32 val;
__asm__ __volatile__("mov %0, sp" : "=r"(val));
return val;
}
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_CPU_H */

@@ -0,0 +1,178 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved.
* Description: Aarch32 Exception HeadFile
* Author: Huawei LiteOS Team
* Create: 2019-10-10
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARCH_EXCEPTION_H
#define _ARCH_EXCEPTION_H
#include "arch/regs.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
/* Define exception type ID */
#define OS_EXCEPT_RESET 0x00
#define OS_EXCEPT_UNDEF_INSTR 0x01
#define OS_EXCEPT_SWI 0x02
#define OS_EXCEPT_PREFETCH_ABORT 0x03
#define OS_EXCEPT_DATA_ABORT 0x04
#define OS_EXCEPT_FIQ 0x05
#define OS_EXCEPT_ADDR_ABORT 0x06
#define OS_EXCEPT_IRQ 0x07
/* Define core num */
#ifdef LOSCFG_KERNEL_SMP
#define CORE_NUM LOSCFG_KERNEL_SMP_CORE_NUM
#else
#define CORE_NUM 1
#endif
/* Initial 32-bit stack fill value. */
#define OS_STACK_INIT 0xCACACACA
/* 32-bit stack top magic number. */
#define OS_STACK_MAGIC_WORD 0xCCCCCCCC
#ifdef LOSCFG_GDB
#define OS_EXC_UNDEF_STACK_SIZE 512
#define OS_EXC_ABT_STACK_SIZE 512
#else
#define OS_EXC_UNDEF_STACK_SIZE 40
#define OS_EXC_ABT_STACK_SIZE 40
#endif
#define OS_EXC_FIQ_STACK_SIZE 64
#define OS_EXC_IRQ_STACK_SIZE 64
#define OS_EXC_SVC_STACK_SIZE 0x2000
#define OS_EXC_STACK_SIZE 0x1000
#ifndef __ASSEMBLER__
extern UINTPTR __fiq_stack_top;
extern UINTPTR __svc_stack_top;
extern UINTPTR __abt_stack_top;
extern UINTPTR __undef_stack_top;
extern UINTPTR __exc_stack_top;
extern UINTPTR __irq_stack_top;
extern UINTPTR __fiq_stack;
extern UINTPTR __svc_stack;
extern UINTPTR __abt_stack;
extern UINTPTR __undef_stack;
extern UINTPTR __exc_stack;
extern UINTPTR __irq_stack;
typedef struct {
UINT32 regCPSR; /**< Current program status register (CPSR) */
UINT32 R0; /**< Register R0 */
UINT32 R1; /**< Register R1 */
UINT32 R2; /**< Register R2 */
UINT32 R3; /**< Register R3 */
UINT32 R4; /**< Register R4 */
UINT32 R5; /**< Register R5 */
UINT32 R6; /**< Register R6 */
UINT32 R7; /**< Register R7 */
UINT32 R8; /**< Register R8 */
UINT32 R9; /**< Register R9 */
UINT32 R10; /**< Register R10 */
UINT32 R11; /**< Register R11 */
UINT32 R12; /**< Register R12 */
UINT32 SP; /**< Stack pointer */
UINT32 LR; /**< Program return address */
UINT32 PC; /**< PC at the moment the exception occurred */
} ExcContext;
typedef struct {
UINT16 phase; /**< Phase in which an exception occurs */
UINT16 type; /**< Exception type */
UINT16 nestCnt; /**< Count of nested exception */
UINT16 reserved; /**< Reserved for alignment */
ExcContext *context; /**< Hardware context when an exception occurs */
} ExcInfo;
#define ArchGetFp() ({ \
UINTPTR _regFp; \
__asm__ __volatile__("mov %0, fp" : "=r"(_regFp)); \
_regFp; \
})
typedef VOID (*EXC_PROC_FUNC)(UINT32, ExcContext*);
UINT32 ArchSetExcHook(EXC_PROC_FUNC excHook);
#define LOS_ExcRegHook ArchSetExcHook
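/*
 * Registration sketch (hypothetical handler; PRINT_ERR stands in for any
 * logging routine available on the platform):
 *
 *     VOID MyExcHook(UINT32 excType, ExcContext *ctx)
 *     {
 *         PRINT_ERR("exception 0x%x at PC 0x%x\n", excType, ctx->PC);
 *     }
 *     (VOID)ArchSetExcHook(MyExcHook);
 */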
STATIC INLINE VOID ArchHaltCpu(VOID)
{
__asm__ __volatile__("swi 0");
}
VOID ArchBackTraceWithSp(const VOID *stackPointer);
VOID ArchBackTrace(VOID);
VOID ArchExcInit(VOID);
UINT32 ArchBackTraceGet(UINTPTR fp, UINTPTR *callChain, UINT32 maxDepth);
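/*
 * Backtrace sketch (hypothetical buffer size): walk the frame chain from
 * the current frame pointer and record up to 16 return addresses.
 *
 *     UINTPTR callChain[16];
 *     UINT32 depth = ArchBackTraceGet(ArchGetFp(), callChain, 16);
 */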
STATIC INLINE UINT32 OsGetDFSR(VOID)
{
UINT32 regDFSR;
__asm__ __volatile__("mrc p15, 0, %0, c5, c0, 0"
: "=r"(regDFSR));
return regDFSR;
}
STATIC INLINE UINT32 OsGetIFSR(VOID)
{
UINT32 regIFSR;
__asm__ __volatile__("mrc p15, 0, %0, c5, c0, 1"
: "=r"(regIFSR));
return regIFSR;
}
STATIC INLINE UINT32 OsGetDFAR(VOID)
{
UINT32 regDFAR;
__asm__ __volatile__("mrc p15, 0, %0, c6, c0, 0"
: "=r"(regDFAR));
return regDFAR;
}
STATIC INLINE UINT32 OsGetIFAR(VOID)
{
UINT32 regIFAR;
__asm__ __volatile__("mrc p15, 0, %0, c6, c0, 2"
: "=r"(regIFAR));
return regIFAR;
}
#endif /* __ASSEMBLER__ */
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_EXCEPTION_H */

@ -0,0 +1,120 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
* Description: Interrupt Operations HeadFile
* Author: Huawei LiteOS Team
* Create: 2020-01-14
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARCH_INTERRUPT_H
#define _ARCH_INTERRUPT_H
#include "los_typedef.h"
#include "arch/regs.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#define LOSCFG_ARM_ARCH __ARM_ARCH
/* CPU interrupt mask handle implementation */
#if LOSCFG_ARM_ARCH >= 6
STATIC INLINE UINT32 ArchIntLock(VOID)
{
UINT32 intSave;
__asm__ __volatile__(
"mrs %0, cpsr \n"
"cpsid if"
: "=r"(intSave)
:
: "memory");
return intSave;
}
STATIC INLINE UINT32 ArchIntUnlock(VOID)
{
UINT32 intSave;
__asm__ __volatile__(
"mrs %0, cpsr \n"
"cpsie if"
: "=r"(intSave)
:
: "memory");
return intSave;
}
#else
STATIC INLINE UINT32 ArchIntLock(VOID)
{
UINT32 intSave, temp;
__asm__ __volatile__(
"mrs %0, cpsr \n"
"orr %1, %0, #0xc0 \n"
"msr cpsr_c, %1"
: "=r"(intSave), "=r"(temp)
:
: "memory");
return intSave;
}
STATIC INLINE UINT32 ArchIntUnlock(VOID)
{
UINT32 intSave;
__asm__ __volatile__(
"mrs %0, cpsr \n"
"bic %0, %0, #0xc0 \n"
"msr cpsr_c, %0"
: "=r"(intSave)
:
: "memory");
return intSave;
}
#endif
STATIC INLINE VOID ArchIntRestore(UINT32 intSave)
{
__asm__ __volatile__("msr cpsr_c, %0" : :"r"(intSave) :"memory");
}
STATIC INLINE UINT32 ArchIntLocked(VOID)
{
UINT32 intSave;
__asm__ __volatile__("mrs %0, cpsr" :"=r" (intSave) : :"memory", "cc");
return intSave & PSR_I_BIT;
}
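/*
 * Typical critical-section pattern built on the primitives above (a sketch;
 * application code normally goes through the kernel's LOS_IntLock wrappers):
 *
 *     UINT32 intSave = ArchIntLock();  // mask IRQ/FIQ, keep the old CPSR
 *     ...                              // touch data shared with ISRs
 *     ArchIntRestore(intSave);         // restore the saved mask state
 */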
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_INTERRUPT_H */

@ -0,0 +1,475 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: ARMv7 Mmu HeadFile
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
/**
* @defgroup mmu
* @ingroup kernel
*/
#ifndef _ARCH_MMU_H
#define _ARCH_MMU_H
#include "los_typedef.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#define UNCACHEABLE 0
#define CACHEABLE 1
#define UNBUFFERABLE 0
#define BUFFERABLE 1
#define EXECUTABLE 0
#define NON_EXECUTABLE 1
#define ACCESS_RW 3U /* ap = 0 ap1 = 0b11 */
#define ACCESS_RO 7U /* ap = 1 ap1 = 0b11 */
#define ACCESS_NA 0U /* ap = 0 ap1 = 0 */
#define D_MANAGER 0
#define D_CLIENT 1
#define D_NA 2
#define DOMAIN0 0
#define DOMAIN1 1
#define DOMAIN2 2
#define MMU_AP_STATE(flag) (((flag) & 0x1U) ? ACCESS_RW : ACCESS_RO)
#define MMU_CACHE_STATE(flag) (((flag) >> 1) & 0x1U)
#define MMU_BUFFER_STATE(flag) (((flag) >> 2) & 0x1U)
#define MMU_EXECUTE_STATE(flag) (((flag) >> 3) & 0x1U)
#define MMU_GET_AREA(flag) ((flag) & (0x1U << 4))
#define MMU_DESC_LEN 4
#define MMU_DESC_OFFSET(vBase) ((vBase) << 2)
/**
* @ingroup mmu
* The access permission mode is read-only.
*/
#define ACCESS_PERM_RO_RO 0
/**
* @ingroup mmu
* The access permission mode is read and write.
*/
#define ACCESS_PERM_RW_RW (1U << 0)
/**
* @ingroup mmu
* The cache enabled.
*/
#define CACHE_ENABLE (1U << 1)
/**
* @ingroup mmu
* The cache disabled.
*/
#define CACHE_DISABLE 0
/**
* @ingroup mmu
* The buffer enabled.
*/
#define BUFFER_ENABLE (1U << 2)
/**
* @ingroup mmu
* The buffer disabled.
*/
#define BUFFER_DISABLE 0
/**
* @ingroup mmu
* Set it non-executable.
*/
#define EXEC_DISABLE (1U << 3)
/**
* @ingroup mmu
* Set it executable.
*/
#define EXEC_ENABLE 0
/**
* @ingroup mmu
* The first section(1M/item).
*/
#define FIRST_SECTION (1U << 4)
/**
* @ingroup mmu
* The second page(4K/item).
*/
#define SECOND_PAGE 0
#ifdef LOSCFG_KERNEL_SMP
#define MMU_SHAREABLE 1
#else
#define MMU_SHAREABLE 0
#endif
#define MMU_1K 0x400U
#define MMU_4K 0x1000U
#define MMU_16K 0x4000U
#define MMU_64K 0x10000U
#define MMU_1M 0x100000U
#define MMU_4G 0x100000000ULL
#define SHIFT_1K 10
#define SHIFT_4K 12
#define SHIFT_16K 14
#define SHIFT_64K 16
#define SHIFT_1M 20
#define SHIFT_2M 21
#define SHIFT_1G 30
/**
* @ingroup mmu
* mmu second page information structure.
*
*/
typedef struct {
UINT32 page_addr; /* The second page start addr */
UINT32 page_length; /* The second page length */
/*
 * The storage address of the second-level page table;
 * different second-level page tables must not overlap
 */
UINT32 page_descriptor_addr;
/*
 * The second page type. It can be set to the small page ID (4K),
 * MMU_SECOND_LEVEL_SMALL_PAGE_TABLE_ID, or the big page ID (64K),
 * MMU_SECOND_LEVEL_BIG_PAGE_TABLE_ID
 */
UINT32 page_type;
} SENCOND_PAGE;
/**
* @ingroup mmu
* mmu param setting information structure.
*
*/
typedef struct {
UINT32 startAddr; /* Starting address of a section. */
UINT32 endAddr; /* Ending address of a section. */
/*
* Mode set.
* bit0: ACCESS_PERM_RW_RW/ACCESS_PERM_RO_RO(1/0)
* bit1: CACHE_ENABLE/CACHE_DISABLE(1/0)
* bit2: BUFFER_ENABLE/BUFFER_DISABLE(1/0)
* bit3: EXEC_DISABLE/EXEC_ENABLE(1/0)
* bit4: FIRST_SECTION/SECOND_PAGE(1/0)
* bit5~7: ignore
*/
UINT32 uwFlag;
/*
 * the target second-page object;
 * if uwFlag selects FIRST_SECTION, stPage is ignored and may be set to NULL
 */
SENCOND_PAGE *stPage;
} MMU_PARAM;
/* ARM Domain Access Control Bit Masks */
#define ACCESS_TYPE_NO_ACCESS(domainNum) (0x0U << ((domainNum) << 1))
#define ACCESS_TYPE_CLIENT(domainNum) (0x1U << ((domainNum) << 1))
#define ACCESS_TYPE_MANAGER(domainNum) (0x3U << ((domainNum) << 1))
#define MMU_FIRST_LEVEL_FAULT_ID 0x0
#define MMU_FIRST_LEVEL_PAGE_TABLE_ID 0x1
#define MMU_FIRST_LEVEL_SECTION_ID 0x2
#define MMU_FIRST_LEVEL_RESERVED_ID 0x3
/**
* @ingroup mmu
* The second page type select 64K
*/
#define MMU_SECOND_LEVEL_BIG_PAGE_TABLE_ID 0x1U
/**
* @ingroup mmu
* The second page type select 4K
*/
#define MMU_SECOND_LEVEL_SMALL_PAGE_TABLE_ID 0x2U
struct MMUFirstLevelFault {
UINT32 id : 2; /* [1 : 0] */
UINT32 sbz : 30; /* [31 : 2] */
};
struct MMUFirstLevelPageTable {
UINT32 id : 2; /* [1 : 0] */
UINT32 pxn : 1; /* [2] */
UINT32 ns : 1; /* [3] */
UINT32 sbz : 1; /* [4] */
UINT32 domain : 4; /* [8 : 5] */
UINT32 imp : 1; /* [9] */
UINT32 baseAddress : 22; /* [31 : 10] */
};
struct MMUSecondLevelBigPageTable {
UINT32 id : 2; /* [1 : 0] */
UINT32 b : 1; /* [2] */
UINT32 c : 1; /* [3] */
UINT32 ap1 : 2; /* [5 : 4] */
UINT32 sbz : 3; /* [8 : 6] */
UINT32 ap : 1; /* [9] */
UINT32 s : 1; /* [10] */
UINT32 ng : 1; /* [11] */
UINT32 tex : 3; /* [14 : 12] */
UINT32 xn : 1; /* [15] */
UINT32 baseAddress : 16; /* [31 : 16] */
};
struct MMUSecondLevelSmallPageTable {
UINT32 xn : 1; /* [0] */
UINT32 id : 1; /* [1] */
UINT32 b : 1; /* [2] */
UINT32 c : 1; /* [3] */
UINT32 ap1 : 2; /* [5 : 4] */
UINT32 tex : 3; /* [8 : 6] */
UINT32 ap : 1; /* [9] */
UINT32 s : 1; /* [10] */
UINT32 ng : 1; /* [11] */
UINT32 baseAddress : 20; /* [31 : 12] */
};
struct MMUFirstLevelSection {
UINT32 id : 2; /* [1 : 0] */
UINT32 b : 1; /* [2] */
UINT32 c : 1; /* [3] */
UINT32 xn : 1; /* [4] */
UINT32 domain : 4; /* [8 : 5] */
UINT32 imp : 1; /* [9] */
UINT32 ap1 : 2; /* [11 : 10] */
UINT32 tex : 3; /* [14 : 12] */
UINT32 ap : 1; /* [15] */
UINT32 s : 1; /* [16] */
UINT32 ng : 1; /* [17] */
UINT32 revs : 1; /* [18] */
UINT32 ns : 1; /* [19] */
UINT32 baseAddress : 12; /* [31 : 20] */
};
struct MMUFirstLevelReserved {
UINT32 id : 2; /* [1 : 0] */
UINT32 sbz : 30; /* [31 : 2] */
};
#define X_MMU_SET_AP_ALL(item, access) do { \
(item).ap1 = (access) & 0x3U; \
(item).ap = (access) >> 2; \
} while (0)
#define X_MMU_SET_BCX(item, buff, cache, exeNever) do { \
(item).b = (buff); \
(item).c = (cache); \
(item).xn = (exeNever); \
} while (0)
#define X_MMU_CHG_DESC(aBase, vBase, size, baseAddress, tableBase) do { \
UINT32 i, j, k; \
k = (tableBase) + MMU_DESC_OFFSET(vBase); \
for (j = (aBase), i = 0; i < (size); ++i, ++j, k += MMU_DESC_LEN) { \
(baseAddress) = j; \
*(UINTPTR *)(UINTPTR)k = desc.word; \
} \
} while (0)
#define X_MMU_CHG_DESC_64K(aBase, vBase, size, baseAddress, tableBase) do { \
UINT32 i, j, k, n; \
k = (tableBase) + MMU_DESC_OFFSET(vBase); \
for (j = (aBase), i = 0; i < (size); ++i, ++j) { \
(baseAddress) = j; \
for (n = 0; n < (MMU_64K / MMU_4K); ++n, k += MMU_DESC_LEN) { \
*(UINTPTR *)(UINTPTR)k = desc.word; \
} \
} \
} while (0)
#define SECTION_CHANGE(item, cache, buff, access, exeNever) do { \
union MMUFirstLevelDescriptor desc; \
desc.word = (*(UINTPTR *)(item)); \
desc.section.s = (MMU_SHAREABLE); \
X_MMU_SET_BCX(desc.section, (buff), (cache), (exeNever)); \
X_MMU_SET_AP_ALL(desc.section, (access)); \
(*(UINTPTR *)(UINTPTR)(item)) = desc.word; \
} while (0)
#define X_MMU_SECTION(aBase, vBase, size, cache, buff, access, exeNever, sdomain) do { \
union MMUFirstLevelDescriptor desc = { .word = 0 }; \
desc.section.id = MMU_FIRST_LEVEL_SECTION_ID; \
desc.section.s = (MMU_SHAREABLE); \
desc.section.domain = (sdomain); \
X_MMU_SET_BCX(desc.section, (buff), (cache), (exeNever)); \
X_MMU_SET_AP_ALL(desc.section, (access)); \
X_MMU_CHG_DESC(aBase, vBase, size, desc.section.baseAddress, ttbBase); \
} while (0)
#define X_MMU_ONE_LEVEL_PAGE(aBase, vBase, size, sdomain) do { \
union MMUFirstLevelDescriptor desc = { .word = 0 }; \
desc.pageTable.id = MMU_FIRST_LEVEL_PAGE_TABLE_ID; \
desc.pageTable.domain = (sdomain); \
X_MMU_CHG_DESC(aBase, vBase, size, desc.pageTable.baseAddress, ttbBase); \
} while (0)
#define X_MMU_TWO_LEVEL_PAGE(aBase, vBase, size, cache, buff, access, exeNever) do { \
union MMUFirstLevelDescriptor desc = { .word = 0 }; \
desc.smallPageTable.id = MMU_SECOND_LEVEL_SMALL_PAGE_TABLE_ID >> 1; \
desc.smallPageTable.s = (MMU_SHAREABLE); \
X_MMU_SET_BCX(desc.smallPageTable, (buff), (cache), (exeNever)); \
X_MMU_SET_AP_ALL(desc.smallPageTable, (access)); \
X_MMU_CHG_DESC(aBase, vBase, size, desc.smallPageTable.baseAddress, sttBase); \
} while (0)
#define X_MMU_TWO_LEVEL_PAGE64K(aBase, vBase, size, cache, buff, access, exeNever) do { \
union MMUFirstLevelDescriptor desc = { .word = 0 }; \
desc.bigPageTable.id = MMU_SECOND_LEVEL_BIG_PAGE_TABLE_ID; \
desc.bigPageTable.s = (MMU_SHAREABLE); \
X_MMU_SET_BCX(desc.bigPageTable, (buff), (cache), (exeNever)); \
X_MMU_SET_AP_ALL(desc.bigPageTable, (access)); \
X_MMU_CHG_DESC_64K(aBase, vBase, size, desc.bigPageTable.baseAddress, sttBase); \
} while (0)
union MMUFirstLevelDescriptor {
UINTPTR word;
struct MMUFirstLevelFault fault;
struct MMUFirstLevelPageTable pageTable;
struct MMUFirstLevelSection section;
struct MMUFirstLevelReserved reserved;
struct MMUSecondLevelSmallPageTable smallPageTable;
struct MMUSecondLevelBigPageTable bigPageTable;
};
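/*
 * Note: the X_MMU_* mapping macros expect a variable named ttbBase
 * (first-level translation table base) to be in scope at the call site,
 * and the two-level variants expect sttBase likewise; X_MMU_CHG_DESC also
 * relies on a local union MMUFirstLevelDescriptor named desc. Illustrative
 * call (hypothetical section numbers and table symbol, 1M per section):
 *
 *     UINT32 ttbBase = (UINT32)(UINTPTR)&g_firstPageTable;
 *     X_MMU_SECTION(0x800, 0x800, 16, CACHEABLE, BUFFERABLE,
 *                   ACCESS_RW, EXECUTABLE, DOMAIN0);
 */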
STATIC INLINE VOID EnableAPCheck(VOID)
{
UINT32 regDACR = ACCESS_TYPE_MANAGER(0) |
ACCESS_TYPE_CLIENT(1);
__asm volatile("mcr p15, 0, %0, c3, c0, 0"
:
: "r"(regDACR));
}
STATIC INLINE VOID DisableAPCheck(VOID)
{
UINT32 regDACR = ACCESS_TYPE_MANAGER(0) |
ACCESS_TYPE_MANAGER(1);
__asm volatile("mcr p15, 0, %0, c3, c0, 0"
:
: "r"(regDACR));
}
STATIC INLINE VOID CleanTLB(VOID)
{
/* replace TLBIALL with TLBIALLIS for multi-core sync */
__asm volatile("mov %0, #0\n"
"mcr p15, 0, %0, c8, c3, 0\n"
:
: "r"(0));
}
/* Second page struct for OS and App */
extern SENCOND_PAGE g_mmuOsPage;
extern SENCOND_PAGE g_mmuAppPage;
#ifdef LOSCFG_KERNEL_DYNLOAD
extern SENCOND_PAGE g_mmuDlPage;
#endif
/**
* @ingroup mmu
* @brief Memory Management Unit Second page memory map.
*
* @par Description:
* This API is used to set the second page memory map of a section that is specified by
* a starting address and ending address.
* @attention
* <ul>
* <li>The passed-in starting address and ending address must be aligned on a boundary of 1M.
* The access permission mode can only be set to ACCESS_PERM_RO_RO or ACCESS_PERM_RW_RW.
* </li>
* </ul>
*
* @param page [IN] Param for enabling the second page; the struct contains the members below:
* page_addr: Starting address of the section.
* page_length: Length of the section.
* page_descriptor_addr: Second page descriptor address for the section.
* page_type: The second page type, MMU_SECOND_LEVEL_SMALL_PAGE_TABLE_ID
* or MMU_SECOND_LEVEL_BIG_PAGE_TABLE_ID.
* @param flag [IN] The mode of the section.
*
* @retval None.
* @par Dependency:
* <ul><li>mmu.h: the header file that contains the API declaration.</li></ul>
* @see
* @since Huawei LiteOS V100R001C00
*/
VOID ArchSecPageEnable(SENCOND_PAGE *page, UINT32 flag);
/**
* @ingroup mmu
* @brief Memory Management Unit Cache/Buffer/Access Permission Setting.
*
* @par Description:
* This API is used to set the Cache/Buffer/access permission mode of a section that is specified by
* a starting address and ending address.
* @attention
* <ul>
* <li>The passed-in starting address and ending address must be aligned on a boundary of 4K or 1M.
* The access permission mode can only be set to ACCESS_PERM_RO_RO or ACCESS_PERM_RW_RW.
* </li>
* </ul>
*
* @param MMU_PARAM [IN] param for mmu setting, the struct contains below members.
* startAddr: Starting address of a section.
* endAddr: Ending address of a section.
* uwFlag: Mode set. Each function is controlled by one bit.
* bit0: ACCESS_PERM_RW_RW/ACCESS_PERM_RO_RO(1/0).
* bit1: CACHE_ENABLE/CACHE_DISABLE(1/0).
* bit2: BUFFER_ENABLE/BUFFER_DISABLE(1/0).
* bit3: EXEC_DISABLE/EXEC_ENABLE(1/0).
* bit4: FIRST_SECTION/SECOND_PAGE(1/0); must match the memory type described above.
* bit5~7: ignored.
* stPage: The target second-page object; if uwFlag bit4 selects FIRST_SECTION,
* stPage is ignored and may be set to NULL.
*
* @retval None.
* @par Dependency:
* <ul><li>mmu.h: the header file that contains the API declaration.</li></ul>
* @see
* @since Huawei LiteOS V100R001C00
*/
VOID ArchMMUParamSet(MMU_PARAM *mPara);
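/*
 * Usage sketch (hypothetical, 1M-aligned addresses): remap one 1M section
 * as read-only, cached and bufferable, using a first-level (section) mapping.
 *
 *     MMU_PARAM para;
 *     para.startAddr = 0x80000000;
 *     para.endAddr   = 0x80100000;
 *     para.uwFlag    = ACCESS_PERM_RO_RO | CACHE_ENABLE | BUFFER_ENABLE |
 *                      EXEC_ENABLE | FIRST_SECTION;
 *     para.stPage    = NULL; // ignored for FIRST_SECTION mappings
 *     ArchMMUParamSet(&para);
 */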
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_MMU_H */

@ -0,0 +1,50 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved.
* Description: Arm-a Mmu Inner HeadFile
* Author: Huawei LiteOS Team
* Create: 2019-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARCH_MMU_PRI_H
#define _ARCH_MMU_PRI_H
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
extern VOID OsAppSecPteInit(UINTPTR startAddr, UINTPTR len, UINT32 flag, UINT32 pageType);
extern VOID OsSysSecPteInit(VOID);
extern VOID OsNoCachedRemap(UINTPTR physAddr, size_t size);
extern VOID OsCachedRemap(UINTPTR physAddr, size_t size);
extern VOID ArchCodeProtect(VOID);
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_MMU_PRI_H */

@ -0,0 +1,58 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
* Description: LiteOS Arch Perf HeadFile
* Author: Huawei LiteOS Team
* Create: 2020-09-10
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _PERF_H
#define _PERF_H
#include "los_typedef.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#define OsPerfArchFetchCallerRegs(regs) \
do { \
(regs)->pc = (UINTPTR)__builtin_return_address(0); \
(regs)->fp = (UINTPTR)__builtin_frame_address(0); \
} while (0)
#define OsPerfArchFetchIrqRegs(regs, tcb) \
do { \
(regs)->pc = (tcb)->pc; \
(regs)->fp = (tcb)->fp; \
} while (0)
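/*
 * Both macros assume a register-snapshot structure with pc and fp members
 * (the concrete type comes from the perf module; PerfRegs below is an
 * assumption used for illustration):
 *
 *     PerfRegs regs;
 *     OsPerfArchFetchCallerRegs(&regs); // pc/fp of the calling frame
 */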
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _PERF_H */

@ -0,0 +1,175 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
* Description: CPU Register Defines Headfile
* Author: Huawei LiteOS Team
* Create: 2020-01-14
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
/**
* @defgroup sys reg
* @ingroup kernel
*/
#ifndef _ARCH_REGS_H
#define _ARCH_REGS_H
#ifndef __ASSEMBLER__
#include "arch/barrier.h"
#include "los_typedef.h"
#endif /* __ASSEMBLER__ */
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#ifndef __ASSEMBLER__
#define ARM_SYSREG_READ(REG) \
({ \
UINT32 _val; \
__asm__ volatile("mrc " REG : "=r" (_val)); \
_val; \
})
#define ARM_SYSREG_WRITE(REG, val) \
({ \
__asm__ volatile("mcr " REG :: "r" (val)); \
ISB(); \
})
#define ARM_SYSREG64_READ(REG) \
({ \
UINT64 _val; \
__asm__ volatile("mrrc " REG : "=r" (_val)); \
_val; \
})
#define ARM_SYSREG64_WRITE(REG, val) \
({ \
__asm__ volatile("mcrr " REG :: "r" (val)); \
ISB(); \
})
#define CP14_REG(CRn, Op1, CRm, Op2) "p14, "#Op1", %0, "#CRn","#CRm","#Op2
#define CP15_REG(CRn, Op1, CRm, Op2) "p15, "#Op1", %0, "#CRn","#CRm","#Op2
#define CP15_REG64(CRn, Op1) "p15, "#Op1", %0, %H0,"#CRn
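/*
 * Example expansion (illustrative): ARM_SYSREG_READ(MIDR) pastes the
 * CP15_REG string into the mrc template above and emits
 *     mrc p15, 0, <Rt>, c0, c0, 0
 * i.e. it reads the Main ID Register into a general-purpose register.
 */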
/*
* Identification registers (c0)
*/
#define MIDR CP15_REG(c0, 0, c0, 0) /* Main ID Register */
#define MPIDR CP15_REG(c0, 0, c0, 5) /* Multiprocessor Affinity Register */
#define CCSIDR CP15_REG(c0, 1, c0, 0) /* Cache Size ID Registers */
#define CLIDR CP15_REG(c0, 1, c0, 1) /* Cache Level ID Register */
#define VPIDR CP15_REG(c0, 4, c0, 0) /* Virtualization Processor ID Register */
#define VMPIDR CP15_REG(c0, 4, c0, 5) /* Virtualization Multiprocessor ID Register */
/*
* System control registers (c1)
*/
#define SCTLR CP15_REG(c1, 0, c0, 0) /* System Control Register */
#define ACTLR CP15_REG(c1, 0, c0, 1) /* Auxiliary Control Register */
#define CPACR CP15_REG(c1, 0, c0, 2) /* Coprocessor Access Control Register */
/*
* Memory protection and control registers (c2 & c3)
*/
#define TTBR0 CP15_REG(c2, 0, c0, 0) /* Translation Table Base Register 0 */
#define TTBR1 CP15_REG(c2, 0, c0, 1) /* Translation Table Base Register 1 */
#define TTBCR CP15_REG(c2, 0, c0, 2) /* Translation Table Base Control Register */
#define DACR CP15_REG(c3, 0, c0, 0) /* Domain Access Control Register */
/*
* Memory system fault registers (c5 & c6)
*/
#define DFSR CP15_REG(c5, 0, c0, 0) /* Data Fault Status Register */
#define IFSR CP15_REG(c5, 0, c0, 1) /* Instruction Fault Status Register */
#define DFAR CP15_REG(c6, 0, c0, 0) /* Data Fault Address Register */
#define IFAR CP15_REG(c6, 0, c0, 2) /* Instruction Fault Address Register */
/*
* Cache maintenance, address translation, and other functions
*/
#define ICIALLUIS CP15_REG(c7, 0, c1, 0) /* Instruction Cache Invalidate All to PoU,
Inner Shareable */
#define BPIALLIS CP15_REG(c7, 0, c1, 6) /* Branch Predictor Invalidate All,
Inner Shareable */
#define ICIALLU CP15_REG(c7, 0, c5, 0) /* Instruction Cache Invalidate All to PoU */
#define ICIMVAU CP15_REG(c7, 0, c5, 1) /* Instruction Cache Invalidate by MVA to PoU */
#define BPIALL CP15_REG(c7, 0, c5, 6) /* Branch Predictor Invalidate All */
#define BPIMVA CP15_REG(c7, 0, c5, 7) /* Branch Predictor Invalidate by MVA */
#define DCIMVAC CP15_REG(c7, 0, c6, 1) /* Data Cache Invalidate by MVA to PoC */
#define DCISW CP15_REG(c7, 0, c6, 2) /* Data Cache Invalidate by Set/Way */
#define DCCMVAC CP15_REG(c7, 0, c10, 1) /* Data Cache Clean by MVA to PoC */
#define DCCSW CP15_REG(c7, 0, c10, 2) /* Data Cache Clean by Set/Way */
#define DCCMVAU CP15_REG(c7, 0, c11, 1) /* Data Cache Clean by MVA to PoU */
#define DCCIMVAC CP15_REG(c7, 0, c14, 1) /* Data Cache Clean and Invalidate by MVA to PoC */
#define DCCISW CP15_REG(c7, 0, c14, 2) /* Data Cache Clean and Invalidate by Set/Way */
/*
* Process, context and thread ID registers (c13)
*/
#define FCSEIDR CP15_REG(c13, 0, c0, 0) /* FCSE Process ID Register */
#define CONTEXTIDR CP15_REG(c13, 0, c0, 1) /* Context ID Register */
#define TPIDRURW CP15_REG(c13, 0, c0, 2) /* User Read/Write Thread ID Register */
#define TPIDRURO CP15_REG(c13, 0, c0, 3) /* User Read-Only Thread ID Register */
#define TPIDRPRW CP15_REG(c13, 0, c0, 4) /* PL1 only Thread ID Register */
#endif /* __ASSEMBLER__ */
#define PSR_F_BIT 0x00000040u
#define PSR_I_BIT 0x00000080u
#define PSR_A_BIT 0x00000100u
#define PSR_T_ARM 0x00000000u
#define PSR_T_THUMB 0x00000020u
#define CPSR_USR_MODE 0x00000010u
#define CPSR_FIQ_MODE 0x00000011u
#define CPSR_IRQ_MODE 0x00000012u
#define CPSR_SVC_MODE 0x00000013u
#define CPSR_ABT_MODE 0x00000017u
#define CPSR_UNDEF_MODE 0x0000001Bu
#define CPSR_HYP_MODE 0x0000001Au
#define CPSR_MODE_MASK 0x1F
#define CPSR_IRQ_DISABLE PSR_I_BIT /* IRQ disabled when =1 */
#define CPSR_FIQ_DISABLE PSR_F_BIT /* FIQ disabled when =1 */
#define CPSR_INT_DISABLE (CPSR_IRQ_DISABLE | CPSR_FIQ_DISABLE)
#define PSR_MODE_SVC_ARM (CPSR_SVC_MODE | PSR_T_ARM | CPSR_INT_DISABLE)
#define PSR_MODE_SVC_THUMB (CPSR_SVC_MODE | PSR_T_THUMB | CPSR_INT_DISABLE)
/* MPIDR field defines */
#define MPIDR_CPUID_MASK 0x000000FFu
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_REGS_H */

@ -0,0 +1,50 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
* Description: Spinlock Low Level Implementations Headfile
* Author: Huawei LiteOS Team
* Create: 2020-01-14
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARCH_SPINLOCK_H
#define _ARCH_SPINLOCK_H
#include "los_typedef.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
extern VOID ArchSpinLock(size_t *lock);
extern VOID ArchSpinUnlock(size_t *lock);
extern INT32 ArchSpinTrylock(size_t *lock);
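/*
 * Minimal usage sketch: the lock word must start at 0 (unlocked).
 *
 *     size_t lock = 0;
 *     ArchSpinLock(&lock);
 *     ... critical section ...
 *     ArchSpinUnlock(&lock);
 *
 * ArchSpinTrylock() is assumed to return 0 on success and non-zero when the
 * lock is already held, following the usual trylock convention.
 */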
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_SPINLOCK_H */

@ -0,0 +1,94 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
* Description: Task Low Level Implementations Headfile
* Author: Huawei LiteOS Team
* Create: 2020-01-14
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARCH_TASK_H
#define _ARCH_TASK_H
#include "los_typedef.h"
#include "arch/cpu.h"
#include "arch/regs.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#define LOSCFG_STACK_POINT_ALIGN_SIZE (sizeof(UINTPTR) * 2)
#if defined(LOSCFG_ARCH_FPU_VFP_D16)
#define FP_REGS_NUM 16
#elif defined (LOSCFG_ARCH_FPU_VFP_D32)
#define FP_REGS_NUM 32
#endif
#define GEN_REGS_NUM 13
/* The size of this structure must be smaller than or equal to the size specified by OS_TSK_STACK_ALIGN (16 bytes). */
typedef struct {
#ifdef LOSCFG_ARCH_FPU_ENABLE
UINT64 D[FP_REGS_NUM]; /* D0-D31 */
UINT32 regFPSCR; /* FPSCR */
UINT32 regFPEXC; /* FPEXC */
#endif
UINT32 R[GEN_REGS_NUM]; /* R0-R12 */
UINT32 LR; /* R14 */
UINT32 PC; /* R15 */
UINT32 regPSR;
} TaskContext;
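/*
 * The member order mirrors the save sequence in OsTaskSchedule (see the
 * dispatch code later in this patch): CPSR and the return address are pushed
 * first, then R0-R12/LR, then the FPU bank, so on the downward-growing stack
 * the FPU registers end up at the lowest addresses, exactly as laid out here.
 */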
STATIC INLINE VOID *ArchCurrTaskGet(VOID)
{
return (VOID *)(UINTPTR)ARM_SYSREG_READ(TPIDRPRW);
}
STATIC INLINE VOID ArchCurrTaskSet(const VOID *val)
{
ARM_SYSREG_WRITE(TPIDRPRW, (UINT32)(UINTPTR)val);
}
STATIC INLINE UINTPTR ArchGetTaskFp(const VOID *stackPointer)
{
return ((TaskContext *)(stackPointer))->R[11]; /* R11: FP */
}
/*
* Description : task stack initialization
* Input : taskId -- task ID
* stackSize -- task stack size
* topStack -- stack top of task (low address)
* Return : pointer to the task context
*/
extern VOID *OsTaskStackInit(UINT32 taskId, UINT32 stackSize, VOID *topStack);
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_TASK_H */

@ -0,0 +1,13 @@
include $(LITEOSTOPDIR)/config.mk
ARFLAGS = cr
all:
mkdir -p $(OUT)/lib
cp -rf $(LITEOS_CPU_TYPE)/*.a $(OUT)/lib
clean:
rm -rf $(OUT)/lib/lib$(LITEOS_CPU_TYPE).a
.PHONY: all clean

@ -0,0 +1,80 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: cache
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "arch/asm.h"
.fpu vfpv4
#if defined(LOSCFG_ARCH_ARM_V7A)
.arch armv7a
#elif defined(LOSCFG_ARCH_ARM_V7R)
.arch armv7r
#endif
.macro DCACHE_LINE_SIZE, reg, tmp
mrc p15, 0, \tmp, c0, c0, 1 /* read CTR (Cache Type Register) */
lsr \tmp, \tmp, #16
and \tmp, \tmp, #0xf /* DminLine: log2 of the smallest D-cache line, in words */
mov \reg, #4
mov \reg, \reg, lsl \tmp /* line size in bytes = 4 << DminLine */
.endm
FUNCTION(ArchDCacheInvByAddr)
push {r2, r3}
DCACHE_LINE_SIZE r2, r3 /* r2 = D-cache line size in bytes */
sub r3, r2, #1 /* r3 = line alignment mask */
tst r0, r3 /* start address line-aligned? */
bic r0, r0, r3
mcrne p15, 0, r0, c7, c14, 1 /* if not, clean & invalidate the partial first line */
tst r1, r3 /* end address line-aligned? */
bic r1, r1, r3
mcrne p15, 0, r1, c7, c14, 1 /* if not, clean & invalidate the partial last line */
1:
mcr p15, 0, r0, c7, c6, 1 /* invalidate D-cache line by MVA */
add r0, r0, r2
cmp r0, r1
blo 1b
dsb
pop {r2, r3}
mov pc, lr
FUNCTION(ArchDCacheCleanByAddr)
push {r2, r3}
DCACHE_LINE_SIZE r2, r3 /* r2 = D-cache line size in bytes */
sub r3, r2, #1
bic r0, r0, r3 /* align start address down to a line boundary */
1:
mcr p15, 0, r0, c7, c10, 1 /* clean D-cache line by MVA */
add r0, r0, r2
cmp r0, r1
blo 1b
dsb
pop {r2, r3}
mov pc, lr

@ -0,0 +1,112 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: mmu
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
.global enable_mmu
.fpu vfpv4
.arch armv7a
.macro INVALIDATE_DCACHE_ALL
/*
 * Invalidate the data cache.
 * To keep the code general purpose, we read the cache
 * geometry first and loop through each set and way.
 */
mrc p15, 1, r0, c0, c0, 0 /* Read Cache Size ID */
mov r3, #0x1ff
and r0, r3, r0, lsr #13 /* r0 = no. of sets - 1 */
mov r1, #0 /* r1 = way counter way_loop */
way_loop:
mov r3, #0 /* r3 = set counter set_loop */
set_loop:
mov r2, r1, lsl #30
orr r2, r3, lsl #5 /* r2 = set/way cache operation format */
mcr p15, 0, r2, c7, c6, 2 /* Invalidate line described by r2 */
add r3, r3, #1 /* Increment set counter */
cmp r0, r3 /* Last set reached yet */
bgt set_loop /* if not, iterate set_loop */
add r1, r1, #1 /* else, next */
cmp r1, #4 /* Last way reached yet */
bne way_loop /* if not, iterate way_loop */
mcr p15, 0, r1, c8, c7, 0 /* Invalidate TLB */
.endm
enable_mmu:
push {lr}
mrc p15, 0, r0, c1, c0, 0
bic r0, r0, #0x1000 /* disable ICache [SCTLR:bit 12 set as 0] */
bic r0, r0, #0x000f /* disable DCache, write buffer */
mcr p15, 0, r0, c1, c0, 0
ISB
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 /* Invalidate L1 Caches Instruction cache */
INVALIDATE_DCACHE_ALL
mov r0, #0 /* TTBCR, Translation Table Base Control Register, select TTBR0 */
mcr p15, 0, r0, c2, c0, 2
ISB
/* Set the TTB register */
ldr r0, =g_firstPageTable
mcr p15, 0, r0, c2, c0, 0
/* Set the Domain Access Control Register */
ldr r0, =((0x3U << 0) | (0x1U << 2)) /* ACCESS_TYPE_MANAGER(DOMAIN0) | ACCESS_TYPE_CLIENT(DOMAIN1); */
mcr p15, 0, r0, c3, c0, 0
mrc p15, 0, r0, c1, c0, 1 /* ACTLR, Auxiliary Control Register, IMPLEMENTATION DEFINED */
#if defined(LOSCFG_ARCH_CORTEX_A7) || defined(LOSCFG_ARCH_CORTEX_A9) || defined(LOSCFG_ARCH_CORTEX_A17)
orr r0, r0, #(1U << 6) /* SMP, Enables coherent requests to the processor. */
#endif
#if defined(LOSCFG_ARCH_CORTEX_A9) || defined(LOSCFG_ARCH_CORTEX_A17)
orr r0, r0, #(1U << 2) /* Enable D-side prefetch */
#endif
mcr p15, 0, r0, c1, c0, 1 /* ACTLR, Auxiliary Control Register */
ldr r2, =10f
DSB
mrc p15, 0, r0, c1, c0, 0 /* SCTLR, System Control Register */
bic r0, #((1U << 29) | (1U << 28)) /* Disable TRE/AFE */
orr r0, r0, #(1U << 12) /* enable ICache */
orr r0, r0, #(1U << 2) /* Dcache enable */
orr r0, r0, #(1U << 11) /* Global branch prediction enable bit */
orr r0, r0, #(1U << 8) /* 'S' bit */
orr r0, r0, #(1U << 5) /* CP15BEN bit */
orr r0, r0, #1 /* mmu enable */
mcr p15, 0, r0, c1, c0, 0 /* SCTLR, System Control Register */
ISB
mov pc, r2
nop
nop
nop
10:
pop {pc}

@ -0,0 +1,78 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
* Description: Aarch32 Canary
* Author: Huawei LiteOS Team
* Create: 2020-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "arch/canary.h"
#include "arch/regs.h"
#include "stdlib.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#ifdef __GNUC__
/* stack protector */
UINTPTR __stack_chk_guard = 0x000a0dff;
#ifdef LOSCFG_ARCH_CORTEX_A7
STATIC UINT64 ArchGetTimerCnt(VOID)
{
UINT64 cntpct;
cntpct = ARM_SYSREG64_READ(CP15_REG64(c14, 0));
return cntpct;
}
#endif
/*
 * If the stack-protector compile option -fstack-protector-strong or
 * -fstack-protector-all is enabled, we recommend implementing a true random
 * number generator for the __stack_chk_guard value to replace the function
 * implementation template shown below.
 */
#pragma GCC push_options
#pragma GCC optimize ("-fno-stack-protector")
LITE_OS_SEC_TEXT_INIT WEAK VOID ArchStackGuardInit(VOID)
{
#ifdef LOSCFG_ARCH_CORTEX_A7
int rnd;
UINT64 seed;
seed = ArchGetTimerCnt();
srand((unsigned int)seed);
rnd = rand();
__stack_chk_guard = (UINTPTR)rnd;
#endif
}
#pragma GCC pop_options
#endif
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

@ -0,0 +1,72 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Aarch32 Hw Task Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "los_config.h"
#include "arch/cpu.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
/* supported cpu vendors */
CpuVendor g_cpuTable[] = {
/* armv7-a */
{ 0xc07, "Cortex-A7" },
{ 0xc09, "Cortex-A9" },
/* armv7-r */
{ 0xc18, "Cortex-R8" },
{ 0xd03, "Cortex-A53" },
};
/* logical cpu mapping */
UINT64 g_cpuMap[LOSCFG_KERNEL_CORE_NUM] = {
[0 ... LOSCFG_KERNEL_CORE_NUM - 1] = (UINT64)(-1)
};
const CHAR *ArchCpuInfo(VOID)
{
UINT32 midr = OsMainIDGet();
/* [15:4] is the primary part number */
UINT32 partNo = (midr & 0xFFF0) >> 0x4;
for (UINT32 i = 0; i < (sizeof(g_cpuTable) / sizeof(CpuVendor)); i++) {
if (partNo == g_cpuTable[i].partNo) {
return g_cpuTable[i].cpuName;
}
}
return "unknown";
}
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

@ -0,0 +1,202 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: ARMv7 Dispatch Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "arch/regs.h"
#include "arch/asm.h"
#include "arch/exception.h"
.global OsStartToRun
.global OsTaskSchedule
.global OsIrqHandler
.equ OS_TASK_STATUS_RUNNING, 0x0010U
.fpu vfpv4
/* macros to align the stack on an 8-byte boundary for ABI compliance and to restore it afterwards */
.macro STACK_ALIGN, reg
MOV \reg, sp
TST SP, #4
SUBEQ SP, #4
PUSH { \reg }
.endm
.macro STACK_RESTORE, reg
POP { \reg }
MOV sp, \reg
.endm
/* macros to save and restore fpu regs */
.macro PUSH_FPU_REGS reg1
#ifdef LOSCFG_ARCH_FPU_ENABLE
VMRS \reg1, FPEXC
PUSH {\reg1}
VMRS \reg1, FPSCR
PUSH {\reg1}
#ifdef LOSCFG_ARCH_FPU_VFP_D32
VPUSH {D16-D31}
#endif
VPUSH {D0-D15}
#endif
.endm
.macro POP_FPU_REGS reg1
#ifdef LOSCFG_ARCH_FPU_ENABLE
VPOP {D0-D15}
#ifdef LOSCFG_ARCH_FPU_VFP_D32
VPOP {D16-D31}
#endif
POP {\reg1}
VMSR FPSCR, \reg1
POP {\reg1}
VMSR FPEXC, \reg1
#endif
.endm
/*
* R0: new task
*/
OsStartToRun:
MSR CPSR_c, #(CPSR_INT_DISABLE | CPSR_SVC_MODE)
MOV R1, #OS_TASK_STATUS_RUNNING
STRH R1, [R0, #4]
/* R0 is new task, save it on tpidrprw */
MCR p15, 0, R0, c13, c0, 4
ISB
#ifdef LOSCFG_ARCH_FPU_ENABLE
/*
 * Test whether the FPU is enabled in hardware;
 * if it is not, executing the instructions below
 * will raise an exception.
 */
VPUSH {S0}
VPOP {S0}
VPUSH {D0}
VPOP {D0}
#endif
B OsTaskContextLoad
/*
* R0: new task
* R1: run task
*/
OsTaskSchedule:
MRS R2, CPSR
PUSH {R2}
PUSH {LR}
/* push r0-r12, lr */
PUSH {R0-R12, LR}
/* save fpu registers */
PUSH_FPU_REGS R2
/* store sp on running task */
STR SP, [R1]
OsTaskContextLoad:
/* clear the exclusive monitor state set by ldrex */
CLREX
/* switch to new task's sp */
LDR SP, [R0]
/* restore fpu registers */
POP_FPU_REGS R2
/* restore r0-r12, lr */
POP {R0-R12, LR}
POP {R1}
POP {R2}
MSR CPSR, R2
BX R1
OsIrqHandler:
SUB LR, LR, #4
#ifdef LOSCFG_KERNEL_PERF
PUSH {R0-R3, R12, LR}
MOV R0, LR
MOV R1, FP
BL OsPerfSetIrqRegs
POP {R0-R3, R12, LR}
#endif
/* save spsr and lr(svc's pc) onto the svc stack */
SRSDB #0x13!
/* disable irq, switch to svc mode */
CPSID i, #0x13
/* push caller saved regs as trashed regs */
PUSH {R0-R3, R12, LR}
/* 8 bytes stack align */
STACK_ALIGN R0
/*
 * save fpu regs in case they have been
 * altered in interrupt handlers.
 */
PUSH_FPU_REGS R0
#ifdef LOSCFG_IRQ_USE_STANDALONE_STACK
PUSH {R4}
MOV R4, SP
#ifdef LOSCFG_LIB_CONFIGURABLE
EXC_SP_SET g_svcStackTop, OS_EXC_SVC_STACK_SIZE, R1, R2
#else
EXC_SP_SET __svc_stack_top, OS_EXC_SVC_STACK_SIZE, R1, R2
#endif
#endif
BLX OsIntEntry
#ifdef LOSCFG_IRQ_USE_STANDALONE_STACK
MOV SP, R4
POP {R4}
#endif
/* process pending signals */
BL OsTaskProcSignal
/* check if needs to schedule */
CMP R0, #0
BLNE OsSchedPreempt
/* restore fpu regs */
POP_FPU_REGS R0
STACK_RESTORE R0
OsIrqContextRestore:
POP {R0-R3, R12, LR}
RFEIA SP!

@ -0,0 +1,254 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: ARMv7 Hw Exc Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "arch/regs.h"
#include "arch/asm.h"
#include "arch/exception.h"
.extern g_intCount
.extern g_curNestCount
.extern OsExcHandleEntry
#ifdef LOSCFG_LIB_CONFIGURABLE
.extern g_svcStackTop
.extern g_excStackTop
#else
.extern __svc_stack_top
.extern __exc_stack_top
#endif
#ifdef LOSCFG_GDB
.extern OsUndefIncExcHandleEntry
#if __LINUX_ARM_ARCH__ >= 7
.extern OsPrefetchAbortExcHandleEntry
.extern OsDataAbortExcHandleEntry
#endif
#endif
.global _osExceptFiqHdl
.global _osExceptAddrAbortHdl
.global _osExceptDataAbortHdl
.global _osExceptPrefetchAbortHdl
.global _osExceptSwiHdl
.global _osExceptUndefInstrHdl
.fpu vfpv4
#ifdef LOSCFG_GDB
.macro GDB_HANDLE fun
SUB SP, SP, #12
STMFD SP!, {R0-R12}
MRS R1, SPSR
STMFD SP!, {R1} @save spsr
ADD R0, SP, #14 * 4
MOV R3, LR @save pc
MRS R1, CPSR
MRS R2, SPSR
MOV R4, SP
ORR R2, R2, #(CPSR_INT_DISABLE)
MSR CPSR_c, R2
STR SP, [R0] @SP
STR LR, [R0, #4] @LR
STR R3, [R0, #8] @PC
ORR R1, R1, #(CPSR_INT_DISABLE)
BIC R1, R1, #OS_PSR_THUMB
MSR CPSR_c, R1
MOV R0, R4
BL \fun
ADD SP, SP, #4
LDMFD SP!, {R0-R12}
MOV R0, SP
ADD SP, SP, #8
LDR R1, [R0, #8] @get pc
STMFD SP!, {R1}
AND R1, R1, #0x03
CMP R1, #0
BEQ 1f
LDR R1, [R0, #-14 * 4]
ORR R1, R1, #OS_PSR_THUMB
B 2f
1:
LDR R1, [R0, #-14 * 4]
2:
MSR SPSR, R1
LDR R1, [R0, #-12 * 4] @get R1
STMFD SP!, {R1}
LDR R1, [R0,#-13 * 4] @get R0
STMFD SP!, {R1}
LDMFD SP!, {R0-R1, PC}^
.endm
#endif
@ Description: Undefined instruction exception handler
_osExceptUndefInstrHdl:
#ifdef LOSCFG_GDB
GDB_HANDLE OsUndefIncExcHandleEntry
#else
@ LR offset to return from this exception: 0.
STMFD SP, {R0-R7} @ Push working registers, but don't change SP.
MOV R0, #OS_EXCEPT_UNDEF_INSTR @ Set exception ID to OS_EXCEPT_UNDEF_INSTR.
B _osExceptDispatch @ Branch to global exception handler.
#endif
@ Description: Software interrupt exception handler
_osExceptSwiHdl:
STMFD SP!, {LR} @ Store PC
STMFD SP!, {LR}
STMFD SP!, {SP}
STMFD SP!, {R0-R12} @ Store R0-R12
MRS R1, SPSR @ Save exception's CPSR.
STMFD SP!, {R1} @ Push task's CPSR (i.e. exception SPSR).
MOV R0, #OS_EXCEPT_SWI @ Set exception ID to OS_EXCEPT_SWI.
MOV R5, SP
B _osExceptionSwi @ Branch to global exception handler.
@ Description: Prefetch abort exception handler
_osExceptPrefetchAbortHdl:
#ifdef LOSCFG_GDB
#if __LINUX_ARM_ARCH__ >= 7
GDB_HANDLE OsPrefetchAbortExcHandleEntry
#endif
#else
SUB LR, LR, #4 @ LR offset to return from this exception: -4.
STMFD SP, {R0-R7} @ Push working registers, but don't change SP.
MOV R0, #OS_EXCEPT_PREFETCH_ABORT @ Set exception ID to OS_EXCEPT_PREFETCH_ABORT.
B _osExceptDispatch @ Branch to global exception handler.
#endif
@ Description: Data abort exception handler
_osExceptDataAbortHdl:
#ifdef LOSCFG_GDB
#if __LINUX_ARM_ARCH__ >= 7
GDB_HANDLE OsDataAbortExcHandleEntry
#endif
#else
SUB LR, LR, #8 @ LR offset to return from this exception: -8.
STMFD SP, {R0-R7} @ Push working registers, but don't change SP.
MOV R0, #OS_EXCEPT_DATA_ABORT @ Set exception ID to OS_EXCEPT_DATA_ABORT.
B _osExceptDispatch @ Branch to global exception handler.
#endif
@ Description: Address abort exception handler
_osExceptAddrAbortHdl:
SUB LR, LR, #8 @ LR offset to return from this exception: -8.
STMFD SP, {R0-R7} @ Push working registers, but don't change SP.
MOV R0, #OS_EXCEPT_ADDR_ABORT @ Set exception ID to OS_EXCEPT_ADDR_ABORT.
B _osExceptDispatch @ Branch to global exception handler.
@ Description: Fast interrupt request exception handler
_osExceptFiqHdl:
SUB LR, LR, #4 @ LR offset to return from this exception: -4.
STMFD SP, {R0-R7} @ Push working registers.
MOV R0, #OS_EXCEPT_FIQ @ Set exception ID to OS_EXCEPT_FIQ.
B _osExceptDispatch @ Branch to global exception handler.
@ Description: Exception handler
@ Parameter : R0 Exception Type
@ Regs Hold : R3 Exception's CPSR
_osExceptDispatch:
MRS R1, SPSR @ Save CPSR before exception.
MOV R2, LR @ Save PC before exception.
SUB R3, SP, #(8 * 4) @ Save the start address of working registers.
MSR CPSR_c, #(CPSR_INT_DISABLE | CPSR_SVC_MODE) @ Switch to SVC mode, and disable all interrupts
MOV R5, SP
#ifdef LOSCFG_LIB_CONFIGURABLE
EXC_SP_SET g_excStackTop, OS_EXC_STACK_SIZE, R6, R7
#else
EXC_SP_SET __exc_stack_top, OS_EXC_STACK_SIZE, R6, R7
#endif
STMFD SP!, {R2} @ Push Exception PC
STMFD SP!, {LR}
STMFD SP!, {R5} @ Push original SP,
STMFD SP!, {R8-R12} @ Push original R12-R8,
LDMFD R3!, {R4-R11} @ Move original R7-R0 from exception stack to original stack.
STMFD SP!, {R4-R11}
STMFD SP!, {R1} @ Push task's CPSR (i.e. exception SPSR).
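@ A sketch of the resulting exception frame, low address first. This assumes the
@ ExcContext member order regCPSR, R0-R12, SP, LR, PC used by the C handler in
@ this port; the SWI path above builds the same 17-word layout.
@   [SP + 0]  regCPSR   (task CPSR, i.e. exception SPSR)
@   [SP + 4]  R0-R7     (moved over from the banked exception stack)
@   [SP + 36] R8-R12
@   [SP + 56] SP        (original SP before the exception)
@   [SP + 60] LR
@   [SP + 64] PC        (address where the exception was taken)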
_osExceptionSwi:
MOV R1, SP
LDR R2, =g_curNestCount @ if (g_curNestCount > 0) jump to _osExceptionGetSP
LDR R4, [R2]
CMP R4, #0
BNE _osExceptionGetSP
MRC P15, 0, R4, C0, C0, 5
AND R4, R4, #MPIDR_CPUID_MASK @ Get Current cpu id
LSL R2, R4, #2
LDR R3, =g_intCount @ Determine whether the exception occurred on a task stack or the system stack
ADD R3, R3, R2
LDR R2, [R3]
CMP R2, #0 @ if (g_intCount[ArchCurrCpuid()] > 0)
BNE _osExceptionGetSP @ can not switch svc stack
#ifdef LOSCFG_LIB_CONFIGURABLE
EXC_SP_SET g_svcStackTop, OS_EXC_SVC_STACK_SIZE, R6, R7 @ Switch to unified exception stack.
#else
EXC_SP_SET __svc_stack_top, OS_EXC_SVC_STACK_SIZE, R6, R7 @ Switch to unified exception stack.
#endif
ADD R2, R2, #1
STR R2, [R3]
_osExceptionGetSP:
LDR R2, =OsExcHandleEntry @ OsExcHandleEntry(UINT32 excType, ExcContext * excBufAddr)
MOV LR, PC
BX R2
.end

@ -0,0 +1,638 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: ARMv7 Exc Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "arch/exception.h"
#include "los_memory_pri.h"
#include "los_printf_pri.h"
#include "los_task_pri.h"
#if defined(LOSCFG_SHELL_EXCINFO_DUMP) || defined(LOSCFG_EXC_INTERACTION)
#include "los_exc_pri.h"
#include "los_hwi_pri.h"
#endif
#ifdef LOSCFG_COREDUMP
#include "los_coredump.h"
#endif
#ifdef LOSCFG_GDB
#include "gdb_int.h"
#endif
#include "los_mp_pri.h"
#ifdef LOSCFG_KERNEL_TRACE
#include "los_trace_pri.h"
#endif
#ifdef LOSCFG_LIB_CONFIGURABLE
UINTPTR g_svcStackTop = (UINTPTR)(&__svc_stack_top);
UINTPTR g_excStackTop = (UINTPTR)(&__exc_stack_top);
#endif
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
VOID OsExcHook(UINT32 excType, ExcContext *excBufAddr);
UINT32 g_curNestCount = 0;
STATIC EXC_PROC_FUNC g_excHook = (EXC_PROC_FUNC)OsExcHook;
#ifdef LOSCFG_KERNEL_SMP
STATIC SPIN_LOCK_INIT(g_excSerializerSpin);
#endif
#define OS_MAX_BACKTRACE 15U
#define DUMPSIZE 128U
#define DUMPREGS 12U
#define INSTR_SET_MASK 0x01000020U
#define THUMB_INSTR_LEN 2U
#define ARM_INSTR_LEN 4U
#define POINTER_SIZE 4U
#define GET_FS(fsr) (((fsr) & 0xFU) | (((fsr) & (1U << 10)) >> 6))
#define GET_WNR(dfsr) ((dfsr) & (1U << 11))
#define IS_VALID_ADDR(ptr) (((ptr) >= SYS_MEM_BASE) && \
((ptr) <= g_sys_mem_addr_end) && \
IS_ALIGNED((ptr), sizeof(CHAR *)))
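/*
 * Worked example (illustrative value): for DFSR = 0x805,
 * GET_WNR(0x805) tests bit[11] = 1, so the abort was caused by a write;
 * GET_FS(0x805) = (0x805 & 0xF) | ((0x805 & (1 << 10)) >> 6) = 0x5,
 * i.e. FS = 0b00101, which OsDecodeFS below reports as a section translation fault.
 */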
STATIC const StackInfo g_excStack[] = {
{ &__undef_stack, OS_EXC_UNDEF_STACK_SIZE, "udf_stack" },
{ &__abt_stack, OS_EXC_ABT_STACK_SIZE, "abt_stack" },
{ &__fiq_stack, OS_EXC_FIQ_STACK_SIZE, "fiq_stack" },
{ &__svc_stack, OS_EXC_SVC_STACK_SIZE, "svc_stack" },
{ &__irq_stack, OS_EXC_IRQ_STACK_SIZE, "irq_stack" },
{ &__exc_stack, OS_EXC_STACK_SIZE, "exc_stack" }
};
STATIC INT32 OsDecodeFS(UINT32 bitsFS)
{
switch (bitsFS) {
case 0x05: /* 0b00101 */
case 0x07: /* 0b00111 */
PrintExcInfo("Translation fault, %s\n", (bitsFS & 0x2) ? "page" : "section");
break;
case 0x09: /* 0b01001 */
case 0x0b: /* 0b01011 */
PrintExcInfo("Domain fault, %s\n", (bitsFS & 0x2) ? "page" : "section");
break;
case 0x0d: /* 0b01101 */
case 0x0f: /* 0b01111 */
PrintExcInfo("Permission fault, %s\n", (bitsFS & 0x2) ? "page" : "section");
break;
default:
PrintExcInfo("Unknown fault! FS:0x%x. "
"Check IFSR and DFSR in ARM Architecture Reference Manual.\n",
bitsFS);
break;
}
return LOS_OK;
}
STATIC INT32 OsDecodeInstructionFSR(UINT32 regIFSR)
{
INT32 ret;
UINT32 bitsFS = GET_FS(regIFSR); /* FS bits[4]+[3:0] */
ret = OsDecodeFS(bitsFS);
return ret;
}
STATIC INT32 OsDecodeDataFSR(UINT32 regDFSR)
{
INT32 ret = 0;
UINT32 bitWnR = GET_WNR(regDFSR); /* WnR bit[11] */
UINT32 bitsFS = GET_FS(regDFSR); /* FS bits[4]+[3:0] */
if (bitWnR) {
PrintExcInfo("Abort caused by a write instruction. ");
} else {
PrintExcInfo("Abort caused by a read instruction. ");
}
if (bitsFS == 0x01) { /* 0b00001 */
PrintExcInfo("Alignment fault.\n");
return ret;
}
ret = OsDecodeFS(bitsFS);
return ret;
}
STATIC VOID OsExcType(UINT32 excType, ExcContext *excBufAddr)
{
/* undefined instruction exception or software interrupt */
if ((excType == OS_EXCEPT_UNDEF_INSTR) || (excType == OS_EXCEPT_SWI)) {
if ((excBufAddr->regCPSR & INSTR_SET_MASK) == 0) { /* work status: ARM */
excBufAddr->PC = excBufAddr->PC - ARM_INSTR_LEN;
} else if ((excBufAddr->regCPSR & INSTR_SET_MASK) == 0x20) { /* work status: Thumb */
excBufAddr->PC = excBufAddr->PC - THUMB_INSTR_LEN;
}
}
if (excType == OS_EXCEPT_PREFETCH_ABORT) {
PrintExcInfo("prefetch_abort fault fsr:0x%x, far:0x%0+8x\n", OsGetIFSR(), OsGetIFAR());
(VOID)OsDecodeInstructionFSR(OsGetIFSR());
} else if (excType == OS_EXCEPT_DATA_ABORT) {
PrintExcInfo("data_abort fsr:0x%x, far:0x%0+8x\n", OsGetDFSR(), OsGetDFAR());
(VOID)OsDecodeDataFSR(OsGetDFSR());
}
}
STATIC const CHAR *g_excTypeString[] = {
"reset",
"undefined instruction",
"software interrupt",
"prefetch abort",
"data abort",
"fiq",
"address abort",
"irq"
};
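/*
 * The order of these strings is assumed to match the OS_EXCEPT_* exception IDs
 * (reset = 0 through irq = 7) used to index this array in OsExcSysInfo() below.
 */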
STATIC VOID OsExcSysInfo(UINT32 excType, const ExcContext *excBufAddr)
{
LosTaskCB *runTask = OsCurrTaskGet();
PrintExcInfo("excType:%s\n"
"taskName = %s\n"
"taskId = %u\n"
"task stackSize = %u\n"
"system mem addr = 0x%x\n"
"excBuffAddr pc = 0x%x\n"
"excBuffAddr lr = 0x%x\n"
"excBuffAddr sp = 0x%x\n"
"excBuffAddr fp = 0x%x\n",
g_excTypeString[excType],
runTask->taskName,
runTask->taskId,
runTask->stackSize,
m_aucSysMem0,
excBufAddr->PC,
excBufAddr->LR,
excBufAddr->SP,
excBufAddr->R11);
}
STATIC VOID OsExcRegsInfo(const ExcContext *excBufAddr)
{
/*
* Split register information into two parts:
* Ensure printing does not rely on memory modules.
*/
PrintExcInfo("R0 = 0x%x\n"
"R1 = 0x%x\n"
"R2 = 0x%x\n"
"R3 = 0x%x\n"
"R4 = 0x%x\n"
"R5 = 0x%x\n"
"R6 = 0x%x\n",
excBufAddr->R0, excBufAddr->R1, excBufAddr->R2, excBufAddr->R3,
excBufAddr->R4, excBufAddr->R5, excBufAddr->R6);
PrintExcInfo("R7 = 0x%x\n"
"R8 = 0x%x\n"
"R9 = 0x%x\n"
"R10 = 0x%x\n"
"R11 = 0x%x\n"
"R12 = 0x%x\n"
"CPSR = 0x%x\n",
excBufAddr->R7, excBufAddr->R8, excBufAddr->R9, excBufAddr->R10,
excBufAddr->R11, excBufAddr->R12, excBufAddr->regCPSR);
}
UINT32 ArchSetExcHook(EXC_PROC_FUNC excHook)
{
UINT32 intSave;
intSave = ArchIntLock();
g_excHook = excHook;
ArchIntRestore(intSave);
return 0;
}
EXC_PROC_FUNC ArchGetExcHook(VOID)
{
return g_excHook;
}
VOID OsDumpContextMem(const ExcContext *excBufAddr)
{
UINT32 count = 0;
const UINT32 *excReg = NULL;
for (excReg = &(excBufAddr->R0); count <= DUMPREGS; excReg++, count++) {
if (IS_VALID_ADDR(*excReg)) {
PrintExcInfo("\ndump mem around R%u:%u", count, (*excReg));
OsDumpMemByte(DUMPSIZE, ((*excReg) - (DUMPSIZE >> 1)));
}
}
if (IS_VALID_ADDR(excBufAddr->SP)) {
PrintExcInfo("\ndump mem around SP:%p", excBufAddr->SP);
OsDumpMemByte(DUMPSIZE, (excBufAddr->SP - (DUMPSIZE >> 1)));
}
}
#ifdef LOSCFG_BACKTRACE
/* This function validates a frame pointer, or the start and end of a checking range. */
STATIC INLINE BOOL IsValidFP(UINTPTR regFP, UINTPTR start, UINTPTR end)
{
return (regFP > start) && (regFP < end);
}
STATIC INLINE BOOL FindSuitableStack(UINTPTR regFP, UINTPTR *start, UINTPTR *end)
{
UINT32 index, stackStart, stackEnd;
BOOL found = FALSE;
LosTaskCB *taskCB = NULL;
const StackInfo *stack = NULL;
/* Search in the task stacks */
for (index = 0; index < g_taskMaxNum; index++) {
taskCB = &g_taskCBArray[index];
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
continue;
}
stackStart = taskCB->topOfStack;
stackEnd = taskCB->topOfStack + taskCB->stackSize;
if (IsValidFP(regFP, stackStart, stackEnd)) {
found = TRUE;
goto FOUND;
}
}
/* Search in the exc stacks */
for (index = 0; index < sizeof(g_excStack) / sizeof(StackInfo); index++) {
stack = &g_excStack[index];
stackStart = (UINTPTR)stack->stackTop;
stackEnd = stackStart + LOSCFG_KERNEL_CORE_NUM * stack->stackSize;
if (IsValidFP(regFP, stackStart, stackEnd)) {
found = TRUE;
goto FOUND;
}
}
FOUND:
if (found == TRUE) {
*start = stackStart;
*end = stackEnd;
}
return found;
}
STATIC VOID BackTraceWithFp(UINTPTR fp)
{
PrintExcInfo("*******backtrace begin*******\n");
(VOID)ArchBackTraceGet(fp, NULL, OS_MAX_BACKTRACE);
PrintExcInfo("*******backtrace end*******\n");
}
#endif
UINT32 ArchBackTraceGet(UINTPTR fp, UINTPTR *callChain, UINT32 maxDepth)
{
#ifdef LOSCFG_BACKTRACE
UINTPTR tmpFP;
UINTPTR backLR;
UINTPTR backFP = fp;
UINTPTR stackStart, stackEnd;
UINT32 count = 0;
if (FindSuitableStack(fp, &stackStart, &stackEnd) == FALSE) {
PrintExcInfo("fp error, backtrace failed!\n");
return 0;
}
/*
 * Check whether it is a leaf function.
 * Generally, the frame pointer points to the address of the link register. In a leaf
 * function there is no further function call, so the compiler does not store the link
 * register, but the frame pointer is still stored and updated. In that case we need
 * to find the right position of the frame pointer.
 */
tmpFP = *((UINTPTR *)(fp));
if (IsValidFP(tmpFP, stackStart, stackEnd)) {
backFP = tmpFP;
if (callChain == NULL) {
PrintExcInfo("traceback fp fixed, trace using fp = 0x%x\n", backFP);
}
}
while (IsValidFP(backFP, stackStart, stackEnd)) {
tmpFP = backFP;
backLR = *((UINTPTR *)(tmpFP));
backFP = *((UINTPTR *)(tmpFP - POINTER_SIZE));
if (callChain == NULL) {
PrintExcInfo("traceback %u -- lr = 0x%x fp = 0x%x\n", count, backLR, backFP);
} else {
callChain[count] = backLR;
}
count++;
if ((count == maxDepth) || (backFP == tmpFP)) {
break;
}
}
return count;
#else
(VOID)fp;
(VOID)callChain;
(VOID)maxDepth;
return 0;
#endif
}
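/*
 * Usage sketch (hypothetical caller): record up to 10 return addresses of the
 * current context into a local array instead of printing them.
 *
 *     UINTPTR chain[10] = {0};
 *     UINT32 depth = ArchBackTraceGet(ArchGetFp(), chain, 10);
 *     for (UINT32 i = 0; i < depth; i++) {
 *         PRINTK("frame %u: lr = 0x%x\n", i, chain[i]);
 *     }
 */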
VOID ArchBackTrace(VOID)
{
#ifdef LOSCFG_BACKTRACE
UINT32 fp = ArchGetFp();
PrintExcInfo("fp:0x%08x\n", fp);
BackTraceWithFp(fp);
#endif
}
VOID ArchBackTraceWithSp(const VOID *stackPointer)
{
#ifdef LOSCFG_BACKTRACE
UINT32 fp = ArchGetTaskFp(stackPointer);
PrintExcInfo("fp:0x%08x\n", fp);
BackTraceWithFp(fp);
#else
(VOID)stackPointer;
#endif
}
VOID ArchExcInit(VOID)
{
OsExcStackInfoReg(g_excStack, sizeof(g_excStack) / sizeof(g_excStack[0]));
}
VOID OsExcHook(UINT32 excType, ExcContext *excBufAddr)
{
OsExcType(excType, excBufAddr);
OsExcSysInfo(excType, excBufAddr);
OsExcRegsInfo(excBufAddr);
#ifdef LOSCFG_BACKTRACE
BackTraceWithFp(excBufAddr->R11);
#endif
(VOID)OsShellCmdTskInfoGet(OS_ALL_TASK_MASK);
OsExcStackInfo();
OsDumpContextMem(excBufAddr);
#ifdef LOSCFG_KERNEL_MEM_BESTFIT
OsMemIntegrityMultiCheck();
#endif
#ifdef LOSCFG_KERNEL_TRACE
if (g_traceDumpHook != NULL) {
g_traceDumpHook(FALSE);
}
#endif
#ifdef LOSCFG_COREDUMP
LOS_CoreDumpV2(excType, excBufAddr);
#endif
}
VOID OsCallStackInfo(VOID)
{
UINT32 count = 0;
LosTaskCB *runTask = OsCurrTaskGet();
UINTPTR stackBottom = runTask->topOfStack + runTask->stackSize;
UINT32 *stackPointer = (UINT32 *)stackBottom;
PrintExcInfo("runTask->stackPointer = 0x%x\n"
"runTask->topOfStack = 0x%x\n"
"text_start:0x%x,text_end:0x%x\n",
stackPointer, runTask->topOfStack, &__text_start, &__text_end);
while ((stackPointer > (UINT32 *)runTask->topOfStack) && (count < OS_MAX_BACKTRACE)) {
if ((*stackPointer > (UINTPTR)(&__text_start)) &&
(*stackPointer < (UINTPTR)(&__text_end)) &&
IS_ALIGNED((*stackPointer), POINTER_SIZE)) {
if ((*(stackPointer - 1) > (UINT32)runTask->topOfStack) &&
(*(stackPointer - 1) < stackBottom) &&
IS_ALIGNED((*(stackPointer - 1)), POINTER_SIZE)) {
count++;
PrintExcInfo("traceback %u -- lr = 0x%x\n", count, *stackPointer);
}
}
stackPointer--;
}
PrintExcInfo("\n");
}
#ifdef LOSCFG_GDB
VOID OsUndefIncExcHandleEntry(ExcContext *excBufAddr)
{
excBufAddr->PC -= 4; /* lr in undef is pc + 4 */
if (gdb_undef_hook(excBufAddr, OS_EXCEPT_UNDEF_INSTR)) {
return;
}
if (g_excHook != NULL) {
g_excHook(OS_EXCEPT_UNDEF_INSTR, excBufAddr);
}
while (1) {
;
}
}
#if __LINUX_ARM_ARCH__ >= 7
VOID OsPrefetchAbortExcHandleEntry(ExcContext *excBufAddr)
{
excBufAddr->PC -= 4; /* lr in prefetch abort is pc + 4 */
if (gdbhw_hook(excBufAddr, OS_EXCEPT_PREFETCH_ABORT)) {
return;
}
if (g_excHook != NULL) {
g_excHook(OS_EXCEPT_PREFETCH_ABORT, excBufAddr);
}
while (1) {
;
}
}
VOID OsDataAbortExcHandleEntry(ExcContext *excBufAddr)
{
excBufAddr->PC -= 8; /* lr in data abort is pc + 8 */
if (gdbhw_hook(excBufAddr, OS_EXCEPT_DATA_ABORT)) {
return;
}
if (g_excHook != NULL) {
g_excHook(OS_EXCEPT_DATA_ABORT, excBufAddr);
}
while (1) {
;
}
}
#endif /* __LINUX_ARM_ARCH__ */
#endif /* LOSCFG_GDB */
#ifdef LOSCFG_KERNEL_SMP
#define EXC_WAIT_INTER 50U
#define EXC_WAIT_TIME 2000U
#define INVALID_CPUID 0xFFFF
STATIC UINT32 g_excCpuid = INVALID_CPUID;
STATIC VOID CpuStatusOutput(VOID)
{
UINT32 i;
for (i = 0; i < LOSCFG_KERNEL_CORE_NUM; i++) {
switch (g_percpu[i].excFlag) {
case CPU_RUNNING:
PrintExcInfo("cpu%u is running.\n", i);
break;
case CPU_HALT:
PrintExcInfo("cpu%u is halted.\n", i);
break;
case CPU_EXC:
PrintExcInfo("cpu%u is in exc.\n", i);
break;
default:
break;
}
}
}
STATIC VOID WaitAllCpuStop(UINT32 cpuid)
{
UINT32 i;
UINT32 time = 0;
while (time < EXC_WAIT_TIME) {
for (i = 0; i < LOSCFG_KERNEL_CORE_NUM; i++) {
if ((i != cpuid) && (g_percpu[i].excFlag == CPU_RUNNING)) {
LOS_Mdelay(EXC_WAIT_INTER);
time += EXC_WAIT_INTER;
break;
}
}
/* Other CPUs are all halted or in the exception state. */
if (i == LOSCFG_KERNEL_CORE_NUM) {
break;
}
}
return;
}
STATIC VOID CheckAllCpuStatus(VOID)
{
UINT32 currCpuid = ArchCurrCpuid();
LOS_SpinLock(&g_excSerializerSpin);
if (g_excCpuid == INVALID_CPUID) {
g_excCpuid = ArchCurrCpuid();
} else if (g_excCpuid != ArchCurrCpuid()) {
LOS_SpinUnlock(&g_excSerializerSpin);
while (1) {}
}
LOS_SpinUnlock(&g_excSerializerSpin);
WaitAllCpuStop(currCpuid);
CpuStatusOutput();
}
#endif
/*
* Description : EXC handler entry
* Input : excType --- exc type
* excBufAddr --- address of EXC buf
*/
LITE_OS_SEC_TEXT_INIT VOID OsExcHandleEntry(UINT32 excType, ExcContext *excBufAddr)
{
#ifdef LOSCFG_KERNEL_SMP
UINT32 ret;
/* use halt ipi to stop other active cores */
UINT32 target = (UINT32)(OS_MP_CPU_ALL & ~CPUID_TO_AFFI_MASK(ArchCurrCpuid()));
ret = HalIrqSendIpi(target, LOS_MP_IPI_HALT);
if (ret != LOS_OK) {
PrintExcInfo("The interrupt %d is invalid, irq send inter-core interrupt failed.\n", LOS_MP_IPI_HALT);
}
OsPercpuGet()->excFlag = CPU_EXC;
LOCKDEP_CLEAR_LOCKS();
CheckAllCpuStatus();
#endif
#ifdef LOSCFG_SHELL_EXCINFO_DUMP
LogReadWriteFunc func = OsGetExcInfoRW();
#endif
g_curNestCount++;
if (g_excHook != NULL) {
if (g_curNestCount == 1) {
#ifdef LOSCFG_SHELL_EXCINFO_DUMP
if (func != NULL) {
OsSetExcInfoOffset(0);
OsIrqNestingCntSet(0); /* 0: int nest count */
OsRecordExcInfoTime();
OsIrqNestingCntSet(1); /* 1: int nest count */
}
#endif
g_excHook(excType, excBufAddr);
} else {
OsCallStackInfo();
}
#ifdef LOSCFG_SHELL_EXCINFO_DUMP
if (func != NULL) {
PrintExcInfo("Be sure your space bigger than OsOsGetExcInfoOffset():0x%x\n", OsGetExcInfoOffset());
OsIrqNestingCntSet(0); /* 0: int nest count */
func(OsGetExcInfoDumpAddr(), OsGetExcInfoLen(), 0, OsGetExcInfoBuf());
OsIrqNestingCntSet(1); /* 1: int nest count */
}
#endif
}
#ifdef LOSCFG_EXC_INTERACTION
OsKeepExcInteractionTask();
#endif
while (1) {
;
}
}
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

@ -0,0 +1,41 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: ARMv7 JMP Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "arch/asm.h"
FUNCTION(longjmp)
ldmfd r0,{r4-r14}
cmp r1,#0
moveq r1,#1
mov r0,r1
mov pc,lr
FUNCTION(setjmp)
stmea r0,{r4-r14}
mov r0,#0
mov pc,lr
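@ Usage sketch (standard setjmp/longjmp semantics, hypothetical caller):
@
@     jmp_buf env;            /* must hold r4-r14, i.e. 11 words */
@     if (setjmp(env) == 0) {
@         do_work();          /* first pass: setjmp returned 0 */
@     } else {
@         recover();          /* longjmp(env, val) lands here, val != 0 */
@     }
@
@ Note that longjmp above forces a zero value to 1, matching C semantics.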

@ -0,0 +1,538 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: MMU Config Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "mmu.h"
#include "los_config.h"
#include "los_hwi.h"
#include "asm/dma.h"
#include "los_memory.h"
#ifdef LOSCFG_KERNEL_RUNSTOP
#include "lowpower/los_runstop_pri.h"
#endif
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
/* This is operation for page table */
#if defined (LOSCFG_KERNEL_NX) && defined (LOSCFG_KERNEL_DYNLOAD)
__attribute__((aligned(MMU_16K))) __attribute__((section(".bss.prebss.translation_table")))
UINT8 g_firstPageTable[MMU_16K];
__attribute__((aligned(MMU_1K))) UINT8 g_secondPageTableOs[MMU_16K];
__attribute__((aligned(MMU_1K))) UINT8 g_secondPageTableDl[LOS_DL_HEAP_SIZE / MMU_1K];
__attribute__((aligned(MMU_1K))) UINT8 g_secondPageTableApp[MMZ_MEM_LEN / MMU_1K];
#else
__attribute__((aligned(MMU_16K))) __attribute__((section(".bss.prebss.translation_table")))
UINT8 g_firstPageTable[MMU_16K];
__attribute__((aligned(MMU_1K))) UINT8 g_secondPageTableOs[MMU_16K];
__attribute__((aligned(MMU_1K))) UINT8 g_secondPageTableApp[MMZ_MEM_LEN / MMU_1K];
#endif
SENCOND_PAGE g_mmuOsPage = {0};
SENCOND_PAGE g_mmuAppPage = {0};
#ifdef LOSCFG_KERNEL_DYNLOAD
SENCOND_PAGE g_mmuDlPage = {0};
#endif
#ifdef LOSCFG_NULL_ADDRESS_PROTECT
__attribute__((aligned(MMU_1K))) UINT32 g_excSecondPageTable[MMU_1K];
SENCOND_PAGE g_excPage = {0};
#define EXC_VECTOR_ADDR 0xFFFF0000
/* EXC_VECTOR_ALIGN is EXC_VECTOR_ADDR aligned down to a 1M boundary */
#define EXC_VECTOR_ALIGN 0xFFF00000
#endif
#define ITEM_PRINT_LEN 20
#define BYTES_PER_ITEM 4
#define ITEM_TYPE_MASK 0x3
/* first-level table base + offset = table item address for 'addr' */
#define MMU_GET_FIRST_TABLE_ADDR(addr) (((addr) >> SHIFT_1M) * 4 + (UINTPTR)g_firstPageTable)
/* get the item content stored in the table */
#define MMU_GET_FIRST_TABLE_ITEM(addr) (*(UINTPTR *)MMU_GET_FIRST_TABLE_ADDR(addr))
/* if the first-level item ID is MMU_FIRST_LEVEL_PAGE_TABLE_ID, get the second-level table base from the high 22 bits */
#define MMU_GET_SECOND_TABLE_BASE(addr) ((MMU_GET_FIRST_TABLE_ITEM(addr)) & 0xfffffc00)
/* second table item offset */
#define MMU_GET_SECOND_TABLE_OFFSET(addr) ((((addr) & (MMU_1M - 1)) >> SHIFT_4K) * 4)
/* second table item address */
#define MMU_GET_SECOND_TABLE_ADDR(addr) (MMU_GET_SECOND_TABLE_BASE(addr) + MMU_GET_SECOND_TABLE_OFFSET(addr))
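/*
 * Worked example (illustrative address): for addr = 0x40123456,
 * MMU_GET_FIRST_TABLE_ADDR(addr) = (0x40123456 >> 20) * 4 + g_firstPageTable
 *                                = g_firstPageTable + 0x401 * 4,
 * i.e. the first-level item covering the 1M section at 0x40100000. If that item
 * is a page-table descriptor, MMU_GET_SECOND_TABLE_OFFSET(addr)
 * = ((0x23456 >> 12) * 4) = 0x23 * 4, selecting the 4K page at 0x40123000.
 */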
STATIC VOID MmuSetMemPage(MMU_PARAM *para)
{
UINT32 pageBase;
UINT32 pageStartIndex, pageEndIndex;
UINT32 length, bitsCache, bitsBuf, bitsAP;
#if defined(LOSCFG_ARCH_CORTEX_A7) || defined(LOSCFG_ARCH_CORTEX_A17) || defined(LOSCFG_ARCH_CORTEX_A53_AARCH32)
UINT32 bitsXn;
#endif
UINT32 endAddr = para->endAddr;
UINT32 pageSize = (para->stPage->page_type == MMU_SECOND_LEVEL_BIG_PAGE_TABLE_ID) ? MMU_64K : MMU_4K;
UINT32 sttBase = para->stPage->page_descriptor_addr;
UINT32 rshiftBits = (pageSize == MMU_64K) ? SHIFT_64K : SHIFT_4K;
if ((para->startAddr & (pageSize - 1)) != 0) {
return;
}
if ((endAddr & (pageSize - 1)) != 0) {
endAddr = ALIGN(endAddr, pageSize);
}
pageStartIndex = (para->startAddr - para->stPage->page_addr) >> rshiftBits;
pageEndIndex = (endAddr - para->stPage->page_addr) >> rshiftBits;
length = pageEndIndex - pageStartIndex;
#ifdef LOSCFG_NULL_ADDRESS_PROTECT
if (para->startAddr == EXC_VECTOR_ALIGN) {
para->startAddr -= EXC_VECTOR_ADDR - SYS_MEM_BASE;
}
#endif
bitsAP = MMU_AP_STATE(para->uwFlag);
bitsCache = MMU_CACHE_STATE(para->uwFlag);
bitsBuf = MMU_BUFFER_STATE(para->uwFlag);
#ifdef LOSCFG_ARCH_ARM926
if (pageSize == MMU_64K) {
pageBase = para->startAddr >> SHIFT_64K;
X_MMU_TWO_LEVEL_PAGE64K(pageBase, pageStartIndex, length, bitsCache, bitsBuf, bitsAP);
} else {
pageBase = para->startAddr >> SHIFT_4K;
X_MMU_TWO_LEVEL_PAGE(pageBase, pageStartIndex, length, bitsCache, bitsBuf, bitsAP);
}
#elif defined(LOSCFG_ARCH_CORTEX_A7) || defined(LOSCFG_ARCH_CORTEX_A17) || defined(LOSCFG_ARCH_CORTEX_A53_AARCH32)
bitsXn = MMU_EXECUTE_STATE(para->uwFlag);
if (pageSize == MMU_64K) {
pageBase = para->startAddr >> SHIFT_64K;
X_MMU_TWO_LEVEL_PAGE64K(pageBase, pageStartIndex, length, bitsCache, bitsBuf, bitsAP, bitsXn);
} else {
pageBase = para->startAddr >> SHIFT_4K;
X_MMU_TWO_LEVEL_PAGE(pageBase, pageStartIndex, length, bitsCache, bitsBuf, bitsAP, bitsXn);
}
#endif
}
STATIC UINT32 MmuSetFirstSection(const MMU_PARAM *para, UINT32 itemStart, UINT32 itemEnd)
{
UINT32 intSave;
UINT32 itemTemp = itemStart;
UINT32 bitsCache, bitsBuf, bitsAP;
#if defined(LOSCFG_ARCH_CORTEX_A7) || defined(LOSCFG_ARCH_CORTEX_A17) || defined(LOSCFG_ARCH_CORTEX_A53_AARCH32)
UINT32 bitsXn = MMU_EXECUTE_STATE(para->uwFlag);
#endif
bitsAP = MMU_AP_STATE(para->uwFlag);
bitsCache = MMU_CACHE_STATE(para->uwFlag);
bitsBuf = MMU_BUFFER_STATE(para->uwFlag);
while (itemTemp <= itemEnd) {
if (((*(UINTPTR *)(UINTPTR)itemTemp) & ITEM_TYPE_MASK) != MMU_FIRST_LEVEL_SECTION_ID) {
PRINT_ERR("not all mem belongs to first section(1M every item), mmu table ID:%u\n",
((*(UINT32 *)(UINTPTR)itemTemp) & ITEM_TYPE_MASK));
return LOS_NOK;
}
itemTemp += sizeof(UINTPTR);
}
itemTemp = itemStart;
intSave = LOS_IntLock();
DisableAPCheck();
while (itemTemp <= itemEnd) {
#ifdef LOSCFG_ARCH_ARM926
SECTION_CHANGE(itemTemp, bitsCache, bitsBuf, bitsAP);
#elif defined(LOSCFG_ARCH_CORTEX_A7) || defined(LOSCFG_ARCH_CORTEX_A17) || defined(LOSCFG_ARCH_CORTEX_A53_AARCH32)
SECTION_CHANGE(itemTemp, bitsCache, bitsBuf, bitsAP, bitsXn);
#endif
itemTemp += sizeof(UINTPTR);
}
EnableAPCheck();
LOS_IntRestore(intSave);
dma_cache_clean(itemStart, itemEnd);
return LOS_OK;
}
STATIC UINT32 MmuSetSecondPage(MMU_PARAM *para, UINT32 itemStart, UINT32 itemEnd)
{
UINT32 intSave;
UINT32 itemTemp = itemStart;
UINT32 pageStart, pageEnd, pageSize;
if (para->stPage == NULL) {
return LOS_NOK;
}
if ((para->startAddr < para->stPage->page_addr) ||
(para->endAddr > (para->stPage->page_length + para->stPage->page_addr))) {
PRINT_ERR("addr input not belongs to this second page \n"
"para->startAddr:0x%x, para->stPage->page_addr:0x%x\n",
para->startAddr, para->stPage->page_addr);
PRINT_ERR("para->endAddr:0x%x, (para->stPage->page_length + para->stPage->page_addr):0x%x\n",
para->endAddr, para->stPage->page_length + para->stPage->page_addr);
return LOS_NOK;
}
while (itemTemp <= itemEnd) {
if (((*(UINTPTR *)(UINTPTR)itemTemp) & ITEM_TYPE_MASK) != MMU_FIRST_LEVEL_PAGE_TABLE_ID) {
PRINT_ERR("not all mem belongs to second page(4K or 64K every item), mmu table ID:%u \n",
((*(UINT32 *)(UINTPTR)itemTemp) & ITEM_TYPE_MASK));
return LOS_NOK;
}
itemTemp += sizeof(UINTPTR);
}
intSave = LOS_IntLock();
DisableAPCheck();
MmuSetMemPage(para);
EnableAPCheck();
LOS_IntRestore(intSave);
pageStart = MMU_GET_SECOND_TABLE_ADDR(para->startAddr);
pageSize = (para->stPage->page_type == MMU_SECOND_LEVEL_BIG_PAGE_TABLE_ID) ? MMU_64K : MMU_4K;
pageEnd = MMU_GET_SECOND_TABLE_ADDR(para->endAddr - 1 + pageSize);
dma_cache_clean(pageStart, pageEnd);
return LOS_OK;
}
VOID ArchSecPageEnable(SENCOND_PAGE *page, UINT32 flag)
{
UINT32 pageStart, pageEnd;
UINT32 secStart, secEnd;
UINT32 ttbBase = (UINTPTR)g_firstPageTable;
MMU_PARAM para;
if (page == NULL) {
PRINT_ERR("second page table(stPage) can't be NULL\n");
return;
}
para.startAddr = page->page_addr;
para.endAddr = page->page_addr + page->page_length;
para.uwFlag = flag;
para.stPage = page;
pageStart = page->page_descriptor_addr;
/* page size = 2 ^ 12, 4K */
pageEnd = page->page_descriptor_addr + ((page->page_length >> SHIFT_4K) * BYTES_PER_ITEM);
DisableAPCheck();
MmuSetMemPage(&para);
dma_cache_clean(pageStart, pageEnd);
X_MMU_ONE_LEVEL_PAGE(pageStart >> SHIFT_1K, page->page_addr >> SHIFT_1M,
page->page_length >> SHIFT_1M, D_CLIENT);
secStart = ttbBase + ((para.startAddr >> SHIFT_1M) * BYTES_PER_ITEM);
secEnd = ttbBase + ((para.endAddr >> SHIFT_1M) * BYTES_PER_ITEM);
dma_cache_clean(secStart, secEnd);
CleanTLB();
EnableAPCheck();
}
VOID ArchMMUParamSet(MMU_PARAM *para)
{
UINT32 ret;
UINT32 itemStart, itemEnd;
UINT32 tableType;
if (para == NULL) {
PRINT_ERR("input is null\n");
return;
}
itemStart = MMU_GET_FIRST_TABLE_ADDR(para->startAddr);
itemEnd = MMU_GET_FIRST_TABLE_ADDR(para->endAddr - 1);
if (itemStart > itemEnd) {
PRINT_ERR("wrong addr input, itemStart:0x%x, itemEnd:0x%x\n", itemStart, itemEnd);
return;
}
tableType = MMU_GET_AREA(para->uwFlag);
if (tableType == SECOND_PAGE) {
ret = MmuSetSecondPage(para, itemStart, itemEnd);
if (ret == LOS_NOK) {
return;
}
} else if (tableType == FIRST_SECTION) {
ret = MmuSetFirstSection(para, itemStart, itemEnd);
if (ret == LOS_NOK) {
return;
}
}
CleanTLB();
}
VOID OsCachedRemap(UINTPTR physAddr, size_t size)
{
MMU_PARAM para;
if (physAddr < SYS_MEM_BASE)
return;
para.startAddr = physAddr;
para.endAddr = physAddr + size;
#ifdef LOSCFG_ARCH_ARM926
para.uwFlag = BUFFER_ENABLE | CACHE_ENABLE | ACCESS_PERM_RW_RW;
#elif defined(LOSCFG_ARCH_CORTEX_A7) || defined(LOSCFG_ARCH_CORTEX_A17) || defined(LOSCFG_ARCH_CORTEX_A53_AARCH32)
para.uwFlag = BUFFER_ENABLE | CACHE_ENABLE | EXEC_DISABLE | ACCESS_PERM_RW_RW;
#endif
para.stPage = (SENCOND_PAGE *)&g_mmuAppPage;
ArchMMUParamSet(&para);
}
VOID OsNoCachedRemap(UINTPTR physAddr, size_t size)
{
MMU_PARAM para;
if (physAddr < SYS_MEM_BASE)
return;
para.startAddr = physAddr;
para.endAddr = physAddr + size;
#ifdef LOSCFG_ARCH_ARM926
para.uwFlag = BUFFER_DISABLE | CACHE_DISABLE | ACCESS_PERM_RW_RW;
#elif defined(LOSCFG_ARCH_CORTEX_A7) || defined(LOSCFG_ARCH_CORTEX_A17) || defined(LOSCFG_ARCH_CORTEX_A53_AARCH32)
para.uwFlag = BUFFER_DISABLE | CACHE_DISABLE | EXEC_DISABLE | ACCESS_PERM_RW_RW;
#endif
para.stPage = (SENCOND_PAGE *)&g_mmuAppPage;
ArchMMUParamSet(&para);
}
VOID ArchCodeProtect(VOID)
{
MMU_PARAM mPara;
/* note: every address must be aligned to 4K (or 64K) */
mPara.startAddr = (UINTPTR)&__text_start;
mPara.endAddr = (UINTPTR)&__ram_data_start;
mPara.uwFlag = BUFFER_ENABLE | CACHE_ENABLE | ACCESS_PERM_RO_RO;
mPara.stPage = (SENCOND_PAGE *)&g_mmuOsPage;
ArchMMUParamSet(&mPara);
#ifdef LOSCFG_KERNEL_NX
mPara.startAddr = (UINTPTR)&__ram_data_start;
mPara.endAddr = ((((UINTPTR)&__ram_data_start) + MMU_1M - 1) & ~(MMU_1M - 1));
mPara.uwFlag = BUFFER_ENABLE | CACHE_ENABLE | EXEC_DISABLE | ACCESS_PERM_RW_RW;
mPara.stPage = (SENCOND_PAGE *)&g_mmuOsPage;
ArchMMUParamSet(&mPara);
#endif
}
INT32 ArchMemNoAccessSet(UINTPTR startaddr, size_t length)
{
UINTPTR ttbBase = (UINTPTR)g_firstPageTable;
UINTPTR endAddr = startaddr + length;
UINT32 base;
if (startaddr >= endAddr) {
PRINT_ERR("The Input param invalid ,length equal 0 or the configuration scope overflow."
"startaddr:0x%x, length:0x%x\n",
startaddr, length);
return -1;
}
if ((startaddr >= SYS_MEM_BASE) && (startaddr <= g_sys_mem_addr_end)) {
PRINT_ERR("The no access permission area should not contain os system mem,startaddr:0x%x\n", startaddr);
return -1;
}
if ((endAddr >= SYS_MEM_BASE) && (endAddr <= g_sys_mem_addr_end)) {
PRINT_ERR("The no access permission area should not contain os system mem,endAddr:0x%x\n", endAddr);
return -1;
}
if (((startaddr & (MMU_1M - 1)) != 0) || ((length & (MMU_1M - 1)) != 0)) {
PRINT_ERR("The start address or the length is not aligned as 1M, startaddr:0x%x, length:0x%x\n", startaddr,
length);
return -1;
}
DisableAPCheck();
base = startaddr >> SHIFT_1M;
#ifdef LOSCFG_ARCH_ARM926
X_MMU_SECTION(base, base, length >> SHIFT_1M, 0, 0, 0, D_NA);
#elif defined(LOSCFG_ARCH_CORTEX_A7) || defined(LOSCFG_ARCH_CORTEX_A17) || defined(LOSCFG_ARCH_CORTEX_A53_AARCH32)
X_MMU_SECTION(base, base, length >> SHIFT_1M, 0, 0, 0, 0, D_NA);
#endif
dma_cache_clean(ttbBase + ((startaddr >> SHIFT_1M) * BYTES_PER_ITEM),
ttbBase + ((endAddr >> SHIFT_1M) * BYTES_PER_ITEM));
CleanTLB();
EnableAPCheck();
return 0;
}
VOID ArchPrintPageItem(const MMU_PARAM *para)
{
UINT32 tmp;
UINT32 startAddr;
UINT32 pageLen;
if (para == NULL) {
return;
}
if (MMU_GET_AREA(para->uwFlag) == SECOND_PAGE) {
if (para->stPage == NULL) {
return;
}
startAddr = para->stPage->page_descriptor_addr +
(((para->startAddr - para->stPage->page_addr) >> SHIFT_4K) * BYTES_PER_ITEM);
pageLen = ((para->endAddr - para->startAddr) >> SHIFT_4K) * BYTES_PER_ITEM;
if ((para->endAddr & (MMU_4K - 1)) != 0) {
pageLen += sizeof(UINT32);
}
PRINTK("SECOND_PAGE:\n");
} else if (MMU_GET_AREA(para->uwFlag) == FIRST_SECTION) {
startAddr = (UINTPTR)g_firstPageTable + ((para->startAddr >> SHIFT_1M) * BYTES_PER_ITEM);
pageLen = ((para->endAddr - para->startAddr) >> SHIFT_1M) * BYTES_PER_ITEM;
if ((para->endAddr & (MMU_1M - 1)) != 0) {
pageLen += sizeof(UINT32);
}
PRINTK("FIRST_SECTION:\n");
} else {
return;
}
PRINTK("para->endAddr = 0x%x para->startAddr = 0x%x page_len = %u * 4\n",
para->endAddr, para->startAddr, pageLen / BYTES_PER_ITEM);
for (tmp = 0; tmp < pageLen; tmp += sizeof(UINT32)) {
if (tmp % ITEM_PRINT_LEN == 0) {
PRINTK("\n");
}
PRINTK ("0x%0+8x ", *(UINTPTR *)(UINTPTR)(startAddr + tmp));
}
PRINTK("\n");
}
/*
* The liteos cache addr & length
*/
#define LITEOS_CACHE_ADDR SYS_MEM_BASE
#define LITEOS_CACHE_LENGTH (g_sys_mem_addr_end - LITEOS_CACHE_ADDR)
#if (LITEOS_CACHE_ADDR & (MMU_1M - 1))
#error "LITEOS_CACHE_ADDR is not aligned by 1M!"
#endif
VOID MmuSectionMap(VOID)
{
UINT32 ttbBase = (UINTPTR)g_firstPageTable;
#ifdef LOSCFG_KERNEL_RUNSTOP
if (IsImageResume()) {
return;
}
#endif
/* First clear all translation table entries, i.e. set them to faulting */
(VOID)memset_s((VOID *)(UINTPTR)ttbBase, MMU_16K, 0, MMU_16K);
/*
 * Set the domain of the MMU descriptor for (0~1M) to D_NA to catch illegal NULL
 * pointer accesses in code. Any access to a NULL pointer or to memory in (0~1M)
 * triggers an exception immediately.
 */
X_MMU_SECTION(0, 0, (MMU_1M >> SHIFT_1M), UNCACHEABLE, UNBUFFERABLE,
ACCESS_NA, NON_EXECUTABLE, D_NA);
/* Set all memory of the 4G space except (0~1M) as uncacheable & RW first */
X_MMU_SECTION((MMU_1M >> SHIFT_1M), (MMU_1M >> SHIFT_1M), ((MMU_4G - MMU_1M) >> SHIFT_1M),
UNCACHEABLE, UNBUFFERABLE, ACCESS_RW, NON_EXECUTABLE, D_CLIENT);
if (LITEOS_CACHE_LENGTH & (MMU_1M - 1)) {
PRINT_ERR("LITEOS_CACHE_LENGTH is not aligned by 1M.\n");
return;
}
/*
* set table as your config
* 1: LITEOS_CACHE_ADDR ~ LITEOS_CACHE_ADDR + LITEOS_CACHE_LENGTH ---- set as section(1M) and cacheable & rw
*/
#ifdef LOSCFG_KERNEL_NX
UINTPTR codeLens = ((((UINTPTR)(&__ram_data_start) - SYS_MEM_BASE) + MMU_1M - 1) & ~(MMU_1M - 1));
UINTPTR dataStart = LITEOS_CACHE_ADDR + codeLens;
UINTPTR dataLens = LITEOS_CACHE_LENGTH - codeLens;
X_MMU_SECTION((LITEOS_CACHE_ADDR >> SHIFT_1M), (LITEOS_CACHE_ADDR >> SHIFT_1M), (codeLens >> SHIFT_1M),
CACHEABLE, BUFFERABLE, ACCESS_RW, EXECUTABLE, D_CLIENT);
X_MMU_SECTION((dataStart >> SHIFT_1M), (dataStart >> SHIFT_1M), (dataLens >> SHIFT_1M),
CACHEABLE, BUFFERABLE, ACCESS_RW, NON_EXECUTABLE, D_CLIENT);
#else
X_MMU_SECTION((LITEOS_CACHE_ADDR >> SHIFT_1M), (SYS_MEM_BASE >> SHIFT_1M), (LITEOS_CACHE_LENGTH >> SHIFT_1M),
CACHEABLE, BUFFERABLE, ACCESS_RW, EXECUTABLE, D_CLIENT);
#endif
}
/* Init OS related second page item */
VOID OsSysSecPteInit(VOID)
{
/*
 * The page table storage address.
 * Notice: ensure there is enough free memory to store the page table.
 */
g_mmuOsPage.page_addr = SYS_MEM_BASE;
g_mmuOsPage.page_length = (((UINTPTR)(&__ram_data_start) - SYS_MEM_BASE) + MMU_1M - 1) & ~(MMU_1M - 1);
g_mmuOsPage.page_descriptor_addr = (UINTPTR)g_secondPageTableOs;
g_mmuOsPage.page_type = MMU_SECOND_LEVEL_SMALL_PAGE_TABLE_ID;
if (g_mmuOsPage.page_length > (sizeof(g_secondPageTableOs) << 10)) { /* 10: 2^10 = 4k / 4 */
PRINT_ERR("%s,%d\n", __FUNCTION__, __LINE__);
PRINT_ERR("the mapping size of os second page is 0x%x, should be not bigger than 0x%x\n",
g_mmuOsPage.page_length, (sizeof(g_secondPageTableOs) << 10)); /* 10: 2^10 = 4k / 4 */
return;
}
ArchSecPageEnable(&g_mmuOsPage, BUFFER_ENABLE | CACHE_ENABLE | ACCESS_PERM_RW_RW);
#if defined (LOSCFG_KERNEL_NX) && defined (LOSCFG_KERNEL_DYNLOAD)
g_mmuDlPage.page_addr = LOS_DL_HEAP_BASE;
g_mmuDlPage.page_length = LOS_DL_HEAP_SIZE;
g_mmuDlPage.page_descriptor_addr = (UINTPTR)g_secondPageTableDl;
g_mmuDlPage.page_type = MMU_SECOND_LEVEL_SMALL_PAGE_TABLE_ID;
ArchSecPageEnable(&g_mmuDlPage, BUFFER_DISABLE | CACHE_DISABLE | EXEC_DISABLE | ACCESS_PERM_RW_RW);
#endif
#ifdef LOSCFG_NULL_ADDRESS_PROTECT
X_MMU_SECOND_TABLE_EXC_PAGE_SET();
#endif
}
/* Init app related second page item */
VOID OsAppSecPteInit(UINTPTR startAddr, UINTPTR len, UINT32 flag, UINT32 pageType)
{
g_mmuAppPage.page_addr = startAddr;
g_mmuAppPage.page_length = len;
g_mmuAppPage.page_descriptor_addr = (UINTPTR)g_secondPageTableApp;
g_mmuAppPage.page_type = pageType;
if (g_mmuAppPage.page_length > (sizeof(g_secondPageTableApp) << 10)) { /* 10: 2^10 = 4k / 4 */
PRINT_ERR("the mapping size of app second page is 0x%x, should be not bigger than 0x%x\n",
g_mmuAppPage.page_length, sizeof(g_secondPageTableApp) << 10); /* 10: 2^10 = 4k / 4 */
return;
}
ArchSecPageEnable(&g_mmuAppPage, flag);
}
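/*
 * Usage sketch (hypothetical values): map an app region with 4K small pages,
 * cached and read-write. APP_MEM_BASE and APP_MEM_LEN are placeholders here,
 * not names defined by this port.
 *
 *     OsAppSecPteInit(APP_MEM_BASE, APP_MEM_LEN,
 *                     BUFFER_ENABLE | CACHE_ENABLE | ACCESS_PERM_RW_RW,
 *                     MMU_SECOND_LEVEL_SMALL_PAGE_TABLE_ID);
 */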
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

@ -0,0 +1,389 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
* Description: LiteOS Perf Armv7 Pmu Module Implementation
* Author: Huawei LiteOS Team
* Create: 2020-07-29
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "armv7_pmu_pri.h"
#include "perf_pmu_pri.h"
#include "gic_common.h"
#include "gic_v3.h"
#include "los_typedef.h"
#include "los_hwi_pri.h"
#include "los_mp_pri.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
LITE_OS_SEC_DATA STATIC UINT32 g_pmuIrqNr[LOSCFG_KERNEL_CORE_NUM] = { 169, 170 };
STATIC HwPmu g_armv7Pmu;
STATIC INLINE UINT32 Armv7PmncRead(VOID)
{
UINT32 value = 0;
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(value));
return value;
}
STATIC INLINE VOID Armv7PmncWrite(UINT32 value)
{
value &= ARMV7_PMNC_MASK;
asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(value));
isb();
}
STATIC INLINE UINT32 Armv7PmuOverflowed(UINT32 pmnc)
{
return pmnc & ARMV7_OVERFLOWED_MASK;
}
STATIC INLINE UINT32 Armv7PmuCntOverflowed(UINT32 pmnc, UINT32 index)
{
return pmnc & ARMV7_CNT2BIT(ARMV7_IDX2CNT(index));
}
STATIC INLINE UINT32 Armv7CntValid(UINT32 index)
{
return index <= ARMV7_IDX_COUNTER_LAST;
}
STATIC INLINE VOID Armv7PmuSelCnt(UINT32 index)
{
UINT32 counter = ARMV7_IDX2CNT(index);
asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
isb();
}
STATIC INLINE VOID Armv7PmuSetCntPeriod(UINT32 index, UINT32 period)
{
if (!Armv7CntValid(index)) {
PRINT_ERR("CPU writing wrong counter %u\n", index);
} else if (index == ARMV7_IDX_CYCLE_COUNTER) {
asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (period));
} else {
Armv7PmuSelCnt(index);
asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (period));
}
}
STATIC INLINE VOID Armv7BindEvt2Cnt(UINT32 index, UINT32 value)
{
PRINT_DEBUG("bind event: %u to counter: %u\n", value, index);
Armv7PmuSelCnt(index);
value &= ARMV7_EVTYPE_MASK;
asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (value));
}
STATIC INLINE VOID Armv7EnableCnt(UINT32 index)
{
UINT32 counter = ARMV7_IDX2CNT(index);
PRINT_DEBUG("index : %u, counter: %u\n", index, counter);
asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (ARMV7_CNT2BIT(counter)));
}
STATIC INLINE VOID Armv7DisableCnt(UINT32 index)
{
UINT32 counter = ARMV7_IDX2CNT(index);
PRINT_DEBUG("index : %u, counter: %u\n", index, counter);
asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (ARMV7_CNT2BIT(counter)));
}
STATIC INLINE VOID Armv7EnableCntInterrupt(UINT32 index)
{
UINT32 counter = ARMV7_IDX2CNT(index);
asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (ARMV7_CNT2BIT(counter)));
isb();
}
STATIC INLINE VOID Armv7DisableCntInterrupt(UINT32 index)
{
UINT32 counter = ARMV7_IDX2CNT(index);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (ARMV7_CNT2BIT(counter)));
/* Clear the overflow flag in case an interrupt is pending. */
asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (ARMV7_CNT2BIT(counter)));
isb();
}
STATIC INLINE UINT32 Armv7PmuGetOverflowStatus(VOID)
{
UINT32 value;
asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (value));
value &= ARMV7_FLAG_MASK;
asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (value));
return value;
}
STATIC VOID Armv7EnableEvent(Event *event)
{
UINT32 cnt = event->counter;
if (!Armv7CntValid(cnt)) {
PRINT_ERR("CPU enabling wrong PMNC counter IRQ enable %u\n", cnt);
return;
}
if (event->period == 0) {
PRINT_INFO("event period value not valid, counter: %u\n", cnt);
return;
}
/*
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
UINT32 intSave = LOS_IntLock();
Armv7DisableCnt(cnt);
/*
* Set event (if destined for PMNx counters)
* We only need to set the event for the cycle counter if we
* have the ability to perform event filtering.
*/
if (cnt != ARMV7_IDX_CYCLE_COUNTER) {
Armv7BindEvt2Cnt(cnt, event->eventId);
}
/* Enable interrupt for this counter */
Armv7EnableCntInterrupt(cnt);
Armv7EnableCnt(cnt);
LOS_IntRestore(intSave);
PRINT_DEBUG("enabled event: %u cnt: %u\n", event->eventId, cnt);
}
STATIC VOID Armv7DisableEvent(Event *event)
{
UINT32 cnt = event->counter;
if (!Armv7CntValid(cnt)) {
PRINT_ERR("CPU enabling wrong PMNC counter IRQ enable %u\n", cnt);
return;
}
UINT32 intSave = LOS_IntLock();
Armv7DisableCnt(cnt);
Armv7DisableCntInterrupt(cnt);
LOS_IntRestore(intSave);
}
STATIC VOID Armv7StartAllCnt(VOID)
{
PRINT_DEBUG("starting pmu...\n");
/* Enable all counters */
UINT32 reg = Armv7PmncRead() | ARMV7_PMNC_E;
if (g_armv7Pmu.cntDivided) {
reg |= ARMV7_PMNC_D;
} else {
reg &= ~ARMV7_PMNC_D;
}
Armv7PmncWrite(reg);
LOS_HwiEnable(g_pmuIrqNr[ArchCurrCpuid()]);
}
STATIC VOID Armv7StopAllCnt(VOID)
{
PRINT_DEBUG("stopping pmu...\n");
/* Disable all counters */
Armv7PmncWrite(Armv7PmncRead() & ~ARMV7_PMNC_E);
LOS_HwiDisable(g_pmuIrqNr[ArchCurrCpuid()]);
}
STATIC VOID Armv7ResetAllCnt(VOID)
{
UINT32 index;
/* The counter and interrupt enable registers are unknown at reset. */
for (index = ARMV7_IDX_CYCLE_COUNTER; index < ARMV7_IDX_MAX_COUNTER; index++) {
Armv7DisableCnt(index);
Armv7DisableCntInterrupt(index);
}
/* Initialize & Reset PMNC: C and P bits and D bits */
UINT32 reg = ARMV7_PMNC_P | ARMV7_PMNC_C | (g_armv7Pmu.cntDivided ? ARMV7_PMNC_D : 0);
Armv7PmncWrite(reg);
}
STATIC VOID Armv7SetEventPeriod(Event *event)
{
if (event->period != 0) {
PRINT_INFO("counter: %u, period: 0x%x\n", event->counter, event->period);
Armv7PmuSetCntPeriod(event->counter, PERIOD_CALC(event->period));
}
}
STATIC UINTPTR Armv7ReadEventCnt(Event *event)
{
UINT32 value = 0;
UINT32 index = event->counter;
if (!Armv7CntValid(index)) {
PRINT_ERR("CPU reading wrong counter %u\n", index);
} else if (index == ARMV7_IDX_CYCLE_COUNTER) {
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
} else {
Armv7PmuSelCnt(index);
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
}
if (value < PERIOD_CALC(event->period)) {
if (Armv7PmuCntOverflowed(Armv7PmuGetOverflowStatus(), event->counter)) {
value += event->period;
}
} else {
value -= PERIOD_CALC(event->period);
}
return value;
}
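/*
 * Note on the arithmetic above: PERIOD_CALC(p) (from perf_pmu_pri.h) is assumed
 * to yield the counter preload value, so a counter programmed with it overflows
 * after p events. A raw value below the preload therefore implies the counter
 * wrapped, and the overflow branch compensates by adding the period; otherwise
 * the preload is subtracted to recover the event count.
 */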
STATIC const UINT32 g_armv7Map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERF_HW_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERF_HW_INSTRUCTIONS,
[PERF_COUNT_HW_DCACHE_REFERENCES] = ARMV7_PERF_HW_DCACHES,
[PERF_COUNT_HW_DCACHE_MISSES] = ARMV7_PERF_HW_DCACHE_MISSES,
[PERF_COUNT_HW_ICACHE_REFERENCES] = ARMV7_PERF_HW_ICACHES,
[PERF_COUNT_HW_ICACHE_MISSES] = ARMV7_PERF_HW_ICACHE_MISSES,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERF_HW_BRANCHES,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERF_HW_BRANCE_MISSES,
};
UINT32 Armv7PmuMapEvent(UINT32 eventType, BOOL reverse)
{
if (!reverse) { /* Common event to armv7 real event */
if (eventType < ARRAY_SIZE(g_armv7Map)) {
return g_armv7Map[eventType];
}
return eventType;
} else { /* Armv7 real event to common event */
UINT32 i;
for (i = 0; i < ARRAY_SIZE(g_armv7Map); i++) {
if (g_armv7Map[i] == eventType) {
return i;
}
}
return PERF_HW_INVAILD_EVENT_TYPE;
}
}
STATIC VOID Armv7PmuIrqHandler(VOID)
{
UINT32 index;
PerfRegs regs;
PerfEvent *events = &(g_armv7Pmu.pmu.events);
UINT32 eventNum = events->nr;
/* Get and reset the IRQ flags */
UINT32 pmnc = Armv7PmuGetOverflowStatus();
if (!Armv7PmuOverflowed(pmnc)) {
return;
}
(VOID)memset_s(&regs, sizeof(PerfRegs), 0, sizeof(PerfRegs));
OsPerfFetchIrqRegs(&regs);
Armv7StopAllCnt();
for (index = 0; index < eventNum; index++) {
Event *event = &(events->per[index]);
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
*/
if (!Armv7PmuCntOverflowed(pmnc, event->counter) || (event->period == 0)) {
continue;
}
Armv7PmuSetCntPeriod(event->counter, PERIOD_CALC(event->period));
OsPerfUpdateEventCount(event, event->period);
OsPerfHandleOverFlow(event, &regs);
}
Armv7StartAllCnt();
}
UINT32 OsGetPmuMaxCounter(VOID)
{
return ARMV7_IDX_MAX_COUNTER;
}
UINT32 OsGetPmuCycleCounter(VOID)
{
return ARMV7_IDX_CYCLE_COUNTER;
}
UINT32 OsGetPmuCounter0(VOID)
{
return ARMV7_IDX_COUNTER0;
}
STATIC HwPmu g_armv7Pmu = {
.canDivided = TRUE,
.enable = Armv7EnableEvent,
.disable = Armv7DisableEvent,
.start = Armv7StartAllCnt,
.stop = Armv7StopAllCnt,
.clear = Armv7ResetAllCnt,
.setPeriod = Armv7SetEventPeriod,
.readCnt = Armv7ReadEventCnt,
.mapEvent = Armv7PmuMapEvent,
};
UINT32 OsHwPmuInit(VOID)
{
UINT32 ret;
UINT32 index;
for (index = 0; index < LOSCFG_KERNEL_CORE_NUM; index++) {
ret = LOS_HwiCreate(g_pmuIrqNr[index], 0, 0, Armv7PmuIrqHandler, 0);
if (ret != LOS_OK) {
PRINT_ERR("pmu %u irq handler register failed\n", g_pmuIrqNr[index]);
return ret;
}
#ifdef LOSCFG_KERNEL_SMP
ret = HalIrqSetAffinity(g_pmuIrqNr[index], CPUID_TO_AFFI_MASK(index));
if (ret != LOS_OK) {
PRINT_ERR("The interrupt %u is invalid, irq set affinity failed.\n", g_pmuIrqNr[index]);
return ret;
}
#endif
}
ret = OsPerfHwInit(&g_armv7Pmu);
return ret;
}
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

@ -0,0 +1,105 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
* Description: LiteOS Perf Armv7 Pmu Module Private HeadFile
* Author: Huawei LiteOS Team
* Create: 2020-07-29
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARMV7_PMU_PRI_H
#define _ARMV7_PMU_PRI_H
#include "los_typedef.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
/* counters overflow flag status reg */
#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK /* Mask for pmu overflowed */
/* pmnc config reg */
#define ARMV7_PMNC_E (1U << 0) /* Enable all counters */
#define ARMV7_PMNC_P (1U << 1) /* Reset all counters */
#define ARMV7_PMNC_C (1U << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D (1U << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X (1U << 4) /* Export to ETM */
#define ARMV7_PMNC_DP (1U << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
/* pmxevtyper event selection reg */
#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
/* armv7 counters index */
#define ARMV7_IDX_COUNTER0 1
#define ARMV7_IDX_CYCLE_COUNTER 0
#define ARMV7_IDX_MAX_COUNTER 9
#define ARMV7_MAX_COUNTERS 32
#define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + ARMV7_MAX_COUNTERS - 1)
#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
/* armv7 event counter index mapping */
#define ARMV7_CNT2BIT(x) (1UL << (x))
#define ARMV7_IDX2CNT(x) (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
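/*
 * Worked example: event counter index 3 maps to hardware counter
 * ARMV7_IDX2CNT(3) = (3 - 1) & 0x1F = 2, whose enable/overflow bit is
 * ARMV7_CNT2BIT(2) = 0x4. The cycle counter (index 0) wraps around to
 * (0 - 1) & 0x1F = 31, which is exactly the cycle-counter bit position
 * in the PMU enable and overflow registers.
 */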
enum PmuEventType {
ARMV7_PERF_HW_CYCLES = 0xFF, /* cycles */
ARMV7_PERF_HW_INSTRUCTIONS = 0x08, /* instructions */
ARMV7_PERF_HW_DCACHES = 0x04, /* dcache */
ARMV7_PERF_HW_DCACHE_MISSES = 0x03, /* dcache-misses */
ARMV7_PERF_HW_ICACHES = 0x14, /* icache */
ARMV7_PERF_HW_ICACHE_MISSES = 0x01, /* icache-misses */
ARMV7_PERF_HW_BRANCHES = 0x0C, /* software change of pc */
ARMV7_PERF_HW_BRANCE_MISSES = 0x10, /* branch-misses */
ARMV7_PERF_HW_PRED_BRANCH = 0x12, /* predictable branches */
ARMV7_PERF_HW_NUM_CYC_IRQ = 0x50, /* number of cycles Irqs are interrupted */
ARMV7_PERF_HW_EXC_TAKEN = 0x09, /* exception_taken */
ARMV7_PERF_HW_DATA_READ = 0x06, /* data read */
ARMV7_PERF_HW_DATA_WRITE = 0x07, /* data write */
ARMV7_PERF_HW_STREX_PASSED = 0x80, /* strex passed */
ARMV7_PERF_HW_STREX_FAILED = 0x81, /* strex failed */
ARMV7_PERF_HW_LP_IN_TCM = 0x82, /* literal pool in TCM region */
ARMV7_PERF_HW_DMB_STALL = 0x90, /* DMB stall */
ARMV7_PERF_HW_ITCM_ACCESS = 0x91, /* ITCM access */
ARMV7_PERF_HW_DTCM_ACCESS = 0x92, /* DTCM access */
ARMV7_PERF_HW_DATA_EVICTION = 0x93, /* data eviction */
ARMV7_PERF_HW_SCU = 0x94, /* SCU coherency operation */
ARMV7_PERF_HW_INSCACHE_DEP_DW = 0x95, /* instruction cache dependent stall */
ARMV7_PERF_HW_DATA_CACHE_DEP_STALL = 0x96, /* data cache dependent stall */
ARMV7_PERF_HW_NOCACHE_NO_PER_DEP_STALL = 0x97, /* non-cacheable no peripheral dependent stall */
ARMV7_PERF_HW_NOCACHE_PER_DEP_STALL = 0x98, /* non-Cacheable peripheral dependent stall */
ARMV7_PERF_HW_DATA_CACHE_HP_DEP_STALL = 0x99, /* data cache high priority dependent stall */
ARMV7_PERF_HW_AXI_FAST_PERIPHERAL = 0x9A, /* accesses to AXI fast peripheral port (reads and writes) */
};
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARMV7_PMU_PRI_H */

@ -0,0 +1,128 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Aarch32 Hw Runstop handle
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* -------------------------------------------------------------------------- */
#include "arch/regs.h"
#include "arch/asm.h"
.extern g_saveAR
.extern g_saveSRContext
.global OsSRSaveRegister
.global OsSRRestoreRegister
.fpu vfpv4
@.fpu neon
.arch armv7a
.text
OsSRSaveRegister:
PUSH {R2}
LDR R2, =g_saveAR
STR R0, [R2]
STR R1, [R2, #4]
POP {R2}
MRC P15, 0, R0, c0, c0, 5
AND R0, R0, #MPIDR_CPUID_MASK
MOV R1, #72 @ 72 is the total size in bytes of the saved task context registers (R0~R15, SPSR, CPSR).
MUL R1, R1, R0
LDR R0, =g_saveSRContext
ADD R0, R0, R1
ADD R0, R0, #72
MOV R1, SP
STMFD R0!, {R1}
MRS R1, SPSR
STMFD R0!, {R1}
MOV R1, LR
STMFD R0!, {R1} @PC
STMFD R0!, {R1} @LR
STMFD R0!, {R12}
MOV R12, R0
LDR R0, =g_saveAR
LDR R0, [R0]
LDR R1, =g_saveAR
LDR R1, [R1, #4]
STMFD R12!, {R0-R3}
STMFD R12!, {R4-R11}
MRS R0, CPSR
STMFD R12!, {R0}
BX LR
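@ Layout sketch of one 72-byte g_saveSRContext slot, as produced by the STMFD
@ sequence above (low address to high): CPSR, R4-R11, R0-R3, R12, LR, PC(=LR),
@ SPSR, SP -- 18 words in total. OsSRRestoreRegister below walks the same
@ layout back with LDMFD, in the reverse order.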
OsSRRestoreRegister:
MRC P15, 0, R0, c0, c0, 5
AND R0, R0, #MPIDR_CPUID_MASK
MOV R1, #72 @ 72 is the total size in bytes of the saved task context registers (R0~R15, SPSR, CPSR).
MUL R1, R1, R0
LDR R12, =g_saveSRContext
ADD R12, R12, R1
LDMFD R12!, {R0}
MSR CPSR_cxsf, R0
LDMFD R12!, {R4-R11}
LDMFD R12!, {R0-R3}
PUSH {R2}
LDR R2, =g_saveAR
STR R0, [R2]
STR R1, [R2, #4]
POP {R2}
MOV R0, R12
LDMFD R0!, {R12}
LDMFD R0!, {R1} @LR
LDMFD R0!, {R1} @PC
MOV LR, R1
LDMFD R0!, {R1}
MSR SPSR_cxsf, R1
LDMFD R0!, {R1}
MOV SP, R1
LDR R0, =g_saveAR
LDR R0, [R0]
LDR R1, =g_saveAR
LDR R1, [R1, #4]
BX LR
.end

@ -0,0 +1,62 @@
/*----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2018-2020. All rights reserved.
* Description: Spinlock Assembly
* Author: Huawei LiteOS Team
* Create: 2018-07-11
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*---------------------------------------------------------------------------*/
#include "arch/asm.h"
FUNCTION(ArchSpinLock)
mov r2, #1
1:
ldrex r1, [r0]
teq r1, #0
wfene
strexeq r1, r2, [r0]
teqeq r1, #0
bne 1b
dmb
bx lr
/*
 * When the spinlock is already taken, r1 reads back 1, which matches the
 * 'strex' failure return value, so no extra conversion is needed.
 */
FUNCTION(ArchSpinTrylock)
ldrex r1, [r0]
teq r1, #0
moveq r2, #1
strexeq r1, r2, [r0]
dmb
mov r0, r1
bx lr
FUNCTION(ArchSpinUnlock)
dmb
mov r1, #0
str r1, [r0]
dsb
sev
bx lr
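/*
 * A minimal usage sketch (C side, not part of this file), assuming the usual
 * prototypes such as VOID ArchSpinLock(UINTPTR *lock) and a lock word
 * initialized to 0 (unlocked):
 *
 *   UINTPTR lock = 0;
 *   ArchSpinLock(&lock);     // spins (with WFE) until the word is claimed
 *   // ... critical section ...
 *   ArchSpinUnlock(&lock);   // stores 0, then DSB + SEV to wake waiters
 *
 * ArchSpinTrylock(&lock) returns 0 on success and non-zero if already held,
 * per the strex return value noted above.
 */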

@ -0,0 +1,106 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Aarch32 Hw Task Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "los_task_pri.h"
#include "arch/task.h"
#include "arch/cache.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
/* bit[30] enables the FPU */
#define FP_EN (1U << 30)
LITE_OS_SEC_TEXT_INIT VOID OsTaskExit(VOID)
{
__asm__ __volatile__("swi 0");
}
#ifdef LOSCFG_GDB
STATIC VOID OsTaskEntrySetupLoopFrame(UINT32) __attribute__((noinline, naked));
VOID OsTaskEntrySetupLoopFrame(UINT32 arg0)
{
asm volatile("\tsub fp, sp, #0x4\n"
"\tpush {fp, lr}\n"
"\tadd fp, sp, #0x4\n"
"\tpush {fp, lr}\n"
"\tadd fp, sp, #0x4\n"
"\tbl OsTaskEntry\n"
"\tpop {fp, lr}\n"
"\tpop {fp, pc}\n");
}
#endif
LITE_OS_SEC_TEXT_INIT VOID *OsTaskStackInit(UINT32 taskId, UINT32 stackSize, VOID *topStack)
{
UINT32 index = 1;
TaskContext *taskContext = NULL;
OsStackInit(topStack, stackSize);
taskContext = (TaskContext *)(((UINTPTR)topStack + stackSize) - sizeof(TaskContext));
/* initialize the task context */
#ifdef LOSCFG_GDB
taskContext->PC = (UINTPTR)OsTaskEntrySetupLoopFrame;
#else
taskContext->PC = (UINTPTR)OsTaskEntry;
#endif
taskContext->LR = (UINTPTR)OsTaskExit; /* keep LR to distinguish whether the return target is a THUMB or ARM instruction */
taskContext->R[0] = taskId; /* R0 */
taskContext->R[index++] = 0x01010101; /* R1, 0x01010101 : register initialization magic word */
for (; index < GEN_REGS_NUM; index++) {
taskContext->R[index] = taskContext->R[index - 1] + taskContext->R[1]; /* R2 - R12 */
}
#ifdef LOSCFG_INTERWORK_THUMB
taskContext->regPSR = PSR_MODE_SVC_THUMB; /* CPSR (Disable IRQ and FIQ interrupts, THUMB-mode) */
#else
taskContext->regPSR = PSR_MODE_SVC_ARM; /* CPSR (Disable IRQ and FIQ interrupts, ARM-mode) */
#endif
#ifdef LOSCFG_ARCH_FPU_ENABLE
/* 0xAAA0000000000000LL : floating-point register initialization magic word */
for (index = 0; index < FP_REGS_NUM; index++) {
taskContext->D[index] = 0xAAA0000000000000LL + index; /* D0 - D31 */
}
taskContext->regFPSCR = 0;
taskContext->regFPEXC = FP_EN;
#endif
return (VOID *)taskContext;
}
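/*
 * Illustration (assumed numbers): with a 0x1000-byte stack whose base is
 * topStack, the initial TaskContext sits at topStack + 0x1000 -
 * sizeof(TaskContext), so it is the first thing popped when the task is
 * scheduled in. The loop above fills R2..R12 with the rolling pattern
 * 0x02020202, 0x03030303, ..., 0x0C0C0C0C, which makes stack corruption
 * easy to spot in a memory dump.
 */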
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

@ -0,0 +1,24 @@
include $(LITEOSTOPDIR)/config.mk
MODULE_NAME := $(LOSCFG_ARCH_CPU)
LOCAL_SRCS := $(wildcard src/*.c) $(wildcard src/*.S)
ifneq ($(LOSCFG_FPB_ENABLE), y)
LOCAL_SRCS := $(filter-out src/fpb.c, $(LOCAL_SRCS))
endif
ifneq ($(LOSCFG_APC_ENABLE), y)
LOCAL_SRCS := $(filter-out src/mpu.c, $(LOCAL_SRCS))
endif
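# Example (assumed config): with LOSCFG_FPB_ENABLE unset and LOSCFG_APC_ENABLE=y,
# src/fpb.c is filtered out of LOCAL_SRCS while src/mpu.c is kept.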
LOCAL_INCLUDE := \
-I $(LITEOSTOPDIR)/kernel/base/include \
-I $(LITEOSTOPDIR)/kernel/extended/include
LOCAL_FLAGS := $(LOCAL_INCLUDE) $(LITEOS_GCOV_OPTS)
ifeq ($(LOSCFG_GDB), y)
LOCAL_FLAGS += $(AS_OBJS_LIBC_FLAGS)
endif
include $(MODULE)

@ -0,0 +1,411 @@
/******************************************************************************
* @file cachel1_armv7.h
* @brief CMSIS Level 1 Cache API for Armv7-M and later
* @version V1.0.0
* @date 03. March 2020
******************************************************************************/
/*
* Copyright (c) 2020 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef ARM_CACHEL1_ARMV7_H
#define ARM_CACHEL1_ARMV7_H
/**
\ingroup CMSIS_Core_FunctionInterface
\defgroup CMSIS_Core_CacheFunctions Cache Functions
\brief Functions that configure Instruction and Data cache.
@{
*/
/* Cache Size ID Register Macros */
#define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos)
#define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos )
#ifndef __SCB_DCACHE_LINE_SIZE
#define __SCB_DCACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */
#endif
#ifndef __SCB_ICACHE_LINE_SIZE
#define __SCB_ICACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */
#endif
/**
\brief Enable I-Cache
\details Turns on I-Cache
*/
__STATIC_FORCEINLINE void SCB_EnableICache (void)
{
#if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)
if (SCB->CCR & SCB_CCR_IC_Msk) return; /* return if ICache is already enabled */
__DSB();
__ISB();
SCB->ICIALLU = 0UL; /* invalidate I-Cache */
__DSB();
__ISB();
SCB->CCR |= (uint32_t)SCB_CCR_IC_Msk; /* enable I-Cache */
__DSB();
__ISB();
#endif
}
/**
\brief Disable I-Cache
\details Turns off I-Cache
*/
__STATIC_FORCEINLINE void SCB_DisableICache (void)
{
#if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)
__DSB();
__ISB();
SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk; /* disable I-Cache */
SCB->ICIALLU = 0UL; /* invalidate I-Cache */
__DSB();
__ISB();
#endif
}
/**
\brief Invalidate I-Cache
\details Invalidates I-Cache
*/
__STATIC_FORCEINLINE void SCB_InvalidateICache (void)
{
#if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)
__DSB();
__ISB();
SCB->ICIALLU = 0UL;
__DSB();
__ISB();
#endif
}
/**
\brief I-Cache Invalidate by address
\details Invalidates I-Cache for the given address.
I-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity.
I-Cache memory blocks which are part of given address + given size are invalidated.
\param[in] addr address
\param[in] isize size of memory block (in number of bytes)
*/
__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (void *addr, int32_t isize)
{
#if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)
if ( isize > 0 ) {
int32_t op_size = isize + (((uint32_t)addr) & (__SCB_ICACHE_LINE_SIZE - 1U));
uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_ICACHE_LINE_SIZE - 1U) */;
__DSB();
do {
SCB->ICIMVAU = op_addr; /* register accepts only 32-byte aligned values, only bits 31..5 are valid */
op_addr += __SCB_ICACHE_LINE_SIZE;
op_size -= __SCB_ICACHE_LINE_SIZE;
} while ( op_size > 0 );
__DSB();
__ISB();
}
#endif
}
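/*
 * Worked example (hypothetical values): addr = 0x20000013, isize = 40.
 * The start offset within its cache line is 0x13 (19), so op_size becomes
 * 40 + 19 = 59 and the loop issues two ICIMVAU writes, at 0x20000013 and
 * 0x20000033. Since the register ignores bits 4..0, this invalidates the
 * lines at 0x20000000 and 0x20000020, covering the whole requested range.
 */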
/**
\brief Enable D-Cache
\details Turns on D-Cache
*/
__STATIC_FORCEINLINE void SCB_EnableDCache (void)
{
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
uint32_t ccsidr;
uint32_t sets;
uint32_t ways;
if (SCB->CCR & SCB_CCR_DC_Msk) return; /* return if DCache is already enabled */
SCB->CSSELR = 0U; /* select Level 1 data cache */
__DSB();
ccsidr = SCB->CCSIDR;
/* invalidate D-Cache */
sets = (uint32_t)(CCSIDR_SETS(ccsidr));
do {
ways = (uint32_t)(CCSIDR_WAYS(ccsidr));
do {
SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) |
((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) );
#if defined ( __CC_ARM )
__schedule_barrier();
#endif
} while (ways-- != 0U);
} while(sets-- != 0U);
__DSB();
SCB->CCR |= (uint32_t)SCB_CCR_DC_Msk; /* enable D-Cache */
__DSB();
__ISB();
#endif
}
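/*
 * Set/way arithmetic, with an assumed cache geometry: for a 16 KB, 4-way
 * cache with 32-byte lines there are 512 lines in 128 sets, so CCSIDR
 * reports SETS = 127 and WAYS = 3, and the nested loops above issue
 * 128 * 4 = 512 DCISW writes, one per cache line.
 */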
/**
\brief Disable D-Cache
\details Turns off D-Cache
*/
__STATIC_FORCEINLINE void SCB_DisableDCache (void)
{
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
uint32_t ccsidr;
uint32_t sets;
uint32_t ways;
SCB->CSSELR = 0U; /* select Level 1 data cache */
__DSB();
SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */
__DSB();
ccsidr = SCB->CCSIDR;
/* clean & invalidate D-Cache */
sets = (uint32_t)(CCSIDR_SETS(ccsidr));
do {
ways = (uint32_t)(CCSIDR_WAYS(ccsidr));
do {
SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) |
((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) );
#if defined ( __CC_ARM )
__schedule_barrier();
#endif
} while (ways-- != 0U);
} while(sets-- != 0U);
__DSB();
__ISB();
#endif
}
/**
\brief Invalidate D-Cache
\details Invalidates D-Cache
*/
__STATIC_FORCEINLINE void SCB_InvalidateDCache (void)
{
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
uint32_t ccsidr;
uint32_t sets;
uint32_t ways;
SCB->CSSELR = 0U; /* select Level 1 data cache */
__DSB();
ccsidr = SCB->CCSIDR;
/* invalidate D-Cache */
sets = (uint32_t)(CCSIDR_SETS(ccsidr));
do {
ways = (uint32_t)(CCSIDR_WAYS(ccsidr));
do {
SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) |
((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) );
#if defined ( __CC_ARM )
__schedule_barrier();
#endif
} while (ways-- != 0U);
} while(sets-- != 0U);
__DSB();
__ISB();
#endif
}
/**
\brief Clean D-Cache
\details Cleans D-Cache
*/
__STATIC_FORCEINLINE void SCB_CleanDCache (void)
{
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
uint32_t ccsidr;
uint32_t sets;
uint32_t ways;
SCB->CSSELR = 0U; /* select Level 1 data cache */
__DSB();
ccsidr = SCB->CCSIDR;
/* clean D-Cache */
sets = (uint32_t)(CCSIDR_SETS(ccsidr));
do {
ways = (uint32_t)(CCSIDR_WAYS(ccsidr));
do {
SCB->DCCSW = (((sets << SCB_DCCSW_SET_Pos) & SCB_DCCSW_SET_Msk) |
((ways << SCB_DCCSW_WAY_Pos) & SCB_DCCSW_WAY_Msk) );
#if defined ( __CC_ARM )
__schedule_barrier();
#endif
} while (ways-- != 0U);
} while(sets-- != 0U);
__DSB();
__ISB();
#endif
}
/**
\brief Clean & Invalidate D-Cache
\details Cleans and Invalidates D-Cache
*/
__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void)
{
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
uint32_t ccsidr;
uint32_t sets;
uint32_t ways;
SCB->CSSELR = 0U; /* select Level 1 data cache */
__DSB();
ccsidr = SCB->CCSIDR;
/* clean & invalidate D-Cache */
sets = (uint32_t)(CCSIDR_SETS(ccsidr));
do {
ways = (uint32_t)(CCSIDR_WAYS(ccsidr));
do {
SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) |
((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) );
#if defined ( __CC_ARM )
__schedule_barrier();
#endif
} while (ways-- != 0U);
} while(sets-- != 0U);
__DSB();
__ISB();
#endif
}
/**
\brief D-Cache Invalidate by address
\details Invalidates D-Cache for the given address.
D-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity.
D-Cache memory blocks which are part of given address + given size are invalidated.
\param[in] addr address
\param[in] dsize size of memory block (in number of bytes)
*/
__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsize)
{
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
if ( dsize > 0 ) {
int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U));
uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */;
__DSB();
do {
SCB->DCIMVAC = op_addr; /* register accepts only 32-byte aligned values, only bits 31..5 are valid */
op_addr += __SCB_DCACHE_LINE_SIZE;
op_size -= __SCB_DCACHE_LINE_SIZE;
} while ( op_size > 0 );
__DSB();
__ISB();
}
#endif
}
/**
\brief D-Cache Clean by address
\details Cleans D-Cache for the given address
D-Cache is cleaned starting from a 32 byte aligned address in 32 byte granularity.
D-Cache memory blocks which are part of given address + given size are cleaned.
\param[in] addr address
\param[in] dsize size of memory block (in number of bytes)
*/
__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize)
{
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
if ( dsize > 0 ) {
int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U));
uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */;
__DSB();
do {
SCB->DCCMVAC = op_addr; /* register accepts only 32-byte aligned values, only bits 31..5 are valid */
op_addr += __SCB_DCACHE_LINE_SIZE;
op_size -= __SCB_DCACHE_LINE_SIZE;
} while ( op_size > 0 );
__DSB();
__ISB();
}
#endif
}
/**
\brief D-Cache Clean and Invalidate by address
\details Cleans and invalidates D_Cache for the given address
D-Cache is cleaned and invalidated starting from a 32 byte aligned address in 32 byte granularity.
D-Cache memory blocks which are part of given address + given size are cleaned and invalidated.
\param[in] addr address (aligned to 32-byte boundary)
\param[in] dsize size of memory block (in number of bytes)
*/
__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize)
{
#if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
if ( dsize > 0 ) {
int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U));
uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */;
__DSB();
do {
SCB->DCCIMVAC = op_addr; /* register accepts only 32-byte aligned values, only bits 31..5 are valid */
op_addr += __SCB_DCACHE_LINE_SIZE;
op_size -= __SCB_DCACHE_LINE_SIZE;
} while ( op_size > 0 );
__DSB();
__ISB();
}
#endif
}
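/*
 * A common usage sketch (not part of CMSIS) for buffers shared with a DMA
 * engine; dmaTx, dmaRx, txBuf, rxBuf and BUF_LEN are hypothetical:
 */
#if 0 /* illustration only */
  SCB_CleanDCache_by_Addr((uint32_t *)txBuf, BUF_LEN);      /* push CPU writes to RAM before DMA reads it */
  dmaTx(txBuf, BUF_LEN);
  dmaRx(rxBuf, BUF_LEN);
  SCB_InvalidateDCache_by_Addr((void *)rxBuf, BUF_LEN);     /* drop stale lines before the CPU reads DMA data */
#endif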
/*@} end of CMSIS_Core_CacheFunctions */
#endif /* ARM_CACHEL1_ARMV7_H */

@ -0,0 +1,885 @@
/**************************************************************************//**
* @file cmsis_armcc.h
* @brief CMSIS compiler ARMCC (Arm Compiler 5) header file
* @version V5.2.1
* @date 26. March 2020
******************************************************************************/
/*
* Copyright (c) 2009-2020 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CMSIS_ARMCC_H
#define __CMSIS_ARMCC_H
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 400677)
#error "Please use Arm Compiler Toolchain V4.0.677 or later!"
#endif
/* CMSIS compiler control architecture macros */
#if ((defined (__TARGET_ARCH_6_M ) && (__TARGET_ARCH_6_M == 1)) || \
(defined (__TARGET_ARCH_6S_M ) && (__TARGET_ARCH_6S_M == 1)) )
#define __ARM_ARCH_6M__ 1
#endif
#if (defined (__TARGET_ARCH_7_M ) && (__TARGET_ARCH_7_M == 1))
#define __ARM_ARCH_7M__ 1
#endif
#if (defined (__TARGET_ARCH_7E_M) && (__TARGET_ARCH_7E_M == 1))
#define __ARM_ARCH_7EM__ 1
#endif
/* __ARM_ARCH_8M_BASE__ not applicable */
/* __ARM_ARCH_8M_MAIN__ not applicable */
/* __ARM_ARCH_8_1M_MAIN__ not applicable */
/* CMSIS compiler control DSP macros */
#if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
#define __ARM_FEATURE_DSP 1
#endif
/* CMSIS compiler specific defines */
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE __inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static __inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE static __forceinline
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __declspec(noreturn)
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed))
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT __packed struct
#endif
#ifndef __PACKED_UNION
#define __PACKED_UNION __packed union
#endif
#ifndef __UNALIGNED_UINT32 /* deprecated */
#define __UNALIGNED_UINT32(x) (*((__packed uint32_t *)(x)))
#endif
#ifndef __UNALIGNED_UINT16_WRITE
#define __UNALIGNED_UINT16_WRITE(addr, val) ((*((__packed uint16_t *)(addr))) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
#define __UNALIGNED_UINT16_READ(addr) (*((const __packed uint16_t *)(addr)))
#endif
#ifndef __UNALIGNED_UINT32_WRITE
#define __UNALIGNED_UINT32_WRITE(addr, val) ((*((__packed uint32_t *)(addr))) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
#define __UNALIGNED_UINT32_READ(addr) (*((const __packed uint32_t *)(addr)))
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
#ifndef __RESTRICT
#define __RESTRICT __restrict
#endif
#ifndef __COMPILER_BARRIER
#define __COMPILER_BARRIER() __memory_changed()
#endif
/* ######################### Startup and Lowlevel Init ######################## */
#ifndef __PROGRAM_START
#define __PROGRAM_START __main
#endif
#ifndef __INITIAL_SP
#define __INITIAL_SP Image$$ARM_LIB_STACK$$ZI$$Limit
#endif
#ifndef __STACK_LIMIT
#define __STACK_LIMIT Image$$ARM_LIB_STACK$$ZI$$Base
#endif
#ifndef __VECTOR_TABLE
#define __VECTOR_TABLE __Vectors
#endif
#ifndef __VECTOR_TABLE_ATTRIBUTE
#define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET")))
#endif
/* ########################### Core Function Access ########################### */
/** \ingroup CMSIS_Core_FunctionInterface
\defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
@{
*/
/**
\brief Enable IRQ Interrupts
\details Enables IRQ interrupts by clearing the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
/* intrinsic void __enable_irq(); */
/**
\brief Disable IRQ Interrupts
\details Disables IRQ interrupts by setting the I-bit in the CPSR.
Can only be executed in Privileged modes.
*/
/* intrinsic void __disable_irq(); */
/**
\brief Get Control Register
\details Returns the content of the Control Register.
\return Control Register value
*/
__STATIC_INLINE uint32_t __get_CONTROL(void)
{
register uint32_t __regControl __ASM("control");
return(__regControl);
}
/**
\brief Set Control Register
\details Writes the given value to the Control Register.
\param [in] control Control Register value to set
*/
__STATIC_INLINE void __set_CONTROL(uint32_t control)
{
register uint32_t __regControl __ASM("control");
__regControl = control;
}
/**
\brief Get IPSR Register
\details Returns the content of the IPSR Register.
\return IPSR Register value
*/
__STATIC_INLINE uint32_t __get_IPSR(void)
{
register uint32_t __regIPSR __ASM("ipsr");
return(__regIPSR);
}
/**
\brief Get APSR Register
\details Returns the content of the APSR Register.
\return APSR Register value
*/
__STATIC_INLINE uint32_t __get_APSR(void)
{
register uint32_t __regAPSR __ASM("apsr");
return(__regAPSR);
}
/**
\brief Get xPSR Register
\details Returns the content of the xPSR Register.
\return xPSR Register value
*/
__STATIC_INLINE uint32_t __get_xPSR(void)
{
register uint32_t __regXPSR __ASM("xpsr");
return(__regXPSR);
}
/**
\brief Get Process Stack Pointer
\details Returns the current value of the Process Stack Pointer (PSP).
\return PSP Register value
*/
__STATIC_INLINE uint32_t __get_PSP(void)
{
register uint32_t __regProcessStackPointer __ASM("psp");
return(__regProcessStackPointer);
}
/**
\brief Set Process Stack Pointer
\details Assigns the given value to the Process Stack Pointer (PSP).
\param [in] topOfProcStack Process Stack Pointer value to set
*/
__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
{
register uint32_t __regProcessStackPointer __ASM("psp");
__regProcessStackPointer = topOfProcStack;
}
/**
\brief Get Main Stack Pointer
\details Returns the current value of the Main Stack Pointer (MSP).
\return MSP Register value
*/
__STATIC_INLINE uint32_t __get_MSP(void)
{
register uint32_t __regMainStackPointer __ASM("msp");
return(__regMainStackPointer);
}
/**
\brief Set Main Stack Pointer
\details Assigns the given value to the Main Stack Pointer (MSP).
\param [in] topOfMainStack Main Stack Pointer value to set
*/
__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
{
register uint32_t __regMainStackPointer __ASM("msp");
__regMainStackPointer = topOfMainStack;
}
/**
\brief Get Priority Mask
\details Returns the current state of the priority mask bit from the Priority Mask Register.
\return Priority Mask value
*/
__STATIC_INLINE uint32_t __get_PRIMASK(void)
{
register uint32_t __regPriMask __ASM("primask");
return(__regPriMask);
}
/**
\brief Set Priority Mask
\details Assigns the given value to the Priority Mask Register.
\param [in] priMask Priority Mask
*/
__STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
{
register uint32_t __regPriMask __ASM("primask");
__regPriMask = (priMask);
}
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
/**
\brief Enable FIQ
\details Enables FIQ interrupts by clearing the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
#define __enable_fault_irq __enable_fiq
/**
\brief Disable FIQ
\details Disables FIQ interrupts by setting the F-bit in the CPSR.
Can only be executed in Privileged modes.
*/
#define __disable_fault_irq __disable_fiq
/**
\brief Get Base Priority
\details Returns the current value of the Base Priority register.
\return Base Priority register value
*/
__STATIC_INLINE uint32_t __get_BASEPRI(void)
{
register uint32_t __regBasePri __ASM("basepri");
return(__regBasePri);
}
/**
\brief Set Base Priority
\details Assigns the given value to the Base Priority register.
\param [in] basePri Base Priority value to set
*/
__STATIC_INLINE void __set_BASEPRI(uint32_t basePri)
{
register uint32_t __regBasePri __ASM("basepri");
__regBasePri = (basePri & 0xFFU);
}
/**
\brief Set Base Priority with condition
\details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
or the new value increases the BASEPRI priority level.
\param [in] basePri Base Priority value to set
*/
__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri)
{
register uint32_t __regBasePriMax __ASM("basepri_max");
__regBasePriMax = (basePri & 0xFFU);
}
/**
\brief Get Fault Mask
\details Returns the current value of the Fault Mask register.
\return Fault Mask register value
*/
__STATIC_INLINE uint32_t __get_FAULTMASK(void)
{
register uint32_t __regFaultMask __ASM("faultmask");
return(__regFaultMask);
}
/**
\brief Set Fault Mask
\details Assigns the given value to the Fault Mask register.
\param [in] faultMask Fault Mask value to set
*/
__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
{
register uint32_t __regFaultMask __ASM("faultmask");
__regFaultMask = (faultMask & (uint32_t)1U);
}
#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
/**
\brief Get FPSCR
\details Returns the current value of the Floating Point Status/Control register.
\return Floating Point Status/Control register value
*/
__STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
register uint32_t __regfpscr __ASM("fpscr");
return(__regfpscr);
#else
return(0U);
#endif
}
/**
\brief Set FPSCR
\details Assigns the given value to the Floating Point Status/Control register.
\param [in] fpscr Floating Point Status/Control value to set
*/
__STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
register uint32_t __regfpscr __ASM("fpscr");
__regfpscr = (fpscr);
#else
(void)fpscr;
#endif
}
/*@} end of CMSIS_Core_RegAccFunctions */
/* ########################## Core Instruction Access ######################### */
/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
Access to dedicated instructions
@{
*/
/**
\brief No Operation
\details No Operation does nothing. This instruction can be used for code alignment purposes.
*/
#define __NOP __nop
/**
\brief Wait For Interrupt
\details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
*/
#define __WFI __wfi
/**
\brief Wait For Event
\details Wait For Event is a hint instruction that permits the processor to enter
a low-power state until one of a number of events occurs.
*/
#define __WFE __wfe
/**
\brief Send Event
\details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
*/
#define __SEV __sev
/**
\brief Instruction Synchronization Barrier
\details Instruction Synchronization Barrier flushes the pipeline in the processor,
so that all instructions following the ISB are fetched from cache or memory,
after the instruction has been completed.
*/
#define __ISB() __isb(0xF)
/**
\brief Data Synchronization Barrier
\details Acts as a special kind of Data Memory Barrier.
It completes when all explicit memory accesses before this instruction complete.
*/
#define __DSB() __dsb(0xF)
/**
\brief Data Memory Barrier
\details Ensures the apparent order of the explicit memory operations before
and after the instruction, without ensuring their completion.
*/
#define __DMB() __dmb(0xF)
/**
\brief Reverse byte order (32 bit)
\details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412.
\param [in] value Value to reverse
\return Reversed value
*/
#define __REV __rev
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856.
\param [in] value Value to reverse
\return Reversed value
*/
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value)
{
rev16 r0, r0
bx lr
}
#endif
/**
\brief Reverse byte order (16 bit)
\details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000.
\param [in] value Value to reverse
\return Reversed value
*/
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(int16_t value)
{
revsh r0, r0
bx lr
}
#endif
/**
\brief Rotate Right in unsigned value (32 bit)
\details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits.
\param [in] op1 Value to rotate
\param [in] op2 Number of Bits to rotate
\return Rotated value
*/
#define __ROR __ror
/**
\brief Breakpoint
\details Causes the processor to enter Debug state.
Debug tools can use this to investigate system state when the instruction at a particular address is reached.
\param [in] value is ignored by the processor.
If required, a debugger can use it to store additional information about the breakpoint.
*/
#define __BKPT(value) __breakpoint(value)
/**
\brief Reverse bit order of value
\details Reverses the bit order of the given value.
\param [in] value Value to reverse
\return Reversed value
*/
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
#define __RBIT __rbit
#else
__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
{
uint32_t result;
uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */
result = value; /* r will be reversed bits of v; first get LSB of v */
for (value >>= 1U; value != 0U; value >>= 1U)
{
result <<= 1U;
result |= value & 1U;
s--;
}
result <<= s; /* shift when v's highest bits are zero */
return result;
}
#endif
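/*
 * Worked example for the software fallback above: __RBIT(0x1) never enters
 * the loop body (value becomes 0 after the first shift), s stays 31, and the
 * final "result <<= s" yields 0x80000000.
 */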
/**
\brief Count leading zeros
\details Counts the number of leading zeros of a data value.
\param [in] value Value to count the leading zeros
\return number of leading zeros in value
*/
#define __CLZ __clz
#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
/**
\brief LDR Exclusive (8 bit)
\details Executes an exclusive LDR instruction for an 8-bit value.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr))
#else
#define __LDREXB(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint8_t ) __ldrex(ptr)) _Pragma("pop")
#endif
/**
\brief LDR Exclusive (16 bit)
\details Executes an exclusive LDR instruction for 16-bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __LDREXH(ptr) ((uint16_t) __ldrex(ptr))
#else
#define __LDREXH(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint16_t) __ldrex(ptr)) _Pragma("pop")
#endif
/**
\brief LDR Exclusive (32 bit)
\details Executes an exclusive LDR instruction for 32-bit values.
\param [in] ptr Pointer to data
\return value of type uint32_t at (*ptr)
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr))
#else
#define __LDREXW(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint32_t ) __ldrex(ptr)) _Pragma("pop")
#endif
/**
\brief STR Exclusive (8 bit)
\details Executes an exclusive STR instruction for 8-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __STREXB(value, ptr) __strex(value, ptr)
#else
#define __STREXB(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
#endif
/**
\brief STR Exclusive (16 bit)
\details Executes an exclusive STR instruction for 16-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __STREXH(value, ptr) __strex(value, ptr)
#else
#define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
#endif
/**
\brief STR Exclusive (32 bit)
\details Executes an exclusive STR instruction for 32-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
\return 0 Function succeeded
\return 1 Function failed
*/
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020)
#define __STREXW(value, ptr) __strex(value, ptr)
#else
#define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop")
#endif
/**
\brief Remove the exclusive lock
\details Removes the exclusive lock which is created by LDREX.
*/
#define __CLREX __clrex
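/*
 * A minimal sketch (not part of CMSIS): a lock-free increment built from the
 * exclusive-access macros above; the function name is illustrative only.
 */
__STATIC_INLINE uint32_t __example_atomic_increment(volatile uint32_t *addr)
{
  uint32_t newVal;
  do {
    newVal = __LDREXW(addr) + 1U;           /* exclusive load of the current value */
  } while (__STREXW(newVal, addr) != 0U);   /* 0 means the exclusive store succeeded */
  return newVal;
}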
/**
\brief Signed Saturate
\details Saturates a signed value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (1..32)
\return Saturated value
*/
#define __SSAT __ssat
/**
\brief Unsigned Saturate
\details Saturates an unsigned value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (0..31)
\return Saturated value
*/
#define __USAT __usat
/**
\brief Rotate Right with Extend (32 bit)
\details Moves each bit of a bitstring right by one bit.
The carry input is shifted in at the left end of the bitstring.
\param [in] value Value to rotate
\return Rotated value
*/
#ifndef __NO_EMBEDDED_ASM
__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value)
{
rrx r0, r0
bx lr
}
#endif
/**
\brief LDRT Unprivileged (8 bit)
\details Executes an unprivileged LDRT instruction for an 8-bit value.
\param [in] ptr Pointer to data
\return value of type uint8_t at (*ptr)
*/
#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr))
/**
\brief LDRT Unprivileged (16 bit)
\details Executes an unprivileged LDRT instruction for 16-bit values.
\param [in] ptr Pointer to data
\return value of type uint16_t at (*ptr)
*/
#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr))
/**
\brief LDRT Unprivileged (32 bit)
\details Executes an unprivileged LDRT instruction for 32-bit values.
\param [in] ptr Pointer to data
\return value of type uint32_t at (*ptr)
*/
#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr))
/**
\brief STRT Unprivileged (8 bit)
\details Executes an unprivileged STRT instruction for 8-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
*/
#define __STRBT(value, ptr) __strt(value, ptr)
/**
\brief STRT Unprivileged (16 bit)
\details Executes an unprivileged STRT instruction for 16-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
*/
#define __STRHT(value, ptr) __strt(value, ptr)
/**
\brief STRT Unprivileged (32 bit)
\details Executes an unprivileged STRT instruction for 32-bit values.
\param [in] value Value to store
\param [in] ptr Pointer to location
*/
#define __STRT(value, ptr) __strt(value, ptr)
#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
/**
\brief Signed Saturate
\details Saturates a signed value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (1..32)
\return Saturated value
*/
__attribute__((always_inline)) __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat)
{
if ((sat >= 1U) && (sat <= 32U))
{
const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
const int32_t min = -1 - max;
if (val > max)
{
return max;
}
else if (val < min)
{
return min;
}
}
return val;
}
/**
\brief Unsigned Saturate
\details Saturates an unsigned value.
\param [in] value Value to be saturated
\param [in] sat Bit position to saturate to (0..31)
\return Saturated value
*/
__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat)
{
if (sat <= 31U)
{
const uint32_t max = ((1U << sat) - 1U);
if (val > (int32_t)max)
{
return max;
}
else if (val < 0)
{
return 0U;
}
}
return (uint32_t)val;
}
#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \
(defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
/*@}*/ /* end of group CMSIS_Core_InstructionInterface */
/* ################### Compiler specific Intrinsics ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
Access to dedicated SIMD instructions
@{
*/
#if ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) )
#define __SADD8 __sadd8
#define __QADD8 __qadd8
#define __SHADD8 __shadd8
#define __UADD8 __uadd8
#define __UQADD8 __uqadd8
#define __UHADD8 __uhadd8
#define __SSUB8 __ssub8
#define __QSUB8 __qsub8
#define __SHSUB8 __shsub8
#define __USUB8 __usub8
#define __UQSUB8 __uqsub8
#define __UHSUB8 __uhsub8
#define __SADD16 __sadd16
#define __QADD16 __qadd16
#define __SHADD16 __shadd16
#define __UADD16 __uadd16
#define __UQADD16 __uqadd16
#define __UHADD16 __uhadd16
#define __SSUB16 __ssub16
#define __QSUB16 __qsub16
#define __SHSUB16 __shsub16
#define __USUB16 __usub16
#define __UQSUB16 __uqsub16
#define __UHSUB16 __uhsub16
#define __SASX __sasx
#define __QASX __qasx
#define __SHASX __shasx
#define __UASX __uasx
#define __UQASX __uqasx
#define __UHASX __uhasx
#define __SSAX __ssax
#define __QSAX __qsax
#define __SHSAX __shsax
#define __USAX __usax
#define __UQSAX __uqsax
#define __UHSAX __uhsax
#define __USAD8 __usad8
#define __USADA8 __usada8
#define __SSAT16 __ssat16
#define __USAT16 __usat16
#define __UXTB16 __uxtb16
#define __UXTAB16 __uxtab16
#define __SXTB16 __sxtb16
#define __SXTAB16 __sxtab16
#define __SMUAD __smuad
#define __SMUADX __smuadx
#define __SMLAD __smlad
#define __SMLADX __smladx
#define __SMLALD __smlald
#define __SMLALDX __smlaldx
#define __SMUSD __smusd
#define __SMUSDX __smusdx
#define __SMLSD __smlsd
#define __SMLSDX __smlsdx
#define __SMLSLD __smlsld
#define __SMLSLDX __smlsldx
#define __SEL __sel
#define __QADD __qadd
#define __QSUB __qsub
#define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
#define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
#define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
((int64_t)(ARG3) << 32U) ) >> 32U))
#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2))
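/* Example: __PKHBT(0x1234, 0x5678, 16) packs to 0x56781234 (bottom halfword
   from ARG1, top halfword from ARG2 << 16). */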
#endif /* ((defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */
/*@} end of group CMSIS_SIMD_intrinsics */
#endif /* __CMSIS_ARMCC_H */

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -0,0 +1,283 @@
/**************************************************************************//**
* @file cmsis_compiler.h
* @brief CMSIS compiler generic header file
* @version V5.1.0
* @date 09. October 2018
******************************************************************************/
/*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CMSIS_COMPILER_H
#define __CMSIS_COMPILER_H
#include <stdint.h>
/*
* Arm Compiler 4/5
*/
#if defined ( __CC_ARM )
#include "cmsis_armcc.h"
/*
* Arm Compiler 6.6 LTM (armclang)
*/
#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) && (__ARMCC_VERSION < 6100100)
#include "cmsis_armclang_ltm.h"
/*
* Arm Compiler above 6.10.1 (armclang)
*/
#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100)
#include "cmsis_armclang.h"
/*
* GNU Compiler
*/
#elif defined ( __GNUC__ )
#include "cmsis_gcc.h"
/*
* IAR Compiler
*/
#elif defined ( __ICCARM__ )
#include <cmsis_iccarm.h>
/*
* TI Arm Compiler
*/
#elif defined ( __TI_ARM__ )
#include <cmsis_ccs.h>
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((noreturn))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
#define __PACKED __attribute__((packed))
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT struct __attribute__((packed))
#endif
#ifndef __PACKED_UNION
#define __PACKED_UNION union __attribute__((packed))
#endif
#ifndef __UNALIGNED_UINT32 /* deprecated */
struct __attribute__((packed)) T_UINT32 { uint32_t v; };
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __UNALIGNED_UINT16_WRITE
__PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
#define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void*)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
__PACKED_STRUCT T_UINT16_READ { uint16_t v; };
#define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
__PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
#define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
__PACKED_STRUCT T_UINT32_READ { uint32_t v; };
#define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
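/* Usage sketch (hypothetical buffer): the packed-struct wrappers above let
   code read a 32-bit value from an odd offset without a fault, e.g.
   uint32_t v = __UNALIGNED_UINT32_READ(&byteBuf[1]); */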
#ifndef __ALIGNED
#define __ALIGNED(x) __attribute__((aligned(x)))
#endif
#ifndef __RESTRICT
#define __RESTRICT __restrict
#endif
#ifndef __COMPILER_BARRIER
#warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored.
#define __COMPILER_BARRIER() (void)0
#endif
/*
* TASKING Compiler
*/
#elif defined ( __TASKING__ )
/*
* The CMSIS functions have been implemented as intrinsics in the compiler.
* Please use "carm -?i" to get an up to date list of all intrinsics,
* Including the CMSIS ones.
*/
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
#define __NO_RETURN __attribute__((noreturn))
#endif
#ifndef __USED
#define __USED __attribute__((used))
#endif
#ifndef __WEAK
#define __WEAK __attribute__((weak))
#endif
#ifndef __PACKED
#define __PACKED __packed__
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT struct __packed__
#endif
#ifndef __PACKED_UNION
#define __PACKED_UNION union __packed__
#endif
#ifndef __UNALIGNED_UINT32 /* deprecated */
struct __packed__ T_UINT32 { uint32_t v; };
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __UNALIGNED_UINT16_WRITE
__PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
#define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
__PACKED_STRUCT T_UINT16_READ { uint16_t v; };
#define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
__PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
#define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
__PACKED_STRUCT T_UINT32_READ { uint32_t v; };
#define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
#ifndef __ALIGNED
#define __ALIGNED(x) __align(x)
#endif
#ifndef __RESTRICT
#warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored.
#define __RESTRICT
#endif
#ifndef __COMPILER_BARRIER
#warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored.
#define __COMPILER_BARRIER() (void)0
#endif
/*
* COSMIC Compiler
*/
#elif defined ( __CSMC__ )
#include <cmsis_csm.h>
#ifndef __ASM
#define __ASM _asm
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __STATIC_INLINE
#endif
#ifndef __NO_RETURN
// NO RETURN is automatically detected hence no warning here
#define __NO_RETURN
#endif
#ifndef __USED
#warning No compiler specific solution for __USED. __USED is ignored.
#define __USED
#endif
#ifndef __WEAK
#define __WEAK __weak
#endif
#ifndef __PACKED
#define __PACKED @packed
#endif
#ifndef __PACKED_STRUCT
#define __PACKED_STRUCT @packed struct
#endif
#ifndef __PACKED_UNION
#define __PACKED_UNION @packed union
#endif
#ifndef __UNALIGNED_UINT32 /* deprecated */
@packed struct T_UINT32 { uint32_t v; };
#define __UNALIGNED_UINT32(x) (((struct T_UINT32 *)(x))->v)
#endif
#ifndef __UNALIGNED_UINT16_WRITE
__PACKED_STRUCT T_UINT16_WRITE { uint16_t v; };
#define __UNALIGNED_UINT16_WRITE(addr, val) (void)((((struct T_UINT16_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT16_READ
__PACKED_STRUCT T_UINT16_READ { uint16_t v; };
#define __UNALIGNED_UINT16_READ(addr) (((const struct T_UINT16_READ *)(const void *)(addr))->v)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
__PACKED_STRUCT T_UINT32_WRITE { uint32_t v; };
#define __UNALIGNED_UINT32_WRITE(addr, val) (void)((((struct T_UINT32_WRITE *)(void *)(addr))->v) = (val))
#endif
#ifndef __UNALIGNED_UINT32_READ
__PACKED_STRUCT T_UINT32_READ { uint32_t v; };
#define __UNALIGNED_UINT32_READ(addr) (((const struct T_UINT32_READ *)(const void *)(addr))->v)
#endif
#ifndef __ALIGNED
#warning No compiler specific solution for __ALIGNED. __ALIGNED is ignored.
#define __ALIGNED(x)
#endif
#ifndef __RESTRICT
#warning No compiler specific solution for __RESTRICT. __RESTRICT is ignored.
#define __RESTRICT
#endif
#ifndef __COMPILER_BARRIER
#warning No compiler specific solution for __COMPILER_BARRIER. __COMPILER_BARRIER is ignored.
#define __COMPILER_BARRIER() (void)0
#endif
#else
#error Unknown compiler.
#endif
#endif /* __CMSIS_COMPILER_H */

File diff suppressed because it is too large

@ -0,0 +1,968 @@
/**************************************************************************//**
* @file cmsis_iccarm.h
* @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file
* @version V5.2.0
* @date 28. January 2020
******************************************************************************/
//------------------------------------------------------------------------------
//
// Copyright (c) 2017-2019 IAR Systems
// Copyright (c) 2017-2019 Arm Limited. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License")
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//------------------------------------------------------------------------------
#ifndef __CMSIS_ICCARM_H__
#define __CMSIS_ICCARM_H__
#ifndef __ICCARM__
#error This file should only be compiled by ICCARM
#endif
#pragma system_include
#define __IAR_FT _Pragma("inline=forced") __intrinsic
#if (__VER__ >= 8000000)
#define __ICCARM_V8 1
#else
#define __ICCARM_V8 0
#endif
#ifndef __ALIGNED
#if __ICCARM_V8
#define __ALIGNED(x) __attribute__((aligned(x)))
#elif (__VER__ >= 7080000)
/* Needs IAR language extensions */
#define __ALIGNED(x) __attribute__((aligned(x)))
#else
#warning No compiler specific solution for __ALIGNED.__ALIGNED is ignored.
#define __ALIGNED(x)
#endif
#endif
/* Define compiler macros for CPU architecture, used in CMSIS 5.
*/
#if __ARM_ARCH_6M__ || __ARM_ARCH_7M__ || __ARM_ARCH_7EM__ || __ARM_ARCH_8M_BASE__ || __ARM_ARCH_8M_MAIN__
/* Macros already defined */
#else
#if defined(__ARM8M_MAINLINE__) || defined(__ARM8EM_MAINLINE__)
#define __ARM_ARCH_8M_MAIN__ 1
#elif defined(__ARM8M_BASELINE__)
#define __ARM_ARCH_8M_BASE__ 1
#elif defined(__ARM_ARCH_PROFILE) && __ARM_ARCH_PROFILE == 'M'
#if __ARM_ARCH == 6
#define __ARM_ARCH_6M__ 1
#elif __ARM_ARCH == 7
#if __ARM_FEATURE_DSP
#define __ARM_ARCH_7EM__ 1
#else
#define __ARM_ARCH_7M__ 1
#endif
#endif /* __ARM_ARCH */
#endif /* __ARM_ARCH_PROFILE == 'M' */
#endif
/* Alternative core deduction for older ICCARM versions */
#if !defined(__ARM_ARCH_6M__) && !defined(__ARM_ARCH_7M__) && !defined(__ARM_ARCH_7EM__) && \
!defined(__ARM_ARCH_8M_BASE__) && !defined(__ARM_ARCH_8M_MAIN__)
#if defined(__ARM6M__) && (__CORE__ == __ARM6M__)
#define __ARM_ARCH_6M__ 1
#elif defined(__ARM7M__) && (__CORE__ == __ARM7M__)
#define __ARM_ARCH_7M__ 1
#elif defined(__ARM7EM__) && (__CORE__ == __ARM7EM__)
#define __ARM_ARCH_7EM__ 1
#elif defined(__ARM8M_BASELINE__) && (__CORE == __ARM8M_BASELINE__)
#define __ARM_ARCH_8M_BASE__ 1
#elif defined(__ARM8M_MAINLINE__) && (__CORE == __ARM8M_MAINLINE__)
#define __ARM_ARCH_8M_MAIN__ 1
#elif defined(__ARM8EM_MAINLINE__) && (__CORE == __ARM8EM_MAINLINE__)
#define __ARM_ARCH_8M_MAIN__ 1
#else
#error "Unknown target."
#endif
#endif
#if defined(__ARM_ARCH_6M__) && __ARM_ARCH_6M__==1
#define __IAR_M0_FAMILY 1
#elif defined(__ARM_ARCH_8M_BASE__) && __ARM_ARCH_8M_BASE__==1
#define __IAR_M0_FAMILY 1
#else
#define __IAR_M0_FAMILY 0
#endif
#ifndef __ASM
#define __ASM __asm
#endif
#ifndef __COMPILER_BARRIER
#define __COMPILER_BARRIER() __ASM volatile("":::"memory")
#endif
#ifndef __INLINE
#define __INLINE inline
#endif
#ifndef __NO_RETURN
#if __ICCARM_V8
#define __NO_RETURN __attribute__((__noreturn__))
#else
#define __NO_RETURN _Pragma("object_attribute=__noreturn")
#endif
#endif
#ifndef __PACKED
#if __ICCARM_V8
#define __PACKED __attribute__((packed, aligned(1)))
#else
/* Needs IAR language extensions */
#define __PACKED __packed
#endif
#endif
#ifndef __PACKED_STRUCT
#if __ICCARM_V8
#define __PACKED_STRUCT struct __attribute__((packed, aligned(1)))
#else
/* Needs IAR language extensions */
#define __PACKED_STRUCT __packed struct
#endif
#endif
#ifndef __PACKED_UNION
#if __ICCARM_V8
#define __PACKED_UNION union __attribute__((packed, aligned(1)))
#else
/* Needs IAR language extensions */
#define __PACKED_UNION __packed union
#endif
#endif
#ifndef __RESTRICT
#if __ICCARM_V8
#define __RESTRICT __restrict
#else
/* Needs IAR language extensions */
#define __RESTRICT restrict
#endif
#endif
#ifndef __STATIC_INLINE
#define __STATIC_INLINE static inline
#endif
#ifndef __FORCEINLINE
#define __FORCEINLINE _Pragma("inline=forced")
#endif
#ifndef __STATIC_FORCEINLINE
#define __STATIC_FORCEINLINE __FORCEINLINE __STATIC_INLINE
#endif
#ifndef __UNALIGNED_UINT16_READ
#pragma language=save
#pragma language=extended
__IAR_FT uint16_t __iar_uint16_read(void const *ptr)
{
return *(__packed uint16_t*)(ptr);
}
#pragma language=restore
#define __UNALIGNED_UINT16_READ(PTR) __iar_uint16_read(PTR)
#endif
#ifndef __UNALIGNED_UINT16_WRITE
#pragma language=save
#pragma language=extended
__IAR_FT void __iar_uint16_write(void const *ptr, uint16_t val)
{
*(__packed uint16_t*)(ptr) = val;
}
#pragma language=restore
#define __UNALIGNED_UINT16_WRITE(PTR,VAL) __iar_uint16_write(PTR,VAL)
#endif
#ifndef __UNALIGNED_UINT32_READ
#pragma language=save
#pragma language=extended
__IAR_FT uint32_t __iar_uint32_read(void const *ptr)
{
return *(__packed uint32_t*)(ptr);
}
#pragma language=restore
#define __UNALIGNED_UINT32_READ(PTR) __iar_uint32_read(PTR)
#endif
#ifndef __UNALIGNED_UINT32_WRITE
#pragma language=save
#pragma language=extended
__IAR_FT void __iar_uint32_write(void const *ptr, uint32_t val)
{
*(__packed uint32_t*)(ptr) = val;
}
#pragma language=restore
#define __UNALIGNED_UINT32_WRITE(PTR,VAL) __iar_uint32_write(PTR,VAL)
#endif
#ifndef __UNALIGNED_UINT32 /* deprecated */
#pragma language=save
#pragma language=extended
__packed struct __iar_u32 { uint32_t v; };
#pragma language=restore
#define __UNALIGNED_UINT32(PTR) (((struct __iar_u32 *)(PTR))->v)
#endif
#ifndef __USED
#if __ICCARM_V8
#define __USED __attribute__((used))
#else
#define __USED _Pragma("__root")
#endif
#endif
#ifndef __WEAK
#if __ICCARM_V8
#define __WEAK __attribute__((weak))
#else
#define __WEAK _Pragma("__weak")
#endif
#endif
#ifndef __PROGRAM_START
#define __PROGRAM_START __iar_program_start
#endif
#ifndef __INITIAL_SP
#define __INITIAL_SP CSTACK$$Limit
#endif
#ifndef __STACK_LIMIT
#define __STACK_LIMIT CSTACK$$Base
#endif
#ifndef __VECTOR_TABLE
#define __VECTOR_TABLE __vector_table
#endif
#ifndef __VECTOR_TABLE_ATTRIBUTE
#define __VECTOR_TABLE_ATTRIBUTE @".intvec"
#endif
#ifndef __ICCARM_INTRINSICS_VERSION__
#define __ICCARM_INTRINSICS_VERSION__ 0
#endif
#if __ICCARM_INTRINSICS_VERSION__ == 2
#if defined(__CLZ)
#undef __CLZ
#endif
#if defined(__REVSH)
#undef __REVSH
#endif
#if defined(__RBIT)
#undef __RBIT
#endif
#if defined(__SSAT)
#undef __SSAT
#endif
#if defined(__USAT)
#undef __USAT
#endif
#include "iccarm_builtin.h"
#define __disable_fault_irq __iar_builtin_disable_fiq
#define __disable_irq __iar_builtin_disable_interrupt
#define __enable_fault_irq __iar_builtin_enable_fiq
#define __enable_irq __iar_builtin_enable_interrupt
#define __arm_rsr __iar_builtin_rsr
#define __arm_wsr __iar_builtin_wsr
#define __get_APSR() (__arm_rsr("APSR"))
#define __get_BASEPRI() (__arm_rsr("BASEPRI"))
#define __get_CONTROL() (__arm_rsr("CONTROL"))
#define __get_FAULTMASK() (__arm_rsr("FAULTMASK"))
#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) )
#define __get_FPSCR() (__arm_rsr("FPSCR"))
#define __set_FPSCR(VALUE) (__arm_wsr("FPSCR", (VALUE)))
#else
#define __get_FPSCR() ( 0 )
#define __set_FPSCR(VALUE) ((void)VALUE)
#endif
#define __get_IPSR() (__arm_rsr("IPSR"))
#define __get_MSP() (__arm_rsr("MSP"))
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
(!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
// without main extensions, the non-secure MSPLIM is RAZ/WI
#define __get_MSPLIM() (0U)
#else
#define __get_MSPLIM() (__arm_rsr("MSPLIM"))
#endif
#define __get_PRIMASK() (__arm_rsr("PRIMASK"))
#define __get_PSP() (__arm_rsr("PSP"))
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
(!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
// without main extensions, the non-secure PSPLIM is RAZ/WI
#define __get_PSPLIM() (0U)
#else
#define __get_PSPLIM() (__arm_rsr("PSPLIM"))
#endif
#define __get_xPSR() (__arm_rsr("xPSR"))
#define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE)))
#define __set_BASEPRI_MAX(VALUE) (__arm_wsr("BASEPRI_MAX", (VALUE)))
#define __set_CONTROL(VALUE) (__arm_wsr("CONTROL", (VALUE)))
#define __set_FAULTMASK(VALUE) (__arm_wsr("FAULTMASK", (VALUE)))
#define __set_MSP(VALUE) (__arm_wsr("MSP", (VALUE)))
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
(!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
// without main extensions, the non-secure MSPLIM is RAZ/WI
#define __set_MSPLIM(VALUE) ((void)(VALUE))
#else
#define __set_MSPLIM(VALUE) (__arm_wsr("MSPLIM", (VALUE)))
#endif
#define __set_PRIMASK(VALUE) (__arm_wsr("PRIMASK", (VALUE)))
#define __set_PSP(VALUE) (__arm_wsr("PSP", (VALUE)))
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
(!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
// without main extensions, the non-secure PSPLIM is RAZ/WI
#define __set_PSPLIM(VALUE) ((void)(VALUE))
#else
#define __set_PSPLIM(VALUE) (__arm_wsr("PSPLIM", (VALUE)))
#endif
#define __TZ_get_CONTROL_NS() (__arm_rsr("CONTROL_NS"))
#define __TZ_set_CONTROL_NS(VALUE) (__arm_wsr("CONTROL_NS", (VALUE)))
#define __TZ_get_PSP_NS() (__arm_rsr("PSP_NS"))
#define __TZ_set_PSP_NS(VALUE) (__arm_wsr("PSP_NS", (VALUE)))
#define __TZ_get_MSP_NS() (__arm_rsr("MSP_NS"))
#define __TZ_set_MSP_NS(VALUE) (__arm_wsr("MSP_NS", (VALUE)))
#define __TZ_get_SP_NS() (__arm_rsr("SP_NS"))
#define __TZ_set_SP_NS(VALUE) (__arm_wsr("SP_NS", (VALUE)))
#define __TZ_get_PRIMASK_NS() (__arm_rsr("PRIMASK_NS"))
#define __TZ_set_PRIMASK_NS(VALUE) (__arm_wsr("PRIMASK_NS", (VALUE)))
#define __TZ_get_BASEPRI_NS() (__arm_rsr("BASEPRI_NS"))
#define __TZ_set_BASEPRI_NS(VALUE) (__arm_wsr("BASEPRI_NS", (VALUE)))
#define __TZ_get_FAULTMASK_NS() (__arm_rsr("FAULTMASK_NS"))
#define __TZ_set_FAULTMASK_NS(VALUE) (__arm_wsr("FAULTMASK_NS", (VALUE)))
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
(!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3)))
// without main extensions, the non-secure PSPLIM is RAZ/WI
#define __TZ_get_PSPLIM_NS() (0U)
#define __TZ_set_PSPLIM_NS(VALUE) ((void)(VALUE))
#else
#define __TZ_get_PSPLIM_NS() (__arm_rsr("PSPLIM_NS"))
#define __TZ_set_PSPLIM_NS(VALUE) (__arm_wsr("PSPLIM_NS", (VALUE)))
#endif
#define __TZ_get_MSPLIM_NS() (__arm_rsr("MSPLIM_NS"))
#define __TZ_set_MSPLIM_NS(VALUE) (__arm_wsr("MSPLIM_NS", (VALUE)))
#define __NOP __iar_builtin_no_operation
#define __CLZ __iar_builtin_CLZ
#define __CLREX __iar_builtin_CLREX
#define __DMB __iar_builtin_DMB
#define __DSB __iar_builtin_DSB
#define __ISB __iar_builtin_ISB
#define __LDREXB __iar_builtin_LDREXB
#define __LDREXH __iar_builtin_LDREXH
#define __LDREXW __iar_builtin_LDREX
#define __RBIT __iar_builtin_RBIT
#define __REV __iar_builtin_REV
#define __REV16 __iar_builtin_REV16
__IAR_FT int16_t __REVSH(int16_t val)
{
return (int16_t) __iar_builtin_REVSH(val);
}
#define __ROR __iar_builtin_ROR
#define __RRX __iar_builtin_RRX
#define __SEV __iar_builtin_SEV
#if !__IAR_M0_FAMILY
#define __SSAT __iar_builtin_SSAT
#endif
#define __STREXB __iar_builtin_STREXB
#define __STREXH __iar_builtin_STREXH
#define __STREXW __iar_builtin_STREX
#if !__IAR_M0_FAMILY
#define __USAT __iar_builtin_USAT
#endif
#define __WFE __iar_builtin_WFE
#define __WFI __iar_builtin_WFI
#if __ARM_MEDIA__
#define __SADD8 __iar_builtin_SADD8
#define __QADD8 __iar_builtin_QADD8
#define __SHADD8 __iar_builtin_SHADD8
#define __UADD8 __iar_builtin_UADD8
#define __UQADD8 __iar_builtin_UQADD8
#define __UHADD8 __iar_builtin_UHADD8
#define __SSUB8 __iar_builtin_SSUB8
#define __QSUB8 __iar_builtin_QSUB8
#define __SHSUB8 __iar_builtin_SHSUB8
#define __USUB8 __iar_builtin_USUB8
#define __UQSUB8 __iar_builtin_UQSUB8
#define __UHSUB8 __iar_builtin_UHSUB8
#define __SADD16 __iar_builtin_SADD16
#define __QADD16 __iar_builtin_QADD16
#define __SHADD16 __iar_builtin_SHADD16
#define __UADD16 __iar_builtin_UADD16
#define __UQADD16 __iar_builtin_UQADD16
#define __UHADD16 __iar_builtin_UHADD16
#define __SSUB16 __iar_builtin_SSUB16
#define __QSUB16 __iar_builtin_QSUB16
#define __SHSUB16 __iar_builtin_SHSUB16
#define __USUB16 __iar_builtin_USUB16
#define __UQSUB16 __iar_builtin_UQSUB16
#define __UHSUB16 __iar_builtin_UHSUB16
#define __SASX __iar_builtin_SASX
#define __QASX __iar_builtin_QASX
#define __SHASX __iar_builtin_SHASX
#define __UASX __iar_builtin_UASX
#define __UQASX __iar_builtin_UQASX
#define __UHASX __iar_builtin_UHASX
#define __SSAX __iar_builtin_SSAX
#define __QSAX __iar_builtin_QSAX
#define __SHSAX __iar_builtin_SHSAX
#define __USAX __iar_builtin_USAX
#define __UQSAX __iar_builtin_UQSAX
#define __UHSAX __iar_builtin_UHSAX
#define __USAD8 __iar_builtin_USAD8
#define __USADA8 __iar_builtin_USADA8
#define __SSAT16 __iar_builtin_SSAT16
#define __USAT16 __iar_builtin_USAT16
#define __UXTB16 __iar_builtin_UXTB16
#define __UXTAB16 __iar_builtin_UXTAB16
#define __SXTB16 __iar_builtin_SXTB16
#define __SXTAB16 __iar_builtin_SXTAB16
#define __SMUAD __iar_builtin_SMUAD
#define __SMUADX __iar_builtin_SMUADX
#define __SMMLA __iar_builtin_SMMLA
#define __SMLAD __iar_builtin_SMLAD
#define __SMLADX __iar_builtin_SMLADX
#define __SMLALD __iar_builtin_SMLALD
#define __SMLALDX __iar_builtin_SMLALDX
#define __SMUSD __iar_builtin_SMUSD
#define __SMUSDX __iar_builtin_SMUSDX
#define __SMLSD __iar_builtin_SMLSD
#define __SMLSDX __iar_builtin_SMLSDX
#define __SMLSLD __iar_builtin_SMLSLD
#define __SMLSLDX __iar_builtin_SMLSLDX
#define __SEL __iar_builtin_SEL
#define __QADD __iar_builtin_QADD
#define __QSUB __iar_builtin_QSUB
#define __PKHBT __iar_builtin_PKHBT
#define __PKHTB __iar_builtin_PKHTB
#endif
#else /* __ICCARM_INTRINSICS_VERSION__ == 2 */
#if __IAR_M0_FAMILY
/* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. */
#define __CLZ __cmsis_iar_clz_not_active
#define __SSAT __cmsis_iar_ssat_not_active
#define __USAT __cmsis_iar_usat_not_active
#define __RBIT __cmsis_iar_rbit_not_active
#define __get_APSR __cmsis_iar_get_APSR_not_active
#endif
#if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) ))
#define __get_FPSCR __cmsis_iar_get_FPSR_not_active
#define __set_FPSCR __cmsis_iar_set_FPSR_not_active
#endif
#ifdef __INTRINSICS_INCLUDED
#error intrinsics.h is already included previously!
#endif
#include <intrinsics.h>
#if __IAR_M0_FAMILY
/* Avoid clash between intrinsics.h and arm_math.h when compiling for Cortex-M0. */
#undef __CLZ
#undef __SSAT
#undef __USAT
#undef __RBIT
#undef __get_APSR
__STATIC_INLINE uint8_t __CLZ(uint32_t data)
{
if (data == 0U) { return 32U; }
uint32_t count = 0U;
uint32_t mask = 0x80000000U;
while ((data & mask) == 0U)
{
count += 1U;
mask = mask >> 1U;
}
return count;
}
__STATIC_INLINE uint32_t __RBIT(uint32_t v)
{
uint8_t sc = 31U;
uint32_t r = v;
for (v >>= 1U; v; v >>= 1U)
{
r <<= 1U;
r |= v & 1U;
sc--;
}
return (r << sc);
}
__STATIC_INLINE uint32_t __get_APSR(void)
{
uint32_t res;
__asm("MRS %0,APSR" : "=r" (res));
return res;
}
#endif
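/* Example values for the __CLZ/__RBIT helpers above (a sketch):
   __CLZ(0x00010000U)  == 15          (bit 16 is the highest set bit)
   __CLZ(0U)           == 32          (defined result for zero input)
   __RBIT(0x00000001U) == 0x80000000  (bit 0 mirrored to bit 31)      */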
#if (!((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \
(defined (__FPU_USED ) && (__FPU_USED == 1U)) ))
#undef __get_FPSCR
#undef __set_FPSCR
#define __get_FPSCR() (0)
#define __set_FPSCR(VALUE) ((void)VALUE)
#endif
#pragma diag_suppress=Pe940
#pragma diag_suppress=Pe177
#define __enable_irq __enable_interrupt
#define __disable_irq __disable_interrupt
#define __NOP __no_operation
#define __get_xPSR __get_PSR
#if (!defined(__ARM_ARCH_6M__) || __ARM_ARCH_6M__==0)
__IAR_FT uint32_t __LDREXW(uint32_t volatile *ptr)
{
return __LDREX((unsigned long *)ptr);
}
__IAR_FT uint32_t __STREXW(uint32_t value, uint32_t volatile *ptr)
{
return __STREX(value, (unsigned long *)ptr);
}
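/* Example: a minimal lock-free add built on the exclusive pair above
   (a sketch; `counter` is a hypothetical shared word, and the exclusives
   require a core that implements LDREX/STREX). */
__IAR_FT void example_atomic_add(volatile uint32_t *counter, uint32_t inc)
{
  uint32_t v;
  do {
    v = __LDREXW(counter);                     /* open exclusive monitor */
  } while (__STREXW(v + inc, counter) != 0U);  /* 0 = store took effect  */
}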
#endif
/* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */
#if (__CORTEX_M >= 0x03)
__IAR_FT uint32_t __RRX(uint32_t value)
{
uint32_t result;
__ASM volatile("RRX %0, %1" : "=r"(result) : "r" (value));
return(result);
}
__IAR_FT void __set_BASEPRI_MAX(uint32_t value)
{
__asm volatile("MSR BASEPRI_MAX,%0"::"r" (value));
}
#define __enable_fault_irq __enable_fiq
#define __disable_fault_irq __disable_fiq
#endif /* (__CORTEX_M >= 0x03) */
__IAR_FT uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  op2 %= 32U;
  if (op2 == 0U) { return op1; }  /* avoid undefined shift by 32 */
  return (op1 >> op2) | (op1 << (32U - op2));
}
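/* Example: __ROR(0x80000001UL, 1UL) == 0xC0000000UL; a rotate count of 0
   (or any multiple of 32) returns the operand unchanged. */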
#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
(defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) )
__IAR_FT uint32_t __get_MSPLIM(void)
{
uint32_t res;
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
(!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3)))
// without main extensions, the non-secure MSPLIM is RAZ/WI
res = 0U;
#else
__asm volatile("MRS %0,MSPLIM" : "=r" (res));
#endif
return res;
}
__IAR_FT void __set_MSPLIM(uint32_t value)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
(!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3)))
// without main extensions, the non-secure MSPLIM is RAZ/WI
(void)value;
#else
__asm volatile("MSR MSPLIM,%0" :: "r" (value));
#endif
}
__IAR_FT uint32_t __get_PSPLIM(void)
{
uint32_t res;
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
(!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3)))
// without main extensions, the non-secure PSPLIM is RAZ/WI
res = 0U;
#else
__asm volatile("MRS %0,PSPLIM" : "=r" (res));
#endif
return res;
}
__IAR_FT void __set_PSPLIM(uint32_t value)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
(!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3)))
// without main extensions, the non-secure PSPLIM is RAZ/WI
(void)value;
#else
__asm volatile("MSR PSPLIM,%0" :: "r" (value));
#endif
}
__IAR_FT uint32_t __TZ_get_CONTROL_NS(void)
{
uint32_t res;
__asm volatile("MRS %0,CONTROL_NS" : "=r" (res));
return res;
}
__IAR_FT void __TZ_set_CONTROL_NS(uint32_t value)
{
__asm volatile("MSR CONTROL_NS,%0" :: "r" (value));
}
__IAR_FT uint32_t __TZ_get_PSP_NS(void)
{
uint32_t res;
__asm volatile("MRS %0,PSP_NS" : "=r" (res));
return res;
}
__IAR_FT void __TZ_set_PSP_NS(uint32_t value)
{
__asm volatile("MSR PSP_NS,%0" :: "r" (value));
}
__IAR_FT uint32_t __TZ_get_MSP_NS(void)
{
uint32_t res;
__asm volatile("MRS %0,MSP_NS" : "=r" (res));
return res;
}
__IAR_FT void __TZ_set_MSP_NS(uint32_t value)
{
__asm volatile("MSR MSP_NS,%0" :: "r" (value));
}
__IAR_FT uint32_t __TZ_get_SP_NS(void)
{
uint32_t res;
__asm volatile("MRS %0,SP_NS" : "=r" (res));
return res;
}
__IAR_FT void __TZ_set_SP_NS(uint32_t value)
{
__asm volatile("MSR SP_NS,%0" :: "r" (value));
}
__IAR_FT uint32_t __TZ_get_PRIMASK_NS(void)
{
uint32_t res;
__asm volatile("MRS %0,PRIMASK_NS" : "=r" (res));
return res;
}
__IAR_FT void __TZ_set_PRIMASK_NS(uint32_t value)
{
__asm volatile("MSR PRIMASK_NS,%0" :: "r" (value));
}
__IAR_FT uint32_t __TZ_get_BASEPRI_NS(void)
{
uint32_t res;
__asm volatile("MRS %0,BASEPRI_NS" : "=r" (res));
return res;
}
__IAR_FT void __TZ_set_BASEPRI_NS(uint32_t value)
{
__asm volatile("MSR BASEPRI_NS,%0" :: "r" (value));
}
__IAR_FT uint32_t __TZ_get_FAULTMASK_NS(void)
{
uint32_t res;
__asm volatile("MRS %0,FAULTMASK_NS" : "=r" (res));
return res;
}
__IAR_FT void __TZ_set_FAULTMASK_NS(uint32_t value)
{
__asm volatile("MSR FAULTMASK_NS,%0" :: "r" (value));
}
__IAR_FT uint32_t __TZ_get_PSPLIM_NS(void)
{
uint32_t res;
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
(!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3)))
// without main extensions, the non-secure PSPLIM is RAZ/WI
res = 0U;
#else
__asm volatile("MRS %0,PSPLIM_NS" : "=r" (res));
#endif
return res;
}
__IAR_FT void __TZ_set_PSPLIM_NS(uint32_t value)
{
#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \
(!defined (__ARM_FEATURE_CMSE ) || (__ARM_FEATURE_CMSE < 3)))
// without main extensions, the non-secure PSPLIM is RAZ/WI
(void)value;
#else
__asm volatile("MSR PSPLIM_NS,%0" :: "r" (value));
#endif
}
__IAR_FT uint32_t __TZ_get_MSPLIM_NS(void)
{
uint32_t res;
__asm volatile("MRS %0,MSPLIM_NS" : "=r" (res));
return res;
}
__IAR_FT void __TZ_set_MSPLIM_NS(uint32_t value)
{
__asm volatile("MSR MSPLIM_NS,%0" :: "r" (value));
}
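/* Example: a secure-side snapshot of the non-secure stack pointers using
   the accessors above (a sketch; only meaningful with TrustZone/CMSE). */
__IAR_FT void example_snapshot_ns_sp(uint32_t *msp_ns, uint32_t *psp_ns)
{
  *msp_ns = __TZ_get_MSP_NS();
  *psp_ns = __TZ_get_PSP_NS();
}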
#endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */
#endif /* __ICCARM_INTRINSICS_VERSION__ == 2 */
#define __BKPT(value) __asm volatile ("BKPT %0" : : "i"(value))
#if __IAR_M0_FAMILY
__STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat)
{
if ((sat >= 1U) && (sat <= 32U))
{
const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U);
const int32_t min = -1 - max ;
if (val > max)
{
return max;
}
else if (val < min)
{
return min;
}
}
return val;
}
__STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat)
{
if (sat <= 31U)
{
const uint32_t max = ((1U << sat) - 1U);
if (val > (int32_t)max)
{
return max;
}
else if (val < 0)
{
return 0U;
}
}
return (uint32_t)val;
}
#endif
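/* Example values for the saturation fallbacks above (a sketch):
   __SSAT( 300, 8U) ==  127   (largest  signed 8-bit value)
   __SSAT(-300, 8U) == -128   (smallest signed 8-bit value)
   __USAT(  -5, 8U) ==    0   (unsigned saturation clamps at zero)
   __USAT( 300, 8U) ==  255                                        */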
#if (__CORTEX_M >= 0x03) /* __CORTEX_M is defined in core_cm0.h, core_cm3.h and core_cm4.h. */
__IAR_FT uint8_t __LDRBT(volatile uint8_t *addr)
{
uint32_t res;
__ASM volatile ("LDRBT %0, [%1]" : "=r" (res) : "r" (addr) : "memory");
return ((uint8_t)res);
}
__IAR_FT uint16_t __LDRHT(volatile uint16_t *addr)
{
uint32_t res;
__ASM volatile ("LDRHT %0, [%1]" : "=r" (res) : "r" (addr) : "memory");
return ((uint16_t)res);
}
__IAR_FT uint32_t __LDRT(volatile uint32_t *addr)
{
uint32_t res;
__ASM volatile ("LDRT %0, [%1]" : "=r" (res) : "r" (addr) : "memory");
return res;
}
__IAR_FT void __STRBT(uint8_t value, volatile uint8_t *addr)
{
__ASM volatile ("STRBT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory");
}
__IAR_FT void __STRHT(uint16_t value, volatile uint16_t *addr)
{
__ASM volatile ("STRHT %1, [%0]" : : "r" (addr), "r" ((uint32_t)value) : "memory");
}
__IAR_FT void __STRT(uint32_t value, volatile uint32_t *addr)
{
__ASM volatile ("STRT %1, [%0]" : : "r" (addr), "r" (value) : "memory");
}
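/* Example: an unprivileged word store issued on behalf of a user task with
   the *T accessors above (a sketch; `user_addr` is hypothetical, and the
   access faults if the unprivileged context lacks write permission). */
__IAR_FT void example_user_store(volatile uint32_t *user_addr, uint32_t value)
{
  __STRT(value, user_addr);
}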
#endif /* (__CORTEX_M >= 0x03) */
#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \
(defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) )
__IAR_FT uint8_t __LDAB(volatile uint8_t *ptr)
{
uint32_t res;
__ASM volatile ("LDAB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
return ((uint8_t)res);
}
__IAR_FT uint16_t __LDAH(volatile uint16_t *ptr)
{
uint32_t res;
__ASM volatile ("LDAH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
return ((uint16_t)res);
}
__IAR_FT uint32_t __LDA(volatile uint32_t *ptr)
{
uint32_t res;
__ASM volatile ("LDA %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
return res;
}
__IAR_FT void __STLB(uint8_t value, volatile uint8_t *ptr)
{
__ASM volatile ("STLB %1, [%0]" :: "r" (ptr), "r" (value) : "memory");
}
__IAR_FT void __STLH(uint16_t value, volatile uint16_t *ptr)
{
__ASM volatile ("STLH %1, [%0]" :: "r" (ptr), "r" (value) : "memory");
}
__IAR_FT void __STL(uint32_t value, volatile uint32_t *ptr)
{
__ASM volatile ("STL %1, [%0]" :: "r" (ptr), "r" (value) : "memory");
}
__IAR_FT uint8_t __LDAEXB(volatile uint8_t *ptr)
{
uint32_t res;
__ASM volatile ("LDAEXB %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
return ((uint8_t)res);
}
__IAR_FT uint16_t __LDAEXH(volatile uint16_t *ptr)
{
uint32_t res;
__ASM volatile ("LDAEXH %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
return ((uint16_t)res);
}
__IAR_FT uint32_t __LDAEX(volatile uint32_t *ptr)
{
uint32_t res;
__ASM volatile ("LDAEX %0, [%1]" : "=r" (res) : "r" (ptr) : "memory");
return res;
}
__IAR_FT uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr)
{
uint32_t res;
__ASM volatile ("STLEXB %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory");
return res;
}
__IAR_FT uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr)
{
uint32_t res;
__ASM volatile ("STLEXH %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory");
return res;
}
__IAR_FT uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr)
{
uint32_t res;
__ASM volatile ("STLEX %0, %2, [%1]" : "=r" (res) : "r" (ptr), "r" (value) : "memory");
return res;
}
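/* Example: an acquire/release spinlock sketch built on the Armv8-M
   exclusives above (`lock` is a hypothetical word; 0 = free, 1 = taken). */
__IAR_FT void example_spin_lock(volatile uint32_t *lock)
{
  do {
    while (__LDAEX(lock) != 0U) { }     /* wait until free (acquire)    */
  } while (__STLEX(1U, lock) != 0U);    /* claim; retry if monitor lost */
}
__IAR_FT void example_spin_unlock(volatile uint32_t *lock)
{
  __STL(0U, lock);                      /* release with store-release   */
}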
#endif /* __ARM_ARCH_8M_MAIN__ or __ARM_ARCH_8M_BASE__ */
#undef __IAR_FT
#undef __IAR_M0_FAMILY
#undef __ICCARM_V8
#pragma diag_default=Pe940
#pragma diag_default=Pe177
#define __SXTB16_RORn(ARG1, ARG2) __SXTB16(__ROR(ARG1, ARG2))
#endif /* __CMSIS_ICCARM_H__ */

@ -0,0 +1,39 @@
/**************************************************************************//**
* @file cmsis_version.h
* @brief CMSIS Core(M) Version definitions
* @version V5.0.4
* @date 23. July 2019
******************************************************************************/
/*
* Copyright (c) 2009-2019 ARM Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef __CMSIS_VERSION_H
#define __CMSIS_VERSION_H
/* CMSIS Version definitions */
#define __CM_CMSIS_VERSION_MAIN ( 5U) /*!< [31:16] CMSIS Core(M) main version */
#define __CM_CMSIS_VERSION_SUB ( 4U) /*!< [15:0] CMSIS Core(M) sub version */
#define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \
__CM_CMSIS_VERSION_SUB ) /*!< CMSIS Core(M) version number */
#endif
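/* Example: user code can gate on the packed version word above, e.g.

     #if __CM_CMSIS_VERSION < ((5UL << 16U) | 1UL)
       #error "CMSIS Core(M) 5.1 or newer required"
     #endif

   (a sketch; the 5.1 floor is purely illustrative). */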

@ -0,0 +1,952 @@
/**************************************************************************//**
* @file core_cm0.h
* @brief CMSIS Cortex-M0 Core Peripheral Access Layer Header File
* @version V5.0.8
* @date 21. August 2019
******************************************************************************/
/*
* Copyright (c) 2009-2019 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef __CORE_CM0_H_GENERIC
#define __CORE_CM0_H_GENERIC
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
\page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
CMSIS violates the following MISRA-C:2004 rules:
\li Required Rule 8.5, object/function definition in header file.<br>
Function definitions in header files are used to allow 'inlining'.
\li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
Unions are used for effective representation of core registers.
\li Advisory Rule 19.7, Function-like macro defined.<br>
Function-like macros are used to allow more efficient code.
*/
/*******************************************************************************
* CMSIS definitions
******************************************************************************/
/**
\ingroup Cortex_M0
@{
*/
#include "cmsis_version.h"
/* CMSIS CM0 definitions */
#define __CM0_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */
#define __CM0_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */
#define __CM0_CMSIS_VERSION ((__CM0_CMSIS_VERSION_MAIN << 16U) | \
__CM0_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */
#define __CORTEX_M (0U) /*!< Cortex-M Core */
/** __FPU_USED indicates whether an FPU is used or not.
This core does not support an FPU at all
*/
#define __FPU_USED 0U
#if defined ( __CC_ARM )
#if defined __TARGET_FPU_VFP
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
#if defined __ARM_FP
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __GNUC__ )
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __ICCARM__ )
#if defined __ARMVFP__
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __TI_ARM__ )
#if defined __TI_VFP_SUPPORT__
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __TASKING__ )
#if defined __FPU_VFP__
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __CSMC__ )
#if ( __CSMC__ & 0x400U)
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#endif
#include "cmsis_compiler.h" /* CMSIS compiler specific defines */
#ifdef __cplusplus
}
#endif
#endif /* __CORE_CM0_H_GENERIC */
#ifndef __CMSIS_GENERIC
#ifndef __CORE_CM0_H_DEPENDANT
#define __CORE_CM0_H_DEPENDANT
#ifdef __cplusplus
extern "C" {
#endif
/* check device defines and use defaults */
#if defined __CHECK_DEVICE_DEFINES
#ifndef __CM0_REV
#define __CM0_REV 0x0000U
#warning "__CM0_REV not defined in device header file; using default!"
#endif
#ifndef __NVIC_PRIO_BITS
#define __NVIC_PRIO_BITS 2U
#warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
#endif
#ifndef __Vendor_SysTickConfig
#define __Vendor_SysTickConfig 0U
#warning "__Vendor_SysTickConfig not defined in device header file; using default!"
#endif
#endif
/* IO definitions (access restrictions to peripheral registers) */
/**
\defgroup CMSIS_glob_defs CMSIS Global Defines
<strong>IO Type Qualifiers</strong> are used
\li to specify the access to peripheral variables.
\li for automatic generation of peripheral register debug information.
*/
#ifdef __cplusplus
#define __I volatile /*!< Defines 'read only' permissions */
#else
#define __I volatile const /*!< Defines 'read only' permissions */
#endif
#define __O volatile /*!< Defines 'write only' permissions */
#define __IO volatile /*!< Defines 'read / write' permissions */
/* following defines should be used for structure members */
#define __IM volatile const /*! Defines 'read only' structure member permissions */
#define __OM volatile /*! Defines 'write only' structure member permissions */
#define __IOM volatile /*! Defines 'read / write' structure member permissions */
/*@} end of group Cortex_M0 */
/*******************************************************************************
* Register Abstraction
Core Register contain:
- Core Register
- Core NVIC Register
- Core SCB Register
- Core SysTick Register
******************************************************************************/
/**
\defgroup CMSIS_core_register Defines and Type Definitions
\brief Type definitions and defines for Cortex-M processor based devices.
*/
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_CORE Status and Control Registers
\brief Core Register type definitions.
@{
*/
/**
\brief Union type to access the Application Program Status Register (APSR).
*/
typedef union
{
struct
{
uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */
uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
uint32_t C:1; /*!< bit: 29 Carry condition code flag */
uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
uint32_t N:1; /*!< bit: 31 Negative condition code flag */
} b; /*!< Structure used for bit access */
uint32_t w; /*!< Type used for word access */
} APSR_Type;
/* APSR Register Definitions */
#define APSR_N_Pos 31U /*!< APSR: N Position */
#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */
#define APSR_Z_Pos 30U /*!< APSR: Z Position */
#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */
#define APSR_C_Pos 29U /*!< APSR: C Position */
#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */
#define APSR_V_Pos 28U /*!< APSR: V Position */
#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */
/**
\brief Union type to access the Interrupt Program Status Register (IPSR).
*/
typedef union
{
struct
{
uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */
uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */
} b; /*!< Structure used for bit access */
uint32_t w; /*!< Type used for word access */
} IPSR_Type;
/* IPSR Register Definitions */
#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */
#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */
/**
\brief Union type to access the Special-Purpose Program Status Registers (xPSR).
*/
typedef union
{
struct
{
uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */
uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */
uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */
uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */
uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
uint32_t C:1; /*!< bit: 29 Carry condition code flag */
uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
uint32_t N:1; /*!< bit: 31 Negative condition code flag */
} b; /*!< Structure used for bit access */
uint32_t w; /*!< Type used for word access */
} xPSR_Type;
/* xPSR Register Definitions */
#define xPSR_N_Pos 31U /*!< xPSR: N Position */
#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */
#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */
#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */
#define xPSR_C_Pos 29U /*!< xPSR: C Position */
#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */
#define xPSR_V_Pos 28U /*!< xPSR: V Position */
#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */
#define xPSR_T_Pos 24U /*!< xPSR: T Position */
#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */
#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */
#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */
/**
\brief Union type to access the Control Registers (CONTROL).
*/
typedef union
{
struct
{
uint32_t _reserved0:1; /*!< bit: 0 Reserved */
uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */
uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */
} b; /*!< Structure used for bit access */
uint32_t w; /*!< Type used for word access */
} CONTROL_Type;
/* CONTROL Register Definitions */
#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */
#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */
/*@} end of group CMSIS_CORE */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC)
\brief Type definitions for the NVIC Registers
@{
*/
/**
\brief Structure type to access the Nested Vectored Interrupt Controller (NVIC).
*/
typedef struct
{
__IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */
uint32_t RESERVED0[31U];
__IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */
uint32_t RESERVED1[31U];
__IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */
uint32_t RESERVED2[31U];
__IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */
uint32_t RESERVED3[31U];
uint32_t RESERVED4[64U];
__IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */
} NVIC_Type;
/*@} end of group CMSIS_NVIC */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_SCB System Control Block (SCB)
\brief Type definitions for the System Control Block Registers
@{
*/
/**
\brief Structure type to access the System Control Block (SCB).
*/
typedef struct
{
__IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */
__IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */
uint32_t RESERVED0;
__IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */
__IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */
__IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */
uint32_t RESERVED1;
__IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */
__IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */
} SCB_Type;
/* SCB CPUID Register Definitions */
#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */
#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */
#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */
#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */
#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */
#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */
#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */
#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */
#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */
#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */
/* SCB Interrupt Control State Register Definitions */
#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */
#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */
#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */
#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */
#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */
#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */
#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */
#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */
#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */
#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */
#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */
#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */
#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */
#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */
#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */
#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */
#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */
#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */
/* SCB Application Interrupt and Reset Control Register Definitions */
#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */
#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */
#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */
#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */
#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */
#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */
#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */
#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */
#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */
#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */
/* SCB System Control Register Definitions */
#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */
#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */
#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */
#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */
#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */
#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */
/* SCB Configuration Control Register Definitions */
#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */
#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */
#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */
#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */
/* SCB System Handler Control and State Register Definitions */
#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */
#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */
/*@} end of group CMSIS_SCB */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_SysTick System Tick Timer (SysTick)
\brief Type definitions for the System Timer Registers.
@{
*/
/**
\brief Structure type to access the System Timer (SysTick).
*/
typedef struct
{
__IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */
__IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */
__IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */
__IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */
} SysTick_Type;
/* SysTick Control / Status Register Definitions */
#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */
#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */
#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */
#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */
#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */
#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */
#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */
#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */
/* SysTick Reload Register Definitions */
#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */
#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */
/* SysTick Current Register Definitions */
#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */
#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */
/* SysTick Calibration Register Definitions */
#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */
#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */
#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */
#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */
#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */
#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */
/*@} end of group CMSIS_SysTick */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug)
\brief    Cortex-M0 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over the DAP and not via the processor.
Therefore they are not covered by the Cortex-M0 header file.
@{
*/
/*@} end of group CMSIS_CoreDebug */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_core_bitfield Core register bit field macros
\brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
@{
*/
/**
\brief Mask and shift a bit field value for use in a register bit range.
\param[in] field Name of the register bit field.
\param[in]    field  Value of the bit field. This parameter is interpreted as a uint32_t type.
\return Masked and shifted value.
*/
#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
/**
\brief     Mask and shift a register value to extract a bit field value.
\param[in] field Name of the register bit field.
\param[in]    value  Value of register. This parameter is interpreted as a uint32_t type.
\return Masked and shifted bit field value.
*/
#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
/*@} end of group CMSIS_core_bitfield */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_core_base Core Definitions
\brief Definitions for base addresses, unions, and structures.
@{
*/
/* Memory mapping of Core Hardware */
#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */
#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */
#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */
#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */
#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */
#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */
#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */
/*@} */
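/* Example: extracting CPUID fields with the _FLD2VAL macro and the SCB
   definitions above (a sketch; 0x41 identifies Arm as the implementer). */
__STATIC_INLINE uint32_t example_cpuid_implementer(void)
{
  return _FLD2VAL(SCB_CPUID_IMPLEMENTER, SCB->CPUID);
}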
/*******************************************************************************
* Hardware Abstraction Layer
Core Function Interface contains:
- Core NVIC Functions
- Core SysTick Functions
- Core Register Access Functions
******************************************************************************/
/**
\defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference
*/
/* ########################## NVIC functions #################################### */
/**
\ingroup CMSIS_Core_FunctionInterface
\defgroup CMSIS_Core_NVICFunctions NVIC Functions
\brief Functions that manage interrupts and exceptions via the NVIC.
@{
*/
#ifdef CMSIS_NVIC_VIRTUAL
#ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE
#define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h"
#endif
#include CMSIS_NVIC_VIRTUAL_HEADER_FILE
#else
#define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping
#define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping
#define NVIC_EnableIRQ __NVIC_EnableIRQ
#define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ
#define NVIC_DisableIRQ __NVIC_DisableIRQ
#define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ
#define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ
#define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ
/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M0 */
#define NVIC_SetPriority __NVIC_SetPriority
#define NVIC_GetPriority __NVIC_GetPriority
#define NVIC_SystemReset __NVIC_SystemReset
#endif /* CMSIS_NVIC_VIRTUAL */
#ifdef CMSIS_VECTAB_VIRTUAL
#ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE
#define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h"
#endif
#include CMSIS_VECTAB_VIRTUAL_HEADER_FILE
#else
#define NVIC_SetVector __NVIC_SetVector
#define NVIC_GetVector __NVIC_GetVector
#endif /* (CMSIS_VECTAB_VIRTUAL) */
#define NVIC_USER_IRQ_OFFSET 16
/* The following EXC_RETURN values are saved to the LR on exception entry */
#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */
#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */
#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */
/* Interrupt Priorities are WORD accessible only under Armv6-M */
/* The following MACROS handle generation of the register offset and byte masks */
#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL)
#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) )
#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) )
#define __NVIC_SetPriorityGrouping(X) (void)(X)
#define __NVIC_GetPriorityGrouping() (0U)
/**
\brief Enable Interrupt
\details Enables a device specific interrupt in the NVIC interrupt controller.
\param [in] IRQn Device specific interrupt number.
\note IRQn must not be negative.
*/
__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
__COMPILER_BARRIER();
NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
__COMPILER_BARRIER();
}
}
/**
\brief Get Interrupt Enable status
\details Returns a device specific interrupt enable status from the NVIC interrupt controller.
\param [in] IRQn Device specific interrupt number.
\return 0 Interrupt is not enabled.
\return 1 Interrupt is enabled.
\note IRQn must not be negative.
*/
__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
}
else
{
return(0U);
}
}
/**
\brief Disable Interrupt
\details Disables a device specific interrupt in the NVIC interrupt controller.
\param [in] IRQn Device specific interrupt number.
\note IRQn must not be negative.
*/
__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
__DSB();
__ISB();
}
}
/**
\brief Get Pending Interrupt
\details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt.
\param [in] IRQn Device specific interrupt number.
\return 0 Interrupt status is not pending.
\return 1 Interrupt status is pending.
\note IRQn must not be negative.
*/
__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
}
else
{
return(0U);
}
}
/**
\brief Set Pending Interrupt
\details Sets the pending bit of a device specific interrupt in the NVIC pending register.
\param [in] IRQn Device specific interrupt number.
\note IRQn must not be negative.
*/
__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
}
}
/**
\brief Clear Pending Interrupt
\details Clears the pending bit of a device specific interrupt in the NVIC pending register.
\param [in] IRQn Device specific interrupt number.
\note IRQn must not be negative.
*/
__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
}
}
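/* Example: a minimal enable-and-pend sequence using the functions above
   (a sketch; `irq` is any device-specific interrupt number >= 0). */
__STATIC_INLINE void example_kick_irq(IRQn_Type irq)
{
  NVIC_EnableIRQ(irq);            /* unmask in the NVIC             */
  NVIC_SetPendingIRQ(irq);        /* software-trigger the handler   */
  (void)NVIC_GetPendingIRQ(irq);  /* reads 1 until the handler runs */
}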
/**
\brief Set Interrupt Priority
\details Sets the priority of a device specific interrupt or a processor exception.
The interrupt number can be positive to specify a device specific interrupt,
or negative to specify a processor exception.
\param [in] IRQn Interrupt number.
\param [in] priority Priority to set.
\note The priority cannot be set for every processor exception.
*/
__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
{
if ((int32_t)(IRQn) >= 0)
{
NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
(((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
}
else
{
SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
(((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
}
}
/**
\brief Get Interrupt Priority
\details Reads the priority of a device specific interrupt or a processor exception.
The interrupt number can be positive to specify a device specific interrupt,
or negative to specify a processor exception.
\param [in] IRQn Interrupt number.
\return Interrupt Priority.
Value is aligned automatically to the implemented priority bits of the microcontroller.
*/
__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
}
else
{
return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
}
}
/**
\brief Encode Priority
\details Encodes the priority for an interrupt with the given priority group,
preemptive priority value, and subpriority value.
In case of a conflict between priority grouping and available
priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.
\param [in] PriorityGroup Used priority group.
\param [in] PreemptPriority Preemptive priority value (starting from 0).
\param [in] SubPriority Subpriority value (starting from 0).
\return Encoded priority. Value can be used in the function \ref NVIC_SetPriority().
*/
__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)
{
uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
uint32_t PreemptPriorityBits;
uint32_t SubPriorityBits;
PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
return (
((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |
((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL)))
);
}
/**
\brief Decode Priority
\details Decodes an interrupt priority value with a given priority group to
preemptive priority value and subpriority value.
In case of a conflict between priority grouping and available
priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set.
\param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority().
\param [in] PriorityGroup Used priority group.
\param [out] pPreemptPriority Preemptive priority value (starting from 0).
\param [out] pSubPriority Subpriority value (starting from 0).
*/
__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority)
{
uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
uint32_t PreemptPriorityBits;
uint32_t SubPriorityBits;
PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
*pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);
*pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL);
}
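/* Example: round-tripping a priority through the encode/decode pair above
   (a sketch; with __NVIC_PRIO_BITS == 2 and priority group 5 there are two
   preempt bits and no subpriority bits). */
__STATIC_INLINE void example_priority_roundtrip(uint32_t *pre, uint32_t *sub)
{
  uint32_t enc = NVIC_EncodePriority(0x05UL, 1UL, 0UL);
  NVIC_DecodePriority(enc, 0x05UL, pre, sub);  /* *pre == 1, *sub == 0 */
}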
/**
\brief Set Interrupt Vector
\details Sets an interrupt vector in SRAM based interrupt vector table.
The interrupt number can be positive to specify a device specific interrupt,
or negative to specify a processor exception.
Address 0 must be mapped to SRAM.
\param [in] IRQn Interrupt number
\param [in] vector Address of interrupt handler function
*/
__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
{
uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */
*(vectors + (int32_t)IRQn) = vector; /* use pointer arithmetic to access vector */
/* ARM Application Note 321 states that the M0 does not require the architectural barrier */
}
/**
\brief Get Interrupt Vector
\details Reads an interrupt vector from interrupt vector table.
The interrupt number can be positive to specify a device specific interrupt,
or negative to specify a processor exception.
\param [in] IRQn Interrupt number.
\return Address of interrupt handler function
*/
__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
{
uint32_t *vectors = (uint32_t *)(NVIC_USER_IRQ_OFFSET << 2); /* point to 1st user interrupt */
return *(vectors + (int32_t)IRQn); /* use pointer arithmetic to access vector */
}
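/* Example: installing a handler in an SRAM vector table with the helpers
   above (a sketch; assumes address 0 is mapped to SRAM, as noted). */
__STATIC_INLINE void example_install_handler(IRQn_Type irq, void (*handler)(void))
{
  NVIC_SetVector(irq, (uint32_t)handler);
  /* NVIC_GetVector(irq) now reads back the handler address */
}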
/**
\brief System Reset
\details Initiates a system reset request to reset the MCU.
*/
__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void)
{
__DSB();                                                          /* Ensure all outstanding memory accesses,
including buffered writes, are completed before reset */
SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
SCB_AIRCR_SYSRESETREQ_Msk);
__DSB(); /* Ensure completion of memory access */
for(;;) /* wait until reset */
{
__NOP();
}
}
/*@} end of CMSIS_Core_NVICFunctions */
/* ########################## FPU functions #################################### */
/**
\ingroup CMSIS_Core_FunctionInterface
\defgroup CMSIS_Core_FpuFunctions FPU Functions
\brief Function that provides FPU type.
@{
*/
/**
\brief get FPU type
\details returns the FPU type
\returns
- \b 0: No FPU
- \b 1: Single precision FPU
- \b 2: Double + Single precision FPU
*/
__STATIC_INLINE uint32_t SCB_GetFPUType(void)
{
return 0U; /* No FPU */
}
/*@} end of CMSIS_Core_FpuFunctions */
/* ################################## SysTick function ############################################ */
/**
\ingroup CMSIS_Core_FunctionInterface
\defgroup CMSIS_Core_SysTickFunctions SysTick Functions
\brief Functions that configure the System.
@{
*/
#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U)
/**
\brief System Tick Configuration
\details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
Counter is in free running mode to generate periodic interrupts.
\param [in] ticks Number of ticks between two interrupts.
\return 0 Function succeeded.
\return 1 Function failed.
\note When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
must contain a vendor-specific implementation of this function.
*/
__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
{
if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
{
return (1UL); /* Reload value impossible */
}
SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */
NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
SysTick->VAL = 0UL; /* Load the SysTick Counter Value */
SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
SysTick_CTRL_TICKINT_Msk |
SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */
return (0UL); /* Function successful */
}
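/* Example: a 1 ms tick using the function above (a sketch; the core clock
   frequency normally comes from SystemCoreClock in system_<device>.c). */
__STATIC_INLINE uint32_t example_systick_1ms(uint32_t core_clock_hz)
{
  return SysTick_Config(core_clock_hz / 1000U);  /* 0 = success */
}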
#endif
/*@} end of CMSIS_Core_SysTickFunctions */
#ifdef __cplusplus
}
#endif
#endif /* __CORE_CM0_H_DEPENDANT */
#endif /* __CMSIS_GENERIC */

@ -0,0 +1,979 @@
/**************************************************************************//**
* @file core_cm1.h
* @brief CMSIS Cortex-M1 Core Peripheral Access Layer Header File
* @version V1.0.1
* @date 12. November 2018
******************************************************************************/
/*
* Copyright (c) 2009-2018 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef __CORE_CM1_H_GENERIC
#define __CORE_CM1_H_GENERIC
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
\page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions
CMSIS violates the following MISRA-C:2004 rules:
\li Required Rule 8.5, object/function definition in header file.<br>
Function definitions in header files are used to allow 'inlining'.
\li Required Rule 18.4, declaration of union type or object of union type: '{...}'.<br>
Unions are used for effective representation of core registers.
\li Advisory Rule 19.7, Function-like macro defined.<br>
Function-like macros are used to allow more efficient code.
*/
/*******************************************************************************
* CMSIS definitions
******************************************************************************/
/**
\ingroup Cortex_M1
@{
*/
#include "cmsis_version.h"
/* CMSIS CM1 definitions */
#define __CM1_CMSIS_VERSION_MAIN (__CM_CMSIS_VERSION_MAIN) /*!< \deprecated [31:16] CMSIS HAL main version */
#define __CM1_CMSIS_VERSION_SUB (__CM_CMSIS_VERSION_SUB) /*!< \deprecated [15:0] CMSIS HAL sub version */
#define __CM1_CMSIS_VERSION ((__CM1_CMSIS_VERSION_MAIN << 16U) | \
__CM1_CMSIS_VERSION_SUB ) /*!< \deprecated CMSIS HAL version number */
#define __CORTEX_M (1U) /*!< Cortex-M Core */
/** __FPU_USED indicates whether an FPU is used or not.
This core does not support an FPU at all
*/
#define __FPU_USED 0U
#if defined ( __CC_ARM )
#if defined __TARGET_FPU_VFP
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050)
#if defined __ARM_FP
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __GNUC__ )
#if defined (__VFP_FP__) && !defined(__SOFTFP__)
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __ICCARM__ )
#if defined __ARMVFP__
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __TI_ARM__ )
#if defined __TI_VFP_SUPPORT__
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __TASKING__ )
#if defined __FPU_VFP__
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#elif defined ( __CSMC__ )
#if ( __CSMC__ & 0x400U)
#error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)"
#endif
#endif
#include "cmsis_compiler.h" /* CMSIS compiler specific defines */
#ifdef __cplusplus
}
#endif
#endif /* __CORE_CM1_H_GENERIC */
#ifndef __CMSIS_GENERIC
#ifndef __CORE_CM1_H_DEPENDANT
#define __CORE_CM1_H_DEPENDANT
#ifdef __cplusplus
extern "C" {
#endif
/* check device defines and use defaults */
#if defined __CHECK_DEVICE_DEFINES
#ifndef __CM1_REV
#define __CM1_REV 0x0100U
#warning "__CM1_REV not defined in device header file; using default!"
#endif
#ifndef __NVIC_PRIO_BITS
#define __NVIC_PRIO_BITS 2U
#warning "__NVIC_PRIO_BITS not defined in device header file; using default!"
#endif
#ifndef __Vendor_SysTickConfig
#define __Vendor_SysTickConfig 0U
#warning "__Vendor_SysTickConfig not defined in device header file; using default!"
#endif
#endif
/* IO definitions (access restrictions to peripheral registers) */
/**
\defgroup CMSIS_glob_defs CMSIS Global Defines
<strong>IO Type Qualifiers</strong> are used
\li to specify the access to peripheral variables.
\li for automatic generation of peripheral register debug information.
*/
#ifdef __cplusplus
#define __I volatile /*!< Defines 'read only' permissions */
#else
#define __I volatile const /*!< Defines 'read only' permissions */
#endif
#define __O volatile /*!< Defines 'write only' permissions */
#define __IO volatile /*!< Defines 'read / write' permissions */
/* following defines should be used for structure members */
#define __IM volatile const /*!< Defines 'read only' structure member permissions */
#define __OM volatile /*!< Defines 'write only' structure member permissions */
#define __IOM volatile /*!< Defines 'read / write' structure member permissions */
/*@} end of group Cortex_M1 */
/*******************************************************************************
* Register Abstraction
Core Registers contain:
- Core Register
- Core NVIC Register
- Core SCB Register
- Core SysTick Register
******************************************************************************/
/**
\defgroup CMSIS_core_register Defines and Type Definitions
\brief Type definitions and defines for Cortex-M processor based devices.
*/
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_CORE Status and Control Registers
\brief Core Register type definitions.
@{
*/
/**
\brief Union type to access the Application Program Status Register (APSR).
*/
typedef union
{
struct
{
uint32_t _reserved0:28; /*!< bit: 0..27 Reserved */
uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
uint32_t C:1; /*!< bit: 29 Carry condition code flag */
uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
uint32_t N:1; /*!< bit: 31 Negative condition code flag */
} b; /*!< Structure used for bit access */
uint32_t w; /*!< Type used for word access */
} APSR_Type;
/* APSR Register Definitions */
#define APSR_N_Pos 31U /*!< APSR: N Position */
#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */
#define APSR_Z_Pos 30U /*!< APSR: Z Position */
#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */
#define APSR_C_Pos 29U /*!< APSR: C Position */
#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */
#define APSR_V_Pos 28U /*!< APSR: V Position */
#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */
/**
\brief Union type to access the Interrupt Program Status Register (IPSR).
*/
typedef union
{
struct
{
uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */
uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */
} b; /*!< Structure used for bit access */
uint32_t w; /*!< Type used for word access */
} IPSR_Type;
/* IPSR Register Definitions */
#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */
#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */
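/**
  Usage sketch (illustrative): test whether code is running in an exception
  handler by reading IPSR. __get_IPSR() is provided by the CMSIS compiler
  abstraction (cmsis_compiler.h).
  \code
  if ((__get_IPSR() & IPSR_ISR_Msk) != 0U)
  {
    // handler mode: an exception or interrupt is active
  }
  \endcode
*/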
/**
\brief Union type to access the Special-Purpose Program Status Registers (xPSR).
*/
typedef union
{
struct
{
uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */
uint32_t _reserved0:15; /*!< bit: 9..23 Reserved */
uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */
uint32_t _reserved1:3; /*!< bit: 25..27 Reserved */
uint32_t V:1; /*!< bit: 28 Overflow condition code flag */
uint32_t C:1; /*!< bit: 29 Carry condition code flag */
uint32_t Z:1; /*!< bit: 30 Zero condition code flag */
uint32_t N:1; /*!< bit: 31 Negative condition code flag */
} b; /*!< Structure used for bit access */
uint32_t w; /*!< Type used for word access */
} xPSR_Type;
/* xPSR Register Definitions */
#define xPSR_N_Pos 31U /*!< xPSR: N Position */
#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */
#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */
#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */
#define xPSR_C_Pos 29U /*!< xPSR: C Position */
#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */
#define xPSR_V_Pos 28U /*!< xPSR: V Position */
#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */
#define xPSR_T_Pos 24U /*!< xPSR: T Position */
#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */
#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */
#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */
/**
\brief Union type to access the Control Registers (CONTROL).
*/
typedef union
{
struct
{
uint32_t _reserved0:1; /*!< bit: 0 Reserved */
uint32_t SPSEL:1; /*!< bit: 1 Stack to be used */
uint32_t _reserved1:30; /*!< bit: 2..31 Reserved */
} b; /*!< Structure used for bit access */
uint32_t w; /*!< Type used for word access */
} CONTROL_Type;
/* CONTROL Register Definitions */
#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */
#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */
/*@} end of group CMSIS_CORE */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC)
\brief Type definitions for the NVIC Registers
@{
*/
/**
\brief Structure type to access the Nested Vectored Interrupt Controller (NVIC).
*/
typedef struct
{
__IOM uint32_t ISER[1U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */
uint32_t RESERVED0[31U];
__IOM uint32_t ICER[1U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */
uint32_t RESERVED1[31U];
__IOM uint32_t ISPR[1U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */
uint32_t RESERVED2[31U];
__IOM uint32_t ICPR[1U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */
uint32_t RESERVED3[31U];
uint32_t RESERVED4[64U];
__IOM uint32_t IP[8U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register */
} NVIC_Type;
/*@} end of group CMSIS_NVIC */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_SCB System Control Block (SCB)
\brief Type definitions for the System Control Block Registers
@{
*/
/**
\brief Structure type to access the System Control Block (SCB).
*/
typedef struct
{
__IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */
__IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */
uint32_t RESERVED0;
__IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */
__IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */
__IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */
uint32_t RESERVED1;
__IOM uint32_t SHP[2U]; /*!< Offset: 0x01C (R/W) System Handlers Priority Registers. [0] is RESERVED */
__IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */
} SCB_Type;
/* SCB CPUID Register Definitions */
#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */
#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */
#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */
#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */
#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */
#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */
#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */
#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */
#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */
#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */
/* SCB Interrupt Control State Register Definitions */
#define SCB_ICSR_NMIPENDSET_Pos 31U /*!< SCB ICSR: NMIPENDSET Position */
#define SCB_ICSR_NMIPENDSET_Msk (1UL << SCB_ICSR_NMIPENDSET_Pos) /*!< SCB ICSR: NMIPENDSET Mask */
#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */
#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */
#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */
#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */
#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */
#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */
#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */
#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */
#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */
#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */
#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */
#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */
#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */
#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */
#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */
#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */
/* SCB Application Interrupt and Reset Control Register Definitions */
#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */
#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */
#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */
#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */
#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */
#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */
#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */
#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */
#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */
#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */
/* SCB System Control Register Definitions */
#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */
#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */
#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */
#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */
#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */
#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */
/* SCB Configuration Control Register Definitions */
#define SCB_CCR_STKALIGN_Pos 9U /*!< SCB CCR: STKALIGN Position */
#define SCB_CCR_STKALIGN_Msk (1UL << SCB_CCR_STKALIGN_Pos) /*!< SCB CCR: STKALIGN Mask */
#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */
#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */
/* SCB System Handler Control and State Register Definitions */
#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */
#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */
/*@} end of group CMSIS_SCB */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB)
\brief Type definitions for the System Control and ID Register not in the SCB
@{
*/
/**
\brief Structure type to access the System Control and ID Register not in the SCB.
*/
typedef struct
{
uint32_t RESERVED0[2U];
__IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */
} SCnSCB_Type;
/* Auxiliary Control Register Definitions */
#define SCnSCB_ACTLR_ITCMUAEN_Pos 4U /*!< ACTLR: Instruction TCM Upper Alias Enable Position */
#define SCnSCB_ACTLR_ITCMUAEN_Msk (1UL << SCnSCB_ACTLR_ITCMUAEN_Pos) /*!< ACTLR: Instruction TCM Upper Alias Enable Mask */
#define SCnSCB_ACTLR_ITCMLAEN_Pos 3U /*!< ACTLR: Instruction TCM Lower Alias Enable Position */
#define SCnSCB_ACTLR_ITCMLAEN_Msk (1UL << SCnSCB_ACTLR_ITCMLAEN_Pos) /*!< ACTLR: Instruction TCM Lower Alias Enable Mask */
/*@} end of group CMSIS_SCnSCB */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_SysTick System Tick Timer (SysTick)
\brief Type definitions for the System Timer Registers.
@{
*/
/**
\brief Structure type to access the System Timer (SysTick).
*/
typedef struct
{
__IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */
__IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */
__IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */
__IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */
} SysTick_Type;
/* SysTick Control / Status Register Definitions */
#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */
#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */
#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */
#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */
#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */
#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */
#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */
#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */
/* SysTick Reload Register Definitions */
#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */
#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */
/* SysTick Current Register Definitions */
#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */
#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */
/* SysTick Calibration Register Definitions */
#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */
#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */
#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */
#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */
#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */
#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */
/*@} end of group CMSIS_SysTick */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug)
\brief Cortex-M1 Core Debug Registers (DCB registers, SHCSR, and DFSR) are only accessible over the DAP and not via the processor.
Therefore they are not covered by the Cortex-M1 header file.
@{
*/
/*@} end of group CMSIS_CoreDebug */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_core_bitfield Core register bit field macros
\brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk).
@{
*/
/**
\brief Mask and shift a bit field value for use in a register bit range.
\param[in] field Name of the register bit field.
\param[in] value Value of the bit field. This parameter is interpreted as a uint32_t type.
\return Masked and shifted value.
*/
#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk)
/**
\brief Mask and shift a register value to extract a bit field value.
\param[in] field Name of the register bit field.
\param[in] value Value of the register. This parameter is interpreted as a uint32_t type.
\return Masked and shifted bit field value.
*/
#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos)
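/**
  Usage sketch (illustrative): extract and re-encode the CPUID part number
  with the bit field macros above, using the SCB definitions from this file.
  \code
  uint32_t part = _FLD2VAL(SCB_CPUID_PARTNO, SCB->CPUID);  // extract PARTNO field
  uint32_t bits = _VAL2FLD(SCB_CPUID_PARTNO, part);        // shift it back into place
  \endcode
*/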
/*@} end of group CMSIS_core_bitfield */
/**
\ingroup CMSIS_core_register
\defgroup CMSIS_core_base Core Definitions
\brief Definitions for base addresses, unions, and structures.
@{
*/
/* Memory mapping of Core Hardware */
#define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */
#define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */
#define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */
#define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */
#define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */
#define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */
#define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */
#define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */
/*@} */
/*******************************************************************************
* Hardware Abstraction Layer
Core Function Interface contains:
- Core NVIC Functions
- Core SysTick Functions
- Core Register Access Functions
******************************************************************************/
/**
\defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference
*/
/* ########################## NVIC functions #################################### */
/**
\ingroup CMSIS_Core_FunctionInterface
\defgroup CMSIS_Core_NVICFunctions NVIC Functions
\brief Functions that manage interrupts and exceptions via the NVIC.
@{
*/
#ifdef CMSIS_NVIC_VIRTUAL
#ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE
#define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h"
#endif
#include CMSIS_NVIC_VIRTUAL_HEADER_FILE
#else
#define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping
#define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping
#define NVIC_EnableIRQ __NVIC_EnableIRQ
#define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ
#define NVIC_DisableIRQ __NVIC_DisableIRQ
#define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ
#define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ
#define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ
/*#define NVIC_GetActive __NVIC_GetActive not available for Cortex-M1 */
#define NVIC_SetPriority __NVIC_SetPriority
#define NVIC_GetPriority __NVIC_GetPriority
#define NVIC_SystemReset __NVIC_SystemReset
#endif /* CMSIS_NVIC_VIRTUAL */
#ifdef CMSIS_VECTAB_VIRTUAL
#ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE
#define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h"
#endif
#include CMSIS_VECTAB_VIRTUAL_HEADER_FILE
#else
#define NVIC_SetVector __NVIC_SetVector
#define NVIC_GetVector __NVIC_GetVector
#endif /* (CMSIS_VECTAB_VIRTUAL) */
#define NVIC_USER_IRQ_OFFSET 16
/* The following EXC_RETURN values are saved to the LR on exception entry */
#define EXC_RETURN_HANDLER (0xFFFFFFF1UL) /* return to Handler mode, uses MSP after return */
#define EXC_RETURN_THREAD_MSP (0xFFFFFFF9UL) /* return to Thread mode, uses MSP after return */
#define EXC_RETURN_THREAD_PSP (0xFFFFFFFDUL) /* return to Thread mode, uses PSP after return */
/* Interrupt Priorities are WORD accessible only under Armv6-M */
/* The following MACROS handle generation of the register offset and byte masks */
#define _BIT_SHIFT(IRQn) ( ((((uint32_t)(int32_t)(IRQn)) ) & 0x03UL) * 8UL)
#define _SHP_IDX(IRQn) ( (((((uint32_t)(int32_t)(IRQn)) & 0x0FUL)-8UL) >> 2UL) )
#define _IP_IDX(IRQn) ( (((uint32_t)(int32_t)(IRQn)) >> 2UL) )
#define __NVIC_SetPriorityGrouping(X) (void)(X)
#define __NVIC_GetPriorityGrouping() (0U)
/**
\brief Enable Interrupt
\details Enables a device specific interrupt in the NVIC interrupt controller.
\param [in] IRQn Device specific interrupt number.
\note IRQn must not be negative.
*/
__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
__COMPILER_BARRIER();
NVIC->ISER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
__COMPILER_BARRIER();
}
}
/**
\brief Get Interrupt Enable status
\details Returns a device specific interrupt enable status from the NVIC interrupt controller.
\param [in] IRQn Device specific interrupt number.
\return 0 Interrupt is not enabled.
\return 1 Interrupt is enabled.
\note IRQn must not be negative.
*/
__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
return((uint32_t)(((NVIC->ISER[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
}
else
{
return(0U);
}
}
/**
\brief Disable Interrupt
\details Disables a device specific interrupt in the NVIC interrupt controller.
\param [in] IRQn Device specific interrupt number.
\note IRQn must not be negative.
*/
__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
NVIC->ICER[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
__DSB();
__ISB();
}
}
/**
\brief Get Pending Interrupt
\details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt.
\param [in] IRQn Device specific interrupt number.
\return 0 Interrupt status is not pending.
\return 1 Interrupt status is pending.
\note IRQn must not be negative.
*/
__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
return((uint32_t)(((NVIC->ISPR[0U] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL));
}
else
{
return(0U);
}
}
/**
\brief Set Pending Interrupt
\details Sets the pending bit of a device specific interrupt in the NVIC pending register.
\param [in] IRQn Device specific interrupt number.
\note IRQn must not be negative.
*/
__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
NVIC->ISPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
}
}
/**
\brief Clear Pending Interrupt
\details Clears the pending bit of a device specific interrupt in the NVIC pending register.
\param [in] IRQn Device specific interrupt number.
\note IRQn must not be negative.
*/
__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
NVIC->ICPR[0U] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
}
}
/**
\brief Set Interrupt Priority
\details Sets the priority of a device specific interrupt or a processor exception.
The interrupt number can be positive to specify a device specific interrupt,
or negative to specify a processor exception.
\param [in] IRQn Interrupt number.
\param [in] priority Priority to set.
\note The priority cannot be set for every processor exception.
*/
__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority)
{
if ((int32_t)(IRQn) >= 0)
{
NVIC->IP[_IP_IDX(IRQn)] = ((uint32_t)(NVIC->IP[_IP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
(((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
}
else
{
SCB->SHP[_SHP_IDX(IRQn)] = ((uint32_t)(SCB->SHP[_SHP_IDX(IRQn)] & ~(0xFFUL << _BIT_SHIFT(IRQn))) |
(((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL) << _BIT_SHIFT(IRQn)));
}
}
/**
\brief Get Interrupt Priority
\details Reads the priority of a device specific interrupt or a processor exception.
The interrupt number can be positive to specify a device specific interrupt,
or negative to specify a processor exception.
\param [in] IRQn Interrupt number.
\return Interrupt Priority.
Value is aligned automatically to the implemented priority bits of the microcontroller.
*/
__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn)
{
if ((int32_t)(IRQn) >= 0)
{
return((uint32_t)(((NVIC->IP[ _IP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
}
else
{
return((uint32_t)(((SCB->SHP[_SHP_IDX(IRQn)] >> _BIT_SHIFT(IRQn) ) & (uint32_t)0xFFUL) >> (8U - __NVIC_PRIO_BITS)));
}
}
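/**
  Usage sketch (illustrative): typical bring-up of a device interrupt with the
  NVIC functions above. MyDevice_IRQn is a hypothetical device-specific
  interrupt number (>= 0) from the device header.
  \code
  NVIC_SetPriority(MyDevice_IRQn, 1UL);  // priority within __NVIC_PRIO_BITS range
  NVIC_ClearPendingIRQ(MyDevice_IRQn);   // discard any stale pending state
  NVIC_EnableIRQ(MyDevice_IRQn);         // enable the interrupt
  \endcode
*/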
/**
\brief Encode Priority
\details Encodes the priority for an interrupt with the given priority group,
preemptive priority value, and subpriority value.
In case of a conflict between priority grouping and available
priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set.
\param [in] PriorityGroup Used priority group.
\param [in] PreemptPriority Preemptive priority value (starting from 0).
\param [in] SubPriority Subpriority value (starting from 0).
\return Encoded priority. Value can be used in the function \ref NVIC_SetPriority().
*/
__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority)
{
uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
uint32_t PreemptPriorityBits;
uint32_t SubPriorityBits;
PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
return (
((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) |
((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL)))
);
}
/**
\brief Decode Priority
\details Decodes an interrupt priority value with a given priority group to
preemptive priority value and subpriority value.
In case of a conflict between priority grouping and available
priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set.
\param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority().
\param [in] PriorityGroup Used priority group.
\param [out] pPreemptPriority Preemptive priority value (starting from 0).
\param [out] pSubPriority Subpriority value (starting from 0).
*/
__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority)
{
uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */
uint32_t PreemptPriorityBits;
uint32_t SubPriorityBits;
PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp);
SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS));
*pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL);
*pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL);
}
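/**
  Usage sketch (illustrative): round-trip a priority through the encode/decode
  helpers. Armv6-M implements no priority grouping, so group 0 is used here;
  MyDevice_IRQn is a hypothetical device interrupt number.
  \code
  uint32_t prio = NVIC_EncodePriority(0UL, 2UL, 0UL);  // preempt level 2, no subpriority
  NVIC_SetPriority(MyDevice_IRQn, prio);
  uint32_t pre, sub;
  NVIC_DecodePriority(NVIC_GetPriority(MyDevice_IRQn), 0UL, &pre, &sub);
  \endcode
*/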
/**
\brief Set Interrupt Vector
\details Sets an interrupt vector in the SRAM-based interrupt vector table.
The interrupt number can be positive to specify a device specific interrupt,
or negative to specify a processor exception.
Address 0 must be mapped to SRAM.
\param [in] IRQn Interrupt number
\param [in] vector Address of interrupt handler function
*/
__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
{
uint32_t *vectors = (uint32_t *)0x0U;
vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector;
/* ARM Application Note 321 states that the M1 does not require the architectural barrier */
}
/**
\brief Get Interrupt Vector
\details Reads an interrupt vector from the interrupt vector table.
The interrupt number can be positive to specify a device specific interrupt,
or negative to specify a processor exception.
\param [in] IRQn Interrupt number.
\return Address of interrupt handler function
*/
__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn)
{
uint32_t *vectors = (uint32_t *)0x0U;
return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET];
}
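/**
  Usage sketch (illustrative): swap an interrupt handler at run time. This
  requires the vector table at address 0 to be mapped to SRAM; MyDevice_IRQn
  and MyDevice_Handler are hypothetical names.
  \code
  uint32_t previous = NVIC_GetVector(MyDevice_IRQn);          // remember old handler
  NVIC_SetVector(MyDevice_IRQn, (uint32_t)MyDevice_Handler);  // install new handler
  \endcode
*/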
/**
\brief System Reset
\details Initiates a system reset request to reset the MCU.
*/
__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void)
{
__DSB(); /* Ensure all outstanding memory accesses, including
buffered writes, are completed before reset */
SCB->AIRCR = ((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) |
SCB_AIRCR_SYSRESETREQ_Msk);
__DSB(); /* Ensure completion of memory access */
for(;;) /* wait until reset */
{
__NOP();
}
}
/*@} end of CMSIS_Core_NVICFunctions */
/* ########################## FPU functions #################################### */
/**
\ingroup CMSIS_Core_FunctionInterface
\defgroup CMSIS_Core_FpuFunctions FPU Functions
\brief Function that provides FPU type.
@{
*/
/**
\brief Get FPU type
\details Returns the FPU type.
\returns
- \b 0: No FPU
- \b 1: Single precision FPU
- \b 2: Double + Single precision FPU
*/
__STATIC_INLINE uint32_t SCB_GetFPUType(void)
{
return 0U; /* No FPU */
}
/*@} end of CMSIS_Core_FpuFunctions */
/* ################################## SysTick function ############################################ */
/**
\ingroup CMSIS_Core_FunctionInterface
\defgroup CMSIS_Core_SysTickFunctions SysTick Functions
\brief Functions that configure the System Tick Timer.
@{
*/
#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U)
/**
\brief System Tick Configuration
\details Initializes the System Timer and its interrupt, and starts the System Tick Timer.
Counter is in free running mode to generate periodic interrupts.
\param [in] ticks Number of ticks between two interrupts.
\return 0 Function succeeded.
\return 1 Function failed.
\note When the variable <b>__Vendor_SysTickConfig</b> is set to 1, then the
function <b>SysTick_Config</b> is not included. In this case, the file <b><i>device</i>.h</b>
must contain a vendor-specific implementation of this function.
*/
__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks)
{
if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk)
{
return (1UL); /* Reload value impossible */
}
SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */
NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */
SysTick->VAL = 0UL; /* Load the SysTick Counter Value */
SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |
SysTick_CTRL_TICKINT_Msk |
SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */
return (0UL); /* Function successful */
}
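/**
  Usage sketch (illustrative): configure a 1 ms system tick, assuming the CMSIS
  variable SystemCoreClock holds the current core clock frequency in Hz.
  \code
  if (SysTick_Config(SystemCoreClock / 1000UL) != 0UL)
  {
    // reload value does not fit into the 24-bit LOAD register: handle error
  }
  \endcode
*/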
#endif
/*@} end of CMSIS_Core_SysTickFunctions */
#ifdef __cplusplus
}
#endif
#endif /* __CORE_CM1_H_DEPENDANT */
#endif /* __CMSIS_GENERIC */

@ -0,0 +1,275 @@
/******************************************************************************
* @file mpu_armv7.h
* @brief CMSIS MPU API for Armv7-M MPU
* @version V5.1.1
* @date 10. February 2020
******************************************************************************/
/*
* Copyright (c) 2017-2020 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef ARM_MPU_ARMV7_H
#define ARM_MPU_ARMV7_H
#define ARM_MPU_REGION_SIZE_32B ((uint8_t)0x04U) //!< MPU Region Size 32 Bytes
#define ARM_MPU_REGION_SIZE_64B ((uint8_t)0x05U) //!< MPU Region Size 64 Bytes
#define ARM_MPU_REGION_SIZE_128B ((uint8_t)0x06U) //!< MPU Region Size 128 Bytes
#define ARM_MPU_REGION_SIZE_256B ((uint8_t)0x07U) //!< MPU Region Size 256 Bytes
#define ARM_MPU_REGION_SIZE_512B ((uint8_t)0x08U) //!< MPU Region Size 512 Bytes
#define ARM_MPU_REGION_SIZE_1KB ((uint8_t)0x09U) //!< MPU Region Size 1 KByte
#define ARM_MPU_REGION_SIZE_2KB ((uint8_t)0x0AU) //!< MPU Region Size 2 KBytes
#define ARM_MPU_REGION_SIZE_4KB ((uint8_t)0x0BU) //!< MPU Region Size 4 KBytes
#define ARM_MPU_REGION_SIZE_8KB ((uint8_t)0x0CU) //!< MPU Region Size 8 KBytes
#define ARM_MPU_REGION_SIZE_16KB ((uint8_t)0x0DU) //!< MPU Region Size 16 KBytes
#define ARM_MPU_REGION_SIZE_32KB ((uint8_t)0x0EU) //!< MPU Region Size 32 KBytes
#define ARM_MPU_REGION_SIZE_64KB ((uint8_t)0x0FU) //!< MPU Region Size 64 KBytes
#define ARM_MPU_REGION_SIZE_128KB ((uint8_t)0x10U) //!< MPU Region Size 128 KBytes
#define ARM_MPU_REGION_SIZE_256KB ((uint8_t)0x11U) //!< MPU Region Size 256 KBytes
#define ARM_MPU_REGION_SIZE_512KB ((uint8_t)0x12U) //!< MPU Region Size 512 KBytes
#define ARM_MPU_REGION_SIZE_1MB ((uint8_t)0x13U) //!< MPU Region Size 1 MByte
#define ARM_MPU_REGION_SIZE_2MB ((uint8_t)0x14U) //!< MPU Region Size 2 MBytes
#define ARM_MPU_REGION_SIZE_4MB ((uint8_t)0x15U) //!< MPU Region Size 4 MBytes
#define ARM_MPU_REGION_SIZE_8MB ((uint8_t)0x16U) //!< MPU Region Size 8 MBytes
#define ARM_MPU_REGION_SIZE_16MB ((uint8_t)0x17U) //!< MPU Region Size 16 MBytes
#define ARM_MPU_REGION_SIZE_32MB ((uint8_t)0x18U) //!< MPU Region Size 32 MBytes
#define ARM_MPU_REGION_SIZE_64MB ((uint8_t)0x19U) //!< MPU Region Size 64 MBytes
#define ARM_MPU_REGION_SIZE_128MB ((uint8_t)0x1AU) //!< MPU Region Size 128 MBytes
#define ARM_MPU_REGION_SIZE_256MB ((uint8_t)0x1BU) //!< MPU Region Size 256 MBytes
#define ARM_MPU_REGION_SIZE_512MB ((uint8_t)0x1CU) //!< MPU Region Size 512 MBytes
#define ARM_MPU_REGION_SIZE_1GB ((uint8_t)0x1DU) //!< MPU Region Size 1 GByte
#define ARM_MPU_REGION_SIZE_2GB ((uint8_t)0x1EU) //!< MPU Region Size 2 GBytes
#define ARM_MPU_REGION_SIZE_4GB ((uint8_t)0x1FU) //!< MPU Region Size 4 GBytes
#define ARM_MPU_AP_NONE 0U //!< MPU Access Permission no access
#define ARM_MPU_AP_PRIV 1U //!< MPU Access Permission privileged access only
#define ARM_MPU_AP_URO 2U //!< MPU Access Permission unprivileged access read-only
#define ARM_MPU_AP_FULL 3U //!< MPU Access Permission full access
#define ARM_MPU_AP_PRO 5U //!< MPU Access Permission privileged access read-only
#define ARM_MPU_AP_RO 6U //!< MPU Access Permission read-only access
/** MPU Region Base Address Register Value
*
* \param Region The region to be configured, number 0 to 15.
* \param BaseAddress The base address for the region.
*/
#define ARM_MPU_RBAR(Region, BaseAddress) \
(((BaseAddress) & MPU_RBAR_ADDR_Msk) | \
((Region) & MPU_RBAR_REGION_Msk) | \
(MPU_RBAR_VALID_Msk))
/**
* MPU Memory Access Attributes
*
* \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral.
* \param IsShareable Region is shareable between multiple bus masters.
* \param IsCacheable Region is cacheable, i.e. its value may be kept in cache.
* \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy.
*/
#define ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable) \
((((TypeExtField) << MPU_RASR_TEX_Pos) & MPU_RASR_TEX_Msk) | \
(((IsShareable) << MPU_RASR_S_Pos) & MPU_RASR_S_Msk) | \
(((IsCacheable) << MPU_RASR_C_Pos) & MPU_RASR_C_Msk) | \
(((IsBufferable) << MPU_RASR_B_Pos) & MPU_RASR_B_Msk))
/**
* MPU Region Attribute and Size Register Value
*
* \param DisableExec Instruction access disable bit, 1= disable instruction fetches.
* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode.
* \param AccessAttributes Memory access attributes, see \ref ARM_MPU_ACCESS_.
* \param SubRegionDisable Sub-region disable field.
* \param Size Region size of the region to be configured, for example 4K, 8K.
*/
#define ARM_MPU_RASR_EX(DisableExec, AccessPermission, AccessAttributes, SubRegionDisable, Size) \
((((DisableExec) << MPU_RASR_XN_Pos) & MPU_RASR_XN_Msk) | \
(((AccessPermission) << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | \
(((AccessAttributes) & (MPU_RASR_TEX_Msk | MPU_RASR_S_Msk | MPU_RASR_C_Msk | MPU_RASR_B_Msk))) | \
(((SubRegionDisable) << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk) | \
(((Size) << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk) | \
(((MPU_RASR_ENABLE_Msk))))
/**
* MPU Region Attribute and Size Register Value
*
* \param DisableExec Instruction access disable bit, 1= disable instruction fetches.
* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode.
* \param TypeExtField Type extension field, allows you to configure memory access type, for example strongly ordered, peripheral.
* \param IsShareable Region is shareable between multiple bus masters.
* \param IsCacheable Region is cacheable, i.e. its value may be kept in cache.
* \param IsBufferable Region is bufferable, i.e. using write-back caching. Cacheable but non-bufferable regions use write-through policy.
* \param SubRegionDisable Sub-region disable field.
* \param Size Region size of the region to be configured, for example 4K, 8K.
*/
#define ARM_MPU_RASR(DisableExec, AccessPermission, TypeExtField, IsShareable, IsCacheable, IsBufferable, SubRegionDisable, Size) \
ARM_MPU_RASR_EX(DisableExec, AccessPermission, ARM_MPU_ACCESS_(TypeExtField, IsShareable, IsCacheable, IsBufferable), SubRegionDisable, Size)
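/** Usage sketch (illustrative): register values for a 64 KByte, full-access,
 * executable normal-memory region; the base address 0x20000000U is an
 * example value.
 * \code
 * uint32_t rbar = ARM_MPU_RBAR(0U, 0x20000000U);              // region 0
 * uint32_t rasr = ARM_MPU_RASR(0U, ARM_MPU_AP_FULL,           // executable, full access
 *                              0U, 1U, 1U, 1U,                // TEX=0, shareable, cacheable, bufferable
 *                              0U, ARM_MPU_REGION_SIZE_64KB); // all subregions enabled
 * \endcode
 */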
/**
* MPU Memory Access Attribute for strongly ordered memory.
* - TEX: 000b
* - Shareable
* - Non-cacheable
* - Non-bufferable
*/
#define ARM_MPU_ACCESS_ORDERED ARM_MPU_ACCESS_(0U, 1U, 0U, 0U)
/**
* MPU Memory Access Attribute for device memory.
* - TEX: 000b (if shareable) or 010b (if non-shareable)
* - Shareable or non-shareable
* - Non-cacheable
* - Bufferable (if shareable) or non-bufferable (if non-shareable)
*
* \param IsShareable Configures the device memory as shareable or non-shareable.
*/
#define ARM_MPU_ACCESS_DEVICE(IsShareable) ((IsShareable) ? ARM_MPU_ACCESS_(0U, 1U, 0U, 1U) : ARM_MPU_ACCESS_(2U, 0U, 0U, 0U))
/**
* MPU Memory Access Attribute for normal memory.
* - TEX: 1BBb (reflecting outer cacheability rules)
* - Shareable or non-shareable
* - Cacheable or non-cacheable (reflecting inner cacheability rules)
* - Bufferable or non-bufferable (reflecting inner cacheability rules)
*
* \param OuterCp Configures the outer cache policy.
* \param InnerCp Configures the inner cache policy.
* \param IsShareable Configures the memory as shareable or non-shareable.
*/
#define ARM_MPU_ACCESS_NORMAL(OuterCp, InnerCp, IsShareable) ARM_MPU_ACCESS_((4U | (OuterCp)), IsShareable, ((InnerCp) >> 1U), ((InnerCp) & 1U))
/**
* MPU Memory Access Attribute non-cacheable policy.
*/
#define ARM_MPU_CACHEP_NOCACHE 0U
/**
* MPU Memory Access Attribute write-back, write and read allocate policy.
*/
#define ARM_MPU_CACHEP_WB_WRA 1U
/**
* MPU Memory Access Attribute write-through, no write allocate policy.
*/
#define ARM_MPU_CACHEP_WT_NWA 2U
/**
* MPU Memory Access Attribute write-back, no write allocate policy.
*/
#define ARM_MPU_CACHEP_WB_NWA 3U
/**
* Struct for a single MPU Region
*/
typedef struct {
uint32_t RBAR; //!< The region base address register value (RBAR)
uint32_t RASR; //!< The region attribute and size register value (RASR) \ref MPU_RASR
} ARM_MPU_Region_t;
/** Enable the MPU.
* \param MPU_Control Default access permissions for unconfigured regions.
*/
__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control)
{
__DMB();
MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk;
#ifdef SCB_SHCSR_MEMFAULTENA_Msk
SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;
#endif
__DSB();
__ISB();
}
/** Disable the MPU.
*/
__STATIC_INLINE void ARM_MPU_Disable(void)
{
__DMB();
#ifdef SCB_SHCSR_MEMFAULTENA_Msk
SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;
#endif
MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk;
__DSB();
__ISB();
}
/** Clear and disable the given MPU region.
* \param rnr Region number to be cleared.
*/
__STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr)
{
MPU->RNR = rnr;
MPU->RASR = 0U;
}
/** Configure an MPU region.
* \param rbar Value for RBAR register.
* \param rasr Value for RASR register.
*/
__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rbar, uint32_t rasr)
{
MPU->RBAR = rbar;
MPU->RASR = rasr;
}
/** Configure the given MPU region.
* \param rnr Region number to be configured.
* \param rbar Value for RBAR register.
* \param rasr Value for RASR register.
*/
__STATIC_INLINE void ARM_MPU_SetRegionEx(uint32_t rnr, uint32_t rbar, uint32_t rasr)
{
MPU->RNR = rnr;
MPU->RBAR = rbar;
MPU->RASR = rasr;
}
/** Memcopy with strictly ordered memory access, e.g. for register targets.
* \param dst Destination data is copied to.
* \param src Source data is copied from.
* \param len Number of data words to be copied.
*/
__STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len)
{
uint32_t i;
for (i = 0U; i < len; ++i)
{
dst[i] = src[i];
}
}
/** Load the given number of MPU regions from a table.
* \param table Pointer to the MPU configuration table.
* \param cnt Number of regions to be configured.
*/
__STATIC_INLINE void ARM_MPU_Load(ARM_MPU_Region_t const* table, uint32_t cnt)
{
const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U;
while (cnt > MPU_TYPE_RALIASES) {
ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), MPU_TYPE_RALIASES*rowWordSize);
table += MPU_TYPE_RALIASES;
cnt -= MPU_TYPE_RALIASES;
}
ARM_MPU_OrderedMemcpy(&(MPU->RBAR), &(table->RBAR), cnt*rowWordSize);
}
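/** Usage sketch (illustrative): table-driven setup with ARM_MPU_Load(), then
 * enable the MPU with the default memory map as background for privileged
 * accesses. The region contents are example values; MPU_CTRL_PRIVDEFENA_Msk
 * comes from the device's core header.
 * \code
 * static const ARM_MPU_Region_t mpuTable[] = {
 *   { ARM_MPU_RBAR(0U, 0x20000000U),
 *     ARM_MPU_RASR(0U, ARM_MPU_AP_FULL, 0U, 1U, 1U, 1U, 0U, ARM_MPU_REGION_SIZE_64KB) }
 * };
 * ARM_MPU_Load(mpuTable, 1U);
 * ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
 * \endcode
 */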
#endif

@ -0,0 +1,352 @@
/******************************************************************************
* @file mpu_armv8.h
* @brief CMSIS MPU API for Armv8-M and Armv8.1-M MPU
* @version V5.1.2
* @date 10. February 2020
******************************************************************************/
/*
* Copyright (c) 2017-2020 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef ARM_MPU_ARMV8_H
#define ARM_MPU_ARMV8_H
/** \brief Attribute for device memory (outer only) */
#define ARM_MPU_ATTR_DEVICE ( 0U )
/** \brief Attribute for non-cacheable, normal memory */
#define ARM_MPU_ATTR_NON_CACHEABLE ( 4U )
/** \brief Attribute for normal memory (outer and inner)
* \param NT Non-Transient: Set to 1 for non-transient data.
* \param WB Write-Back: Set to 1 to use write-back update policy.
* \param RA Read Allocation: Set to 1 to use cache allocation on read miss.
* \param WA Write Allocation: Set to 1 to use cache allocation on write miss.
*/
#define ARM_MPU_ATTR_MEMORY_(NT, WB, RA, WA) \
((((NT) & 1U) << 3U) | (((WB) & 1U) << 2U) | (((RA) & 1U) << 1U) | ((WA) & 1U))
/** \brief Device memory type non Gathering, non Re-ordering, non Early Write Acknowledgement */
#define ARM_MPU_ATTR_DEVICE_nGnRnE (0U)
/** \brief Device memory type non Gathering, non Re-ordering, Early Write Acknowledgement */
#define ARM_MPU_ATTR_DEVICE_nGnRE (1U)
/** \brief Device memory type non Gathering, Re-ordering, Early Write Acknowledgement */
#define ARM_MPU_ATTR_DEVICE_nGRE (2U)
/** \brief Device memory type Gathering, Re-ordering, Early Write Acknowledgement */
#define ARM_MPU_ATTR_DEVICE_GRE (3U)
/** \brief Memory Attribute
* \param O Outer memory attributes
* \param I O == ARM_MPU_ATTR_DEVICE: Device memory attributes, else: Inner memory attributes
*/
#define ARM_MPU_ATTR(O, I) ((((O) & 0xFU) << 4U) | ((((O) & 0xFU) != 0U) ? ((I) & 0xFU) : (((I) & 0x3U) << 2U)))
/** \brief Normal memory non-shareable */
#define ARM_MPU_SH_NON (0U)
/** \brief Normal memory outer shareable */
#define ARM_MPU_SH_OUTER (2U)
/** \brief Normal memory inner shareable */
#define ARM_MPU_SH_INNER (3U)
/** \brief Memory access permissions
* \param RO Read-Only: Set to 1 for read-only memory.
* \param NP Non-Privileged: Set to 1 for non-privileged memory.
*/
#define ARM_MPU_AP_(RO, NP) ((((RO) & 1U) << 1U) | ((NP) & 1U))
/** \brief Region Base Address Register value
* \param BASE The base address bits [31:5] of a memory region. The value is zero-extended; the effective address is 32-byte aligned.
* \param SH Defines the Shareability domain for this memory region.
* \param RO Read-Only: Set to 1 for a read-only memory region.
* \param NP Non-Privileged: Set to 1 for a non-privileged memory region.
* \param XN eXecute Never: Set to 1 for a non-executable memory region.
*/
#define ARM_MPU_RBAR(BASE, SH, RO, NP, XN) \
(((BASE) & MPU_RBAR_BASE_Msk) | \
(((SH) << MPU_RBAR_SH_Pos) & MPU_RBAR_SH_Msk) | \
((ARM_MPU_AP_(RO, NP) << MPU_RBAR_AP_Pos) & MPU_RBAR_AP_Msk) | \
(((XN) << MPU_RBAR_XN_Pos) & MPU_RBAR_XN_Msk))
/** \brief Region Limit Address Register value
* \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended.
* \param IDX The attribute index to be associated with this memory region.
*/
#define ARM_MPU_RLAR(LIMIT, IDX) \
(((LIMIT) & MPU_RLAR_LIMIT_Msk) | \
(((IDX) << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \
(MPU_RLAR_EN_Msk))
#if defined(MPU_RLAR_PXN_Pos)
/** \brief Region Limit Address Register with PXN value
* \param LIMIT The limit address bits [31:5] for this memory region. The value is one extended.
* \param PXN Privileged execute never. Defines whether code can be executed from this privileged region.
* \param IDX The attribute index to be associated with this memory region.
*/
#define ARM_MPU_RLAR_PXN(LIMIT, PXN, IDX) \
(((LIMIT) & MPU_RLAR_LIMIT_Msk) | \
(((PXN) << MPU_RLAR_PXN_Pos) & MPU_RLAR_PXN_Msk) | \
(((IDX) << MPU_RLAR_AttrIndx_Pos) & MPU_RLAR_AttrIndx_Msk) | \
(MPU_RLAR_EN_Msk))
#endif
/**
* Struct for a single MPU Region
*/
typedef struct {
uint32_t RBAR; /*!< Region Base Address Register value */
uint32_t RLAR; /*!< Region Limit Address Register value */
} ARM_MPU_Region_t;
/** Enable the MPU.
* \param MPU_Control Default access permissions for unconfigured regions.
*/
__STATIC_INLINE void ARM_MPU_Enable(uint32_t MPU_Control)
{
__DMB();
MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk;
#ifdef SCB_SHCSR_MEMFAULTENA_Msk
SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;
#endif
__DSB();
__ISB();
}
/** Disable the MPU.
*/
__STATIC_INLINE void ARM_MPU_Disable(void)
{
__DMB();
#ifdef SCB_SHCSR_MEMFAULTENA_Msk
SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;
#endif
MPU->CTRL &= ~MPU_CTRL_ENABLE_Msk;
__DSB();
__ISB();
}
#ifdef MPU_NS
/** Enable the Non-secure MPU.
* \param MPU_Control Default access permissions for unconfigured regions.
*/
__STATIC_INLINE void ARM_MPU_Enable_NS(uint32_t MPU_Control)
{
__DMB();
MPU_NS->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk;
#ifdef SCB_SHCSR_MEMFAULTENA_Msk
SCB_NS->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;
#endif
__DSB();
__ISB();
}
/** Disable the Non-secure MPU.
*/
__STATIC_INLINE void ARM_MPU_Disable_NS(void)
{
__DMB();
#ifdef SCB_SHCSR_MEMFAULTENA_Msk
SCB_NS->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;
#endif
MPU_NS->CTRL &= ~MPU_CTRL_ENABLE_Msk;
__DSB();
__ISB();
}
#endif
/** Set the memory attribute encoding to the given MPU.
* \param mpu Pointer to the MPU to be configured.
* \param idx The attribute index to be set [0-7]
* \param attr The attribute value to be set.
*/
__STATIC_INLINE void ARM_MPU_SetMemAttrEx(MPU_Type* mpu, uint8_t idx, uint8_t attr)
{
const uint8_t reg = idx / 4U;
const uint32_t pos = ((idx % 4U) * 8U);
const uint32_t mask = 0xFFU << pos;
if (reg >= (sizeof(mpu->MAIR) / sizeof(mpu->MAIR[0]))) {
return; // invalid index
}
mpu->MAIR[reg] = ((mpu->MAIR[reg] & ~mask) | ((attr << pos) & mask));
}
/** Set the memory attribute encoding.
* \param idx The attribute index to be set [0-7]
* \param attr The attribute value to be set.
*/
__STATIC_INLINE void ARM_MPU_SetMemAttr(uint8_t idx, uint8_t attr)
{
ARM_MPU_SetMemAttrEx(MPU, idx, attr);
}
#ifdef MPU_NS
/** Set the memory attribute encoding to the Non-secure MPU.
* \param idx The attribute index to be set [0-7]
* \param attr The attribute value to be set.
*/
__STATIC_INLINE void ARM_MPU_SetMemAttr_NS(uint8_t idx, uint8_t attr)
{
ARM_MPU_SetMemAttrEx(MPU_NS, idx, attr);
}
#endif
/** Clear and disable the given MPU region of the given MPU.
* \param mpu Pointer to MPU to be used.
* \param rnr Region number to be cleared.
*/
__STATIC_INLINE void ARM_MPU_ClrRegionEx(MPU_Type* mpu, uint32_t rnr)
{
mpu->RNR = rnr;
mpu->RLAR = 0U;
}
/** Clear and disable the given MPU region.
* \param rnr Region number to be cleared.
*/
__STATIC_INLINE void ARM_MPU_ClrRegion(uint32_t rnr)
{
ARM_MPU_ClrRegionEx(MPU, rnr);
}
#ifdef MPU_NS
/** Clear and disable the given Non-secure MPU region.
* \param rnr Region number to be cleared.
*/
__STATIC_INLINE void ARM_MPU_ClrRegion_NS(uint32_t rnr)
{
ARM_MPU_ClrRegionEx(MPU_NS, rnr);
}
#endif
/** Configure the given MPU region of the given MPU.
* \param mpu Pointer to MPU to be used.
* \param rnr Region number to be configured.
* \param rbar Value for RBAR register.
* \param rlar Value for RLAR register.
*/
__STATIC_INLINE void ARM_MPU_SetRegionEx(MPU_Type* mpu, uint32_t rnr, uint32_t rbar, uint32_t rlar)
{
mpu->RNR = rnr;
mpu->RBAR = rbar;
mpu->RLAR = rlar;
}
/** Configure the given MPU region.
* \param rnr Region number to be configured.
* \param rbar Value for RBAR register.
* \param rlar Value for RLAR register.
*/
__STATIC_INLINE void ARM_MPU_SetRegion(uint32_t rnr, uint32_t rbar, uint32_t rlar)
{
ARM_MPU_SetRegionEx(MPU, rnr, rbar, rlar);
}
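/** Usage sketch (illustrative): configure one region as write-back cacheable
 * normal memory. Attribute index 0 is programmed first and then referenced
 * from RLAR; the address range is an example value.
 * \code
 * ARM_MPU_SetMemAttr(0U, ARM_MPU_ATTR(
 *   ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U),                  // outer: non-transient WB, RA, WA
 *   ARM_MPU_ATTR_MEMORY_(1U, 1U, 1U, 1U)));                // inner: non-transient WB, RA, WA
 * ARM_MPU_SetRegion(0U,
 *   ARM_MPU_RBAR(0x20000000U, ARM_MPU_SH_NON, 0U, 1U, 0U), // RW, non-privileged, executable
 *   ARM_MPU_RLAR(0x2000FFFFU, 0U));                        // limit address, attribute index 0
 * ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk);
 * \endcode
 */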
#ifdef MPU_NS
/** Configure the given Non-secure MPU region.
* \param rnr Region number to be configured.
* \param rbar Value for RBAR register.
* \param rlar Value for RLAR register.
*/
__STATIC_INLINE void ARM_MPU_SetRegion_NS(uint32_t rnr, uint32_t rbar, uint32_t rlar)
{
ARM_MPU_SetRegionEx(MPU_NS, rnr, rbar, rlar);
}
#endif
/** Memcopy with strictly ordered memory access, e.g. for register targets.
* \param dst Destination data is copied to.
* \param src Source data is copied from.
* \param len Number of data words to be copied.
*/
__STATIC_INLINE void ARM_MPU_OrderedMemcpy(volatile uint32_t* dst, const uint32_t* __RESTRICT src, uint32_t len)
{
uint32_t i;
for (i = 0U; i < len; ++i)
{
dst[i] = src[i];
}
}
/** Load the given number of MPU regions from a table to the given MPU.
* \param mpu Pointer to the MPU registers to be used.
* \param rnr First region number to be configured.
* \param table Pointer to the MPU configuration table.
* \param cnt Number of regions to be configured.
*/
__STATIC_INLINE void ARM_MPU_LoadEx(MPU_Type* mpu, uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt)
{
const uint32_t rowWordSize = sizeof(ARM_MPU_Region_t)/4U;
if (cnt == 1U) {
mpu->RNR = rnr;
ARM_MPU_OrderedMemcpy(&(mpu->RBAR), &(table->RBAR), rowWordSize);
} else {
uint32_t rnrBase = rnr & ~(MPU_TYPE_RALIASES-1U);
uint32_t rnrOffset = rnr % MPU_TYPE_RALIASES;
mpu->RNR = rnrBase;
while ((rnrOffset + cnt) > MPU_TYPE_RALIASES) {
uint32_t c = MPU_TYPE_RALIASES - rnrOffset;
ARM_MPU_OrderedMemcpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), c*rowWordSize);
table += c;
cnt -= c;
rnrOffset = 0U;
rnrBase += MPU_TYPE_RALIASES;
mpu->RNR = rnrBase;
}
ARM_MPU_OrderedMemcpy(&(mpu->RBAR)+(rnrOffset*2U), &(table->RBAR), cnt*rowWordSize);
}
}
/** Load the given number of MPU regions from a table.
* \param rnr First region number to be configured.
* \param table Pointer to the MPU configuration table.
* \param cnt Number of regions to be configured.
*/
__STATIC_INLINE void ARM_MPU_Load(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt)
{
ARM_MPU_LoadEx(MPU, rnr, table, cnt);
}
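/** Usage sketch (illustrative): load two consecutive regions starting at
 * region number 2, reusing attribute index 0 from the previous example.
 * \code
 * static const ARM_MPU_Region_t mpuTable[] = {
 *   { ARM_MPU_RBAR(0x20000000U, ARM_MPU_SH_NON, 0U, 1U, 0U), ARM_MPU_RLAR(0x2000FFFFU, 0U) },
 *   { ARM_MPU_RBAR(0x20010000U, ARM_MPU_SH_NON, 1U, 1U, 0U), ARM_MPU_RLAR(0x2001FFFFU, 0U) }
 * };
 * ARM_MPU_Load(2U, mpuTable, 2U);
 * \endcode
 */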
#ifdef MPU_NS
/** Load the given number of MPU regions from a table to the Non-secure MPU.
* \param rnr First region number to be configured.
* \param table Pointer to the MPU configuration table.
* \param cnt Number of regions to be configured.
*/
__STATIC_INLINE void ARM_MPU_Load_NS(uint32_t rnr, ARM_MPU_Region_t const* table, uint32_t cnt)
{
ARM_MPU_LoadEx(MPU_NS, rnr, table, cnt);
}
#endif
#endif

@ -0,0 +1,337 @@
/******************************************************************************
* @file pmu_armv8.h
* @brief CMSIS PMU API for Armv8.1-M PMU
* @version V1.0.0
* @date 24. March 2020
******************************************************************************/
/*
* Copyright (c) 2020 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef ARM_PMU_ARMV8_H
#define ARM_PMU_ARMV8_H
/**
* \brief PMU Events
* \note See the Armv8.1-M Architecture Reference Manual for full details on these PMU events.
* */
#define ARM_PMU_SW_INCR 0x0000 /*!< Software update to the PMU_SWINC register, architecturally executed and condition code check pass */
#define ARM_PMU_L1I_CACHE_REFILL 0x0001 /*!< L1 I-Cache refill */
#define ARM_PMU_L1D_CACHE_REFILL 0x0003 /*!< L1 D-Cache refill */
#define ARM_PMU_L1D_CACHE 0x0004 /*!< L1 D-Cache access */
#define ARM_PMU_LD_RETIRED 0x0006 /*!< Memory-reading instruction architecturally executed and condition code check pass */
#define ARM_PMU_ST_RETIRED 0x0007 /*!< Memory-writing instruction architecturally executed and condition code check pass */
#define ARM_PMU_INST_RETIRED 0x0008 /*!< Instruction architecturally executed */
#define ARM_PMU_EXC_TAKEN 0x0009 /*!< Exception entry */
#define ARM_PMU_EXC_RETURN 0x000A /*!< Exception return instruction architecturally executed and the condition code check pass */
#define ARM_PMU_PC_WRITE_RETIRED 0x000C /*!< Software change to the Program Counter (PC). Instruction is architecturally executed and condition code check pass */
#define ARM_PMU_BR_IMMED_RETIRED 0x000D /*!< Immediate branch architecturally executed */
#define ARM_PMU_BR_RETURN_RETIRED 0x000E /*!< Function return instruction architecturally executed and the condition code check pass */
#define ARM_PMU_UNALIGNED_LDST_RETIRED 0x000F /*!< Unaligned memory-reading or memory-writing instruction architecturally executed and condition code check pass */
#define ARM_PMU_BR_MIS_PRED 0x0010 /*!< Mispredicted or not predicted branch speculatively executed */
#define ARM_PMU_CPU_CYCLES 0x0011 /*!< Cycle */
#define ARM_PMU_BR_PRED 0x0012 /*!< Predictable branch speculatively executed */
#define ARM_PMU_MEM_ACCESS 0x0013 /*!< Data memory access */
#define ARM_PMU_L1I_CACHE 0x0014 /*!< Level 1 instruction cache access */
#define ARM_PMU_L1D_CACHE_WB 0x0015 /*!< Level 1 data cache write-back */
#define ARM_PMU_L2D_CACHE 0x0016 /*!< Level 2 data cache access */
#define ARM_PMU_L2D_CACHE_REFILL 0x0017 /*!< Level 2 data cache refill */
#define ARM_PMU_L2D_CACHE_WB 0x0018 /*!< Level 2 data cache write-back */
#define ARM_PMU_BUS_ACCESS 0x0019 /*!< Bus access */
#define ARM_PMU_MEMORY_ERROR 0x001A /*!< Local memory error */
#define ARM_PMU_INST_SPEC 0x001B /*!< Instruction speculatively executed */
#define ARM_PMU_BUS_CYCLES 0x001D /*!< Bus cycles */
#define ARM_PMU_CHAIN 0x001E /*!< For an odd numbered counter, increment when an overflow occurs on the preceding even-numbered counter on the same PE */
#define ARM_PMU_L1D_CACHE_ALLOCATE 0x001F /*!< Level 1 data cache allocation without refill */
#define ARM_PMU_L2D_CACHE_ALLOCATE 0x0020 /*!< Level 2 data cache allocation without refill */
#define ARM_PMU_BR_RETIRED 0x0021 /*!< Branch instruction architecturally executed */
#define ARM_PMU_BR_MIS_PRED_RETIRED 0x0022 /*!< Mispredicted branch instruction architecturally executed */
#define ARM_PMU_STALL_FRONTEND 0x0023 /*!< No operation issued because of the frontend */
#define ARM_PMU_STALL_BACKEND 0x0024 /*!< No operation issued because of the backend */
#define ARM_PMU_L2I_CACHE 0x0027 /*!< Level 2 instruction cache access */
#define ARM_PMU_L2I_CACHE_REFILL 0x0028 /*!< Level 2 instruction cache refill */
#define ARM_PMU_L3D_CACHE_ALLOCATE 0x0029 /*!< Level 3 data cache allocation without refill */
#define ARM_PMU_L3D_CACHE_REFILL 0x002A /*!< Level 3 data cache refill */
#define ARM_PMU_L3D_CACHE 0x002B /*!< Level 3 data cache access */
#define ARM_PMU_L3D_CACHE_WB 0x002C /*!< Level 3 data cache write-back */
#define ARM_PMU_LL_CACHE_RD 0x0036 /*!< Last level data cache read */
#define ARM_PMU_LL_CACHE_MISS_RD 0x0037 /*!< Last level data cache read miss */
#define ARM_PMU_L1D_CACHE_MISS_RD 0x0039 /*!< Level 1 data cache read miss */
#define ARM_PMU_OP_COMPLETE 0x003A /*!< Operation retired */
#define ARM_PMU_OP_SPEC 0x003B /*!< Operation speculatively executed */
#define ARM_PMU_STALL 0x003C /*!< Stall cycle for instruction or operation not sent for execution */
#define ARM_PMU_STALL_OP_BACKEND 0x003D /*!< Stall cycle for instruction or operation not sent for execution due to pipeline backend */
#define ARM_PMU_STALL_OP_FRONTEND 0x003E /*!< Stall cycle for instruction or operation not sent for execution due to pipeline frontend */
#define ARM_PMU_STALL_OP 0x003F /*!< Instruction or operation slots not occupied each cycle */
#define ARM_PMU_L1D_CACHE_RD 0x0040 /*!< Level 1 data cache read */
#define ARM_PMU_LE_RETIRED 0x0100 /*!< Loop end instruction executed */
#define ARM_PMU_LE_SPEC 0x0101 /*!< Loop end instruction speculatively executed */
#define ARM_PMU_BF_RETIRED 0x0104 /*!< Branch future instruction architecturally executed and condition code check pass */
#define ARM_PMU_BF_SPEC 0x0105 /*!< Branch future instruction speculatively executed and condition code check pass */
#define ARM_PMU_LE_CANCEL 0x0108 /*!< Loop end instruction not taken */
#define ARM_PMU_BF_CANCEL 0x0109 /*!< Branch future instruction not taken */
#define ARM_PMU_SE_CALL_S 0x0114 /*!< Call to secure function, resulting in Security state change */
#define ARM_PMU_SE_CALL_NS 0x0115 /*!< Call to non-secure function, resulting in Security state change */
#define ARM_PMU_DWT_CMPMATCH0 0x0118 /*!< DWT comparator 0 match */
#define ARM_PMU_DWT_CMPMATCH1 0x0119 /*!< DWT comparator 1 match */
#define ARM_PMU_DWT_CMPMATCH2 0x011A /*!< DWT comparator 2 match */
#define ARM_PMU_DWT_CMPMATCH3 0x011B /*!< DWT comparator 3 match */
#define ARM_PMU_MVE_INST_RETIRED 0x0200 /*!< MVE instruction architecturally executed */
#define ARM_PMU_MVE_INST_SPEC 0x0201 /*!< MVE instruction speculatively executed */
#define ARM_PMU_MVE_FP_RETIRED 0x0204 /*!< MVE floating-point instruction architecturally executed */
#define ARM_PMU_MVE_FP_SPEC 0x0205 /*!< MVE floating-point instruction speculatively executed */
#define ARM_PMU_MVE_FP_HP_RETIRED 0x0208 /*!< MVE half-precision floating-point instruction architecturally executed */
#define ARM_PMU_MVE_FP_HP_SPEC 0x0209 /*!< MVE half-precision floating-point instruction speculatively executed */
#define ARM_PMU_MVE_FP_SP_RETIRED 0x020C /*!< MVE single-precision floating-point instruction architecturally executed */
#define ARM_PMU_MVE_FP_SP_SPEC 0x020D /*!< MVE single-precision floating-point instruction speculatively executed */
#define ARM_PMU_MVE_FP_MAC_RETIRED 0x0214 /*!< MVE floating-point multiply or multiply-accumulate instruction architecturally executed */
#define ARM_PMU_MVE_FP_MAC_SPEC 0x0215 /*!< MVE floating-point multiply or multiply-accumulate instruction speculatively executed */
#define ARM_PMU_MVE_INT_RETIRED 0x0224 /*!< MVE integer instruction architecturally executed */
#define ARM_PMU_MVE_INT_SPEC 0x0225 /*!< MVE integer instruction speculatively executed */
#define ARM_PMU_MVE_INT_MAC_RETIRED 0x0228 /*!< MVE multiply or multiply-accumulate instruction architecturally executed */
#define ARM_PMU_MVE_INT_MAC_SPEC 0x0229 /*!< MVE multiply or multiply-accumulate instruction speculatively executed */
#define ARM_PMU_MVE_LDST_RETIRED 0x0238 /*!< MVE load or store instruction architecturally executed */
#define ARM_PMU_MVE_LDST_SPEC 0x0239 /*!< MVE load or store instruction speculatively executed */
#define ARM_PMU_MVE_LD_RETIRED 0x023C /*!< MVE load instruction architecturally executed */
#define ARM_PMU_MVE_LD_SPEC 0x023D /*!< MVE load instruction speculatively executed */
#define ARM_PMU_MVE_ST_RETIRED 0x0240 /*!< MVE store instruction architecturally executed */
#define ARM_PMU_MVE_ST_SPEC 0x0241 /*!< MVE store instruction speculatively executed */
#define ARM_PMU_MVE_LDST_CONTIG_RETIRED 0x0244 /*!< MVE contiguous load or store instruction architecturally executed */
#define ARM_PMU_MVE_LDST_CONTIG_SPEC 0x0245 /*!< MVE contiguous load or store instruction speculatively executed */
#define ARM_PMU_MVE_LD_CONTIG_RETIRED 0x0248 /*!< MVE contiguous load instruction architecturally executed */
#define ARM_PMU_MVE_LD_CONTIG_SPEC 0x0249 /*!< MVE contiguous load instruction speculatively executed */
#define ARM_PMU_MVE_ST_CONTIG_RETIRED 0x024C /*!< MVE contiguous store instruction architecturally executed */
#define ARM_PMU_MVE_ST_CONTIG_SPEC 0x024D /*!< MVE contiguous store instruction speculatively executed */
#define ARM_PMU_MVE_LDST_NONCONTIG_RETIRED 0x0250 /*!< MVE non-contiguous load or store instruction architecturally executed */
#define ARM_PMU_MVE_LDST_NONCONTIG_SPEC 0x0251 /*!< MVE non-contiguous load or store instruction speculatively executed */
#define ARM_PMU_MVE_LD_NONCONTIG_RETIRED 0x0254 /*!< MVE non-contiguous load instruction architecturally executed */
#define ARM_PMU_MVE_LD_NONCONTIG_SPEC 0x0255 /*!< MVE non-contiguous load instruction speculatively executed */
#define ARM_PMU_MVE_ST_NONCONTIG_RETIRED 0x0258 /*!< MVE non-contiguous store instruction architecturally executed */
#define ARM_PMU_MVE_ST_NONCONTIG_SPEC 0x0259 /*!< MVE non-contiguous store instruction speculatively executed */
#define ARM_PMU_MVE_LDST_MULTI_RETIRED 0x025C /*!< MVE memory instruction targeting multiple registers architecturally executed */
#define ARM_PMU_MVE_LDST_MULTI_SPEC 0x025D /*!< MVE memory instruction targeting multiple registers speculatively executed */
#define ARM_PMU_MVE_LD_MULTI_RETIRED 0x0260 /*!< MVE memory load instruction targeting multiple registers architecturally executed */
#define ARM_PMU_MVE_LD_MULTI_SPEC 0x0261 /*!< MVE memory load instruction targeting multiple registers speculatively executed */
#define ARM_PMU_MVE_ST_MULTI_RETIRED 0x0264 /*!< MVE memory store instruction targeting multiple registers architecturally executed */
#define ARM_PMU_MVE_ST_MULTI_SPEC 0x0265 /*!< MVE memory store instruction targeting multiple registers speculatively executed */
#define ARM_PMU_MVE_LDST_UNALIGNED_RETIRED 0x028C /*!< MVE unaligned memory load or store instruction architecturally executed */
#define ARM_PMU_MVE_LDST_UNALIGNED_SPEC 0x028D /*!< MVE unaligned memory load or store instruction speculatively executed */
#define ARM_PMU_MVE_LD_UNALIGNED_RETIRED 0x0290 /*!< MVE unaligned load instruction architecturally executed */
#define ARM_PMU_MVE_LD_UNALIGNED_SPEC 0x0291 /*!< MVE unaligned load instruction speculatively executed */
#define ARM_PMU_MVE_ST_UNALIGNED_RETIRED 0x0294 /*!< MVE unaligned store instruction architecturally executed */
#define ARM_PMU_MVE_ST_UNALIGNED_SPEC 0x0295 /*!< MVE unaligned store instruction speculatively executed */
#define ARM_PMU_MVE_LDST_UNALIGNED_NONCONTIG_RETIRED 0x0298 /*!< MVE unaligned noncontiguous load or store instruction architecturally executed */
#define ARM_PMU_MVE_LDST_UNALIGNED_NONCONTIG_SPEC 0x0299 /*!< MVE unaligned noncontiguous load or store instruction speculatively executed */
#define ARM_PMU_MVE_VREDUCE_RETIRED 0x02A0 /*!< MVE vector reduction instruction architecturally executed */
#define ARM_PMU_MVE_VREDUCE_SPEC 0x02A1 /*!< MVE vector reduction instruction speculatively executed */
#define ARM_PMU_MVE_VREDUCE_FP_RETIRED 0x02A4 /*!< MVE floating-point vector reduction instruction architecturally executed */
#define ARM_PMU_MVE_VREDUCE_FP_SPEC 0x02A5 /*!< MVE floating-point vector reduction instruction speculatively executed */
#define ARM_PMU_MVE_VREDUCE_INT_RETIRED 0x02A8 /*!< MVE integer vector reduction instruction architecturally executed */
#define ARM_PMU_MVE_VREDUCE_INT_SPEC 0x02A9 /*!< MVE integer vector reduction instruction speculatively executed */
#define ARM_PMU_MVE_PRED 0x02B8 /*!< Cycles where one or more predicated beats architecturally executed */
#define ARM_PMU_MVE_STALL 0x02CC /*!< Stall cycles caused by an MVE instruction */
#define ARM_PMU_MVE_STALL_RESOURCE 0x02CD /*!< Stall cycles caused by an MVE instruction because of resource conflicts */
#define ARM_PMU_MVE_STALL_RESOURCE_MEM 0x02CE /*!< Stall cycles caused by an MVE instruction because of memory resource conflicts */
#define ARM_PMU_MVE_STALL_RESOURCE_FP 0x02CF /*!< Stall cycles caused by an MVE instruction because of floating-point resource conflicts */
#define ARM_PMU_MVE_STALL_RESOURCE_INT 0x02D0 /*!< Stall cycles caused by an MVE instruction because of integer resource conflicts */
#define ARM_PMU_MVE_STALL_BREAK 0x02D3 /*!< Stall cycles caused by an MVE chain break */
#define ARM_PMU_MVE_STALL_DEPENDENCY 0x02D4 /*!< Stall cycles caused by MVE register dependency */
#define ARM_PMU_ITCM_ACCESS 0x4007 /*!< Instruction TCM access */
#define ARM_PMU_DTCM_ACCESS 0x4008 /*!< Data TCM access */
#define ARM_PMU_TRCEXTOUT0 0x4010 /*!< ETM external output 0 */
#define ARM_PMU_TRCEXTOUT1 0x4011 /*!< ETM external output 1 */
#define ARM_PMU_TRCEXTOUT2 0x4012 /*!< ETM external output 2 */
#define ARM_PMU_TRCEXTOUT3 0x4013 /*!< ETM external output 3 */
#define ARM_PMU_CTI_TRIGOUT4 0x4018 /*!< Cross-trigger Interface output trigger 4 */
#define ARM_PMU_CTI_TRIGOUT5 0x4019 /*!< Cross-trigger Interface output trigger 5 */
#define ARM_PMU_CTI_TRIGOUT6 0x401A /*!< Cross-trigger Interface output trigger 6 */
#define ARM_PMU_CTI_TRIGOUT7 0x401B /*!< Cross-trigger Interface output trigger 7 */
/** \brief PMU Functions */
__STATIC_INLINE void ARM_PMU_Enable(void);
__STATIC_INLINE void ARM_PMU_Disable(void);
__STATIC_INLINE void ARM_PMU_Set_EVTYPER(uint32_t num, uint32_t type);
__STATIC_INLINE void ARM_PMU_CYCCNT_Reset(void);
__STATIC_INLINE void ARM_PMU_EVCNTR_ALL_Reset(void);
__STATIC_INLINE void ARM_PMU_CNTR_Enable(uint32_t mask);
__STATIC_INLINE void ARM_PMU_CNTR_Disable(uint32_t mask);
__STATIC_INLINE uint32_t ARM_PMU_Get_CCNTR(void);
__STATIC_INLINE uint32_t ARM_PMU_Get_EVCNTR(uint32_t num);
__STATIC_INLINE uint32_t ARM_PMU_Get_CNTR_OVS(void);
__STATIC_INLINE void ARM_PMU_Set_CNTR_OVS(uint32_t mask);
__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Enable(uint32_t mask);
__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Disable(uint32_t mask);
__STATIC_INLINE void ARM_PMU_CNTR_Increment(uint32_t mask);
/**
\brief Enable the PMU
*/
__STATIC_INLINE void ARM_PMU_Enable(void)
{
PMU->CTRL |= PMU_CTRL_ENABLE_Msk;
}
/**
\brief Disable the PMU
*/
__STATIC_INLINE void ARM_PMU_Disable(void)
{
PMU->CTRL &= ~PMU_CTRL_ENABLE_Msk;
}
/**
\brief Set event to count for PMU event counter
\param [in] num Event counter (0-30) to configure
\param [in] type Event to count
*/
__STATIC_INLINE void ARM_PMU_Set_EVTYPER(uint32_t num, uint32_t type)
{
PMU->EVTYPER[num] = type;
}
/**
\brief Reset cycle counter
*/
__STATIC_INLINE void ARM_PMU_CYCCNT_Reset(void)
{
PMU->CTRL |= PMU_CTRL_CYCCNT_RESET_Msk;
}
/**
\brief Reset all event counters
*/
__STATIC_INLINE void ARM_PMU_EVCNTR_ALL_Reset(void)
{
PMU->CTRL |= PMU_CTRL_EVENTCNT_RESET_Msk;
}
/**
\brief Enable counters
\param [in] mask Counters to enable
\note Enables one or more of the following:
- event counters (0-30)
- cycle counter
*/
__STATIC_INLINE void ARM_PMU_CNTR_Enable(uint32_t mask)
{
PMU->CNTENSET = mask;
}
/**
\brief Disable counters
\param [in] mask Counters to disable
\note Disables one or more of the following:
- event counters (0-30)
- cycle counter
*/
__STATIC_INLINE void ARM_PMU_CNTR_Disable(uint32_t mask)
{
PMU->CNTENCLR = mask;
}
/**
\brief Read cycle counter
\return Cycle count
*/
__STATIC_INLINE uint32_t ARM_PMU_Get_CCNTR(void)
{
return PMU->CCNTR;
}
/**
\brief Read event counter
\param [in] num Event counter (0-30) to read
\return Event count
*/
__STATIC_INLINE uint32_t ARM_PMU_Get_EVCNTR(uint32_t num)
{
return PMU->EVCNTR[num];
}
/**
\brief Read counter overflow status
\return Counter overflow status bits for the following:
- event counters (0-30)
- cycle counter
*/
__STATIC_INLINE uint32_t ARM_PMU_Get_CNTR_OVS(void)
{
return PMU->OVSSET;
}
/**
\brief Clear counter overflow status
\param [in] mask Counter overflow status bits to clear
\note Clears overflow status bits for one or more of the following:
- event counters (0-30)
- cycle counter
*/
__STATIC_INLINE void ARM_PMU_Set_CNTR_OVS(uint32_t mask)
{
PMU->OVSCLR = mask;
}
/**
\brief Enable counter overflow interrupt request
\param [in] mask Counter overflow interrupt request bits to set
\note Sets overflow interrupt request bits for one or more of the following:
- event counters (0-30)
- cycle counter
*/
__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Enable(uint32_t mask)
{
PMU->INTENSET = mask;
}
/**
\brief Disable counter overflow interrupt request
\param [in] mask Counter overflow interrupt request bits to clear
\note Clears overflow interrupt request bits for one or more of the following:
- event counters (0-30)
- cycle counter
*/
__STATIC_INLINE void ARM_PMU_Set_CNTR_IRQ_Disable(uint32_t mask)
{
PMU->INTENCLR = mask;
}
/**
\brief Software increment event counter
\param [in] mask Counters to increment
\note Software increment bits for one or more event counters (0-30)
*/
__STATIC_INLINE void ARM_PMU_CNTR_Increment(uint32_t mask)
{
PMU->SWINC = mask;
}
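/* Illustrative usage sketch (not part of the original header): count retired
 * instructions on event counter 0. The mask bit positions are an assumption
 * based on the Armv8.1-M PMU layout (CNTENSET/CNTENCLR bit n controls event
 * counter n, bit 31 the cycle counter). */
#if 0 /* example only */
static void PmuCountInstRetired(void)
{
    ARM_PMU_Enable();                               /* global PMU enable */
    ARM_PMU_Set_EVTYPER(0U, ARM_PMU_INST_RETIRED);  /* counter 0 counts event 0x0008 */
    ARM_PMU_EVCNTR_ALL_Reset();                     /* clear all event counters */
    ARM_PMU_CNTR_Enable(1UL << 0);                  /* enable event counter 0 */
    /* ... code under measurement ... */
    uint32_t instRetired = ARM_PMU_Get_EVCNTR(0U);  /* read back the event count */
    ARM_PMU_CNTR_Disable(1UL << 0);                 /* stop counting */
    (void)instRetired;
}
#endif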
#endif

@ -0,0 +1,70 @@
/******************************************************************************
* @file tz_context.h
* @brief Context Management for Armv8-M TrustZone
* @version V1.0.1
* @date 10. January 2018
******************************************************************************/
/*
* Copyright (c) 2017-2018 Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#if defined ( __ICCARM__ )
#pragma system_include /* treat file as system include file for MISRA check */
#elif defined (__clang__)
#pragma clang system_header /* treat file as system include file */
#endif
#ifndef TZ_CONTEXT_H
#define TZ_CONTEXT_H
#include <stdint.h>
#ifndef TZ_MODULEID_T
#define TZ_MODULEID_T
/// \details Data type that identifies secure software modules called by a process.
typedef uint32_t TZ_ModuleId_t;
#endif
/// \details TZ Memory ID identifies an allocated memory slot.
typedef uint32_t TZ_MemoryId_t;
/// Initialize secure context memory system
/// \return execution status (1: success, 0: error)
uint32_t TZ_InitContextSystem_S (void);
/// Allocate context memory for calling secure software modules in TrustZone
/// \param[in] module identifies software modules called from non-secure mode
/// \return value != 0 TrustZone memory slot identifier
/// \return value 0 no memory available or internal error
TZ_MemoryId_t TZ_AllocModuleContext_S (TZ_ModuleId_t module);
/// Free context memory that was previously allocated with \ref TZ_AllocModuleContext_S
/// \param[in] id TrustZone memory slot identifier
/// \return execution status (1: success, 0: error)
uint32_t TZ_FreeModuleContext_S (TZ_MemoryId_t id);
/// Load secure context (called on RTOS thread context switch)
/// \param[in] id TrustZone memory slot identifier
/// \return execution status (1: success, 0: error)
uint32_t TZ_LoadContext_S (TZ_MemoryId_t id);
/// Store secure context (called on RTOS thread context switch)
/// \param[in] id TrustZone memory slot identifier
/// \return execution status (1: success, 0: error)
uint32_t TZ_StoreContext_S (TZ_MemoryId_t id);
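/* Illustrative call sequence for an RTOS integrating these hooks (a sketch;
 * "module" stands for any TZ_ModuleId_t agreed with the secure-side firmware):
 *   TZ_InitContextSystem_S();                            // once, before scheduling starts
 *   TZ_MemoryId_t id = TZ_AllocModuleContext_S(module);  // at thread creation
 *   TZ_StoreContext_S(id);                               // on thread switch-out
 *   TZ_LoadContext_S(id);                                // on thread switch-in
 *   TZ_FreeModuleContext_S(id);                          // at thread deletion
 */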
#endif // TZ_CONTEXT_H

@ -0,0 +1,68 @@
# strip quotation marks in configuration
LOSCFG_ARCH_CPU_STRIP := $(subst $\",,$(LOSCFG_ARCH_CPU))
LOSCFG_ARCH_CPU = $(LOSCFG_ARCH_CPU_STRIP)
LOSCFG_ARCH_FPU_STRIP := $(subst $\",,$(LOSCFG_ARCH_FPU))
LOSCFG_ARCH_FPU = $(LOSCFG_ARCH_FPU_STRIP)
LITEOS_BASELIB += -l$(LOSCFG_ARCH_CPU)
LIB_SUBDIRS += arch/arm/cortex_m
# FPU compile options: hard/soft/softfp, use hard as default
ifeq ($(findstring y, $(LOSCFG_ARCH_CORTEX_M0)$(LOSCFG_ARCH_CORTEX_M0_PLUS)), y)
# cortex-m0 and cortex-m0plus don't support fpu
else ifeq ($(LOSCFG_ARCH_CORTEX_M3), y)
LITEOS_FLOAT_OPTS := -mfloat-abi=softfp
else ifeq ($(LOSCFG_ARCH_CORTEX_M33), y)
LITEOS_FPU_OPTS := -mfpu=fpv5-d16
LITEOS_FLOAT_OPTS := -mfloat-abi=softfp
else
ifeq ($(LOSCFG_ARCH_FPU_ENABLE), y)
LITEOS_FLOAT_OPTS := -mfloat-abi=softfp
LITEOS_FPU_OPTS := -mfpu=$(LOSCFG_ARCH_FPU)
else
EXTENSION := +nofp
LITEOS_FLOAT_OPTS := -mfloat-abi=soft
endif
endif
# CPU compile options
LITEOS_CPU_OPTS := -mcpu=$(LOSCFG_ARCH_CPU)$(EXTENSION)
# the gcc libc folder layout combines core and fpu options, thumb is used as default
# for example, cortex-m7 with the softfp abi and thumb is: thumb/v7e-m+fp/softfp
# attention: for v6-m, thumb/v6-m/nofp/libgcc.a can be used
ifdef LOSCFG_ARCH_ARM_V6M
LITEOS_GCCLIB := thumb/v6-m/nofp
else
ifeq ($(LOSCFG_ARCH_FPU_ENABLE), y)
LITEOS_GCCLIB := thumb/v7e-m+fp/$(subst -mfloat-abi=,,$(LITEOS_FLOAT_OPTS))
else
LITEOS_GCCLIB := thumb/v7-m/nofp
endif
endif
LITEOS_CORE_COPTS = $(LITEOS_CPU_OPTS) $(LITEOS_FLOAT_OPTS) $(LITEOS_FPU_OPTS)
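# For example, assuming LOSCFG_ARCH_CPU=cortex-m4, LOSCFG_ARCH_FPU_ENABLE=y and
# LOSCFG_ARCH_FPU=fpv4-sp-d16, the branches above expand to:
#   LITEOS_CORE_COPTS = -mcpu=cortex-m4 -mfloat-abi=softfp -mfpu=fpv4-sp-d16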
LITEOS_INTERWORK += $(LITEOS_CORE_COPTS)
LITEOS_NODEBUG += $(LITEOS_CORE_COPTS)
LITEOS_ASOPTS += $(LITEOS_CPU_OPTS)
LITEOS_CXXOPTS_BASE += $(LITEOS_CORE_COPTS)
ARCH_INCLUDE := -I $(LITEOSTOPDIR)/arch/arm/cortex_m/include \
-I $(LITEOSTOPDIR)/arch/arm/cmsis
LITEOS_PLATFORM_INCLUDE += $(ARCH_INCLUDE)
LITEOS_CXXINCLUDE += $(ARCH_INCLUDE)
# extra definition for other module
LITEOS_CPU_TYPE = $(LOSCFG_ARCH_CPU)
LITEOS_ARM_ARCH := -march=$(subst $\",,$(LOSCFG_ARCH_ARM_VER))
# linux style macros
LINUX_ARCH_$(LOSCFG_ARCH_ARM_V6M) = -D__LINUX_ARM_ARCH__=6
LINUX_ARCH_$(LOSCFG_ARCH_ARM_V7M) = -D__LINUX_ARM_ARCH__=7
LINUX_ARCH_$(LOSCFG_ARCH_ARM_V8M) = -D__LINUX_ARM_ARCH__=8
AS_OBJS_LIBC_FLAGS += $(LINUX_ARCH_y)

@ -0,0 +1,579 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2018-2020. All rights reserved.
* Description: Atomic Operations HeadFile
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
/**
* @defgroup los_atomic Atomic
* @ingroup kernel
*/
#ifndef _ARCH_ATOMIC_H
#define _ARCH_ATOMIC_H
#include "los_typedef.h"
#include "los_hwi.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
typedef volatile INT32 Atomic;
typedef volatile INT64 Atomic64;
/**
* @ingroup los_atomic
* @brief Atomic read.
*
* @par Description:
* This API is used to implement the atomic read and return the result value of the read.
* @attention
* <ul>
* <li>The pointer v must not be NULL.</li>
* </ul>
*
* @param v [IN] The reading pointer.
*
* @retval #INT32 The result value of the read.
* @par Dependency:
* <ul><li>los_atomic.h: the header file that contains the API declaration.</li></ul>
* @see
* @since Huawei LiteOS V200R003C00
*/
STATIC INLINE INT32 ArchAtomicRead(const Atomic *v)
{
return *v;
}
/**
* @ingroup los_atomic
* @brief Atomic setting.
*
* @par Description:
* This API is used to implement the atomic setting operation.
* @attention
* <ul>
* <li>The pointer v must not be NULL.</li>
* </ul>
*
* @param v [IN] The variable pointer to be set.
* @param setVal [IN] The value to be set.
*
* @retval none.
* @par Dependency:
* <ul><li>los_atomic.h: the header file that contains the API declaration.</li></ul>
* @see
* @since Huawei LiteOS V200R003C00
*/
STATIC INLINE VOID ArchAtomicSet(Atomic *v, INT32 setVal)
{
*v = setVal;
}
#if __ARM_ARCH >= 7
/*
* ARMv6-M does not support exclusive access instructions such as LDREX or STREX, or any other atomic swap instruction.
* ARMv7-M does not support LDREXD and STREXD.
*/
/**
* @ingroup los_atomic
* @brief Atomic addition.
*
* @par Description:
* This API is used to implement the atomic addition and return the result value of the augend.
* @attention
* <ul>
* <li>The pointer v must not be NULL.</li>
* <li>If the addition result is outside the range representable by a 32-bit signed integer,
* an integer overflow may occur in the return value.</li>
* </ul>
*
* @param v [IN] The augend pointer.
* @param addVal [IN] The addend.
*
* @retval #INT32 The result value of the augend.
* @par Dependency:
* <ul><li>los_atomic.h: the header file that contains the API declaration.</li></ul>
* @see
* @since Huawei LiteOS V200R003C00
*/
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
{
INT32 val;
UINT32 status;
do {
__asm__ __volatile__("ldrex %1, [%2]\n"
"add %1, %1, %3\n"
"strex %0, %1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(addVal)
: "cc");
} while (__builtin_expect(status != 0, 0));
return val;
}
/**
* @ingroup los_atomic
* @brief Atomic subtraction.
*
* @par Description:
* This API is used to implement the atomic subtraction and return the result value of the minuend.
* @attention
* <ul>
* <li>The pointer v must not be NULL.</li>
* <li>If the subtraction result is outside the range representable by a 32-bit signed integer,
* an integer overflow may occur in the return value.</li>
* </ul>
*
* @param v [IN] The minuend pointer.
* @param subVal [IN] The subtrahend.
*
* @retval #INT32 The result value of the minuend.
* @par Dependency:
* <ul><li>los_atomic.h: the header file that contains the API declaration.</li></ul>
* @see
* @since Huawei LiteOS V200R003C00
*/
STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
{
INT32 val;
UINT32 status;
do {
__asm__ __volatile__("ldrex %1, [%2]\n"
"sub %1, %1, %3\n"
"strex %0, %1, [%2]"
: "=&r"(status), "=&r"(val)
: "r"(v), "r"(subVal)
: "cc");
} while (__builtin_expect(status != 0, 0));
return val;
}
/**
* @ingroup los_atomic
* @brief Atomic addSelf.
*
* @par Description:
* This API is used to implement the atomic addSelf.
* @attention
* <ul>
* <li>The pointer v must not be NULL.</li>
* <li>The value that v points to must not be INT_MAX, to avoid integer overflow after adding 1.</li>
* </ul>
*
* @param v [IN] The addSelf variable pointer.
*
* @retval none.
* @par Dependency:
* <ul><li>los_atomic.h: the header file that contains the API declaration.</li></ul>
* @see
* @since Huawei LiteOS V200R003C00
*/
STATIC INLINE VOID ArchAtomicInc(Atomic *v)
{
INT32 val;
UINT32 status;
do {
__asm__ __volatile__("ldrex %0, [%3]\n"
"add %0, %0, #1\n"
"strex %1, %0, [%3]"
: "=&r"(val), "=&r"(status), "+m"(*v)
: "r"(v)
: "cc");
} while (__builtin_expect(status != 0, 0));
}
/**
* @ingroup los_atomic
* @brief Atomic addSelf.
*
* @par Description:
* This API is used to implement the atomic addSelf and return the result of addSelf.
* @attention
* <ul>
* <li>The pointer v must not be NULL.</li>
* <li>The value that v points to must not be INT_MAX, to avoid integer overflow after adding 1.</li>
* </ul>
*
* @param v [IN] The addSelf variable pointer.
*
* @retval #INT32 The return value of variable addSelf.
* @par Dependency:
* <ul><li>los_atomic.h: the header file that contains the API declaration.</li></ul>
* @see
* @since Huawei LiteOS V200R003C00
*/
STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
{
INT32 val;
UINT32 status;
do {
__asm__ __volatile__("ldrex %0, [%3]\n"
"add %0, %0, #1\n"
"strex %1, %0, [%3]"
: "=&r"(val), "=&r"(status), "+m"(*v)
: "r"(v)
: "cc");
} while (__builtin_expect(status != 0, 0));
return val;
}
/**
* @ingroup los_atomic
* @brief Atomic auto-decrement.
*
* @par Description:
* This API is used to implement the atomic auto-decrement.
* @attention
* <ul>
* <li>The pointer v must not be NULL.</li>
* <li>The value that v points to must not be INT_MIN, to avoid integer overflow after subtracting 1.</li>
* </ul>
*
* @param v [IN] The auto-decrement variable pointer.
*
* @retval none.
* @par Dependency:
* <ul><li>los_atomic.h: the header file that contains the API declaration.</li></ul>
* @see
* @since Huawei LiteOS V200R003C00
*/
STATIC INLINE VOID ArchAtomicDec(Atomic *v)
{
INT32 val;
UINT32 status;
do {
__asm__ __volatile__("ldrex %0, [%3]\n"
"sub %0, %0, #1\n"
"strex %1, %0, [%3]"
: "=&r"(val), "=&r"(status), "+m"(*v)
: "r"(v)
: "cc");
} while (__builtin_expect(status != 0, 0));
}
/**
* @ingroup los_atomic
* @brief Atomic auto-decrement.
*
* @par Description:
* This API is used to implement the atomic auto-decrement and return the result of the auto-decrement.
* @attention
* <ul>
* <li>The pointer v must not be NULL.</li>
* <li>The value that v points to must not be INT_MIN, to avoid integer overflow after subtracting 1.</li>
* </ul>
*
* @param v [IN] The auto-decrement variable pointer.
*
* @retval #INT32 The return value of variable auto-decrement.
* @par Dependency:
* <ul><li>los_atomic.h: the header file that contains the API declaration.</li></ul>
* @see
* @since Huawei LiteOS V200R003C00
*/
STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
{
INT32 val;
UINT32 status;
do {
__asm__ __volatile__("ldrex %0, [%3]\n"
"sub %0, %0, #1\n"
"strex %1, %0, [%3]"
: "=&r"(val), "=&r"(status), "+m"(*v)
: "r"(v)
: "cc");
} while (__builtin_expect(status != 0, 0));
return val;
}
/**
* @ingroup los_atomic
* @brief Atomic exchange for 32-bit variable.
*
* @par Description:
* This API is used to implement the atomic exchange for a 32-bit variable
* and return the previous value of the atomic variable.
* @attention
* <ul>The pointer v must not be NULL.</ul>
*
* @param v [IN] The variable pointer.
* @param val [IN] The exchange value.
*
* @retval #INT32 The previous value of the atomic variable
* @par Dependency:
* <ul><li>los_atomic.h: the header file that contains the API declaration.</li></ul>
* @see
* @since Huawei LiteOS V100R001C00
*/
STATIC INLINE INT32 ArchAtomicXchg32bits(Atomic *v, INT32 val)
{
INT32 prevVal;
UINT32 status;
do {
__asm__ __volatile__("ldrex %0, [%3]\n"
"strex %1, %4, [%3]"
: "=&r"(prevVal), "=&r"(status), "+m"(*v)
: "r"(v), "r"(val)
: "cc");
} while (__builtin_expect(status != 0, 0));
return prevVal;
}
/**
* @ingroup los_atomic
* @brief Atomic exchange for 32-bit variable with compare.
*
* @par Description:
* This API is used to implement the atomic exchange for a 32-bit variable, if the value of the variable is equal to oldVal.
* @attention
* <ul>The pointer v must not be NULL.</ul>
*
* @param v [IN] The variable pointer.
* @param val [IN] The new value.
* @param oldVal [IN] The old value.
*
* @retval TRUE The previous value of the atomic variable is not equal to oldVal.
* @retval FALSE The previous value of the atomic variable is equal to oldVal.
* @par Dependency:
* <ul><li>los_atomic.h: the header file that contains the API declaration.</li></ul>
* @see
* @since Huawei LiteOS V200R003C00
*/
STATIC INLINE BOOL ArchAtomicCmpXchg32bits(Atomic *v, INT32 val, INT32 oldVal)
{
INT32 prevVal;
UINT32 status;
do {
__asm__ __volatile__("1: ldrex %0, %2\n"
" mov %1, #0\n"
" cmp %0, %3\n"
" bne 2f\n"
" strex %1, %4, %2\n"
"2:"
: "=&r"(prevVal), "=&r"(status), "+Q"(*v)
: "r"(oldVal), "r"(val)
: "cc");
} while (__builtin_expect(status != 0, 0));
return prevVal != oldVal;
}
#else /* __ARM_ARCH < 7 */
STATIC INLINE INT32 ArchAtomicAdd(Atomic *v, INT32 addVal)
{
INT32 val;
UINT32 intSave;
intSave = LOS_IntLock();
*v += addVal;
val = *v;
LOS_IntRestore(intSave);
return val;
}
STATIC INLINE INT32 ArchAtomicSub(Atomic *v, INT32 subVal)
{
INT32 val;
UINT32 intSave;
intSave = LOS_IntLock();
*v -= subVal;
val = *v;
LOS_IntRestore(intSave);
return val;
}
STATIC INLINE VOID ArchAtomicInc(Atomic *v)
{
(VOID)ArchAtomicAdd(v, 1);
}
STATIC INLINE VOID ArchAtomicDec(Atomic *v)
{
(VOID)ArchAtomicSub(v, 1);
}
STATIC INLINE INT32 ArchAtomicIncRet(Atomic *v)
{
return ArchAtomicAdd(v, 1);
}
STATIC INLINE INT32 ArchAtomicDecRet(Atomic *v)
{
return ArchAtomicSub(v, 1);
}
STATIC INLINE INT32 ArchAtomicXchg32bits(Atomic *v, INT32 val)
{
INT32 prevVal;
UINT32 intSave;
intSave = LOS_IntLock();
prevVal = *v;
*v = val;
LOS_IntRestore(intSave);
return prevVal;
}
STATIC INLINE BOOL ArchAtomicCmpXchg32bits(Atomic *v, INT32 val, INT32 oldVal)
{
INT32 prevVal;
UINT32 intSave;
intSave = LOS_IntLock();
prevVal = *v;
if (prevVal == oldVal) {
*v = val;
}
LOS_IntRestore(intSave);
return prevVal != oldVal;
}
#endif /* __ARM_ARCH */
STATIC INLINE INT64 ArchAtomic64Read(const Atomic64 *v)
{
INT64 val;
UINT32 intSave;
intSave = LOS_IntLock();
val = *v;
LOS_IntRestore(intSave);
return val;
}
STATIC INLINE VOID ArchAtomic64Set(Atomic64 *v, INT64 setVal)
{
UINT32 intSave;
intSave = LOS_IntLock();
*v = setVal;
LOS_IntRestore(intSave);
}
STATIC INLINE INT64 ArchAtomic64Add(Atomic64 *v, INT64 addVal)
{
INT64 val;
UINT32 intSave;
intSave = LOS_IntLock();
*v += addVal;
val = *v;
LOS_IntRestore(intSave);
return val;
}
STATIC INLINE INT64 ArchAtomic64Sub(Atomic64 *v, INT64 subVal)
{
INT64 val;
UINT32 intSave;
intSave = LOS_IntLock();
*v -= subVal;
val = *v;
LOS_IntRestore(intSave);
return val;
}
STATIC INLINE VOID ArchAtomic64Inc(Atomic64 *v)
{
(VOID)ArchAtomic64Add(v, 1);
}
STATIC INLINE INT64 ArchAtomic64IncRet(Atomic64 *v)
{
return ArchAtomic64Add(v, 1);
}
STATIC INLINE VOID ArchAtomic64Dec(Atomic64 *v)
{
(VOID)ArchAtomic64Sub(v, 1);
}
STATIC INLINE INT64 ArchAtomic64DecRet(Atomic64 *v)
{
return ArchAtomic64Sub(v, 1);
}
STATIC INLINE INT64 ArchAtomicXchg64bits(Atomic64 *v, INT64 val)
{
INT64 prevVal;
UINT32 intSave;
intSave = LOS_IntLock();
prevVal = *v;
*v = val;
LOS_IntRestore(intSave);
return prevVal;
}
STATIC INLINE BOOL ArchAtomicCmpXchg64bits(Atomic64 *v, INT64 val, INT64 oldVal)
{
INT64 prevVal;
UINT32 intSave;
intSave = LOS_IntLock();
prevVal = *v;
if (prevVal == oldVal) {
*v = val;
}
LOS_IntRestore(intSave);
return prevVal != oldVal;
}
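/* Usage sketch (illustrative, not part of the original header): a retry loop
 * built on the compare-exchange primitives above. Note the return convention:
 * FALSE means the value matched oldVal and the swap was performed. */
#if 0 /* example only */
STATIC INLINE VOID AtomicStoreMax(Atomic *v, INT32 newVal)
{
    INT32 old;
    do {
        old = ArchAtomicRead(v);
        if (old >= newVal) {
            return;  /* current value already large enough, nothing to do */
        }
    } while (ArchAtomicCmpXchg32bits(v, newVal, old));  /* TRUE: lost the race, retry */
}
#endif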
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_ATOMIC_H */

@ -0,0 +1,61 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Barrier HeadFile
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARCH_BARRIER_H
#define _ARCH_BARRIER_H
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#define SEV() __asm__ volatile("sev" : : : "memory")
#define WFE() __asm__ volatile("wfe" : : : "memory")
#define WFI() __asm__ volatile("wfi" : : : "memory")
#define DSB() __asm__ volatile("dsb" : : : "memory")
#define DMB() __asm__ volatile("dmb" : : : "memory")
#define ISB() __asm__ volatile("isb" : : : "memory")
#define BARRIER() __asm__ volatile("":::"memory")
/* Old Style APIs */
#define sev SEV
#define wfe WFE
#define wfi WFI
#define dsb DSB
#define dmb DMB
#define isb ISB
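/* Typical low-power entry sequence (sketch): complete outstanding memory
 * accesses, then wait for an interrupt:
 *   DSB();
 *   WFI();
 */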
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_BARRIER_H */

@ -0,0 +1,32 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Cache Operations HeadFile
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------- */
#ifndef _ARCH_CACHE_H
#define _ARCH_CACHE_H
#endif /* _ARCH_CACHE_H */

@ -0,0 +1,80 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
* Description: Canary Defines Headfile
* Author: Huawei LiteOS Team
* Create: 2020-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
/**
* @defgroup canary
* @ingroup kernel
*/
#ifndef _ARCH_CANARY_H
#define _ARCH_CANARY_H
#include "los_typedef.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#ifdef __GNUC__
extern UINTPTR __stack_chk_guard;
/**
* @ingroup canary
* @brief Stack protector canaries value init.
*
* @par Description:
* This API is used to initialize the canary value __stack_chk_guard when the stack-protector
* compile options -fstack-protector-strong or -fstack-protector-all are enabled.
*
* @attention
* <ul>
* <li>This API is a weak function. We recommend replacing it with an implementation that uses
* a true random number generator for the __stack_chk_guard value.</li>
* </ul>
*
* @param None.
*
* @retval None.
* @par Dependency:
* <ul><li>arch/sp.h: the header file that contains the API declaration.</li></ul>
* @see none
* @since Huawei LiteOS V200R005C00
*/
extern VOID ArchStackGuardInit(VOID);
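/* Sketch of a strong override (TrngRead32 is a hypothetical board-specific
 * true random number source, per the recommendation above):
 *   VOID ArchStackGuardInit(VOID)
 *   {
 *       __stack_chk_guard = (UINTPTR)TrngRead32();
 *   }
 */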
#endif
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_CANARY_H */

@ -0,0 +1,105 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: CPU Operations HeadFile
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
/**
* @defgroup los_hw Hardware
* @ingroup kernel
*/
#ifndef _ARCH_CPU_H
#define _ARCH_CPU_H
#include "los_typedef.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#define CPUID_BASE 0xE000ED00 /* Main ID Register */
#define ARM_SYSREG_READ(addr) (*(volatile unsigned *)(uintptr_t)(addr))
typedef struct {
const UINT32 partNo;
const CHAR *cpuName;
} CpuVendor;
extern const CHAR *ArchCpuInfo(VOID);
STATIC INLINE UINT32 OsMainIDGet(VOID)
{
return ARM_SYSREG_READ(CPUID_BASE);
}
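/* Sketch: the part number that CpuVendor.partNo would be matched against
 * lives in MIDR bits [15:4] (architecturally defined, e.g. 0xC24 on Cortex-M4):
 *   UINT32 partNo = (OsMainIDGet() >> 4) & 0xFFFU;
 */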
STATIC INLINE UINT32 ArchSPGet(VOID)
{
UINT32 val;
asm volatile("mov %0, sp" : "=r"(val));
return val;
}
STATIC INLINE UINTPTR ArchGetSp(VOID)
{
UINTPTR regSp;
__asm__ __volatile__("mov %0, sp\n" : "=r"(regSp));
return regSp;
}
STATIC INLINE UINTPTR ArchGetPsp(VOID)
{
UINTPTR regPsp;
__asm__ __volatile__("MRS %0, psp\n" : "=r"(regPsp));
return regPsp;
}
STATIC INLINE UINTPTR ArchGetMsp(VOID)
{
UINTPTR regMsp;
__asm__ __volatile__("MRS %0, msp\n" : "=r"(regMsp));
return regMsp;
}
STATIC INLINE UINT32 ArchCurrCpuid(void)
{
return 0;
}
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_CPU_H */

@ -0,0 +1,373 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Exception Operations HeadFile
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------- */
/**
* @defgroup los_exc Exception
* @ingroup kernel
*/
#ifndef _ARCH_EXCEPTION_H
#define _ARCH_EXCEPTION_H
#include "los_typedef.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
/* Initial bit32 stack value. */
#define OS_STACK_INIT 0xCACACACA
/* Bit32 stack top magic number. */
#define OS_STACK_MAGIC_WORD 0xCCCCCCCC
typedef enum {
OS_EXC_TYPE_CONTEXT = 0,
OS_EXC_TYPE_TSK = 1,
OS_EXC_TYPE_QUE = 2,
OS_EXC_TYPE_NVIC = 3,
OS_EXC_TYPE_TSK_SWITCH = 4,
OS_EXC_TYPE_MEM = 5,
OS_EXC_TYPE_MAX = 6
} ExcInfoType;
typedef UINT32 (*EXC_INFO_SAVE_CALLBACK)(UINT32, VOID*);
typedef struct tagExcInfoCallBackArray {
ExcInfoType type;
UINT32 valid;
EXC_INFO_SAVE_CALLBACK fnExcInfoCb;
VOID *pArg;
} ExcInfoArray;
#define OS_EXC_IN_INIT 0
#define OS_EXC_IN_TASK 1
#define OS_EXC_IN_HWI 2
#define OS_EXC_MAX_BUF_LEN 25
#define OS_EXC_MAX_NEST_DEPTH 1
#define OS_NVIC_SHCSR 0xE000ED24
#define OS_NVIC_CCR 0xE000ED14
#define OS_NVIC_INT_ENABLE_SIZE 0x20
#define OS_NVIC_INT_PRI_SIZE 0xF0
#define OS_NVIC_EXCPRI_SIZE 0xC
#define OS_NVIC_INT_PEND_SIZE OS_NVIC_INT_ACT_SIZE
#define OS_NVIC_INT_ACT_SIZE OS_NVIC_INT_ENABLE_SIZE
#define OS_EXC_FLAG_NO_FLOAT 0x10000000
#define OS_EXC_FLAG_FAULTADDR_VALID 0x01
#define OS_EXC_FLAG_IN_HWI 0x02
#define OS_EXC_IMPRECISE_ACCESS_ADDR 0xABABABAB
#define FAULT_STATUS_REG_BIT 32
/**
* @ingroup los_exc
* the struct of register files
*
* description: the register files that are saved when an exception is triggered
*
*/
typedef struct tagExcContext {
// handler save
#ifdef LOSCFG_ARCH_FPU_ENABLE
UINT32 S16;
UINT32 S17;
UINT32 S18;
UINT32 S19;
UINT32 S20;
UINT32 S21;
UINT32 S22;
UINT32 S23;
UINT32 S24;
UINT32 S25;
UINT32 S26;
UINT32 S27;
UINT32 S28;
UINT32 S29;
UINT32 S30;
UINT32 S31;
#endif
UINT32 R4;
UINT32 R5;
UINT32 R6;
UINT32 R7;
UINT32 R8;
UINT32 R9;
UINT32 R10;
UINT32 R11;
UINT32 PriMask;
// auto save
UINT32 SP;
UINT32 R0;
UINT32 R1;
UINT32 R2;
UINT32 R3;
UINT32 R12;
UINT32 LR;
UINT32 PC;
UINT32 xPSR;
#ifdef LOSCFG_ARCH_FPU_ENABLE
UINT32 S0;
UINT32 S1;
UINT32 S2;
UINT32 S3;
UINT32 S4;
UINT32 S5;
UINT32 S6;
UINT32 S7;
UINT32 S8;
UINT32 S9;
UINT32 S10;
UINT32 S11;
UINT32 S12;
UINT32 S13;
UINT32 S14;
UINT32 S15;
UINT32 FPSCR;
UINT32 NO_NAME;
#endif
} ExcContext;
typedef VOID (* EXC_PROC_FUNC)(UINT32, ExcContext *);
VOID OsExcHandleEntry(UINT32 excType, UINT32 faultAddr, UINT32 pid, const ExcContext *excBufAddr);
/**
* @ingroup los_exc
* @brief: Exception initialization.
*
* @par Description:
* This API is used to configure the exception function vector table.
*
* @attention:
* <ul><li>None.</li></ul>
*
* @param None.
*
* @retval: None
* @par Dependency:
* <ul><li>los_hwi.h: the header file that contains the API declaration.</li></ul>
* @see None.
* @since Huawei LiteOS V100R001C00
*/
VOID OsExcInit(VOID);
extern VOID OsExcNMI(VOID);
extern VOID OsExcHardFault(VOID);
extern VOID OsExcMemFault(VOID);
extern VOID OsExcBusFault(VOID);
extern VOID OsExcUsageFault(VOID);
extern VOID OsExcSvcCall(VOID);
extern VOID OsBackTrace(VOID);
/**
* @ingroup los_exc
* bus fault err in stack
*/
#define OS_EXC_BF_STKERR 1
/**
* @ingroup los_exc
* bus fault err out stack
*/
#define OS_EXC_BF_UNSTKERR 2
/**
* @ingroup los_exc
* bus fault err imprecise access
*/
#define OS_EXC_BF_IMPRECISERR 3
/**
* @ingroup los_exc
* bus fault err precise access
*/
#define OS_EXC_BF_PRECISERR 4
/**
* @ingroup los_exc
* bus fault err ibus
*/
#define OS_EXC_BF_IBUSERR 5
/**
* @ingroup los_exc
* mem fault err for manager register in stack
*/
#define OS_EXC_MF_MSTKERR 6
/**
* @ingroup los_exc
* mem fault err for manager register out stack
*/
#define OS_EXC_MF_MUNSTKERR 7
/**
* @ingroup los_exc
* mem fault err for data access invalid
*/
#define OS_EXC_MF_DACCVIOL 8
/**
* @ingroup los_exc
* mem fault err for instruction access invalid
*/
#define OS_EXC_MF_IACCVIOL 9
/**
* @ingroup los_exc
* divide zero err
*/
#define OS_EXC_UF_DIVBYZERO 10
/**
* @ingroup los_exc
* data unaligned err
*/
#define OS_EXC_UF_UNALIGNED 11
/**
* @ingroup los_exc
* no co-processor err
*/
#define OS_EXC_UF_NOCP 12
/**
* @ingroup los_exc
* invalid PC err
*/
#define OS_EXC_UF_INVPC 13
/**
* @ingroup los_exc
* invalid state err
*/
#define OS_EXC_UF_INVSTATE 14
/**
* @ingroup los_exc
* undefined instruction err
*/
#define OS_EXC_UF_UNDEFINSTR 15
/**
* @ingroup los_exc
* non-maskable interrupt
*/
#define OS_EXC_CAUSE_NMI 16
/**
* @ingroup los_exc
* hardware fault
*/
#define OS_EXC_CAUSE_HARDFAULT 17
/**
* @ingroup los_exc
* task exit err
*/
#define OS_EXC_CAUSE_TASK_EXIT 18
/**
* @ingroup los_exc
* fatal err
*/
#define OS_EXC_CAUSE_FATAL_ERR 19
/**
* @ingroup los_exc
* debug event trigger hardware interrupt err
*/
#define OS_EXC_CAUSE_DEBUGEVT 20
/**
* @ingroup los_exc
* access vector trigger hardware interrupt err
*/
#define OS_EXC_CAUSE_VECTBL 21
typedef struct tagExcInfo {
UINT16 phase; /* Phase in which an exception occurs:
* 0 means that the exception occurred during initialization,
* 1 means that the exception occurred in the task,
* 2 means that the exception occurred in the interrupt.
*/
UINT16 type; /* Exception type */
UINT32 faultAddr; /* The wrong access address when the exception occurred */
UINT32 intNumOrTaskId; /* An exception occurred during the interrupt indicating the interrupt number,
* An exception occurs in the task, indicating the task id,
* If it occurs in the initialization, it is 0xffffffff
*/
UINT16 nestCnt; /* Count of nested exception */
UINT16 reserved; /* Reserved for alignment */
ExcContext *context; /* Hardware context when an exception occurs */
} ExcInfo;
extern UINT32 g_curNestCount;
VOID OsExcInfoDisplay(const ExcInfo *exc, const ExcContext *excBufAddr);
/**
* @ingroup los_exc
* @brief Kernel task backtrace function.
*
* @par Description:
* Backtrace function that prints task call stack information traced from the input task.
* @attention
* <ul>
* <li>The input taskID should be valid.</li>
* </ul>
*
* @param taskID [IN] Type #UINT32 Task ID.
*
* @retval #None.
*
* @par Dependency:
* los_exc.h: the header file that contains the API declaration.
* @see None.
* @since Huawei LiteOS V100R001C00
*/
extern VOID OsTaskBackTrace(UINT32 taskId);
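/* Usage sketch: dump the call stack of the running task (assumes the standard
 * LiteOS API LOS_CurTaskIDGet() for fetching the current task ID):
 *   OsTaskBackTrace(LOS_CurTaskIDGet());
 */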
VOID ArchExcInit(VOID);
STATIC INLINE VOID ArchHaltCpu(VOID)
{
__asm__ __volatile__("swi 0");
}
VOID ArchBackTrace(VOID);
VOID ArchBackTraceWithSp(const VOID *stackPointer);
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_EXCEPTION_H */

@ -0,0 +1,60 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2018-2020. All rights reserved.
* Description : LiteOS arm-m flash patch module implementation.
* Author : Huawei LiteOS Team
* Create : 2018-03-07
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------- */
#include "stdint.h"
#include "los_errno.h"
#ifndef _ARCH_FPB_H
#define _ARCH_FPB_H
#define FPB_SUCCESS LOS_OK
#define FPB_COMP_REPEAT_ERR LOS_ERRNO_OS_ERROR(LOS_MOD_FPB, 0x00)
#define FPB_NO_COMP_ERR LOS_ERRNO_OS_ERROR(LOS_MOD_FPB, 0x01)
#define FPB_TYPE_ERR LOS_ERRNO_OS_ERROR(LOS_MOD_FPB, 0x02)
#define FPB_NO_FREE_COMP_ERR LOS_ERRNO_OS_ERROR(LOS_MOD_FPB, 0x03)
#define FPB_ADDR_NOT_ALIGN_ERR LOS_ERRNO_OS_ERROR(LOS_MOD_FPB, 0x04)
#define FPB_TARGET_ADDR_ERR LOS_ERRNO_OS_ERROR(LOS_MOD_FPB, 0x05)
#define FPB_BUSY_ERR LOS_ERRNO_OS_ERROR(LOS_MOD_FPB, 0x06)
#define FPB_ERROR_INPUT_ERR LOS_ERRNO_OS_ERROR(LOS_MOD_FPB, 0x07)
typedef enum {
FPB_TYPE_INSTR = 0,
FPB_TYPE_LITERAL = 1,
FPB_TYPE_MAX
} FpbCompTypeEnum;
void FpbInit(void);
UINT32 FpbAddPatch(UINT32 oldAddr, UINT32 patchValue, FpbCompTypeEnum fpbType);
UINT32 FpbDeletePatch(UINT32 oldAddr, FpbCompTypeEnum fpbType);
void FpbDisable(void);
void FpbLock(void);
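/* Usage sketch (illustrative; both addresses are hypothetical): remap one
 * flash instruction address, then remove the patch.
 *   FpbInit();
 *   if (FpbAddPatch(0x08001000, 0x20000100, FPB_TYPE_INSTR) == FPB_SUCCESS) {
 *       // patched code now executes in place of the original instruction
 *   }
 *   (void)FpbDeletePatch(0x08001000, FPB_TYPE_INSTR);
 */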
#endif /* _ARCH_FPB_H */

@ -0,0 +1,60 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Interrupt Operations HeadFile
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARCH_INTERRUPT_H
#define _ARCH_INTERRUPT_H
#include "los_typedef.h"
#include "arch/regs.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
extern UINT32 ArchIntLock(VOID);
extern UINT32 ArchIntUnlock(VOID);
extern VOID ArchIntRestore(UINT32 intSave);
STATIC INLINE UINT32 ArchIntLocked(VOID)
{
UINT32 intSave;
__asm__ volatile("mrs %0, primask" : "=r" (intSave) : : "memory");
return intSave;
}
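/* Typical critical-section usage (sketch):
 *   UINT32 intSave = ArchIntLock();  // mask interrupts, save previous PRIMASK
 *   // ... critical region ...
 *   ArchIntRestore(intSave);         // restore the saved interrupt state
 */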
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_INTERRUPT_H */

@ -0,0 +1,405 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Mpu
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
/**
* @defgroup mpu
* @ingroup kernel
*/
#ifndef _ARCH_MPU_H
#define _ARCH_MPU_H
#include "los_base.h"
#include "los_task.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#define MPU_DISABLE 0
#define MPU_ENABLE 0x7
/**
* @ingroup mpu
* MPU support number: maximum number of MPU regions supported (according to the Cortex-M4 reference documentation)
*/
#define MPU_MAX_SUPPORT 8
/**
* @ingroup mpu
* Region size.
*/
#ifndef MPU_REGION_SIZE_32B
#define MPU_REGION_SIZE_32B 0x04
#endif
#ifndef MPU_REGION_SIZE_64B
#define MPU_REGION_SIZE_64B 0x05
#endif
#ifndef MPU_REGION_SIZE_128B
#define MPU_REGION_SIZE_128B 0x06
#endif
#ifndef MPU_REGION_SIZE_256B
#define MPU_REGION_SIZE_256B 0x07
#endif
#ifndef MPU_REGION_SIZE_512B
#define MPU_REGION_SIZE_512B 0x08
#endif
#ifndef MPU_REGION_SIZE_1KB
#define MPU_REGION_SIZE_1KB 0x09
#endif
#ifndef MPU_REGION_SIZE_2KB
#define MPU_REGION_SIZE_2KB 0x0A
#endif
#ifndef MPU_REGION_SIZE_4KB
#define MPU_REGION_SIZE_4KB 0x0B
#endif
#ifndef MPU_REGION_SIZE_8KB
#define MPU_REGION_SIZE_8KB 0x0C
#endif
#ifndef MPU_REGION_SIZE_16KB
#define MPU_REGION_SIZE_16KB 0x0D
#endif
#ifndef MPU_REGION_SIZE_32KB
#define MPU_REGION_SIZE_32KB 0x0E
#endif
#ifndef MPU_REGION_SIZE_64KB
#define MPU_REGION_SIZE_64KB 0x0F
#endif
#ifndef MPU_REGION_SIZE_128KB
#define MPU_REGION_SIZE_128KB 0x10
#endif
#ifndef MPU_REGION_SIZE_256KB
#define MPU_REGION_SIZE_256KB 0x11
#endif
#ifndef MPU_REGION_SIZE_512KB
#define MPU_REGION_SIZE_512KB 0x12
#endif
#ifndef MPU_REGION_SIZE_1MB
#define MPU_REGION_SIZE_1MB 0x13
#endif
#ifndef MPU_REGION_SIZE_2MB
#define MPU_REGION_SIZE_2MB 0x14
#endif
#ifndef MPU_REGION_SIZE_4MB
#define MPU_REGION_SIZE_4MB 0x15
#endif
#ifndef MPU_REGION_SIZE_8MB
#define MPU_REGION_SIZE_8MB 0x16
#endif
#ifndef MPU_REGION_SIZE_16MB
#define MPU_REGION_SIZE_16MB 0x17
#endif
#ifndef MPU_REGION_SIZE_32MB
#define MPU_REGION_SIZE_32MB 0x18
#endif
#ifndef MPU_REGION_SIZE_64MB
#define MPU_REGION_SIZE_64MB 0x19
#endif
#ifndef MPU_REGION_SIZE_128MB
#define MPU_REGION_SIZE_128MB 0x1A
#endif
#ifndef MPU_REGION_SIZE_256MB
#define MPU_REGION_SIZE_256MB 0x1B
#endif
#ifndef MPU_REGION_SIZE_512MB
#define MPU_REGION_SIZE_512MB 0x1C
#endif
#ifndef MPU_REGION_SIZE_1GB
#define MPU_REGION_SIZE_1GB 0x1D
#endif
#ifndef MPU_REGION_SIZE_2GB
#define MPU_REGION_SIZE_2GB 0x1E
#endif
#ifndef MPU_REGION_SIZE_4GB
#define MPU_REGION_SIZE_4GB 0x1F
#endif
#ifndef MPU_REGION_ENABLE
#define MPU_REGION_ENABLE 0x01U
#endif
#ifndef MPU_REGION_DISABLE
#define MPU_REGION_DISABLE 0x00U
#endif
#ifndef MPU_INSTRUCTION_ACCESS_ENABLE
#define MPU_INSTRUCTION_ACCESS_ENABLE 0x00U
#endif
#ifndef MPU_INSTRUCTION_ACCESS_DISABLE
#define MPU_INSTRUCTION_ACCESS_DISABLE 0x01U
#endif
#ifndef MPU_ACCESS_SHAREABLE
#define MPU_ACCESS_SHAREABLE 0x01U
#endif
#ifndef MPU_ACCESS_NOT_SHAREABLE
#define MPU_ACCESS_NOT_SHAREABLE 0x00U
#endif
#ifndef MPU_ACCESS_CACHEABLE
#define MPU_ACCESS_CACHEABLE 0x01U
#endif
#ifndef MPU_ACCESS_NOT_CACHEABLE
#define MPU_ACCESS_NOT_CACHEABLE 0x00U
#endif
#ifndef MPU_ACCESS_BUFFERABLE
#define MPU_ACCESS_BUFFERABLE 0x01U
#endif
#ifndef MPU_ACCESS_NOT_BUFFERABLE
#define MPU_ACCESS_NOT_BUFFERABLE 0x00U
#endif
#ifndef MPU_HFNMIENA_ENABLE
#define MPU_HFNMIENA_ENABLE 0x01U
#endif
#ifndef MPU_HFNMIENA_DISABLE
#define MPU_HFNMIENA_DISABLE 0x00U
#endif
/**
* @ingroup mpu
* Access permission.
*/
#define MPU_DEFS_RASR_AP_NO_ACCESS 0 /* no access at privileged or user level */
#define MPU_DEFS_RASR_AP_PRIV_RW 1 /* read/write at privileged level only */
#define MPU_DEFS_RASR_AP_PRIV_RW_USER_RO 2 /* privileged read/write, user read-only */
#define MPU_DEFS_RASR_AP_FULL_ACCESS 3 /* read/write at privileged and user level */
#define MPU_DEFS_RASR_AP_PRIV_RO 5 /* privileged read-only, no user access */
#define MPU_DEFS_RASR_AP_RO 6 /* read-only at privileged and user level */
/**
* @ingroup mpu
 * Region info.
 * baseAddress must be within the range of RAM.
 * accessPermission and regionSize are not arbitrary inputs; they must be chosen from the macros defined above.
*/
typedef struct {
UINT8 number; /* number of MPU register to be checked */
UINT32 baseAddress; /* set the base address of the protected region,
the base address must be in the range of RAM */
UINT32 accessPermission; /* privilege level and user level access permission */
    BOOL sharable;           /* whether the region is shareable */
    BOOL cachable;           /* whether the region is cacheable */
    BOOL buffable;           /* whether the region is bufferable */
    UINT32 regionSize;       /* region size */
    BOOL hfnmiena;           /* whether the MPU stays enabled during the NMI and
                                hard fault handlers (HFNMIENA) */
    BOOL xn;                 /* whether instructions can be fetched from this region:
                                0: fetchable; 1: not fetchable (eXecute Never) */
} MPU_REGION_INFO;
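/*
 * A hedged configuration sketch (all values below are illustrative, not a
 * board requirement): protect a 32KB RAM region as privileged read/write,
 * non-shareable, cacheable, bufferable, with instruction fetch allowed.
 *
 *     MPU_REGION_INFO info = {
 *         .number = 0,                                  // MPU region 0
 *         .baseAddress = 0x20000000,                    // must lie in RAM, size-aligned
 *         .accessPermission = MPU_DEFS_RASR_AP_PRIV_RW, // privileged RW only
 *         .sharable = MPU_ACCESS_NOT_SHAREABLE,
 *         .cachable = MPU_ACCESS_CACHEABLE,
 *         .buffable = MPU_ACCESS_BUFFERABLE,
 *         .regionSize = MPU_REGION_SIZE_32KB,
 *         .hfnmiena = MPU_HFNMIENA_DISABLE,
 *         .xn = MPU_INSTRUCTION_ACCESS_ENABLE,          // 0: fetchable
 *     };
 */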
/**
* @ingroup mpu
* MPU error code: The pointer to an input parameter is NULL.
*
* Value: 0x02001200
*
* Solution: Check whether the pointer to the input parameter is usable.
*/
#define LOS_ERRNO_MPU_PTR_NULL LOS_ERRNO_OS_ERROR(LOS_MOD_MPU, 0x00)
/**
* @ingroup mpu
* MPU error code: The base address is not aligned to the boundary of the region capacity.
*
* Value: 0x02001201
*
* Solution: Check base address.
*/
#define LOS_ERRNO_MPU_INVALID_BASE_ADDRESS LOS_ERRNO_OS_ERROR(LOS_MOD_MPU, 0x01)
/**
* @ingroup mpu
* MPU error code: Capacity less than 32 bytes.
*
* Value: 0x02001202
*
 * Solution: Ensure that the capacity is greater than or equal to 32 bytes.
*/
#define LOS_ERRNO_MPU_INVALID_CAPACITY LOS_ERRNO_OS_ERROR(LOS_MOD_MPU, 0x02)
/**
* @ingroup mpu
 * MPU error code: The chip is not configured with an MPU.
*
* Value: 0x02001203
*
 * Solution: Make sure the chip is configured with an MPU.
*/
#define LOS_ERRNO_MPU_NOT_CONFIGURED LOS_ERRNO_OS_ERROR(LOS_MOD_MPU, 0x03)
/**
* @ingroup mpu
* MPU error code: Invalid number.
*
* Value: 0x02001204
*
* Solution: Enter a valid number.
*/
#define LOS_ERRNO_MPU_INVALID_NUMBER LOS_ERRNO_OS_ERROR(LOS_MOD_MPU, 0x04)
/**
* @ingroup mpu
* MPU error code: Region has already been enabled.
*
* Value: 0x02001205
*
 * Solution: To re-enable the region, disable it first.
*/
#define LOS_ERRNO_MPU_REGION_IS_ENABLED LOS_ERRNO_OS_ERROR(LOS_MOD_MPU, 0x05)
/**
* @ingroup mpu
* MPU error code: Region has already been disabled.
*
* Value: 0x02001206
*
 * Solution: The region is already disabled; there is no need to disable it again.
*/
#define LOS_ERRNO_MPU_REGION_IS_DISABLED LOS_ERRNO_OS_ERROR(LOS_MOD_MPU, 0x06)
/**
* @ingroup mpu
* MPU error code: Invalid access.
*
* Value: 0x02001207
*
 * Solution: Check whether the access is correct.
*/
#define LOS_ERRNO_MPU_REGION_INVALID_ACCESS LOS_ERRNO_OS_ERROR(LOS_MOD_MPU, 0x07)
/**
* @ingroup mpu
* MPU error code: Base address is not in RAM.
*
* Value: 0x02001208
*
 * Solution: Check the base address.
*/
#define LOS_ERRNO_MPU_BASE_ADDRESS_NOT_IN_RAM LOS_ERRNO_OS_ERROR(LOS_MOD_MPU, 0x08)
/**
* @ingroup mpu
 * MPU error code: The requested region size is too big for the current base address.
*
* Value: 0x02001209
*
 * Solution: baseAddress + regionSize must not exceed the maximum RAM address.
*/
#define LOS_ERRNO_MPU_REGION_SIZE_IS_TOO_BIG LOS_ERRNO_OS_ERROR(LOS_MOD_MPU, 0x09)
/**
* @ingroup mpu
 * @brief Set a protection region.
 *
 * @par Description:
 * This API is used to set a protection region.
* @attention
* <ul>
 * <li>The base address must be within the range of RAM.</li>
* </ul>
*
 * @param mpuInfo [IN] MPU_REGION_INFO. The configuration information for the protected region.
*
 * @retval #LOS_ERRNO_MPU_PTR_NULL 0x02001200: The pointer to an input parameter is NULL.
* @retval #LOS_ERRNO_MPU_INVALID_BASE_ADDRESS 0x02001201: The base address is not aligned to the boundary of the
* region capacity.
* @retval #LOS_ERRNO_MPU_INVALID_CAPACITY 0x02001202: Capacity less than 32 bytes.
 * @retval #LOS_ERRNO_MPU_NOT_CONFIGURED 0x02001203: The chip is not configured with an MPU.
* @retval #LOS_ERRNO_MPU_INVALID_NUMBER 0x02001204: Invalid number.
* @retval #LOS_ERRNO_MPU_REGION_IS_ENABLED 0x02001205: Region has already been enabled.
* @retval #LOS_ERRNO_MPU_REGION_IS_DISABLED 0x02001206: Region has already been disabled.
* @par Dependency:
* <ul><li>mpu.h: the header file that contains the API declaration.</li></ul>
* @since Huawei LiteOS V100R001C00
*/
extern UINT32 ArchProtectionRegionSet(MPU_REGION_INFO *mpuInfo);
/**
* @ingroup mpu
 * @brief Disable a protection region.
 *
 * @par Description:
 * This API is used to disable a protection region.
 * @attention
 * <ul>
 * <li>The region must currently be enabled.</li>
 * </ul>
 *
 * @param number [IN] UINT8. The number of the region to be disabled.
*
* @retval #LOS_ERRNO_MPU_INVALID_NUMBER 0x02001204: Invalid number.
* @retval #LOS_ERRNO_MPU_REGION_IS_DISABLED 0x02001206: Region has already been disabled.
* @par Dependency:
* <ul><li>mpu.h: the header file that contains the API declaration.</li></ul>
* @since Huawei LiteOS V100R001C00
*/
extern UINT32 ArchRegionDisable(UINT8 number);
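/*
 * A minimal usage sketch, assuming `info` was filled in as in the sketch
 * above (LOS_OK as the success value is an assumption here, since only the
 * error codes are documented):
 *
 *     UINT32 ret = ArchProtectionRegionSet(&info);
 *     if (ret != LOS_OK) {
 *         // handle one of the LOS_ERRNO_MPU_* codes above
 *     }
 *     (VOID)ArchRegionDisable(info.number);   // tear the region down again
 */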
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_MPU_H */

@ -0,0 +1,32 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
 * Description: CPU Register Defines Header File
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------- */
#ifndef _ARCH_REGS_H
#define _ARCH_REGS_H
#endif /* _ARCH_REGS_H */

@ -0,0 +1,32 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
 * Description: Spinlock Low Level Implementations Header File
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------- */
#ifndef _ARCH_SPINLOCK_H
#define _ARCH_SPINLOCK_H
#endif /* _ARCH_SPINLOCK_H */

@ -0,0 +1,109 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
 * Description: Task Operations Header File
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
/**
* @defgroup los_task
* @ingroup kernel
*/
#ifndef _ARCH_TASK_H
#define _ARCH_TASK_H
#include "los_typedef.h"
#include "arch/cpu.h"
#include "arch/regs.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#if ((defined(__CC_ARM) && defined(__TARGET_FPU_VFP)) \
|| (defined(__CLANG_ARM) && defined(__VFP_FP__) && !defined(__SOFTFP__)) \
|| (defined(__ICCARM__) && defined(__ARMVFP__ )) \
|| (defined(__GNUC__) && defined(__VFP_FP__) && !defined(__SOFTFP__)))
#define FPU_USED 1
#else
#define FPU_USED 0
#endif
#define LOSCFG_STACK_POINT_ALIGN_SIZE (sizeof(UINTPTR) * 2)
extern VOID *g_runTask;
extern VOID *g_oldTask;
STATIC INLINE VOID *ArchCurrTaskGet(VOID)
{
return g_runTask;
}
STATIC INLINE VOID ArchCurrTaskSet(VOID *val)
{
g_runTask = val;
}
typedef struct tagContext {
UINT32 R4;
UINT32 R5;
UINT32 R6;
UINT32 R7;
UINT32 R8;
UINT32 R9;
UINT32 R10;
UINT32 R11;
UINT32 PriMask;
#if FPU_USED
UINT32 excReturn;
#endif
UINT32 R0;
UINT32 R1;
UINT32 R2;
UINT32 R3;
UINT32 R12;
UINT32 LR;
UINT32 PC;
UINT32 xPSR;
} TaskContext;
/*
* Description : task stack initialization
* Input : taskId -- task ID
* stackSize -- task stack size
* topStack -- stack top of task (low address)
* Return : pointer to the task context
*/
extern VOID *OsTaskStackInit(UINT32 taskId, UINT32 stackSize, VOID *topStack);
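/*
 * A minimal usage sketch (allocation, pool, and sizes are hypothetical):
 * carve an initial context at the top of a freshly allocated stack.
 *
 *     VOID *topStack = LOS_MemAlloc(m_aucSysMem0, 0x400);  // hypothetical pool/size
 *     TaskContext *ctx = (TaskContext *)OsTaskStackInit(taskId, 0x400, topStack);
 */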
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_TASK_H */

@ -0,0 +1,59 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
 * Description: AArch32 Canary
* Author: Huawei LiteOS Team
* Create: 2020-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "arch/canary.h"
#include "stdlib.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#ifdef __GNUC__
/* stack protector */
UINTPTR __stack_chk_guard = 0x000a0dff;
/*
 * If the stack-protector compiler option -fstack-protector-strong or
 * -fstack-protector-all is enabled, we recommend implementing a true random
 * number generator for the __stack_chk_guard value, to replace the template
 * implementation shown below.
 */
#pragma GCC push_options
#pragma GCC optimize ("-fno-stack-protector")
LITE_OS_SEC_TEXT_INIT WEAK VOID ArchStackGuardInit(VOID)
{
}
#pragma GCC pop_options
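/*
 * A hedged sketch of the recommended replacement, assuming the platform
 * exposes a hardware RNG routine (the name HalRngGenerateNumber() below is
 * hypothetical, not an existing LiteOS API):
 *
 *     LITE_OS_SEC_TEXT_INIT VOID ArchStackGuardInit(VOID)
 *     {
 *         UINT32 seed = 0;
 *         if (HalRngGenerateNumber(&seed) == 0) {  // hypothetical HAL call
 *             __stack_chk_guard = (UINTPTR)seed;
 *         }
 *     }
 */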
#endif
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

@ -0,0 +1,73 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Cortex-M Cpu Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "los_config.h"
#include "arch/cpu.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
/* support cpu vendors */
CpuVendor g_cpuTable[] = {
/* armv7-m */
{ 0xC20, "Cortex-M0" },
{ 0xC21, "Cortex-M1" },
{ 0xC23, "Cortex-M3" },
{ 0xC24, "Cortex-M4" },
{ 0xC27, "Cortex-M7" },
{ 0xD21, "Cortex-M33" },
};
/* logical cpu mapping */
UINT64 g_cpuMap[LOSCFG_KERNEL_CORE_NUM] = {
[0 ... LOSCFG_KERNEL_CORE_NUM - 1] = (UINT64)(-1)
};
const CHAR *ArchCpuInfo(VOID)
{
UINT32 midr = OsMainIDGet();
/* [15:4] is the primary part number */
UINT32 partNo = (midr & 0xFFF0) >> 0x4;
for (UINT32 i = 0; i < (sizeof(g_cpuTable) / sizeof(CpuVendor)); i++) {
if (partNo == g_cpuTable[i].partNo) {
return g_cpuTable[i].cpuName;
}
}
return "unknown";
}
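/*
 * Worked example of the lookup above: on a Cortex-M4 the MIDR typically reads
 * 0x410FC241, so (midr & 0xFFF0) >> 4 = 0xC24, which matches the g_cpuTable
 * entry for "Cortex-M4".
 */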
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

@ -0,0 +1,217 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Cortex-M Dispatch Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
.syntax unified
#ifdef LOSCFG_ARCH_ARM_V6M
.arch armv6-m
#elif defined(LOSCFG_ARCH_CORTEX_M33)
.arch armv8-m.main
#else
.arch armv7e-m
#endif
.thumb
.equ OS_NVIC_INT_CTRL, 0xE000ED04
.equ OS_NVIC_SYSPRI2, 0xE000ED20
.equ OS_NVIC_PENDSV_PRI, 0xF0F00000
.equ OS_NVIC_PENDSVSET, 0x10000000
.equ OS_TASK_STATUS_RUNNING, 0x0010
.section .text
.thumb
.type OsStartToRun, %function
.global OsStartToRun
OsStartToRun:
.fnstart
.cantunwind
ldr r4, =OS_NVIC_SYSPRI2
ldr r5, =OS_NVIC_PENDSV_PRI
str r5, [r4]
ldr r1, =g_oldTask
str r0, [r1]
ldr r1, =g_runTask
str r0, [r1]
movs r1, #2
msr CONTROL, r1
ldrh r7, [r0 , #4]
movs r6, #OS_TASK_STATUS_RUNNING
strh r6, [r0 , #4]
#ifdef LOSCFG_ARCH_ARM_V6M
ldr r3, [r0]
adds r3, r3, #36
ldmfd r3!, {r0-r2}
adds r3, r3, #4
ldmfd r3!, {R4-R7}
msr psp, r3
subs r3, r3, #20
ldr r3, [r3]
#else
ldr r12, [r0]
add r12, r12, #36
/* __VFP_FP__ is set by -mfpu; __SOFTFP__ is set by -mfloat-abi=soft. */
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
add r12, r12, #4
#endif
ldmfd r12!, {R0-R7}
msr psp, r12
#ifdef LOSCFG_ARCH_FPU_ENABLE
vpush {s0};
vpop {s0};
#endif
#endif
mov lr, r5
cpsie I
bx r6
.fnend
.type ArchIntLock, %function
.global ArchIntLock
ArchIntLock:
.fnstart
.cantunwind
mrs r0, PRIMASK
cpsid I
bx lr
.fnend
.type ArchIntUnlock, %function
.global ArchIntUnlock
ArchIntUnlock:
.fnstart
.cantunwind
mrs r0, PRIMASK
cpsie I
bx lr
.fnend
.type ArchIntRestore, %function
.global ArchIntRestore
ArchIntRestore:
.fnstart
.cantunwind
msr PRIMASK, r0
bx lr
.fnend
.type OsTaskSchedule, %function
.global OsTaskSchedule
OsTaskSchedule:
.fnstart
.cantunwind
ldr r2, =OS_NVIC_INT_CTRL
ldr r3, =OS_NVIC_PENDSVSET
str r3, [r2]
bx lr
.fnend
.type osPendSV, %function
.global osPendSV
osPendSV:
.fnstart
.cantunwind
mrs r12, PRIMASK
cpsid I
TaskSwitch:
mrs r0, psp
#ifdef LOSCFG_ARCH_ARM_V6M
subs r0, #36
stmia r0!, {r4-r7}
mov r3, r8
mov r4, r9
mov r5, r10
mov r6, r11
mov r7, r12
stmia r0!, {r3 - r7}
subs r0, #36
#else
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
/* On exception or interrupt entry, LR holds EXC_RETURN. If FPCA = 1, bit 4 of EXC_RETURN is 0. */
tst lr, #0x10
it eq
vstmdbeq r0!, {d8-d15} /* push VFP registers. */
stmfd r0!, {r14}
#endif
stmfd r0!, {r4-r12}
#endif
ldr r5, =g_oldTask
ldr r1, [r5]
str r0, [r1]
ldr r0, =g_runTask
ldr r0, [r0]
/* g_oldTask = g_runTask */
str r0, [r5]
ldr r1, [r0]
#ifdef LOSCFG_ARCH_ARM_V6M
adds r1, #16
ldmfd r1!, {r3-r7}
mov r8, r3
mov r9, r4
mov r10, r5
mov r11, r6
mov r12, r7
subs r1, #36
ldmfd r1!, {r4-r7}
adds r1, #20
#else
ldmfd r1!, {r4-r12}
#endif
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
ldmfd r1!, {r14}
tst r14, #0x10
it eq
vldmiaeq r1!, {d8-d15}
#endif
msr psp, r1
msr PRIMASK, r12
bx lr
.fnend

@ -0,0 +1,335 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Cortex-M Hw Exc Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
.syntax unified
#if defined(LOSCFG_ARCH_CORTEX_M33)
.arch armv8-m.main
.thumb
.fpu fpv5-d16
#else
.arch armv7-m
.thumb
.fpu vfpv4
#endif
.section .text
.global OsExcNMI
.global OsExcHardFault
.global OsExcMemFault
.global OsExcBusFault
.global OsExcUsageFault
.global OsExcSvcCall
.extern OsExcHandleEntry
.extern g_excTbl
.extern g_taskScheduled
.equ OS_EXC_CAUSE_NMI , 16
.equ OS_EXC_CAUSE_HARDFAULT , 17
.equ HF_DEBUGEVT , 20
.equ HF_VECTBL , 21
.equ OS_EXC_FLAG_FAULTADDR_VALID , 0x10000
.equ OS_EXC_FLAG_IN_HWI , 0x20000
.equ OS_EXC_FLAG_NO_FLOAT , 0x10000000
.equ OS_NVIC_FSR , 0xE000ED28 /* Configurable Fault Status Register (BusFault/MemFault/UsageFault) */
.equ OS_NVIC_HFSR , 0xE000ED2C /* HardFault Status Register */
.equ OS_NVIC_BFAR , 0xE000ED38
.equ OS_NVIC_MMAR , 0xE000ED34
.equ OS_NVIC_ACT_BASE , 0xE000E300
.equ OS_NVIC_SHCSRS , 0xE000ED24
.equ OS_NVIC_SHCSR_MASK , 0xC00
.equ EXCEPT_FRAME_OFFSET_PC , 0x06 * 4 /* see cortex-m7 reference manual: chapter 2.3.7 */
.type OsExcNMI, %function
.global OsExcNMI
OsExcNMI:
.fnstart
.cantunwind
MOV R0, #OS_EXC_CAUSE_NMI
MOV R1, #0
B OsExcDispatch
.fnend
.type OsExcHardFault, %function
.global OsExcHardFault
OsExcHardFault:
.fnstart
.cantunwind
MOV R0, #OS_EXC_CAUSE_HARDFAULT
LDR R2, =OS_NVIC_HFSR
LDR R2, [R2]
MOV R1, #HF_DEBUGEVT
ORR R0, R0, R1, LSL #0x8
TST R2, #0x80000000
BNE OsExcDispatch /* DEBUGEVT */
AND R0, #0x000000FF
MOV R1, #HF_VECTBL
ORR R0, R0, R1, LSL #0x8
TST R2, #0x00000002
BNE OsExcDispatch /* VECTBL */
/* if neither DEBUGEVT nor VECTBL, the fault is FORCED */
AND R0, #0x000000FF
MRS R2, MSP
LDR R1, [R2, EXCEPT_FRAME_OFFSET_PC]
LDR R2, =OS_NVIC_FSR
LDR R2, [R2]
TST R2, #0x8000 /* BFARVALID */
BNE HFBusFault /* BusFault */
TST R2, #0x80 /* MMARVALID */
BNE HFMemFault /* MemFault */
MOV R12,#0
B OsHFExcCommonBMU
.fnend
.type HFBusFault, %function
HFBusFault:
.fnstart
.cantunwind
LDR R1, =OS_NVIC_BFAR
LDR R1, [R1]
MOV R12, #OS_EXC_FLAG_FAULTADDR_VALID
B OsHFExcCommonBMU
.fnend
.type HFMemFault, %function
HFMemFault:
.fnstart
.cantunwind
LDR R1, =OS_NVIC_MMAR
LDR R1, [R1]
MOV R12, #OS_EXC_FLAG_FAULTADDR_VALID
.fnend
.type OsHFExcCommonBMU, %function
.global OsHFExcCommonBMU
OsHFExcCommonBMU:
.fnstart
.cantunwind
CLZ R2, R2
LDR R3, =g_excTbl
ADD R3, R3, R2
LDRB R2, [R3]
ORR R0, R0, R2, LSL #0x8
ORR R0, R12
B OsExcDispatch
.fnend
.type OsExcSvcCall, %function
.global OsExcSvcCall
OsExcSvcCall:
.fnstart
.cantunwind
TST LR, #0x4
ITE EQ
MRSEQ R0, MSP
MRSNE R0, PSP
LDR R1, [R0,#24]
LDRB R0, [R1,#-2]
MOV R1, #0
B OsExcDispatch
.fnend
.type OsExcBusFault, %function
.global OsExcBusFault
OsExcBusFault:
.fnstart
.cantunwind
LDR R0, =OS_NVIC_FSR
LDR R0, [R0]
TST R0, #0x8000 /* BFARVALID */
BEQ ExcBusNoAddr
LDR R1, =OS_NVIC_BFAR
LDR R1, [R1]
MOV R12, #OS_EXC_FLAG_FAULTADDR_VALID
AND R0, #0x1F00
B OsExcCommonBMU
.fnend
.type ExcBusNoAddr, %function
ExcBusNoAddr:
.fnstart
.cantunwind
MOV R12,#0
B OsExcCommonBMU
.fnend
.type OsExcMemFault, %function
.global OsExcMemFault
OsExcMemFault:
.fnstart
.cantunwind
LDR R0, =OS_NVIC_FSR
LDR R0, [R0]
TST R0, #0x80 /* MMARVALID */
BEQ ExcMemNoAddr
LDR R1, =OS_NVIC_MMAR
LDR R1, [R1]
MOV R12, #OS_EXC_FLAG_FAULTADDR_VALID
AND R0, #0x1B
B OsExcCommonBMU
.fnend
.type ExcMemNoAddr, %function
ExcMemNoAddr:
.fnstart
.cantunwind
MOV R12,#0
B OsExcCommonBMU
.fnend
.type OsExcUsageFault, %function
.global OsExcUsageFault
OsExcUsageFault:
LDR R0, =OS_NVIC_FSR
LDR R0, [R0]
LDR R1, =#0x030F
LSL R1, #16
AND R0, R1
MOV R12, #0
OsExcCommonBMU:
CLZ R0, R0
LDR R3, =g_excTbl
ADD R3, R3, R0
LDRB R0, [R3]
ORR R0, R0, R12
/* R0 -- EXCCAUSE(bit 16 is 1 if EXCADDR valid), R1 -- EXCADDR */
OsExcDispatch:
LDR R2, =OS_NVIC_ACT_BASE
MOV R12, #8 /* R12 is hwi check loop counter */
HwiActiveCheck:
LDR R3, [R2] /* R3 store active hwi register when exc */
CMP R3, #0
BEQ HwiActiveCheckNext
/* exc occurred in IRQ */
ORR R0, #OS_EXC_FLAG_IN_HWI
RBIT R2, R3
CLZ R2, R2
AND R12, #1
ADD R2, R2, R12, LSL #5 /* calculate R2 (hwi number) as pid, the third parameter */
ExcInMSP:
CMP LR, #0xFFFFFFED
BNE NoFloatInMsp
ADD R3, R13, #104
PUSH {R3}
MRS R12, PRIMASK /* store message-->exc: disable int */
PUSH {R4-R12} /* store message-->exc: {R4-R12} */
VPUSH {D8-D15}
B HandleEntry
NoFloatInMsp:
ADD R3, R13, #32
PUSH {R3} /* save IRQ SP, store message-->exc: MSP(R13) */
MRS R12, PRIMASK /* store message-->exc: disable int */
PUSH {R4-R12} /* store message-->exc: {R4-R12} */
ORR R0, R0, #OS_EXC_FLAG_NO_FLOAT
B HandleEntry
HwiActiveCheckNext:
ADD R2, #4 /* next NVIC ACT ADDR */
SUBS R12, #1
BNE HwiActiveCheck
/* NMI interrupt exception */
LDR R2, =OS_NVIC_SHCSRS
LDRH R2,[R2]
LDR R3,=OS_NVIC_SHCSR_MASK
AND R2, R2,R3
CMP R2,#0
BNE ExcInMSP
/* exc occurred in Task or Init; register info comes from the task stack */
LDR R2, =g_taskScheduled
LDR R2, [R2]
TST R2, #1 /* os scheduled */
BEQ ExcInMSP /* if exc occurred in Init then branch */
CMP LR, #0xFFFFFFED /* auto push floating registers */
BNE NoFloatInPsp
/* exc occurred in Task */
MOV R2, R13
SUB R13, #96 /* add 8 Bytes reg(for STMFD) */
MRS R3, PSP
ADD R12, R3, #104
PUSH {R12} /* save task SP */
MRS R12, PRIMASK
PUSH {R4-R12}
VPUSH {D8-D15}
/* copy auto saved task register */
LDMFD R3!, {R4-R11} /* R4-R11 store PSP reg(auto push when exc in task) */
VLDMIA R3!, {D8-D15}
VSTMDB R2!, {D8-D15}
STMFD R2!, {R4-R11}
B HandleEntry
NoFloatInPsp:
MOV R2, R13 /* no auto push floating registers */
SUB R13, #32 /* add 8 Bytes reg(for STMFD) */
MRS R3, PSP
ADD R12, R3, #32
PUSH {R12} /* save task SP */
MRS R12, PRIMASK
PUSH {R4-R12}
LDMFD R3, {R4-R11} /* R4-R11 store PSP reg(auto push when exc in task) */
STMFD R2!, {R4-R11}
ORR R0, R0, #OS_EXC_FLAG_NO_FLOAT
HandleEntry:
MOV R3, R13 /* R13:the 4th param */
CPSID I
CPSID F
B OsExcHandleEntry
NOP

@ -0,0 +1,377 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Cortex-M Exception Handler
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "arch/exception.h"
#include "los_task_pri.h"
#include "los_hwi_pri.h"
#include "securec.h"
#include "los_printf_pri.h"
#include "los_memory_pri.h"
#include "nvic.h"
#ifdef LOSCFG_KERNEL_TRACE
#include "los_trace_pri.h"
#endif
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#define USGFAULT (1U << 18)
#define BUSFAULT (1U << 17)
#define MEMFAULT (1U << 16)
#define DIV0FAULT (1U << 4)
#define HARD_FAULT_IRQN (-13)
#define MASK_16_BIT 16
#define OFFSET_OF_PSP 40 /* 10 registers */
#ifdef LOSCFG_BACKTRACE
#define OS_MAX_BACKTRACE 15
#define THUM_OFFSET 2
#define STACK_OFFSET 4
#define MASK_OFFSET 2
#define HIGH_8_OFFSET 8
#define BL_CMD_OFFSET 4
#define BLX_CMD_OFFSET 2
#define PUSH_MASK_WITH_LR 0xb5
#define PUSH_MASK 0xb4
#define OFFSET_ADDRESS_MASK 0x7FF07FF
#define LOW_11_BITS_MASK 0x7FF
#define HIGH_11_BITS_MASK 0x7FF0000
#define HIGH_8_BITS_MASK 0xFF00
#define SIGN_BIT_MASK 0x400000
#define HIGH_OFFSET_NUMBER 12
#define LOW_OFFSET_NUMBER 1
#define BL_INS 0xF000F000
#define BLX_INX 0x4700
extern CHAR _estack;
extern CHAR __text_start;
extern CHAR __text_end;
static const int g_textStart = (const int)&__text_start;
static const int g_textEnd = (const int)&__text_end;
static const int g_estack = (const int)&_estack;
#endif
UINT32 g_curNestCount = 0;
ExcInfo g_excInfo;
UINT8 g_excTbl[FAULT_STATUS_REG_BIT] = {
0, 0, 0, 0, 0, 0, OS_EXC_UF_DIVBYZERO, OS_EXC_UF_UNALIGNED,
0, 0, 0, 0, OS_EXC_UF_NOCP, OS_EXC_UF_INVPC, OS_EXC_UF_INVSTATE, OS_EXC_UF_UNDEFINSTR,
0, 0, 0, OS_EXC_BF_STKERR, OS_EXC_BF_UNSTKERR, OS_EXC_BF_IMPRECISERR, OS_EXC_BF_PRECISERR, OS_EXC_BF_IBUSERR,
0, 0, 0, OS_EXC_MF_MSTKERR, OS_EXC_MF_MUNSTKERR, 0, OS_EXC_MF_DACCVIOL, OS_EXC_MF_IACCVIOL
};
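/*
 * Worked example of the CLZ-based lookup the fault handlers perform on this
 * table: a divide-by-zero sets DIVBYZERO (UFSR bit 9, i.e. bit 25 of the
 * CFSR); CLZ(1 << 25) = 6, and g_excTbl[6] is OS_EXC_UF_DIVBYZERO.
 */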
ExcInfoArray g_excArray[OS_EXC_TYPE_MAX - 1];
STATIC const CHAR *g_phaseName[] = {
"fault in init",
"fault in task",
"fault in interrupt",
};
STATIC VOID OsExcSysInfo(VOID)
{
LosTaskCB *runTask = OsCurrTaskGet();
if (runTask != NULL) {
PrintExcInfo("TaskName = %s\n"
"TaskId = %u\n"
"Task stackSize = %u\n"
"System mem addr = 0x%x\n",
runTask->taskName,
runTask->taskId,
runTask->stackSize,
m_aucSysMem0);
}
}
LITE_OS_SEC_TEXT_INIT VOID OsExcInfoDisplay(const ExcInfo *exc, const ExcContext *excBufAddr)
{
PrintExcInfo("Phase = %s\n"
"Type = 0x%x\n"
"FaultAddr = 0x%x\n"
"intNumOrTaskId = 0x%x\n"
"R0 = 0x%x\n"
"R1 = 0x%x\n"
"R2 = 0x%x\n"
"R3 = 0x%x\n"
"R4 = 0x%x\n"
"R5 = 0x%x\n"
"R6 = 0x%x\n"
"R7 = 0x%x\n"
"R8 = 0x%x\n"
"R9 = 0x%x\n"
"R10 = 0x%x\n"
"R11 = 0x%x\n"
"R12 = 0x%x\n"
"PriMask = 0x%x\n"
"SP = 0x%x\n"
"LR = 0x%x\n"
"PC = 0x%x\n"
"xPSR = 0x%x\n",
g_phaseName[exc->phase], exc->type, exc->faultAddr, exc->intNumOrTaskId, excBufAddr->R0,
excBufAddr->R1, excBufAddr->R2, excBufAddr->R3, excBufAddr->R4, excBufAddr->R5,
excBufAddr->R6, excBufAddr->R7, excBufAddr->R8, excBufAddr->R9,
excBufAddr->R10, excBufAddr->R11, excBufAddr->R12, excBufAddr->PriMask,
excBufAddr->SP, excBufAddr->LR, excBufAddr->PC, excBufAddr->xPSR);
}
LITE_OS_SEC_TEXT_INIT VOID OsExcHandleEntry(UINT32 excType, UINT32 faultAddr, UINT32 pid,
const ExcContext *excBufAddr)
{
ExcContext *BufAddr = NULL;
UINT16 tmpFlag = (excType >> MASK_16_BIT) & OS_NULL_SHORT; /* 2: in interrupt, 1: fault addr valid */
g_curNestCount++;
g_excInfo.nestCnt = (UINT16)g_curNestCount;
g_excInfo.type = excType & OS_NULL_SHORT;
if (tmpFlag & OS_EXC_FLAG_FAULTADDR_VALID) {
g_excInfo.faultAddr = faultAddr;
} else {
g_excInfo.faultAddr = OS_EXC_IMPRECISE_ACCESS_ADDR;
}
if (ArchCurrTaskGet() != NULL) {
if (tmpFlag & OS_EXC_FLAG_IN_HWI) {
g_excInfo.phase = OS_EXC_IN_HWI;
g_excInfo.intNumOrTaskId = pid;
} else {
g_excInfo.phase = OS_EXC_IN_TASK;
g_excInfo.intNumOrTaskId = ((LosTaskCB *)ArchCurrTaskGet())->taskId;
OsExcSysInfo();
}
} else {
g_excInfo.phase = OS_EXC_IN_INIT;
g_excInfo.intNumOrTaskId = OS_NULL_INT;
}
if (excType & OS_EXC_FLAG_NO_FLOAT) {
g_excInfo.context = (ExcContext *)((CHAR *)excBufAddr - LOS_OFF_SET_OF(ExcContext, R4));
} else {
g_excInfo.context = (ExcContext *)excBufAddr;
}
if (g_excInfo.phase == OS_EXC_IN_TASK) {
BufAddr = (ExcContext *)(ArchGetPsp() - OFFSET_OF_PSP);
} else {
BufAddr = g_excInfo.context;
}
OsExcInfoDisplay((const ExcInfo *)&g_excInfo, BufAddr);
#ifdef LOSCFG_BACKTRACE
ArchBackTraceWithSp(BufAddr);
#endif
#ifdef LOSCFG_KERNEL_TRACE
OsTraceRecordDump(FALSE);
#endif
while (1) { }
}
#ifdef LOSCFG_BACKTRACE
/* Check that regSP lies within the half-open range [start, end). */
STATIC INLINE BOOL IsValidSP(UINTPTR regSP, UINTPTR start, UINTPTR end)
{
return (regSP >= start) && (regSP < end);
}
STATIC INLINE BOOL FindSuitableStack(UINTPTR *regSP, UINTPTR *start, UINTPTR *end)
{
UINT32 index, topOfStack, stackBottom;
BOOL found = FALSE;
LosTaskCB *taskCB = NULL;
/* Search in the task stacks */
for (index = 0; index < g_taskMaxNum; index++) {
taskCB = OS_TCB_FROM_TID(index);
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
continue;
}
topOfStack = taskCB->topOfStack;
stackBottom = taskCB->topOfStack + taskCB->stackSize;
if (IsValidSP(*regSP, topOfStack, stackBottom)) {
found = TRUE;
goto FOUND;
}
}
FOUND:
if (found == TRUE) {
*start = topOfStack;
*end = stackBottom;
} else if (*regSP < g_estack) {
*start = *regSP;
*end = g_estack;
found = TRUE;
}
return found;
}
UINTPTR LoopUntilEntry(UINTPTR addr)
{
while (addr > (UINTPTR)g_textStart) {
if (((*((UINT16 *)addr) >> HIGH_8_OFFSET) == PUSH_MASK_WITH_LR) ||
((*((UINT16 *)addr) >> HIGH_8_OFFSET) == PUSH_MASK)) {
break;
}
addr -= THUM_OFFSET;
}
return addr;
}
UINTPTR CalculateBLTargetAddress(UINTPTR bl)
{
UINTPTR target;
UINT32 off0, off1, off;
if (*(UINT16 *)bl & SIGN_BIT_MASK) {
off1 = *(UINT16 *)bl & LOW_11_BITS_MASK;
off0 = *(UINT16 *)(bl + MASK_OFFSET) & LOW_11_BITS_MASK;
} else {
off0 = *(UINT16 *)bl & LOW_11_BITS_MASK;
off1 = *(UINT16 *)(bl + MASK_OFFSET) & LOW_11_BITS_MASK;
}
off = (off0 << HIGH_OFFSET_NUMBER) + (off1 << LOW_OFFSET_NUMBER);
if (off & SIGN_BIT_MASK) {
target = bl + BL_CMD_OFFSET - ((~(off - 1)) & 0x7FFFFF); // 0x7FFFFF : offset mask
} else {
target = bl + BL_CMD_OFFSET + off;
}
return target;
}
UINTPTR CalculateTargetAddress(UINTPTR bl)
{
UINTPTR target;
STATIC UINTPTR tmpBL = 0;
if ((((*(UINT16 *)(bl - BLX_CMD_OFFSET)) & HIGH_8_BITS_MASK) == BLX_INX)) {
if (tmpBL != 0) {
target = LoopUntilEntry(tmpBL);
tmpBL = bl - BLX_CMD_OFFSET;
return target;
}
tmpBL = bl - BLX_CMD_OFFSET;
return LoopUntilEntry(tmpBL);
} else if ((*(UINT32 *)(bl - BL_CMD_OFFSET) & BL_INS) == BL_INS) {
tmpBL = bl - BL_CMD_OFFSET;
return CalculateBLTargetAddress(tmpBL);
}
return 0;
}
VOID BackTraceSub(UINTPTR sp)
{
UINTPTR stackPointer = sp;
UINT32 count = 0;
UINTPTR topOfStack = 0;
UINTPTR stackBottom = 0;
STATIC UINTPTR tmpJump = 0;
if (FindSuitableStack(&stackPointer, &topOfStack, &stackBottom) == FALSE) {
return;
}
while ((stackPointer < stackBottom) && (count < OS_MAX_BACKTRACE)) {
if ((*(UINT32 *)stackPointer >= (UINT32)g_textStart) &&
(*(UINT32 *)stackPointer <= (UINT32)g_textEnd) &&
IS_ALIGNED(*(UINT32 *)stackPointer - 1, THUM_OFFSET)) {
/* Get the entry address of current function. */
UINTPTR checkBL = CalculateTargetAddress(*(UINT32 *)stackPointer - 1);
if ((checkBL == 0) || (checkBL == tmpJump)) {
stackPointer += STACK_OFFSET;
continue;
}
tmpJump = checkBL;
count++;
PrintExcInfo("traceback %u -- lr = 0x%08x -- fp = 0x%08x\n", count, *(UINT32 *)stackPointer - 1, tmpJump);
}
stackPointer += STACK_OFFSET;
}
}
STATIC VOID BackTraceWithSp(UINTPTR sp)
{
PrintExcInfo("*******backtrace begin*******\n");
BackTraceSub(sp);
PrintExcInfo("*******backtrace end*******\n");
}
#endif
VOID ArchBackTrace(VOID)
{
#ifdef LOSCFG_BACKTRACE
UINTPTR sp = ArchGetSp();
PrintExcInfo("sp:0x%08x\n", sp);
BackTraceWithSp(sp);
#endif
}
VOID ArchBackTraceWithSp(const VOID *stackPointer)
{
#ifdef LOSCFG_BACKTRACE
BackTraceWithSp((UINTPTR)stackPointer);
#else
(VOID)stackPointer;
#endif
}
LITE_OS_SEC_TEXT_INIT VOID ArchExcInit(VOID)
{
#ifndef LOSCFG_ARCH_CORTEX_M0
#ifndef LOSCFG_ARCH_CORTEX_M0_PLUS
g_hwiVec[HARD_FAULT_IRQN + OS_SYS_VECTOR_CNT] = OsExcHardFault;
g_hwiVec[NonMaskableInt_IRQn + OS_SYS_VECTOR_CNT] = OsExcNMI;
g_hwiVec[MemoryManagement_IRQn + OS_SYS_VECTOR_CNT] = OsExcMemFault;
g_hwiVec[BusFault_IRQn + OS_SYS_VECTOR_CNT] = OsExcBusFault;
g_hwiVec[UsageFault_IRQn + OS_SYS_VECTOR_CNT] = OsExcUsageFault;
g_hwiVec[SVCall_IRQn + OS_SYS_VECTOR_CNT] = OsExcSvcCall;
#else
g_hwiVec[HARD_FAULT_IRQN + OS_SYS_VECTOR_CNT] = OsExcHardFault;
g_hwiVec[SVC_IRQn + OS_SYS_VECTOR_CNT] = OsExcSvcCall;
g_hwiVec[NonMaskableInt_IRQn + OS_SYS_VECTOR_CNT] = OsExcNMI;
#endif
#endif
/* Enable USGFAULT, BUSFAULT, MEMFAULT */
*(volatile UINT32 *)OS_NVIC_SHCSR |= (USGFAULT | BUSFAULT | MEMFAULT);
/* Enable DIV 0 and unaligned exception */
*(volatile UINT32 *)OS_NVIC_CCR |= DIV0FAULT;
}
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

@ -0,0 +1,357 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2018-2020. All rights reserved.
 * Description : LiteOS arm-m flash patch module implementation.
* Author : Huawei LiteOS Team
* Create : 2018-03-07
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------- */
#include "arch/fpb.h"
#include "los_printf.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#define FPB_REG_MAX 254
#define LITERAL_REG1_INDEX 6
#define LITERAL_REG2_INDEX 7
#define S_POS 24
#define FPB_BASE 0xE0002000UL
#define REMAP_TABLE_ADDR 0x20000000UL
#define ROM_BASE 0x02200000UL
#define ROM_END 0x02214000UL
#define BL_DISTANCE_MAX (1U << 24)
#define NOP_INSTR 0xbf00bf00
#define FPB ((FpbReg *) FPB_BASE)
#define REMAP_UNIT_SIZE 4
#define UPPER_MASK 0xffff0000
#define LOWER_MASK 0x0000ffff
#define SHIFT_BITS 16
#define LITERAL_ALIGN 3
#define INSTR_ALIGN 2
typedef struct {
volatile UINT32 ctrl;
volatile UINT32 remap;
volatile UINT32 comp[FPB_REG_MAX];
} FpbReg;
STATIC VOID FpbControlEnable(VOID);
STATIC VOID FpbRemapRegConfig(UINT32 remapTableAddr);
STATIC UINT32 FpbComparatorRegConfig(UINT8 regIndex, UINT32 compAddr);
STATIC UINT32 FpbRedirectLiteral(UINT32 originalLiteralAddr, UINT32 targetLiteral);
STATIC UINT32 CalcBranchInstr(UINT32 instrAddr, UINT32 targetAddr);
STATIC UINT32 CalcBranchWLinkInstr(UINT32 instrAddr, UINT32 targetAddr);
STATIC UINT32 LittleEndian16Bit(UINT32 val);
STATIC UINT32 GetInstr(UINT32 instrAddr, UINT32 targetAddr, UINT32 *newInstr, UINT8 blInstr);
STATIC UINT32 GetInstrRegIndex(UINT32 oldAddr, UINT8 *regComp);
VOID FpbInit(VOID)
{
UINT32 regCount;
FpbControlEnable();
FpbRemapRegConfig(REMAP_TABLE_ADDR);
for (regCount = 0; regCount < FPB_REG_MAX; regCount++) {
FPB->comp[regCount] = 0;
}
}
UINT32 FpbAddPatch(UINT32 oldAddr, UINT32 patchValue, FpbCompTypeEnum fpbType)
{
UINT32 newInstr;
UINT32 ret;
UINT8 regIndex;
if (fpbType >= FPB_TYPE_MAX) {
PRINT_ERR("type is wrong, set fpb patch err!\r\n");
return FPB_TYPE_ERR;
}
if (fpbType == FPB_TYPE_LITERAL) {
if (oldAddr & LITERAL_ALIGN) { // check aligned
PRINT_ERR("addr is not aligned to 4!\r\n");
return FPB_ADDR_NOT_ALIGN_ERR;
}
ret = FpbRedirectLiteral(oldAddr, patchValue);
if (ret != FPB_SUCCESS) {
PRINT_ERR("set literal patch err!\r\n");
return ret;
}
} else {
if (oldAddr & INSTR_ALIGN) { // check aligned
PRINT_ERR("addr is not aligned to 4!\r\n");
return FPB_ADDR_NOT_ALIGN_ERR;
}
ret = GetInstrRegIndex(oldAddr, &regIndex);
if (ret != FPB_SUCCESS) {
return ret;
}
ret = GetInstr(oldAddr, patchValue, &newInstr, FALSE);
if (ret != FPB_SUCCESS) {
return ret;
}
PRINT_DEBUG("new_instr:%x, ", newInstr);
ret = FpbComparatorRegConfig(regIndex, oldAddr);
if (ret != FPB_SUCCESS) {
return ret;
}
PRINT_DEBUG("use COMP:%d\r\n", regIndex);
*((UINT32 *)(UINTPTR)(REMAP_TABLE_ADDR + (regIndex * REMAP_UNIT_SIZE))) = newInstr;
}
return FPB_SUCCESS;
}
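/*
 * A minimal usage sketch (the address and the non-literal enumerator name are
 * illustrative; check arch/fpb.h for the actual FpbCompTypeEnum values):
 * redirect a buggy ROM instruction to a patched replacement in RAM.
 *
 *     FpbInit();
 *     UINT32 ret = FpbAddPatch(0x02201000,            // buggy instr, within ROM_BASE..ROM_END
 *                              (UINT32)&PatchedFunc,  // hypothetical patch target
 *                              FPB_TYPE_INSTR);       // assumed non-literal type name
 *     if (ret != FPB_SUCCESS) {
 *         PRINT_ERR("add patch failed: %u\r\n", ret);
 *     }
 */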
UINT32 FpbDeletePatch(UINT32 oldAddr, FpbCompTypeEnum fpbType)
{
UINT8 regIndex;
UINT32 regValue;
UINT32 ret;
if (fpbType >= FPB_TYPE_MAX) {
PRINT_ERR("type is wrong, set pfb patch err!\r\n");
return FPB_TYPE_ERR;
}
if (fpbType == FPB_TYPE_LITERAL) {
regValue = FPB->comp[LITERAL_REG1_INDEX];
if ((oldAddr | 0x01UL) == regValue) {
FPB->comp[LITERAL_REG1_INDEX] = 0;
return FPB_SUCCESS;
} else {
regValue = FPB->comp[LITERAL_REG2_INDEX];
if ((oldAddr | 0x01UL) == regValue) {
FPB->comp[LITERAL_REG2_INDEX] = 0;
return FPB_SUCCESS;
}
}
} else {
ret = GetInstrRegIndex(oldAddr, &regIndex);
if (ret == FPB_COMP_REPEAT_ERR) {
PRINT_DEBUG("delete comp : %d patch\r\n", regIndex);
FPB->comp[regIndex] = 0;
return FPB_SUCCESS;
}
}
PRINT_DEBUG("no patch need to delete\r\n");
return FPB_NO_COMP_ERR;
}
STATIC UINT32 GetInstrRegIndex(UINT32 oldAddr, UINT8 *regComp)
{
UINT32 regValue;
UINT8 regCount;
UINT8 flag = 0;
for (regCount = 0; regCount < FPB_REG_MAX; regCount++) {
if (regCount == LITERAL_REG1_INDEX) {
regCount++;
continue;
}
regValue = FPB->comp[regCount];
if (regValue & 0x01UL) {
if (regValue == (oldAddr | 0x01UL)) {
PRINT_WARN("the old_func_addr already has patched\r\n");
*regComp = regCount;
return FPB_COMP_REPEAT_ERR;
}
} else {
if (flag == 0) {
*regComp = regCount;
flag = 1;
}
}
}
if ((regCount == FPB_REG_MAX) && (flag == 0)) {
PRINT_WARN("there is no free fpb comp regiter\r\n");
return FPB_NO_FREE_COMP_ERR;
}
return FPB_SUCCESS;
}
STATIC UINT32 FpbRedirectLiteral(UINT32 originalLiteralAddr, UINT32 targetLiteral)
{
UINT8 regIndex;
UINT32 ret;
UINT32 literalAddr = originalLiteralAddr | 0x01UL;
UINT32 literalReg1Value = FPB->comp[LITERAL_REG1_INDEX];
UINT32 literalReg2Value = FPB->comp[LITERAL_REG2_INDEX];
if ((originalLiteralAddr < ROM_BASE) || (originalLiteralAddr >= ROM_END)) {
return FPB_TARGET_ADDR_ERR;
}
if ((literalReg1Value == literalAddr) || (literalReg2Value == literalAddr)) {
return FPB_COMP_REPEAT_ERR;
}
if (((literalReg1Value & 0x1) != 0) && ((literalReg2Value & 0x1) != 0)) {
return FPB_NO_FREE_COMP_ERR;
}
if ((literalReg1Value & 0x1) == 0) {
regIndex = LITERAL_REG1_INDEX;
} else {
regIndex = LITERAL_REG2_INDEX;
}
ret = FpbComparatorRegConfig(regIndex, originalLiteralAddr);
if (ret != FPB_SUCCESS) {
return ret;
}
*((UINT32 *)(UINTPTR)(REMAP_TABLE_ADDR + (regIndex * REMAP_UNIT_SIZE))) = targetLiteral;
return FPB_SUCCESS;
}
STATIC VOID FpbControlEnable(VOID)
{
FPB->ctrl = 0x03UL; // set KEY bit & enable bit
}
STATIC VOID FpbRemapRegConfig(UINT32 remapTableAddr)
{
FPB->remap = remapTableAddr;
}
STATIC UINT32 FpbComparatorRegConfig(UINT8 regIndex, UINT32 compAddr)
{
if (regIndex >= FPB_REG_MAX) {
PRINT_ERR("reg_index:%d is out of range, REG_MAX is %d\r\n", regIndex, FPB_REG_MAX);
return FPB_NO_COMP_ERR;
}
FPB->comp[regIndex] = (compAddr | 0x01UL);
return FPB_SUCCESS;
}
STATIC UINT32 CalcBranchInstr(UINT32 instrAddr, UINT32 targetAddr)
{
INT32 distance = (INT32)(targetAddr - instrAddr);
if (distance == 0) {
PRINT_ERR("patch addr should not be same as buggy addr\r\n");
return NOP_INSTR;
}
if (distance > BL_DISTANCE_MAX) {
PRINT_ERR("the bl instr should whitin 16M\r\n");
return NOP_INSTR;
}
if ((distance + BL_DISTANCE_MAX) < 0) {
PRINT_ERR("the bl instr should whitin -16M\r\n");
return NOP_INSTR;
}
UINT32 offset = (UINT32)(distance - REMAP_UNIT_SIZE);
PRINT_DEBUG("instr_addr:%x, target_addr:%x\r\n", instrAddr, targetAddr);
UINT16 offset10Upper = ((offset) >> 12) & 0x03FF; // get upper 10 bits [21:12]
UINT16 offset11Lower = ((offset) >> 1) & 0x07FF; // get lower 11 bits [11:1]
UINT8 s = (offset >> S_POS) & 0x1; // get bit 24
UINT8 i1 = (offset >> (S_POS - 1)) & 0x1; // get bit 23
UINT8 i2 = (offset >> (S_POS - 2)) & 0x1; // get bit 22
UINT8 j1 = 0x01 & ((~i1) ^ s);
UINT8 j2 = 0x01 & ((~i2) ^ s);
// upper instruction : [15:11]opcode1 0x1e [10]S [9:0] imm upper 10 bits
UINT32 upperBlInstr = ((0x1E << 11) | (s << 10) | offset10Upper);
// lower instruction : [15:14]opcode2 0x2 [13]J1 [12]opcode3 [11]J2 [10:0] imm lower 11 bits
UINT32 lowerBlInstr = ((0x02 << 14) | (j1 << 13) | (0x01 << 12) | (j2 << 11) | offset11Lower);
return ((upperBlInstr << SHIFT_BITS) | lowerBlInstr); // assembling 32bit instruction
}
STATIC UINT32 CalcBranchWLinkInstr(UINT32 instrAddr, UINT32 targetAddr)
{
UINT32 branchInstr = CalcBranchInstr(instrAddr, targetAddr);
if (branchInstr == NOP_INSTR) {
return NOP_INSTR;
}
return (branchInstr | 0x00004000); // Set bit 14. This is the only difference between B and BL instructions
}
STATIC UINT32 LittleEndian16Bit(UINT32 val)
{
return ((val & UPPER_MASK) >> SHIFT_BITS) | ((val & LOWER_MASK) << SHIFT_BITS); // little_endian swap
}
STATIC UINT32 GetInstr(UINT32 instrAddr, UINT32 targetAddr, UINT32 *newInstr, UINT8 blInstr)
{
UINT32 tmpInstr;
if (blInstr != 0) {
tmpInstr = CalcBranchWLinkInstr(instrAddr, targetAddr);
} else {
tmpInstr = CalcBranchInstr(instrAddr, targetAddr);
}
if (tmpInstr == NOP_INSTR) {
return FPB_TARGET_ADDR_ERR;
}
if (((instrAddr & (~0x1)) & INSTR_ALIGN) == 0) { // check aligned
*newInstr = LittleEndian16Bit(tmpInstr);
} else {
PRINT_ERR("target addr is not 4 aligned,not support!\r\n");
return FPB_ADDR_NOT_ALIGN_ERR;
}
return FPB_SUCCESS;
}
VOID FpbDisable(VOID)
{
FPB->ctrl = 0x02UL; // set KEY bit to enable write
FPB->ctrl = 0x0UL; // disable fpb
}
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

@ -0,0 +1,187 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved.
* Description: Intermit Implementation
* Author: Huawei LiteOS Team
* Create: 2019-08-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
.syntax unified
.arch armv7-m
.thumb
.global g_regBackupBuf
.section .text.OsSRSaveRegister
.weak OsSRSaveRegister
.type OsSRSaveRegister, %function
.global OsSRSaveRegister
OsSRSaveRegister:
.fnstart
.cantunwind
push {r4,r5}
ldr r5, =g_regBackupBuf
str r0, [r5]
adds r5, r5, #4
str r1, [r5]
adds r5, r5, #4
str r2, [r5]
adds r5, r5, #4
str r3, [r5]
mov r0, r5
pop {r4,r5}
adds r0, r0, #4
str r4, [r0]
adds r0, r0, #4
str r5, [r0]
mov r5, r0
adds r5, r5, #4
str r6, [r5]
adds r5, r5, #4
str r7, [r5]
adds r5, r5, #4
str r8, [r5]
adds r5, r5, #4
str r9, [r5]
adds r5, r5, #4
str r10, [r5]
adds r5, r5, #4
str r11, [r5]
adds r5, r5, #4
str r12, [r5]
adds r5, r5, #4
mrs r4, apsr
str r4, [r5]
adds r5, r5, #4
mrs r4, ipsr
str r4, [r5]
adds r5, r5, #4
mrs r4, epsr
str r4, [r5]
adds r5, r5, #4
mrs r4, primask
str r4, [r5]
adds r5, r5, #4
mrs r4, basepri
str r4, [r5]
adds r5, r5, #4
mrs r4, basepri_max
str r4, [r5]
adds r5, r5, #4
mrs r4, faultmask
str r4, [r5]
adds r5, r5, #4
mrs r4, control
str r4, [r5]
adds r5, r5, #4
mov r4, sp
str r4, [r5]
adds r5, r5, #4
mov r4, lr
str r4, [r5]
adds r5, r5, #4
mov r4, lr
str r4, [r5]
mov pc, lr
.fnend
.section .text.OsSRRestoreRegister
.weak OsSRRestoreRegister
.type OsSRRestoreRegister, %function
.global OsSRRestoreRegister
OsSRRestoreRegister:
.fnstart
.cantunwind
ldr r0, =g_regBackupBuf
adds r0, r0, #8
ldr r2, [r0], #4
ldr r3, [r0], #4
ldr r4, [r0], #4
ldr r5, [r0], #4
ldr r6, [r0], #4
ldr r7, [r0], #4
ldr r8, [r0], #4
ldr r9, [r0], #4
ldr r10, [r0], #4
ldr r11, [r0], #4
ldr r12, [r0], #4
ldr r1, [r0], #4
msr apsr, r1
ldr r1, [r0], #4
msr ipsr, r1
ldr r1, [r0], #4
msr epsr, r1
ldr r1, [r0], #4
msr primask, r1
ldr r1, [r0], #4
msr basepri, r1
ldr r1, [r0], #4
msr basepri_max, r1
ldr r1, [r0], #4
msr faultmask, r1
ldr r1, [r0], #4
msr control, r1
ldr sp, [r0], #4
ldr lr, [r0], #4
push {r2}
    ldr r2, =g_regBackupBuf      /* finally restore r0/r1 from the buffer start */
ldr r0, [r2], #4
ldr r1, [r2]
pop {r2}
    bx lr
.fnend
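
/*
 * A minimal sketch (an assumption for illustration, not part of the
 * original sources) of the g_regBackupBuf layout the two routines above
 * agree on, expressed as a C structure of 24 words:
 *
 *     typedef struct {
 *         UINT32 r[13];      // r0-r12 in ascending order
 *         UINT32 apsr;
 *         UINT32 ipsr;       // read-only; the restored value is ignored
 *         UINT32 epsr;       // read-only; the restored value is ignored
 *         UINT32 primask;
 *         UINT32 basepri;
 *         UINT32 basepriMax; // written via basepri_max, aliases basepri
 *         UINT32 faultmask;
 *         UINT32 control;
 *         UINT32 sp;
 *         UINT32 lr;
 *         UINT32 pc;         // saved as lr: the resume point is the caller
 *     } RegBackup;           // hypothetical name; g_regBackupBuf must
 *                            // provide at least sizeof(RegBackup) bytes
 */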

@ -0,0 +1,104 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: ARMv6 Or ARMv7 JMP Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
.syntax unified
#if defined(LOSCFG_ARCH_ARM_V6M)
.arch armv6-m
#elif defined(LOSCFG_ARCH_CORTEX_M33)
.arch armv8-m.main
#else
.arch armv7e-m
#endif
.thumb
.section .text
.type longjmp, %function
.global longjmp
longjmp:
#if defined(LOSCFG_ARCH_ARM_V6M)
    adds r0, #16                 /* skip the r4-r7 slots (words 0-3) */
    ldmia r0!, {r3-r7}           /* load the r8-r11 and lr slots (words 4-8) */
    mov r8, r3
    mov r9, r4
    mov r10, r5
    mov r11, r6
    mov lr, r7
    subs r0, #36                 /* rewind to the start of the buffer */
    ldmia r0!, {r4-r7}           /* restore the low registers */
    adds r0, #20                 /* step forward to the sp slot (word 9) */
    ldr r3, [r0]
    mov sp, r3
#else
ldmia r0!, {r4-r11,lr}
#if !defined(LOSCFG_ARCH_CORTEX_M3) && !defined(LOSCFG_ARCH_ARM_V6M)
vldmia r0!, {d8-d15}
#endif
ldr sp, [r0]
#endif
mov r0, r1
cmp r1, #0
bne 1f
#if defined(LOSCFG_ARCH_ARM_V6M)
movs r0, #1
#else
mov r0, #1
#endif
1:
bx lr
.type setjmp, %function
.global setjmp
setjmp:
#if defined(LOSCFG_ARCH_ARM_V6M)
    stmia r0!, {r4-r7}           /* low registers: words 0-3 */
    mov r3, r8
    mov r4, r9
    mov r5, r10
    mov r6, r11
    mov r7, lr
    stmia r0!, {r3-r7}           /* r8-r11 and lr: words 4-8 */
    mov r3, sp
    str r3, [r0]                 /* sp: word 9 */
    movs r0, #0
#else
stmia r0!, {r4-r11,lr}
#if !defined(LOSCFG_ARCH_CORTEX_M3) && !defined(LOSCFG_ARCH_ARM_V6M)
vstmia r0!, {d8-d15}
#endif
str sp, [r0]
mov r0, #0
#endif
bx lr
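
/*
 * A minimal usage sketch (illustrative only). On the ARMv7-M build with
 * FPU registers stored, the buffer written above holds r4-r11 and lr
 * (9 words), d8-d15 (16 words) and sp (1 word), i.e. 26 words, so the
 * jmp_buf passed in r0 must be at least 104 bytes:
 *
 *     #include <setjmp.h>  // assumes the toolchain jmp_buf matches this size
 *
 *     static jmp_buf g_env;
 *
 *     void Demo(void)
 *     {
 *         if (setjmp(g_env) == 0) {
 *             longjmp(g_env, 42);  // unwinds back into setjmp ...
 *         } else {
 *             // ... which now returns 42 (a 0 argument would return 1)
 *         }
 *     }
 */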

@ -0,0 +1,222 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Mpu Module Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "arch/mpu.h"
#include "los_base.h"
#include "los_typedef.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
#define SIZE_4G 0x100000000ULL
#define SIZE_2G 0x80000000ULL
/*
 * 1: the region is already in use.
 * 0: the region is unused.
 */
STATIC UINT8 g_regionNumBeUsed[MPU_MAX_SUPPORT] = {0};
/*
 * Description: get region capacity
 * Input : regionSize: region size encoding, in the range
 *         MPU_REGION_SIZE_32B ~ MPU_REGION_SIZE_4GB;
 *         capacity = 1ULL << (regionSize + 1)
 * Return : region capacity in bytes
 */
STATIC UINT64 MpuGetRegionCapacity(UINT32 regionSize)
{
UINT64 capacity;
if (regionSize == MPU_REGION_SIZE_4GB) {
return SIZE_4G;
} else if (regionSize == MPU_REGION_SIZE_2GB) {
return SIZE_2G;
}
    capacity = 1ULL << (regionSize + 1); /* 1ULL matches the 64-bit capacity type */
return capacity;
}
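/*
 * Worked example (encoding values assumed from the ARMv7-M RASR.SIZE
 * field, where a field value n selects a 2^(n+1)-byte region):
 *   MPU_REGION_SIZE_32B == 4  -> 1ULL << 5 == 32 bytes
 *   MPU_REGION_SIZE_4GB == 31 -> returned early as SIZE_4G above
 */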
/*
* Description: check parameter
* Input : mpuInfo: MPU info
* Return : Error Information or LOS_OK
*/
STATIC UINT32 MpuCheckParame(const MPU_REGION_INFO *mpuInfo)
{
UINT64 capacity;
if (mpuInfo == NULL) {
return LOS_ERRNO_MPU_PTR_NULL;
}
    /* Check whether the chip has an MPU at all */
    if (MPU->TYPE == 0) {
        return LOS_ERRNO_MPU_NOT_CONFIGURED;
    }
    /* The region number exceeds the maximum supported by this MPU */
    if (mpuInfo->number > (MPU_MAX_SUPPORT - 1)) {
        return LOS_ERRNO_MPU_INVALID_NUMBER;
    }
    /* The selected region has already been enabled */
    if (g_regionNumBeUsed[mpuInfo->number]) {
        return LOS_ERRNO_MPU_REGION_IS_ENABLED;
    }
if ((mpuInfo->regionSize < MPU_REGION_SIZE_32B) || (mpuInfo->regionSize > MPU_REGION_SIZE_4GB)) {
return LOS_ERRNO_MPU_INVALID_CAPACITY;
}
capacity = MpuGetRegionCapacity(mpuInfo->regionSize);
if ((mpuInfo->baseAddress % capacity) != 0) {
return LOS_ERRNO_MPU_INVALID_BASE_ADDRESS;
}
if (mpuInfo->accessPermission > MPU_DEFS_RASR_AP_RO) {
return LOS_ERRNO_MPU_REGION_INVALID_ACCESS;
}
return LOS_OK;
}
UINT32 ArchRegionDisable(UINT8 number)
{
UINT32 intSave;
if (number > (MPU_MAX_SUPPORT - 1)) {
return LOS_ERRNO_MPU_INVALID_NUMBER;
}
if (g_regionNumBeUsed[number] == 0) {
return LOS_ERRNO_MPU_REGION_IS_DISABLED;
}
intSave = LOS_IntLock();
MPU->RNR = number;
MPU->RBAR = 0;
MPU->RASR = 0;
g_regionNumBeUsed[number] = 0;
LOS_IntRestore(intSave);
return LOS_OK;
}
STATIC VOID MpuDisable(VOID)
{
/* Make sure outstanding transfers are done */
__DMB();
MPU->CTRL = MPU_DISABLE;
}
/*
 * Description: enable mpu
 * Input : hfnmiena: whether the MPU stays enabled while the NMI and
 *         HardFault handlers run
 */
STATIC VOID MpuEnable(BOOL hfnmiena)
{
UINT32 enable;
enable = MPU_CTRL_ENABLE_Msk |
MPU_CTRL_PRIVDEFENA_Msk |
(hfnmiena << MPU_CTRL_HFNMIENA_Pos);
    MPU->CTRL = enable;
    /* Ensure the MPU settings take effect */
    __DSB();
    /* Flush the pipeline so instruction fetches use the updated settings */
    __ISB();
}
/*
 * Description: configure one mpu region
 * Input : mpuInfo: MPU region parameters to be set; the base address
 *         must lie in RAM and be aligned to the region capacity
 */
STATIC VOID MpuRegionConfig(const MPU_REGION_INFO *mpuInfo)
{
UINT32 attributeAndCapacity;
attributeAndCapacity = (mpuInfo->accessPermission << MPU_RASR_AP_Pos) |
(mpuInfo->sharable << MPU_RASR_S_Pos) |
(mpuInfo->cachable << MPU_RASR_C_Pos) |
(mpuInfo->buffable << MPU_RASR_B_Pos) |
(mpuInfo->xn << MPU_RASR_XN_Pos) |
(mpuInfo->regionSize << MPU_RASR_SIZE_Pos) |
(MPU_RASR_ENABLE_Msk);
MPU->RNR = mpuInfo->number;
MPU->RBAR = mpuInfo->baseAddress;
MPU->RASR = attributeAndCapacity;
}
UINT32 ArchProtectionRegionSet(MPU_REGION_INFO *mpuInfo)
{
UINT32 ret;
UINT32 intSave;
intSave = LOS_IntLock();
ret = MpuCheckParame(mpuInfo);
if (ret != LOS_OK) {
LOS_IntRestore(intSave);
return ret;
}
    /* Enable double-word stack alignment on exception entry */
    SCB->CCR |= SCB_CCR_STKALIGN_Msk;
    /* Enable the MemManage fault exception */
    SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;
MpuDisable();
MpuRegionConfig(mpuInfo);
MpuEnable(mpuInfo->hfnmiena);
g_regionNumBeUsed[mpuInfo->number] = 1;
LOS_IntRestore(intSave);
return LOS_OK;
}
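/*
 * A minimal usage sketch (field values are illustrative assumptions,
 * not from a board configuration): make a 32KB SRAM window read-only
 * and non-executable using region slot 0.
 */
#if 0 /* example only */
STATIC VOID DemoProtectSram(VOID)
{
    MPU_REGION_INFO info = {0};

    info.number = 0;                         /* region slot 0 */
    info.baseAddress = 0x20000000;           /* must be capacity-aligned */
    info.regionSize = MPU_REGION_SIZE_32KB;  /* assumed enum member */
    info.accessPermission = MPU_DEFS_RASR_AP_RO;
    info.sharable = 0;
    info.cachable = 1;
    info.buffable = 0;
    info.xn = 1;                             /* forbid instruction fetch */
    info.hfnmiena = FALSE;                   /* MPU off in NMI/HardFault */

    (VOID)ArchProtectionRegionSet(&info);    /* returns LOS_OK on success */
}
#endif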
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

@ -0,0 +1,101 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Aarch32 Cortex-M Hw Task Implementation
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#include "los_task_pri.h"
#include "arch/task.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
VOID *g_runTask = NULL;
VOID *g_oldTask = NULL;
#ifdef LOSCFG_GDB
STATIC VOID OsTaskEntrySetupLoopFrame(UINT32) __attribute__((noinline, naked));
VOID OsTaskEntrySetupLoopFrame(UINT32 arg0)
{
asm volatile("\tsub fp, sp, #0x4\n"
"\tpush {fp, lr}\n"
"\tadd fp, sp, #0x4\n"
"\tpush {fp, lr}\n"
"\tadd fp, sp, #0x4\n"
"\tbl OsTaskEntry\n"
"\tpop {fp, lr}\n"
"\tpop {fp, pc}\n");
}
#endif
LITE_OS_SEC_TEXT_MINOR VOID OsTaskExit(VOID)
{
__disable_irq();
while (1) { }
}
LITE_OS_SEC_TEXT_INIT VOID *OsTaskStackInit(UINT32 taskId, UINT32 stackSize, VOID *topStack)
{
TaskContext *taskContext = NULL;
OsStackInit(topStack, stackSize);
taskContext = (TaskContext *)(((UINTPTR)topStack + stackSize) - sizeof(TaskContext));
#ifdef LOSCFG_ARCH_FPU_ENABLE
    taskContext->excReturn = 0xFFFFFFFD; /* EXC_RETURN: thread mode, PSP, no FP state */
#endif
taskContext->R4 = 0x04040404L;
taskContext->R5 = 0x05050505L;
taskContext->R6 = 0x06060606L;
taskContext->R7 = 0x07070707L;
taskContext->R8 = 0x08080808L;
taskContext->R9 = 0x09090909L;
taskContext->R10 = 0x10101010L;
taskContext->R11 = 0x11111111L;
taskContext->PriMask = 0;
taskContext->R0 = taskId;
taskContext->R1 = 0x01010101L;
taskContext->R2 = 0x02020202L;
taskContext->R3 = 0x03030303L;
taskContext->R12 = 0x12121212L;
    taskContext->LR = (UINT32)OsTaskExit;  /* where the task goes if its entry returns */
    taskContext->PC = (UINT32)OsTaskEntry;
    taskContext->xPSR = 0x01000000L;       /* set the Thumb bit (bit 24 of xPSR) */
return (VOID *)taskContext;
}
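/*
 * Layout note (a sketch of the assumed first-switch behaviour): when the
 * scheduler restores this context, the core pops R0-R3, R12, LR, PC and
 * xPSR as a hardware exception frame, so the task starts executing
 * OsTaskEntry(taskId) with the Thumb bit set, and falls into OsTaskExit
 * if the entry function ever returns.
 */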
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */

@ -0,0 +1,5 @@
ifeq ($(LOSCFG_ARCH_ARM_CORTEX_M), y)
-include $(LITEOSTOPDIR)/arch/arm/cortex_m/cpu.mk
else
-include $(LITEOSTOPDIR)/arch/arm/cortex_a_r/cpu.mk
endif

@ -0,0 +1,41 @@
# Copyright (c) Huawei Technologies Co., Ltd. 2019-2020. All rights reserved.
#
# ARM 64-bit(Aarch64) implementations
#
config LOSCFG_ARCH_ARM_AARCH64
bool
select LOSCFG_LIB_LIBC
help
64-bit ARM architecture implementations.
AArch64 was introduced in ARMv8-A and is included in subsequent versions of the architecture.
#
# Architecture Versions
#
config LOSCFG_ARCH_ARM_V8A
bool
select LOSCFG_ARCH_ARM_AARCH64
help
ARMv8-A supports both AArch32 and AArch64. If this option is chosen, the target platform needs
to specify whether the core runs in AArch32 or AArch64 mode.
config LOSCFG_ARCH_ARM_VER
string
default "armv8-a" if LOSCFG_ARCH_ARM_V8A
#
# Supported Processor Cores
#
config LOSCFG_ARCH_CORTEX_A53
bool
select LOSCFG_ARCH_ARM_V8A
config LOSCFG_ARCH_CORTEX_A72
bool
select LOSCFG_ARCH_ARM_V8A
config LOSCFG_ARCH_CPU
string
default "cortex-a53" if LOSCFG_ARCH_CORTEX_A53
default "cortex-a72" if LOSCFG_ARCH_CORTEX_A72

@ -0,0 +1,25 @@
include $(LITEOSTOPDIR)/config.mk
MODULE_NAME := $(LOSCFG_ARCH_CPU)
LOCAL_SRCS_y := $(wildcard src/canary.c) $(wildcard src/cpu.c) \
$(wildcard src/fault.c) $(wildcard src/task.c) \
$(wildcard src/mmu.c)
LOCAL_INCLUDE := \
-I $(LITEOSTOPDIR)/kernel/extended/include
# the concatenation equals "yy" only when both perf and GIC v2 are enabled
ifeq ($(LOSCFG_KERNEL_PERF)$(LOSCFG_ARM_GIC_V2), yy)
LOCAL_SRCS_y += $(LITEOSTOPDIR)/drivers/interrupt/arm_gic_v2.c \
$(wildcard src/pmu/armv8_pmu.c)
LOCAL_INCLUDE += -I $(LITEOSTOPDIR)/kernel/extended/src/pmu \
-I $(LITEOSTOPDIR)/kernel/extended/perf
endif
ASSRCS = $(wildcard src/*.S)
LOCAL_SRCS_y += $(ASSRCS)
LOCAL_SRCS = $(LOCAL_SRCS_y)
LOCAL_FLAGS := $(LOCAL_INCLUDE) $(LITEOS_GCOV_OPTS)
include $(MODULE)

@ -0,0 +1,41 @@
# strip the quotation marks from the configuration values
LOSCFG_ARCH_CPU_STRIP := $(subst $\",,$(LOSCFG_ARCH_CPU))
LOSCFG_ARCH_CPU = $(LOSCFG_ARCH_CPU_STRIP)
LOSCFG_ARCH_FPU_STRIP := $(subst $\",,$(LOSCFG_ARCH_FPU))
LOSCFG_ARCH_FPU = $(LOSCFG_ARCH_FPU_STRIP)
LITEOS_BASELIB += -l$(LOSCFG_ARCH_CPU)
LIB_SUBDIRS += arch/arm64/
# CPU compile options
# AArch64 has no specific FPU compile options like AArch32 does.
ifeq ($(LOSCFG_ARCH_FPU_ENABLE), y)
EXTENSION :=
else
EXTENSION := +nofp
endif
LITEOS_CPU_OPTS := -mcpu=$(LOSCFG_ARCH_CPU)$(EXTENSION)
LITEOS_CORE_COPTS = $(LITEOS_CPU_OPTS) $(LITEOS_FLOAT_OPTS) $(LITEOS_FPU_OPTS)
LITEOS_INTERWORK += $(LITEOS_CORE_COPTS)
LITEOS_NODEBUG += $(LITEOS_CORE_COPTS)
LITEOS_ASOPTS += $(LITEOS_CPU_OPTS)
LITEOS_CXXOPTS_BASE += $(LITEOS_CORE_COPTS)
ARCH_INCLUDE := -I $(LITEOSTOPDIR)/arch/arm64/include \
-I $(LITEOSTOPDIR)/arch/arm64/include/arch \
-I $(LITEOSTOPDIR)/arch/arm64/src/include
LITEOS_PLATFORM_INCLUDE += $(ARCH_INCLUDE)
LITEOS_CXXINCLUDE += $(ARCH_INCLUDE)
# extra definitions for other modules
LITEOS_CPU_TYPE = $(LOSCFG_ARCH_CPU)
LITEOS_ARM_ARCH := -march=$(subst $\",,$(LOSCFG_ARCH_ARM_VER))
# Linux-style macros
LINUX_ARCH_$(LOSCFG_ARCH_ARM_V8A) = -D__LINUX_ARM_ARCH__=8
AS_OBJS_LIBC_FLAGS += $(LINUX_ARCH_y)
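# Worked example (assumed values, for illustration only): with
# LOSCFG_ARCH_CPU="cortex-a53" and LOSCFG_ARCH_FPU_ENABLE unset, the
# stripped variables expand to:
#   LITEOS_CPU_OPTS  = -mcpu=cortex-a53+nofp
#   LITEOS_ARM_ARCH  = -march=armv8-a
#   LITEOS_BASELIB  += -lcortex-a53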

@ -0,0 +1,49 @@
/* ----------------------------------------------------------------------------
* Copyright (c) Huawei Technologies Co., Ltd. 2013-2020. All rights reserved.
* Description: Aarch64 Assembly Defines and Macros HeadFile
* Author: Huawei LiteOS Team
* Create: 2013-01-01
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* --------------------------------------------------------------------------- */
#ifndef _ARCH_ASM_H
#define _ARCH_ASM_H
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
/* no trailing backslash after the label, or the macro would splice the next line */
#define FUNCTION(x) \
.global x; \
.text; \
x:
#ifdef __cplusplus
#if __cplusplus
}
#endif /* __cplusplus */
#endif /* __cplusplus */
#endif /* _ARCH_ASM_H */
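
/*
 * A minimal usage sketch (hypothetical symbol name): in an AArch64 .S
 * file the macro expands to the directives and label on one line, so
 *
 *     FUNCTION(ArchDemoNop)
 *         ret
 *
 * assembles as:
 *
 *     .global ArchDemoNop; .text; ArchDemoNop:
 *         ret
 */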
