Compare commits

...

No commits in common. 'main' and 'develop' have entirely different histories.

@ -1,3 +1,2 @@
# Brief Intro
# Iot_Cs_best
LMFAO, we do this shit just for fun :)

Several binary image files (including doc/cache/image.png) were removed in this comparison; previews and dimensions are not shown. The removed images ranged from 68 KiB to 376 KiB.

@ -1,44 +0,0 @@
# Reading the code under base
According to the official README.md, the files under kernel/base make up the **core kernel, including the scheduling and memory-management modules**.
# 1. Brief intro
Process management in OpenHarmony LiteOS is similar to Linux: functionality is built up from a series of operations on parent and child processes.
## 1.1 Description of each sub-module
* los_bitmap.c mainly provides bit-manipulation operations
* los_info.c provides basic methods for querying process information
* los_process.c provides process operations such as creation and switching
# 2. Natural expression of the software requirements
Process management, viewed from a software perspective as a subsystem of the operating system:
## 2.1 What a complete process-management system should provide
### (1) Process scheduling
Processes in the OpenHarmony kernel are scheduled preemptively, with support for both time-slice round-robin and FIFO scheduling policies.
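To make the scheduling description concrete, here is a minimal, hedged sketch of creating a round-robin task. It relies only on the `TSK_INIT_PARAM_S` fields and the `LOS_TaskCreate`/`LOS_TaskDelay` calls that appear later in this comparison; the `los_task.h` header name and the `LOS_SCHED_FIFO` constant mentioned in the comment are assumptions, not taken from this diff.

```c
#include "los_task.h"   /* assumed header providing LOS_TaskCreate and TSK_INIT_PARAM_S */

/* Entry function for the demo task. */
static VOID DemoTaskEntry(VOID)
{
    for (;;) {
        (VOID)LOS_TaskDelay(100);   /* block for 100 ticks, then run again */
    }
}

/* Create a round-robin task; switching `policy` to LOS_SCHED_FIFO (if available in the
 * headers) would select the FIFO policy instead. */
static UINT32 CreateDemoTask(VOID)
{
    UINT32 taskId;
    TSK_INIT_PARAM_S param = {0};

    param.pfnTaskEntry = (TSK_ENTRY_FUNC)DemoTaskEntry;
    param.uwStackSize  = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
    param.pcName       = "demo";
    param.usTaskPrio   = 10;            /* 0 is the highest priority, 31 the lowest */
    param.policy       = LOS_SCHED_RR;  /* time-slice round-robin */
    return LOS_TaskCreate(&taskId, &param);
}
```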
### (2) Process creation
![Alt text](image-4.png)
### (3) Process reclamation
### (4)
### (5)
### (6)
# 3. Use case diagram and use case description
![Alt text](image.png)
# 4. Software architecture
![Alt text](image-5.png)

@ -1,42 +0,0 @@
<!--
# Software engineering assignment requirements
## 1. Natural-language description of the software's functionality
## 2. Use case diagram and use case descriptions
## 3. Software architecture (logical view described with a package diagram) -->
# LiteOS_kernel_a Survey Report
## I. Natural-language description of the software's functionality
### 1.1 Introduction
Huawei LiteOS is a lightweight IoT operating system built by Huawei for the IoT domain. It is widely applicable to smart homes, wearables, connected vehicles, urban public services, manufacturing, and other fields.
Huawei LiteOS was announced at the Huawei Network Congress in May 2015. Since its release to the open-source community, it has enabled partners around the NB-IoT market across technology, ecosystem, solutions, and commercial support, building an open-source IoT ecosystem. It has gathered more than 50 MCU and solution partners and jointly released a batch of open-source development kits and industry solutions that help customers bring IoT products and services to market quickly. Customers span metering, parking, street lighting, environmental protection, bike sharing, logistics, and many other industries. For developers it provides a "one-stop" software platform that significantly reduces device deployment and maintenance costs, lowers the development barrier, and shortens the development cycle.
The Huawei LiteOS open-source project currently supports chip architectures such as **ARM64, ARM Cortex-A, ARM Cortex-M0, Cortex-M3, Cortex-M4, and Cortex-M7**.
### 1.2 Software functionality
#### 1.2.1 Provide a simple, lightweight user interaction interface.
#### 1.2.2 Invoke system software and hardware resources to carry out operations.
#### 1.2.3 Process management
#### 1.2.4 Memory allocation
#### 1.2.5 Parallel programming support (see the sketch below)
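A rough, illustrative sketch of items 1.2.3–1.2.5 at the API level (kernel space; the header names are assumptions, while `LOS_MemAlloc`, `LOS_MemFree`, `LOS_TaskCreate` and the `m_aucSysMem1` pool all appear in the kernel sources shown further down):

```c
#include "los_task.h"    /* assumed: LOS_TaskCreate, TSK_INIT_PARAM_S */
#include "los_memory.h"  /* assumed: LOS_MemAlloc, LOS_MemFree */

/* 1.2.4: allocate a buffer from the system pool, use it, free it. */
STATIC VOID WorkerEntry(VOID)
{
    UINT8 *buf = (UINT8 *)LOS_MemAlloc(m_aucSysMem1, 256);
    if (buf != NULL) {
        buf[0] = 0;                        /* pretend to do some work */
        (VOID)LOS_MemFree(m_aucSysMem1, buf);
    }
}

/* 1.2.3 / 1.2.5: "parallel programming" here simply means running several such tasks. */
STATIC VOID SpawnWorkers(VOID)
{
    UINT32 tid;
    TSK_INIT_PARAM_S p = {0};

    p.pfnTaskEntry = (TSK_ENTRY_FUNC)WorkerEntry;
    p.uwStackSize  = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
    p.pcName       = "worker";
    p.usTaskPrio   = 20;
    for (UINT32 i = 0; i < 2; i++) {       /* two concurrent worker tasks */
        (VOID)LOS_TaskCreate(&tid, &p);
    }
}
```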
## II. Use case diagram and use case descriptions
![Alt text](image.png)
## III. Software architecture
![Alt text](image-2.png)
![Alt text](image-6.png)

Three binary files changed; their contents are not shown.

@ -0,0 +1 @@
*.git

@ -1,442 +0,0 @@
# Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
# Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import("//build/lite/config/component/lite_component.gni")
LITEOS_MENUCONFIG_H = rebase_path("$root_out_dir/config.h")
declare_args() {
tee_enable = false
liteos_name = "OHOS_Image"
liteos_container_enable = false
liteos_skip_make = false
liteos_is_mini = false
}
tee = ""
if (tee_enable) {
tee = "_tee"
}
declare_args() {
liteos_config_file = "${ohos_build_type}${tee}.config"
}
liteos_config_file =
rebase_path(liteos_config_file, "", "$product_path/kernel_configs")
print("liteos_config_file:", liteos_config_file)
exec_script("//build/lite/run_shell_cmd.py",
[ "env" + " CONFIG_=LOSCFG_" + " KCONFIG_CONFIG_HEADER='y=true'" +
" KCONFIG_CONFIG=$liteos_config_file" +
" DEVICE_PATH=$device_path" + " srctree=" + rebase_path(".") +
" genconfig" + " --header-path $LITEOS_MENUCONFIG_H" +
" --file-list kconfig_files.txt" +
" --env-list kconfig_env.txt" + " --config-out config.gni" ],
"",
[ liteos_config_file ])
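# The exec_script call above runs Kconfig's genconfig against the selected
# ${ohos_build_type}${tee}.config file: it emits the config.h header pointed to by
# LITEOS_MENUCONFIG_H and a config.gni carrying the LOSCFG_* switches used by the GN logic below.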
import("liteos.gni")
assert(ARCH != "", "ARCH not set!")
assert(ARCH == arch, "ARCH not match! details: $ARCH != $arch")
assert(tee_enable == defined(LOSCFG_TEE_ENABLE), "TEE switch not match!")
assert(ohos_build_compiler == "clang" == defined(LOSCFG_COMPILER_CLANG_LLVM),
"compiler not match!")
generate_notice_file("kernel_notice_file") {
module_name = "kernel"
module_source_dir_list = [
"$LITEOSTHIRDPARTY/FreeBSD",
"$LITEOSTHIRDPARTY/musl",
"$LITEOSTHIRDPARTY/zlib",
"$LITEOSTHIRDPARTY/FatFs",
"$LITEOSTHIRDPARTY/lwip",
"$LITEOSTHIRDPARTY/NuttX",
"$LITEOSTHIRDPARTY/mtd-utils",
]
}
liteos_arch_cflags = []
if (defined(LOSCFG_ARCH_ARM)) {
mcpu = LOSCFG_ARCH_CPU
if (defined(LOSCFG_ARCH_ARM_AARCH64) && defined(LOSCFG_ARCH_FPU_DISABLE)) {
mcpu += "+nofp"
}
liteos_arch_cflags += [ "-mcpu=$mcpu" ]
if (defined(LOSCFG_ARCH_ARM_AARCH32)) {
liteos_arch_cflags += [
"-mfloat-abi=softfp",
"-mfpu=$LOSCFG_ARCH_FPU",
]
}
}
cc = "$ohos_current_cc_command " + string_join(" ", liteos_arch_cflags)
if (ohos_build_compiler == "clang") {
cc += " --target=$target_triple"
}
config("arch_config") {
cflags = liteos_arch_cflags
asmflags = cflags
ldflags = cflags
if (defined(LOSCFG_ARCH_ARM_AARCH32)) {
if (!defined(LOSCFG_COMPILER_CLANG_LLVM)) {
cflags += [ "-mthumb-interwork" ]
}
}
if (defined(LOSCFG_THUMB)) {
cflags += [ "-mthumb" ]
if (defined(LOSCFG_COMPILER_CLANG_LLVM)) {
cflags += [ "-mimplicit-it=thumb" ]
} else {
cflags += [ "-Wa,-mimplicit-it=thumb" ]
}
}
}
config("stdinc_config") {
std_include = exec_script("//build/lite/run_shell_cmd.py",
[ "$cc -print-file-name=include" ],
"trim string")
cflags = [
"-isystem",
std_include,
]
if (!defined(LOSCFG_LIBC_NEWLIB)) {
cflags += [ "-nostdinc" ]
}
asmflags = cflags
}
config("ssp_config") {
cflags = []
if (defined(LOSCFG_CC_STACKPROTECTOR_ALL)) {
cflags += [ "-fstack-protector-all" ]
} else if (defined(LOSCFG_CC_STACKPROTECTOR_STRONG)) {
cflags += [ "-fstack-protector-strong" ]
} else if (defined(LOSCFG_CC_STACKPROTECTOR)) {
cflags += [
"-fstack-protector",
"--param",
"ssp-buffer-size=4",
]
} else {
cflags += [ "-fno-stack-protector" ]
}
asmflags = cflags
}
config("optimize_config") {
cflags = []
if (defined(LOSCFG_COMPILE_DEBUG)) {
cflags += [
"-g",
"-gdwarf-2",
]
optimization_cflag = "-O0"
}
if (defined(LOSCFG_COMPILE_OPTIMIZE)) {
optimization_cflag = "-O2"
}
if (defined(LOSCFG_COMPILE_OPTIMIZE_SIZE)) {
if (defined(LOSCFG_COMPILER_CLANG_LLVM)) {
optimization_cflag = "-Oz"
} else {
optimization_cflag = "-Os"
}
}
if (defined(LOSCFG_COMPILE_LTO)) {
if (defined(LOSCFG_COMPILER_CLANG_LLVM)) {
cflags += [ "-flto=thin" ]
} else {
#cflags += [ "-flto" ]
}
}
cflags += [ optimization_cflag ]
asmflags = cflags
}
config("kconfig_config") {
cflags = [
"-imacros",
"$LITEOS_MENUCONFIG_H",
]
asmflags = cflags
}
config("warn_config") {
cflags = [
"-Wall",
"-Werror",
"-Wpointer-arith",
"-Wstrict-prototypes",
"-Winvalid-pch",
]
if (defined(LOSCFG_COMPILER_CLANG_LLVM)) {
cflags += [ "-Wno-address-of-packed-member" ]
cflags += [
"-Wno-unused-but-set-variable",
"-Wno-strict-prototypes",
]
}
asmflags = cflags
}
config("dialect_config") {
cflags_c = [ "-std=c99" ]
cflags_cc = [ "-std=c++11" ]
}
config("misc_config") {
defines = [ "__LITEOS__" ]
defines += [ "__LITEOS_A__" ]
if (!defined(LOSCFG_DEBUG_VERSION)) {
defines += [ "NDEBUG" ]
}
cflags = [
"-fno-pic",
"-fno-builtin",
"-fms-extensions",
"-fno-strict-aliasing",
"-fno-common",
"-fsigned-char",
"-ffunction-sections",
"-fdata-sections",
"-fno-exceptions",
"-fno-omit-frame-pointer",
"-fno-short-enums",
"-mno-unaligned-access",
]
if (!defined(LOSCFG_COMPILER_CLANG_LLVM)) {
cflags += [ "-fno-aggressive-loop-optimizations" ]
}
asmflags = cflags
}
config("container_config") {
if (liteos_container_enable) {
cflags = [
"-DLOSCFG_KERNEL_CONTAINER",
"-DLOSCFG_PID_CONTAINER",
"-DLOSCFG_UTS_CONTAINER",
"-DLOSCFG_MNT_CONTAINER",
"-DLOSCFG_CHROOT",
"-DLOSCFG_IPC_CONTAINER",
"-DLOSCFG_TIME_CONTAINER",
"-DLOSCFG_USER_CONTAINER",
"-DLOSCFG_NET_CONTAINER",
"-DLOSCFG_PROC_PROCESS_DIR",
"-DLOSCFG_KERNEL_PLIMITS",
"-DLOSCFG_KERNEL_MEM_PLIMIT",
"-DLOSCFG_KERNEL_IPC_PLIMIT",
"-DLOSCFG_KERNEL_DEV_PLIMIT",
"-DLOSCFG_KERNEL_SCHED_PLIMIT",
]
}
}
config("los_config") {
configs = [
":arch_config",
":kconfig_config",
":stdinc_config",
":dialect_config",
":optimize_config",
":ssp_config",
":warn_config",
":misc_config",
":container_config",
]
}
cmd = "if [ -f $device_path/BUILD.gn ]; then echo true; else echo false; fi"
HAVE_DEVICE_SDK = exec_script("//build/lite/run_shell_cmd.py", [ cmd ], "value")
config("public") {
configs = [
"arch:public",
"kernel:public",
"compat:public",
"bsd:public",
"fs:public",
"drivers:public",
"security:public",
"net:public",
"shell:public",
"lib:public",
]
configs += [
"$HDFTOPDIR:public",
"$DRIVERS_LITEOS_DIR:public",
]
if (HAVE_DEVICE_SDK) {
configs += [ "$device_path:public" ]
}
}
group("modules") {
deps = [
"arch",
"bsd",
"compat",
"drivers",
"fs",
"kernel",
"lib",
"net",
"security",
"shell",
"syscall",
"testsuites/kernel:kernel_test",
]
deps += [
"$DRIVERS_LITEOS_DIR",
"$HDFTOPDIR",
]
if (HAVE_DEVICE_SDK) {
deps += [ device_path ]
}
}
group("apps") {
deps = [ "apps" ]
}
group("tests") {
deps = [ "testsuites" ]
}
group("kernel") {
deps = [ ":build_kernel_image" ]
}
group("liteos_a") {
deps = [ ":kernel" ]
if (!liteos_is_mini) {
deps += [
":apps",
":tests",
"$THIRDPARTY_MUSL_DIR/scripts/build_lite:strip",
]
if (liteos_skip_make == false) {
deps += [ ":make" ]
}
}
}
executable("liteos") {
configs = [] # clear default configs
configs += [ ":arch_config" ]
configs += [ ":public" ]
ldflags = [
"-static",
"-nostdlib",
"-Wl,--gc-sections",
"-Wl,-Map=$liteos_name.map",
"-Wl,--no-eh-frame-hdr",
]
if (defined(LOSCFG_LIBC_NEWLIB)) {
ldflags += [
"-Wl,--wrap=_free_r",
"-Wl,--wrap,_malloc_usable_size_r",
"-Wl,--wrap,_malloc_r",
"-Wl,--wrap,_memalign_r",
"-Wl,--wrap,_realloc_r",
"-Wl,--wrap,_fseeko_r",
]
ldflags -= [ "-nostdlib" ]
}
libgcc = exec_script("//build/lite/run_shell_cmd.py",
[ "$cc -print-libgcc-file-name" ],
"trim string")
libs = [ libgcc ]
if (defined(LOSCFG_COMPILER_CLANG_LLVM)) {
ldflags +=
[ "-Wl,-T" + rebase_path("tools/build/liteos_llvm.ld", root_build_dir) ]
inputs = [ "tools/build/liteos_llvm.ld" ]
} else {
ldflags +=
[ "-Wl,-T" + rebase_path("tools/build/liteos.ld", root_build_dir) ]
ldflags += [ "-nostartfiles" ]
inputs = [ "tools/build/liteos.ld" ]
}
inputs += [ "$root_out_dir/board.ld" ]
output_dir = target_out_dir
deps = [
":modules",
"platform:copy_board.ld",
]
}
copy("copy_liteos") {
deps = [ ":liteos" ]
sources = [ "$target_out_dir/unstripped/bin/liteos" ]
outputs = [ "$root_out_dir/$liteos_name" ]
}
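# build_kernel_image (below) post-processes the copied ELF: objcopy converts it to the raw
# $liteos_name.bin image, and objdump produces a sorted symbol table (.sym.sorted) and a
# full disassembly (.asm) alongside it.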
build_ext_component("build_kernel_image") {
deps = [ ":copy_liteos" ]
exec_path = rebase_path(root_out_dir)
objcopy = "${compile_prefix}objcopy$toolchain_cmd_suffix"
objdump = "${compile_prefix}objdump$toolchain_cmd_suffix"
command = "$objcopy -O binary $liteos_name $liteos_name.bin"
command +=
" && sh -c '$objdump -t $liteos_name | sort >$liteos_name.sym.sorted'"
command += " && sh -c '$objdump -d $liteos_name >$liteos_name.asm'"
}
build_ext_component("make") {
exec_path = rebase_path(".", root_build_dir)
outdir = rebase_path("$target_out_dir/${target_name}_out")
sysroot_path = rebase_path(ohos_current_sysroot)
arch_cflags = string_join(" ", target_arch_cflags)
command = "./build.sh \"$board_name\" \"$ohos_build_compiler\" \"$root_build_dir\" \"$ohos_build_type\" \"$tee_enable\""
command += " \"$device_company\" \"$product_path\" \"$outdir\" \"$ohos_version\" \"$sysroot_path\" \"$arch_cflags\""
command += " \"$device_path\" \"$compile_prefix\" \"$liteos_config_file\""
if (liteos_skip_make) {
print("build_ext_component \"$target_name\" skipped:", command)
command = "true"
}
}

@ -54,13 +54,13 @@ int main(int argc, char * const *argv)
const char *shellPath = "/bin/mksh";
#ifdef LOSCFG_QUICK_START
const char *samplePath = "/dev/shm/sample_quickstart";
const char *samplePath = "/dev/shm/sample_quickstart";
ret = fork();
if (ret < 0) {
printf("Failed to fork for sample_quickstart\n");
} else if (ret == 0) {
(void)execve(samplePath, NULL, NULL); // exec the quick-start sample executable
(void)execve(samplePath, NULL, NULL);
exit(0);
}
@ -72,24 +72,24 @@ int main(int argc, char * const *argv)
close(fd);
}
#endif
ret = fork(); // fork the first child process to run the shell
ret = fork();
if (ret < 0) {
printf("Failed to fork for shell\n");
} else if (ret == 0) {
gid = getpgrp(); // get the process group ID
} else if (ret == 0) {
gid = getpgrp();
if (gid < 0) {
printf("get group id failed, pgrpid %d, errno %d\n", gid, errno);
exit(0);
}
ret = tcsetpgrp(STDIN_FILENO, gid);
ret = tcsetpgrp(STDIN_FILENO, gid);
if (ret != 0) {
printf("tcsetpgrp failed, errno %d\n", errno);
exit(0);
}
(void)execve(shellPath, NULL, NULL); // exec the shell executable as a normal command-line program
(void)execve(shellPath, NULL, NULL);
exit(0);
}
// ret > 0
while (1) {
ret = waitpid(-1, 0, WNOHANG);
if (ret == 0) {

@ -32,10 +32,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/// @brief buffer write test
/// @param buf
/// @param start
/// @param end
static void BufWriteTest(void *buf, int start, int end)
{
for (int i = start; i <= end; i++) {
@ -43,10 +40,6 @@ static void BufWriteTest(void *buf, int start, int end)
}
}
/// @brief buffer read test
/// @param buf
/// @param start
/// @param end
static void BufReadTest(void *buf, int start, int end)
{
char tmp;
@ -54,8 +47,7 @@ static void BufReadTest(void *buf, int start, int end)
tmp = ((char *)buf)[i];
}
}
/// @brief LMS malloc test
/// @param
static void LmsMallocTest(void)
{
#define TEST_SIZE 16
@ -72,8 +64,7 @@ static void LmsMallocTest(void)
free(buf);
printf("\n-------- LmsMallocTest End --------\n");
}
/// @brief LMS realloc test
/// @param
static void LmsReallocTest(void)
{
#define TEST_SIZE 64
@ -94,8 +85,6 @@ static void LmsReallocTest(void)
printf("\n-------- LmsReallocTest End --------\n");
}
/// @brief LMS calloc test
/// @param
static void LmsCallocTest(void)
{
#define TEST_SIZE 16
@ -110,8 +99,6 @@ static void LmsCallocTest(void)
printf("\n-------- LmsCallocTest End --------\n");
}
/// @brief LMS valloc test
/// @param
static void LmsVallocTest(void)
{
#define TEST_SIZE 4096
@ -125,8 +112,7 @@ static void LmsVallocTest(void)
free(buf);
printf("\n-------- LmsVallocTest End --------\n");
}
/// @brief LMS aligned_alloc test
/// @param
static void LmsAlignedAllocTest(void)
{
#define TEST_ALIGN_SIZE 64
@ -141,8 +127,7 @@ static void LmsAlignedAllocTest(void)
free(buf);
printf("\n-------- LmsAlignedAllocTest End --------\n");
}
/// @brief LMS memset test
/// @param
static void LmsMemsetTest(void)
{
#define TEST_SIZE 32

@ -40,10 +40,7 @@
#include "perf_list.h"
#include "perf_stat.h"
#include "perf_record.h"
/// @brief entry point of the perf performance-analysis tool
/// @param argc
/// @param argv
/// @return
int main(int argc, char **argv)
{
#define TWO_ARGS 2

@ -33,29 +33,21 @@
#include <string.h>
#include "option.h"
#include "perf_list.h"
/// @brief parse one command-line option and, according to its type, store its value in the corresponding variable
/// @param argv
/// @param index
/// @param opts
/// @return
static int ParseOption(char **argv, int *index, PerfOption *opts)
{
int ret = 0;
const char *str = NULL;
// iterate over the opts array with a while loop until an empty (null-name) entry is reached
while ((opts->name != NULL) && (*opts->name != 0)) {
if (strcmp(argv[*index], opts->name) == 0) {
switch (opts->type) {
//OPTION_TYPE_UINT: convert the next command-line argument to an unsigned integer and store it in the option's value
switch (opts->type) {
case OPTION_TYPE_UINT:
*opts->value = strtoul(argv[++(*index)], NULL, 0);
break;
//OPTION_TYPE_STRING: assign the next command-line argument to the option's string value
case OPTION_TYPE_STRING:
*opts->str = argv[++(*index)];
break;
// OPTION_TYPE_CALLBACK: call the option's callback, passing the next command-line argument to it.
//If the callback returns non-zero, print a parse-error message and set ret to -1.
case OPTION_TYPE_CALLBACK:
str = argv[++(*index)];
if ((*opts->cb)(str) != 0) {
@ -63,7 +55,6 @@ static int ParseOption(char **argv, int *index, PerfOption *opts)
ret = -1;
}
break;
//default: no matching option type was found; print an invalid-option message and set ret to -1
default:
printf("invalid option\n");
ret = -1;
@ -76,27 +67,19 @@ static int ParseOption(char **argv, int *index, PerfOption *opts)
return -1;
}
/// @brief command-line parser: extracts the options and the subcommand from argv and stores them in opts and cmd
/// @param argc
/// @param argv
/// @param opts
/// @param cmd
/// @return
int ParseOptions(int argc, char **argv, PerfOption *opts, SubCmd *cmd)
{
int i;
// index of the command-line argument currently being processed, starting at 0
int index = 0;
//walk the command-line arguments until the index runs out of range or a non-option argument is met;
//for each option, call ParseOption to parse it into the opts table, returning -1 on failure
while ((index < argc) && (argv[index] != NULL) && (*argv[index] == '-')) {
if (ParseOption(argv, &index, opts) != 0) {
return -1;
}
index++;
}
//if arguments remain after the while loop, the first one becomes cmd->path (the subcommand),
//and the rest are stored in cmd->params; a missing subcommand prints an error and returns -1
if ((index < argc) && (argv[index] != NULL)) {
cmd->path = argv[index];
cmd->params[0] = argv[index];
@ -105,7 +88,7 @@ int ParseOptions(int argc, char **argv, PerfOption *opts, SubCmd *cmd)
printf("no subcmd to execute\n");
return -1;
}
// copy the remaining command-line arguments into cmd->params, stopping at CMD_MAX_PARAMS entries or when the index runs out of range
for (i = 1; (index < argc) && (i < CMD_MAX_PARAMS); index++, i++) {
cmd->params[i] = argv[index];
}
@ -145,10 +128,7 @@ EXIT:
free(list);
return ret;
}
/// @brief convert an event-name string into the corresponding PerfEvent pointer.
///The string is looked up in the global g_events array and the matching PerfEvent entry is returned.
/// @param str
/// @return
static inline const PerfEvent *StrToEvent(const char *str)
{
const PerfEvent *evt = &g_events[0];
@ -160,11 +140,7 @@ static inline const PerfEvent *StrToEvent(const char *str)
}
return NULL;
}
/// @brief ParseEvents parses a comma-separated event string into the event configuration stored in PerfEventConfig
/// @param argv
/// @param eventsCfg
/// @param len
/// @return
int ParseEvents(const char *argv, PerfEventConfig *eventsCfg, unsigned int *len)
{
int ret;
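/* Illustrative sketch (not part of this repository): how ParseOptions is typically driven.
 * The option table `opts` is assumed to be terminated by an empty-name entry, as the loop
 * in ParseOption above implies; everything else uses only names visible in this diff. */
#include <stdio.h>
#include "option.h"

static int ParseExample(int argc, char **argv, PerfOption *opts)
{
    SubCmd cmd = {0};

    /* skip argv[0] (the program name); options come first, then the subcommand */
    if (ParseOptions(argc - 1, &argv[1], opts, &cmd) != 0) {
        printf("failed to parse options\n");
        return -1;
    }
    printf("subcmd: %s\n", cmd.path);   /* first non-option argument */
    return 0;
}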

@ -2311,7 +2311,7 @@ diff -Nupr old/fs/jffs2/erase.c new/fs/jffs2/erase.c
diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c
--- old/fs/jffs2/file.c 2022-05-09 17:22:53.000000000 +0800
+++ new/fs/jffs2/file.c 2022-05-10 09:43:14.250000000 +0800
@@ -9,334 +9,31 @@
@@ -9,335 +9,30 @@
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
@ -2348,7 +2348,7 @@ diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+ /* FIXME: This works only with one file system mounted at a time */
int ret;
-
- ret = file_write_and_wait_range(filp, start, end);
+ ret = jffs2_read_inode_range(c, f, gc_buffer,
+ offset & ~(PAGE_SIZE-1), PAGE_SIZE);
@ -2361,10 +2361,8 @@ diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c
- inode_unlock(inode);
-
- return 0;
+ return ERR_PTR(ret);
+ return gc_buffer;
}
-}
-
-const struct file_operations jffs2_file_operations =
-{
- .llseek = generic_file_llseek,
@ -2389,10 +2387,7 @@ diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c
-};
-
-const struct address_space_operations jffs2_file_address_operations =
+void jffs2_gc_release_page(struct jffs2_sb_info *c,
+ unsigned char *ptr,
+ unsigned long *priv)
{
-{
- .readpage = jffs2_readpage,
- .write_begin = jffs2_write_begin,
- .write_end = jffs2_write_end,
@ -2448,29 +2443,35 @@ diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c
- ret = jffs2_do_readpage_unlock(pg->mapping->host, pg);
- mutex_unlock(&f->sem);
- return ret;
-}
-
+ return ERR_PTR(ret);
+ return gc_buffer;
}
-static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
+void jffs2_gc_release_page(struct jffs2_sb_info *c,
+ unsigned char *ptr,
+ unsigned long *priv)
{
- struct page *pg;
- struct inode *inode = mapping->host;
- struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
- pgoff_t index = pos >> PAGE_SHIFT;
- uint32_t pageofs = index << PAGE_SHIFT;
- int ret = 0;
-
- jffs2_dbg(1, "%s()\n", __func__);
-
- if (pos > inode->i_size) {
- /* Make new hole frag from old EOF to new position */
- if (pageofs > inode->i_size) {
- /* Make new hole frag from old EOF to new page */
- struct jffs2_raw_inode ri;
- struct jffs2_full_dnode *fn;
- uint32_t alloc_len;
-
- jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new position\n",
- (unsigned int)inode->i_size, (uint32_t)pos);
- jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
- (unsigned int)inode->i_size, pageofs);
-
- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
@ -2490,10 +2491,10 @@ diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c
- ri.mode = cpu_to_jemode(inode->i_mode);
- ri.uid = cpu_to_je16(i_uid_read(inode));
- ri.gid = cpu_to_je16(i_gid_read(inode));
- ri.isize = cpu_to_je32((uint32_t)pos);
- ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
- ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW());
- ri.offset = cpu_to_je32(inode->i_size);
- ri.dsize = cpu_to_je32((uint32_t)pos - inode->i_size);
- ri.dsize = cpu_to_je32(pageofs - inode->i_size);
- ri.csize = cpu_to_je32(0);
- ri.compr = JFFS2_COMPR_ZERO;
- ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
@ -2523,7 +2524,7 @@ diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c
- goto out_err;
- }
- jffs2_complete_reservation(c);
- inode->i_size = pos;
- inode->i_size = pageofs;
- mutex_unlock(&f->sem);
- }
-

@ -33,46 +33,25 @@
#include "los_printf.h"
#include "los_toolchain.h"
/* bit-index masks used by the bitmap operations below */
#define OS_BITMAP_MASK 0x1FU
#define OS_BITMAP_WORD_MASK ~0UL
/* find first zero bit starting from LSB */
STATIC INLINE UINT16 Ffz(UINTPTR x)
{
// gcc built-in function
return __builtin_ffsl(~x) - 1;
}
/* set the bit at position (pos mod 32) in *bitmap */
VOID LOS_BitmapSet(UINT32 *bitmap, UINT16 pos)
{
if (bitmap == NULL) {
return;
}
*bitmap |= 1U << (pos & OS_BITMAP_MASK); //00011111
*bitmap |= 1U << (pos & OS_BITMAP_MASK);
}
/* clear the bit at position (pos mod 32) in *bitmap */
VOID LOS_BitmapClr(UINT32 *bitmap, UINT16 pos)
{
if (bitmap == NULL) {
@ -82,9 +61,6 @@ VOID LOS_BitmapClr(UINT32 *bitmap, UINT16 pos)
*bitmap &= ~(1U << (pos & OS_BITMAP_MASK));
}
/* return the index of the highest set bit in bitmap */
UINT16 LOS_HighBitGet(UINT32 bitmap)
{
if (bitmap == 0) {
@ -94,9 +70,6 @@ UINT16 LOS_HighBitGet(UINT32 bitmap)
return (OS_BITMAP_MASK - CLZ(bitmap));
}
/* return the index of the lowest set bit in bitmap */
UINT16 LOS_LowBitGet(UINT32 bitmap)
{
if (bitmap == 0) {
@ -106,9 +79,6 @@ UINT16 LOS_LowBitGet(UINT32 bitmap)
return CTZ(bitmap);
}
/* set numsSet consecutive bits to 1, starting at bit position start */
VOID LOS_BitmapSetNBits(UINTPTR *bitmap, UINT32 start, UINT32 numsSet)
{
UINTPTR *p = bitmap + BITMAP_WORD(start);
@ -129,9 +99,6 @@ VOID LOS_BitmapSetNBits(UINTPTR *bitmap, UINT32 start, UINT32 numsSet)
}
}
/* clear numsClear consecutive bits to 0, starting at bit position start */
VOID LOS_BitmapClrNBits(UINTPTR *bitmap, UINT32 start, UINT32 numsClear)
{
UINTPTR *p = bitmap + BITMAP_WORD(start);
@ -151,9 +118,7 @@ VOID LOS_BitmapClrNBits(UINTPTR *bitmap, UINT32 start, UINT32 numsClear)
*p &= ~maskToClear;
}
}
/* find the first zero bit within the first numBits bits of the bitmap */
INT32 LOS_BitmapFfz(UINTPTR *bitmap, UINT32 numBits)
{
INT32 bit, i;
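/* Illustrative usage sketch (not part of the diff above) for the word-sized bitmap helpers
 * whose signatures appear in this file; the los_bitmap.h header name and PRINTK from
 * los_printf.h are assumptions. */
#include "los_bitmap.h"

STATIC VOID BitmapDemo(VOID)
{
    UINT32 map = 0;

    LOS_BitmapSet(&map, 3);   /* map == 0x08 */
    LOS_BitmapSet(&map, 37);  /* positions are taken modulo 32, so this sets bit 5 */
    LOS_BitmapClr(&map, 3);   /* map == 0x20 */
    PRINTK("low=%u high=%u\n", (UINT32)LOS_LowBitGet(map), (UINT32)LOS_HighBitGet(map)); /* both 5 */
}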

@ -32,10 +32,8 @@
#include "los_task_pri.h"
#include "los_vm_dump.h"
// get the parent process's PID
STATIC UINT32 GetCurrParentPid(UINT32 pid, const LosProcessCB *processCB)
{
// if there is no parent process
if (processCB->parentProcess == NULL) {
return 0;
}
@ -52,7 +50,6 @@ STATIC UINT32 GetCurrParentPid(UINT32 pid, const LosProcessCB *processCB)
return processCB->parentProcess->processID;
}
// get the current task ID
STATIC INLINE UINT32 GetCurrTid(const LosTaskCB *taskCB)
{
#ifdef LOSCFG_PID_CONTAINER
@ -63,7 +60,6 @@ STATIC INLINE UINT32 GetCurrTid(const LosTaskCB *taskCB)
return taskCB->taskID;
}
// get the process status
STATIC UINT16 GetProcessStatus(LosProcessCB *processCB)
{
UINT16 status;
@ -80,7 +76,6 @@ STATIC UINT16 GetProcessStatus(LosProcessCB *processCB)
return status;
}
// get the process information
STATIC VOID GetProcessInfo(ProcessInfo *pcbInfo, const LosProcessCB *processCB)
{
SchedParam param = {0};
@ -138,7 +133,6 @@ STATIC VOID GetProcessMemInfo(ProcessInfo *pcbInfo, const LosProcessCB *processC
}
#endif
// get the thread information
STATIC VOID GetThreadInfo(ProcessThreadInfo *threadInfo, LosProcessCB *processCB)
{
SchedParam param = {0};

File diff suppressed because it is too large.

@ -40,7 +40,7 @@
#ifdef LOSCFG_KERNEL_SMP
STATIC struct SmpOps *g_smpOps = NULL;
/// secondary-core initialization under SMP; called once on every core
STATIC VOID OsSmpSecondaryInit(VOID *arg)
{
UNUSED(arg);
@ -56,7 +56,7 @@ STATIC VOID OsSmpSecondaryInit(VOID *arg)
OsSchedStart();
}
/// register the SMP operations interface from outside the module
VOID LOS_SmpOpsSet(struct SmpOps *ops)
{
g_smpOps = ops;

@ -28,62 +28,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Basic concepts:
A software timer is simulated in software and driven by the system Tick; it extends the
limited number of hardware timers available to Huawei LiteOS. After the configured number
of Ticks elapses, a user-defined callback is invoked. Timer states: OS_SWTMR_STATUS_UNUSED
(not in use), OS_SWTMR_STATUS_CREATED (created or stopped), and OS_SWTMR_STATUS_TICKING
(counting, entered via LOS_SwtmrStart). The module is switched on through make menuconfig.
Typical flow: LOS_SwtmrCreate creates a timer, LOS_SwtmrStart starts it, LOS_SwtmrTimeGet
returns the remaining Ticks, LOS_SwtmrStop stops it, and LOS_SwtmrDelete deletes it.
Callbacks run in the software-timer task context, so they should stay short and must not
block or suspend that task. */
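/* Minimal usage sketch of the API summarized above (illustrative only). The parameter order
 * of LOS_SwtmrCreate, the handler signature, and the LOS_SWTMR_MODE_PERIOD constant are
 * assumptions based on the LiteOS headers rather than on code shown in this diff. */
#include "los_swtmr.h"

STATIC VOID DemoTimerHandler(UINTPTR arg)
{
    PRINTK("swtmr fired, arg=%u\n", (UINT32)arg);
}

STATIC VOID SwtmrDemo(VOID)
{
    UINT16 id;
    UINT32 ticksLeft;

    /* 100-tick periodic timer that calls DemoTimerHandler(1) on each expiry */
    if (LOS_SwtmrCreate(100, LOS_SWTMR_MODE_PERIOD, DemoTimerHandler, &id, 1) != LOS_OK) {
        return;
    }
    (VOID)LOS_SwtmrStart(id);
    (VOID)LOS_SwtmrTimeGet(id, &ticksLeft);  /* remaining ticks until the next expiry */
    (VOID)LOS_SwtmrStop(id);
    (VOID)LOS_SwtmrDelete(id);
}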
#include "los_swtmr_pri.h"
#include "los_init.h"
#include "los_process_pri.h"
@ -106,14 +51,14 @@ LITE_OS_SEC_BSS UINT8 *g_swtmrHandlerPool = NULL; /* Pool of Swtmr Han
LITE_OS_SEC_BSS LOS_DL_LIST g_swtmrFreeList; /* Free list of Software Timer */
/* spinlock for swtmr module, only available on SMP mode */
LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_swtmrSpin);//initialize the software-timer spinlock; only needed under SMP, since spinlocks exist to synchronize multiple CPU cores
#define SWTMR_LOCK(state) LOS_SpinLockSave(&g_swtmrSpin, &(state))//take the software-timer spinlock
#define SWTMR_UNLOCK(state) LOS_SpinUnlockRestore(&g_swtmrSpin, (state))//release the software-timer spinlock
LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_swtmrSpin);
#define SWTMR_LOCK(state) LOS_SpinLockSave(&g_swtmrSpin, &(state))
#define SWTMR_UNLOCK(state) LOS_SpinUnlockRestore(&g_swtmrSpin, (state))
typedef struct {
SortLinkAttribute swtmrSortLink;
LosTaskCB *swtmrTask; /* software timer task id *///the timer task
LOS_DL_LIST swtmrHandlerQueue; /* software timer timeout queue id *///the timer timeout queue
LosTaskCB *swtmrTask; /* software timer task id */
LOS_DL_LIST swtmrHandlerQueue; /* software timer timeout queue id */
} SwtmrRunqueue;
STATIC SwtmrRunqueue g_swtmrRunqueue[LOSCFG_KERNEL_CORE_NUM];
@ -309,7 +254,7 @@ STATIC INLINE VOID ScanSwtmrTimeList(SwtmrRunqueue *srq)
LOS_SpinUnlockRestore(&swtmrSortLink->spinLock, intSave);
return;
}
//entry function of the software-timer task; it runs at the highest task priority, 0
STATIC VOID SwtmrTask(VOID)
{
SwtmrHandlerItem swtmrHandle;
@ -318,7 +263,7 @@ STATIC VOID SwtmrTask(VOID)
SwtmrRunqueue *srq = &g_swtmrRunqueue[ArchCurrCpuid()];
LOS_DL_LIST *head = &srq->swtmrHandlerQueue;
for (;;) {//loop forever, draining items from the queue until it is empty
for (;;) {
waitTime = OsSortLinkGetNextExpireTime(OsGetCurrSchedTimeCycle(), &srq->swtmrSortLink);
if (waitTime != 0) {
SCHEDULER_LOCK(intSave);
@ -334,30 +279,29 @@ STATIC VOID SwtmrTask(VOID)
LOS_ListDelete(&swtmrHandlePtr->node);
(VOID)memcpy_s(&swtmrHandle, sizeof(SwtmrHandlerItem), swtmrHandlePtr, sizeof(SwtmrHandlerItem));
(VOID)LOS_MemboxFree(g_swtmrHandlerPool, swtmrHandlePtr);//free the static membox block; in the OpenHarmony kernel only software-timer registration uses static memory
(VOID)LOS_MemboxFree(g_swtmrHandlerPool, swtmrHandlePtr);
SwtmrHandler(&swtmrHandle);
}
}
}
//create the software-timer task; each CPU core can have its own software-timer task
STATIC UINT32 SwtmrTaskCreate(UINT16 cpuid, UINT32 *swtmrTaskID)
{
UINT32 ret;
TSK_INIT_PARAM_S swtmrTask;
(VOID)memset_s(&swtmrTask, sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));//zero the parameter block
swtmrTask.pfnTaskEntry = (TSK_ENTRY_FUNC)SwtmrTask;//entry function
swtmrTask.uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;//default 16 KiB kernel task stack
swtmrTask.pcName = "Swt_Task";//task name
swtmrTask.usTaskPrio = 0;//priority 0, the highest
swtmrTask.uwResved = LOS_TASK_STATUS_DETACHED;//detached mode
(VOID)memset_s(&swtmrTask, sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));
swtmrTask.pfnTaskEntry = (TSK_ENTRY_FUNC)SwtmrTask;
swtmrTask.uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
swtmrTask.pcName = "Swt_Task";
swtmrTask.usTaskPrio = 0;
swtmrTask.uwResved = LOS_TASK_STATUS_DETACHED;
#ifdef LOSCFG_KERNEL_SMP
swtmrTask.usCpuAffiMask = CPUID_TO_AFFI_MASK(cpuid);//hand this task to the given CPU
swtmrTask.usCpuAffiMask = CPUID_TO_AFFI_MASK(cpuid);
#endif
ret = LOS_TaskCreate(swtmrTaskID, &swtmrTask);//create the task and request scheduling
ret = LOS_TaskCreate(swtmrTaskID, &swtmrTask);
if (ret == LOS_OK) {
OS_TCB_FROM_TID(*swtmrTaskID)->taskStatus |= OS_TASK_FLAG_SYSTEM_TASK;//mark it as a system task
OS_TCB_FROM_TID(*swtmrTaskID)->taskStatus |= OS_TASK_FLAG_SYSTEM_TASK;
}
return ret;
@ -375,16 +319,16 @@ BOOL OsIsSwtmrTask(const LosTaskCB *taskCB)
}
return FALSE;
}
//reclaim the software timers owned by the given process
LITE_OS_SEC_TEXT_INIT VOID OsSwtmrRecycle(UINTPTR ownerID)
{
for (UINT16 index = 0; index < LOSCFG_BASE_CORE_SWTMR_LIMIT; index++) {//a process often owns several timers
for (UINT16 index = 0; index < LOSCFG_BASE_CORE_SWTMR_LIMIT; index++) {
if (g_swtmrCBArray[index].uwOwnerPid == ownerID) {
LOS_SwtmrDelete(index);//delete the timer
LOS_SwtmrDelete(index);
}
}
}
//software-timer initialization; note that on multi-CPU systems this function runs more than once
STATIC UINT32 SwtmrBaseInit(VOID)
{
UINT32 ret;
@ -394,15 +338,15 @@ STATIC UINT32 SwtmrBaseInit(VOID)
return LOS_ERRNO_SWTMR_NO_MEMORY;
}
(VOID)memset_s(swtmr, size, 0, size);//zero the array
g_swtmrCBArray = swtmr;//the software-timer control-block pool
LOS_ListInit(&g_swtmrFreeList);//initialize the free list
(VOID)memset_s(swtmr, size, 0, size);
g_swtmrCBArray = swtmr;
LOS_ListInit(&g_swtmrFreeList);
for (UINT16 index = 0; index < LOSCFG_BASE_CORE_SWTMR_LIMIT; index++, swtmr++) {
swtmr->usTimerID = index;//assign IDs in order
LOS_ListTailInsert(&g_swtmrFreeList, &swtmr->stSortList.sortLinkNode);//hook the node onto the free list via sortLinkNode
swtmr->usTimerID = index;
LOS_ListTailInsert(&g_swtmrFreeList, &swtmr->stSortList.sortLinkNode);
}
//to manage this with a static membox pool, the size must be computed with LOS_MEMBOX_SIZE, since extra header space is needed for bookkeeping
size = LOS_MEMBOX_SIZE(sizeof(SwtmrHandlerItem), OS_SWTMR_HANDLE_QUEUE_SIZE);//reserve a region as the static membox pool for software-timer handlers
size = LOS_MEMBOX_SIZE(sizeof(SwtmrHandlerItem), OS_SWTMR_HANDLE_QUEUE_SIZE);
g_swtmrHandlerPool = (UINT8 *)LOS_MemAlloc(m_aucSysMem1, size); /* system resident resource */
if (g_swtmrHandlerPool == NULL) {
return LOS_ERRNO_SWTMR_NO_MEMORY;
@ -530,14 +474,14 @@ STATIC UINT64 SwtmrToStart(SWTMR_CTRL_S *swtmr, UINT16 cpuid)
if ((swtmr->uwOverrun == 0) && ((swtmr->ucMode == LOS_SWTMR_MODE_ONCE) ||
(swtmr->ucMode == LOS_SWTMR_MODE_OPP) ||
(swtmr->ucMode == LOS_SWTMR_MODE_NO_SELFDELETE))) {//for a one-shot timer
ticks = swtmr->uwExpiry;//use the one-shot expiry interval
(swtmr->ucMode == LOS_SWTMR_MODE_NO_SELFDELETE))) {
ticks = swtmr->uwExpiry;
} else {
ticks = swtmr->uwInterval;
}
swtmr->ucState = OS_SWTMR_STATUS_TICKING;//mark the timer as counting
swtmr->ucState = OS_SWTMR_STATUS_TICKING;
UINT64 period = (UINT64)ticks * OS_CYCLE_PER_TICK;//convert the tick count into cycles
UINT64 period = (UINT64)ticks * OS_CYCLE_PER_TICK;
UINT64 responseTime = swtmr->startTime + period;
UINT64 currTime = OsGetCurrSchedTimeCycle();
if (responseTime < currTime) {
@ -578,7 +522,7 @@ STATIC INLINE VOID SwtmrStart(SWTMR_CTRL_S *swtmr)
STATIC INLINE VOID SwtmrDelete(SWTMR_CTRL_S *swtmr)
{
/* insert to free list */
LOS_ListTailInsert(&g_swtmrFreeList, &swtmr->stSortList.sortLinkNode);//put it straight back on the free list for reuse
LOS_ListTailInsert(&g_swtmrFreeList, &swtmr->stSortList.sortLinkNode);
swtmr->ucState = OS_SWTMR_STATUS_UNUSED;
swtmr->uwOwnerPid = OS_INVALID_VALUE;
@ -697,7 +641,7 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsSwtmrTimeGet(const SWTMR_CTRL_S *swtmr)
}
return (UINT32)time;
}
//create a timer: set its duration, mode and callback, and return the timer ID
LITE_OS_SEC_TEXT_INIT UINT32 LOS_SwtmrCreate(UINT32 interval,
UINT8 mode,
SWTMR_PROC_FUNC handler,
@ -726,7 +670,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SwtmrCreate(UINT32 interval,
}
SWTMR_LOCK(intSave);
if (LOS_ListEmpty(&g_swtmrFreeList)) {//the free list must not be empty
if (LOS_ListEmpty(&g_swtmrFreeList)) {
SWTMR_UNLOCK(intSave);
return LOS_ERRNO_SWTMR_MAXSIZE;
}
@ -737,19 +681,19 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SwtmrCreate(UINT32 interval,
SWTMR_UNLOCK(intSave);
swtmr->uwOwnerPid = (UINTPTR)OsCurrProcessGet();
swtmr->pfnHandler = handler;//callback invoked when the timer expires
swtmr->ucMode = mode;//timer mode
swtmr->pfnHandler = handler;
swtmr->ucMode = mode;
swtmr->uwOverrun = 0;
swtmr->uwInterval = interval;//periodic timeout interval
swtmr->uwExpiry = interval;//one-shot timeout interval
swtmr->uwArg = arg;//argument passed to the callback
swtmr->ucState = OS_SWTMR_STATUS_CREATED;//created state
swtmr->uwInterval = interval;
swtmr->uwExpiry = interval;
swtmr->uwArg = arg;
swtmr->ucState = OS_SWTMR_STATUS_CREATED;
SET_SORTLIST_VALUE(&swtmr->stSortList, OS_SORT_LINK_INVALID_TIME);
*swtmrID = swtmr->usTimerID;
OsHookCall(LOS_HOOK_TYPE_SWTMR_CREATE, swtmr);
return LOS_OK;
}
//public interface: start a timer, identified by its timer ID
LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStart(UINT16 swtmrID)
{
SWTMR_CTRL_S *swtmr = NULL;
@ -761,27 +705,27 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStart(UINT16 swtmrID)
return LOS_ERRNO_SWTMR_ID_INVALID;
}
swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;//index into the control-block array
swtmr = g_swtmrCBArray + swtmrCBID;//fetch the timer control block
swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;
swtmr = g_swtmrCBArray + swtmrCBID;
SWTMR_LOCK(intSave);
if (swtmr->usTimerID != swtmrID) {//the IDs must match
if (swtmr->usTimerID != swtmrID) {
SWTMR_UNLOCK(intSave);
return LOS_ERRNO_SWTMR_ID_INVALID;
}
switch (swtmr->ucState) {//check the timer state
switch (swtmr->ucState) {
case OS_SWTMR_STATUS_UNUSED:
ret = LOS_ERRNO_SWTMR_NOT_CREATED;
break;
/*if the timer is already running, it should be stopped first and then started again
/*
* If the status of swtmr is timing, it should stop the swtmr first,
* then start the swtmr again.
*/
case OS_SWTMR_STATUS_TICKING://the timer is currently counting
SwtmrStop(swtmr);//stop it first; note there is no break here -- SwtmrStop returns the state to OS_SWTMR_STATUS_CREATED and execution falls through to start it again
case OS_SWTMR_STATUS_TICKING:
SwtmrStop(swtmr);
/* fall-through */
case OS_SWTMR_STATUS_CREATED://already created
case OS_SWTMR_STATUS_CREATED:
SwtmrStart(swtmr);
break;
default:
@ -793,7 +737,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStart(UINT16 swtmrID)
OsHookCall(LOS_HOOK_TYPE_SWTMR_START, swtmr);
return ret;
}
//public interface: stop a timer, identified by its timer ID
LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStop(UINT16 swtmrID)
{
SWTMR_CTRL_S *swtmr = NULL;
@ -805,24 +749,24 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStop(UINT16 swtmrID)
return LOS_ERRNO_SWTMR_ID_INVALID;
}
swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;//index into the control-block array
swtmr = g_swtmrCBArray + swtmrCBID;//fetch the timer control block
swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;
swtmr = g_swtmrCBArray + swtmrCBID;
SWTMR_LOCK(intSave);
if (swtmr->usTimerID != swtmrID) {//the IDs must match
if (swtmr->usTimerID != swtmrID) {
SWTMR_UNLOCK(intSave);
return LOS_ERRNO_SWTMR_ID_INVALID;
}
switch (swtmr->ucState) {//check the timer state
switch (swtmr->ucState) {
case OS_SWTMR_STATUS_UNUSED:
ret = LOS_ERRNO_SWTMR_NOT_CREATED;//report that it was never created
ret = LOS_ERRNO_SWTMR_NOT_CREATED;
break;
case OS_SWTMR_STATUS_CREATED:
ret = LOS_ERRNO_SWTMR_NOT_STARTED;//report that it was never started
ret = LOS_ERRNO_SWTMR_NOT_STARTED;
break;
case OS_SWTMR_STATUS_TICKING://currently counting
SwtmrStop(swtmr);//stop the running timer
case OS_SWTMR_STATUS_TICKING:
SwtmrStop(swtmr);
break;
default:
ret = LOS_ERRNO_SWTMR_STATUS_INVALID;
@ -833,7 +777,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStop(UINT16 swtmrID)
OsHookCall(LOS_HOOK_TYPE_SWTMR_STOP, swtmr);
return ret;
}
//public interface: get the remaining Tick count of a software timer, returned through *tick
LITE_OS_SEC_TEXT UINT32 LOS_SwtmrTimeGet(UINT16 swtmrID, UINT32 *tick)
{
SWTMR_CTRL_S *swtmr = NULL;
@ -849,11 +793,11 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrTimeGet(UINT16 swtmrID, UINT32 *tick)
return LOS_ERRNO_SWTMR_TICK_PTR_NULL;
}
swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;//index into the control-block array
swtmr = g_swtmrCBArray + swtmrCBID;//fetch the timer control block
swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;
swtmr = g_swtmrCBArray + swtmrCBID;
SWTMR_LOCK(intSave);
if (swtmr->usTimerID != swtmrID) {//the IDs must match
if (swtmr->usTimerID != swtmrID) {
SWTMR_UNLOCK(intSave);
return LOS_ERRNO_SWTMR_ID_INVALID;
}
@ -864,8 +808,8 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrTimeGet(UINT16 swtmrID, UINT32 *tick)
case OS_SWTMR_STATUS_CREATED:
ret = LOS_ERRNO_SWTMR_NOT_STARTED;
break;
case OS_SWTMR_STATUS_TICKING://currently counting
*tick = OsSwtmrTimeGet(swtmr);//read the remaining time
case OS_SWTMR_STATUS_TICKING:
*tick = OsSwtmrTimeGet(swtmr);
break;
default:
ret = LOS_ERRNO_SWTMR_STATUS_INVALID;
@ -874,7 +818,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrTimeGet(UINT16 swtmrID, UINT32 *tick)
SWTMR_UNLOCK(intSave);
return ret;
}
//public interface: delete a timer
LITE_OS_SEC_TEXT UINT32 LOS_SwtmrDelete(UINT16 swtmrID)
{
SWTMR_CTRL_S *swtmr = NULL;
@ -886,11 +830,11 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrDelete(UINT16 swtmrID)
return LOS_ERRNO_SWTMR_ID_INVALID;
}
swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;//index into the control-block array
swtmr = g_swtmrCBArray + swtmrCBID;//fetch the timer control block
swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;
swtmr = g_swtmrCBArray + swtmrCBID;
SWTMR_LOCK(intSave);
if (swtmr->usTimerID != swtmrID) {//the IDs must match
if (swtmr->usTimerID != swtmrID) {
SWTMR_UNLOCK(intSave);
return LOS_ERRNO_SWTMR_ID_INVALID;
}
@ -899,10 +843,10 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrDelete(UINT16 swtmrID)
case OS_SWTMR_STATUS_UNUSED:
ret = LOS_ERRNO_SWTMR_NOT_CREATED;
break;
case OS_SWTMR_STATUS_TICKING://if it is still counting, stop it first and then delete it; note there is no break here
case OS_SWTMR_STATUS_TICKING:
SwtmrStop(swtmr);
/* fall-through */
case OS_SWTMR_STATUS_CREATED://now delete the timer
case OS_SWTMR_STATUS_CREATED:
SwtmrDelete(swtmr);
break;
default:
@ -915,4 +859,4 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrDelete(UINT16 swtmrID)
return ret;
}
#endif /* LOSCFG_BASE_CORE_SWTMR_ENABLE */
#endif /* LOSCFG_BASE_CORE_SWTMR_ENABLE */

@ -68,83 +68,13 @@
#if (LOSCFG_BASE_CORE_TSK_LIMIT <= 0)
#error "task maxnum cannot be zero"
#endif /* LOSCFG_BASE_CORE_TSK_LIMIT <= 0 */
/* Basic concepts:
A task is the smallest unit of scheduling and competes with other tasks for the CPU; the
kernel schedules preemptively, with round-robin among tasks of equal priority. There are
32 priorities [0-31], 0 being the highest and 31 the lowest.
Task states: Ready (queued, waiting for the CPU), Running (executing on a CPU), Blocked
(suspended, delayed, or pending on a resource), and Exit/Dead (finished and waiting for
its resources to be reclaimed); an invalid or already-recycled task cannot be operated on.
Every task is identified by a task ID, which indexes its task control block (TCB). The TCB
records the stack pointer, task ID, state, priority, and the other per-task bookkeeping
Huawei LiteOS needs to manage and schedule the task, together with the task stack itself. */
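/* Illustrative sketch of the state transitions described above (not from this file); every
 * call used here appears elsewhere in this diff, and the priority value is arbitrary. */
STATIC VOID TaskLifecycleDemo(UINT32 workerTid)
{
    (VOID)LOS_TaskSuspend(workerTid);    /* Ready/Running -> Blocked (suspended) */
    (VOID)LOS_TaskResume(workerTid);     /* suspended -> Ready */
    (VOID)LOS_TaskPriSet(workerTid, 25); /* adjust priority; 0 is highest, 31 lowest */
    (VOID)LOS_TaskDelay(10);             /* the calling task blocks for 10 ticks */
}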
LITE_OS_SEC_BSS LosTaskCB *g_taskCBArray;//task pool, 128 entries by default
LITE_OS_SEC_BSS LOS_DL_LIST g_losFreeTask;//free-task list
LITE_OS_SEC_BSS LOS_DL_LIST g_taskRecycleList;//recycle-task list
LITE_OS_SEC_BSS UINT32 g_taskMaxNum;//maximum number of tasks
LITE_OS_SEC_BSS LosTaskCB *g_taskCBArray;
LITE_OS_SEC_BSS LOS_DL_LIST g_losFreeTask;
LITE_OS_SEC_BSS LOS_DL_LIST g_taskRecycleList;
LITE_OS_SEC_BSS UINT32 g_taskMaxNum;
LITE_OS_SEC_BSS UINT32 g_taskScheduled; /* one bit for each cores */
LITE_OS_SEC_BSS EVENT_CB_S g_resourceEvent;//resource event
LITE_OS_SEC_BSS EVENT_CB_S g_resourceEvent;
/* spinlock for task module, only available on SMP mode */
LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_taskSpin);
@ -152,7 +82,7 @@ STATIC VOID OsConsoleIDSetHook(UINT32 param1,
UINT32 param2) __attribute__((weakref("OsSetConsoleID")));
/* temp task blocks for booting procedure */
LITE_OS_SEC_BSS STATIC LosTaskCB g_mainTask[LOSCFG_KERNEL_CORE_NUM];//temporary tasks used during the boot procedure
LITE_OS_SEC_BSS STATIC LosTaskCB g_mainTask[LOSCFG_KERNEL_CORE_NUM];
LosTaskCB *OsGetMainTask(VOID)
{
@ -162,23 +92,23 @@ LosTaskCB *OsGetMainTask(VOID)
VOID OsSetMainTask(VOID)
{
UINT32 i;
CHAR *name = "osMain";//task name
CHAR *name = "osMain";
SchedParam schedParam = { 0 };
schedParam.policy = LOS_SCHED_RR;
schedParam.basePrio = OS_PROCESS_PRIORITY_HIGHEST;
schedParam.priority = OS_TASK_PRIORITY_LOWEST;
//set up a main task for every CPU core
for (i = 0; i < LOSCFG_KERNEL_CORE_NUM; i++) {
g_mainTask[i].taskStatus = OS_TASK_STATUS_UNUSED;
g_mainTask[i].taskID = LOSCFG_BASE_CORE_TSK_LIMIT;//128 by default
g_mainTask[i].taskID = LOSCFG_BASE_CORE_TSK_LIMIT;
g_mainTask[i].processCB = OS_KERNEL_PROCESS_GROUP;
#ifdef LOSCFG_KERNEL_SMP_LOCKDEP
g_mainTask[i].lockDep.lockDepth = 0;
g_mainTask[i].lockDep.waitLock = NULL;
#endif
(VOID)strncpy_s(g_mainTask[i].taskName, OS_TCB_NAME_LEN, name, OS_TCB_NAME_LEN - 1);
LOS_ListInit(&g_mainTask[i].lockList);//initialize the task's lock list, which holds the mutexes the task has acquired
LOS_ListInit(&g_mainTask[i].lockList);
(VOID)OsSchedParamInit(&g_mainTask[i], schedParam.policy, &schedParam, NULL);
}
}
@ -192,34 +122,31 @@ VOID OsSetMainTaskProcess(UINTPTR processCB)
#endif
}
}
//idle task; every CPU has its own idle task
LITE_OS_SEC_TEXT WEAK VOID OsIdleTask(VOID)
{
while (1) {//nothing but an endless loop
WFI;//the WFI instruction puts the ARM core into a low-power standby state until an interrupt arrives
while (1) {
WFI;
}
}
VOID OsTaskInsertToRecycleList(LosTaskCB *taskCB)
{
LOS_ListTailInsert(&g_taskRecycleList, &taskCB->pendList);//put the task on the recycle list to await reclamation
LOS_ListTailInsert(&g_taskRecycleList, &taskCB->pendList);
}
/*
When a task exits, OS_TCB_FROM_PENDLIST converts the first LOS_DL_LIST node on its joinList
back into the joining LosTaskCB so that the waiting task can be woken up.
*/
LITE_OS_SEC_TEXT_INIT VOID OsTaskJoinPostUnsafe(LosTaskCB *taskCB)
{
if (taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) {//handle a joinable task
if (!LOS_ListEmpty(&taskCB->joinList)) {//every node on joinList belongs to a blocked joiner
LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(taskCB->joinList)));//recover the joining task from the first node of the join list
OsTaskWakeClearPendMask(resumedTask);//clear the task's pending mask
if (taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) {
if (!LOS_ListEmpty(&taskCB->joinList)) {
LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(taskCB->joinList)));
OsTaskWakeClearPendMask(resumedTask);
resumedTask->ops->wake(resumedTask);
}
}
taskCB->taskStatus |= OS_TASK_STATUS_EXIT;//mark the task as exited
taskCB->taskStatus |= OS_TASK_STATUS_EXIT;
}
//pend the caller on the target task's join list; when the target exits it wakes the joined waiters
LITE_OS_SEC_TEXT UINT32 OsTaskJoinPendUnsafe(LosTaskCB *taskCB)
{
if (taskCB->taskStatus & OS_TASK_STATUS_INIT) {
@ -238,13 +165,13 @@ LITE_OS_SEC_TEXT UINT32 OsTaskJoinPendUnsafe(LosTaskCB *taskCB)
return LOS_EINVAL;
}
//switch the task to detached mode; detach and join are mutually exclusive states
LITE_OS_SEC_TEXT UINT32 OsTaskSetDetachUnsafe(LosTaskCB *taskCB)
{
if (taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) {//currently in the join state
if (LOS_ListEmpty(&(taskCB->joinList))) {//no joiner is waiting on the list
LOS_ListDelete(&(taskCB->joinList));//"deleting" here just makes the node point to itself
taskCB->taskStatus &= ~OS_TASK_FLAG_PTHREAD_JOIN;//drop the JOIN flag
if (taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) {
if (LOS_ListEmpty(&(taskCB->joinList))) {
LOS_ListDelete(&(taskCB->joinList));
taskCB->taskStatus &= ~OS_TASK_FLAG_PTHREAD_JOIN;
return LOS_OK;
}
/* This error code has a special purpose and is not allowed to appear again on the interface */
@ -254,40 +181,39 @@ LITE_OS_SEC_TEXT UINT32 OsTaskSetDetachUnsafe(LosTaskCB *taskCB)
return LOS_EINVAL;
}
//initialize the task module
LITE_OS_SEC_TEXT_INIT UINT32 OsTaskInit(UINTPTR processCB)
{
UINT32 index;
UINT32 size;
UINT32 ret;
g_taskMaxNum = LOSCFG_BASE_CORE_TSK_LIMIT;//the task pool holds at most 128 tasks by default: a fixed pool serving a stream of threads
size = (g_taskMaxNum + 1) * sizeof(LosTaskCB);//total amount of memory to allocate
g_taskMaxNum = LOSCFG_BASE_CORE_TSK_LIMIT;
size = (g_taskMaxNum + 1) * sizeof(LosTaskCB);
/*
* This memory is resident memory and is used to save the system resources
* of task control block and will not be freed.
*/
g_taskCBArray = (LosTaskCB *)LOS_MemAlloc(m_aucSysMem0, size);//the task pool is resident memory and is never freed
g_taskCBArray = (LosTaskCB *)LOS_MemAlloc(m_aucSysMem0, size);
if (g_taskCBArray == NULL) {
ret = LOS_ERRNO_TSK_NO_MEMORY;
goto EXIT;
}
(VOID)memset_s(g_taskCBArray, size, 0, size);
LOS_ListInit(&g_losFreeTask);//initialize the free-task list
LOS_ListInit(&g_taskRecycleList);//initialize the recycle-task list
for (index = 0; index < g_taskMaxNum; index++) {//initialize the tasks one by one
g_taskCBArray[index].taskStatus = OS_TASK_STATUS_UNUSED;//unused by default
g_taskCBArray[index].taskID = index;//task IDs range over [0, g_taskMaxNum - 1]
LOS_ListInit(&g_losFreeTask);
LOS_ListInit(&g_taskRecycleList);
for (index = 0; index < g_taskMaxNum; index++) {
g_taskCBArray[index].taskStatus = OS_TASK_STATUS_UNUSED;
g_taskCBArray[index].taskID = index;
g_taskCBArray[index].processCB = processCB;
LOS_ListTailInsert(&g_losFreeTask, &g_taskCBArray[index].pendList);//insert into the free list via the pendList node
}//note that the pendList node is what gets linked here, so the TCB must be recovered with OS_TCB_FROM_PENDLIST
LOS_ListTailInsert(&g_losFreeTask, &g_taskCBArray[index].pendList);
}
g_taskCBArray[index].taskStatus = OS_TASK_STATUS_UNUSED;
g_taskCBArray[index].taskID = index;
g_taskCBArray[index].processCB = processCB;
ret = OsSchedInit();//initialize the scheduler
ret = OsSchedInit();
EXIT:
if (ret != LOS_OK) {
@ -295,41 +221,41 @@ EXIT:
}
return ret;
}
//get the idle task ID; each CPU core manages its own tasks, giving true parallel processing
UINT32 OsGetIdleTaskId(VOID)
{
return OsSchedRunqueueIdleGet()->taskID;
}
//create an idle task
LITE_OS_SEC_TEXT_INIT UINT32 OsIdleTaskCreate(UINTPTR processID)
{
UINT32 ret;
TSK_INIT_PARAM_S taskInitParam;
UINT32 idleTaskID;
(VOID)memset_s((VOID *)(&taskInitParam), sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));//zero the initial task parameters
taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)OsIdleTask;//entry function
taskInitParam.uwStackSize = LOSCFG_BASE_CORE_TSK_IDLE_STACK_SIZE;//task stack size, 2 KiB
taskInitParam.pcName = "Idle";//task name (pcName)
(VOID)memset_s((VOID *)(&taskInitParam), sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));
taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)OsIdleTask;
taskInitParam.uwStackSize = LOSCFG_BASE_CORE_TSK_IDLE_STACK_SIZE;
taskInitParam.pcName = "Idle";
taskInitParam.policy = LOS_SCHED_IDLE;
taskInitParam.usTaskPrio = OS_TASK_PRIORITY_LOWEST;//default lowest priority, 31
taskInitParam.usTaskPrio = OS_TASK_PRIORITY_LOWEST;
taskInitParam.processID = processID;
#ifdef LOSCFG_KERNEL_SMP
taskInitParam.usCpuAffiMask = CPUID_TO_AFFI_MASK(ArchCurrCpuid());//each idle task runs only on its own CPU
taskInitParam.usCpuAffiMask = CPUID_TO_AFFI_MASK(ArchCurrCpuid());
#endif
ret = LOS_TaskCreateOnly(&idleTaskID, &taskInitParam);
if (ret != LOS_OK) {
return ret;
}
LosTaskCB *idleTask = OS_TCB_FROM_TID(idleTaskID);
idleTask->taskStatus |= OS_TASK_FLAG_SYSTEM_TASK;//mark it as a system task; the idle task exists to let the CPU rest, so it is naturally a system task
idleTask->taskStatus |= OS_TASK_FLAG_SYSTEM_TASK;
OsSchedRunqueueIdleInit(idleTask);
return LOS_TaskResume(idleTaskID);
}
/*
* Description : get id of current running task, i.e. the task ID running on the current CPU
* Description : get id of current running task.
* Return : task id
*/
LITE_OS_SEC_TEXT UINT32 LOS_CurTaskIDGet(VOID)
@ -341,7 +267,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_CurTaskIDGet(VOID)
}
return runTask->taskID;
}
//create the sync semaphore for the given task
STATIC INLINE UINT32 TaskSyncCreate(LosTaskCB *taskCB)
{
#ifdef LOSCFG_KERNEL_SMP_TASK_SYNC
@ -354,7 +280,7 @@ STATIC INLINE UINT32 TaskSyncCreate(LosTaskCB *taskCB)
#endif
return LOS_OK;
}
//destroy the given task-sync semaphore
STATIC INLINE VOID OsTaskSyncDestroy(UINT32 syncSignal)
{
#ifdef LOSCFG_KERNEL_SMP_TASK_SYNC
@ -365,7 +291,6 @@ STATIC INLINE VOID OsTaskSyncDestroy(UINT32 syncSignal)
}
#ifdef LOSCFG_KERNEL_SMP
//task sync wait; synchronization is maintained through a semaphore
STATIC INLINE UINT32 OsTaskSyncWait(const LosTaskCB *taskCB)
{
#ifdef LOSCFG_KERNEL_SMP_TASK_SYNC
@ -391,7 +316,7 @@ STATIC INLINE UINT32 OsTaskSyncWait(const LosTaskCB *taskCB)
#endif
}
#endif
//sync wake-up
STATIC INLINE VOID OsTaskSyncWake(const LosTaskCB *taskCB)
{
#ifdef LOSCFG_KERNEL_SMP_TASK_SYNC
@ -413,14 +338,14 @@ STATIC INLINE VOID OsInsertTCBToFreeList(LosTaskCB *taskCB)
taskCB->taskStatus = OS_TASK_STATUS_UNUSED;
LOS_ListAdd(&g_losFreeTask, &taskCB->pendList);
}
//free the resources the task occupies in kernel mode
STATIC VOID OsTaskKernelResourcesToFree(UINT32 syncSignal, UINTPTR topOfStack)
{
OsTaskSyncDestroy(syncSignal);//destroy the task's sync semaphore
OsTaskSyncDestroy(syncSignal);
(VOID)LOS_MemFree((VOID *)m_aucSysMem1, (VOID *)topOfStack);//free the kernel-space stack
(VOID)LOS_MemFree((VOID *)m_aucSysMem1, (VOID *)topOfStack);
}
//free the task's resources
STATIC VOID OsTaskResourcesToFree(LosTaskCB *taskCB)
{
UINT32 syncSignal = LOSCFG_BASE_IPC_SEM_LIMIT;
@ -428,7 +353,7 @@ STATIC VOID OsTaskResourcesToFree(LosTaskCB *taskCB)
UINTPTR topOfStack;
#ifdef LOSCFG_KERNEL_VM
if ((taskCB->taskStatus & OS_TASK_FLAG_USER_MODE) && (taskCB->userMapBase != 0)) {//free the user-mode stack
if ((taskCB->taskStatus & OS_TASK_FLAG_USER_MODE) && (taskCB->userMapBase != 0)) {
SCHEDULER_LOCK(intSave);
UINT32 mapBase = (UINTPTR)taskCB->userMapBase;
UINT32 mapSize = taskCB->userMapSize;
@ -438,7 +363,7 @@ STATIC VOID OsTaskResourcesToFree(LosTaskCB *taskCB)
LosProcessCB *processCB = OS_PCB_FROM_TCB(taskCB);
LOS_ASSERT(!(OsProcessVmSpaceGet(processCB) == NULL));
UINT32 ret = OsUnMMap(OsProcessVmSpaceGet(processCB), (UINTPTR)mapBase, mapSize);//remove the mapping
UINT32 ret = OsUnMMap(OsProcessVmSpaceGet(processCB), (UINTPTR)mapBase, mapSize);
if ((ret != LOS_OK) && (mapBase != 0) && !OsProcessIsInit(processCB)) {
PRINT_ERR("process(%u) unmmap user task(%u) stack failed! mapbase: 0x%x size :0x%x, error: %d\n",
processCB->processID, taskCB->taskID, mapBase, mapSize, ret);
@ -450,36 +375,36 @@ STATIC VOID OsTaskResourcesToFree(LosTaskCB *taskCB)
}
#endif
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {//the task is back in the unused state
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
topOfStack = taskCB->topOfStack;
taskCB->topOfStack = 0;
#ifdef LOSCFG_KERNEL_SMP_TASK_SYNC
syncSignal = taskCB->syncSignal;
taskCB->syncSignal = LOSCFG_BASE_IPC_SEM_LIMIT;
#endif
OsTaskKernelResourcesToFree(syncSignal, topOfStack);//free the kernel-side memory, i.e. the kernel stack space
OsTaskKernelResourcesToFree(syncSignal, topOfStack);
SCHEDULER_LOCK(intSave);
#ifdef LOSCFG_KERNEL_VM
OsClearSigInfoTmpList(&(taskCB->sig));//return the signal control block's memory
OsClearSigInfoTmpList(&(taskCB->sig));
#endif
OsInsertTCBToFreeList(taskCB);
SCHEDULER_UNLOCK(intSave);
}
return;
}
//reclaim recycled tasks in a batch
LITE_OS_SEC_TEXT VOID OsTaskCBRecycleToFree(void)
{
UINT32 intSave;
SCHEDULER_LOCK(intSave);
while (!LOS_ListEmpty(&g_taskRecycleList)) {//walk the recycle list
LosTaskCB *taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_taskRecycleList));//take a task off the list
LOS_ListDelete(&taskCB->pendList);//reset the node
while (!LOS_ListEmpty(&g_taskRecycleList)) {
LosTaskCB *taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_taskRecycleList));
LOS_ListDelete(&taskCB->pendList);
SCHEDULER_UNLOCK(intSave);
OsTaskResourcesToFree(taskCB);//free the resources the task holds
OsTaskResourcesToFree(taskCB);
SCHEDULER_LOCK(intSave);
}
@ -489,7 +414,7 @@ LITE_OS_SEC_TEXT VOID OsTaskCBRecycleToFree(void)
/*
* Description : All task entry
* Input : taskID --- The ID of the task to be run
*///OsTaskEntry is the entry of every task; it is installed when the new task's stack is set up in OsTaskStackInit
*/
LITE_OS_SEC_TEXT_INIT VOID OsTaskEntry(UINT32 taskID)
{
LOS_ASSERT(!OS_TID_CHECK_INVALID(taskID));
@ -499,19 +424,19 @@ LITE_OS_SEC_TEXT_INIT VOID OsTaskEntry(UINT32 taskID)
* from interrupt and other cores. release task spinlock and enable
* interrupt in sequence at the task entry.
*/
LOS_SpinUnlock(&g_taskSpin);//release the task spinlock
(VOID)LOS_IntUnLock();//re-enable interrupts
LOS_SpinUnlock(&g_taskSpin);
(VOID)LOS_IntUnLock();
LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID);
taskCB->joinRetval = taskCB->taskEntry(taskCB->args[0], taskCB->args[1],//call the task's entry function
taskCB->joinRetval = taskCB->taskEntry(taskCB->args[0], taskCB->args[1],
taskCB->args[2], taskCB->args[3]); /* 2 & 3: just for args array index */
if (!(taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN)) {
taskCB->joinRetval = 0;//the join return value is 0
taskCB->joinRetval = 0;
}
OsRunningTaskToExit(taskCB, 0);
}
//validate the task-creation parameters
STATIC UINT32 TaskCreateParamCheck(const UINT32 *taskID, TSK_INIT_PARAM_S *initParam)
{
UINT32 poolSize = OS_SYS_MEM_SIZE;
@ -530,30 +455,30 @@ STATIC UINT32 TaskCreateParamCheck(const UINT32 *taskID, TSK_INIT_PARAM_S *initP
}
}
if (initParam->pfnTaskEntry == NULL) {//the entry function must not be NULL
if (initParam->pfnTaskEntry == NULL) {
return LOS_ERRNO_TSK_ENTRY_NULL;
}
if (initParam->usTaskPrio > OS_TASK_PRIORITY_LOWEST) {//the priority must not exceed 31
if (initParam->usTaskPrio > OS_TASK_PRIORITY_LOWEST) {
return LOS_ERRNO_TSK_PRIOR_ERROR;
}
if (initParam->uwStackSize > poolSize) {//the requested stack size cannot exceed the whole pool
if (initParam->uwStackSize > poolSize) {
return LOS_ERRNO_TSK_STKSZ_TOO_LARGE;
}
if (initParam->uwStackSize == 0) {//every task needs a kernel stack, so a zero uwStackSize falls back to the default
if (initParam->uwStackSize == 0) {
initParam->uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
}
initParam->uwStackSize = (UINT32)ALIGN(initParam->uwStackSize, LOSCFG_STACK_POINT_ALIGN_SIZE);
if (initParam->uwStackSize < LOS_TASK_MIN_STACK_SIZE) {//the runtime stack cannot be smaller than the minimum
if (initParam->uwStackSize < LOS_TASK_MIN_STACK_SIZE) {
return LOS_ERRNO_TSK_STKSZ_TOO_SMALL;
}
return LOS_OK;
}
//the kernel-mode task stack is allocated from the kernel process (KProcess) address space
STATIC VOID TaskCBDeInit(LosTaskCB *taskCB)
{
UINT32 intSave;
@ -606,13 +531,13 @@ STATIC VOID TaskCBBaseInit(LosTaskCB *taskCB, const TSK_INIT_PARAM_S *initParam)
LOS_ListInit(&taskCB->joinList);
}
LOS_ListInit(&taskCB->lockList);//initialize the mutex list
LOS_ListInit(&taskCB->lockList);
SET_SORTLIST_VALUE(&taskCB->sortList, OS_SORT_LINK_INVALID_TIME);
#ifdef LOSCFG_KERNEL_VM
taskCB->futex.index = OS_INVALID_VALUE;
#endif
}
//initialize the task control block
STATIC UINT32 TaskCBInit(LosTaskCB *taskCB, const TSK_INIT_PARAM_S *initParam)
{
UINT32 ret;
@ -621,7 +546,7 @@ STATIC UINT32 TaskCBInit(LosTaskCB *taskCB, const TSK_INIT_PARAM_S *initParam)
LosSchedParam initSchedParam = {0};
UINT16 policy = (initParam->policy == LOS_SCHED_NORMAL) ? LOS_SCHED_RR : initParam->policy;
TaskCBBaseInit(taskCB, initParam);//initialize the task's basic fields; task->stackPointer points at the kernel-stack SP, where the task's initial context is stored
TaskCBBaseInit(taskCB, initParam);
schedParam.policy = policy;
ret = OsProcessAddNewTask(initParam->processID, taskCB, &schedParam, &numCount);
@ -673,27 +598,25 @@ STATIC UINT32 TaskStackInit(LosTaskCB *taskCB, const TSK_INIT_PARAM_S *initParam
#endif
return LOS_OK;
}
//get a free TCB
STATIC LosTaskCB *GetFreeTaskCB(VOID)
{
UINT32 intSave;
SCHEDULER_LOCK(intSave);
if (LOS_ListEmpty(&g_losFreeTask)) {//the global free-task list is empty
if (LOS_ListEmpty(&g_losFreeTask)) {
SCHEDULER_UNLOCK(intSave);
PRINT_ERR("No idle TCB in the system!\n");
return NULL;
}
LosTaskCB *taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_losFreeTask));
LOS_ListDelete(LOS_DL_LIST_FIRST(&g_losFreeTask));//unlink the node from the g_losFreeTask list
LOS_ListDelete(LOS_DL_LIST_FIRST(&g_losFreeTask));
SCHEDULER_UNLOCK(intSave);
return taskCB;
}
/*
A task created with LOS_TaskCreateOnly is left suspended; it does not enter the ready state
until LOS_TaskResume is called on it.
*/
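/* Minimal sketch of the behaviour described above (illustrative only): a task made with
 * LOS_TaskCreateOnly stays suspended until LOS_TaskResume moves it to the ready queue. */
STATIC VOID CreateSuspendedDemo(TSK_INIT_PARAM_S *initParam)
{
    UINT32 tid;
    if (LOS_TaskCreateOnly(&tid, initParam) == LOS_OK) {
        (VOID)LOS_TaskResume(tid);   /* only now does the task become ready to run */
    }
}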
LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskCreateOnly(UINT32 *taskID, TSK_INIT_PARAM_S *initParam)
{
UINT32 errRet = TaskCreateParamCheck(taskID, initParam);
@ -733,7 +656,7 @@ DEINIT_TCB:
TaskCBDeInit(taskCB);
return errRet;
}
//create a task and put it into the ready state; if no higher-priority task is in the ready queue, it starts running
LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskCreate(UINT32 *taskID, TSK_INIT_PARAM_S *initParam)
{
UINT32 ret;
@ -747,7 +670,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskCreate(UINT32 *taskID, TSK_INIT_PARAM_S *in
return LOS_ERRNO_TSK_YIELD_IN_INT;
}
if (OsProcessIsUserMode(OsCurrProcessGet())) {//the current process is a user-mode process
if (OsProcessIsUserMode(OsCurrProcessGet())) {
initParam->processID = (UINTPTR)OsGetKernelInitProcess();
} else {
initParam->processID = (UINTPTR)OsCurrProcessGet();
@ -773,7 +696,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskCreate(UINT32 *taskID, TSK_INIT_PARAM_S *in
return LOS_OK;
}
//resume a suspended task, putting it back into the ready state
LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskResume(UINT32 taskID)
{
UINT32 intSave;
@ -788,7 +711,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskResume(UINT32 taskID)
SCHEDULER_LOCK(intSave);
/* clear pending signal */
taskCB->signal &= ~SIGNAL_SUSPEND;//clear the pending suspend signal
taskCB->signal &= ~SIGNAL_SUSPEND;
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
errRet = LOS_ERRNO_TSK_NOT_CREATED;
@ -814,13 +737,13 @@ LOS_ERREND:
}
/*
* Check if needs to do the suspend operation on the running task.//check whether the running task needs to be suspended
* Return TRUE, if needs to do the suspension. //return TRUE if the suspension should go ahead
* Return FALSE, if meets following circumstances: //return FALSE in the following cases:
* 1. Do the suspension across cores, if SMP is enabled //1. the suspension targets another core while SMP is enabled
* 2. Do the suspension when preemption is disabled //2. preemption is currently disabled
* 3. Do the suspension in hard-irq //3. the suspension is requested from a hard interrupt
* then LOS_TaskSuspend will directly return with 'ret' value. //in those cases LOS_TaskSuspend returns the 'ret' value directly
* Check if needs to do the suspend operation on the running task.
* Return TRUE, if needs to do the suspension.
* Return FALSE, if meets following circumstances:
* 1. Do the suspension across cores, if SMP is enabled
* 2. Do the suspension when preemption is disabled
* 3. Do the suspension in hard-irq
* then LOS_TaskSuspend will directly return with 'ret' value.
*/
LITE_OS_SEC_TEXT_INIT STATIC BOOL OsTaskSuspendCheckOnRun(LosTaskCB *taskCB, UINT32 *ret)
{
@ -829,20 +752,20 @@ LITE_OS_SEC_TEXT_INIT STATIC BOOL OsTaskSuspendCheckOnRun(LosTaskCB *taskCB, UIN
#ifdef LOSCFG_KERNEL_SMP
/* ASYNCHRONIZED. No need to do task lock checking */
if (taskCB->currCpu != ArchCurrCpuid()) {//the task is running on another CPU core
if (taskCB->currCpu != ArchCurrCpuid()) {
taskCB->signal = SIGNAL_SUSPEND;
LOS_MpSchedule(taskCB->currCpu);//ask the task's own CPU to reschedule
LOS_MpSchedule(taskCB->currCpu);
return FALSE;
}
#endif
if (!OsPreemptableInSched()) {//preemption is not possible right now
if (!OsPreemptableInSched()) {
/* Suspending the current core's running task */
*ret = LOS_ERRNO_TSK_SUSPEND_LOCKED;
return FALSE;
}
if (OS_INT_ACTIVE) {//currently inside a hard interrupt
if (OS_INT_ACTIVE) {
/* suspend running task in interrupt */
taskCB->signal = SIGNAL_SUSPEND;
return FALSE;
@ -850,7 +773,7 @@ LITE_OS_SEC_TEXT_INIT STATIC BOOL OsTaskSuspendCheckOnRun(LosTaskCB *taskCB, UIN
return TRUE;
}
//suspend a task; the argument need not be the current task, i.e. task A can put task B into the blocked state: suspend the given task, then switch
LITE_OS_SEC_TEXT STATIC UINT32 OsTaskSuspend(LosTaskCB *taskCB)
{
UINT32 errRet;
@ -863,14 +786,14 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsTaskSuspend(LosTaskCB *taskCB)
return LOS_ERRNO_TSK_ALREADY_SUSPENDED;
}
if ((tempStatus & OS_TASK_STATUS_RUNNING) &&//the task is marked running; with multiple CPU cores that does not necessarily mean it runs on this CPU --
!OsTaskSuspendCheckOnRun(taskCB, &errRet)) {//it may well be a task running on another core
if ((tempStatus & OS_TASK_STATUS_RUNNING) &&
!OsTaskSuspendCheckOnRun(taskCB, &errRet)) {
return errRet;
}
return taskCB->ops->suspend(taskCB);
}
//public interface: a wrapper around OsTaskSuspend
LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskSuspend(UINT32 taskID)
{
UINT32 intSave;
@ -890,7 +813,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskSuspend(UINT32 taskID)
SCHEDULER_UNLOCK(intSave);
return errRet;
}
//mark the task as unused
STATIC INLINE VOID OsTaskStatusUnusedSet(LosTaskCB *taskCB)
{
taskCB->taskStatus |= OS_TASK_STATUS_UNUSED;
@ -1036,7 +959,7 @@ LOS_ERREND:
}
return ret;
}
//task delay: give up the CPU and wait; when the delay expires the task re-enters the ready state
LITE_OS_SEC_TEXT UINT32 LOS_TaskDelay(UINT32 tick)
{
UINT32 intSave;
@ -1066,7 +989,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_TaskDelay(UINT32 tick)
SCHEDULER_UNLOCK(intSave);
return ret;
}
//get a task's priority
LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskPriGet(UINT32 taskID)
{
UINT32 intSave;
@ -1087,7 +1010,7 @@ LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskPriGet(UINT32 taskID)
SCHEDULER_UNLOCK(intSave);
return param.priority;
}
// Set the priority of the specified task
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskPriSet(UINT32 taskID, UINT16 taskPrio)
{
UINT32 intSave;
@ -1125,12 +1048,12 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskPriSet(UINT32 taskID, UINT16 taskPrio)
}
return LOS_OK;
}
// Set the priority of the current task
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_CurTaskPriSet(UINT16 taskPrio)
{
return LOS_TaskPriSet(OsCurrTaskGet()->taskID, taskPrio);
}
// The current task releases the CPU and moves to the tail of the ready queue of its own priority
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskYield(VOID)
{
UINT32 intSave;
@ -1176,7 +1099,7 @@ LITE_OS_SEC_TEXT_MINOR VOID LOS_TaskUnlock(VOID)
LOS_Schedule();
}
}
// Get task information (used by the shell)
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskInfoGet(UINT32 taskID, TSK_INFO_S *taskInfo)
{
UINT32 intSave;
@ -1206,8 +1129,8 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskInfoGet(UINT32 taskID, TSK_INFO_S *taskInf
taskCB->ops->schedParamGet(taskCB, &param);
taskInfo->usTaskStatus = taskCB->taskStatus;
taskInfo->usTaskPrio = param.priority;
taskInfo->uwStackSize = taskCB->stackSize;// kernel-mode stack size
taskInfo->uwTopOfStack = taskCB->topOfStack;// top address of the kernel-mode stack
taskInfo->uwStackSize = taskCB->stackSize;
taskInfo->uwTopOfStack = taskCB->topOfStack;
taskInfo->uwEventMask = taskCB->eventMask;
taskInfo->taskEvent = taskCB->taskEvent;
taskInfo->pTaskMux = taskCB->taskMux;
@ -1218,16 +1141,16 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskInfoGet(UINT32 taskID, TSK_INFO_S *taskInf
}
taskInfo->acName[LOS_TASK_NAMELEN - 1] = '\0';
taskInfo->uwBottomOfStack = TRUNCATE(((UINTPTR)taskCB->topOfStack + taskCB->stackSize),// the stack bottom address lies above the stack top (the stack grows downwards)
taskInfo->uwBottomOfStack = TRUNCATE(((UINTPTR)taskCB->topOfStack + taskCB->stackSize),
OS_TASK_STACK_ADDR_ALIGN);
taskInfo->uwCurrUsed = (UINT32)(taskInfo->uwBottomOfStack - taskInfo->uwSP);// how much of the task stack is currently in use
taskInfo->uwCurrUsed = (UINT32)(taskInfo->uwBottomOfStack - taskInfo->uwSP);
taskInfo->bOvf = OsStackWaterLineGet((const UINTPTR *)taskInfo->uwBottomOfStack,// get the stack usage water line
taskInfo->bOvf = OsStackWaterLineGet((const UINTPTR *)taskInfo->uwBottomOfStack,
(const UINTPTR *)taskInfo->uwTopOfStack, &taskInfo->uwPeakUsed);
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
// CPU affinity: bind a task to the given CPUs on a multi-core system; only supported in SMP mode
LITE_OS_SEC_TEXT BOOL OsTaskCpuAffiSetUnsafe(UINT32 taskID, UINT16 newCpuAffiMask, UINT16 *oldCpuAffiMask)
{
#ifdef LOSCFG_KERNEL_SMP
@ -1253,17 +1176,17 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskCpuAffiSet(UINT32 taskID, UINT16 cpuAffiMa
UINT32 intSave;
UINT16 currCpuMask;
if (OS_TID_CHECK_INVALID(taskID)) {// check that the task ID is valid; tasks come from the task pool (128 by default, IDs in [0, 127])
if (OS_TID_CHECK_INVALID(taskID)) {
return LOS_ERRNO_TSK_ID_INVALID;
}
if (!(cpuAffiMask & LOSCFG_KERNEL_CPU_MASK)) {// validate the CPU affinity mask
if (!(cpuAffiMask & LOSCFG_KERNEL_CPU_MASK)) {
return LOS_ERRNO_TSK_CPU_AFFINITY_MASK_ERR;
}
LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID);
SCHEDULER_LOCK(intSave);
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {// the task control block is not in use
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
SCHEDULER_UNLOCK(intSave);
return LOS_ERRNO_TSK_NOT_CREATED;
}
@ -1271,13 +1194,13 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskCpuAffiSet(UINT32 taskID, UINT16 cpuAffiMa
SCHEDULER_UNLOCK(intSave);
if (needSched && OS_SCHEDULER_ACTIVE) {
LOS_MpSchedule(currCpuMask);// send a scheduling IPI to the target CPUs
LOS_Schedule();// request a reschedule
LOS_MpSchedule(currCpuMask);
LOS_Schedule();
}
return LOS_OK;
}
// Query which CPUs a task is bound to
LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskCpuAffiGet(UINT32 taskID)
{
#ifdef LOSCFG_KERNEL_SMP
@ -1291,18 +1214,18 @@ LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskCpuAffiGet(UINT32 taskID)
LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID);
SCHEDULER_LOCK(intSave);
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {// the task must be in use
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
SCHEDULER_UNLOCK(intSave);
return INVALID_CPU_AFFI_MASK;
}
cpuAffiMask = taskCB->cpuAffiMask;// read the affinity mask
cpuAffiMask = taskCB->cpuAffiMask;
SCHEDULER_UNLOCK(intSave);
return cpuAffiMask;
#else
(VOID)taskID;
return 1;// single-core case: return 1 (CPU 0 corresponds to 0x01)
return 1;
#endif
}
@ -1312,14 +1235,14 @@ LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskCpuAffiGet(UINT32 taskID)
LITE_OS_SEC_TEXT_MINOR VOID OsTaskProcSignal(VOID)
{
UINT32 ret;
// Private and uninterruptible, so no protection is needed; other cores always see this task as running, so it keeps receiving signals while the code below executes
/*
* private and uninterruptable, no protection needed.
* while this task is always running when others cores see it,
* so it keeps receiving signals while follow code executing.
*/
LosTaskCB *runTask = OsCurrTaskGet();
if (runTask->signal == SIGNAL_NONE) {// no signal pending from another CPU
if (runTask->signal == SIGNAL_NONE) {
return;
}
@ -1327,23 +1250,23 @@ LITE_OS_SEC_TEXT_MINOR VOID OsTaskProcSignal(VOID)
/*
* clear the signal, and do the task deletion. if the signaled task has been
* scheduled out, then this deletion will wait until next run.
*/
runTask->signal = SIGNAL_NONE;// clear the signal
*/
runTask->signal = SIGNAL_NONE;
ret = LOS_TaskDelete(runTask->taskID);
if (ret != LOS_OK) {
PRINT_ERR("Task proc signal delete task(%u) failed err:0x%x\n", runTask->taskID, ret);
}
} else if (runTask->signal & SIGNAL_SUSPEND) {// another CPU has asked for this task to be suspended
runTask->signal &= ~SIGNAL_SUSPEND;// clear the suspend request
} else if (runTask->signal & SIGNAL_SUSPEND) {
runTask->signal &= ~SIGNAL_SUSPEND;
/* suspend killed task may fail, ignore the result */
(VOID)LOS_TaskSuspend(runTask->taskID);
#ifdef LOSCFG_KERNEL_SMP
} else if (runTask->signal & SIGNAL_AFFI) {// the task's CPU affinity changed and it should migrate at the next schedule
runTask->signal &= ~SIGNAL_AFFI;// clear the affinity-change request
} else if (runTask->signal & SIGNAL_AFFI) {
runTask->signal &= ~SIGNAL_AFFI;
/* priority queue has updated, notify the target cpu */
LOS_MpSchedule((UINT32)runTask->cpuAffiMask);// trigger scheduling so the task migrates to a CPU in its new affinity mask
LOS_MpSchedule((UINT32)runTask->cpuAffiMask);
#endif
}
}
@ -1416,7 +1339,7 @@ INT32 OsUserProcessOperatePermissionsCheck(const LosTaskCB *taskCB, UINTPTR proc
return LOS_OK;
}
// Before creating a task, check the user-mode stack parameters: the addresses must lie in user space
LITE_OS_SEC_TEXT_INIT STATIC UINT32 OsCreateUserTaskParamCheck(UINT32 processID, TSK_INIT_PARAM_S *param)
{
UserTaskParam *userParam = NULL;
@ -1426,25 +1349,25 @@ LITE_OS_SEC_TEXT_INIT STATIC UINT32 OsCreateUserTaskParamCheck(UINT32 processID,
}
userParam = &param->userParam;
if ((processID == OS_INVALID_VALUE) && !LOS_IsUserAddress(userParam->userArea)) {// the user area must be a user-space address
if ((processID == OS_INVALID_VALUE) && !LOS_IsUserAddress(userParam->userArea)) {
return OS_INVALID_VALUE;
}
if (!LOS_IsUserAddress((UINTPTR)param->pfnTaskEntry)) {// the entry function must be in user space
if (!LOS_IsUserAddress((UINTPTR)param->pfnTaskEntry)) {
return OS_INVALID_VALUE;
}
// the mapped stack must be in user space
if (userParam->userMapBase && !LOS_IsUserAddressRange(userParam->userMapBase, userParam->userMapSize)) {
return OS_INVALID_VALUE;
}
// check the stack pointer range
if (!LOS_IsUserAddress(userParam->userSP)) {
return OS_INVALID_VALUE;
}
return LOS_OK;
}
// Create a user-mode task
LITE_OS_SEC_TEXT_INIT UINT32 OsCreateUserTask(UINTPTR processID, TSK_INIT_PARAM_S *initParam)
{
UINT32 taskID;
@ -1453,18 +1376,18 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsCreateUserTask(UINTPTR processID, TSK_INIT_PARAM_
INT32 policy;
SchedParam param;
ret = OsCreateUserTaskParamCheck(processID, initParam);// validate the parameters: stack and entry address must be in user space
ret = OsCreateUserTaskParamCheck(processID, initParam);
if (ret != LOS_OK) {
return ret;
}
// a task has two stacks: a kernel-mode stack (size chosen by the kernel) and a user-mode stack (size chosen by the user)
initParam->uwStackSize = OS_USER_TASK_SYSCALL_STACK_SIZE;
initParam->usTaskPrio = OS_TASK_PRIORITY_LOWEST;// lowest priority (31)
if (processID == OS_INVALID_VALUE) {// no process ID supplied by the caller
initParam->usTaskPrio = OS_TASK_PRIORITY_LOWEST;
if (processID == OS_INVALID_VALUE) {
SCHEDULER_LOCK(intSave);
LosProcessCB *processCB = OsCurrProcessGet();
initParam->processID = (UINTPTR)processCB;
initParam->consoleID = processCB->consoleID;// inherit the console ID of the process
initParam->consoleID = processCB->consoleID;
SCHEDULER_UNLOCK(intSave);
ret = LOS_GetProcessScheduler(processCB->processID, &policy, NULL);
if (ret != LOS_OK) {
@ -1477,20 +1400,20 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsCreateUserTask(UINTPTR processID, TSK_INIT_PARAM_
initParam->deadlineUs = param.deadlineUs;
initParam->periodUs = param.periodUs;
}
} else {// the process already exists
initParam->policy = LOS_SCHED_RR;// default to round-robin scheduling (other policies are supported as well)
initParam->processID = processID;// assign the process ID
initParam->consoleID = 0;// default to console 0
} else {
initParam->policy = LOS_SCHED_RR;
initParam->processID = processID;
initParam->consoleID = 0;
}
ret = LOS_TaskCreateOnly(&taskID, initParam);// create the task only, without requesting a reschedule
ret = LOS_TaskCreateOnly(&taskID, initParam);
if (ret != LOS_OK) {
return OS_INVALID_VALUE;
}
return taskID;
}
// Get a task's scheduling policy
LITE_OS_SEC_TEXT INT32 LOS_GetTaskScheduler(INT32 taskID)
{
UINT32 intSave;
@ -1503,7 +1426,7 @@ LITE_OS_SEC_TEXT INT32 LOS_GetTaskScheduler(INT32 taskID)
LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID);
SCHEDULER_LOCK(intSave);
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {// the task must be in use
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
policy = -LOS_EINVAL;
OS_GOTO_ERREND();
}
@ -1515,7 +1438,7 @@ LOS_ERREND:
SCHEDULER_UNLOCK(intSave);
return policy;
}
// Set a task's scheduling parameters
LITE_OS_SEC_TEXT INT32 LOS_SetTaskScheduler(INT32 taskID, UINT16 policy, UINT16 priority)
{
SchedParam param = { 0 };
@ -1659,12 +1582,12 @@ UINT32 LOS_TaskDetach(UINT32 taskID)
SCHEDULER_UNLOCK(intSave);
return errRet;
}
// Get the maximum number of tasks
LITE_OS_SEC_TEXT UINT32 LOS_GetSystemTaskMaximum(VOID)
{
return g_taskMaxNum;
}
// The last entry of the task pool
LosTaskCB *OsGetDefaultTaskCB(VOID)
{
return &g_taskCBArray[g_taskMaxNum];
@ -1679,44 +1602,44 @@ LITE_OS_SEC_TEXT VOID OsWriteResourceEventUnsafe(UINT32 events)
{
(VOID)OsEventWriteUnsafe(&g_resourceEvent, events, FALSE, NULL);
}
// Resource recovery task
STATIC VOID OsResourceRecoveryTask(VOID)
{
UINT32 ret;
while (1) {// loop forever: resources need recycling for as long as the system runs
while (1) {
ret = LOS_EventRead(&g_resourceEvent, OS_RESOURCE_EVENT_MASK,
LOS_WAITMODE_OR | LOS_WAITMODE_CLR, LOS_WAIT_FOREVER);// wait for a resource event
if (ret & (OS_RESOURCE_EVENT_FREE | OS_RESOURCE_EVENT_OOM)) {// a resource-free or out-of-memory event arrived
LOS_WAITMODE_OR | LOS_WAITMODE_CLR, LOS_WAIT_FOREVER);
if (ret & (OS_RESOURCE_EVENT_FREE | OS_RESOURCE_EVENT_OOM)) {
OsTaskCBRecycleToFree();
OsProcessCBRecycleToFree();// recycle process control blocks back to the free pool
OsProcessCBRecycleToFree();
}
#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK// switch for the OOM check task
if (ret & OS_RESOURCE_EVENT_OOM) {// the OOM event was raised
(VOID)OomCheckProcess();// check processes for out-of-memory conditions
#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK
if (ret & OS_RESOURCE_EVENT_OOM) {
(VOID)OomCheckProcess();
}
#endif
}
}
// Create the resource recovery task
LITE_OS_SEC_TEXT UINT32 OsResourceFreeTaskCreate(VOID)
{
UINT32 ret;
UINT32 taskID;
TSK_INIT_PARAM_S taskInitParam;
ret = LOS_EventInit((PEVENT_CB_S)&g_resourceEvent);// initialize the resource event control block
ret = LOS_EventInit((PEVENT_CB_S)&g_resourceEvent);
if (ret != LOS_OK) {
return LOS_NOK;
}
(VOID)memset_s((VOID *)(&taskInitParam), sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));
taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)OsResourceRecoveryTask;// entry function
taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)OsResourceRecoveryTask;
taskInitParam.uwStackSize = OS_TASK_RESOURCE_STATIC_SIZE;
taskInitParam.pcName = "ResourcesTask";
taskInitParam.usTaskPrio = OS_TASK_RESOURCE_FREE_PRIORITY;// priority 5, a fairly high priority
taskInitParam.usTaskPrio = OS_TASK_RESOURCE_FREE_PRIORITY;
ret = LOS_TaskCreate(&taskID, &taskInitParam);
if (ret == LOS_OK) {
OS_TCB_FROM_TID(taskID)->taskStatus |= OS_TASK_FLAG_NO_DELETE;
@ -1724,5 +1647,5 @@ LITE_OS_SEC_TEXT UINT32 OsResourceFreeTaskCreate(VOID)
return ret;
}
LOS_MODULE_INIT(OsResourceFreeTaskCreate, LOS_INIT_LEVEL_KMOD_TASK);// register the resource recovery task for module init
LOS_MODULE_INIT(OsResourceFreeTaskCreate, LOS_INIT_LEVEL_KMOD_TASK);
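Taken together, the APIs touched in this file (LOS_TaskCreate, LOS_TaskSuspend, LOS_TaskResume, LOS_TaskDelay) form the usual task lifecycle: fill in a TSK_INIT_PARAM_S, create the task, and suspend or resume it later, much as OsResourceFreeTaskCreate does above. The sketch below only illustrates that flow and is not code from this repository; DemoTaskEntry, DemoTaskLifecycle and the chosen stack size and priority are assumptions.

#include "los_task.h"

STATIC volatile UINT32 g_demoLoops = 0;

/* Hypothetical worker entry: does a little work, then sleeps for 100 ticks. */
STATIC VOID DemoTaskEntry(VOID)
{
    while (1) {
        g_demoLoops++;
        (VOID)LOS_TaskDelay(100); /* leave the ready queue until the delay expires */
    }
}

UINT32 DemoTaskLifecycle(VOID)
{
    UINT32 ret;
    UINT32 taskID;
    TSK_INIT_PARAM_S initParam = { 0 };

    initParam.pfnTaskEntry = (TSK_ENTRY_FUNC)DemoTaskEntry;
    initParam.uwStackSize  = 0x2000;   /* 8 KiB kernel stack, an assumed size */
    initParam.pcName       = "DemoTask";
    initParam.usTaskPrio   = 20;       /* arbitrary mid-range priority */

    ret = LOS_TaskCreate(&taskID, &initParam); /* creates the task and may trigger a reschedule */
    if (ret != LOS_OK) {
        return ret;
    }

    (VOID)LOS_TaskSuspend(taskID); /* the task leaves the ready queue (or is signalled if running on another core) */
    (VOID)LOS_TaskResume(taskID);  /* SIGNAL_SUSPEND is cleared and the task becomes ready again */
    return LOS_OK;
}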

@ -37,30 +37,30 @@
#endif
LITE_OS_SEC_DATA_INIT UINT32 g_sysClock; // system clock: the clock source for most components and for all peripherals
LITE_OS_SEC_DATA_INIT UINT32 g_tickPerSecond; // ticks per second; 100 by default, i.e. a 10 ms tick
LITE_OS_SEC_BSS DOUBLE g_cycle2NsScale; // scale factor for converting cycles to nanoseconds
LITE_OS_SEC_DATA_INIT UINT32 g_sysClock;
LITE_OS_SEC_DATA_INIT UINT32 g_tickPerSecond;
LITE_OS_SEC_BSS DOUBLE g_cycle2NsScale;
/* spinlock for task module */
LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_tickSpin); // tick spinlock
LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_tickSpin);
/*
* Description : Tick interruption handler
*/
LITE_OS_SEC_TEXT VOID OsTickHandler(VOID)// tick interrupt handler, fired once per tick (every 10 ms by default)
LITE_OS_SEC_TEXT VOID OsTickHandler(VOID)
{
#ifdef LOSCFG_SCHED_TICK_DEBUG
OsSchedDebugRecordData();
#endif
#ifdef LOSCFG_KERNEL_VDSO
OsVdsoTimevalUpdate(); // update the time in the vdso data page; the vdso lets user space read the system time without a syscall
OsVdsoTimevalUpdate();
#endif
#ifdef LOSCFG_BASE_CORE_TICK_HW_TIME
HalClockIrqClear(); /* diff from every platform */
#endif
OsSchedTick();// scheduling driven by the tick
OsSchedTick();
}
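Given g_tickPerSecond = 100 (a 10 ms tick), converting a millisecond delay into ticks is a rounding-up division. The helper below is a stand-alone sketch of that arithmetic only; DemoMs2Tick is hypothetical and not part of the kernel.

#include "los_typedef.h"

STATIC UINT32 DemoMs2Tick(UINT32 ms, UINT32 tickPerSecond)
{
    /* round up so that a short, non-zero delay never becomes zero ticks */
    UINT64 ticks = ((UINT64)ms * tickPerSecond + (1000 - 1)) / 1000;
    return (UINT32)ticks;
}

/* DemoMs2Tick(25, 100) == 3: a 25 ms delay needs three 10 ms ticks. */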

@ -57,10 +57,10 @@
typedef enum {
CONTAINER = 0,
PID_CONTAINER, // PID container
PID_CHILD_CONTAINER, // child PID container
UTS_CONTAINER, // UTS container
MNT_CONTAINER, // mount container
PID_CONTAINER,
PID_CHILD_CONTAINER,
UTS_CONTAINER,
MNT_CONTAINER,
IPC_CONTAINER,
USER_CONTAINER,
TIME_CONTAINER,
@ -70,29 +70,29 @@ typedef enum {
} ContainerType;
typedef struct Container {
Atomic rc; // reference count
Atomic rc;
#ifdef LOSCFG_PID_CONTAINER
struct PidContainer *pidContainer; // PID container
struct PidContainer *pidForChildContainer;// PID container for child processes
struct PidContainer *pidContainer;
struct PidContainer *pidForChildContainer;
#endif
#ifdef LOSCFG_UTS_CONTAINER
struct UtsContainer *utsContainer; // UTS container
struct UtsContainer *utsContainer;
#endif
#ifdef LOSCFG_MNT_CONTAINER
struct MntContainer *mntContainer; // mount container
struct MntContainer *mntContainer;
#endif
#ifdef LOSCFG_IPC_CONTAINER
struct IpcContainer *ipcContainer; // IPC container
struct IpcContainer *ipcContainer;
#endif
#ifdef LOSCFG_TIME_CONTAINER
struct TimeContainer *timeContainer; // time container
struct TimeContainer *timeForChildContainer;
struct TimeContainer *timeContainer;
struct TimeContainer *timeForChildContainer;
#endif
#ifdef LOSCFG_NET_CONTAINER
struct NetContainer *netContainer; // network container
struct NetContainer *netContainer;
#endif
} Container;
// Upper limits on the number of containers
typedef struct TagContainerLimit {
#ifdef LOSCFG_PID_CONTAINER
UINT32 pidLimit;

@ -40,21 +40,78 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
/**
* @ingroup los_err
* Define the error magic word.
*/
#define OS_ERR_MAGIC_WORD 0xa1b2c3f8
/**
* @ingroup los_err
* @brief Error handling macro capable of returning error codes.
*
* @par Description:
* This API is used to call the error handling function by using an error code and return the same error code.
* @attention
* <ul>
* <li>None.</li>
* </ul>
*
* @param errNo [IN] Error code.
*
* @retval errNo
* @par Dependency:
* <ul><li>los_err_pri.h: the header file that contains the API declaration.</li></ul>
* @see None.
*/
#define OS_RETURN_ERROR(errNo) do { \
(VOID)LOS_ErrHandle("os_unspecific_file", OS_ERR_MAGIC_WORD, errNo, 0, NULL); \
return errNo; \
} while (0)
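As a usage note, OS_RETURN_ERROR is normally wrapped around a failed check so that the error is first recorded through LOS_ErrHandle and then returned to the caller. A minimal hedged sketch follows; DemoCheckParam is hypothetical and LOS_NOK merely stands in for whatever module-specific error code a real caller would use.

STATIC UINT32 DemoCheckParam(const VOID *ptr)
{
    if (ptr == NULL) {
        /* reports the error with the magic "file" marker, then returns the code */
        OS_RETURN_ERROR(LOS_NOK);
    }
    return LOS_OK;
}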
/**
* @ingroup los_err
* @brief Error handling macro capable of returning error codes.
*
* @par Description:
* This API is used to call the error handling function by using an error code and the line number of
* the erroneous line, and return the same error code.
* @attention
* <ul>
* <li>None.</li>
* </ul>
*
* @param errLine [IN] Line number of the erroneous line.
* @param errNo [IN] Error code.
*
* @retval errNo
* @par Dependency:
* <ul><li>los_err_pri.h: the header file that contains the API declaration.</li></ul>
* @see None.
*/
#define OS_RETURN_ERROR_P2(errLine, errNo) do { \
(VOID)LOS_ErrHandle("os_unspecific_file", errLine, errNo, 0, NULL); \
return errNo; \
} while (0)
/**
* @ingroup los_err
* @brief Macro for jumping to error handler.
*
* @par Description:
* This API is used to call the error handling function by using an error code.
* @attention
* <ul>
* <li>None.</li>
* </ul>
*
* @param errorNo [IN] Error code.
*
* @retval None.
* @par Dependency:
* <ul><li>los_err_pri.h: the header file that contains the API declaration.</li></ul>
* @see None.
*/
#define OS_GOTO_ERR_HANDLER(errorNo) do { \
errNo = errorNo; \
errLine = OS_ERR_MAGIC_WORD; \

@ -33,27 +33,25 @@
#define _LOS_FUTEX_PRI_H
#include "los_list.h"
#define FUTEX_WAIT 0 ///< atomically check whether the counter at uaddr equals val; if so, sleep until FUTEX_WAKE or a timeout,
// i.e. put the task on the wait queue associated with uaddr
#define FUTEX_WAKE 1 ///< wake up at most val tasks waiting on uaddr
#define FUTEX_REQUEUE 3 ///< adjust the position of the given lock in the futex table
#define FUTEX_WAKE_OP 5
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_REQUEUE 3
#define FUTEX_WAKE_OP 5
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_WAIT_BITSET 9
#define FUTEX_PRIVATE 128 // private futex (hashed by virtual address)
#define FUTEX_PRIVATE 128
#define FUTEX_MASK 0x3U
/// Each futex node represents one blocked task. The key uniquely identifies a user-space lock; nodes with the same key are chained through queueList, forming the queue of tasks blocked on that lock.
typedef struct {
UINTPTR key; /* private: user virtual address; shared: physical address */
UINT32 index; /* hash bucket index, see OsFutexKeyToIndex */
UINT32 pid; /* private: process ID; shared: OS_INVALID (-1) */
LOS_DL_LIST pendList; /* points to pendList in the TCB, through which the task is found */
LOS_DL_LIST queueList; /* list of tasks blocked on this lock, chained through FutexNode.queueList */
LOS_DL_LIST futexList; /* points to the next FutexNode (the next futex lock) */
UINTPTR key; /* private:uvaddr shared:paddr */
UINT32 index; /* hash bucket index */
UINT32 pid; /* private:process id shared:OS_INVALID(-1) */
LOS_DL_LIST pendList; /* point to pendList in TCB struct */
LOS_DL_LIST queueList; /* thread list blocked by this lock */
LOS_DL_LIST futexList; /* point to the next FutexNode */
} FutexNode;
extern UINT32 OsFutexInit(VOID);
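The comments in this header describe how futex keys are bucketed: private futexes are hashed by user virtual address into buckets 0-63, shared futexes by physical address into buckets 64-79. The self-contained sketch below mirrors that bucket selection under those assumptions; DemoFnv1a32 stands in for the kernel's LOS_HashFNV32aBuf and the DEMO_* constants for the FUTEX_INDEX_* macros.

#include <stdint.h>
#include <stddef.h>

#define DEMO_PRIVATE_MAX 64U
#define DEMO_SHARED_MAX  16U
#define DEMO_SHARED_POS  DEMO_PRIVATE_MAX

static uint32_t DemoFnv1a32(const void *buf, size_t len)
{
    const unsigned char *p = buf;
    uint32_t hash = 0x811c9dc5U;   /* FNV-1a 32-bit offset basis */
    while (len--) {
        hash ^= *p++;
        hash *= 0x01000193U;       /* FNV-1a 32-bit prime */
    }
    return hash;
}

/* isPrivate mirrors the FUTEX_PRIVATE flag: private keys are user virtual
 * addresses, shared keys are physical addresses. */
static uint32_t DemoFutexKeyToIndex(uintptr_t key, int isPrivate)
{
    uint32_t index = DemoFnv1a32(&key, sizeof(key));
    if (isPrivate) {
        return index & (DEMO_PRIVATE_MAX - 1);                 /* buckets 0..63 */
    }
    return (index & (DEMO_SHARED_MAX - 1)) + DEMO_SHARED_POS;  /* buckets 64..79 */
}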

@ -42,13 +42,13 @@ typedef struct TagQueueCB LosQueueCB;
typedef struct OsMux LosMux;
typedef LosMux pthread_mutex_t;
typedef struct ProcessCB LosProcessCB;
// IPC container
typedef struct IpcContainer {
Atomic rc;
LosQueueCB *allQueue; // queue control blocks (read/write separated)
LOS_DL_LIST freeQueueList;// list of free queues
LosQueueCB *allQueue;
LOS_DL_LIST freeQueueList;
fd_set queueFdSet;
struct mqarray queueTable[LOSCFG_BASE_IPC_QUEUE_LIMIT];// queue pool
struct mqarray queueTable[LOSCFG_BASE_IPC_QUEUE_LIMIT];
pthread_mutex_t mqueueMutex;
struct mqpersonal *mqPrivBuf[MAX_MQ_FD];
struct shminfo shmInfo;

@ -42,7 +42,7 @@ extern "C" {
#endif /* __cplusplus */
typedef struct {
UINT32 memUsed; ///< memory used by the task
UINT32 memUsed;
} TskMemUsedInfo;
extern VOID OsTaskMemUsedInc(UINT32 usedSize, UINT32 taskID);
@ -53,7 +53,7 @@ extern VOID OsTaskMemClear(UINT32 taskID);
#ifdef LOS_MEM_SLAB
typedef struct {
UINT32 slabUsed; ///< memory the task has allocated through the slab allocator
UINT32 slabUsed;
} TskSlabUsedInfo;
extern VOID OsTaskSlabUsedInc(UINT32 usedSize, UINT32 taskID);

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:

@ -43,9 +43,9 @@ extern "C" {
#ifdef LOSCFG_KERNEL_SMP
typedef enum {
CPU_RUNNING = 0, ///< cpu is running
CPU_HALT, ///< cpu in the halt
CPU_EXC ///< cpu in the exc
CPU_RUNNING = 0, /* cpu is running */
CPU_HALT, /* cpu in the halt */
CPU_EXC /* cpu in the exc */
} ExcFlag;
typedef struct {
@ -55,14 +55,14 @@ typedef struct {
#endif
} Percpu;
/*! the kernel per-cpu structure */
/* the kernel per-cpu structure */
extern Percpu g_percpu[LOSCFG_KERNEL_CORE_NUM];
/*! Get the per-cpu data of the currently running CPU */
STATIC INLINE Percpu *OsPercpuGet(VOID)
{
return &g_percpu[ArchCurrCpuid()];
return &g_percpu[ArchCurrCpuid()];
}
/*! Get the per-cpu data of the given CPU */
STATIC INLINE Percpu *OsPercpuGetByID(UINT32 cpuid)
{
return &g_percpu[cpuid];

@ -38,29 +38,29 @@ typedef struct TagTaskCB LosTaskCB;
typedef struct ProcessCB LosProcessCB;
struct ProcessGroup;
struct Container;
// Virtual process/task information
typedef struct {
UINT32 vid; /* Virtual ID */
UINT32 vpid; /* Virtual parent ID */
UINTPTR cb; /* Control block */
LosProcessCB *realParent; /* process real parent */
LOS_DL_LIST node;// linkage into PidContainer.pidFreeList or tidFreeList
UINT32 vid; /* Virtual ID */
UINT32 vpid; /* Virtual parent ID */
UINTPTR cb; /* Control block */
LosProcessCB *realParent; /* process real parent */
LOS_DL_LIST node;
} ProcessVid;
#define PID_CONTAINER_LEVEL_LIMIT 3
// PID container
typedef struct PidContainer {
Atomic rc; // reference count
Atomic level; // nesting level: 0 is the top level; a parent is one level above its children
Atomic lock; // lock
BOOL referenced; // whether the container is referenced
UINT32 containerID; // container ID
struct PidContainer *parent; // parent PID container
struct ProcessGroup *rootPGroup; // root process group
LOS_DL_LIST tidFreeList; // free task ID list
ProcessVid tidArray[LOSCFG_BASE_CORE_TSK_LIMIT];// virtual task pool
LOS_DL_LIST pidFreeList; // free process ID list
ProcessVid pidArray[LOSCFG_BASE_CORE_PROCESS_LIMIT];// virtual process pool
Atomic rc;
Atomic level;
Atomic lock;
BOOL referenced;
UINT32 containerID;
struct PidContainer *parent;
struct ProcessGroup *rootPGroup;
LOS_DL_LIST tidFreeList;
ProcessVid tidArray[LOSCFG_BASE_CORE_TSK_LIMIT];
LOS_DL_LIST pidFreeList;
ProcessVid pidArray[LOSCFG_BASE_CORE_PROCESS_LIMIT];
} PidContainer;
#define OS_PID_CONTAINER_FROM_PCB(processCB) ((processCB)->container->pidContainer)

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -65,86 +65,82 @@ extern "C" {
#ifdef LOSCFG_SECURITY_CAPABILITY
#define OS_GROUPS_NUMBER_MAX 256
/*! User descriptor */
typedef struct {
UINT32 userID; ///< user ID in [0, 60000]; 0 is the root user
UINT32 userID;
UINT32 effUserID;
UINT32 gid; ///< group ID in [0, 60000]; 0 is the root group
UINT32 gid;
UINT32 effGid;
UINT32 groupNumber;///< number of groups
UINT32 groups[1]; // groups the user belongs to; a user may belong to several groups
UINT32 groupNumber;
UINT32 groups[1];
} User;
#endif
/*! Process group structure */
typedef struct ProcessGroup {
UINTPTR pgroupLeader; /**< Process group leader is the process that created the group */
LOS_DL_LIST processList; /**< List of processes under this process group */
LOS_DL_LIST exitProcessList; /**< List of closed processes (zombie processes) under this group */
LOS_DL_LIST groupList; /**< Process group list */
UINTPTR pgroupLeader; /**< Process group leader is the process that created the group */
LOS_DL_LIST processList; /**< List of processes under this process group */
LOS_DL_LIST exitProcessList; /**< List of closed processes (zombie processes) under this group */
LOS_DL_LIST groupList; /**< Process group list */
} ProcessGroup;
/**
* Process control block.
*/
typedef struct ProcessCB {
CHAR processName[OS_PCB_NAME_LEN]; /**< Process name */
UINT32 processID; /**< Process ID = leader thread ID; allocated from the process pool */
CHAR processName[OS_PCB_NAME_LEN]; /**< Process name */
UINT32 processID; /**< Process ID */
UINT16 processStatus; /**< [15:4] Process Status; [3:0] The number of threads currently
running in the process */
UINT16 consoleID; /**< The console id of task belongs */
UINT16 processMode; /**< Kernel Mode:0; User Mode:1; */
running in the process */
UINT16 consoleID; /**< The console id of task belongs */
UINT16 processMode; /**< Kernel Mode:0; User Mode:1; */
struct ProcessCB *parentProcess; /**< Parent process */
UINT32 exitCode; /**< Process exit status */
LOS_DL_LIST pendList; /**< Block list to which the process belongs; the process is linked here when it blocks */
LOS_DL_LIST childrenList; /**< Children process list (doubly linked circular list) */
LOS_DL_LIST exitChildList; /**< List of exited (zombie) child processes */
LOS_DL_LIST siblingList; /**< Linkage in parent's children list; siblings share the same parent */
ProcessGroup *pgroup; /**< Process group to which a process belongs */
LOS_DL_LIST subordinateGroupList; /**< Linkage in the process group's member list */
UINT32 exitCode; /**< Process exit status */
LOS_DL_LIST pendList; /**< Block list to which the process belongs */
LOS_DL_LIST childrenList; /**< Children process list */
LOS_DL_LIST exitChildList; /**< Exit children process list */
LOS_DL_LIST siblingList; /**< Linkage in parent's children list */
ProcessGroup *pgroup; /**< Process group to which a process belongs */
LOS_DL_LIST subordinateGroupList; /**< Linkage in group list */
LosTaskCB *threadGroup;
LOS_DL_LIST threadSiblingList; /**< List of threads (tasks) under this process */
volatile UINT32 threadNumber; /**< Number of threads alive under this process */
UINT32 threadCount; /**< Total number of threads created under this process */
LOS_DL_LIST waitList; /**< The process holds the waitList to support wait/waitpid; the parent reclaims child resources and collects exit status through it */
LOS_DL_LIST threadSiblingList; /**< List of threads under this process */
volatile UINT32 threadNumber; /**< Number of threads alive under this process */
UINT32 threadCount; /**< Total number of threads created under this process */
LOS_DL_LIST waitList; /**< The process holds the waitLits to support wait/waitpid */
#ifdef LOSCFG_KERNEL_SMP
UINT32 timerCpu; /**< CPU core number of this task is delayed or pended */
UINT32 timerCpu; /**< CPU core number of this task is delayed or pended */
#endif
UINTPTR sigHandler; /**< Signal handler (handles signals such as SIGSYS) */
sigset_t sigShare; /**< Signal share bits; sigset_t is a 64-bit value, one bit per signal */
UINTPTR sigHandler; /**< Signal handler */
sigset_t sigShare; /**< Signal share bit */
#ifdef LOSCFG_KERNEL_LITEIPC
ProcIpcInfo *ipcInfo; /**< Memory pool for lite ipc; backed by the virtual device mounted at /dev/lite_ipc */
ProcIpcInfo *ipcInfo; /**< Memory pool for lite ipc */
#endif
#ifdef LOSCFG_KERNEL_VM
LosVmSpace *vmSpace; /**< VMM space of the process; describes its virtual memory (what Linux calls the memory descriptor) */
LosVmSpace *vmSpace; /**< VMM space for processes */
#endif
#ifdef LOSCFG_FS_VFS
struct files_struct *files; /**< Files held by the process (its file table) */
#endif // each process has its own file table; note that one file may be operated on by several processes
timer_t timerID; /**< iTimer */
struct files_struct *files; /**< Files held by the process */
#endif
timer_t timerID; /**< ITimer */
#ifdef LOSCFG_SECURITY_CAPABILITY // security capabilities
User *user; ///< owner of the process
UINT32 capability; ///< capability set, e.g. CAP_SETGID
#ifdef LOSCFG_SECURITY_CAPABILITY
User *user;
UINT32 capability;
#endif
#ifdef LOSCFG_SECURITY_VID // virtual ID mapping
TimerIdMap timerIdMap;
#ifdef LOSCFG_SECURITY_VID
TimerIdMap timerIdMap;
#endif
#ifdef LOSCFG_DRIVERS_TZDRIVER
struct Vnode *execVnode; /**< Exec bin of the process */
struct Vnode *execVnode; /**< Exec bin of the process */
#endif
mode_t umask; ///< umask (user file-creation mode mask): the basis of the default permissions applied when files or directories are created
mode_t umask;
#ifdef LOSCFG_KERNEL_CPUP
OsCpupBase *processCpup; /**< Process cpu usage */
OsCpupBase *processCpup; /**< Process cpu usage */
#endif
struct rlimit *resourceLimit; ///< resource limits: the system does not let a single process consume resources without bound
struct rlimit *resourceLimit;
#ifdef LOSCFG_KERNEL_CONTAINER
Container *container; ///< kernel container
Container *container;
#ifdef LOSCFG_USER_CONTAINER
struct Credentials *credentials; ///< user credentials
struct Credentials *credentials;
#endif
#endif
#ifdef LOSCFG_PROC_PROCESS_DIR
struct ProcDirEntry *procDir; ///< /proc directory entry
struct ProcDirEntry *procDir;
#endif
#ifdef LOSCFG_KERNEL_PLIMITS
ProcLimiterSet *plimits;
@ -165,8 +161,8 @@ extern UINT32 g_processMaxNum;
#define OS_PCB_FROM_TCB(taskCB) ((LosProcessCB *)((taskCB)->processCB))
#define OS_PCB_FROM_TID(taskID) ((LosProcessCB *)(OS_TCB_FROM_TID(taskID)->processCB))
#define OS_GET_PGROUP_LEADER(pgroup) ((LosProcessCB *)((pgroup)->pgroupLeader))
#define OS_PCB_FROM_SIBLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, siblingList)///< get the LosProcessCB from its siblingList node
#define OS_PCB_FROM_PENDLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, pendList) ///< get the LosProcessCB from its pendList node
#define OS_PCB_FROM_SIBLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, siblingList)
#define OS_PCB_FROM_PENDLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, pendList)
/**
* @ingroup los_process
@ -206,7 +202,7 @@ extern UINT32 g_processMaxNum;
*
* The process is run out but the resources occupied by the process are not recovered.
*/
#define OS_PROCESS_STATUS_ZOMBIES 0x0100U ///< process status: zombie
#define OS_PROCESS_STATUS_ZOMBIES 0x0100U
/**
* @ingroup los_process
@ -215,7 +211,7 @@ extern UINT32 g_processMaxNum;
* The process status equal this is process control block unused,
* coexisting with OS_PROCESS_STATUS_ZOMBIES means that the control block is not recovered.
*/
#define OS_PROCESS_FLAG_UNUSED 0x0200U ///< the process control block is unused; the initial state of entries on the free list
#define OS_PROCESS_FLAG_UNUSED 0x0200U
/**
* @ingroup los_process
@ -223,7 +219,7 @@ extern UINT32 g_processMaxNum;
*
* The process has been call exit, it only works with multiple cores.
*/
#define OS_PROCESS_FLAG_EXIT 0x0400U ///< the process has exited and sits on the recycle list waiting for its resources to be reclaimed
#define OS_PROCESS_FLAG_EXIT 0x0400U
/**
* @ingroup los_process
@ -231,7 +227,7 @@ extern UINT32 g_processMaxNum;
*
* The process is the leader of the process group.
*/
#define OS_PROCESS_FLAG_GROUP_LEADER 0x0800U ///< the process is the leader of its process group
#define OS_PROCESS_FLAG_GROUP_LEADER 0x0800U
/**
* @ingroup los_process
@ -239,21 +235,21 @@ extern UINT32 g_processMaxNum;
*
* The process has performed the exec operation.
*/
#define OS_PROCESS_FLAG_ALREADY_EXEC 0x1000U ///< the process has performed exec (set when the ELF is loaded)
#define OS_PROCESS_FLAG_ALREADY_EXEC 0x1000U
/**
* @ingroup los_process
* Flag that indicates the process or process control block status.
*
* The process is dying or already dying.
*/ /// 进程不活跃状态定义: 身上贴有退出便签且状态为僵死的进程
#define OS_PROCESS_STATUS_INACTIVE (OS_PROCESS_FLAG_EXIT | OS_PROCESS_STATUS_ZOMBIES)
*/
#define OS_PROCESS_STATUS_INACTIVE (OS_PROCESS_FLAG_EXIT | OS_PROCESS_STATUS_ZOMBIES)
/**
* @ingroup los_process
* Used to check if the process control block is unused.
*/
STATIC INLINE BOOL OsProcessIsUnused(const LosProcessCB *processCB)// is the process control block unused?
STATIC INLINE BOOL OsProcessIsUnused(const LosProcessCB *processCB)
{
return ((processCB->processStatus & OS_PROCESS_FLAG_UNUSED) != 0);
}
@ -261,8 +257,8 @@ STATIC INLINE BOOL OsProcessIsUnused(const LosProcessCB *processCB)//查下进
/**
* @ingroup los_process
* Used to check if the process is inactive.
*/ /// a process is inactive if it is unused or carries the inactive flags
STATIC INLINE BOOL OsProcessIsInactive(const LosProcessCB *processCB)// is the process inactive?
*/
STATIC INLINE BOOL OsProcessIsInactive(const LosProcessCB *processCB)
{
return ((processCB->processStatus & (OS_PROCESS_FLAG_UNUSED | OS_PROCESS_STATUS_INACTIVE)) != 0);
}
@ -270,8 +266,8 @@ STATIC INLINE BOOL OsProcessIsInactive(const LosProcessCB *processCB)//查下进
/**
* @ingroup los_process
* Used to check if the process is dead.
*/ /// a process is dead once it is in the zombie state
STATIC INLINE BOOL OsProcessIsDead(const LosProcessCB *processCB)// is the process dead?
*/
STATIC INLINE BOOL OsProcessIsDead(const LosProcessCB *processCB)
{
return ((processCB->processStatus & OS_PROCESS_STATUS_ZOMBIES) != 0);
}
@ -286,44 +282,68 @@ STATIC INLINE BOOL OsProcessIsPGroupLeader(const LosProcessCB *processCB)
return ((processCB->processStatus & OS_PROCESS_FLAG_GROUP_LEADER) != 0);
}
/**
* @ingroup los_process
* The highest priority of a kernel mode process.
*/
#define OS_PROCESS_PRIORITY_HIGHEST 0
#define OS_PROCESS_PRIORITY_HIGHEST 0 ///< highest process priority
#define OS_PROCESS_PRIORITY_LOWEST 31 ///< lowest process priority
#define OS_USER_PROCESS_PRIORITY_HIGHEST 10 ///< boundary between kernel- and user-mode priorities: 0-9 kernel, 10-31 user
#define OS_USER_PROCESS_PRIORITY_LOWEST OS_PROCESS_PRIORITY_LOWEST ///< lowest priority of a user process
/**
* @ingroup los_process
* The lowest priority of a kernel mode process
*/
#define OS_PROCESS_PRIORITY_LOWEST 31
#define OS_PROCESS_USERINIT_PRIORITY 28 ///< default priority of the user-mode root process (28)
/**
* @ingroup los_process
* The highest priority of a user mode process.
*/
#define OS_USER_PROCESS_PRIORITY_HIGHEST 10
/**
* @ingroup los_process
* The lowest priority of a user mode process
*/
#define OS_USER_PROCESS_PRIORITY_LOWEST OS_PROCESS_PRIORITY_LOWEST
#define OS_KERNEL_IDLE_PROCESS_ID 0U // process 0 is the kernel idle process
/**
* @ingroup los_process
* User state root process default priority
*/
#define OS_PROCESS_USERINIT_PRIORITY 28
/**
* @ingroup los_process
* ID of the kernel idle process
*/
#define OS_KERNEL_IDLE_PROCESS_ID 0U
#define OS_USER_ROOT_PROCESS_ID 1U // process 1 is the user-mode root process
/**
* @ingroup los_process
* ID of the user root process
*/
#define OS_USER_ROOT_PROCESS_ID 1U
/**
* @ingroup los_process
* ID of the kernel root process
*/
#define OS_KERNEL_ROOT_PROCESS_ID 2U
#define OS_KERNEL_ROOT_PROCESS_ID 2U // process 2 is the kernel-mode root process
#define OS_TASK_DEFAULT_STACK_SIZE 0x2000 ///< default task stack size, 8 KiB
#define OS_USER_TASK_SYSCALL_STACK_SIZE 0x3000 ///< kernel-mode stack used while a user task is inside a syscall, 12 KiB
#define OS_USER_TASK_STACK_SIZE 0x100000 ///< user-space stack size of a user task, 1 MiB
#define OS_TASK_DEFAULT_STACK_SIZE 0x2000
#define OS_USER_TASK_SYSCALL_STACK_SIZE 0x3000
#define OS_USER_TASK_STACK_SIZE 0x100000
#define OS_KERNEL_MODE 0x0U ///< kernel mode
#define OS_USER_MODE 0x1U ///< user mode
/*! Is this a user-mode process? */
#define OS_KERNEL_MODE 0x0U
#define OS_USER_MODE 0x1U
STATIC INLINE BOOL OsProcessIsUserMode(const LosProcessCB *processCB)
{
return (processCB->processMode == OS_USER_MODE);
}
#define LOS_PRIO_PROCESS 0U ///< the priority applies to a process
#define LOS_PRIO_PGRP 1U ///< the priority applies to a process group
#define LOS_PRIO_USER 2U ///< the priority applies to a user
#define LOS_PRIO_PROCESS 0U
#define LOS_PRIO_PGRP 1U
#define LOS_PRIO_USER 2U
#define OS_USER_PRIVILEGE_PROCESS_GROUP ((UINTPTR)OsGetUserInitProcess())
#define OS_KERNEL_PROCESS_GROUP ((UINTPTR)OsGetKernelInitProcess())
@ -333,40 +353,40 @@ STATIC INLINE BOOL OsProcessIsUserMode(const LosProcessCB *processCB)
* 31 15 8 7 0
* | | exit code | core dump | signal |
*/
#define OS_PRO_EXIT_OK 0 ///< the process exited normally
/// set bit 7 of the exit code (the core-dump flag)
#define OS_PRO_EXIT_OK 0
STATIC INLINE VOID OsProcessExitCodeCoreDumpSet(LosProcessCB *processCB)
{
processCB->exitCode |= 0x80U; // 0b10000000
processCB->exitCode |= 0x80U;
}
/// set the exit signal (bits 0..6)
STATIC INLINE VOID OsProcessExitCodeSignalSet(LosProcessCB *processCB, UINT32 signal)
{
processCB->exitCode |= signal & 0x7FU;// 0b01111111
processCB->exitCode |= signal & 0x7FU;
}
/// clear the exit signal bits
STATIC INLINE VOID OsProcessExitCodeSignalClear(LosProcessCB *processCB)
{
processCB->exitCode &= (~0x7FU);// clear the low 7 bits
processCB->exitCode &= (~0x7FU);
}
/// whether an exit signal has been set: the default is 0, so a non-zero result of & 0x7FU means one was set
STATIC INLINE BOOL OsProcessExitCodeSignalIsSet(LosProcessCB *processCB)
{
return (processCB->exitCode) & 0x7FU;
}
/// set the exit code (bits 8..15)
STATIC INLINE VOID OsProcessExitCodeSet(LosProcessCB *processCB, UINT32 code)
{
processCB->exitCode |= ((code & 0x000000FFU) << 8U) & 0x0000FF00U; /* 8: Move 8 bits to the left, exitCode */
}
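The inline helpers above implement the exit-code layout documented a few lines earlier: bits 0..6 carry the signal, bit 7 the core-dump flag, and bits 8..15 the exit status. The stand-alone sketch below packs a value the same way; DemoPackExitCode is illustrative only and not part of the kernel.

#include <stdint.h>

static uint32_t DemoPackExitCode(uint32_t exitCode, uint32_t signal, int coreDumped)
{
    uint32_t packed = 0;
    packed |= signal & 0x7FU;             /* signal number, bits 0..6 */
    if (coreDumped) {
        packed |= 0x80U;                  /* core-dump flag, bit 7 */
    }
    packed |= (exitCode & 0xFFU) << 8U;   /* exit status, bits 8..15 */
    return packed;
}

/* DemoPackExitCode(1, 0, 0) == 0x100, i.e. "exited with status 1, no signal". */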
#define OS_PID_CHECK_INVALID(pid) (((UINT32)(pid)) >= g_processMaxNum)
/*! Is the user-supplied process ID valid? */
STATIC INLINE BOOL OsProcessIDUserCheckInvalid(UINT32 pid)
{
return ((pid >= g_processMaxNum) || (pid == 0));
}
/*! Get the PCB of the current process */
STATIC INLINE LosProcessCB *OsCurrProcessGet(VOID)
{
UINT32 intSave;
@ -378,7 +398,6 @@ STATIC INLINE LosProcessCB *OsCurrProcessGet(VOID)
}
#ifdef LOSCFG_SECURITY_CAPABILITY
/*! Get the user that owns the current process */
STATIC INLINE User *OsCurrUserGet(VOID)
{
User *user = NULL;
@ -450,14 +469,14 @@ STATIC INLINE UINT32 OsGetRootPid(const LosProcessCB *processCB)
/*
* return immediately if no child has exited.
*/
#define LOS_WAIT_WNOHANG (1 << 0U) ///< return immediately instead of blocking if no child has exited; if one has, return its PID
#define LOS_WAIT_WNOHANG (1 << 0U)
/*
* return if a child has stopped (but not traced via ptrace(2)).
* Status for traced children which have stopped is provided even
* if this option is not specified.
*/
#define LOS_WAIT_WUNTRACED (1 << 1U) ///< also return if a child has stopped, without reporting its exit status (untraced)
#define LOS_WAIT_WUNTRACED (1 << 1U)
#define LOS_WAIT_WSTOPPED (1 << 1U)
/*
@ -469,7 +488,7 @@ STATIC INLINE UINT32 OsGetRootPid(const LosProcessCB *processCB)
* return if a stopped child has been resumed by delivery of SIGCONT.
* (For Linux-only options, see below.)
*/
#define LOS_WAIT_WCONTINUED (1 << 3U) ///< also report children that have been resumed by SIGCONT (continued)
#define LOS_WAIT_WCONTINUED (1 << 3U)
/*
* Leave the child in a waitable state;
@ -480,30 +499,30 @@ STATIC INLINE UINT32 OsGetRootPid(const LosProcessCB *processCB)
/*
* Indicates that you are already in a wait state
*/
#define OS_PROCESS_WAIT (1 << 15U) ///< the process is already in the wait state
#define OS_PROCESS_WAIT (1 << 15U)
/*
* Wait for any child process to finish
*/
#define OS_PROCESS_WAIT_ANY OS_TASK_WAIT_ANYPROCESS ///< wait for any child process to finish
#define OS_PROCESS_WAIT_ANY OS_TASK_WAIT_ANYPROCESS
/*
* Wait for the child process specified by the pid to finish
*/
#define OS_PROCESS_WAIT_PRO OS_TASK_WAIT_PROCESS ///< wait for the child process specified by pid to finish
#define OS_PROCESS_WAIT_PRO OS_TASK_WAIT_PROCESS
/*
* Waits for any child process in the specified process group to finish.
*/
#define OS_PROCESS_WAIT_GID OS_TASK_WAIT_GID ///< wait for any child in the specified process group to finish
#define OS_PROCESS_WAIT_GID OS_TASK_WAIT_GID
#define OS_PROCESS_INFO_ALL 1
#define OS_PROCESS_DEFAULT_UMASK 0022 ///< default umask; most Linux systems also default to 022
// the umask removes permissions from the defaults when files are created: files become 0666 - 0022 = 0644, directories 0777 - 0022 = 0755
extern UINTPTR __user_init_entry; ///< entry address of the first user-mode process (init); see LITE_USER_SEC_ENTRY
extern UINTPTR __user_init_bss; ///< see LITE_USER_SEC_BSS; assigned by liteos.ld
extern UINTPTR __user_init_end; ///< end address of the init process image in user space
extern UINTPTR __user_init_load_addr;///< load address of the init process, assigned by the linker
#define OS_PROCESS_DEFAULT_UMASK 0022
extern UINTPTR __user_init_entry;
extern UINTPTR __user_init_bss;
extern UINTPTR __user_init_end;
extern UINTPTR __user_init_load_addr;
extern UINT32 OsProcessInit(VOID);
extern UINT32 OsSystemProcessCreate(VOID);
extern VOID OsProcessNaturalExit(LosProcessCB *processCB, UINT32 status);

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -40,7 +40,6 @@
#include "los_exc.h"
#endif
/// Initialize an event control block
LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventInit(PEVENT_CB_S eventCB)
{
UINT32 intSave;
@ -49,14 +48,14 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventInit(PEVENT_CB_S eventCB)
return LOS_ERRNO_EVENT_PTR_NULL;
}
intSave = LOS_IntLock();// disable interrupts
eventCB->uwEventID = 0;// clear the event bits
LOS_ListInit(&eventCB->stEventList);// initialize the list of waiting tasks
LOS_IntRestore(intSave);// restore interrupts
intSave = LOS_IntLock();
eventCB->uwEventID = 0;
LOS_ListInit(&eventCB->stEventList);
LOS_IntRestore(intSave);
OsHookCall(LOS_HOOK_TYPE_EVENT_INIT, eventCB);
return LOS_OK;
}
/// Check the event parameters
LITE_OS_SEC_TEXT STATIC UINT32 OsEventParamCheck(const VOID *ptr, UINT32 eventMask, UINT32 mode)
{
if (ptr == NULL) {
@ -78,53 +77,52 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventParamCheck(const VOID *ptr, UINT32 eventMa
}
return LOS_OK;
}
/// Check whether the caller's events match the expectation, according to the event mask and mode
LITE_OS_SEC_TEXT UINT32 OsEventPoll(UINT32 *eventID, UINT32 eventMask, UINT32 mode)
{
UINT32 ret = 0;
LOS_ASSERT(OsIntLocked());// interrupts must be disabled here
LOS_ASSERT(LOS_SpinHeld(&g_taskSpin));// the task spinlock must be held
LOS_ASSERT(OsIntLocked());
LOS_ASSERT(LOS_SpinHeld(&g_taskSpin));
if (mode & LOS_WAITMODE_OR) {// OR mode: any event in the mask satisfies the read
if (mode & LOS_WAITMODE_OR) {
if ((*eventID & eventMask) != 0) {
ret = *eventID & eventMask;
}
} else {// AND mode: wait for all of the events
if ((eventMask != 0) && (eventMask == (*eventID & eventMask))) {// every event in the mask must have occurred
} else {
if ((eventMask != 0) && (eventMask == (*eventID & eventMask))) {
ret = *eventID & eventMask;
}
}
if (ret && (mode & LOS_WAITMODE_CLR)) {// clear the events once they have been read
if (ret && (mode & LOS_WAITMODE_CLR)) {
*eventID = *eventID & ~ret;
}
return ret;
}
/// Validate an event read
LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadCheck(const PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode)
{
UINT32 ret;
LosTaskCB *runTask = NULL;
ret = OsEventParamCheck(eventCB, eventMask, mode);// check the parameters
ret = OsEventParamCheck(eventCB, eventMask, mode);
if (ret != LOS_OK) {
return ret;
}
if (OS_INT_ACTIVE) {// an interrupt is being handled
return LOS_ERRNO_EVENT_READ_IN_INTERRUPT;// events must not be read in interrupt context
if (OS_INT_ACTIVE) {
return LOS_ERRNO_EVENT_READ_IN_INTERRUPT;
}
runTask = OsCurrTaskGet();// get the current task
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {// the task is a system task
runTask = OsCurrTaskGet();
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
OsBackTrace();
return LOS_ERRNO_EVENT_READ_IN_SYSTEM_TASK;// events must not be read from a system task
return LOS_ERRNO_EVENT_READ_IN_SYSTEM_TASK;
}
return LOS_OK;
}
/// Implementation of reading the given events; the timeout is relative and measured in ticks
LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadImp(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode,
UINT32 timeout, BOOL once)
{
@ -133,57 +131,57 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadImp(PEVENT_CB_S eventCB, UINT32 eventM
OsHookCall(LOS_HOOK_TYPE_EVENT_READ, eventCB, eventMask, mode, timeout);
if (once == FALSE) {
ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);// check whether the expected events have occurred
ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);
}
if (ret == 0) {// the expected events have not occurred yet
if (timeout == 0) {// non-blocking read
if (ret == 0) {
if (timeout == 0) {
return ret;
}
if (!OsPreemptableInSched()) {// preemption is disabled, so the task cannot block
if (!OsPreemptableInSched()) {
return LOS_ERRNO_EVENT_READ_IN_LOCK;
}
runTask->eventMask = eventMask; // events being waited for
runTask->eventMode = mode; // wait mode
runTask->taskEvent = eventCB; // event control block
OsTaskWaitSetPendMask(OS_TASK_WAIT_EVENT, eventMask, timeout);// block the task on the event, recording the mask and the timeout
runTask->eventMask = eventMask;
runTask->eventMode = mode;
runTask->taskEvent = eventCB;
OsTaskWaitSetPendMask(OS_TASK_WAIT_EVENT, eventMask, timeout);
ret = runTask->ops->wait(runTask, &eventCB->stEventList, timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
return LOS_ERRNO_EVENT_READ_TIMEOUT;
}
ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);// re-check the events after being woken
ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);
}
return ret;
}
/// Read the given events; the timeout is relative and measured in ticks
LITE_OS_SEC_TEXT STATIC UINT32 OsEventRead(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode, UINT32 timeout,
BOOL once)
{
UINT32 ret;
UINT32 intSave;
ret = OsEventReadCheck(eventCB, eventMask, mode);// validate the read
ret = OsEventReadCheck(eventCB, eventMask, mode);
if (ret != LOS_OK) {
return ret;
}
SCHEDULER_LOCK(intSave);
ret = OsEventReadImp(eventCB, eventMask, mode, timeout, once);// perform the read
ret = OsEventReadImp(eventCB, eventMask, mode, timeout, once);
SCHEDULER_UNLOCK(intSave);
return ret;
}
/// Resume a task waiting on the event
LITE_OS_SEC_TEXT STATIC UINT8 OsEventResume(LosTaskCB *resumedTask, const PEVENT_CB_S eventCB, UINT32 events)
{
UINT8 exitFlag = 0;// whether a task was woken
UINT8 exitFlag = 0;
if (((resumedTask->eventMode & LOS_WAITMODE_OR) && ((resumedTask->eventMask & events) != 0)) ||
((resumedTask->eventMode & LOS_WAITMODE_AND) &&
((resumedTask->eventMask & eventCB->uwEventID) == resumedTask->eventMask))) {// handles both the AND and the OR wait modes
exitFlag = 1;
((resumedTask->eventMask & eventCB->uwEventID) == resumedTask->eventMask))) {
exitFlag = 1;
resumedTask->taskEvent = NULL;
OsTaskWakeClearPendMask(resumedTask);
@ -192,33 +190,33 @@ LITE_OS_SEC_TEXT STATIC UINT8 OsEventResume(LosTaskCB *resumedTask, const PEVENT
return exitFlag;
}
/// Write events without taking the scheduler lock (the caller must hold it)
LITE_OS_SEC_TEXT VOID OsEventWriteUnsafe(PEVENT_CB_S eventCB, UINT32 events, BOOL once, UINT8 *exitFlag)
{
LosTaskCB *resumedTask = NULL;
LosTaskCB *nextTask = NULL;
BOOL schedFlag = FALSE;
OsHookCall(LOS_HOOK_TYPE_EVENT_WRITE, eventCB, events);
eventCB->uwEventID |= events;// set the corresponding event bits
if (!LOS_ListEmpty(&eventCB->stEventList)) {// wake up tasks waiting on this event, if any
eventCB->uwEventID |= events;
if (!LOS_ListEmpty(&eventCB->stEventList)) {
for (resumedTask = LOS_DL_LIST_ENTRY((&eventCB->stEventList)->pstNext, LosTaskCB, pendList);
&resumedTask->pendList != &eventCB->stEventList;) {// walk the list of waiting tasks
nextTask = LOS_DL_LIST_ENTRY(resumedTask->pendList.pstNext, LosTaskCB, pendList);// fetch the next task before the current one may be removed
if (OsEventResume(resumedTask, eventCB, events)) {// was the task resumed?
schedFlag = TRUE;// a task was made ready, so request a reschedule
&resumedTask->pendList != &eventCB->stEventList;) {
nextTask = LOS_DL_LIST_ENTRY(resumedTask->pendList.pstNext, LosTaskCB, pendList);
if (OsEventResume(resumedTask, eventCB, events)) {
schedFlag = TRUE;
}
if (once == TRUE) {// wake at most one task
break;
if (once == TRUE) {
break;
}
resumedTask = nextTask;// move on to the next task in the list
resumedTask = nextTask;
}
}
if ((exitFlag != NULL) && (schedFlag == TRUE)) {// tell the caller whether to reschedule
if ((exitFlag != NULL) && (schedFlag == TRUE)) {
*exitFlag = 1;
}
}
/// Write events
LITE_OS_SEC_TEXT STATIC UINT32 OsEventWrite(PEVENT_CB_S eventCB, UINT32 events, BOOL once)
{
UINT32 intSave;
@ -232,54 +230,54 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventWrite(PEVENT_CB_S eventCB, UINT32 events,
return LOS_ERRNO_EVENT_SETBIT_INVALID;
}
SCHEDULER_LOCK(intSave); // lock the scheduler
OsEventWriteUnsafe(eventCB, events, once, &exitFlag);// write the events
SCHEDULER_UNLOCK(intSave); // unlock the scheduler
SCHEDULER_LOCK(intSave);
OsEventWriteUnsafe(eventCB, events, once, &exitFlag);
SCHEDULER_UNLOCK(intSave);
if (exitFlag == 1) { // a reschedule is needed
LOS_MpSchedule(OS_MP_CPU_ALL);// notify all CPUs to schedule
LOS_Schedule();// perform the reschedule
if (exitFlag == 1) {
LOS_MpSchedule(OS_MP_CPU_ALL);
LOS_Schedule();
}
return LOS_OK;
}
/// Check whether the caller's events match the expectation, according to the mask and mode
LITE_OS_SEC_TEXT UINT32 LOS_EventPoll(UINT32 *eventID, UINT32 eventMask, UINT32 mode)
{
UINT32 ret;
UINT32 intSave;
// check the parameters
ret = OsEventParamCheck((VOID *)eventID, eventMask, mode);
if (ret != LOS_OK) {
return ret;
}
SCHEDULER_LOCK(intSave);// take the scheduler lock
SCHEDULER_LOCK(intSave);
ret = OsEventPoll(eventID, eventMask, mode);
SCHEDULER_UNLOCK(intSave);
return ret;
}
/// Read the given events; the timeout is relative, in ticks
LITE_OS_SEC_TEXT UINT32 LOS_EventRead(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode, UINT32 timeout)
{
return OsEventRead(eventCB, eventMask, mode, timeout, FALSE);
}
/// Write the given events
LITE_OS_SEC_TEXT UINT32 LOS_EventWrite(PEVENT_CB_S eventCB, UINT32 events)
{
return OsEventWrite(eventCB, events, FALSE);
}
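LOS_EventRead and LOS_EventWrite are typically paired as a simple producer/consumer handshake, much like the resource-recovery task earlier in this diff. The sketch below is a hedged illustration, not code from this repository; the DEMO_EVENT_* bits, g_demoEvent and DemoEventConsumer are made up.

#include "los_event.h"

#define DEMO_EVENT_DATA_READY 0x01U   /* hypothetical event bits */
#define DEMO_EVENT_SHUTDOWN   0x02U

STATIC EVENT_CB_S g_demoEvent;

VOID DemoEventConsumer(VOID)
{
    (VOID)LOS_EventInit(&g_demoEvent);
    while (1) {
        /* Block until either bit is set; OR mode accepts any bit, CLR clears what was read. */
        UINT32 events = LOS_EventRead(&g_demoEvent, DEMO_EVENT_DATA_READY | DEMO_EVENT_SHUTDOWN,
                                      LOS_WAITMODE_OR | LOS_WAITMODE_CLR, LOS_WAIT_FOREVER);
        if (events & DEMO_EVENT_SHUTDOWN) {
            break;
        }
    }
}

/* Elsewhere (another task) the producer would call:
 *     (VOID)LOS_EventWrite(&g_demoEvent, DEMO_EVENT_DATA_READY);
 * which sets the bit and wakes every task whose mask and mode are satisfied. */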
/// Read events once
LITE_OS_SEC_TEXT_MINOR UINT32 OsEventReadOnce(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode,
UINT32 timeout)
{
return OsEventRead(eventCB, eventMask, mode, timeout, TRUE);
}
/// Write events, waking at most one waiting task
LITE_OS_SEC_TEXT_MINOR UINT32 OsEventWriteOnce(PEVENT_CB_S eventCB, UINT32 events)
{
return OsEventWrite(eventCB, events, TRUE);
}
/// Destroy the given event control block
LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventDestroy(PEVENT_CB_S eventCB)
{
UINT32 intSave;
@ -300,7 +298,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventDestroy(PEVENT_CB_S eventCB)
OsHookCall(LOS_HOOK_TYPE_EVENT_DESTROY, eventCB);
return LOS_OK;
}
/// Clear the given events
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_EventClear(PEVENT_CB_S eventCB, UINT32 eventMask)
{
UINT32 intSave;
@ -315,7 +313,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_EventClear(PEVENT_CB_S eventCB, UINT32 eventMa
return LOS_OK;
}
/// Conditionally read events
#ifdef LOSCFG_COMPAT_POSIX
LITE_OS_SEC_TEXT UINT32 OsEventReadWithCond(const EventCond *cond, PEVENT_CB_S eventCB,
UINT32 eventMask, UINT32 mode, UINT32 timeout)

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -43,30 +43,28 @@
#ifdef LOSCFG_KERNEL_VM
#define OS_FUTEX_FROM_FUTEXLIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, futexList) // get the FutexNode from its futexList node
#define OS_FUTEX_FROM_QUEUELIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, queueList) // get the FutexNode from its queueList node
#define OS_FUTEX_KEY_BASE USER_ASPACE_BASE ///< base of the process user address space
#define OS_FUTEX_KEY_MAX (USER_ASPACE_BASE + USER_ASPACE_SIZE) ///< end of the process user address space
#define OS_FUTEX_FROM_FUTEXLIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, futexList)
#define OS_FUTEX_FROM_QUEUELIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, queueList)
#define OS_FUTEX_KEY_BASE USER_ASPACE_BASE
#define OS_FUTEX_KEY_MAX (USER_ASPACE_BASE + USER_ASPACE_SIZE)
/* private: 0~63 hash index_num
* shared: 64~79 hash index_num */
#define FUTEX_INDEX_PRIVATE_MAX 64 ///< buckets 0-63 hold private futexes, hashed by virtual address; a private futex belongs to a single process
///< and is used only for synchronization between threads of that process
#define FUTEX_INDEX_SHARED_MAX 16 ///< buckets 64-79 hold shared futexes, hashed by physical address and shareable across processes
#define FUTEX_INDEX_MAX (FUTEX_INDEX_PRIVATE_MAX + FUTEX_INDEX_SHARED_MAX) ///< 80 hash buckets in total
#define FUTEX_INDEX_PRIVATE_MAX 64
#define FUTEX_INDEX_SHARED_MAX 16
#define FUTEX_INDEX_MAX (FUTEX_INDEX_PRIVATE_MAX + FUTEX_INDEX_SHARED_MAX)
#define FUTEX_INDEX_SHARED_POS FUTEX_INDEX_PRIVATE_MAX ///< index of the first shared-futex bucket
#define FUTEX_INDEX_SHARED_POS FUTEX_INDEX_PRIVATE_MAX
#define FUTEX_HASH_PRIVATE_MASK (FUTEX_INDEX_PRIVATE_MAX - 1)
#define FUTEX_HASH_SHARED_MASK (FUTEX_INDEX_SHARED_MAX - 1)
/// A single hash bucket; FutexNode entries hang off its list
typedef struct {
LosMux listLock;///< mutex protecting lockList
LOS_DL_LIST lockList;///< list of FutexNode entries (futex: fast userspace mutex)
LosMux listLock;
LOS_DL_LIST lockList;
} FutexHash;
FutexHash g_futexHash[FUTEX_INDEX_MAX];///< the 80 hash buckets
FutexHash g_futexHash[FUTEX_INDEX_MAX];
/// Wrapper around the mutex lock operation
STATIC INT32 OsFutexLock(LosMux *lock)
{
UINT32 ret = LOS_MuxLock(lock, LOS_WAIT_FOREVER);
@ -86,15 +84,15 @@ STATIC INT32 OsFutexUnlock(LosMux *lock)
}
return LOS_OK;
}
/// Initialize the futex (fast userspace mutex) module
UINT32 OsFutexInit(VOID)
{
INT32 count;
UINT32 ret;
// initialize the 80 hash buckets
for (count = 0; count < FUTEX_INDEX_MAX; count++) {
LOS_ListInit(&g_futexHash[count].lockList); // initialize the list that FutexNode entries hang off
ret = LOS_MuxInit(&(g_futexHash[count].listLock), NULL);// initialize the bucket mutex
LOS_ListInit(&g_futexHash[count].lockList);
ret = LOS_MuxInit(&(g_futexHash[count].listLock), NULL);
if (ret) {
return ret;
}
@ -103,7 +101,7 @@ UINT32 OsFutexInit(VOID)
return LOS_OK;
}
LOS_MODULE_INIT(OsFutexInit, LOS_INIT_LEVEL_KMOD_EXTENDED);///< register the futex module for initialization
LOS_MODULE_INIT(OsFutexInit, LOS_INIT_LEVEL_KMOD_EXTENDED);
#ifdef LOS_FUTEX_DEBUG
STATIC VOID OsFutexShowTaskNodeAttr(const LOS_DL_LIST *futexList)
@ -154,63 +152,63 @@ VOID OsFutexHashShow(VOID)
}
}
#endif
/// Derive the hash key from the user-space address
STATIC INLINE UINTPTR OsFutexFlagsToKey(const UINT32 *userVaddr, const UINT32 flags)
{
UINTPTR futexKey;
if (flags & FUTEX_PRIVATE) {
futexKey = (UINTPTR)userVaddr;// private futex: hash by virtual address
futexKey = (UINTPTR)userVaddr;
} else {
futexKey = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);// shared futex: hash by physical address
futexKey = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);
}
return futexKey;
}
/// Map the hash key to a bucket index
STATIC INLINE UINT32 OsFutexKeyToIndex(const UINTPTR futexKey, const UINT32 flags)
{
UINT32 index = LOS_HashFNV32aBuf(&futexKey, sizeof(UINTPTR), FNV1_32A_INIT);// FNV-1a hash of the key
UINT32 index = LOS_HashFNV32aBuf(&futexKey, sizeof(UINTPTR), FNV1_32A_INIT);
if (flags & FUTEX_PRIVATE) {
index &= FUTEX_HASH_PRIVATE_MASK;// private futex: clamp the index to buckets 0-63
index &= FUTEX_HASH_PRIVATE_MASK;
} else {
index &= FUTEX_HASH_SHARED_MASK;
index += FUTEX_INDEX_SHARED_POS;// shared futex: shift the index into buckets 64-79
index += FUTEX_INDEX_SHARED_POS;
}
return index;
}
/// Fill in the hash key of a futex node
STATIC INLINE VOID OsFutexSetKey(UINTPTR futexKey, UINT32 flags, FutexNode *node)
{
node->key = futexKey;// hash key
node->index = OsFutexKeyToIndex(futexKey, flags);// hash bucket index
node->pid = (flags & FUTEX_PRIVATE) ? LOS_GetCurrProcessID() : OS_INVALID;// private futexes record the process ID; shared futexes do not
node->key = futexKey;
node->index = OsFutexKeyToIndex(futexKey, flags);
node->pid = (flags & FUTEX_PRIVATE) ? LOS_GetCurrProcessID() : OS_INVALID;
}
// Tear down the given node
STATIC INLINE VOID OsFutexDeinitFutexNode(FutexNode *node)
{
node->index = OS_INVALID_VALUE;
node->pid = 0;
LOS_ListDelete(&node->queueList);
}
/// Replace the old head node with the new one on futexList
STATIC INLINE VOID OsFutexReplaceQueueListHeadNode(FutexNode *oldHeadNode, FutexNode *newHeadNode)
{
LOS_DL_LIST *futexList = oldHeadNode->futexList.pstPrev;
LOS_ListDelete(&oldHeadNode->futexList);// unlink the old head from futexList
LOS_ListHeadInsert(futexList, &newHeadNode->futexList);// insert the new head at the front of futexList
if ((newHeadNode->queueList.pstNext == NULL) || (newHeadNode->queueList.pstPrev == NULL)) {// the new head has no waiters linked yet
LOS_ListInit(&newHeadNode->queueList);// initialize its waiter list
LOS_ListDelete(&oldHeadNode->futexList);
LOS_ListHeadInsert(futexList, &newHeadNode->futexList);
if ((newHeadNode->queueList.pstNext == NULL) || (newHeadNode->queueList.pstPrev == NULL)) {
LOS_ListInit(&newHeadNode->queueList);
}
}
/// Remove the given node from futexList
STATIC INLINE VOID OsFutexDeleteKeyFromFutexList(FutexNode *node)
{
LOS_ListDelete(&node->futexList);
}
/// Remove a futex node from its hash bucket
STATIC VOID OsFutexDeleteKeyNodeFromHash(FutexNode *node, BOOL isDeleteHead, FutexNode **headNode, BOOL *queueFlags)
{
FutexNode *nextNode = NULL;
@ -219,8 +217,8 @@ STATIC VOID OsFutexDeleteKeyNodeFromHash(FutexNode *node, BOOL isDeleteHead, Fut
return;
}
if (LOS_ListEmpty(&node->queueList)) {// no task is waiting on this futex
OsFutexDeleteKeyFromFutexList(node);// unlink it from the futex list
if (LOS_ListEmpty(&node->queueList)) {
OsFutexDeleteKeyFromFutexList(node);
if (queueFlags != NULL) {
*queueFlags = TRUE;
}
@ -228,10 +226,10 @@ STATIC VOID OsFutexDeleteKeyNodeFromHash(FutexNode *node, BOOL isDeleteHead, Fut
}
/* FutexList is not NULL, but the header node of queueList */
if (node->futexList.pstNext != NULL) {// the node is a head node
if (isDeleteHead == TRUE) {// should the head node be removed?
nextNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_FIRST(&node->queueList));// take the first waiting node
OsFutexReplaceQueueListHeadNode(node, nextNode);// and promote it to be the new head
if (node->futexList.pstNext != NULL) {
if (isDeleteHead == TRUE) {
nextNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_FIRST(&node->queueList));
OsFutexReplaceQueueListHeadNode(node, nextNode);
if (headNode != NULL) {
*headNode = nextNode;
}
@ -244,22 +242,22 @@ EXIT:
OsFutexDeinitFutexNode(node);
return;
}
/// Remove a futex node from the hash table
VOID OsFutexNodeDeleteFromFutexHash(FutexNode *node, BOOL isDeleteHead, FutexNode **headNode, BOOL *queueFlags)
{
FutexHash *hashNode = NULL;
// find the bucket index from the key
UINT32 index = OsFutexKeyToIndex(node->key, (node->pid == OS_INVALID) ? 0 : FUTEX_PRIVATE);
if (index >= FUTEX_INDEX_MAX) {
return;
}
hashNode = &g_futexHash[index];// locate the hash bucket
hashNode = &g_futexHash[index];
if (OsMuxLockUnsafe(&hashNode->listLock, LOS_WAIT_FOREVER)) {
return;
}
if (node->index != index) {// the node's recorded bucket must match the computed one
if (node->index != index) {
goto EXIT;
}
@ -272,6 +270,7 @@ EXIT:
return;
}
STATIC FutexNode *OsFutexDeleteAlreadyWakeTaskAndGetNext(const FutexNode *node, FutexNode **headNode, BOOL isDeleteHead)
{
FutexNode *tempNode = (FutexNode *)node;
@ -293,7 +292,7 @@ STATIC FutexNode *OsFutexDeleteAlreadyWakeTaskAndGetNext(const FutexNode *node,
return tempNode;
}
/// Insert a new futex key into the hash bucket; this is done only for a new key, since several FutexNode entries may share one key
STATIC VOID OsFutexInsertNewFutexKeyToHash(FutexNode *node)
{
FutexNode *headNode = NULL;
@ -323,16 +322,16 @@ STATIC VOID OsFutexInsertNewFutexKeyToHash(FutexNode *node)
futexList != &(hashNode->lockList);
futexList = futexList->pstNext) {
headNode = OS_FUTEX_FROM_FUTEXLIST(futexList);
if (node->key <= headNode->key) {
if (node->key <= headNode->key) {
LOS_ListTailInsert(&(headNode->futexList), &(node->futexList));
break;
}
}
EXIT:
return;
}
STATIC INT32 OsFutexInsertFindFormBackToFront(LOS_DL_LIST *queueList, const LosTaskCB *runTask, FutexNode *node)
{
LOS_DL_LIST *listHead = queueList;
@ -408,54 +407,55 @@ STATIC INT32 OsFutexRecycleAndFindHeadNode(FutexNode *headNode, FutexNode *node,
return LOS_OK;
}
/// Insert the task into the futex's pend list
STATIC INT32 OsFutexInsertTasktoPendList(FutexNode **firstNode, FutexNode *node, const LosTaskCB *run)
{
LosTaskCB *taskHead = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&((*firstNode)->pendList)));// first task on the pend list
LosTaskCB *taskHead = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&((*firstNode)->pendList)));
LOS_DL_LIST *queueList = &((*firstNode)->queueList);
INT32 ret1 = OsSchedParamCompare(run, taskHead);
if (ret1 < 0) {
/* The one with the highest priority is inserted at the top of the queue */
LOS_ListTailInsert(queueList, &(node->queueList));// insert at the tail of queueList
OsFutexReplaceQueueListHeadNode(*firstNode, node);// and make this node the new head on futexList
LOS_ListTailInsert(queueList, &(node->queueList));
OsFutexReplaceQueueListHeadNode(*firstNode, node);
*firstNode = node;
return LOS_OK;
}
// no waiters yet, or the current task's priority is not higher than the head's
if (LOS_ListEmpty(queueList) && (ret1 >= 0)) {
/* Insert the next position in the queue with equal priority */
LOS_ListHeadInsert(queueList, &(node->queueList));// insert the current (about to block) task at the head
LOS_ListHeadInsert(queueList, &(node->queueList));
return LOS_OK;
}
FutexNode *tailNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_LAST(queueList));// tail node of the queue
LosTaskCB *taskTail = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(tailNode->pendList)));// last blocked task
FutexNode *tailNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_LAST(queueList));
LosTaskCB *taskTail = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(tailNode->pendList)));
INT32 ret2 = OsSchedParamCompare(taskTail, run);
if ((ret2 <= 0) || (ret1 > ret2)) {
return OsFutexInsertFindFormBackToFront(queueList, run, node);// search from back to front
return OsFutexInsertFindFormBackToFront(queueList, run, node);
}
return OsFutexInsertFindFromFrontToBack(queueList, run, node);// otherwise search from front to back
return OsFutexInsertFindFromFrontToBack(queueList, run, node);
}
/// Find the node for the given futex in its hash bucket
STATIC FutexNode *OsFindFutexNode(const FutexNode *node)
{
FutexHash *hashNode = &g_futexHash[node->index];// locate the hash bucket first
FutexHash *hashNode = &g_futexHash[node->index];
LOS_DL_LIST *futexList = &(hashNode->lockList);
FutexNode *headNode = NULL;
for (futexList = futexList->pstNext;
futexList != &(hashNode->lockList);// stop once the whole bucket list has been walked
futexList != &(hashNode->lockList);
futexList = futexList->pstNext) {
headNode = OS_FUTEX_FROM_FUTEXLIST(futexList);// the futex node entity
if ((headNode->key == node->key) && (headNode->pid == node->pid)) {// the node already exists; note the comparison:
return headNode;// key and pid are compared together, since only both determine the futex uniquely
headNode = OS_FUTEX_FROM_FUTEXLIST(futexList);
if ((headNode->key == node->key) && (headNode->pid == node->pid)) {
return headNode;
}
}
return NULL;
}
/// Find the futex and insert the node into its hash bucket
STATIC INT32 OsFindAndInsertToHash(FutexNode *node)
{
FutexNode *headNode = NULL;
@ -464,7 +464,7 @@ STATIC INT32 OsFindAndInsertToHash(FutexNode *node)
INT32 ret;
headNode = OsFindFutexNode(node);
if (headNode == NULL) {// not found, so this is a new lock
if (headNode == NULL) {
OsFutexInsertNewFutexKeyToHash(node);
LOS_ListInit(&(node->queueList));
return LOS_OK;
@ -483,14 +483,14 @@ STATIC INT32 OsFindAndInsertToHash(FutexNode *node)
return ret;
}
/// Shared-memory permission check
STATIC INT32 OsFutexKeyShmPermCheck(const UINT32 *userVaddr, const UINT32 flags)
{
PADDR_T paddr;
/* Check whether the futexKey is a shared lock */
if (!(flags & FUTEX_PRIVATE)) {// not a private futex
paddr = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);// check whether a physical address can be resolved
if (!(flags & FUTEX_PRIVATE)) {
paddr = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);
if (paddr == 0) return LOS_NOK;
}
@ -549,13 +549,13 @@ STATIC INT32 OsFutexDeleteTimeoutTaskNode(FutexHash *hashNode, FutexNode *node)
}
return LOS_ETIMEDOUT;
}
/// Set up the current task's futex node and insert it into the hash table
STATIC INT32 OsFutexInsertTaskToHash(LosTaskCB **taskCB, FutexNode **node, const UINTPTR futexKey, const UINT32 flags)
{
INT32 ret;
*taskCB = OsCurrTaskGet(); // get the current task
*node = &((*taskCB)->futex); // get the current task's futex node
OsFutexSetKey(futexKey, flags, *node);// set key, index and pid
*taskCB = OsCurrTaskGet();
*node = &((*taskCB)->futex);
OsFutexSetKey(futexKey, flags, *node);
ret = OsFindAndInsertToHash(*node);
if (ret) {
@ -565,33 +565,33 @@ STATIC INT32 OsFutexInsertTaskToHash(LosTaskCB **taskCB, FutexNode **node, const
LOS_ListInit(&((*node)->pendList));
return LOS_OK;
}
/// Put the current task onto the wait list
STATIC INT32 OsFutexWaitTask(const UINT32 *userVaddr, const UINT32 flags, const UINT32 val, const UINT32 timeout)
{
INT32 futexRet;
UINT32 intSave, lockVal;
LosTaskCB *taskCB = NULL;
FutexNode *node = NULL;
UINTPTR futexKey = OsFutexFlagsToKey(userVaddr, flags);// derive the key from the address and flags
UINT32 index = OsFutexKeyToIndex(futexKey, flags);// map the key to a hash bucket
UINTPTR futexKey = OsFutexFlagsToKey(userVaddr, flags);
UINT32 index = OsFutexKeyToIndex(futexKey, flags);
FutexHash *hashNode = &g_futexHash[index];
if (OsFutexLock(&hashNode->listLock)) {// take the bucket lock before touching the futex list
if (OsFutexLock(&hashNode->listLock)) {
return LOS_EINVAL;
}
//userVaddr must be a user-space virtual address
if (LOS_ArchCopyFromUser(&lockVal, userVaddr, sizeof(UINT32))) {// copy the value into kernel space
if (LOS_ArchCopyFromUser(&lockVal, userVaddr, sizeof(UINT32))) {
PRINT_ERR("Futex wait param check failed! copy from user failed!\n");
futexRet = LOS_EINVAL;
goto EXIT_ERR;
}
if (lockVal != val) {// consistency check against the expected value
if (lockVal != val) {
futexRet = LOS_EBADF;
goto EXIT_ERR;
}
//note the second argument: FutexNode *node = NULL
if (OsFutexInsertTaskToHash(&taskCB, &node, futexKey, flags)) {// node = taskCB->futex
if (OsFutexInsertTaskToHash(&taskCB, &node, futexKey, flags)) {
futexRet = LOS_NOK;
goto EXIT_ERR;
}
@ -602,7 +602,7 @@ STATIC INT32 OsFutexWaitTask(const UINT32 *userVaddr, const UINT32 flags, const
taskCB->ops->wait(taskCB, &(node->pendList), timeout);
LOS_SpinUnlock(&g_taskSpin);
futexRet = OsFutexUnlock(&hashNode->listLock);
futexRet = OsFutexUnlock(&hashNode->listLock);
if (futexRet) {
OsSchedUnlock();
LOS_IntRestore(intSave);
@ -632,21 +632,21 @@ EXIT_ERR:
EXIT_UNLOCK_ERR:
return futexRet;
}
/// Make the thread wait: insert a node representing the blocked thread into the futex table
INT32 OsFutexWait(const UINT32 *userVaddr, UINT32 flags, UINT32 val, UINT32 absTime)
{
INT32 ret;
UINT32 timeout = LOS_WAIT_FOREVER;
ret = OsFutexWaitParamCheck(userVaddr, flags, absTime);// parameter check
ret = OsFutexWaitParamCheck(userVaddr, flags, absTime);
if (ret) {
return ret;
}
if (absTime != LOS_WAIT_FOREVER) {// convert the time; the kernel's time unit is the tick
timeout = OsNS2Tick((UINT64)absTime * OS_SYS_NS_PER_US); // convert to ticks
if (absTime != LOS_WAIT_FOREVER) {
timeout = OsNS2Tick((UINT64)absTime * OS_SYS_NS_PER_US);
}
return OsFutexWaitTask(userVaddr, flags, val, timeout);// suspend the task for up to timeout
return OsFutexWaitTask(userVaddr, flags, val, timeout);
}
STATIC INT32 OsFutexWakeParamCheck(const UINT32 *userVaddr, UINT32 flags)
@ -657,12 +657,12 @@ STATIC INT32 OsFutexWakeParamCheck(const UINT32 *userVaddr, UINT32 flags)
PRINT_ERR("Futex wake param check failed! error flags: 0x%x\n", flags);
return LOS_EINVAL;
}
//the address must lie in user space
if ((vaddr % sizeof(INT32)) || (vaddr < OS_FUTEX_KEY_BASE) || (vaddr >= OS_FUTEX_KEY_MAX)) {
PRINT_ERR("Futex wake param check failed! error userVaddr: 0x%x\n", userVaddr);
return LOS_EINVAL;
}
//a shared futex must refer to a shared-memory address
if (flags && (OsFutexKeyShmPermCheck(userVaddr, flags) != LOS_OK)) {
PRINT_ERR("Futex wake param check failed! error shared memory perm userVaddr: 0x%x\n", userVaddr);
return LOS_EINVAL;
@ -672,8 +672,7 @@ STATIC INT32 OsFutexWakeParamCheck(const UINT32 *userVaddr, UINT32 flags)
}
/* Check to see if the task to be awakened has timed out
* if time out, to weak next pend task.
*/
STATIC VOID OsFutexCheckAndWakePendTask(FutexNode *headNode, const INT32 wakeNumber,
FutexHash *hashNode, FutexNode **nextNode, BOOL *wakeAny)
@ -708,7 +707,6 @@ STATIC VOID OsFutexCheckAndWakePendTask(FutexNode *headNode, const INT32 wakeNum
}
return;
}
STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, FutexNode **newHeadNode, BOOL *wakeAny)
{
@ -717,13 +715,13 @@ STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, F
FutexNode *headNode = NULL;
UINT32 index = OsFutexKeyToIndex(futexKey, flags);
FutexHash *hashNode = &g_futexHash[index];
FutexNode tempNode = { // build a temporary futex node, used to check whether the node exists in the hash bucket
FutexNode tempNode = {
.key = futexKey,
.index = index,
.pid = (flags & FUTEX_PRIVATE) ? LOS_GetCurrProcessID() : OS_INVALID,
};
node = OsFindFutexNode(&tempNode);// look up the futex node
node = OsFindFutexNode(&tempNode);
if (node == NULL) {
return LOS_EBADF;
}
@ -731,7 +729,7 @@ STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, F
headNode = node;
SCHEDULER_LOCK(intSave);
OsFutexCheckAndWakePendTask(headNode, wakeNumber, hashNode, newHeadNode, wakeAny);// then wake the requested number of tasks waiting on this lock
OsFutexCheckAndWakePendTask(headNode, wakeNumber, hashNode, newHeadNode, wakeAny);
if ((*newHeadNode) != NULL) {
OsFutexReplaceQueueListHeadNode(headNode, *newHeadNode);
OsFutexDeinitFutexNode(headNode);
@ -743,7 +741,7 @@ STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, F
return LOS_OK;
}
/// Wake a thread blocked on the given lock
INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
{
INT32 ret, futexRet;
@ -752,11 +750,11 @@ INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
FutexHash *hashNode = NULL;
FutexNode *headNode = NULL;
BOOL wakeAny = FALSE;
//1. check the parameters
if (OsFutexWakeParamCheck(userVaddr, flags)) {
return LOS_EINVAL;
}
//2. find the bucket for the given user-space address
futexKey = OsFutexFlagsToKey(userVaddr, flags);
index = OsFutexKeyToIndex(futexKey, flags);
@ -764,7 +762,7 @@ INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
if (OsFutexLock(&hashNode->listLock)) {
return LOS_EINVAL;
}
//3. wake the tasks waiting on this lock
ret = OsFutexWakeTask(futexKey, flags, wakeNumber, &headNode, &wakeAny);
if (ret) {
goto EXIT_ERR;
@ -778,7 +776,7 @@ INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
if (futexRet) {
goto EXIT_UNLOCK_ERR;
}
//4. decide from the parameters whether to trigger a reschedule
if (wakeAny == TRUE) {
LOS_MpSchedule(OS_MP_CPU_ALL);
LOS_Schedule();
@ -887,7 +885,7 @@ STATIC VOID OsFutexRequeueSplitTwoLists(FutexHash *oldHashNode, FutexNode *oldHe
tailNode->queueList.pstNext = &newHeadNode->queueList;
return;
}
/// Remove the old key and get the head node
STATIC FutexNode *OsFutexRequeueRemoveOldKeyAndGetHead(UINTPTR oldFutexKey, UINT32 flags, INT32 wakeNumber,
UINTPTR newFutexKey, INT32 requeueCount, BOOL *wakeAny)
{
@ -923,7 +921,7 @@ STATIC FutexNode *OsFutexRequeueRemoveOldKeyAndGetHead(UINTPTR oldFutexKey, UINT
return oldHeadNode;
}
/// Check the lock's state in the futex table (requeue parameter check)
STATIC INT32 OsFutexRequeueParamCheck(const UINT32 *oldUserVaddr, UINT32 flags, const UINT32 *newUserVaddr)
{
VADDR_T oldVaddr = (VADDR_T)(UINTPTR)oldUserVaddr;
@ -932,12 +930,12 @@ STATIC INT32 OsFutexRequeueParamCheck(const UINT32 *oldUserVaddr, UINT32 flags,
if (oldVaddr == newVaddr) {
return LOS_EINVAL;
}
//check the flags
if ((flags & (~FUTEX_PRIVATE)) != FUTEX_REQUEUE) {
PRINT_ERR("Futex requeue param check failed! error flags: 0x%x\n", flags);
return LOS_EINVAL;
}
//check the address range; it must be in user space
if ((oldVaddr % sizeof(INT32)) || (oldVaddr < OS_FUTEX_KEY_BASE) || (oldVaddr >= OS_FUTEX_KEY_MAX)) {
PRINT_ERR("Futex requeue param check failed! error old userVaddr: 0x%x\n", oldUserVaddr);
return LOS_EINVAL;
@ -950,7 +948,7 @@ STATIC INT32 OsFutexRequeueParamCheck(const UINT32 *oldUserVaddr, UINT32 flags,
return LOS_OK;
}
/// Adjust the position of the given lock in the futex table
INT32 OsFutexRequeue(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber, INT32 count, const UINT32 *newUserVaddr)
{
INT32 ret;
@ -967,12 +965,12 @@ INT32 OsFutexRequeue(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber, IN
return LOS_EINVAL;
}
oldFutexKey = OsFutexFlagsToKey(userVaddr, flags);// get the key first
oldFutexKey = OsFutexFlagsToKey(userVaddr, flags);
newFutexKey = OsFutexFlagsToKey(newUserVaddr, flags);
oldIndex = OsFutexKeyToIndex(oldFutexKey, flags);// then get the hash-bucket index (there are 80 buckets in total)
oldIndex = OsFutexKeyToIndex(oldFutexKey, flags);
newIndex = OsFutexKeyToIndex(newFutexKey, flags);
oldHashNode = &g_futexHash[oldIndex];// get the corresponding hash bucket
oldHashNode = &g_futexHash[oldIndex];
if (OsFutexLock(&oldHashNode->listLock)) {
return LOS_EINVAL;
}

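/*
 * Reviewer note: OsFutexWait()/OsFutexWake() above form the kernel half of a user-space futex.
 * The sketch below is only illustrative and is NOT part of this file: it shows the classic
 * lock/unlock pattern such a futex supports. futex_wait()/futex_wake() are assumed wrappers
 * around the corresponding syscalls (hypothetical names), and lockWord plays the role of the
 * userVaddr word that the kernel code rechecks with LOS_ArchCopyFromUser().
 */
#include <stdatomic.h>

extern void futex_wait(unsigned int *addr, unsigned int expectedVal); /* assumed syscall wrapper */
extern void futex_wake(unsigned int *addr, int wakeNumber);           /* assumed syscall wrapper */

static atomic_uint lockWord; /* 0 = free, 1 = locked, 2 = locked with waiters */

static void demo_futex_lock(void)
{
    unsigned int expected = 0;
    if (atomic_compare_exchange_strong(&lockWord, &expected, 1)) {
        return; /* fast path: uncontended, no kernel call at all */
    }
    while (atomic_exchange(&lockWord, 2) != 0) {
        /* the kernel only sleeps if *addr still equals 2 -- the lockVal != val check above */
        futex_wait((unsigned int *)&lockWord, 2);
    }
}

static void demo_futex_unlock(void)
{
    if (atomic_exchange(&lockWord, 0) == 2) {
        futex_wake((unsigned int *)&lockWord, 1); /* wake one waiter, cf. OsFutexWake() */
    }
}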
@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -40,19 +40,19 @@
#ifdef LOSCFG_BASE_IPC_MUX
#define MUTEXATTR_TYPE_MASK 0x0FU
///Initialize mutex attributes
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrInit(LosMuxAttr *attr)
{
if (attr == NULL) {
return LOS_EINVAL;
}
attr->protocol = LOS_MUX_PRIO_INHERIT; // default protocol is priority inheritance: if task A (prio 4) waits for B (prio 19) to release the lock, B is boosted to prio 4
attr->prioceiling = OS_TASK_PRIORITY_LOWEST;// lowest priority
attr->type = LOS_MUX_DEFAULT; // default is LOS_MUX_RECURSIVE
attr->protocol = LOS_MUX_PRIO_INHERIT;
attr->prioceiling = OS_TASK_PRIORITY_LOWEST;
attr->type = LOS_MUX_DEFAULT;
return LOS_OK;
}
/// Destroy mutex attributes; note that nothing is actually done here beyond the NULL check
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrDestroy(LosMuxAttr *attr)
{
if (attr == NULL) {
@ -61,7 +61,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrDestroy(LosMuxAttr *attr)
return LOS_OK;
}
///Get the mutex type attribute, returned through outType
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetType(const LosMuxAttr *attr, INT32 *outType)
{
INT32 type;
@ -79,7 +79,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetType(const LosMuxAttr *attr, INT32 *outTyp
return LOS_OK;
}
///Set the mutex type attribute
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetType(LosMuxAttr *attr, INT32 type)
{
if ((attr == NULL) || (type < LOS_MUX_NORMAL) || (type > LOS_MUX_ERRORCHECK)) {
@ -89,7 +89,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetType(LosMuxAttr *attr, INT32 type)
attr->type = (UINT8)((attr->type & ~MUTEXATTR_TYPE_MASK) | (UINT32)type);
return LOS_OK;
}
///Get the mutex protocol attribute
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetProtocol(const LosMuxAttr *attr, INT32 *protocol)
{
if ((attr != NULL) && (protocol != NULL)) {
@ -100,7 +100,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetProtocol(const LosMuxAttr *attr, INT32 *pr
return LOS_OK;
}
///Set the protocol in the mutex attributes
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetProtocol(LosMuxAttr *attr, INT32 protocol)
{
if (attr == NULL) {
@ -117,7 +117,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetProtocol(LosMuxAttr *attr, INT32 protocol)
return LOS_EINVAL;
}
}
///Get the priority ceiling from the mutex attributes
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetPrioceiling(const LosMuxAttr *attr, INT32 *prioceiling)
{
if (attr == NULL) {
@ -130,7 +130,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetPrioceiling(const LosMuxAttr *attr, INT32
return LOS_OK;
}
///Set the priority ceiling in the mutex attributes
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetPrioceiling(LosMuxAttr *attr, INT32 prioceiling)
{
if ((attr == NULL) ||
@ -143,7 +143,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetPrioceiling(LosMuxAttr *attr, INT32 prioce
return LOS_OK;
}
///Set the priority ceiling of a mutex; the previous ceiling is returned through oldPrioceiling
LITE_OS_SEC_TEXT UINT32 LOS_MuxSetPrioceiling(LosMux *mutex, INT32 prioceiling, INT32 *oldPrioceiling)
{
INT32 ret;
@ -172,7 +172,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxSetPrioceiling(LosMux *mutex, INT32 prioceiling,
return ret;
}
///Get the priority ceiling of a mutex
LITE_OS_SEC_TEXT UINT32 LOS_MuxGetPrioceiling(const LosMux *mutex, INT32 *prioceiling)
{
if ((mutex != NULL) && (prioceiling != NULL) && (mutex->magic == OS_MUX_MAGIC)) {
@ -182,7 +182,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxGetPrioceiling(const LosMux *mutex, INT32 *prioce
return LOS_EINVAL;
}
///Check whether a mutex is valid
LITE_OS_SEC_TEXT BOOL LOS_MuxIsValid(const LosMux *mutex)
{
if ((mutex != NULL) && (mutex->magic == OS_MUX_MAGIC)) {
@ -191,7 +191,7 @@ LITE_OS_SEC_TEXT BOOL LOS_MuxIsValid(const LosMux *mutex)
return FALSE;
}
///Check whether the mutex attributes are valid
STATIC UINT32 OsCheckMutexAttr(const LosMuxAttr *attr)
{
if (((INT8)(attr->type) < LOS_MUX_NORMAL) || (attr->type > LOS_MUX_ERRORCHECK)) {
@ -205,7 +205,7 @@ STATIC UINT32 OsCheckMutexAttr(const LosMuxAttr *attr)
}
return LOS_OK;
}
/// Initialize a mutex
LITE_OS_SEC_TEXT UINT32 LOS_MuxInit(LosMux *mutex, const LosMuxAttr *attr)
{
UINT32 intSave;
@ -215,24 +215,24 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxInit(LosMux *mutex, const LosMuxAttr *attr)
}
if (attr == NULL) {
(VOID)LOS_MuxAttrInit(&mutex->attr);// initialize the attributes
(VOID)LOS_MuxAttrInit(&mutex->attr);
} else {
(VOID)memcpy_s(&mutex->attr, sizeof(LosMuxAttr), attr, sizeof(LosMuxAttr));// copy attr into mutex->attr
(VOID)memcpy_s(&mutex->attr, sizeof(LosMuxAttr), attr, sizeof(LosMuxAttr));
}
if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {// validate the attributes
if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {
return LOS_EINVAL;
}
SCHEDULER_LOCK(intSave); // take the scheduler spinlock
mutex->muxCount = 0; // number of times the mutex has been locked
mutex->owner = NULL; // which task holds the lock
LOS_ListInit(&mutex->muxList); // doubly linked list of waiters
mutex->magic = OS_MUX_MAGIC; // fixed magic number identifying a mutex
SCHEDULER_UNLOCK(intSave); // release the scheduler spinlock
SCHEDULER_LOCK(intSave);
mutex->muxCount = 0;
mutex->owner = NULL;
LOS_ListInit(&mutex->muxList);
mutex->magic = OS_MUX_MAGIC;
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
///Destroy a mutex
LITE_OS_SEC_TEXT UINT32 LOS_MuxDestroy(LosMux *mutex)
{
UINT32 intSave;
@ -241,22 +241,22 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxDestroy(LosMux *mutex)
return LOS_EINVAL;
}
SCHEDULER_LOCK(intSave); // take the scheduler spinlock
SCHEDULER_LOCK(intSave);
if (mutex->magic != OS_MUX_MAGIC) {
SCHEDULER_UNLOCK(intSave);// release the scheduler spinlock
SCHEDULER_UNLOCK(intSave);
return LOS_EBADF;
}
if (mutex->muxCount != 0) {
SCHEDULER_UNLOCK(intSave);// release the scheduler spinlock
SCHEDULER_UNLOCK(intSave);
return LOS_EBUSY;
}
(VOID)memset_s(mutex, sizeof(LosMux), 0, sizeof(LosMux));// simply zero the whole structure
SCHEDULER_UNLOCK(intSave); // release the scheduler spinlock
(VOID)memset_s(mutex, sizeof(LosMux), 0, sizeof(LosMux));
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
///Set the mutex bitmap (apply priority inheritance)
STATIC VOID OsMuxBitmapSet(const LosMux *mutex, const LosTaskCB *runTask)
{
if (mutex->attr.protocol != LOS_MUX_PRIO_INHERIT) {
@ -271,7 +271,7 @@ STATIC VOID OsMuxBitmapSet(const LosMux *mutex, const LosTaskCB *runTask)
owner->ops->priorityInheritance(owner, &param);
}
}
///Restore the mutex bitmap
VOID OsMuxBitmapRestore(const LosMux *mutex, const LOS_DL_LIST *list, const LosTaskCB *runTask)
{
if (mutex->attr.protocol != LOS_MUX_PRIO_INHERIT) {
@ -284,21 +284,20 @@ VOID OsMuxBitmapRestore(const LosMux *mutex, const LOS_DL_LIST *list, const LosT
owner->ops->priorityRestore(owner, list, &param);
}
/// Worst case: the lock cannot be taken, the task yields the CPU and blocks, then resumes once another task releases the lock and its turn comes
STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
{
UINT32 ret;
if ((mutex->muxList.pstPrev == NULL) || (mutex->muxList.pstNext == NULL)) {// handle an uninitialized list
if ((mutex->muxList.pstPrev == NULL) || (mutex->muxList.pstNext == NULL)) {
/* This is for mutex macro initialization. */
mutex->muxCount = 0;// reset the lock counter
mutex->owner = NULL;// the lock has no owner
LOS_ListInit(&mutex->muxList);// init the waiter list; tasks requesting this lock will be queued on it
mutex->muxCount = 0;
mutex->owner = NULL;
LOS_ListInit(&mutex->muxList);
}
if (mutex->muxCount == 0) {// nobody holds the lock, so it can be taken right away and we return from inside
mutex->muxCount++; // increment the lock counter
mutex->owner = (VOID *)runTask; // the current task now owns the lock
if (mutex->muxCount == 0) {
mutex->muxCount++;
mutex->owner = (VOID *)runTask;
LOS_ListTailInsert(&runTask->lockList, &mutex->holdList);
if (mutex->attr.protocol == LOS_MUX_PRIO_PROTECT) {
SchedParam param = { 0 };
@ -308,23 +307,23 @@ STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
}
return LOS_OK;
}
//muxCount > 0: for a recursive mutex there are two cases: 1. runTask already holds the lock 2. another task holds it
if (((LosTaskCB *)mutex->owner == runTask) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {// case 1: runTask is the owner
mutex->muxCount++; // increment the recursion count; recursive locking prevents self-deadlock and is the default (LOS_MUX_DEFAULT = LOS_MUX_RECURSIVE)
return LOS_OK; // done
if (((LosTaskCB *)mutex->owner == runTask) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {
mutex->muxCount++;
return LOS_OK;
}
//reaching here means another task holds the lock, so the current task has to block
if (!timeout) {// timeout says how long to wait for the lock
return LOS_EINVAL;// timeout = 0 means do not wait at all; just return an error (see LOS_MuxTrylock)
if (!timeout) {
return LOS_EINVAL;
}
//about to block: request a reschedule and give up the CPU core to another task
if (!OsPreemptableInSched()) {// scheduling cannot be requested here
return LOS_EDEADLK;// return an error; the spinlock is held by another CPU core
if (!OsPreemptableInSched()) {
return LOS_EDEADLK;
}
OsMuxBitmapSet(mutex, runTask);// set the lock bitmap, boosting the lock owner's priority where possible
OsMuxBitmapSet(mutex, runTask);
runTask->taskMux = (VOID *)mutex; // record that the current task is waiting on this mutex
runTask->taskMux = (VOID *)mutex;
LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &mutex->muxList);
if (node == NULL) {
ret = LOS_NOK;
@ -333,10 +332,10 @@ STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
OsTaskWaitSetPendMask(OS_TASK_WAIT_MUTEX, (UINTPTR)mutex, timeout);
ret = runTask->ops->wait(runTask, node, timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {// although this line sits right after the wait call, it runs much later: the CPU switched task context inside the wait
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
OsMuxBitmapRestore(mutex, NULL, runTask);
runTask->taskMux = NULL;// by the time execution returns here, the wait may already have timed out
ret = LOS_ETIMEDOUT;// report a timeout
runTask->taskMux = NULL;
ret = LOS_ETIMEDOUT;
}
return ret;
@ -344,7 +343,7 @@ STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
UINT32 OsMuxLockUnsafe(LosMux *mutex, UINT32 timeout)
{
LosTaskCB *runTask = OsCurrTaskGet();// get the current task
LosTaskCB *runTask = OsCurrTaskGet();
if (mutex->magic != OS_MUX_MAGIC) {
return LOS_EBADF;
@ -353,23 +352,23 @@ UINT32 OsMuxLockUnsafe(LosMux *mutex, UINT32 timeout)
if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {
return LOS_EINVAL;
}
//with LOS_MUX_ERRORCHECK the current owner must not lock again; the default type is the recursive LOS_MUX_RECURSIVE
if ((mutex->attr.type == LOS_MUX_ERRORCHECK) && (mutex->owner == (VOID *)runTask)) {
return LOS_EDEADLK;
}
return OsMuxPendOp(runTask, mutex, timeout);
}
/// Try to lock without blocking
UINT32 OsMuxTrylockUnsafe(LosMux *mutex, UINT32 timeout)
{
LosTaskCB *runTask = OsCurrTaskGet();// get the current task
LosTaskCB *runTask = OsCurrTaskGet();
if (mutex->magic != OS_MUX_MAGIC) {// check that the magic number is intact
if (mutex->magic != OS_MUX_MAGIC) {
return LOS_EBADF;
}
if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {// validate the mutex attributes
if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {
return LOS_EINVAL;
}
@ -378,9 +377,9 @@ UINT32 OsMuxTrylockUnsafe(LosMux *mutex, UINT32 timeout)
return LOS_EBUSY;
}
return OsMuxPendOp(runTask, mutex, timeout);// try to take the lock, waiting up to timeout on contention
return OsMuxPendOp(runTask, mutex, timeout);
}
/// Acquire a mutex
LITE_OS_SEC_TEXT UINT32 LOS_MuxLock(LosMux *mutex, UINT32 timeout)
{
LosTaskCB *runTask = NULL;
@ -395,19 +394,19 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxLock(LosMux *mutex, UINT32 timeout)
return LOS_EINTR;
}
runTask = (LosTaskCB *)OsCurrTaskGet();// get the current task
runTask = (LosTaskCB *)OsCurrTaskGet();
/* DO NOT Call blocking API in system tasks */
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {// do not use a mutex inside system tasks
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
PRINTK("Warning: DO NOT call %s in system tasks.\n", __FUNCTION__);
OsBackTrace();// print the task backtrace
OsBackTrace();
}
SCHEDULER_LOCK(intSave);// scheduler spinlock
ret = OsMuxLockUnsafe(mutex, timeout);// if the lock cannot be taken, the task blocks until timeout or until the owner releases the lock and wakes it
SCHEDULER_LOCK(intSave);
ret = OsMuxLockUnsafe(mutex, timeout);
SCHEDULER_UNLOCK(intSave);
return ret;
}
///Try to take the lock; if it is unavailable, return immediately instead of waiting
LITE_OS_SEC_TEXT UINT32 LOS_MuxTrylock(LosMux *mutex)
{
LosTaskCB *runTask = NULL;
@ -422,40 +421,39 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxTrylock(LosMux *mutex)
return LOS_EINTR;
}
runTask = (LosTaskCB *)OsCurrTaskGet();// get the currently running task
runTask = (LosTaskCB *)OsCurrTaskGet();
/* DO NOT Call blocking API in system tasks */
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {// not allowed in system tasks
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
PRINTK("Warning: DO NOT call %s in system tasks.\n", __FUNCTION__);
OsBackTrace();
}
SCHEDULER_LOCK(intSave);
ret = OsMuxTrylockUnsafe(mutex, 0);// timeout = 0: do not wait if the lock is unavailable
ret = OsMuxTrylockUnsafe(mutex, 0);
SCHEDULER_UNLOCK(intSave);
return ret;
}
STATIC UINT32 OsMuxPostOp(LosTaskCB *taskCB, LosMux *mutex, BOOL *needSched)
{
if (LOS_ListEmpty(&mutex->muxList)) {// no task is waiting on the mutex
LOS_ListDelete(&mutex->holdList);// remove the lock from the owner's hold list
if (LOS_ListEmpty(&mutex->muxList)) {
LOS_ListDelete(&mutex->holdList);
mutex->owner = NULL;
return LOS_OK;
}
LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(mutex->muxList)));// take the first waiter on the mutex list; it is about to be woken
LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(mutex->muxList)));
OsMuxBitmapRestore(mutex, &mutex->muxList, resumedTask);
mutex->muxCount = 1;// lock count becomes 1
mutex->owner = (VOID *)resumedTask;// ownership moves to the resumed task
LOS_ListDelete(&mutex->holdList);// take the lock off the previous owner's hold list
LOS_ListTailInsert(&resumedTask->lockList, &mutex->holdList);// append the lock to the resumed task's lockList, which records every lock a task holds
mutex->muxCount = 1;
mutex->owner = (VOID *)resumedTask;
LOS_ListDelete(&mutex->holdList);
LOS_ListTailInsert(&resumedTask->lockList, &mutex->holdList);
OsTaskWakeClearPendMask(resumedTask);
resumedTask->ops->wake(resumedTask);
resumedTask->taskMux = NULL;
if (needSched != NULL) {// if the caller asked for it
*needSched = TRUE;// request another round of scheduling
if (needSched != NULL) {
*needSched = TRUE;
}
return LOS_OK;
@ -478,21 +476,21 @@ UINT32 OsMuxUnlockUnsafe(LosTaskCB *taskCB, LosMux *mutex, BOOL *needSched)
if (mutex->muxCount == 0) {
return LOS_EPERM;
}
//note that --mutex->muxCount decrements before the comparison
if ((--mutex->muxCount != 0) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {// for LOS_MUX_RECURSIVE the count may legitimately stay non-zero
if ((--mutex->muxCount != 0) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {
return LOS_OK;
}
if (mutex->attr.protocol == LOS_MUX_PRIO_PROTECT) {// priority-protect protocol
if (mutex->attr.protocol == LOS_MUX_PRIO_PROTECT) {
SchedParam param = { 0 };
taskCB->ops->schedParamGet(taskCB, &param);
taskCB->ops->priorityRestore(taskCB, NULL, &param);
}
/* Whether a task block the mutex lock. */
return OsMuxPostOp(taskCB, mutex, needSched);// one task wakes another task that is waiting for the lock
/* Whether a task block the mutex lock. */
return OsMuxPostOp(taskCB, mutex, needSched);
}
///Release the lock
LITE_OS_SEC_TEXT UINT32 LOS_MuxUnlock(LosMux *mutex)
{
LosTaskCB *runTask = NULL;
@ -508,9 +506,9 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxUnlock(LosMux *mutex)
return LOS_EINTR;
}
runTask = (LosTaskCB *)OsCurrTaskGet();// get the current task
runTask = (LosTaskCB *)OsCurrTaskGet();
/* DO NOT Call blocking API in system tasks */
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {// must not be called from system tasks, which could easily deadlock them
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
PRINTK("Warning: DO NOT call %s in system tasks.\n", __FUNCTION__);
OsBackTrace();
}
@ -518,12 +516,12 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxUnlock(LosMux *mutex)
SCHEDULER_LOCK(intSave);
ret = OsMuxUnlockUnsafe(runTask, mutex, &needSched);
SCHEDULER_UNLOCK(intSave);
if (needSched == TRUE) {// a reschedule is needed
LOS_MpSchedule(OS_MP_CPU_ALL);// tell every CPU to reschedule
LOS_Schedule();// trigger the schedule
if (needSched == TRUE) {
LOS_MpSchedule(OS_MP_CPU_ALL);
LOS_Schedule();
}
return ret;
}
#endif /* (LOSCFG_BASE_IPC_MUX == YES) */
#endif /* LOSCFG_BASE_IPC_MUX */

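/*
 * Reviewer note: a minimal usage sketch of the mutex API diffed above, using only functions
 * whose signatures appear in this file (LOS_MuxInit, LOS_MuxLock, LOS_MuxTrylock,
 * LOS_MuxUnlock, LOS_MuxDestroy). The header name is assumed; error handling is abbreviated.
 */
#include "los_mux.h"

STATIC LosMux g_demoMux;

VOID DemoMuxUsage(VOID)
{
    /* NULL attr -> defaults: LOS_MUX_RECURSIVE type with priority inheritance */
    if (LOS_MuxInit(&g_demoMux, NULL) != LOS_OK) {
        return;
    }
    if (LOS_MuxLock(&g_demoMux, LOS_WAIT_FOREVER) == LOS_OK) {
        /* ... critical section ... */
        (VOID)LOS_MuxUnlock(&g_demoMux);
    }
    if (LOS_MuxTrylock(&g_demoMux) == LOS_OK) { /* non-blocking attempt */
        (VOID)LOS_MuxUnlock(&g_demoMux);
    }
    (VOID)LOS_MuxDestroy(&g_demoMux);
}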
@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -47,8 +47,8 @@
#endif /* LOSCFG_BASE_IPC_QUEUE_LIMIT <= 0 */
#ifndef LOSCFG_IPC_CONTAINER
LITE_OS_SEC_BSS LosQueueCB *g_allQueue = NULL;///< message queue pool
LITE_OS_SEC_BSS STATIC LOS_DL_LIST g_freeQueueList;///< free-queue list; new queues are allocated from it
LITE_OS_SEC_BSS LosQueueCB *g_allQueue = NULL;
LITE_OS_SEC_BSS STATIC LOS_DL_LIST g_freeQueueList;
#define FREE_QUEUE_LIST g_freeQueueList
#endif
@ -83,6 +83,10 @@ LITE_OS_SEC_TEXT_INIT LosQueueCB *OsAllQueueCBInit(LOS_DL_LIST *freeQueueList)
return allQueue;
}
/*
* Description : queue initial
* Return : LOS_OK on success or error code on failure
*/
LITE_OS_SEC_TEXT_INIT UINT32 OsQueueInit(VOID)
{
#ifndef LOSCFG_IPC_CONTAINER
@ -93,7 +97,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsQueueInit(VOID)
#endif
return LOS_OK;
}
///Create a queue: memory is allocated according to the requested queue length and message-node size, and the queue ID is returned through queueID
LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueCreate(CHAR *queueName, UINT16 len, UINT32 *queueID,
UINT32 flags, UINT16 maxMsgSize)
{
@ -110,7 +114,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueCreate(CHAR *queueName, UINT16 len, UINT32
return LOS_ERRNO_QUEUE_CREAT_PTR_NULL;
}
if (maxMsgSize > (OS_NULL_SHORT - sizeof(UINT32))) {// upper limit of maxMsgSize; sizeof(UINT32) is subtracted because each slot also stores the message length
if (maxMsgSize > (OS_NULL_SHORT - sizeof(UINT32))) {
return LOS_ERRNO_QUEUE_SIZE_TOO_BIG;
}
@ -118,83 +122,83 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueCreate(CHAR *queueName, UINT16 len, UINT32
return LOS_ERRNO_QUEUE_PARA_ISZERO;
}
msgSize = maxMsgSize + sizeof(UINT32);// total size = message payload length + length field (UINT32)
msgSize = maxMsgSize + sizeof(UINT32);
/*
* Memory allocation is time-consuming, to shorten the time of disable interrupt,
* move the memory allocation to here.
*///memory allocation is time-consuming; doing it here keeps the interrupt-disabled window short
queue = (UINT8 *)LOS_MemAlloc(m_aucSysMem1, (UINT32)len * msgSize);// allocate from the system memory pool; this block backs all queue reads and writes
if (queue == NULL) {// all memory the queue may ever need is allocated up front, so it cannot run short later
return LOS_ERRNO_QUEUE_CREATE_NO_MEMORY;// callers include OsSwtmrInit, sys_mbox_new, DoMqueueCreate, etc.
*/
queue = (UINT8 *)LOS_MemAlloc(m_aucSysMem1, (UINT32)len * msgSize);
if (queue == NULL) {
return LOS_ERRNO_QUEUE_CREATE_NO_MEMORY;
}
SCHEDULER_LOCK(intSave);
if (LOS_ListEmpty(&FREE_QUEUE_LIST)) {// no free queue control block is available
SCHEDULER_UNLOCK(intSave);// g_freeQueueList manages the allocatable queues; queue IDs are obtained from it
if (LOS_ListEmpty(&FREE_QUEUE_LIST)) {
SCHEDULER_UNLOCK(intSave);
OsQueueCheckHook();
(VOID)LOS_MemFree(m_aucSysMem1, queue);// free the buffer allocated above
(VOID)LOS_MemFree(m_aucSysMem1, queue);
return LOS_ERRNO_QUEUE_CB_UNAVAILABLE;
}
unusedQueue = LOS_DL_LIST_FIRST(&FREE_QUEUE_LIST);// take an unused queue
LOS_ListDelete(unusedQueue);// detach it from g_freeQueueList; unusedQueue is just a LOS_DL_LIST node
queueCB = GET_QUEUE_LIST(unusedQueue);// recover the whole LosQueueCB from the list node
queueCB->queueLen = len; // total number of messages; fixed once the queue is created
queueCB->queueSize = msgSize;// size of one message node; also fixed once created
queueCB->queueHandle = queue; // queue handle: the storage area for queue contents
queueCB->queueState = OS_QUEUE_INUSED; // mark the queue as in use
queueCB->readWriteableCnt[OS_QUEUE_READ] = 0;// readable-resource count, OS_QUEUE_READ(0)
queueCB->readWriteableCnt[OS_QUEUE_WRITE] = len;// writable-resource count, OS_QUEUE_WRITE(1); initially all len slots are writable
queueCB->queueHead = 0;// head index
queueCB->queueTail = 0;// tail index
LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_READ]);// init the list of tasks waiting to read
LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_WRITE]);// init the list of tasks waiting to write
LOS_ListInit(&queueCB->memList);
OsQueueDbgUpdateHook(queueCB->queueID, OsCurrTaskGet()->taskEntry);// update the task entry in the queue debug info on create/delete
unusedQueue = LOS_DL_LIST_FIRST(&FREE_QUEUE_LIST);
LOS_ListDelete(unusedQueue);
queueCB = GET_QUEUE_LIST(unusedQueue);
queueCB->queueLen = len;
queueCB->queueSize = msgSize;
queueCB->queueHandle = queue;
queueCB->queueState = OS_QUEUE_INUSED;
queueCB->readWriteableCnt[OS_QUEUE_READ] = 0;
queueCB->readWriteableCnt[OS_QUEUE_WRITE] = len;
queueCB->queueHead = 0;
queueCB->queueTail = 0;
LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_READ]);
LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_WRITE]);
LOS_ListInit(&queueCB->memList);
OsQueueDbgUpdateHook(queueCB->queueID, OsCurrTaskGet()->taskEntry);
SCHEDULER_UNLOCK(intSave);
*queueID = queueCB->queueID;// return the queue ID
*queueID = queueCB->queueID;
OsHookCall(LOS_HOOK_TYPE_QUEUE_CREATE, queueCB);
return LOS_OK;
}
///Parameter check for reading a queue
STATIC LITE_OS_SEC_TEXT UINT32 OsQueueReadParameterCheck(UINT32 queueID, const VOID *bufferAddr,
const UINT32 *bufferSize, UINT32 timeout)
{
if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {// the queue index must not exceed the limit
if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
return LOS_ERRNO_QUEUE_INVALID;
}
if ((bufferAddr == NULL) || (bufferSize == NULL)) {// validate the buffer address and size
if ((bufferAddr == NULL) || (bufferSize == NULL)) {
return LOS_ERRNO_QUEUE_READ_PTR_NULL;
}
if ((*bufferSize == 0) || (*bufferSize > (OS_NULL_SHORT - sizeof(UINT32)))) {// the read size is capped below 64K; sizeof(UINT32) is reserved for the stored message length
return LOS_ERRNO_QUEUE_READSIZE_IS_INVALID; // hence the subtraction
if ((*bufferSize == 0) || (*bufferSize > (OS_NULL_SHORT - sizeof(UINT32)))) {
return LOS_ERRNO_QUEUE_READSIZE_IS_INVALID;
}
OsQueueDbgTimeUpdateHook(queueID);
if (timeout != LOS_NO_WAIT) {// the caller is willing to wait before reading
if (OS_INT_ACTIVE) {// but we are in interrupt context
return LOS_ERRNO_QUEUE_READ_IN_INTERRUPT;// a blocking queue read is not allowed inside a hardware interrupt
if (timeout != LOS_NO_WAIT) {
if (OS_INT_ACTIVE) {
return LOS_ERRNO_QUEUE_READ_IN_INTERRUPT;
}
}
return LOS_OK;
}
///Parameter check for writing a queue
STATIC LITE_OS_SEC_TEXT UINT32 OsQueueWriteParameterCheck(UINT32 queueID, const VOID *bufferAddr,
const UINT32 *bufferSize, UINT32 timeout)
{
if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {// the queue index must not exceed the limit
if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
return LOS_ERRNO_QUEUE_INVALID;
}
if (bufferAddr == NULL) {// no data source
if (bufferAddr == NULL) {
return LOS_ERRNO_QUEUE_WRITE_PTR_NULL;
}
if (*bufferSize == 0) {// note: there is no upper-bound check on the write size here
if (*bufferSize == 0) {
return LOS_ERRNO_QUEUE_WRITESIZE_ISZERO;
}
@ -207,129 +211,128 @@ STATIC LITE_OS_SEC_TEXT UINT32 OsQueueWriteParameterCheck(UINT32 queueID, const
}
return LOS_OK;
}
///Queue buffer operation; data is always accessed at the head or the tail, never in the middle, so the head and tail indices fully describe the queue
STATIC VOID OsQueueBufferOperate(LosQueueCB *queueCB, UINT32 operateType, VOID *bufferAddr, UINT32 *bufferSize)
{
UINT8 *queueNode = NULL;
UINT32 msgDataSize;
UINT16 queuePosition;
/* get the queue position */
switch (OS_QUEUE_OPERATE_GET(operateType)) {// get the operation type
case OS_QUEUE_READ_HEAD:// read from the head
queuePosition = queueCB->queueHead;// take the head position
((queueCB->queueHead + 1) == queueCB->queueLen) ? (queueCB->queueHead = 0) : (queueCB->queueHead++);// advance the head, wrapping around
/* get the queue position */
switch (OS_QUEUE_OPERATE_GET(operateType)) {
case OS_QUEUE_READ_HEAD:
queuePosition = queueCB->queueHead;
((queueCB->queueHead + 1) == queueCB->queueLen) ? (queueCB->queueHead = 0) : (queueCB->queueHead++);
break;
case OS_QUEUE_WRITE_HEAD:// write at the head
(queueCB->queueHead == 0) ? (queueCB->queueHead = queueCB->queueLen - 1) : (--queueCB->queueHead);// move the head backwards, wrapping around
queuePosition = queueCB->queueHead;// take the head position
case OS_QUEUE_WRITE_HEAD:
(queueCB->queueHead == 0) ? (queueCB->queueHead = queueCB->queueLen - 1) : (--queueCB->queueHead);
queuePosition = queueCB->queueHead;
break;
case OS_QUEUE_WRITE_TAIL:// write at the tail
queuePosition = queueCB->queueTail;// use the tail position
((queueCB->queueTail + 1) == queueCB->queueLen) ? (queueCB->queueTail = 0) : (queueCB->queueTail++);// advance the tail, wrapping around
case OS_QUEUE_WRITE_TAIL:
queuePosition = queueCB->queueTail;
((queueCB->queueTail + 1) == queueCB->queueLen) ? (queueCB->queueTail = 0) : (queueCB->queueTail++);
break;
default: /* read tail, reserved. */
PRINT_ERR("invalid queue operate type!\n");
return;
}
//queueHandle is the buffer allocated at creation time; messages are copied into and out of it
queueNode = &(queueCB->queueHandle[(queuePosition * (queueCB->queueSize))]);// locate the message slot
if (OS_QUEUE_IS_READ(operateType)) {// read path; reading is done in two steps
queueNode = &(queueCB->queueHandle[(queuePosition * (queueCB->queueSize))]);
if (OS_QUEUE_IS_READ(operateType)) {
if (memcpy_s(&msgDataSize, sizeof(UINT32), queueNode + queueCB->queueSize - sizeof(UINT32),
sizeof(UINT32)) != EOK) {// 1. first read the stored message length from the slot's trailing four bytes
sizeof(UINT32)) != EOK) {
PRINT_ERR("get msgdatasize failed\n");
return;
}
msgDataSize = (*bufferSize < msgDataSize) ? *bufferSize : msgDataSize;
if (memcpy_s(bufferAddr, *bufferSize, queueNode, msgDataSize) != EOK) {// 2. copy the message data from the slot into bufferAddr
if (memcpy_s(bufferAddr, *bufferSize, queueNode, msgDataSize) != EOK) {
PRINT_ERR("copy message to buffer failed\n");
return;
}
*bufferSize = msgDataSize;// return the message size through the parameter
} else {// only read and write exist, so this is the write path; writing is also done in two steps
if (memcpy_s(queueNode, queueCB->queueSize, bufferAddr, *bufferSize) != EOK) {// 1. copy the message payload into the slot
PRINT_ERR("store message failed\n");
*bufferSize = msgDataSize;
} else {
if (memcpy_s(queueNode, queueCB->queueSize, bufferAddr, *bufferSize) != EOK) {
PRINT_ERR("store message failed\n");
return;
}
if (memcpy_s(queueNode + queueCB->queueSize - sizeof(UINT32), sizeof(UINT32), bufferSize,
sizeof(UINT32)) != EOK) {// 2. store the message length in the slot's trailing UINT32
sizeof(UINT32)) != EOK) {
PRINT_ERR("store message size failed\n");
return;
}
}
}
///Parameter check for a queue operation
STATIC UINT32 OsQueueOperateParamCheck(const LosQueueCB *queueCB, UINT32 queueID,
UINT32 operateType, const UINT32 *bufferSize)
{
if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {// check the queue ID and state
if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
return LOS_ERRNO_QUEUE_NOT_CREATE;
}
if (OS_QUEUE_IS_WRITE(operateType) && (*bufferSize > (queueCB->queueSize - sizeof(UINT32)))) {// write-path check
return LOS_ERRNO_QUEUE_WRITE_SIZE_TOO_BIG;// the data is larger than a queue slot can hold
if (OS_QUEUE_IS_WRITE(operateType) && (*bufferSize > (queueCB->queueSize - sizeof(UINT32)))) {
return LOS_ERRNO_QUEUE_WRITE_SIZE_TOO_BIG;
}
return LOS_OK;
}
UINT32 OsQueueOperate(UINT32 queueID, UINT32 operateType, VOID *bufferAddr, UINT32 *bufferSize, UINT32 timeout)
{
UINT32 ret;
UINT32 readWrite = OS_QUEUE_READ_WRITE_GET(operateType);// extract the read/write flag
UINT32 readWrite = OS_QUEUE_READ_WRITE_GET(operateType);
UINT32 intSave;
OsHookCall(LOS_HOOK_TYPE_QUEUE_READ, (LosQueueCB *)GET_QUEUE_HANDLE(queueID), operateType, *bufferSize, timeout);
SCHEDULER_LOCK(intSave);
LosQueueCB *queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);// get the queue control block
ret = OsQueueOperateParamCheck(queueCB, queueID, operateType, bufferSize);// parameter check
LosQueueCB *queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);
ret = OsQueueOperateParamCheck(queueCB, queueID, operateType, bufferSize);
if (ret != LOS_OK) {
goto QUEUE_END;
}
if (queueCB->readWriteableCnt[readWrite] == 0) {// readWriteableCnt tells whether the queue can currently be read/written
if (timeout == LOS_NO_WAIT) {// do not wait, return immediately
if (queueCB->readWriteableCnt[readWrite] == 0) {
if (timeout == LOS_NO_WAIT) {
ret = OS_QUEUE_IS_READ(operateType) ? LOS_ERRNO_QUEUE_ISEMPTY : LOS_ERRNO_QUEUE_ISFULL;
goto QUEUE_END;
}
if (!OsPreemptableInSched()) {// preemption is not possible here
if (!OsPreemptableInSched()) {
ret = LOS_ERRNO_QUEUE_PEND_IN_LOCK;
goto QUEUE_END;
}
//important: the task removes itself from the ready list, yields the CPU, triggers a schedule, and is queued on readWriteList[readWrite] together with the other tasks waiting to read/write
LosTaskCB *runTask = OsCurrTaskGet();
OsTaskWaitSetPendMask(OS_TASK_WAIT_QUEUE, queueCB->queueID, timeout);
ret = runTask->ops->wait(runTask, &queueCB->readWriteList[readWrite], timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {// if woken up by a timeout, report the read/write failure
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
ret = LOS_ERRNO_QUEUE_TIMEOUT;
goto QUEUE_END;
goto QUEUE_END;
}
} else {
queueCB->readWriteableCnt[readWrite]--;// decrement the counter; each message can be read/written only once
queueCB->readWriteableCnt[readWrite]--;
}
OsQueueBufferOperate(queueCB, operateType, bufferAddr, bufferSize);// perform the actual read or write
OsQueueBufferOperate(queueCB, operateType, bufferAddr, bufferSize);
if (!LOS_ListEmpty(&queueCB->readWriteList[!readWrite])) {// tasks are still queued for the opposite operation (e.g. the queue was full when they tried)
LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&queueCB->readWriteList[!readWrite]));// take the first such waiting task
if (!LOS_ListEmpty(&queueCB->readWriteList[!readWrite])) {
LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&queueCB->readWriteList[!readWrite]));
OsTaskWakeClearPendMask(resumedTask);
resumedTask->ops->wake(resumedTask);
SCHEDULER_UNLOCK(intSave);
LOS_MpSchedule(OS_MP_CPU_ALL);// ask all CPUs to reschedule; the woken task may well run on another CPU
LOS_Schedule();// request a schedule
LOS_MpSchedule(OS_MP_CPU_ALL);
LOS_Schedule();
return LOS_OK;
} else {
queueCB->readWriteableCnt[!readWrite]++;// increment the opposite counter
queueCB->readWriteableCnt[!readWrite]++;
}
QUEUE_END:
SCHEDULER_UNLOCK(intSave);
return ret;
}
///Public API: read from a message queue with a timeout
LITE_OS_SEC_TEXT UINT32 LOS_QueueReadCopy(UINT32 queueID,
VOID *bufferAddr,
UINT32 *bufferSize,
@ -338,15 +341,15 @@ LITE_OS_SEC_TEXT UINT32 LOS_QueueReadCopy(UINT32 queueID,
UINT32 ret;
UINT32 operateType;
ret = OsQueueReadParameterCheck(queueID, bufferAddr, bufferSize, timeout);// parameter check
ret = OsQueueReadParameterCheck(queueID, bufferAddr, bufferSize, timeout);
if (ret != LOS_OK) {
return ret;
}
operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_READ, OS_QUEUE_HEAD);// read from the head
return OsQueueOperate(queueID, operateType, bufferAddr, bufferSize, timeout);// perform the timed read
operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_READ, OS_QUEUE_HEAD);
return OsQueueOperate(queueID, operateType, bufferAddr, bufferSize, timeout);
}
///Public API: write at the head of a queue
LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHeadCopy(UINT32 queueID,
VOID *bufferAddr,
UINT32 bufferSize,
@ -355,15 +358,15 @@ LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHeadCopy(UINT32 queueID,
UINT32 ret;
UINT32 operateType;
ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout);// parameter check
ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout);
if (ret != LOS_OK) {
return ret;
}
operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_HEAD);// write at the head
return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);// perform the write
operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_HEAD);
return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);
}
///Public API: write at the tail of a queue
LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteCopy(UINT32 queueID,
VOID *bufferAddr,
UINT32 bufferSize,
@ -372,16 +375,15 @@ LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteCopy(UINT32 queueID,
UINT32 ret;
UINT32 operateType;
ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout);// parameter check
ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout);
if (ret != LOS_OK) {
return ret;
}
operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_TAIL);// write at the tail
return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);// perform the write
operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_TAIL);
return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);
}
LITE_OS_SEC_TEXT UINT32 LOS_QueueRead(UINT32 queueID, VOID *bufferAddr, UINT32 bufferSize, UINT32 timeout)
{
return LOS_QueueReadCopy(queueID, bufferAddr, &bufferSize, timeout);
@ -396,7 +398,6 @@ LITE_OS_SEC_TEXT UINT32 LOS_QueueWrite(UINT32 queueID, VOID *bufferAddr, UINT32
return LOS_QueueWriteCopy(queueID, &bufferAddr, bufferSize, timeout);
}
LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHead(UINT32 queueID,
VOID *bufferAddr,
UINT32 bufferSize,
@ -409,7 +410,6 @@ LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHead(UINT32 queueID,
return LOS_QueueWriteHeadCopy(queueID, &bufferAddr, bufferSize, timeout);
}
LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueDelete(UINT32 queueID)
{
LosQueueCB *queueCB = NULL;
@ -422,50 +422,50 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueDelete(UINT32 queueID)
}
SCHEDULER_LOCK(intSave);
queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);// get the queue control block
queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);
if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
ret = LOS_ERRNO_QUEUE_NOT_CREATE;
goto QUEUE_END;
}
if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_READ])) {// tasks are still waiting to read
if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_READ])) {
ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
goto QUEUE_END;
}
if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_WRITE])) {// tasks are still waiting to write
if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_WRITE])) {
ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
goto QUEUE_END;
}
if (!LOS_ListEmpty(&queueCB->memList)) {
if (!LOS_ListEmpty(&queueCB->memList)) {
ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
goto QUEUE_END;
}
if ((queueCB->readWriteableCnt[OS_QUEUE_WRITE] + queueCB->readWriteableCnt[OS_QUEUE_READ]) !=
queueCB->queueLen) {// the readable and writable counts do not add up to the total length
queueCB->queueLen) {
ret = LOS_ERRNO_QUEUE_IN_TSKWRITE;
goto QUEUE_END;
}
queue = queueCB->queueHandle; // the queue buffer
queueCB->queueHandle = NULL;
queueCB->queueState = OS_QUEUE_UNUSED;// reset the queue state
queueCB->queueID = SET_QUEUE_ID(GET_QUEUE_COUNT(queueCB->queueID) + 1, GET_QUEUE_INDEX(queueCB->queueID));// @note_why is this step really needed?
queue = queueCB->queueHandle;
queueCB->queueHandle = NULL;
queueCB->queueState = OS_QUEUE_UNUSED;
queueCB->queueID = SET_QUEUE_ID(GET_QUEUE_COUNT(queueCB->queueID) + 1, GET_QUEUE_INDEX(queueCB->queueID));
OsQueueDbgUpdateHook(queueCB->queueID, NULL);
LOS_ListTailInsert(&FREE_QUEUE_LIST, &queueCB->readWriteList[OS_QUEUE_WRITE]);// recycle: put the control block back on the free list for later reuse
SCHEDULER_UNLOCK(intSave);
LOS_ListTailInsert(&FREE_QUEUE_LIST, &queueCB->readWriteList[OS_QUEUE_WRITE]);
SCHEDULER_UNLOCK(intSave);
OsHookCall(LOS_HOOK_TYPE_QUEUE_DELETE, queueCB);
ret = LOS_MemFree(m_aucSysMem1, (VOID *)queue);// free the queue buffer
ret = LOS_MemFree(m_aucSysMem1, (VOID *)queue);
return ret;
QUEUE_END:
SCHEDULER_UNLOCK(intSave);
return ret;
}
///Public API: get queue information; the LosQueueCB data is copied out through queueInfo (QUEUE_INFO_S wraps the internal data)
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_QueueInfoGet(UINT32 queueID, QUEUE_INFO_S *queueInfo)
{
UINT32 intSave;
@ -477,14 +477,14 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_QueueInfoGet(UINT32 queueID, QUEUE_INFO_S *que
return LOS_ERRNO_QUEUE_PTR_NULL;
}
if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {// 1024
if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
return LOS_ERRNO_QUEUE_INVALID;
}
(VOID)memset_s((VOID *)queueInfo, sizeof(QUEUE_INFO_S), 0, sizeof(QUEUE_INFO_S));// zero the output structure before filling it
(VOID)memset_s((VOID *)queueInfo, sizeof(QUEUE_INFO_S), 0, sizeof(QUEUE_INFO_S));
SCHEDULER_LOCK(intSave);
queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);// get the QCB from the queue ID
queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);
if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
ret = LOS_ERRNO_QUEUE_NOT_CREATE;
goto QUEUE_END;
@ -495,19 +495,19 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_QueueInfoGet(UINT32 queueID, QUEUE_INFO_S *que
queueInfo->usQueueSize = queueCB->queueSize;
queueInfo->usQueueHead = queueCB->queueHead;
queueInfo->usQueueTail = queueCB->queueTail;
queueInfo->usReadableCnt = queueCB->readWriteableCnt[OS_QUEUE_READ];// readable count
queueInfo->usWritableCnt = queueCB->readWriteableCnt[OS_QUEUE_WRITE];// writable count
queueInfo->usReadableCnt = queueCB->readWriteableCnt[OS_QUEUE_READ];
queueInfo->usWritableCnt = queueCB->readWriteableCnt[OS_QUEUE_WRITE];
LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_READ], LosTaskCB, pendList) {// find the tasks waiting to read
queueInfo->uwWaitReadTask |= 1ULL << tskCB->taskID;// record them in a bitmask; each bit of uwWaitReadTask stands for one task ID
}// e.g. 0b...011011011 means tasks 0, 1, 3, 4, 6 and 7 are waiting to read
LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_READ], LosTaskCB, pendList) {
queueInfo->uwWaitReadTask |= 1ULL << tskCB->taskID;
}
LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_WRITE], LosTaskCB, pendList) {// find the tasks waiting to write
queueInfo->uwWaitWriteTask |= 1ULL << tskCB->taskID;// record them in uwWaitWriteTask, one bit per task ID
}// likewise, each set bit is a task waiting to write
LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_WRITE], LosTaskCB, pendList) {
queueInfo->uwWaitWriteTask |= 1ULL << tskCB->taskID;
}
LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->memList, LosTaskCB, pendList) {// same as above
queueInfo->uwWaitMemTask |= 1ULL << tskCB->taskID; // used by the mailbox module
LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->memList, LosTaskCB, pendList) {
queueInfo->uwWaitMemTask |= 1ULL << tskCB->taskID;
}
QUEUE_END:
@ -515,5 +515,5 @@ QUEUE_END:
return ret;
}
#endif /* (LOSCFG_BASE_IPC_QUEUE == YES) */
#endif /* LOSCFG_BASE_IPC_QUEUE */

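/*
 * Reviewer note: a minimal usage sketch of the queue API diffed above (LOS_QueueCreate,
 * LOS_QueueWriteCopy, LOS_QueueReadCopy, LOS_QueueDelete). The header name and the flags
 * value are assumptions; each slot internally reserves an extra sizeof(UINT32) for the
 * stored message length, which is why maxMsgSize is bounded the way the checks above show.
 */
#include "los_queue.h"

VOID DemoQueueUsage(VOID)
{
    UINT32 queueId;
    CHAR qName[] = "demoQueue";
    CHAR sendBuf[] = "hello";
    CHAR recvBuf[16] = {0};
    UINT32 recvSize = sizeof(recvBuf);

    /* 8 messages of at most 16 bytes each */
    if (LOS_QueueCreate(qName, 8, &queueId, 0, 16) != LOS_OK) {
        return;
    }
    (VOID)LOS_QueueWriteCopy(queueId, sendBuf, sizeof(sendBuf), LOS_NO_WAIT);   /* tail write */
    (VOID)LOS_QueueReadCopy(queueId, recvBuf, &recvSize, LOS_WAIT_FOREVER);     /* head read */
    (VOID)LOS_QueueDelete(queueId);
}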
@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -37,10 +37,9 @@
#include "los_exc.h"
#include "los_sched_pri.h"
#ifdef LOSCFG_BASE_IPC_RWLOCK
#define RWLOCK_COUNT_MASK 0x00FFFFFFU
/// Check whether a rwlock is valid
BOOL LOS_RwlockIsValid(const LosRwlock *rwlock)
{
if ((rwlock != NULL) && ((rwlock->magic & RWLOCK_COUNT_MASK) == OS_RWLOCK_MAGIC)) {
@ -49,7 +48,7 @@ BOOL LOS_RwlockIsValid(const LosRwlock *rwlock)
return FALSE;
}
/// Create a rwlock and initialize its fields
UINT32 LOS_RwlockInit(LosRwlock *rwlock)
{
UINT32 intSave;
@ -72,7 +71,7 @@ UINT32 LOS_RwlockInit(LosRwlock *rwlock)
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
/// Destroy the given rwlock
UINT32 LOS_RwlockDestroy(LosRwlock *rwlock)
{
UINT32 intSave;
@ -96,18 +95,18 @@ UINT32 LOS_RwlockDestroy(LosRwlock *rwlock)
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
/// Common rwlock checks
STATIC UINT32 OsRwlockCheck(const LosRwlock *rwlock)
{
if (rwlock == NULL) {
return LOS_EINVAL;
}
if (OS_INT_ACTIVE) { // rwlocks cannot be used in an interrupt service routine (think about why)
if (OS_INT_ACTIVE) {
return LOS_EINTR;
}
/* DO NOT Call blocking API in system tasks | system tasks must not use a rwlock */
/* DO NOT Call blocking API in system tasks */
LosTaskCB *runTask = (LosTaskCB *)OsCurrTaskGet();
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
return LOS_EPERM;
@ -115,23 +114,19 @@ STATIC UINT32 OsRwlockCheck(const LosRwlock *rwlock)
return LOS_OK;
}
/// Check how the given task's priority compares with the highest-priority pending write task
STATIC BOOL OsRwlockPriCompare(LosTaskCB *runTask, LOS_DL_LIST *rwList)
{
if (!LOS_ListEmpty(rwList)) {
LosTaskCB *highestTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(rwList));// the first pending write task has the highest priority
if (OsSchedParamCompare(runTask, highestTask) < 0) {// the current task ranks above the pending write task
LosTaskCB *highestTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(rwList));
if (OsSchedParamCompare(runTask, highestTask) < 0) {
return TRUE;
}
return FALSE;
}
return TRUE;
}
/* Acquire the lock in read mode; three cases are distinguished below. */
STATIC UINT32 OsRwlockRdPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 timeout)
{
UINT32 ret;
@ -140,12 +135,12 @@ STATIC UINT32 OsRwlockRdPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 tim
* When the rwlock mode is read mode or free mode and the priority of the current read task
* is higher than the first pended write task. current read task can obtain this rwlock.
*/
if (rwlock->rwCount >= 0) {// cases 1 and 2: the lock is free or held by readers
if (OsRwlockPriCompare(runTask, &(rwlock->writeList))) {// the reader may take the lock ahead of any pended writer
if (rwlock->rwCount == INT8_MAX) {// the number of read holders is at its limit
if (rwlock->rwCount >= 0) {
if (OsRwlockPriCompare(runTask, &(rwlock->writeList))) {
if (rwlock->rwCount == INT8_MAX) {
return LOS_EINVAL;
}
rwlock->rwCount++;// read lock acquired
rwlock->rwCount++;
return LOS_OK;
}
}
@ -154,51 +149,45 @@ STATIC UINT32 OsRwlockRdPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 tim
return LOS_EINVAL;
}
if (!OsPreemptableInSched()) {// preemption is not possible here
if (!OsPreemptableInSched()) {
return LOS_EDEADLK;
}
/* The current task is not allowed to obtain the write lock when it obtains the read lock. */
if ((LosTaskCB *)(rwlock->writeOwner) == runTask) { // is the write owner the current task?
/* The current task is not allowed to obtain the write lock when it obtains the read lock. */
if ((LosTaskCB *)(rwlock->writeOwner) == runTask) {
return LOS_EINVAL;
}
/*
* When the rwlock mode is write mode or the priority of the current read task
* is lower than the first pended write task, current read task will be pended.
*/
LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->readList));// find the position to insert at
//e.g. if the list already holds tasks with priorities 0 3 8 9 23 and the current one is 10, the node returned is the one with priority 9
ret = runTask->ops->wait(runTask, node, timeout);// insert after it, giving 0 3 8 9 10 23
LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->readList));
ret = runTask->ops->wait(runTask, node, timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
return LOS_ETIMEDOUT;
}
return ret;
}
/// Acquire the lock in write mode
STATIC UINT32 OsRwlockWrPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 timeout)
{
UINT32 ret;
/* When the rwlock is free mode, current write task can obtain this rwlock. */
if (rwlock->rwCount == 0) {
rwlock->rwCount = -1;
rwlock->writeOwner = (VOID *)runTask;// hand the lock to the current task directly
rwlock->writeOwner = (VOID *)runTask;
return LOS_OK;
}
/* Current write task can use one rwlock once again if the rwlock owner is it. */
if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) == runTask)) {
if (rwlock->rwCount == INT8_MIN) {
return LOS_EINVAL;
}
rwlock->rwCount--;// note: acquiring it again counts as holding two write locks
rwlock->rwCount--;
return LOS_OK;
}
@ -212,9 +201,9 @@ STATIC UINT32 OsRwlockWrPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 tim
/*
* When the rwlock is read mode or other write task obtains this rwlock, current
* write task will be pended.
*/
LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->writeList));// find the position to insert at
LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->writeList));
ret = runTask->ops->wait(runTask, node, timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
ret = LOS_ETIMEDOUT;
@ -276,22 +265,20 @@ UINT32 OsRwlockTryWrUnsafe(LosRwlock *rwlock, UINT32 timeout)
return LOS_EBADF;
}
/* When the rwlock is read mode, current write task will be pended. */
if (rwlock->rwCount > 0) {
return LOS_EBUSY;
}
/* When other write task obtains this rwlock, current write task will be pended. */
LosTaskCB *runTask = OsCurrTaskGet();
if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) != runTask)) {
return LOS_EBUSY;
}
return OsRwlockWrPendOp(runTask, rwlock, timeout);
return OsRwlockWrPendOp(runTask, rwlock, timeout);
}
/// Acquire the given rwlock in read mode
UINT32 LOS_RwlockRdLock(LosRwlock *rwlock, UINT32 timeout)
{
UINT32 intSave;
@ -306,7 +293,7 @@ UINT32 LOS_RwlockRdLock(LosRwlock *rwlock, UINT32 timeout)
SCHEDULER_UNLOCK(intSave);
return ret;
}
/// Try to acquire the given rwlock in read mode
UINT32 LOS_RwlockTryRdLock(LosRwlock *rwlock)
{
UINT32 intSave;
@ -317,11 +304,11 @@ UINT32 LOS_RwlockTryRdLock(LosRwlock *rwlock)
}
SCHEDULER_LOCK(intSave);
ret = OsRwlockTryRdUnsafe(rwlock, 0);// "try" means return immediately if the lock is unavailable; the current task is never suspended
ret = OsRwlockTryRdUnsafe(rwlock, 0);
SCHEDULER_UNLOCK(intSave);
return ret;
}
/// Acquire the given rwlock in write mode
UINT32 LOS_RwlockWrLock(LosRwlock *rwlock, UINT32 timeout)
{
UINT32 intSave;
@ -336,7 +323,7 @@ UINT32 LOS_RwlockWrLock(LosRwlock *rwlock, UINT32 timeout)
SCHEDULER_UNLOCK(intSave);
return ret;
}
/// Try to acquire the given rwlock in write mode
UINT32 LOS_RwlockTryWrLock(LosRwlock *rwlock)
{
UINT32 intSave;
@ -347,32 +334,32 @@ UINT32 LOS_RwlockTryWrLock(LosRwlock *rwlock)
}
SCHEDULER_LOCK(intSave);
ret = OsRwlockTryWrUnsafe(rwlock, 0);// "try" means return immediately if the lock is unavailable; the current task is never suspended
ret = OsRwlockTryWrUnsafe(rwlock, 0);
SCHEDULER_UNLOCK(intSave);
return ret;
}
/// Get the current rwlock mode
STATIC UINT32 OsRwlockGetMode(LOS_DL_LIST *readList, LOS_DL_LIST *writeList)
{
BOOL isReadEmpty = LOS_ListEmpty(readList);
BOOL isWriteEmpty = LOS_ListEmpty(writeList);
if (isReadEmpty && isWriteEmpty) { // neither readers nor writers are waiting
return RWLOCK_NONE_MODE; // free mode
if (isReadEmpty && isWriteEmpty) {
return RWLOCK_NONE_MODE;
}
if (!isReadEmpty && isWriteEmpty) { // readers are waiting, no writers
if (!isReadEmpty && isWriteEmpty) {
return RWLOCK_READ_MODE;
}
if (isReadEmpty && !isWriteEmpty) { // writers are waiting, no readers
if (isReadEmpty && !isWriteEmpty) {
return RWLOCK_WRITE_MODE;
}
LosTaskCB *pendedReadTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(readList));
LosTaskCB *pendedWriteTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(writeList));
if (OsSchedParamCompare(pendedWriteTask, pendedReadTask) <= 0) {
return RWLOCK_WRITEFIRST_MODE; // the pending writer has higher priority: write-first mode
return RWLOCK_WRITEFIRST_MODE;
}
return RWLOCK_READFIRST_MODE; // the pending reader has higher priority: read-first mode
return RWLOCK_READFIRST_MODE;
}
/// Release the lock
STATIC UINT32 OsRwlockPostOp(LosRwlock *rwlock, BOOL *needSched)
{
UINT32 rwlockMode;
@ -380,15 +367,15 @@ STATIC UINT32 OsRwlockPostOp(LosRwlock *rwlock, BOOL *needSched)
rwlock->rwCount = 0;
rwlock->writeOwner = NULL;
rwlockMode = OsRwlockGetMode(&(rwlock->readList), &(rwlock->writeList));// get the mode first
if (rwlockMode == RWLOCK_NONE_MODE) {// free mode: nothing to wake, just return
rwlockMode = OsRwlockGetMode(&(rwlock->readList), &(rwlock->writeList));
if (rwlockMode == RWLOCK_NONE_MODE) {
return LOS_OK;
}
/* In this case, rwlock will wake the first pended write task. */
if ((rwlockMode == RWLOCK_WRITE_MODE) || (rwlockMode == RWLOCK_WRITEFIRST_MODE)) {// write mode: tasks are waiting for the write lock
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->writeList)));// get the task entity
rwlock->rwCount = -1;// set it to -1 directly; note this is not a decrement
rwlock->writeOwner = (VOID *)resumedTask;// hand the lock to the waiting writer and wake it
/* In this case, rwlock will wake the first pended write task. */
if ((rwlockMode == RWLOCK_WRITE_MODE) || (rwlockMode == RWLOCK_WRITEFIRST_MODE)) {
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->writeList)));
rwlock->rwCount = -1;
rwlock->writeOwner = (VOID *)resumedTask;
resumedTask->ops->wake(resumedTask);
if (needSched != NULL) {
*needSched = TRUE;
@ -396,30 +383,29 @@ STATIC UINT32 OsRwlockPostOp(LosRwlock *rwlock, BOOL *needSched)
return LOS_OK;
}
rwlock->rwCount = 1; // set it to 1 directly, since this is a release
rwlock->rwCount = 1;
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->readList)));
resumedTask->ops->wake(resumedTask);
while (!LOS_ListEmpty(&(rwlock->readList))) {// walk the read list to wake further readers (only those ranking above the pending writer)
while (!LOS_ListEmpty(&(rwlock->readList))) {
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->readList)));
if (rwlockMode == RWLOCK_READFIRST_MODE) {
LosTaskCB *pendedWriteTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->writeList)));
if (OsSchedParamCompare(resumedTask, pendedWriteTask) >= 0) {
break;// stop the loop
}
break;
}
}
if (rwlock->rwCount == INT8_MAX) {
return EINVAL;
}
rwlock->rwCount++;// one more read holder
resumedTask->ops->wake(resumedTask);// keep waking readers, which is what allows several reads to run concurrently; on SMP the resumed
//task will often run on a different core from the current one
rwlock->rwCount++;
resumedTask->ops->wake(resumedTask);
}
if (needSched != NULL) {
*needSched = TRUE;
}
return LOS_OK;
}
/// Release the lock and wake waiting tasks
UINT32 OsRwlockUnlockUnsafe(LosRwlock *rwlock, BOOL *needSched)
{
if ((rwlock->magic & RWLOCK_COUNT_MASK) != OS_RWLOCK_MAGIC) {
@ -431,28 +417,27 @@ UINT32 OsRwlockUnlockUnsafe(LosRwlock *rwlock, BOOL *needSched)
}
LosTaskCB *runTask = OsCurrTaskGet();
if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) != runTask)) {// write mode, but the current task is not the owner
if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) != runTask)) {
return LOS_EPERM;
}
/*
* When the rwCount of the rwlock more than 1 or less than -1, the rwlock mode will
* not changed after current unlock operation, so pended tasks can not be waken.
*/
if (rwlock->rwCount > 1) {// read mode
if (rwlock->rwCount > 1) {
rwlock->rwCount--;
return LOS_OK;
}
if (rwlock->rwCount < -1) {// write mode
if (rwlock->rwCount < -1) {
rwlock->rwCount++;
return LOS_OK;
}
return OsRwlockPostOp(rwlock, needSched);
}
/// Release the given rwlock
UINT32 LOS_RwlockUnLock(LosRwlock *rwlock)
{
UINT32 intSave;
@ -466,9 +451,9 @@ UINT32 LOS_RwlockUnLock(LosRwlock *rwlock)
SCHEDULER_LOCK(intSave);
ret = OsRwlockUnlockUnsafe(rwlock, &needSched);
SCHEDULER_UNLOCK(intSave);
LOS_MpSchedule(OS_MP_CPU_ALL);// let all CPUs take part in the reschedule
if (needSched == TRUE) {// is a reschedule needed?
LOS_Schedule();// trigger a schedule and switch tasks
LOS_MpSchedule(OS_MP_CPU_ALL);
if (needSched == TRUE) {
LOS_Schedule();
}
return ret;
}

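/*
 * Reviewer note: a minimal usage sketch of the rwlock API diffed above (LOS_RwlockInit,
 * LOS_RwlockRdLock, LOS_RwlockWrLock, LOS_RwlockUnLock, LOS_RwlockDestroy). The header name
 * is assumed. Several tasks may hold the read lock at once; the write lock is exclusive.
 */
#include "los_rwlock.h"

STATIC LosRwlock g_demoRwlock;

VOID DemoRwlockUsage(VOID)
{
    if (LOS_RwlockInit(&g_demoRwlock) != LOS_OK) {
        return;
    }
    if (LOS_RwlockRdLock(&g_demoRwlock, LOS_WAIT_FOREVER) == LOS_OK) {
        /* ... read shared data ... */
        (VOID)LOS_RwlockUnLock(&g_demoRwlock);
    }
    if (LOS_RwlockWrLock(&g_demoRwlock, LOS_WAIT_FOREVER) == LOS_OK) {
        /* ... modify shared data ... */
        (VOID)LOS_RwlockUnLock(&g_demoRwlock);
    }
    (VOID)LOS_RwlockDestroy(&g_demoRwlock);
}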
@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -40,18 +40,17 @@
#include "los_percpu_pri.h"
#include "los_hook.h"
#ifdef LOSCFG_BASE_IPC_SEM
#if (LOSCFG_BASE_IPC_SEM_LIMIT <= 0)
#error "sem maxnum cannot be zero"
#endif /* LOSCFG_BASE_IPC_SEM_LIMIT <= 0 */
LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_unusedSemList; ///< list of available (unused) semaphores
LITE_OS_SEC_BSS LosSemCB *g_allSem = NULL; ///< semaphore pool; LOSCFG_BASE_IPC_SEM_LIMIT control blocks are allocated in one go
LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_unusedSemList;
LITE_OS_SEC_BSS LosSemCB *g_allSem = NULL;
/*
* Description : Initialize the semaphore doubly linked list
* Return : LOS_OK on success, or error code on failure
*/
LITE_OS_SEC_TEXT_INIT UINT32 OsSemInit(VOID)
@ -59,18 +58,18 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSemInit(VOID)
LosSemCB *semNode = NULL;
UINT32 index;
LOS_ListInit(&g_unusedSemList);// init the list of unused semaphores used for allocation; the number of semaphores is limited (1024 by default)
LOS_ListInit(&g_unusedSemList);
/* system resident memory, don't free */
g_allSem = (LosSemCB *)LOS_MemAlloc(m_aucSysMem0, (LOSCFG_BASE_IPC_SEM_LIMIT * sizeof(LosSemCB)));// allocate the semaphore pool
g_allSem = (LosSemCB *)LOS_MemAlloc(m_aucSysMem0, (LOSCFG_BASE_IPC_SEM_LIMIT * sizeof(LosSemCB)));
if (g_allSem == NULL) {
return LOS_ERRNO_SEM_NO_MEMORY;
}
for (index = 0; index < LOSCFG_BASE_IPC_SEM_LIMIT; index++) {
semNode = ((LosSemCB *)g_allSem) + index;//拿信号控制块, 可以直接g_allSem[index]来嘛
semNode->semID = SET_SEM_ID(0, index);//保存ID
semNode->semStat = OS_SEM_UNUSED;//标记未使用
LOS_ListTailInsert(&g_unusedSemList, &semNode->semList);//通过semList把 信号块挂到空闲链表上
semNode = ((LosSemCB *)g_allSem) + index;
semNode->semID = SET_SEM_ID(0, index);
semNode->semStat = OS_SEM_UNUSED;
LOS_ListTailInsert(&g_unusedSemList, &semNode->semList);
}
if (OsSemDbgInitHook() != LOS_OK) {
@ -79,7 +78,13 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSemInit(VOID)
return LOS_OK;
}
/*
* Description : Create a semaphore,
* Input : count --- semaphore count,
* maxCount --- Max number of available semaphores,
* semHandle --- Index of semaphore,
* Return : LOS_OK on success ,or error code on failure
*/
LITE_OS_SEC_TEXT_INIT UINT32 OsSemCreate(UINT16 count, UINT16 maxCount, UINT32 *semHandle)
{
UINT32 intSave;
@ -92,46 +97,45 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSemCreate(UINT16 count, UINT16 maxCount, UINT32 *
return LOS_ERRNO_SEM_PTR_NULL;
}
if (count > maxCount) {//信号量不能大于最大值,两参数都是外面给的
if (count > maxCount) {
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_OVERFLOW);
}
SCHEDULER_LOCK(intSave);//进入临界区,拿自旋锁
SCHEDULER_LOCK(intSave);
if (LOS_ListEmpty(&g_unusedSemList)) {//没有可分配的空闲信号提供
if (LOS_ListEmpty(&g_unusedSemList)) {
SCHEDULER_UNLOCK(intSave);
OsSemInfoGetFullDataHook();
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_ALL_BUSY);
}
unusedSem = LOS_DL_LIST_FIRST(&g_unusedSemList);//从未使用信号量池中取首个
LOS_ListDelete(unusedSem);//从空闲链表上摘除
unusedSem = LOS_DL_LIST_FIRST(&g_unusedSemList);
LOS_ListDelete(unusedSem);
SCHEDULER_UNLOCK(intSave);
semCreated = GET_SEM_LIST(unusedSem);//通过semList挂到链表上的,这里也要通过它把LosSemCB头查到. 进程,线程等结构体也都是这么干的.
semCreated->semCount = count;//设置数量
semCreated->semStat = OS_SEM_USED;//设置可用状态
semCreated->maxSemCount = maxCount;//设置最大信号数量
LOS_ListInit(&semCreated->semList);//初始化链表,后续阻塞任务通过task->pendList挂到semList链表上,就知道哪些任务在等它了.
*semHandle = semCreated->semID;//参数带走 semID
semCreated = GET_SEM_LIST(unusedSem);
semCreated->semCount = count;
semCreated->semStat = OS_SEM_USED;
semCreated->maxSemCount = maxCount;
LOS_ListInit(&semCreated->semList);
*semHandle = semCreated->semID;
OsHookCall(LOS_HOOK_TYPE_SEM_CREATE, semCreated);
OsSemDbgUpdateHook(semCreated->semID, OsCurrTaskGet()->taskEntry, count);
return LOS_OK;
ERR_HANDLER:
OS_RETURN_ERROR_P2(errLine, errNo);
}
///Public interface: create a counting semaphore
LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemCreate(UINT16 count, UINT32 *semHandle)
{
return OsSemCreate(count, OS_SEM_COUNT_MAX, semHandle);
}
///Public interface: create a binary semaphore (count at most 1), usable as a mutex
LITE_OS_SEC_TEXT_INIT UINT32 LOS_BinarySemCreate(UINT16 count, UINT32 *semHandle)
{
return OsSemCreate(count, OS_SEM_BINARY_COUNT_MAX, semHandle);
}
///Public interface: delete the specified semaphore; the parameter is its semID
LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)
{
UINT32 intSave;
@ -143,23 +147,23 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_INVALID);
}
semDeleted = GET_SEM(semHandle);//通过ID拿到信号量实体
semDeleted = GET_SEM(semHandle);
SCHEDULER_LOCK(intSave);
if ((semDeleted->semStat == OS_SEM_UNUSED) || (semDeleted->semID != semHandle)) {//参数判断
if ((semDeleted->semStat == OS_SEM_UNUSED) || (semDeleted->semID != semHandle)) {
SCHEDULER_UNLOCK(intSave);
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_INVALID);
}
if (!LOS_ListEmpty(&semDeleted->semList)) {//当前还有任务挂在这个信号上面,当然不能删除
if (!LOS_ListEmpty(&semDeleted->semList)) {
SCHEDULER_UNLOCK(intSave);
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_PENDED);//这个宏很有意思,里面goto到ERR_HANDLER
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_PENDED);
}
LOS_ListTailInsert(&g_unusedSemList, &semDeleted->semList);//通过semList从尾部插入空闲链表
semDeleted->semStat = OS_SEM_UNUSED;//状态变成了未使用
semDeleted->semID = SET_SEM_ID(GET_SEM_COUNT(semDeleted->semID) + 1, GET_SEM_INDEX(semDeleted->semID));//设置ID
LOS_ListTailInsert(&g_unusedSemList, &semDeleted->semList);
semDeleted->semStat = OS_SEM_UNUSED;
semDeleted->semID = SET_SEM_ID(GET_SEM_COUNT(semDeleted->semID) + 1, GET_SEM_INDEX(semDeleted->semID));
OsHookCall(LOS_HOOK_TYPE_SEM_DELETE, semDeleted);
OsSemDbgUpdateHook(semDeleted->semID, NULL, 0);
@ -170,11 +174,11 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)
ERR_HANDLER:
OS_RETURN_ERROR_P2(errLine, errNo);
}
///Public interface: pend on (acquire) the specified semaphore with a timeout
LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
{
UINT32 intSave;
LosSemCB *semPended = GET_SEM(semHandle);//通过ID拿到信号体
LosSemCB *semPended = GET_SEM(semHandle);
UINT32 retErr = LOS_OK;
LosTaskCB *runTask = NULL;
@ -188,7 +192,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
return LOS_ERRNO_SEM_PEND_INTERR;
}
runTask = OsCurrTaskGet();//获取当前任务
runTask = OsCurrTaskGet();
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
OsBackTrace();
return LOS_ERRNO_SEM_PEND_IN_SYSTEM_TASK;
@ -200,20 +204,19 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
retErr = LOS_ERRNO_SEM_INVALID;
goto OUT;
}
/* Update the operate time, no matter the actual Pend success or not */
OsSemDbgTimeUpdateHook(semHandle);
if (semPended->semCount > 0) {//还有资源可用,返回肯定得成功,semCount=0时代表没资源了,task会必须去睡眠了
semPended->semCount--;//资源少了一个
if (semPended->semCount > 0) {
semPended->semCount--;
OsHookCall(LOS_HOOK_TYPE_SEM_PEND, semPended, runTask, timeout);
goto OUT;//注意这里 retErr = LOS_OK ,所以返回是OK的
goto OUT;
} else if (!timeout) {
retErr = LOS_ERRNO_SEM_UNAVAILABLE;
goto OUT;
}
if (!OsPreemptableInSched()) {//不能申请调度 (不能调度的原因是因为没有持有调度任务自旋锁)
if (!OsPreemptableInSched()) {
PRINT_ERR("!!!LOS_ERRNO_SEM_PEND_IN_LOCK!!!\n");
OsBackTrace();
retErr = LOS_ERRNO_SEM_PEND_IN_LOCK;
@ -223,7 +226,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
OsHookCall(LOS_HOOK_TYPE_SEM_PEND, semPended, runTask, timeout);
OsTaskWaitSetPendMask(OS_TASK_WAIT_SEM, semPended->semID, timeout);
retErr = runTask->ops->wait(runTask, &semPended->semList, timeout);
if (retErr == LOS_ERRNO_TSK_TIMEOUT) {//注意:这里是涉及到task切换的,把自己挂起,唤醒其他task
if (retErr == LOS_ERRNO_TSK_TIMEOUT) {
retErr = LOS_ERRNO_SEM_TIMEOUT;
}
@ -231,7 +234,7 @@ OUT:
SCHEDULER_UNLOCK(intSave);
return retErr;
}
///Post the specified semaphore the "unsafe" way, i.e. without taking the scheduler spinlock
LITE_OS_SEC_TEXT UINT32 OsSemPostUnsafe(UINT32 semHandle, BOOL *needSched)
{
LosTaskCB *resumedTask = NULL;
@ -243,23 +246,23 @@ LITE_OS_SEC_TEXT UINT32 OsSemPostUnsafe(UINT32 semHandle, BOOL *needSched)
/* Update the operate time, no matter the actual Post success or not */
OsSemDbgTimeUpdateHook(semHandle);
if (semPosted->semCount == OS_SEM_COUNT_MAX) {//当前信号资源不能大于最大资源量
if (semPosted->semCount == OS_SEM_COUNT_MAX) {
return LOS_ERRNO_SEM_OVERFLOW;
}
if (!LOS_ListEmpty(&semPosted->semList)) {//当前有任务挂在semList上,要去唤醒任务
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(semPosted->semList)));//semList上面挂的都是task->pendlist节点,取第一个task下来唤醒
if (!LOS_ListEmpty(&semPosted->semList)) {
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(semPosted->semList)));
OsTaskWakeClearPendMask(resumedTask);
resumedTask->ops->wake(resumedTask);
if (needSched != NULL) {//参数不为空,就返回需要调度的标签
*needSched = TRUE;//TRUE代表需要调度
if (needSched != NULL) {
*needSched = TRUE;
}
} else {//当前没有任务挂在semList上,
semPosted->semCount++;//信号资源多一个
} else {
semPosted->semCount++;
}
OsHookCall(LOS_HOOK_TYPE_SEM_POST, semPosted, resumedTask);
return LOS_OK;
}
///Public interface: post (release) the specified semaphore
LITE_OS_SEC_TEXT UINT32 LOS_SemPost(UINT32 semHandle)
{
UINT32 intSave;
@ -269,15 +272,16 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPost(UINT32 semHandle)
if (GET_SEM_INDEX(semHandle) >= LOSCFG_BASE_IPC_SEM_LIMIT) {
return LOS_ERRNO_SEM_INVALID;
}
SCHEDULER_LOCK(intSave);
ret = OsSemPostUnsafe(semHandle, &needSched);
SCHEDULER_UNLOCK(intSave);
if (needSched) {//需要调度的情况
LOS_MpSchedule(OS_MP_CPU_ALL);//向所有CPU发送调度指令
LOS_Schedule();////发起调度
if (needSched) {
LOS_MpSchedule(OS_MP_CPU_ALL);
LOS_Schedule();
}
return ret;
}
#endif /* (LOSCFG_BASE_IPC_SEM == YES) */
#endif /* LOSCFG_BASE_IPC_SEM */
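Taken together, the code above is the classic counting-semaphore flow: LOS_SemCreate hands out a semID from the g_allSem pool, LOS_SemPend either decrements semCount or blocks the caller on semList, and LOS_SemPost either wakes the first pended task or increments semCount. A minimal producer/consumer sketch using only the public calls shown in this file (the surrounding task setup is illustrative, not part of the source):

    #include "los_sem.h"

    STATIC UINT32 g_dataSem;   /* counts items produced and not yet consumed */

    UINT32 ExampleSemInit(VOID)
    {
        /* initial count 0: consumers block until a producer posts */
        return LOS_SemCreate(0, &g_dataSem);
    }

    VOID ProducerSide(VOID)
    {
        /* ... put one item into some queue ... */
        (VOID)LOS_SemPost(g_dataSem);   /* wakes one pended consumer, or bumps semCount */
    }

    VOID ConsumerSide(VOID)
    {
        if (LOS_SemPend(g_dataSem, LOS_WAIT_FOREVER) == LOS_OK) {
            /* ... take one item out of the queue ... */
        }
    }

    VOID ExampleSemDone(VOID)
    {
        (VOID)LOS_SemDelete(g_dataSem); /* fails with LOS_ERRNO_SEM_PENDED if tasks still wait on it */
    }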

@ -78,11 +78,11 @@ STATIC VOID OsSemPendedTaskNamePrint(LosSemCB *semNode)
#ifdef LOSCFG_DEBUG_SEMAPHORE
typedef struct {
UINT16 origSemCount; /* Number of orignal available semaphores *///原始可用信号量数
UINT64 lastAccessTime; /* The last operation time */ //最后操作时间
TSK_ENTRY_FUNC creator; /* The task entry who created this sem */ //由哪个task的入口函数创建了这个任务
UINT16 origSemCount; /* Number of original available semaphores */
UINT64 lastAccessTime; /* The last operation time */
TSK_ENTRY_FUNC creator; /* The task entry who created this sem */
} SemDebugCB;
STATIC SemDebugCB *g_semDebugArray = NULL;//默认1024个SemDebugCB debug信号量池
STATIC SemDebugCB *g_semDebugArray = NULL;
STATIC BOOL SemCompareValue(const IpcSortParam *sortParam, UINT32 left, UINT32 right)
{
@ -102,23 +102,23 @@ UINT32 OsSemDbgInit(VOID)
(VOID)memset_s(g_semDebugArray, size, 0, size);
return LOS_OK;
}
///Update the last access time
VOID OsSemDbgTimeUpdate(UINT32 semID)
{
SemDebugCB *semDebug = &g_semDebugArray[GET_SEM_INDEX(semID)];
semDebug->lastAccessTime = LOS_TickCountGet();//获取tick总数
semDebug->lastAccessTime = LOS_TickCountGet();
return;
}
///Update the semaphore debug info
VOID OsSemDbgUpdate(UINT32 semID, TSK_ENTRY_FUNC creator, UINT16 count)
{
SemDebugCB *semDebug = &g_semDebugArray[GET_SEM_INDEX(semID)];
semDebug->creator = creator; //改为由参数入口函数创建了这个任务
semDebug->lastAccessTime = LOS_TickCountGet();//获取tick总数
semDebug->origSemCount = count;//原始信号量改变
semDebug->creator = creator;
semDebug->lastAccessTime = LOS_TickCountGet();
semDebug->origSemCount = count;
return;
}
///Sort semaphores by last access time
STATIC VOID OsSemSort(UINT32 *semIndexArray, UINT32 usedCount)
{
UINT32 i, intSave;
@ -296,6 +296,6 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdSemInfoGet(UINT32 argc, const CHAR **arg
return ret;
}
SHELLCMD_ENTRY(sem_shellcmd, CMD_TYPE_EX, "sem", 1, (CmdCallBackFunc)OsShellCmdSemInfoGet);//采用shell命令静态注册方式
SHELLCMD_ENTRY(sem_shellcmd, CMD_TYPE_EX, "sem", 1, (CmdCallBackFunc)OsShellCmdSemInfoGet);
#endif

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -53,17 +53,15 @@ int raise(int sig)
#define GETUNMASKSET(procmask, pendFlag) ((~(procmask)) & (sigset_t)(pendFlag))
#define UINT64_BIT_SIZE 64
int OsSigIsMember(const sigset_t *set, int signo)
{
int ret = LOS_NOK;
/* In musl, sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
//在musl中sig No bits 00000100表示sig No 3但是在SIGNO2SET中 1<<3 = 00001000,因此signo需要减1
/* In musl, sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
signo -= 1;
/* Verify the signal */
if (GOOD_SIGNO(signo)) {//有效信号判断
if (GOOD_SIGNO(signo)) {
/* Check if the signal is in the set */
ret = ((*set & SIGNO2SET((unsigned int)signo)) != 0);//检查信号是否还在集合中
ret = ((*set & SIGNO2SET((unsigned int)signo)) != 0);
}
return ret;
@ -122,6 +120,7 @@ VOID OsClearSigInfoTmpList(sig_cb *sigcb)
(VOID)LOS_MemFree(m_aucSysMem0, tmpInfoNode);
}
}
STATIC INLINE VOID OsSigWaitTaskWake(LosTaskCB *taskCB, INT32 signo)
{
sig_cb *sigcb = &taskCB->sig;
@ -133,14 +132,14 @@ STATIC INLINE VOID OsSigWaitTaskWake(LosTaskCB *taskCB, INT32 signo)
OsSigEmptySet(&sigcb->sigwaitmask);
}
}
///< Wake a pended task that is waiting for the specified signal
STATIC UINT32 OsPendingTaskWake(LosTaskCB *taskCB, INT32 signo)
{
if (!OsTaskIsPending(taskCB) || !OsProcessIsUserMode(OS_PCB_FROM_TCB(taskCB))) {
return 0;
}
if ((signo != SIGKILL) && (taskCB->waitFlag != OS_TASK_WAIT_SIGNAL)) { // @note_thinking 这个判断会不会有问题 ?
if ((signo != SIGKILL) && (taskCB->waitFlag != OS_TASK_WAIT_SIGNAL)) {
return 0;
}
@ -154,16 +153,16 @@ STATIC UINT32 OsPendingTaskWake(LosTaskCB *taskCB, INT32 signo)
OsTaskWakeClearPendMask(taskCB);
taskCB->ops->wake(taskCB);
break;
case OS_TASK_WAIT_SIGNAL://等待普通信号
case OS_TASK_WAIT_SIGNAL:
OsSigWaitTaskWake(taskCB, signo);
break;
case OS_TASK_WAIT_LITEIPC://等待liteipc信号
OsTaskWakeClearPendMask(taskCB);//重置任务的等待信息
case OS_TASK_WAIT_LITEIPC:
OsTaskWakeClearPendMask(taskCB);
taskCB->ops->wake(taskCB);
break;
case OS_TASK_WAIT_FUTEX://等待快锁信号
OsFutexNodeDeleteFromFutexHash(&taskCB->futex, TRUE, NULL, NULL);//从哈希桶中删除快锁
OsTaskWakeClearPendMask(taskCB);//重置任务的等待信息
case OS_TASK_WAIT_FUTEX:
OsFutexNodeDeleteFromFutexHash(&taskCB->futex, TRUE, NULL, NULL);
OsTaskWakeClearPendMask(taskCB);
taskCB->ops->wake(taskCB);
break;
default:
@ -172,7 +171,7 @@ STATIC UINT32 OsPendingTaskWake(LosTaskCB *taskCB, INT32 signo)
return 0;
}
///Send a signal to a task (thread)
int OsTcbDispatch(LosTaskCB *stcb, siginfo_t *info)
{
bool masked = FALSE;
@ -180,19 +179,19 @@ int OsTcbDispatch(LosTaskCB *stcb, siginfo_t *info)
OS_RETURN_IF_NULL(sigcb);
/* If signo is 0, not send signal, just check process or pthread exist */
if (info->si_signo == 0) {//如果信号为0,则不发送信号,只是作为检查进程和线程是否还存在.
if (info->si_signo == 0) {
return 0;
}
masked = (bool)OsSigIsMember(&sigcb->sigprocmask, info->si_signo);//@note_thinking 这里还有 masked= -1的情况要处理!!!
if (masked) {//如果信号被屏蔽了,要看等待信号集,sigwaitmask
/* If signal is in wait list and mask list, need unblock it */ //如果信号在等待列表和掩码列表中,需要解除阻止
masked = (bool)OsSigIsMember(&sigcb->sigprocmask, info->si_signo);
if (masked) {
/* If signal is in wait list and mask list, need unblock it */
if (LOS_ListEmpty(&sigcb->waitList) ||
(!LOS_ListEmpty(&sigcb->waitList) && !OsSigIsMember(&sigcb->sigwaitmask, info->si_signo))) {
OsSigAddSet(&sigcb->sigPendFlag, info->si_signo);//将信号加入挂起/待办集
(!LOS_ListEmpty(&sigcb->waitList) && !OsSigIsMember(&sigcb->sigwaitmask, info->si_signo))) {
OsSigAddSet(&sigcb->sigPendFlag, info->si_signo);
}
} else {//信号没有被屏蔽的处理
} else {
/* unmasked signal actions */
OsSigAddSet(&sigcb->sigFlag, info->si_signo);//不屏蔽的信号集
OsSigAddSet(&sigcb->sigFlag, info->si_signo);
}
if (OsAddSigInfoToTmpList(sigcb, info) == LOS_NOK) {
@ -207,15 +206,14 @@ void OsSigMaskSwitch(LosTaskCB * const rtcb, sigset_t set)
sigset_t unmaskset;
rtcb->sig.sigprocmask = set;
unmaskset = GETUNMASKSET(rtcb->sig.sigprocmask, rtcb->sig.sigPendFlag);//过滤出没有被屏蔽的信号集
unmaskset = GETUNMASKSET(rtcb->sig.sigprocmask, rtcb->sig.sigPendFlag);
if (unmaskset != NULL_SIGNAL_SET) {
/* pendlist do */
rtcb->sig.sigFlag |= unmaskset; //加入不屏蔽信号集
rtcb->sig.sigPendFlag ^= unmaskset;//从挂起/待办集中去掉unmaskset
rtcb->sig.sigFlag |= unmaskset;
rtcb->sig.sigPendFlag ^= unmaskset;
}
}
int OsSigprocMask(int how, const sigset_t_l *setl, sigset_t_l *oldsetl)
{
LosTaskCB *spcb = NULL;
@ -225,11 +223,11 @@ int OsSigprocMask(int how, const sigset_t_l *setl, sigset_t_l *oldsetl)
SCHEDULER_LOCK(intSave);
spcb = OsCurrTaskGet();
/* If requested, copy the old mask to user. | 如果需要,请将旧掩码复制给用户*/
/* If requested, copy the old mask to user. */
if (oldsetl != NULL) {
*(sigset_t *)oldsetl = spcb->sig.sigprocmask;
}
/* If requested, modify the current signal mask. | 如有要求,修改当前信号屏蔽*/
/* If requested, modify the current signal mask. */
if (setl != NULL) {
set = *(sigset_t *)setl;
/* Okay, determine what we are supposed to do */
@ -238,46 +236,46 @@ int OsSigprocMask(int how, const sigset_t_l *setl, sigset_t_l *oldsetl)
* set pointed to by set as the new sigprocmask.
*/
case SIG_BLOCK:
spcb->sig.sigprocmask |= set;//增加信号屏蔽位
spcb->sig.sigprocmask |= set;
break;
/* Set the intersection of the current set and the
* signal set pointed to by set as the new sigprocmask.
*/
case SIG_UNBLOCK:
spcb->sig.sigprocmask &= ~(set);//解除信号屏蔽位
spcb->sig.sigprocmask &= ~(set);
break;
/* Set the signal set pointed to by set as the new sigprocmask. */
case SIG_SETMASK:
spcb->sig.sigprocmask = set;//设置一个新的屏蔽掩码
spcb->sig.sigprocmask = set;
break;
default:
ret = -EINVAL;
break;
}
/* If pending mask not in sigmask, need set sigflag. */
OsSigMaskSwitch(spcb, spcb->sig.sigprocmask);//更新与屏蔽信号相关的变量
OsSigMaskSwitch(spcb, spcb->sig.sigprocmask);
}
SCHEDULER_UNLOCK(intSave);
return ret;
}
///Run the given handler for every task of the process
int OsSigProcessForeachChild(LosProcessCB *spcb, ForEachTaskCB handler, void *arg)
{
int ret;
/* Visit the main thread last (if present) */
LosTaskCB *taskCB = NULL;//遍历进程的 threadList 链表,里面存放的都是task节点
LOS_DL_LIST_FOR_EACH_ENTRY(taskCB, &(spcb->threadSiblingList), LosTaskCB, threadList) {//遍历进程的任务列表
ret = handler(taskCB, arg);//回调参数函数
OS_RETURN_IF(ret != 0, ret);//这个宏的意思就是只有ret = 0时,啥也不处理.其余就返回 ret
LosTaskCB *taskCB = NULL;
LOS_DL_LIST_FOR_EACH_ENTRY(taskCB, &(spcb->threadSiblingList), LosTaskCB, threadList) {
ret = handler(taskCB, arg);
OS_RETURN_IF(ret != 0, ret);
}
return LOS_OK;
}
///Per-task signal handler; this is the handler (SigProcessSignalHandler) used in OsSigProcessSend
static int SigProcessSignalHandler(LosTaskCB *tcb, void *arg)
{
struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg;//先把参数解出来
struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg;
int ret;
int isMember;
@ -285,130 +283,128 @@ static int SigProcessSignalHandler(LosTaskCB *tcb, void *arg)
return 0;
}
/* If the default tcb is not setted, then set this one as default. */
if (!info->defaultTcb) {//如果没有默认发送方的任务,即默认参数任务.
/* If the default tcb is not set, then set this one as default. */
if (!info->defaultTcb) {
info->defaultTcb = tcb;
}
isMember = OsSigIsMember(&tcb->sig.sigwaitmask, info->sigInfo->si_signo);//任务是否在等待这个信号
if (isMember && (!info->awakenedTcb)) {//是在等待,并尚未向该任务时发送信号时
isMember = OsSigIsMember(&tcb->sig.sigwaitmask, info->sigInfo->si_signo);
if (isMember && (!info->awakenedTcb)) {
/* This means the task is waiting for this signal. Stop looking for it and use this tcb.
* The requirement is: if more than one task in this task group is waiting for the signal,
* then only one indeterminate task in the group will receive the signal.
*/
ret = OsTcbDispatch(tcb, info->sigInfo);//发送信号,注意这是给其他任务发送信号,tcb不是当前任务
OS_RETURN_IF(ret < 0, ret);//这种写法很有意思
ret = OsTcbDispatch(tcb, info->sigInfo);
OS_RETURN_IF(ret < 0, ret);
/* set this tcb as awakenedTcb */
info->awakenedTcb = tcb;
OS_RETURN_IF(info->receivedTcb != NULL, SIG_STOP_VISIT); /* Stop search */
}
/* Is this signal unblocked on this thread? */
isMember = OsSigIsMember(&tcb->sig.sigprocmask, info->sigInfo->si_signo);//任务是否屏蔽了这个信号
if ((!isMember) && (!info->receivedTcb) && (tcb != info->awakenedTcb)) {//没有屏蔽,有唤醒任务没有接收任务.
/* if unblockedTcb of this signal is not setted, then set it. */
isMember = OsSigIsMember(&tcb->sig.sigprocmask, info->sigInfo->si_signo);
if ((!isMember) && (!info->receivedTcb) && (tcb != info->awakenedTcb)) {
/* if unblockedTcb of this signal is not set, then set it. */
if (!info->unblockedTcb) {
info->unblockedTcb = tcb;
}
ret = OsTcbDispatch(tcb, info->sigInfo);//向任务发送信号
ret = OsTcbDispatch(tcb, info->sigInfo);
OS_RETURN_IF(ret < 0, ret);
/* set this tcb as receivedTcb */
info->receivedTcb = tcb;//设置这个任务为接收任务
info->receivedTcb = tcb;
OS_RETURN_IF(info->awakenedTcb != NULL, SIG_STOP_VISIT); /* Stop search */
}
return 0; /* Keep searching */
}
///After the process receives SIGKILL, notify each task (tcb) to handle it
static int SigProcessKillSigHandler(LosTaskCB *tcb, void *arg)
{
struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg;//转参
struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg;
return OsPendingTaskWake(tcb, info->sigInfo->si_signo);
}
//处理信号发送
static void SigProcessLoadTcb(struct ProcessSignalInfo *info, siginfo_t *sigInfo)
{
LosTaskCB *tcb = NULL;
if (info->awakenedTcb == NULL && info->receivedTcb == NULL) {//信号即没有指定接收task 也没有指定被唤醒task
if (info->unblockedTcb) {//如果进程信号信息体中有阻塞task
tcb = info->unblockedTcb;//
} else if (info->defaultTcb) {//如果有默认的发送方task
if (info->awakenedTcb == NULL && info->receivedTcb == NULL) {
if (info->unblockedTcb) {
tcb = info->unblockedTcb;
} else if (info->defaultTcb) {
tcb = info->defaultTcb;
} else {
return;
}
/* Deliver the signal to the selected task */
(void)OsTcbDispatch(tcb, sigInfo);//向所选任务发送信号
(void)OsTcbDispatch(tcb, sigInfo);
}
}
///Send the given signal to the given process
int OsSigProcessSend(LosProcessCB *spcb, siginfo_t *sigInfo)
{
int ret;
struct ProcessSignalInfo info = {
.sigInfo = sigInfo, //信号内容
.defaultTcb = NULL, //以下四个值将在OsSigProcessForeachChild中根据条件完善
.sigInfo = sigInfo,
.defaultTcb = NULL,
.unblockedTcb = NULL,
.awakenedTcb = NULL,
.receivedTcb = NULL
};
//In short, at least one task of the process must be picked to receive the signal; the
//preference order is awakenedTcb > receivedTcb > unblockedTcb > defaultTcb
if (info.sigInfo == NULL){
if (info.sigInfo == NULL) {
return -EFAULT;
}
/* visit all taskcb and dispatch signal */ //访问所有任务和分发信号
if (info.sigInfo->si_signo == SIGKILL) {//需要干掉进程时 SIGKILL = 9 #linux kill 9 14
OsSigAddSet(&spcb->sigShare, info.sigInfo->si_signo);//信号集中增加信号
/* visit all taskcb and dispatch signal */
if (info.sigInfo->si_signo == SIGKILL) {
OsSigAddSet(&spcb->sigShare, info.sigInfo->si_signo);
(void)OsSigProcessForeachChild(spcb, SigProcessKillSigHandler, &info);
return 0;
} else {
ret = OsSigProcessForeachChild(spcb, SigProcessSignalHandler, &info);//进程通知所有task处理信号
ret = OsSigProcessForeachChild(spcb, SigProcessSignalHandler, &info);
}
if (ret < 0) {
return ret;
}
SigProcessLoadTcb(&info, sigInfo);//确保能给一个任务发送信号
SigProcessLoadTcb(&info, sigInfo);
return 0;
}
///Clear the whole signal set to 0
int OsSigEmptySet(sigset_t *set)
{
*set = NULL_SIGNAL_SET;
return 0;
}
/* Privilege process can't send to kernel and privilege process */ //内核进程组和用户特权进程组无法发送
/* Privilege process can't send to kernel and privilege process */
static int OsSignalPermissionToCheck(const LosProcessCB *spcb)
{
UINTPTR gid = (UINTPTR)OS_GET_PGROUP_LEADER(spcb->pgroup);
if (gid == OS_KERNEL_PROCESS_GROUP) {//内核进程组
if (gid == OS_KERNEL_PROCESS_GROUP) {
return -EPERM;
} else if (gid == OS_USER_PRIVILEGE_PROCESS_GROUP) {//用户特权进程组
} else if (gid == OS_USER_PRIVILEGE_PROCESS_GROUP) {
return -EPERM;
}
return 0;
}
///Signal dispatch: permission and process-group checks before sending
STATIC int SendSigPermissionCheck(LosProcessCB *spcb, int permission)
{
if (spcb == NULL) {
return -ESRCH;
}
if (OsProcessIsUnused(spcb)) {//进程是否还在使用,不一定是当前进程但必须是个有效进程
if (OsProcessIsUnused(spcb)) {
return -ESRCH;
}
#ifdef LOSCFG_SECURITY_CAPABILITY //启用能力安全模式
LosProcessCB *current = OsCurrProcessGet();//获取当前进程,检查当前进程是否有发送信号的权限.
/* Kernel process always has kill permission and user process should check permission *///内核进程总是有kill权限用户进程需要检查权限
if (OsProcessIsUserMode(current) && !(current->processStatus & OS_PROCESS_FLAG_EXIT)) {//用户进程检查能力范围
#ifdef LOSCFG_SECURITY_CAPABILITY
LosProcessCB *current = OsCurrProcessGet();
/* Kernel process always has kill permission and user process should check permission */
if (OsProcessIsUserMode(current) && !(current->processStatus & OS_PROCESS_FLAG_EXIT)) {
if ((current != spcb) && (!IsCapPermit(CAP_KILL)) && (current->user->userID != spcb->user->userID)) {
return -EPERM;
}
@ -441,7 +437,7 @@ int OsSendSigToProcess(LosProcessCB *spcb, int sig, int permission)
info.si_code = SI_USER;
info.si_value.sival_ptr = NULL;
return OsSigProcessSend(spcb, &info);//给参数进程发送信号
return OsSigProcessSend(spcb, &info);
}
int OsDispatch(pid_t pid, siginfo_t *info, int permission)
@ -474,14 +470,14 @@ int OsKill(pid_t pid, int sig, int permission)
return -EINVAL;
}
/* Create the siginfo structure */ //创建信号结构体
info.si_signo = sig; //信号编号
info.si_code = SI_USER; //来自用户进程信号
/* Create the siginfo structure */
info.si_signo = sig;
info.si_code = SI_USER;
info.si_value.sival_ptr = NULL;
if (pid > 0) {
/* Send the signal to the specify process */
ret = OsDispatch(pid, &info, permission);//发送信号
ret = OsDispatch(pid, &info, permission);
} else if (pid == -1) {
/* Send SIG to all processes */
ret = OsSendSignalToAllProcess(&info, permission);
@ -493,17 +489,18 @@ int OsKill(pid_t pid, int sig, int permission)
}
return ret;
}
///Send a signal with the scheduler locked
int OsKillLock(pid_t pid, int sig)
{
int ret;
unsigned int intSave;
SCHEDULER_LOCK(intSave);
ret = OsKill(pid, sig, OS_USER_KILL_PERMISSION);//用户权限向进程发送信号
ret = OsKill(pid, sig, OS_USER_KILL_PERMISSION);
SCHEDULER_UNLOCK(intSave);
return ret;
}
INT32 OsTaskKillUnsafe(UINT32 taskID, INT32 signo)
{
siginfo_t info;
@ -522,7 +519,7 @@ INT32 OsTaskKillUnsafe(UINT32 taskID, INT32 signo)
* dispatch rules. */
return OsTcbDispatch(taskCB, &info);
}
///发送信号
int OsPthreadKill(UINT32 tid, int signo)
{
int ret;
@ -540,7 +537,7 @@ int OsPthreadKill(UINT32 tid, int signo)
SCHEDULER_UNLOCK(intSave);
return ret;
}
///向信号集中加入signo信号
int OsSigAddSet(sigset_t *set, int signo)
{
/* Verify the signal */
@ -548,13 +545,13 @@ int OsSigAddSet(sigset_t *set, int signo)
return -EINVAL;
} else {
/* In musl, sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
signo -= 1;// 信号范围是 [1 ~ 64 ],而保存变量位的范围是[0 ~ 63]
signo -= 1;
/* Add the signal to the set */
*set |= SIGNO2SET((unsigned int)signo);//填充信号集
*set |= SIGNO2SET((unsigned int)signo);
return LOS_OK;
}
}
///获取阻塞当前任务的信号集
int OsSigPending(sigset_t *set)
{
LosTaskCB *tcb = NULL;
@ -566,7 +563,7 @@ int OsSigPending(sigset_t *set)
SCHEDULER_LOCK(intSave);
tcb = OsCurrTaskGet();
*set = tcb->sig.sigPendFlag;//被阻塞的信号集
*set = tcb->sig.sigPendFlag;
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
@ -581,7 +578,7 @@ STATIC int FindFirstSetedBit(UINT64 n)
for (count = 0; (count < UINT64_BIT_SIZE) && (n ^ 1ULL); n >>= 1, count++) {}
return (count < UINT64_BIT_SIZE) ? count : (-1);
}
///等待信号时间
int OsSigTimedWaitNoLock(sigset_t *set, siginfo_t *info, unsigned int timeout)
{
LosTaskCB *task = NULL;
@ -592,19 +589,19 @@ int OsSigTimedWaitNoLock(sigset_t *set, siginfo_t *info, unsigned int timeout)
sigcb = &task->sig;
if (sigcb->waitList.pstNext == NULL) {
LOS_ListInit(&sigcb->waitList);//初始化信号等待链表
LOS_ListInit(&sigcb->waitList);
}
/* If pendingflag & set > 0, shound clear pending flag */
/* If pendingflag & set > 0, should clear pending flag */
sigset_t clear = sigcb->sigPendFlag & *set;
if (clear) {
sigcb->sigPendFlag ^= clear;
ret = FindFirstSetedBit((UINT64)clear) + 1;
OsMoveTmpInfoToUnbInfo(sigcb, ret);
} else {
OsSigAddSet(set, SIGKILL);//kill 9 14 必须要处理
OsSigAddSet(set, SIGSTOP);//终止进程的信号也必须处理
OsSigAddSet(set, SIGKILL);
OsSigAddSet(set, SIGSTOP);
sigcb->sigwaitmask |= *set;//按位加到等待集上,也就是说sigwaitmask的信号来了都是要处理的.
sigcb->sigwaitmask |= *set;
OsTaskWaitSetPendMask(OS_TASK_WAIT_SIGNAL, sigcb->sigwaitmask, timeout);
ret = task->ops->wait(task, &sigcb->waitList, timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
@ -617,7 +614,7 @@ int OsSigTimedWaitNoLock(sigset_t *set, siginfo_t *info, unsigned int timeout)
}
return ret;
}
///Make the current task wait for the given set of signals
int OsSigTimedWait(sigset_t *set, siginfo_t *info, unsigned int timeout)
{
int ret;
@ -625,12 +622,12 @@ int OsSigTimedWait(sigset_t *set, siginfo_t *info, unsigned int timeout)
SCHEDULER_LOCK(intSave);
ret = OsSigTimedWaitNoLock(set, info, timeout);//以不加锁的方式等待
ret = OsSigTimedWaitNoLock(set, info, timeout);
SCHEDULER_UNLOCK(intSave);
return ret;
}
///通过信号挂起当前任务
int OsPause(void)
{
LosTaskCB *spcb = NULL;
@ -640,7 +637,7 @@ int OsPause(void)
oldSigprocmask = spcb->sig.sigprocmask;
return OsSigSuspend(&oldSigprocmask);
}
///Replace the process signal mask with set and suspend execution until a signal arrives, then restore the original mask and resume
int OsSigSuspend(const sigset_t *set)
{
unsigned int intSave;
@ -680,7 +677,6 @@ int OsSigSuspend(const sigset_t *set)
return -EINTR;
}
int OsSigAction(int sig, const sigaction_t *act, sigaction_t *oact)
{
UINTPTR addr;
@ -689,17 +685,14 @@ int OsSigAction(int sig, const sigaction_t *act, sigaction_t *oact)
if (!GOOD_SIGNO(sig) || sig < 1 || act == NULL) {
return -EINVAL;
}
//将数据从用户空间拷贝到内核空间
if (LOS_ArchCopyFromUser(&action, act, sizeof(sigaction_t)) != LOS_OK) {
return -EFAULT;
}
if (sig == SIGSYS) {//here the kernel uses this "bad" system-call signal to install the signal handler, which is rather clever
addr = OsGetSigHandler();//is a signal handler already installed?
if (addr == 0) {//the process has not set a signal handler yet
OsSetSigHandler((unsigned long)(UINTPTR)action.sa_handler);//install the process signal handler
//void (*sa_handler)(int); //signal handler, basic form
//void (*sa_sigaction)(int, siginfo_t *, void *);//signal handler, extended form
if (LOS_ArchCopyFromUser(&action, act, sizeof(sigaction_t)) != LOS_OK) {
return -EFAULT;
}
if (sig == SIGSYS) {
addr = OsGetSigHandler();
if (addr == 0) {
OsSetSigHandler((unsigned long)(UINTPTR)action.sa_handler);
return LOS_OK;
}
return -EINVAL;
@ -724,11 +717,11 @@ VOID OsSigIntUnlock(VOID)
(VOID)LOS_AtomicSub((Atomic *)&sigcb->sigIntLock, 1);
}
VOID *OsSaveSignalContext(VOID *sp, VOID *newSp)
{
UINTPTR sigHandler;
UINT32 intSave;
LosTaskCB *task = OsCurrTaskGet();
LosProcessCB *process = OsCurrProcessGet();
sig_cb *sigcb = &task->sig;
@ -761,7 +754,7 @@ VOID *OsSaveSignalContext(VOID *sp, VOID *newSp)
OsProcessExitCodeSignalSet(process, signo);
sigcb->sigContext = sp;
OsInitSignalContext(sp, newSp, sigHandler, signo, sigVal);//初始化信号上下文
OsInitSignalContext(sp, newSp, sigHandler, signo, sigVal);
/* sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
sigcb->sigFlag ^= 1ULL << (signo - 1);
@ -774,7 +767,6 @@ VOID *OsSaveSignalContext(VOID *sp, VOID *newSp)
return sp;
}
VOID *OsRestorSignalContext(VOID *sp)
{
UINT32 intSave;
@ -793,8 +785,8 @@ VOID *OsRestorSignalContext(VOID *sp)
VOID *saveContext = sigcb->sigContext;
sigcb->sigContext = NULL;
sigcb->count--;
process->sigShare = 0; //回到用户态,信号共享清0
OsProcessExitCodeSignalClear(process);//清空进程退出码
process->sigShare = 0;
OsProcessExitCodeSignalClear(process);
SCHEDULER_UNLOCK(intSave);
return saveContext;
}
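The `signo - 1` adjustment that keeps appearing in OsSigIsMember, OsSigAddSet and OsSaveSignalContext is just a bit-numbering convention: signal n occupies bit n-1 of the 64-bit set. A small stand-alone sketch of that mapping (plain C mirroring SIGNO2SET, not a call into the kernel helpers):

    #include <stdint.h>
    #include <stdio.h>

    /* signal n is stored in bit (n - 1) of the 64-bit set, matching SIGNO2SET */
    static uint64_t sig_add(uint64_t set, int signo)    { return set | (1ULL << (signo - 1)); }
    static int      sig_member(uint64_t set, int signo) { return (int)((set >> (signo - 1)) & 1ULL); }

    int main(void)
    {
        uint64_t set = 0;
        set = sig_add(set, 3);   /* signal 3 sets bit 2, i.e. 0b100 */
        printf("set=0x%llx member(3)=%d member(9)=%d\n",
               (unsigned long long)set, sig_member(set, 3), sig_member(set, 9));
        return 0;
    }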

@ -32,9 +32,9 @@
#include "los_memstat_pri.h"
#include "los_task_pri.h"
/// Per-task record of memory usage
LITE_OS_SEC_BSS_MINOR STATIC TskMemUsedInfo g_tskMemUsedInfo[LOSCFG_BASE_CORE_TSK_LIMIT];
/// Increase the recorded memory usage of the specified task
LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemUsedInc(UINT32 usedSize, UINT32 taskID)
{
if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
@ -43,9 +43,9 @@ LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemUsedInc(UINT32 usedSize, UINT32 taskID)
if (OS_INT_ACTIVE) {
return;
}
g_tskMemUsedInfo[taskID].memUsed += usedSize; ///< 叠加
g_tskMemUsedInfo[taskID].memUsed += usedSize;
}
/// Decrease the recorded memory usage of the specified task
LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemUsedDec(UINT32 usedSize, UINT32 taskID)
{
if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
@ -59,9 +59,9 @@ LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemUsedDec(UINT32 usedSize, UINT32 taskID)
OsCurrTaskGet()->taskName, g_tskMemUsedInfo[taskID].memUsed, usedSize);
return;
}
g_tskMemUsedInfo[taskID].memUsed -= usedSize; ///< 递减
g_tskMemUsedInfo[taskID].memUsed -= usedSize;
}
/// Get the memory usage of the specified task
LITE_OS_SEC_TEXT_MINOR UINT32 OsTaskMemUsage(UINT32 taskID)
{
if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
@ -70,7 +70,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsTaskMemUsage(UINT32 taskID)
return g_tskMemUsedInfo[taskID].memUsed;
}
/// Clear the memory usage record of the task
LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemClear(UINT32 taskID)
{
if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
@ -82,8 +82,8 @@ LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemClear(UINT32 taskID)
}
g_tskMemUsedInfo[taskID].memUsed = 0;
}
// Slab is an allocator that divides memory into caches of fixed-size objects; it is used for caching kernel objects.
#ifdef LOS_MEM_SLAB //
#ifdef LOS_MEM_SLAB
LITE_OS_SEC_BSS_MINOR STATIC TskSlabUsedInfo g_tskSlabUsedInfo[LOSCFG_BASE_CORE_TSK_LIMIT];
LITE_OS_SEC_TEXT_MINOR VOID OsTaskSlabUsedInc(UINT32 usedSize, UINT32 taskID)

@ -1,22 +1,4 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list
* of conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CON/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
@ -51,211 +33,6 @@
#include "los_hwi.h"
#include "los_spinlock.h"
#ifdef LOSCFG_AARCH64
#define OS_MEMBOX_MAGIC 0xa55a5aa5a55a5aa5
#else
#define OS_MEMBOX_MAGIC 0xa55a5aa5
#endif
#define OS_MEMBOX_SET_MAGIC(addr) \
((LOS_MEMBOX_NODE *)(addr))->pstNext = (LOS_MEMBOX_NODE *)OS_MEMBOX_MAGIC //设置魔法数字
#define OS_MEMBOX_CHECK_MAGIC(addr) \
((((LOS_MEMBOX_NODE *)(addr))->pstNext == (LOS_MEMBOX_NODE *)OS_MEMBOX_MAGIC) ? LOS_OK : LOS_NOK)
#define OS_MEMBOX_USER_ADDR(addr) \
((VOID *)((UINT8 *)(addr) + OS_MEMBOX_NODE_HEAD_SIZE))
#define OS_MEMBOX_NODE_ADDR(addr) \
((LOS_MEMBOX_NODE *)(VOID *)((UINT8 *)(addr) - OS_MEMBOX_NODE_HEAD_SIZE)) //节块 = (节头 + 节体) addr = 节体
/* spinlock for mem module, only available on SMP mode */
LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_memboxSpin);
#define MEMBOX_LOCK(state) LOS_SpinLockSave(&g_memboxSpin, &(state)) ///< take the static-memory-pool spinlock
#define MEMBOX_UNLOCK(state) LOS_SpinUnlockRestore(&g_memboxSpin, (state))///< release the static-memory-pool spinlock
/// Sanity-check a static memory block
STATIC INLINE UINT32 OsCheckBoxMem(const LOS_MEMBOX_INFO *boxInfo, const VOID *node)
{
UINT32 offset;
if (boxInfo->uwBlkSize == 0) {
return LOS_NOK;
}
offset = (UINT32)((UINTPTR)node - (UINTPTR)(boxInfo + 1));
if ((offset % boxInfo->uwBlkSize) != 0) {
return LOS_NOK;
}
if ((offset / boxInfo->uwBlkSize) >= boxInfo->uwBlkNum) {
return LOS_NOK;
}
return OS_MEMBOX_CHECK_MAGIC(node);//检查魔法数字是否被修改过了
}
/// Initialize a static memory pool: its start address, total size and per-block size are set from the arguments
LITE_OS_SEC_TEXT_INIT UINT32 LOS_MemboxInit(VOID *pool, UINT32 poolSize, UINT32 blkSize)
{
LOS_MEMBOX_INFO *boxInfo = (LOS_MEMBOX_INFO *)pool;//在内存起始处安置池头
LOS_MEMBOX_NODE *node = NULL;
UINT32 index;
UINT32 intSave;
if (pool == NULL) {
return LOS_NOK;
}
if (blkSize == 0) {
return LOS_NOK;
}
if (poolSize < sizeof(LOS_MEMBOX_INFO)) {
return LOS_NOK;
}
MEMBOX_LOCK(intSave);
boxInfo->uwBlkSize = LOS_MEMBOX_ALIGNED(blkSize + OS_MEMBOX_NODE_HEAD_SIZE); //节块总大小(节头+节体)
boxInfo->uwBlkNum = (poolSize - sizeof(LOS_MEMBOX_INFO)) / boxInfo->uwBlkSize;//总节块数量
boxInfo->uwBlkCnt = 0; //已分配的数量
if (boxInfo->uwBlkNum == 0) {//只有0块的情况
MEMBOX_UNLOCK(intSave);
return LOS_NOK;
}
node = (LOS_MEMBOX_NODE *)(boxInfo + 1);//去除池头,找到第一个节块位置
boxInfo->stFreeList.pstNext = node;//池头空闲链表指向第一个节块
for (index = 0; index < boxInfo->uwBlkNum - 1; ++index) {//切割节块,挂入空闲链表
node->pstNext = OS_MEMBOX_NEXT(node, boxInfo->uwBlkSize);//按块大小切割好,统一由pstNext指向
node = node->pstNext;//node存储了下一个节点的地址信息
}
node->pstNext = NULL;//最后一个为null
MEMBOX_UNLOCK(intSave);
return LOS_OK;
}
/// Allocate one block from the specified static memory pool; in the whole kernel source only OsSwtmrScan uses static memory
LITE_OS_SEC_TEXT VOID *LOS_MemboxAlloc(VOID *pool)
{
LOS_MEMBOX_INFO *boxInfo = (LOS_MEMBOX_INFO *)pool;
LOS_MEMBOX_NODE *node = NULL;
LOS_MEMBOX_NODE *nodeTmp = NULL;
UINT32 intSave;
if (pool == NULL) {
return NULL;
}
MEMBOX_LOCK(intSave);
node = &(boxInfo->stFreeList);//拿到空闲单链表
if (node->pstNext != NULL) {//不需要遍历链表,因为这是空闲链表
nodeTmp = node->pstNext;//先记录要使用的节点
node->pstNext = nodeTmp->pstNext;//不再空闲了,把节点摘出去了.
OS_MEMBOX_SET_MAGIC(nodeTmp);//为已使用的节块设置魔法数字
boxInfo->uwBlkCnt++;//已使用块数增加
}
MEMBOX_UNLOCK(intSave);
return (nodeTmp == NULL) ? NULL : OS_MEMBOX_USER_ADDR(nodeTmp);//返回可用的虚拟地址
}
/// Free the specified static memory block
LITE_OS_SEC_TEXT UINT32 LOS_MemboxFree(VOID *pool, VOID *box)
{
LOS_MEMBOX_INFO *boxInfo = (LOS_MEMBOX_INFO *)pool;
UINT32 ret = LOS_NOK;
UINT32 intSave;
if ((pool == NULL) || (box == NULL)) {
return LOS_NOK;
}
MEMBOX_LOCK(intSave);
do {
LOS_MEMBOX_NODE *node = OS_MEMBOX_NODE_ADDR(box);//通过节体获取节块首地址
if (OsCheckBoxMem(boxInfo, node) != LOS_OK) {
break;
}
node->pstNext = boxInfo->stFreeList.pstNext;//节块指向空闲链表表头
boxInfo->stFreeList.pstNext = node;//空闲链表表头反指向它,意味节块排到第一,下次申请将首个分配它
boxInfo->uwBlkCnt--;//已经使用的内存块减一
ret = LOS_OK;
} while (0);//将被编译时优化
MEMBOX_UNLOCK(intSave);
return ret;
}
/// Zero the contents of the specified static memory block
LITE_OS_SEC_TEXT_MINOR VOID LOS_MemboxClr(VOID *pool, VOID *box)
{
LOS_MEMBOX_INFO *boxInfo = (LOS_MEMBOX_INFO *)pool;
if ((pool == NULL) || (box == NULL)) {
return;
}
//将魔法数字一并清除了.
(VOID)memset_s(box, (boxInfo->uwBlkSize - OS_MEMBOX_NODE_HEAD_SIZE), 0,
(boxInfo->uwBlkSize - OS_MEMBOX_NODE_HEAD_SIZE));
}
/// Print every node of the specified static memory pool (at LOS_INFO_LEVEL), including the pool start address,
/// block size, total number of blocks, the start address of each free block and of every block
LITE_OS_SEC_TEXT_MINOR VOID LOS_ShowBox(VOID *pool)
{
UINT32 index;
UINT32 intSave;
LOS_MEMBOX_INFO *boxInfo = (LOS_MEMBOX_INFO *)pool;
LOS_MEMBOX_NODE *node = NULL;
if (pool == NULL) {
return;
}
MEMBOX_LOCK(intSave);
PRINT_INFO("membox(%p,0x%x,0x%x):\r\n", pool, boxInfo->uwBlkSize, boxInfo->uwBlkNum);
PRINT_INFO("free node list:\r\n");
for (node = boxInfo->stFreeList.pstNext, index = 0; node != NULL;
node = node->pstNext, ++index) {
PRINT_INFO("(%u,%p)\r\n", index, node);
}
PRINT_INFO("all node list:\r\n");
node = (LOS_MEMBOX_NODE *)(boxInfo + 1);
for (index = 0; index < boxInfo->uwBlkNum; ++index, node = OS_MEMBOX_NEXT(node, boxInfo->uwBlkSize)) {
PRINT_INFO("(%u,%p,%p)\r\n", index, node, node->pstNext);
}
MEMBOX_UNLOCK(intSave);
}
/// Get statistics of the specified static memory pool: total number of blocks, number of allocated blocks and block size
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_MemboxStatisticsGet(const VOID *boxMem, UINT32 *maxBlk,
UINT32 *blkCnt, UINT32 *blkSize)
{
if ((boxMem == NULL) || (maxBlk == NULL) || (blkCnt == NULL) || (blkSize == NULL)) {
return LOS_NOK;
}
*maxBlk = ((OS_MEMBOX_S *)boxMem)->uwBlkNum;
*blkCnt = ((OS_MEMBOX_S *)boxMem)->uwBlkCnt;
*blkSize = ((OS_MEMBOX_S *)boxMem)->uwBlkSize;
return LOS_OK;
}
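The old membox code above is a plain fixed-block allocator: LOS_MemboxInit carves the pool into uwBlkNum equal blocks behind a LOS_MEMBOX_INFO header and strings them on a free list, LOS_MemboxAlloc pops that list, and LOS_MemboxFree pushes the block back. A minimal usage sketch of those calls (the pool and block sizes are made up for illustration):

    #include "los_membox.h"

    #define BOX_POOL_SIZE 1024U   /* illustrative values only */
    #define BOX_BLK_SIZE  32U

    STATIC UINT32 g_boxPool[BOX_POOL_SIZE / sizeof(UINT32)];

    VOID MemboxExample(VOID)
    {
        VOID *blk = NULL;

        if (LOS_MemboxInit(g_boxPool, BOX_POOL_SIZE, BOX_BLK_SIZE) != LOS_OK) {
            return;
        }
        blk = LOS_MemboxAlloc(g_boxPool);   /* returns the user part of one fixed-size block, or NULL */
        if (blk != NULL) {
            LOS_MemboxClr(g_boxPool, blk);  /* zero the block body */
            (VOID)LOS_MemboxFree(g_boxPool, blk);
        }
    }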
TRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "los_membox.h"
#include "los_hwi.h"
#include "los_spinlock.h"
#ifdef LOSCFG_AARCH64
#define OS_MEMBOX_MAGIC 0xa55a5aa5a55a5aa5

File diff suppressed because it is too large Load Diff

@ -48,23 +48,6 @@
#include "shell.h"
#endif
/*********************************************
kill [signo | -signo] [pid]
signo: signal ID, suggested range [1,30]
pid:   process ID, range [1,MAX_INT]
signo accepts [0,64], but values outside [1,30] are reserved and should not be used
if the system supports at most 256 processes, the valid pid range is [1-256]
*********************************************/
LITE_OS_SEC_TEXT_MINOR VOID OsPrintKillUsage(VOID)
{
PRINTK("\nkill: usage: kill [sigspec] [pid]\n");

@ -31,10 +31,7 @@
#include "los_task_pri.h"
// This function takes an address addr and an alignment boundary and returns the aligned address.
// If addr + boundary - 1 does not overflow, it returns (addr + boundary - 1) & ~(UINTPTR)(boundary - 1);
// otherwise it returns addr & ~(UINTPTR)(boundary - 1). In effect the address is rounded up to the boundary.
LITE_OS_SEC_TEXT UINTPTR LOS_Align(UINTPTR addr, UINT32 boundary)
{
if ((addr + boundary - 1) > addr) {
@ -44,14 +41,13 @@ LITE_OS_SEC_TEXT UINTPTR LOS_Align(UINTPTR addr, UINT32 boundary)
}
}
LITE_OS_SEC_TEXT_MINOR VOID LOS_Msleep(UINT32 msecs)
{
UINT32 interval;
// This function takes a millisecond count msecs; if msecs is 0 the interval is set to 0.
if (msecs == 0) {
interval = 0;
// Otherwise msecs is converted to ticks; if the converted interval is 0 it is bumped to 1, and LOS_TaskDelay is called with that interval.
} else {
interval = LOS_MS2Tick(msecs);
if (interval == 0) {
interval = 1;

@ -37,53 +37,28 @@
#include "shcmd.h"
#include "shell.h"
#endif
/**
* @file los_stackinfo.c
* @brief Exception stack information
* @verbatim
@note_pic OsExcStackInfo layout: the exception stacks of all CPU cores sit next to each other, one region per core (SMP).
__undef_stack(SMP)
+-------------------+ <--- cpu1 top
|                   |
|    CPU core1      |
|                   |
+--------------------<--- cpu2 top
|                   |
|    cpu core 2     |
|                   |
+--------------------<--- cpu3 top
|                   |
|    cpu core 3     |
|                   |
+--------------------<--- cpu4 top
|                   |
|    cpu core 4     |
|                   |
+-------------------+
* @endverbatim
*/
const StackInfo *g_stackInfo = NULL; ///< stack info for every CPU working mode
UINT32 g_stackNum; ///< number of stacks across CPU working modes
///Get the stack "waterline" (peak usage)
const StackInfo *g_stackInfo = NULL;
UINT32 g_stackNum;
UINT32 OsStackWaterLineGet(const UINTPTR *stackBottom, const UINTPTR *stackTop, UINT32 *peakUsed)
{
UINT32 size;
const UINTPTR *tmp = NULL;
if (*stackTop == OS_STACK_MAGIC_WORD) {//栈顶值是否等于 magic 0xCCCCCCCC
if (*stackTop == OS_STACK_MAGIC_WORD) {
tmp = stackTop + 1;
while ((tmp < stackBottom) && (*tmp == OS_STACK_INIT)) {//记录从栈顶到栈低有多少个连续的 0xCACACACA
while ((tmp < stackBottom) && (*tmp == OS_STACK_INIT)) {
tmp++;
}
size = (UINT32)((UINTPTR)stackBottom - (UINTPTR)tmp);//剩余多少非0xCACACACA的栈空间
*peakUsed = (size == 0) ? size : (size + sizeof(CHAR *));//得出高峰用值,还剩多少可用
size = (UINT32)((UINTPTR)stackBottom - (UINTPTR)tmp);
*peakUsed = (size == 0) ? size : (size + sizeof(CHAR *));
return LOS_OK;
} else {
*peakUsed = OS_INVALID_WATERLINE;//栈溢出了
*peakUsed = OS_INVALID_WATERLINE;
return LOS_NOK;
}
}
///Stack check on exception: mainly verifies that the magic word at the stack top has not been overwritten
VOID OsExcStackCheck(VOID)
{
UINT32 index;
@ -96,7 +71,7 @@ VOID OsExcStackCheck(VOID)
for (index = 0; index < g_stackNum; index++) {
for (cpuid = 0; cpuid < LOSCFG_KERNEL_CORE_NUM; cpuid++) {
stackTop = (UINTPTR *)((UINTPTR)g_stackInfo[index].stackTop + cpuid * g_stackInfo[index].stackSize);
if (*stackTop != OS_STACK_MAGIC_WORD) {// 只要栈顶内容不是 0xCCCCCCCCC 就是溢出了.
if (*stackTop != OS_STACK_MAGIC_WORD) {
PRINT_ERR("cpu:%u %s overflow , magic word changed to 0x%x\n",
LOSCFG_KERNEL_CORE_NUM - 1 - cpuid, g_stackInfo[index].stackName, *stackTop);
}
@ -104,7 +79,6 @@ VOID OsExcStackCheck(VOID)
}
}
///Print the stack information of every CPU
VOID OsExcStackInfo(VOID)
{
UINT32 index;
@ -119,37 +93,36 @@ VOID OsExcStackInfo(VOID)
PrintExcInfo("\n stack name cpu id stack addr total size used size\n"
" ---------- ------ --------- -------- --------\n");
for (index = 0; index < g_stackNum; index++) {
for (cpuid = 0; cpuid < LOSCFG_KERNEL_CORE_NUM; cpuid++) {//可以看出 各个CPU的栈是紧挨的的
for (cpuid = 0; cpuid < LOSCFG_KERNEL_CORE_NUM; cpuid++) {
stackTop = (UINTPTR *)((UINTPTR)g_stackInfo[index].stackTop + cpuid * g_stackInfo[index].stackSize);
stack = (UINTPTR *)((UINTPTR)stackTop + g_stackInfo[index].stackSize);
(VOID)OsStackWaterLineGet(stack, stackTop, &size);//获取吃水线, 鸿蒙用WaterLine 这个词用的很妙
(VOID)OsStackWaterLineGet(stack, stackTop, &size);
PrintExcInfo("%11s %-5d %-10p 0x%-8x 0x%-4x\n", g_stackInfo[index].stackName,
LOSCFG_KERNEL_CORE_NUM - 1 - cpuid, stackTop, g_stackInfo[index].stackSize, size);
}
}
OsExcStackCheck();//发生异常时栈检查
OsExcStackCheck();
}
///注册栈信息
VOID OsExcStackInfoReg(const StackInfo *stackInfo, UINT32 stackNum)
{
g_stackInfo = stackInfo; //全局变量指向g_excStack
g_stackInfo = stackInfo;
g_stackNum = stackNum;
}
///Initialize a task stack with fixed fill values: 0xCCCCCCCC at the top and 0xCACACACA in the body
VOID OsStackInit(VOID *stacktop, UINT32 stacksize)
{
/* initialize the task stack, write magic num to stack top */
errno_t ret = memset_s(stacktop, stacksize, (INT32)OS_STACK_INIT, stacksize);//清一色填 0xCACACACA
errno_t ret = memset_s(stacktop, stacksize, (INT32)OS_STACK_INIT, stacksize);
if (ret == EOK) {
*((UINTPTR *)stacktop) = OS_STACK_MAGIC_WORD;//0xCCCCCCCCC 中文就是"烫烫烫烫" 这几个字懂点计算机的人都不会陌生了.
*((UINTPTR *)stacktop) = OS_STACK_MAGIC_WORD;
}
}
#ifdef LOSCFG_SHELL_CMD_DEBUG
SHELLCMD_ENTRY(stack_shellcmd, CMD_TYPE_EX, "stack", 1, (CmdCallBackFunc)OsExcStackInfo);//采用shell命令静态注册方式
SHELLCMD_ENTRY(stack_shellcmd, CMD_TYPE_EX, "stack", 1, (CmdCallBackFunc)OsExcStackInfo);
#endif
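The waterline logic above relies on the two fill patterns set in OsStackInit: the stack top holds OS_STACK_MAGIC_WORD and the body is filled with OS_STACK_INIT, so peak usage is whatever part of the body no longer shows the init pattern. A simplified stand-alone sketch of the same calculation (constants and types are illustrative; the real code walks UINTPTR words and adds a pointer-size correction when the stack has been touched):

    #include <stdint.h>
    #include <stddef.h>

    #define STACK_MAGIC_WORD 0xCCCCCCCCU   /* value checked at the stack top */
    #define STACK_INIT       0xCACACACAU   /* fill pattern for the unused part */

    /* returns peak used bytes, or (size_t)-1 if the magic word was overwritten (overflow) */
    static size_t stack_peak_used(const uint32_t *stack_top, const uint32_t *stack_bottom)
    {
        const uint32_t *p;

        if (*stack_top != STACK_MAGIC_WORD) {
            return (size_t)-1;   /* waterline invalid: the stack overflowed */
        }
        for (p = stack_top + 1; (p < stack_bottom) && (*p == STACK_INIT); p++) {
            /* skip the untouched part of the stack */
        }
        return (size_t)((const uint8_t *)stack_bottom - (const uint8_t *)p);
    }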

@ -193,7 +193,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdUname(INT32 argc, const CHAR *argv[])
if (argc == 1) {
if (strcmp(argv[0], "-a") == 0) {
PRINTK("%s %d.%d.%d.%d %s %s\n", KERNEL_NAME, KERNEL_MAJOR, KERNEL_MINOR, KERNEL_PATCH, KERNEL_ITRE,\
PRINTK("%s %d.%d.%d.%d %s %s\n", KERNEL_NAME, KERNEL_MAJOR, KERNEL_MINOR, KERNEL_PATCH, KERNEL_ITRE, \
__DATE__, __TIME__);
return 0;
} else if (strcmp(argv[0], "-s") == 0) {

@ -58,21 +58,20 @@ STATIC VOID OsPrintSwtmrMsg(const SWTMR_CTRL_S *swtmr)
(VOID)LOS_SwtmrTimeGet(swtmr->usTimerID, &ticks);
PRINTK("%7u%10s%8s%12u%7u%#12x%#12x\n",
swtmr->usTimerID % LOSCFG_BASE_CORE_SWTMR_LIMIT, //software timer ID
g_shellSwtmrStatus[swtmr->ucState], //timer state: "UnUsed", "Created" or "Ticking"
g_shellSwtmrMode[swtmr->ucMode], //timer mode: "Once", "Period" or "NSD" (one-shot timer not deleted automatically after it expires)
swtmr->uwInterval, //number of ticks the timer uses
swtmr->usTimerID % LOSCFG_BASE_CORE_SWTMR_LIMIT,
g_shellSwtmrStatus[swtmr->ucState],
g_shellSwtmrMode[swtmr->ucMode],
swtmr->uwInterval,
ticks,
swtmr->uwArg, //argument passed in
swtmr->pfnHandler); //address of the callback function
swtmr->uwArg,
swtmr->pfnHandler);
}
STATIC INLINE VOID OsPrintSwtmrMsgHead(VOID)
{
PRINTK("\r\nSwTmrID State Mode Interval Count Arg handlerAddr\n");
}
///The swtmr shell command queries information about the system software timers.
//With no argument it shows information about all software timers by default.
STATIC UINT32 SwtmrBaseInfoGet(UINT32 timerID)
{
SWTMR_CTRL_S *swtmr = g_swtmrCBArray;
@ -174,6 +173,7 @@ SWTMR_HELP:
PRINTK(" swtmr ID --- Specifies information about a software timer.\n");
return LOS_OK;
}
SHELLCMD_ENTRY(swtmr_shellcmd, CMD_TYPE_EX, "swtmr", 1, (CmdCallBackFunc)OsShellCmdSwtmrInfoGet);//采用shell命令静态注册方式
SHELLCMD_ENTRY(swtmr_shellcmd, CMD_TYPE_EX, "swtmr", 1, (CmdCallBackFunc)OsShellCmdSwtmrInfoGet);
#endif /* LOSCFG_SHELL */

@ -118,7 +118,7 @@ UINT32 OsShellCmdSwtmrCntGet(VOID)
LOS_IntRestore(intSave);
return swtmrCnt;
}
///Show system resource usage
LITE_OS_SEC_TEXT_MINOR VOID OsShellCmdSystemInfoGet(VOID)
{
UINT8 isTaskEnable = TRUE;
@ -137,27 +137,27 @@ LITE_OS_SEC_TEXT_MINOR VOID OsShellCmdSystemInfoGet(VOID)
#else
UINT8 isSwtmrEnable = FALSE;
#endif
//模块名称 当前使用量 最大可用量 模块是否开启
PRINTK("\n Module Used Total Enabled\n");
PRINTK("--------------------------------------------\n");
PRINTK(" Task %-10u%-10d%s\n",
OsShellCmdTaskCntGet(), //有效任务数
LOSCFG_BASE_CORE_TSK_LIMIT, //任务最大数 128
SYSINFO_ENABLED(isTaskEnable));//任务是否失效 YES or NO
OsShellCmdTaskCntGet(),
LOSCFG_BASE_CORE_TSK_LIMIT,
SYSINFO_ENABLED(isTaskEnable));
PRINTK(" Sem %-10u%-10d%s\n",
OsShellCmdSemCntGet(), //信号量的数量
LOSCFG_BASE_IPC_SEM_LIMIT, //信号量最大数 1024
SYSINFO_ENABLED(isSemEnable));//信号量是否失效 YES or NO
OsShellCmdSemCntGet(),
LOSCFG_BASE_IPC_SEM_LIMIT,
SYSINFO_ENABLED(isSemEnable));
PRINTK(" Queue %-10u%-10d%s\n",
OsShellCmdQueueCntGet(), //队列的数量
LOSCFG_BASE_IPC_QUEUE_LIMIT, //队列的最大数 1024
SYSINFO_ENABLED(isQueueEnable));//队列是否失效 YES or NO
OsShellCmdQueueCntGet(),
LOSCFG_BASE_IPC_QUEUE_LIMIT,
SYSINFO_ENABLED(isQueueEnable));
PRINTK(" SwTmr %-10u%-10d%s\n",
OsShellCmdSwtmrCntGet(), //定时器的数量
LOSCFG_BASE_CORE_SWTMR_LIMIT, //定时器的总数 1024
SYSINFO_ENABLED(isSwtmrEnable)); //定时器是否失效 YES or NO
OsShellCmdSwtmrCntGet(),
LOSCFG_BASE_CORE_SWTMR_LIMIT,
SYSINFO_ENABLED(isSwtmrEnable));
}
///The systeminfo command shows the current resource usage of the OS: tasks, semaphores, mutexes, queues, software timers, etc.
INT32 OsShellCmdSystemInfo(INT32 argc, const CHAR **argv)
{
if (argc == 0) {

@ -54,7 +54,7 @@
#define VMM_CMD "vmm"
#define OOM_CMD "oom"
#define VMM_PMM_CMD "v2p"
//dump内核空间
LITE_OS_SEC_TEXT_MINOR VOID OsDumpKernelAspace(VOID)
{
LosVmSpace *kAspace = LOS_GetKVmSpace();
@ -104,26 +104,26 @@ LITE_OS_SEC_TEXT_MINOR VOID OsDoDumpVm(pid_t pid)
PRINTK("\tThe process [%d] not active\n", pid);
}
}
///Show a process's virtual memory usage. Usage: vmm [-a / -h / --help] or vmm [pid]
LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdDumpVm(INT32 argc, const CHAR *argv[])
{
if (argc == 0) { //没有参数 使用 # vmm 查看所有进程使用虚拟内存的情况
if (argc == 0) {
OsDumpAllAspace();
} else if (argc == 1) {
pid_t pid = OsPid(argv[0]);
if (strcmp(argv[0], "-a") == 0) { //# vmm -a 查看所有进程使用虚拟内存的情况
if (strcmp(argv[0], "-a") == 0) {
OsDumpAllAspace();
} else if (strcmp(argv[0], "-k") == 0) {//# vmm -k 查看内核进程使用虚拟内存的情况
} else if (strcmp(argv[0], "-k") == 0) {
OsDumpKernelAspace();
} else if (pid >= 0) { //# vmm 3 查看3号进程使用虚拟内存的情况
} else if (pid >= 0) {
OsDoDumpVm(pid);
} else if (strcmp(argv[0], "-h") == 0 || strcmp(argv[0], "--help") == 0) { //# vmm -h 或者 vmm --help
} else if (strcmp(argv[0], "-h") == 0 || strcmp(argv[0], "--help") == 0) {
OsPrintUsage();
} else {
PRINTK("%s: invalid option: %s\n", VMM_CMD, argv[0]); //格式错误,输出规范格式
PRINTK("%s: invalid option: %s\n", VMM_CMD, argv[0]);
OsPrintUsage();
}
} else { //多于一个参数 例如 # vmm 3 9
} else {
OsPrintUsage();
}
@ -135,7 +135,7 @@ LITE_OS_SEC_TEXT_MINOR VOID V2PPrintUsage(VOID)
PRINTK("pid vaddr(0x1000000~0x3e000000), print physical address of virtual address\n"
"-h | --help, print v2p command usage\n");
}
///v2p: show the physical memory backing a virtual address
LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdV2P(INT32 argc, const CHAR *argv[])
{
UINT32 vaddr;
@ -180,7 +180,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdV2P(INT32 argc, const CHAR *argv[])
return LOS_OK;
}
///Show usage of physical pages and page-cache pages; this command (# pmm) is only available in debug builds
LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdDumpPmm(VOID)
{
OsVmPhysDump();
@ -192,13 +192,12 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdDumpPmm(VOID)
LITE_OS_SEC_TEXT_MINOR VOID OomPrintUsage(VOID)
{
PRINTK("\t-i [interval], set oom check interval (ms)\n" //设置oom线程任务检查的时间间隔。
"\t-m [mem byte], set oom low memory threshold (Byte)\n" //设置低内存阈值。
"\t-r [mem byte], set page cache reclaim memory threshold (Byte)\n" //设置pagecache内存回收阈值。
"\t-h | --help, print vmm command usage\n"); //使用帮助。
PRINTK("\t-i [interval], set oom check interval (ms)\n"
"\t-m [mem byte], set oom low memory threshold (Byte)\n"
"\t-r [mem byte], set page cache reclaim memory threshold (Byte)\n"
"\t-h | --help, print vmm command usage\n");
}
///Show and set the low-memory threshold and the page-cache reclaim threshold; with no arguments it shows the current oom configuration.
//When system memory runs low, an out-of-memory warning is printed.
LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[])
{
UINT32 lowMemThreshold;
@ -220,7 +219,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[])
PRINTK("[oom] low mem threshold %s(byte) invalid.\n", argv[1]);
return OS_ERROR;
} else {
OomSetLowMemThreashold(lowMemThreshold);//设置低内存阈值
OomSetLowMemThreashold(lowMemThreshold);
}
} else if (strcmp(argv[0], "-i") == 0) {
checkInterval = strtoul((CHAR *)argv[1], &endPtr, 0);
@ -228,7 +227,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[])
PRINTK("[oom] check interval %s(us) invalid.\n", argv[1]);
return OS_ERROR;
} else {
OomSetCheckInterval(checkInterval);//设置oom线程任务检查的时间间隔
OomSetCheckInterval(checkInterval);
}
} else if (strcmp(argv[0], "-r") == 0) {
reclaimMemThreshold = strtoul((CHAR *)argv[1], &endPtr, 0);
@ -236,7 +235,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[])
PRINTK("[oom] reclaim mem threshold %s(byte) invalid.\n", argv[1]);
return OS_ERROR;
} else {
OomSetReclaimMemThreashold(reclaimMemThreshold);//设置pagecache内存回收阈值
OomSetReclaimMemThreashold(reclaimMemThreshold);
}
} else {
PRINTK("%s: invalid option: %s %s\n", OOM_CMD, argv[0], argv[1]);
@ -251,13 +250,13 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[])
}
#ifdef LOSCFG_SHELL_CMD_DEBUG
SHELLCMD_ENTRY(oom_shellcmd, CMD_TYPE_SHOW, OOM_CMD, 2, (CmdCallBackFunc)OsShellCmdOom);//采用shell命令静态注册方式
SHELLCMD_ENTRY(vm_shellcmd, CMD_TYPE_SHOW, VMM_CMD, 1, (CmdCallBackFunc)OsShellCmdDumpVm);//采用shell命令静态注册方式 vmm
SHELLCMD_ENTRY(v2p_shellcmd, CMD_TYPE_SHOW, VMM_PMM_CMD, 1, (CmdCallBackFunc)OsShellCmdV2P);//采用shell命令静态注册方式 v2p
SHELLCMD_ENTRY(oom_shellcmd, CMD_TYPE_SHOW, OOM_CMD, 2, (CmdCallBackFunc)OsShellCmdOom);
SHELLCMD_ENTRY(vm_shellcmd, CMD_TYPE_SHOW, VMM_CMD, 1, (CmdCallBackFunc)OsShellCmdDumpVm);
SHELLCMD_ENTRY(v2p_shellcmd, CMD_TYPE_SHOW, VMM_PMM_CMD, 1, (CmdCallBackFunc)OsShellCmdV2P);
#endif
#ifdef LOSCFG_SHELL
SHELLCMD_ENTRY(pmm_shellcmd, CMD_TYPE_SHOW, "pmm", 0, (CmdCallBackFunc)OsShellCmdDumpPmm);//采用shell命令静态注册方式
SHELLCMD_ENTRY(pmm_shellcmd, CMD_TYPE_SHOW, "pmm", 0, (CmdCallBackFunc)OsShellCmdDumpPmm);
#endif
#endif

@ -36,43 +36,43 @@
#include "los_swtmr.h"
#include "los_task_pri.h"
#ifdef LOSCFG_KERNEL_SMP
//给参数CPU发送调度信号
#ifdef LOSCFG_KERNEL_SMP_CALL
LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_mpCallSpin);
#define MP_CALL_LOCK(state) LOS_SpinLockSave(&g_mpCallSpin, &(state))
#define MP_CALL_UNLOCK(state) LOS_SpinUnlockRestore(&g_mpCallSpin, (state))
#endif
VOID LOS_MpSchedule(UINT32 target)//target每位对应CPU core
VOID LOS_MpSchedule(UINT32 target)
{
UINT32 cpuid = ArchCurrCpuid();
target &= ~(1U << cpuid);//获取除了自身之外的其他CPU
HalIrqSendIpi(target, LOS_MP_IPI_SCHEDULE);//向目标CPU发送调度信号,核间中断(Inter-Processor Interrupts),IPI
target &= ~(1U << cpuid);
HalIrqSendIpi(target, LOS_MP_IPI_SCHEDULE);
}
///硬中断唤醒处理函数
VOID OsMpWakeHandler(VOID)
{
/* generic wakeup ipi, do nothing */
}
///Handler of the schedule IPI (hard interrupt)
VOID OsMpScheduleHandler(VOID)
{//set the schedule flag to something different from the wake handler, so the scheduler is triggered at the end of the irq
{
/*
* set schedule flag to differ from wake function,
* so that the scheduler can be triggered at the end of irq.
*/
OsSchedRunqueuePendingSet();
}
///硬中断暂停处理函数
VOID OsMpHaltHandler(VOID)
{
(VOID)LOS_IntLock();
OsPercpuGet()->excFlag = CPU_HALT;//让当前Cpu停止工作
OsPercpuGet()->excFlag = CPU_HALT;
while (1) {}//陷入空循环,也就是空闲状态
while (1) {}
}
///MP timer handler: goes through all available tasks
VOID OsMpCollectTasks(VOID)
{
LosTaskCB *taskCB = NULL;
@ -80,19 +80,19 @@ VOID OsMpCollectTasks(VOID)
UINT32 ret;
/* recursive checking all the available task */
for (; taskID <= g_taskMaxNum; taskID++) { //递归检查所有可用任务
for (; taskID <= g_taskMaxNum; taskID++) {
taskCB = &g_taskCBArray[taskID];
if (OsTaskIsUnused(taskCB) || OsTaskIsRunning(taskCB)) {
continue;
}
/* Although the task status is not atomic, this check may succeed yet fail to complete the deletion; the deletion will then be handled before the next run.
/*
* though task status is not atomic, this check may success but not accomplish
* the deletion; this deletion will be handled until the next run.
*/
if (taskCB->signal & SIGNAL_KILL) {//任务收到被干掉信号
ret = LOS_TaskDelete(taskID);//干掉任务,回归任务池
if (taskCB->signal & SIGNAL_KILL) {
ret = LOS_TaskDelete(taskID);
if (ret != LOS_OK) {
PRINT_WARN("GC collect task failed err:0x%x\n", ret);
}
@ -101,7 +101,6 @@ VOID OsMpCollectTasks(VOID)
}
#ifdef LOSCFG_KERNEL_SMP_CALL
VOID OsMpFuncCall(UINT32 target, SMP_FUNC_CALL func, VOID *args)
{
UINT32 index;
@ -111,13 +110,13 @@ VOID OsMpFuncCall(UINT32 target, SMP_FUNC_CALL func, VOID *args)
return;
}
if (!(target & OS_MP_CPU_ALL)) {//检查目标CPU是否正确
if (!(target & OS_MP_CPU_ALL)) {
return;
}
for (index = 0; index < LOSCFG_KERNEL_CORE_NUM; index++) {//遍历所有核
for (index = 0; index < LOSCFG_KERNEL_CORE_NUM; index++) {
if (CPUID_TO_AFFI_MASK(index) & target) {
MpCallFunc *mpCallFunc = (MpCallFunc *)LOS_MemAlloc(m_aucSysMem0, sizeof(MpCallFunc));//从内核空间 分配回调结构体
MpCallFunc *mpCallFunc = (MpCallFunc *)LOS_MemAlloc(m_aucSysMem0, sizeof(MpCallFunc));
if (mpCallFunc == NULL) {
PRINT_ERR("smp func call malloc failed\n");
return;
@ -126,66 +125,59 @@ VOID OsMpFuncCall(UINT32 target, SMP_FUNC_CALL func, VOID *args)
mpCallFunc->args = args;
MP_CALL_LOCK(intSave);
LOS_ListAdd(&g_percpu[index].funcLink, &(mpCallFunc->node));//将回调结构体挂入链表尾部
LOS_ListAdd(&g_percpu[index].funcLink, &(mpCallFunc->node));
MP_CALL_UNLOCK(intSave);
}
}
HalIrqSendIpi(target, LOS_MP_IPI_FUNC_CALL);//向目标CPU发起核间中断
HalIrqSendIpi(target, LOS_MP_IPI_FUNC_CALL);
}
/*!
 * @brief OsMpFuncCallHandler
 * Drain and run the cross-CPU function calls queued for the current CPU.
 * @return none
 *
 * @see OsMpFuncCall
 */
VOID OsMpFuncCallHandler(VOID)
{
UINT32 intSave;
UINT32 cpuid = ArchCurrCpuid();//获取当前CPU
UINT32 cpuid = ArchCurrCpuid();
LOS_DL_LIST *list = NULL;
MpCallFunc *mpCallFunc = NULL;
MP_CALL_LOCK(intSave);
while (!LOS_ListEmpty(&g_percpu[cpuid].funcLink)) {//遍历回调函数链表,知道为空
list = LOS_DL_LIST_FIRST(&g_percpu[cpuid].funcLink);//获取链表第一个数据
LOS_ListDelete(list);//将自己从链表上摘除
while (!LOS_ListEmpty(&g_percpu[cpuid].funcLink)) {
list = LOS_DL_LIST_FIRST(&g_percpu[cpuid].funcLink);
LOS_ListDelete(list);
MP_CALL_UNLOCK(intSave);
mpCallFunc = LOS_DL_LIST_ENTRY(list, MpCallFunc, node);//获取回调函数
mpCallFunc->func(mpCallFunc->args);//获取参数并回调该函数
(VOID)LOS_MemFree(m_aucSysMem0, mpCallFunc);//释放回调函数内存
mpCallFunc = LOS_DL_LIST_ENTRY(list, MpCallFunc, node);
mpCallFunc->func(mpCallFunc->args);
(VOID)LOS_MemFree(m_aucSysMem0, mpCallFunc);
MP_CALL_LOCK(intSave);
}
MP_CALL_UNLOCK(intSave);
}
/// Initialize the per-CPU function-call module
VOID OsMpFuncCallInit(VOID)
{
UINT32 index;
/* init funclink for each core | 为每个CPU核整一个回调函数链表*/
/* init funclink for each core */
for (index = 0; index < LOSCFG_KERNEL_CORE_NUM; index++) {
LOS_ListInit(&g_percpu[index].funcLink);//链表初始化
LOS_ListInit(&g_percpu[index].funcLink);
}
}
#endif /* LOSCFG_KERNEL_SMP_CALL */
//MP (multiprocessing) multi-core initialization
UINT32 OsMpInit(VOID)
{
UINT16 swtmrId;
(VOID)LOS_SwtmrCreate(OS_MP_GC_PERIOD, LOS_SWTMR_MODE_PERIOD, //create a periodic timer with a period of 100 ticks
(SWTMR_PROC_FUNC)OsMpCollectTasks, &swtmrId, 0);//OsMpCollectTasks is the expiry callback
(VOID)LOS_SwtmrStart(swtmrId);//start the periodic collection
(VOID)LOS_SwtmrCreate(OS_MP_GC_PERIOD, LOS_SWTMR_MODE_PERIOD,
(SWTMR_PROC_FUNC)OsMpCollectTasks, &swtmrId, 0);
(VOID)LOS_SwtmrStart(swtmrId);
#ifdef LOSCFG_KERNEL_SMP_CALL
OsMpFuncCallInit();
#endif
return LOS_OK;
}
LOS_MODULE_INIT(OsMpInit, LOS_INIT_LEVEL_KMOD_TASK);//多处理器模块初始化
LOS_MODULE_INIT(OsMpInit, LOS_INIT_LEVEL_KMOD_TASK);
#endif
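OsMpFuncCall above queues an MpCallFunc node on each target core's funcLink list and then raises the LOS_MP_IPI_FUNC_CALL IPI; OsMpFuncCallHandler drains that list on the receiving core. A minimal hedged sketch of calling it (the header name, the callback and its argument are assumptions made for illustration, not taken from this diff):

    #include "los_mp.h"

    /* hypothetical callback: runs on every core that receives the IPI */
    STATIC VOID FlushLocalState(VOID *arg)
    {
        (VOID)arg;
        /* per-CPU work goes here, e.g. invalidating a core-local cache entry */
    }

    VOID BroadcastFlush(VOID)
    {
    #ifdef LOSCFG_KERNEL_SMP_CALL
        /* OS_MP_CPU_ALL targets every core; the callback runs from IRQ context on each of them */
        OsMpFuncCall(OS_MP_CPU_ALL, FlushLocalState, NULL);
    #endif
    }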

@ -33,7 +33,7 @@
#include "los_printf.h"
#ifdef LOSCFG_KERNEL_SMP
Percpu g_percpu[LOSCFG_KERNEL_CORE_NUM]; ///< per-CPU pool; its size is determined by the number of CPU cores
Percpu g_percpu[LOSCFG_KERNEL_CORE_NUM];
VOID OsAllCpuStatusOutput(VOID)
{

@ -31,29 +31,9 @@
#include "los_err.h"
LITE_OS_SEC_BSS STATIC LOS_ERRORHANDLE_FUNC g_errHandleHook = NULL;
LITE_OS_SEC_BSS STATIC LOS_ERRORHANDLE_FUNC g_errHandleHook = NULL;///< hook function that takes over error handling
/**
 * @brief Default error handler.
 * @param fileName file where the error occurred, "os_unspecific_file" if unspecified
 * @param lineNo   line number of the error, 0xa1b2c3f8 if unspecified
 * @param errorNo  error code
 * @param paraLen  length of the extra parameter para, 0 if there is none
 * @param para     extra error information, NULL if there is none
 * @return LOS_OK
 */
LITE_OS_SEC_TEXT_INIT UINT32 LOS_ErrHandle(CHAR *fileName, UINT32 lineNo, UINT32 errorNo,
UINT32 paraLen, VOID *para)
{
@ -63,7 +43,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_ErrHandle(CHAR *fileName, UINT32 lineNo, UINT32
return LOS_OK;
}
///Set the hook function that handles errors
LITE_OS_SEC_TEXT_INIT VOID LOS_SetErrHandleHook(LOS_ERRORHANDLE_FUNC fun)
{
g_errHandleHook = fun;

@ -44,7 +44,7 @@ STATIC VOID IdleTimeSliceUpdate(SchedRunqueue *rq, LosTaskCB *taskCB, UINT64 cur
STATIC INT32 IdleParamCompare(const SchedPolicy *sp1, const SchedPolicy *sp2);
STATIC VOID IdlePriorityInheritance(LosTaskCB *owner, const SchedParam *param);
STATIC VOID IdlePriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param);
// idle scheduling operations
const STATIC SchedOps g_idleOps = {
.dequeue = IdleDequeue,
.enqueue = IdleEnqueue,

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022-2022 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2022-2023 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -43,8 +43,6 @@
#define OS_SCHED_READY_MAX 30
#define OS_TIME_SLICE_MIN (INT32)((50 * OS_SYS_NS_PER_US) / OS_NS_PER_CYCLE) /* 50us */
// priority-number based scheduling: Highest-Priority-First (HPF)
STATIC HPFRunqueue g_schedHPF;
STATIC VOID HPFDequeue(SchedRunqueue *rq, LosTaskCB *taskCB);
@ -65,7 +63,7 @@ STATIC VOID HPFTimeSliceUpdate(SchedRunqueue *rq, LosTaskCB *taskCB, UINT64 curr
STATIC INT32 HPFParamCompare(const SchedPolicy *sp1, const SchedPolicy *sp2);
STATIC VOID HPFPriorityInheritance(LosTaskCB *owner, const SchedParam *param);
STATIC VOID HPFPriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param);
// operations of the priority scheduling policy
const STATIC SchedOps g_priorityOps = {
.dequeue = HPFDequeue,
.enqueue = HPFEnqueue,
@ -245,7 +243,7 @@ STATIC INLINE VOID PriQueInsert(HPFRunqueue *rq, LosTaskCB *taskCB)
taskCB->taskStatus &= ~OS_TASK_STATUS_BLOCKED;
taskCB->taskStatus |= OS_TASK_STATUS_READY;
}
// enqueue into the ready queue
STATIC VOID HPFEnqueue(SchedRunqueue *rq, LosTaskCB *taskCB)
{
#ifdef LOSCFG_SCHED_HPF_DEBUG
@ -255,14 +253,14 @@ STATIC VOID HPFEnqueue(SchedRunqueue *rq, LosTaskCB *taskCB)
#endif
PriQueInsert(rq->hpfRunqueue, taskCB);
}
// dequeue from the ready queue
STATIC VOID HPFDequeue(SchedRunqueue *rq, LosTaskCB *taskCB)
{
SchedHPF *sched = (SchedHPF *)&taskCB->sp;
if (taskCB->taskStatus & OS_TASK_STATUS_READY) {// only dequeue if the task is in the ready state
if (taskCB->taskStatus & OS_TASK_STATUS_READY) {
PriQueDelete(rq->hpfRunqueue, sched->basePrio, &taskCB->pendList, sched->priority);
taskCB->taskStatus &= ~OS_TASK_STATUS_READY;// clear the ready state
taskCB->taskStatus &= ~OS_TASK_STATUS_READY;
}
}
@ -477,7 +475,7 @@ STATIC VOID HPFPriorityInheritance(LosTaskCB *owner, const SchedParam *param)
LOS_BitmapSet(&sp->priBitmap, sp->priority);
sp->priority = param->priority;
}
/// restore the task's priority
STATIC VOID HPFPriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param)
{
UINT16 priority;
@ -500,8 +498,8 @@ STATIC VOID HPFPriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const
}
if ((list != NULL) && !LOS_ListEmpty((LOS_DL_LIST *)list)) {
priority = LOS_HighBitGet(sp->priBitmap);// highest priority recorded in the bitmap during past scheduling
LOS_DL_LIST_FOR_EACH_ENTRY(pendedTask, list, LosTaskCB, pendList) {// walk the pend list
priority = LOS_HighBitGet(sp->priBitmap);
LOS_DL_LIST_FOR_EACH_ENTRY(pendedTask, list, LosTaskCB, pendList) {
SchedHPF *pendSp = (SchedHPF *)&pendedTask->sp;
if ((pendedTask->ops == owner->ops) && (priority != pendSp->priority)) {
LOS_BitmapClr(&sp->priBitmap, pendSp->priority);
@ -539,7 +537,7 @@ VOID HPFProcessDefaultSchedParamGet(SchedParam *param)
{
param->basePrio = OS_USER_PROCESS_PRIORITY_HIGHEST;
}
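/*
 * Illustrative sketch (plain C, not kernel code) of the kind of bookkeeping a priority
 * bitmap such as priBitmap enables: setting bit p records that priority p was held, and
 * the highest recorded bit can be recovered later when the original priority is restored.
 * Helper names are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

static void BitmapSet(uint32_t *bitmap, unsigned pos) { *bitmap |= (1u << (pos & 31)); }
static void BitmapClr(uint32_t *bitmap, unsigned pos) { *bitmap &= ~(1u << (pos & 31)); }

static int BitmapHighBit(uint32_t bitmap)             /* index of the highest set bit, -1 if empty */
{
    int pos = -1;
    for (int i = 0; i < 32; i++) {
        if (bitmap & (1u << i)) {
            pos = i;
        }
    }
    return pos;
}

int main(void)
{
    uint32_t priBitmap = 0;
    BitmapSet(&priBitmap, 25);       /* task originally ran at priority 25 */
    BitmapSet(&priBitmap, 10);       /* later boosted through priority 10  */
    printf("restore to priority %d\n", BitmapHighBit(priBitmap));   /* -> 25 */
    BitmapClr(&priBitmap, 25);
    return 0;
}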
// initialize the HPF scheduling policy
VOID HPFSchedPolicyInit(SchedRunqueue *rq)
{
if (ArchCurrCpuid() > 0) {

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -30,7 +30,7 @@
*/
#include "los_sortlink_pri.h"
/// initialize the sort link (a list kept ordered by response time)
VOID OsSortLinkInit(SortLinkAttribute *sortLinkHeader)
{
LOS_ListInit(&sortLinkHeader->sortLink);
@ -38,47 +38,38 @@ VOID OsSortLinkInit(SortLinkAttribute *sortLinkHeader)
sortLinkHeader->nodeNum = 0;
}
/*!
* @brief AddNode2SortLink: insert a node into the sort link, keeping the list ordered by responseTime
*
* @param sortLinkHeader head of the sort link
* @param sortList node to insert
* @return VOID
*
* @see
*/
STATIC INLINE VOID AddNode2SortLink(SortLinkAttribute *sortLinkHeader, SortLinkList *sortList)
{
LOS_DL_LIST *head = (LOS_DL_LIST *)&sortLinkHeader->sortLink; // the doubly linked list head
LOS_DL_LIST *head = (LOS_DL_LIST *)&sortLinkHeader->sortLink;
if (LOS_ListEmpty(head)) { // empty list: insert directly
LOS_ListHeadInsert(head, &sortList->sortLinkNode);// insert the node at the head
sortLinkHeader->nodeNum++;// one more node for this CPU to manage
if (LOS_ListEmpty(head)) {
LOS_ListHeadInsert(head, &sortList->sortLinkNode);
sortLinkHeader->nodeNum++;
return;
}
// non-empty list: handle the three cases where responseTime is smaller than, equal to, or larger than the first node's
SortLinkList *listSorted = LOS_DL_LIST_ENTRY(head->pstNext, SortLinkList, sortLinkNode);
if (listSorted->responseTime > sortList->responseTime) {// the new node has the smallest responseTime
LOS_ListAdd(head, &sortList->sortLinkNode);// so insert it at the first position
sortLinkHeader->nodeNum++;// one more node for this CPU
return;// done
} else if (listSorted->responseTime == sortList->responseTime) {// equal responseTime
LOS_ListAdd(head->pstNext, &sortList->sortLinkNode);// insert right after the first node
if (listSorted->responseTime > sortList->responseTime) {
LOS_ListAdd(head, &sortList->sortLinkNode);
sortLinkHeader->nodeNum++;
return;
} else if (listSorted->responseTime == sortList->responseTime) {
LOS_ListAdd(head->pstNext, &sortList->sortLinkNode);
sortLinkHeader->nodeNum++;
return;
}
// responseTime is larger than the first node's: walk the list
LOS_DL_LIST *prevNode = head->pstPrev;// start from the tail node, which holds the largest responseTime
do { // @note_good scanning backwards is where the doubly linked list shines
listSorted = LOS_DL_LIST_ENTRY(prevNode, SortLinkList, sortLinkNode);// walk node by node, from larger to smaller responseTime
if (listSorted->responseTime <= sortList->responseTime) {// first node not larger than the new one: insert after it
LOS_DL_LIST *prevNode = head->pstPrev;
do {
listSorted = LOS_DL_LIST_ENTRY(prevNode, SortLinkList, sortLinkNode);
if (listSorted->responseTime <= sortList->responseTime) {
LOS_ListAdd(prevNode, &sortList->sortLinkNode);
sortLinkHeader->nodeNum++;
break;
}
prevNode = prevNode->pstPrev;// move to the previous node, which has a smaller responseTime
} while (1);// loop until the node is inserted
prevNode = prevNode->pstPrev;
} while (1);
}
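/*
 * Illustrative sketch (plain C, not kernel code) of the insertion strategy above: keep a
 * doubly linked list ordered by responseTime and, for values larger than the head, scan
 * from the tail, where the largest value lives, until a node with a smaller-or-equal time
 * is found. Structure and helper names are invented for the example.
 */
#include <stdio.h>

typedef struct Node {
    struct Node *prev, *next;
    unsigned long long responseTime;
} Node;

static void ListInit(Node *head) { head->prev = head->next = head; }

static void InsertAfter(Node *pos, Node *node)   /* splice node right after pos */
{
    node->next = pos->next;
    node->prev = pos;
    pos->next->prev = node;
    pos->next = node;
}

static void SortedInsert(Node *head, Node *node)
{
    Node *prev = head->prev;                     /* start at the tail (largest time) */
    while (prev != head && prev->responseTime > node->responseTime) {
        prev = prev->prev;
    }
    InsertAfter(prev, node);                     /* also covers the empty-list case */
}

int main(void)
{
    Node head, a = {0, 0, 30}, b = {0, 0, 10}, c = {0, 0, 20};
    ListInit(&head);
    SortedInsert(&head, &a);
    SortedInsert(&head, &b);
    SortedInsert(&head, &c);
    for (Node *n = head.next; n != &head; n = n->next) {
        printf("%llu ", n->responseTime);        /* prints: 10 20 30 */
    }
    printf("\n");
    return 0;
}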
VOID OsAdd2SortLink(SortLinkAttribute *head, SortLinkList *node, UINT64 responseTime, UINT16 idleCpu)

@ -159,26 +159,26 @@ UINT32 OsKProcessPmUsage(LosVmSpace *kSpace, UINT32 *actualPm)
/* Kernel resident memory, include default heap memory */
memUsed = SYS_MEM_SIZE_DEFAULT - (totalCount << PAGE_SHIFT);
spaceList = LOS_GetVmSpaceList();// list that links all virtual address spaces
spaceList = LOS_GetVmSpaceList();
LosMux *vmSpaceListMux = OsGVmSpaceMuxGet();
(VOID)LOS_MuxAcquire(vmSpaceListMux);
LOS_DL_LIST_FOR_EACH_ENTRY(space, spaceList, LosVmSpace, node) {// walk the space list
if (space == LOS_GetKVmSpace()) {// the kernel space is not counted
LOS_DL_LIST_FOR_EACH_ENTRY(space, spaceList, LosVmSpace, node) {
if (space == LOS_GetKVmSpace()) {
continue;
}
UProcessUsed += OsUProcessPmUsage(space, NULL, NULL);
}
(VOID)LOS_MuxRelease(vmSpaceListMux);
/* Kernel dynamic memory, include extended heap memory */
/* Kernel dynamic memory, include extended heap memory */
memUsed += ((usedCount << PAGE_SHIFT) - UProcessUsed);
/* Remaining heap memory */
/* Remaining heap memory */
memUsed -= freeMem;
*actualPm = memUsed;
return memUsed;
}
/// physical memory usage of a shell task
UINT32 OsShellCmdProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actualPm)
{
if (space == NULL) {
@ -194,7 +194,7 @@ UINT32 OsShellCmdProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actu
}
return OsUProcessPmUsage(space, sharePm, actualPm);
}
/// physical memory usage of a virtual address space; sharePm and actualPm return the shared and actual physical memory, in bytes
UINT32 OsUProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actualPm)
{
LosVmMapRegion *region = NULL;
@ -230,10 +230,10 @@ UINT32 OsUProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actualPm)
continue;
}
shareRef = LOS_AtomicRead(&page->refCounts);// a refcount above 1 means other address spaces also reference this page
shareRef = LOS_AtomicRead(&page->refCounts);
if (shareRef > 1) {
if (sharePm != NULL) {
*sharePm += PAGE_SIZE;// one page is 4K bytes
*sharePm += PAGE_SIZE;
}
pmSize += PAGE_SIZE / shareRef;
} else {
@ -250,9 +250,7 @@ UINT32 OsUProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actualPm)
return pmSize;
}
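/*
 * Illustrative sketch (plain C, not kernel code) of the proportional accounting above: a
 * page referenced by shareRef address spaces contributes PAGE_SIZE/shareRef bytes to this
 * process, and shared pages are additionally summed into sharePm. Names are invented.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int ProcessPmUsage(const int *refCounts, int nPages, unsigned int *sharePm)
{
    unsigned int pmSize = 0;
    *sharePm = 0;
    for (int i = 0; i < nPages; i++) {
        if (refCounts[i] > 1) {              /* shared with other address spaces */
            *sharePm += PAGE_SIZE;
            pmSize += PAGE_SIZE / (unsigned int)refCounts[i];
        } else {
            pmSize += PAGE_SIZE;             /* privately owned page */
        }
    }
    return pmSize;
}

int main(void)
{
    int refs[] = {1, 2, 4};                  /* one private page, two shared pages */
    unsigned int sharePm;
    unsigned int pss = ProcessPmUsage(refs, 3, &sharePm);
    printf("pss=%u sharePm=%u\n", pss, sharePm);  /* 4096 + 2048 + 1024, and 8192 shared */
    return 0;
}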
/// @brief find the process that owns the given virtual address space
/// @param space the virtual address space
/// @return the owning process control block, or NULL if none matches
LosProcessCB *OsGetPIDByAspace(const LosVmSpace *space)
{
UINT32 pid;
@ -260,13 +258,13 @@ LosProcessCB *OsGetPIDByAspace(const LosVmSpace *space)
LosProcessCB *processCB = NULL;
SCHEDULER_LOCK(intSave);
for (pid = 0; pid < g_processMaxNum; ++pid) {// scan the process pool, which is essentially an array
for (pid = 0; pid < g_processMaxNum; ++pid) {
processCB = g_processCBArray + pid;
if (OsProcessIsUnused(processCB)) {// skip slots that have not been allocated
if (OsProcessIsUnused(processCB)) {
continue;
}
if (processCB->vmSpace == space) {// found the owner
if (processCB->vmSpace == space) {
SCHEDULER_UNLOCK(intSave);
return processCB;
}
@ -274,11 +272,7 @@ LosProcessCB *OsGetPIDByAspace(const LosVmSpace *space)
SCHEDULER_UNLOCK(intSave);
return NULL;
}
/// @brief count the pages of one region in a virtual address space
/// @param space the virtual address space
/// @param region the region to count
/// @param pssPages out-parameter for the proportional page count (shared pages weighted by their refcount)
/// @return number of mapped pages in the region
UINT32 OsCountRegionPages(LosVmSpace *space, LosVmMapRegion *region, UINT32 *pssPages)
{
UINT32 regionPages = 0;
@ -312,7 +306,7 @@ UINT32 OsCountRegionPages(LosVmSpace *space, LosVmMapRegion *region, UINT32 *pss
return regionPages;
}
/// count the total pages of a virtual address space
UINT32 OsCountAspacePages(LosVmSpace *space)
{
UINT32 spacePages = 0;
@ -396,30 +390,27 @@ VOID OsDumpRegion2(LosVmSpace *space, LosVmMapRegion *region)
region->range.size, flagsStr, regionPages, pssPages);
(VOID)LOS_MemFree(m_aucSysMem0, flagsStr);
}
/// dump the information of the given virtual address space
VOID OsDumpAspace(LosVmSpace *space)
{
LosVmMapRegion *region = NULL;
LosRbNode *pstRbNode = NULL;
LosRbNode *pstRbNodeNext = NULL;
UINT32 spacePages;
LosProcessCB *pcb = OsGetPIDByAspace(space);// find the process that owns this address space
LosProcessCB *pcb = OsGetPIDByAspace(space);
if (pcb == NULL) {
return;
}
// process ID | VM control block address | VM base address | VM size | physical pages in use
spacePages = OsCountAspacePages(space);// count the pages of the space
spacePages = OsCountAspacePages(space);
PRINTK("\r\n PID aspace name base size pages \n");
PRINTK(" ---- ------ ---- ---- ----- ----\n");
PRINTK(" %-4d %#010x %-10.10s %#010x %#010x %d\n", pcb->processID, space, pcb->processName,
space->base, space->size, spacePages);
// region control block address | region type | region base | region size | region MMU flags | physical pages in use (including shared) | pages per reference
PRINTK("\r\n\t region name base size mmu_flags pages pg/ref\n");
PRINTK("\t ------ ---- ---- ---- --------- ----- -----\n");
RB_SCAN_SAFE(&space->regionRbTree, pstRbNode, pstRbNodeNext)// walk and account region by region
RB_SCAN_SAFE(&space->regionRbTree, pstRbNode, pstRbNodeNext)
region = (LosVmMapRegion *)pstRbNode;
if (region != NULL) {
OsDumpRegion2(space, region);
@ -430,14 +421,14 @@ VOID OsDumpAspace(LosVmSpace *space)
RB_SCAN_SAFE_END(&space->regionRbTree, pstRbNode, pstRbNodeNext)
return;
}
/// dump the virtual memory usage of every process
VOID OsDumpAllAspace(VOID)
{
LosVmSpace *space = NULL;
LOS_DL_LIST *aspaceList = LOS_GetVmSpaceList();// list of all address spaces
LOS_DL_LIST_FOR_EACH_ENTRY(space, aspaceList, LosVmSpace, node) {// iterate over each process's virtual space
LOS_DL_LIST *aspaceList = LOS_GetVmSpaceList();
LOS_DL_LIST_FOR_EACH_ENTRY(space, aspaceList, LosVmSpace, node) {
(VOID)LOS_MuxAcquire(&space->regionMux);
OsDumpAspace(space);// dump this space
OsDumpAspace(space);
(VOID)LOS_MuxRelease(&space->regionMux);
}
return;
@ -456,11 +447,11 @@ STATUS_T OsRegionOverlapCheck(LosVmSpace *space, LosVmMapRegion *region)
(VOID)LOS_MuxRelease(&space->regionMux);
return ret;
}
/// dump the page table entries of a virtual address
VOID OsDumpPte(VADDR_T vaddr)
{
UINT32 l1Index = vaddr >> MMU_DESCRIPTOR_L1_SMALL_SHIFT;
LosVmSpace *space = LOS_SpaceGet(vaddr);// resolve the owning space; the kernel distinguishes kernel process space, kernel heap space and user process space
LosVmSpace *space = LOS_SpaceGet(vaddr);
UINT32 ttEntry;
LosVmPage *page = NULL;
PTE_T *l2Table = NULL;
@ -470,27 +461,27 @@ VOID OsDumpPte(VADDR_T vaddr)
return;
}
ttEntry = space->archMmu.virtTtb[l1Index];// fetch the L1 page table entry
ttEntry = space->archMmu.virtTtb[l1Index];
if (ttEntry) {
l2Table = LOS_PaddrToKVaddr(MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(ttEntry));// locate the L2 table referenced by the L1 entry
l2Index = (vaddr % MMU_DESCRIPTOR_L1_SMALL_SIZE) >> PAGE_SHIFT;// compute the L2 index
l2Table = LOS_PaddrToKVaddr(MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(ttEntry));
l2Index = (vaddr % MMU_DESCRIPTOR_L1_SMALL_SIZE) >> PAGE_SHIFT;
if (l2Table == NULL) {
goto ERR;
}
page = LOS_VmPageGet(l2Table[l2Index] & ~(PAGE_SIZE - 1));// get the physical page frame
page = LOS_VmPageGet(l2Table[l2Index] & ~(PAGE_SIZE - 1));
if (page == NULL) {
goto ERR;
}
PRINTK("vaddr %p, l1Index %d, ttEntry %p, l2Table %p, l2Index %d, pfn %p count %d\n",
vaddr, l1Index, ttEntry, l2Table, l2Index, l2Table[l2Index], LOS_AtomicRead(&page->refCounts));// print the L1/L2 entries
} else {// no L1 mapping
vaddr, l1Index, ttEntry, l2Table, l2Index, l2Table[l2Index], LOS_AtomicRead(&page->refCounts));
} else {
PRINTK("vaddr %p, l1Index %d, ttEntry %p\n", vaddr, l1Index, ttEntry);
}
return;
ERR:
PRINTK("%s, error vaddr: %#x, l2Table: %#x, l2Index: %#x\n", __FUNCTION__, vaddr, l2Table, l2Index);
}
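/*
 * Illustrative sketch (plain C, not kernel code) of the index arithmetic in OsDumpPte,
 * assuming the ARM32 short-descriptor layout: 1 MiB L1 sections (shift 20) and 4 KiB pages
 * (shift 12). The constants below are local to the example.
 */
#include <stdio.h>

#define L1_SHIFT   20u                      /* one L1 entry covers 1 MiB           */
#define L1_SIZE    (1u << L1_SHIFT)
#define PAGE_SHIFT 12u                      /* one L2 entry covers a 4 KiB page    */

int main(void)
{
    unsigned int vaddr = 0x40123456u;       /* an arbitrary virtual address        */
    unsigned int l1Index = vaddr >> L1_SHIFT;               /* which 1 MiB section */
    unsigned int l2Index = (vaddr % L1_SIZE) >> PAGE_SHIFT; /* which 4 KiB page    */
    unsigned int pageOff = vaddr & ((1u << PAGE_SHIFT) - 1);/* offset in the page  */
    printf("l1Index=%u l2Index=%u pageOffset=0x%x\n", l1Index, l2Index, pageOff);
    /* prints: l1Index=1025 l2Index=35 pageOffset=0x456 */
    return 0;
}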
/// get the number of free page frames left in a segment
UINT32 OsVmPhySegPagesGet(LosVmPhysSeg *seg)
{
UINT32 intSave;
@ -498,25 +489,14 @@ UINT32 OsVmPhySegPagesGet(LosVmPhysSeg *seg)
UINT32 segFreePages = 0;
LOS_SpinLockSave(&seg->freeListLock, &intSave);
for (flindex = 0; flindex < VM_LIST_ORDER_MAX; flindex++) {// walk the buddy free lists
segFreePages += ((1 << flindex) * seg->freeList[flindex].listCnt);// (1 << order) pages per block, times the block count, gives this order's pages
for (flindex = 0; flindex < VM_LIST_ORDER_MAX; flindex++) {
segFreePages += ((1 << flindex) * seg->freeList[flindex].listCnt);
}
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
return segFreePages;// total unallocated physical page frames
return segFreePages;
}
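/*
 * Illustrative sketch (plain C, not kernel code) of the arithmetic above: a free list of
 * order k holds blocks of 2^k pages, so the free total is the sum of (1 << k) * listCnt[k].
 * The order count and the example block counts are chosen for the demo.
 */
#include <stdio.h>

#define ORDER_MAX 9

int main(void)
{
    unsigned int listCnt[ORDER_MAX] = {3, 1, 0, 2, 0, 0, 0, 0, 1};  /* blocks per order */
    unsigned int freePages = 0;
    for (unsigned int order = 0; order < ORDER_MAX; order++) {
        freePages += (1u << order) * listCnt[order];
    }
    printf("free pages = %u\n", freePages);   /* 3*1 + 1*2 + 2*8 + 1*256 = 277 */
    return 0;
}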
/// dump physical memory usage
/***********************************************************
* phys_seg:      address of the physical segment descriptor
* base:          base physical address of the segment
* size:          segment size in bytes
* free_pages:    free pages left in the segment
* active anon:   active anonymous pages in the page cache
* inactive anon: inactive anonymous pages in the page cache
* active file:   active file pages in the page cache
* inactive file: inactive file pages in the page cache
* pmm pages:     total, used and free physical page counts
************************************************************/
VOID OsVmPhysDump(VOID)
{
LosVmPhysSeg *seg = NULL;
@ -528,7 +508,7 @@ VOID OsVmPhysDump(VOID)
UINT32 flindex;
UINT32 listCount[VM_LIST_ORDER_MAX] = {0};
for (segIndex = 0; segIndex < g_vmPhysSegNum; segIndex++) {// iterate over the segments
for (segIndex = 0; segIndex < g_vmPhysSegNum; segIndex++) {
seg = &g_vmPhysSeg[segIndex];
if (seg->size > 0) {
segFreePages = OsVmPhySegPagesGet(seg);
@ -558,7 +538,7 @@ VOID OsVmPhysDump(VOID)
PRINTK("\n\rpmm pages: total = %u, used = %u, free = %u\n",
totalPages, (totalPages - totalFreePages), totalFreePages);
}
/// get physical memory usage; the two out-parameters carry back the counts
VOID OsVmPhysUsedInfoGet(UINT32 *usedCount, UINT32 *totalCount)
{
UINT32 index;
@ -571,12 +551,12 @@ VOID OsVmPhysUsedInfoGet(UINT32 *usedCount, UINT32 *totalCount)
*usedCount = 0;
*totalCount = 0;
for (index = 0; index < g_vmPhysSegNum; index++) {// iterate over the segments
for (index = 0; index < g_vmPhysSegNum; index++) {
physSeg = &g_vmPhysSeg[index];
if (physSeg->size > 0) {
*totalCount += physSeg->size >> PAGE_SHIFT;// accumulate this segment's total pages
segFreePages = OsVmPhySegPagesGet(physSeg);// free pages left in this segment
*usedCount += (*totalCount - segFreePages);// accumulate the used pages
*totalCount += physSeg->size >> PAGE_SHIFT;
segFreePages = OsVmPhySegPagesGet(physSeg);
*usedCount += (*totalCount - segFreePages);
}
}
}

@ -54,7 +54,7 @@
extern char __exc_table_start[];
extern char __exc_table_end[];
// region access-permission check
STATIC STATUS_T OsVmRegionPermissionCheck(LosVmMapRegion *region, UINT32 flags)
{
if ((region->regionFlags & VM_MAP_REGION_FLAG_PERM_READ) != VM_MAP_REGION_FLAG_PERM_READ) {
@ -62,14 +62,14 @@ STATIC STATUS_T OsVmRegionPermissionCheck(LosVmMapRegion *region, UINT32 flags)
return LOS_NOK;
}
if ((flags & VM_MAP_PF_FLAG_WRITE) == VM_MAP_PF_FLAG_WRITE) {// write permission
if ((flags & VM_MAP_PF_FLAG_WRITE) == VM_MAP_PF_FLAG_WRITE) {
if ((region->regionFlags & VM_MAP_REGION_FLAG_PERM_WRITE) != VM_MAP_REGION_FLAG_PERM_WRITE) {
VM_ERR("write permission check failed operation flags %x, region flags %x", flags, region->regionFlags);
return LOS_NOK;
}
}
if ((flags & VM_MAP_PF_FLAG_INSTRUCTION) == VM_MAP_PF_FLAG_INSTRUCTION) {// instruction fetch (execute) permission
if ((flags & VM_MAP_PF_FLAG_INSTRUCTION) == VM_MAP_PF_FLAG_INSTRUCTION) {
if ((region->regionFlags & VM_MAP_REGION_FLAG_PERM_EXECUTE) != VM_MAP_REGION_FLAG_PERM_EXECUTE) {
VM_ERR("exec permission check failed operation flags %x, region flags %x", flags, region->regionFlags);
return LOS_NOK;
@ -97,8 +97,7 @@ STATIC VOID OsFaultTryFixup(ExcContext *frame, VADDR_T excVaddr, STATUS_T *statu
}
#ifdef LOSCFG_FS_VFS
// handle a page fault raised by a read access
STATIC STATUS_T OsDoReadFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)// read fault
STATIC STATUS_T OsDoReadFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
{
status_t ret;
PADDR_T paddr;
@ -106,26 +105,26 @@ STATIC STATUS_T OsDoReadFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)//
VADDR_T vaddr = (VADDR_T)vmPgFault->vaddr;
LosVmSpace *space = region->space;
ret = LOS_ArchMmuQuery(&space->archMmu, vaddr, NULL, NULL);// check whether a mapping already exists
if (ret == LOS_OK) {// LOS_OK means a physical address was found, i.e. the page is already mapped
return LOS_OK;// nothing to do: a fault only occurs because the virtual address had no physical mapping
ret = LOS_ArchMmuQuery(&space->archMmu, vaddr, NULL, NULL);
if (ret == LOS_OK) {
return LOS_OK;
}
if (region->unTypeData.rf.vmFOps == NULL || region->unTypeData.rf.vmFOps->fault == NULL) {// the region must implement the fault interface
if (region->unTypeData.rf.vmFOps == NULL || region->unTypeData.rf.vmFOps->fault == NULL) {
VM_ERR("region args invalid, file path: %s", region->unTypeData.rf.vnode->filePath);
return LOS_ERRNO_VM_INVALID_ARGS;
}
(VOID)LOS_MuxAcquire(&region->unTypeData.rf.vnode->mapping.mux_lock);
ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault);// the function pointer resolves to g_commVmOps.OsVmmFileFault
ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault);
if (ret == LOS_OK) {
paddr = LOS_PaddrQuery(vmPgFault->pageKVaddr);// query the physical address
page = LOS_VmPageGet(paddr);// get the page frame
paddr = LOS_PaddrQuery(vmPgFault->pageKVaddr);
page = LOS_VmPageGet(paddr);
if (page != NULL) { /* just incase of page null */
LOS_AtomicInc(&page->refCounts);// bump the refcount
LOS_AtomicInc(&page->refCounts);
OsCleanPageLocked(page);
}
ret = LOS_ArchMmuMap(&space->archMmu, vaddr, paddr, 1,
region->regionFlags & (~VM_MAP_REGION_FLAG_PERM_WRITE));// remap as non-writable
region->regionFlags & (~VM_MAP_REGION_FLAG_PERM_WRITE));
if (ret < 0) {
VM_ERR("LOS_ArchMmuMap failed");
OsDelMapInfo(region, vmPgFault, false);
@ -141,7 +140,7 @@ STATIC STATUS_T OsDoReadFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)//
return LOS_ERRNO_VM_NO_MEMORY;
}
/* unmap a page when cow happened only */
/* unmap a page when cow happened only */
STATIC LosVmPage *OsCowUnmapOrg(LosArchMmu *archMmu, LosVmMapRegion *region, LosVmPgFault *vmf)
{
UINT32 intSave;
@ -169,7 +168,7 @@ STATIC LosVmPage *OsCowUnmapOrg(LosArchMmu *archMmu, LosVmMapRegion *region, Los
return oldPage;
}
#endif
// handle a fault caused by writing a private (copy-on-write) file-backed region
status_t OsDoCowFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
{
STATUS_T ret;
@ -187,23 +186,23 @@ status_t OsDoCowFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
}
space = region->space;
ret = LOS_ArchMmuQuery(&space->archMmu, (VADDR_T)vmPgFault->vaddr, &oldPaddr, NULL);// look up the old physical address
ret = LOS_ArchMmuQuery(&space->archMmu, (VADDR_T)vmPgFault->vaddr, &oldPaddr, NULL);
if (ret == LOS_OK) {
oldPage = OsCowUnmapOrg(&space->archMmu, region, vmPgFault);// unmap the original page
oldPage = OsCowUnmapOrg(&space->archMmu, region, vmPgFault);
}
newPage = LOS_PhysPageAlloc();// allocate a new page
newPage = LOS_PhysPageAlloc();
if (newPage == NULL) {
VM_ERR("LOS_PhysPageAlloc failed");
ret = LOS_ERRNO_VM_NO_MEMORY;
goto ERR_OUT;
}
newPaddr = VM_PAGE_TO_PHYS(newPage);// physical address of the new page
kvaddr = OsVmPageToVaddr(newPage);// kernel virtual address of the new page
newPaddr = VM_PAGE_TO_PHYS(newPage);
kvaddr = OsVmPageToVaddr(newPage);
(VOID)LOS_MuxAcquire(&region->unTypeData.rf.vnode->mapping.mux_lock);
ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault);// resolves to g_commVmOps.OsVmmFileFault
ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault);
if (ret != LOS_OK) {
VM_ERR("call region->vm_ops->fault fail");
(VOID)LOS_MuxRelease(&region->unTypeData.rf.vnode->mapping.mux_lock);
@ -215,20 +214,20 @@ status_t OsDoCowFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
* we can take it as a normal file cow map. 2.this page has done file cow map,
* we can take it as a anonymous cow map.
*/
if ((oldPaddr == 0) || (LOS_PaddrToKVaddr(oldPaddr) == vmPgFault->pageKVaddr)) {// either no mapping yet, or it already maps the page cache page
(VOID)memcpy_s(kvaddr, PAGE_SIZE, vmPgFault->pageKVaddr, PAGE_SIZE);// copy straight into the new page
LOS_AtomicInc(&newPage->refCounts);// bump the refcount
OsCleanPageLocked(LOS_VmPageGet(LOS_PaddrQuery(vmPgFault->pageKVaddr)));// unlock the page cache page
if ((oldPaddr == 0) || (LOS_PaddrToKVaddr(oldPaddr) == vmPgFault->pageKVaddr)) {
(VOID)memcpy_s(kvaddr, PAGE_SIZE, vmPgFault->pageKVaddr, PAGE_SIZE);
LOS_AtomicInc(&newPage->refCounts);
OsCleanPageLocked(LOS_VmPageGet(LOS_PaddrQuery(vmPgFault->pageKVaddr)));
} else {
OsPhysSharePageCopy(oldPaddr, &newPaddr, newPage);// on entry oldPaddr is guaranteed to differ from newPaddr
OsPhysSharePageCopy(oldPaddr, &newPaddr, newPage);
/* use old page free the new one */
if (newPaddr == oldPaddr) {// note: newPaddr may have been updated, since &newPaddr was passed in
LOS_PhysPageFree(newPage);// the old page keeps serving, so free the new one rather than waste memory
if (newPaddr == oldPaddr) {
LOS_PhysPageFree(newPage);
newPage = NULL;
}
}
ret = LOS_ArchMmuMap(&space->archMmu, (VADDR_T)vmPgFault->vaddr, newPaddr, 1, region->regionFlags);// map the faulting virtual address to the new physical page so it no longer faults
ret = LOS_ArchMmuMap(&space->archMmu, (VADDR_T)vmPgFault->vaddr, newPaddr, 1, region->regionFlags);
if (ret < 0) {
VM_ERR("LOS_ArchMmuMap failed");
ret = LOS_ERRNO_VM_NO_MEMORY;
@ -253,7 +252,7 @@ ERR_OUT:
return ret;
}
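/*
 * Illustrative sketch (plain C, not kernel code) of the copy-on-write step above: when a
 * shared "page" is written, a private copy is made and the writer switches to it, while
 * the original stays untouched for the other readers. Everything here is a user-space
 * stand-in; refcounting and the MMU remap are left out.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u

static unsigned char *CowWrite(unsigned char *sharedPage, int shareRef,
                               size_t off, unsigned char value)
{
    unsigned char *page = sharedPage;
    if (shareRef > 1) {                        /* still shared: copy before writing */
        page = malloc(PAGE_SIZE);
        if (page == NULL) {
            return NULL;
        }
        memcpy(page, sharedPage, PAGE_SIZE);   /* mirrors the copy onto the new page */
    }
    page[off] = value;                         /* the write lands on the private copy */
    return page;
}

int main(void)
{
    unsigned char *orig = calloc(1, PAGE_SIZE);
    if (orig == NULL) {
        return 1;
    }
    unsigned char *mine = CowWrite(orig, 2, 0, 0xAB);
    if (mine != NULL) {
        printf("orig[0]=0x%02x mine[0]=0x%02x\n", orig[0], mine[0]);  /* 0x00 vs 0xab */
        if (mine != orig) {
            free(mine);
        }
    }
    free(orig);
    return 0;
}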
/// handle a fault caused by writing a shared file-backed region
status_t OsDoSharedFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
{
STATUS_T ret;
@ -269,10 +268,10 @@ status_t OsDoSharedFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
return LOS_ERRNO_VM_INVALID_ARGS;
}
ret = LOS_ArchMmuQuery(&space->archMmu, vmPgFault->vaddr, &paddr, NULL);// query the physical address
ret = LOS_ArchMmuQuery(&space->archMmu, vmPgFault->vaddr, &paddr, NULL);
if (ret == LOS_OK) {
LOS_ArchMmuUnmap(&space->archMmu, vmPgFault->vaddr, 1);// drop the current mapping first
ret = LOS_ArchMmuMap(&space->archMmu, vaddr, paddr, 1, region->regionFlags);// then remap, because the region's mapping flags have changed
LOS_ArchMmuUnmap(&space->archMmu, vmPgFault->vaddr, 1);
ret = LOS_ArchMmuMap(&space->archMmu, vaddr, paddr, 1, region->regionFlags);
if (ret < 0) {
VM_ERR("LOS_ArchMmuMap failed. ret=%d", ret);
return LOS_ERRNO_VM_NO_MEMORY;
@ -280,16 +279,16 @@ status_t OsDoSharedFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
LOS_SpinLockSave(&region->unTypeData.rf.vnode->mapping.list_lock, &intSave);
fpage = OsFindGetEntry(&region->unTypeData.rf.vnode->mapping, vmPgFault->pgoff);
if (fpage) {// found in the page cache
OsMarkPageDirty(fpage, region, 0, 0);// mark the page dirty
if (fpage) {
OsMarkPageDirty(fpage, region, 0, 0);
}
LOS_SpinUnlockRestore(&region->unTypeData.rf.vnode->mapping.list_lock, intSave);
return LOS_OK;
}
// below: the virtual address has no physical mapping yet
(VOID)LOS_MuxAcquire(&region->unTypeData.rf.vnode->mapping.mux_lock);
ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault);// resolves to g_commVmOps.OsVmmFileFault
ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault);
if (ret == LOS_OK) {
paddr = LOS_PaddrQuery(vmPgFault->pageKVaddr);
page = LOS_VmPageGet(paddr);
@ -320,36 +319,23 @@ status_t OsDoSharedFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
* For COW fault, pagecache is copied to private anonyous pages and the changes on this page
* won't write through to the underlying file. For SHARED fault, pagecache is mapping with
* region->arch_mmu_flags and the changes on this page will write through to the underlying file
*/ // page fault raised while accessing a file mapping
*/
STATIC STATUS_T OsDoFileFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault, UINT32 flags)
{
STATUS_T ret;
if (flags & VM_MAP_PF_FLAG_WRITE) {// fault on a write access
if (region->regionFlags & VM_MAP_REGION_FLAG_SHARED) {// shared region
ret = OsDoSharedFault(region, vmPgFault);// shared write fault, the most complex case: changes on this page write through to the file on disk
} else {// private region
ret = OsDoCowFault(region, vmPgFault);// copy-on-write: the page cache page is copied to a private page and changed there, never written straight to the file
if (flags & VM_MAP_PF_FLAG_WRITE) {
if (region->regionFlags & VM_MAP_REGION_FLAG_SHARED) {
ret = OsDoSharedFault(region, vmPgFault);
} else {
ret = OsDoCowFault(region, vmPgFault);
}
} else {// fault on a read access
ret = OsDoReadFault(region, vmPgFault);// reads are simple: share the page cache page to save memory and map it read-only (region flags & ~PERM_WRITE)
} else {
ret = OsDoReadFault(region, vmPgFault);
}
return ret;
}
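/*
 * Illustrative sketch (plain C, not kernel code) of the dispatch above: a write fault on a
 * shared region takes the shared path (write-through to the file), a write fault on a
 * private region takes copy-on-write, and any read fault just maps the page cache page
 * read-only. The flag values are invented for the example.
 */
#include <stdio.h>

#define PF_WRITE      0x1u
#define REGION_SHARED 0x2u

static const char *FaultPath(unsigned int pfFlags, unsigned int regionFlags)
{
    if (pfFlags & PF_WRITE) {
        return (regionFlags & REGION_SHARED) ? "shared fault (write-through)"
                                             : "cow fault (private copy)";
    }
    return "read fault (shared, read-only mapping)";
}

int main(void)
{
    printf("%s\n", FaultPath(PF_WRITE, REGION_SHARED));
    printf("%s\n", FaultPath(PF_WRITE, 0));
    printf("%s\n", FaultPath(0, REGION_SHARED));
    return 0;
}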
/**
* @brief page fault entry: find the faulting region and dispatch to the file-backed or anonymous fault handling
* @param vaddr faulting virtual address
* @param flags fault flags (read/write/user/instruction)
* @param frame exception context of the faulting instruction
* @return STATUS_T LOS_OK on success, or an LOS_ERRNO_VM_* error code
*/
STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
{
LosVmSpace *space = LOS_SpaceGet(vaddr);
@ -368,9 +354,9 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
return status;
}
if (((flags & VM_MAP_PF_FLAG_USER) != 0) && (!LOS_IsUserAddress(vaddr))) {// address protection: user mode must not reach outside user space
if (((flags & VM_MAP_PF_FLAG_USER) != 0) && (!LOS_IsUserAddress(vaddr))) {
VM_ERR("user space not allowed to access invalid address: %#x", vaddr);
return LOS_ERRNO_VM_ACCESS_DENIED;// access denied
return LOS_ERRNO_VM_ACCESS_DENIED;
}
#ifdef LOSCFG_KERNEL_PLIMITS
@ -380,7 +366,7 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
#endif
(VOID)LOS_MuxAcquire(&space->regionMux);
region = LOS_RegionFind(space, vaddr);// find the region that contains the faulting address
region = LOS_RegionFind(space, vaddr);
if (region == NULL) {
VM_ERR("region not exists, vaddr: %#x", vaddr);
status = LOS_ERRNO_VM_NOT_FOUND;
@ -389,11 +375,11 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
status = OsVmRegionPermissionCheck(region, flags);
if (status != LOS_OK) {
status = LOS_ERRNO_VM_ACCESS_DENIED;// access denied
status = LOS_ERRNO_VM_ACCESS_DENIED;
goto CHECK_FAILED;
}
if (OomCheckProcess()) {// low-memory check
if (OomCheckProcess()) {
/*
* under low memory, when user process request memory allocation
* it will fail, and result is LOS_NOK and current user process
@ -403,18 +389,18 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
goto CHECK_FAILED;
}
vaddr = ROUNDDOWN(vaddr, PAGE_SIZE);// round down to the page base, since the whole page gets (re)mapped
vaddr = ROUNDDOWN(vaddr, PAGE_SIZE);
#ifdef LOSCFG_FS_VFS
if (LOS_IsRegionFileValid(region)) {// file-backed region?
if (LOS_IsRegionFileValid(region)) {
if (region->unTypeData.rf.vnode == NULL) {
goto CHECK_FAILED;
}
vmPgFault.vaddr = vaddr;// faulting virtual address
vmPgFault.pgoff = ((vaddr - region->range.base) >> PAGE_SHIFT) + region->pgOff;// page offset within the file
vmPgFault.vaddr = vaddr;
vmPgFault.pgoff = ((vaddr - region->range.base) >> PAGE_SHIFT) + region->pgOff;
vmPgFault.flags = flags;
vmPgFault.pageKVaddr = NULL;// no backing page yet
vmPgFault.pageKVaddr = NULL;
status = OsDoFileFault(region, &vmPgFault, flags);// handle the file-backed fault
status = OsDoFileFault(region, &vmPgFault, flags);
if (status) {
VM_ERR("vm fault error, status=%d", status);
goto CHECK_FAILED;
@ -422,27 +408,27 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
goto DONE;
}
#endif
// demand paging: defer allocation until it can no longer be deferred
newPage = LOS_PhysPageAlloc();// allocate a fresh physical page
newPage = LOS_PhysPageAlloc();
if (newPage == NULL) {
status = LOS_ERRNO_VM_NO_MEMORY;
goto CHECK_FAILED;
}
newPaddr = VM_PAGE_TO_PHYS(newPage);// physical address of the new page
(VOID)memset_s(OsVmPageToVaddr(newPage), PAGE_SIZE, 0, PAGE_SIZE);// zero the page through its kernel virtual address
status = LOS_ArchMmuQuery(&space->archMmu, vaddr, &oldPaddr, NULL);// look up the old physical address, if any
newPaddr = VM_PAGE_TO_PHYS(newPage);
(VOID)memset_s(OsVmPageToVaddr(newPage), PAGE_SIZE, 0, PAGE_SIZE);
status = LOS_ArchMmuQuery(&space->archMmu, vaddr, &oldPaddr, NULL);
if (status >= 0) {
LOS_ArchMmuUnmap(&space->archMmu, vaddr, 1);// drop the old mapping
OsPhysSharePageCopy(oldPaddr, &newPaddr, newPage);// copy the data from oldPaddr into newPage
LOS_ArchMmuUnmap(&space->archMmu, vaddr, 1);
OsPhysSharePageCopy(oldPaddr, &newPaddr, newPage);
/* use old page free the new one */
if (newPaddr == oldPaddr) {// the old and new physical addresses turned out identical
LOS_PhysPageFree(newPage);// keep using the old page and free the new one
if (newPaddr == oldPaddr) {
LOS_PhysPageFree(newPage);
newPage = NULL;
}
/* map all of the pages */
status = LOS_ArchMmuMap(&space->archMmu, vaddr, newPaddr, 1, region->regionFlags);// remap to the new physical page
status = LOS_ArchMmuMap(&space->archMmu, vaddr, newPaddr, 1, region->regionFlags);
if (status < 0) {
VM_ERR("failed to map replacement page, status:%d", status);
status = LOS_ERRNO_VM_MAP_FAILED;
@ -453,8 +439,8 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
goto DONE;
} else {
/* map all of the pages */
LOS_AtomicInc(&newPage->refCounts);// bump the refcount
status = LOS_ArchMmuMap(&space->archMmu, vaddr, newPaddr, 1, region->regionFlags);// map the new physical page so the next access no longer faults
LOS_AtomicInc(&newPage->refCounts);
status = LOS_ArchMmuMap(&space->archMmu, vaddr, newPaddr, 1, region->regionFlags);
if (status < 0) {
VM_ERR("failed to map page, status:%d", status);
status = LOS_ERRNO_VM_MAP_FAILED;

@ -66,114 +66,98 @@ VOID ResetPageCacheHitInfo(int *try, int *hit)
#define TRACE_TRY_CACHE()
#define TRACE_HIT_CACHE()
#endif
#ifdef LOSCFG_KERNEL_VM
/**
* @brief add a file page to the page cache
@verbatim
The page cache keeps LosFilePage nodes ordered by pgoff, so a page can be located
by its file offset (e.g. after a seek) with a simple walk of the cache list.
@endverbatim
* @param page file page to insert
* @param mapping page mapping of the owning file
* @param pgoff page offset of the page within the file
* @return VOID
*/
STATIC VOID OsPageCacheAdd(LosFilePage *page, struct page_mapping *mapping, VM_OFFSET_T pgoff)
{
LosFilePage *fpage = NULL;
LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {// walk the page_list
if (fpage->pgoff > pgoff) {// insert before the first node with a larger pgoff, keeping the list sorted ascending
LOS_ListTailInsert(&fpage->node, &page->node);// which effectively hangs the new page in front of fpage
LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {
if (fpage->pgoff > pgoff) {
LOS_ListTailInsert(&fpage->node, &page->node);
goto done_add;
}
}
LOS_ListTailInsert(&mapping->page_list, &page->node);// append to the mapping's page list, i.e. at the very end
LOS_ListTailInsert(&mapping->page_list, &page->node);
done_add:
mapping->nrpages++; // one more cached page for this file
mapping->nrpages++;
}
/// add the page to the page cache and to the active-file LRU list
VOID OsAddToPageacheLru(LosFilePage *page, struct page_mapping *mapping, VM_OFFSET_T pgoff)
{
OsPageCacheAdd(page, mapping, pgoff);
OsLruCacheAdd(page, VM_LRU_ACTIVE_FILE);
}
/// remove a page from the page cache
VOID OsPageCacheDel(LosFilePage *fpage)
{
/* delete from file cache list */
LOS_ListDelete(&fpage->node);// detach it from the cache list
fpage->mapping->nrpages--;// one less cached page for this mapping
LOS_ListDelete(&fpage->node);
fpage->mapping->nrpages--;
/* unmap and remove map info */
if (OsIsPageMapped(fpage)) {// unmap it if it is still mapped anywhere
if (OsIsPageMapped(fpage)) {
OsUnmapAllLocked(fpage);
}
LOS_PhysPageFree(fpage->vmPage);// free the physical page
LOS_PhysPageFree(fpage->vmPage);
LOS_MemFree(m_aucSysMem0, fpage);// free the LosFilePage structure
LOS_MemFree(m_aucSysMem0, fpage);
}
/**************************************************************************************************
Record which processes map this LosFilePage: each mapping adds a LosMapInfo node to the page,
so the page knows (1) which address spaces map it and (2) at which virtual addresses.
**************************************************************************************************/
VOID OsAddMapInfo(LosFilePage *page, LosArchMmu *archMmu, VADDR_T vaddr)
{
LosMapInfo *info = NULL;
info = (LosMapInfo *)LOS_MemAlloc(m_aucSysMem0, sizeof(LosMapInfo));// allocate a mapping-info node
info = (LosMapInfo *)LOS_MemAlloc(m_aucSysMem0, sizeof(LosMapInfo));
if (info == NULL) {
VM_ERR("OsAddMapInfo alloc memory failed!");
return;
}
info->page = page; // the file page
info->archMmu = archMmu;// the process MMU that performs the virtual-to-physical translation
info->vaddr = vaddr; // the mapped virtual address
info->page = page;
info->archMmu = archMmu;
info->vaddr = vaddr;
LOS_ListAdd(&page->i_mmap, &info->node);// hang the LosMapInfo node onto the page's i_mmap list
page->n_maps++;// one more mapping
LOS_ListAdd(&page->i_mmap, &info->node);
page->n_maps++;
}
/// look up a file page's mapping info by virtual address; every process has its own archMmu
LosMapInfo *OsGetMapInfo(const LosFilePage *page, const LosArchMmu *archMmu, VADDR_T vaddr)
{
LosMapInfo *info = NULL;
const LOS_DL_LIST *immap = &page->i_mmap;// a file page can be mapped by several processes
const LOS_DL_LIST *immap = &page->i_mmap;
LOS_DL_LIST_FOR_EACH_ENTRY(info, immap, LosMapInfo, node) {// walk the mapping list
if ((info->archMmu == archMmu) && (info->vaddr == vaddr) && (info->page == page)) {// return on an exact match
LOS_DL_LIST_FOR_EACH_ENTRY(info, immap, LosMapInfo, node) {
if ((info->archMmu == archMmu) && (info->vaddr == vaddr) && (info->page == page)) {
return info;
}
}
return NULL;
}
/// remove from the page cache and the LRU list; the counterpart of OsAddToPageacheLru
VOID OsDeletePageCacheLru(LosFilePage *page)
{
/* delete form lru list */
OsLruCacheDel(page); // take the page off the LRU list
/* delete from cache lits and free pmm if need */
OsPageCacheDel(page); // remove it from the page cache
/* delete from lru list */
OsLruCacheDel(page);
/* delete from cache list and free pmm if needed */
OsPageCacheDel(page);
}
// remove the mapping between a file page and a process
STATIC VOID OsPageCacheUnmap(LosFilePage *fpage, LosArchMmu *archMmu, VADDR_T vaddr)
{
UINT32 intSave;
LosMapInfo *info = NULL;
LOS_SpinLockSave(&fpage->physSeg->lruLock, &intSave);
info = OsGetMapInfo(fpage, archMmu, vaddr);// fetch this process's mapping info for the file page
info = OsGetMapInfo(fpage, archMmu, vaddr);
if (info == NULL) {
VM_ERR("OsPageCacheUnmap get map info failed!");
} else {
OsUnmapPageLocked(fpage, info);// unmap the page from the process
OsUnmapPageLocked(fpage, info);
}
if (!(OsIsPageMapped(fpage) && ((fpage->flags & VM_MAP_REGION_FLAG_PERM_EXECUTE) ||
OsIsPageDirty(fpage->vmPage)))) {
@ -182,7 +166,7 @@ STATIC VOID OsPageCacheUnmap(LosFilePage *fpage, LosArchMmu *archMmu, VADDR_T va
LOS_SpinUnlockRestore(&fpage->physSeg->lruLock, intSave);
}
/// remove the file page mapped at the given file offset
VOID OsVmmFileRemove(LosVmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T pgoff)
{
UINT32 intSave;
@ -195,31 +179,31 @@ VOID OsVmmFileRemove(LosVmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T pg
LosVmPage *mapPage = NULL;
if (!LOS_IsRegionFileValid(region) || (region->unTypeData.rf.vnode == NULL)) {
return;// not a valid, mapped file region
return;
}
vnode = region->unTypeData.rf.vnode;
mapping = &vnode->mapping;
vaddr = region->range.base + ((UINT32)(pgoff - region->pgOff) << PAGE_SHIFT);// compute the virtual address
vaddr = region->range.base + ((UINT32)(pgoff - region->pgOff) << PAGE_SHIFT);
status_t status = LOS_ArchMmuQuery(archMmu, vaddr, &paddr, NULL);// query the physical address
status_t status = LOS_ArchMmuQuery(archMmu, vaddr, &paddr, NULL);
if (status != LOS_OK) {
return;
}
mapPage = LOS_VmPageGet(paddr);// get the physical page frame
mapPage = LOS_VmPageGet(paddr);
/* is page is in cache list */
LOS_SpinLockSave(&mapping->list_lock, &intSave);
fpage = OsFindGetEntry(mapping, pgoff);// look up the cached file page
fpage = OsFindGetEntry(mapping, pgoff);
/* no cache or have cache but not map(cow), free it direct */
if ((fpage == NULL) || (fpage->vmPage != mapPage)) {// no cache entry, or cached but not mapped here (cow): free it directly
LOS_PhysPageFree(mapPage);// free the physical page frame
LOS_ArchMmuUnmap(archMmu, vaddr, 1);// unmap the virtual address
if ((fpage == NULL) || (fpage->vmPage != mapPage)) {
LOS_PhysPageFree(mapPage);
LOS_ArchMmuUnmap(archMmu, vaddr, 1);
/* this is a page cache map! */
} else {
OsPageCacheUnmap(fpage, archMmu, vaddr);// drop the mapping recorded in the page cache
if (OsIsPageDirty(fpage->vmPage)) {// dirty page handling
tmpPage = OsDumpDirtyPage(fpage);// take a copy of the dirty page for write-back
OsPageCacheUnmap(fpage, archMmu, vaddr);
if (OsIsPageDirty(fpage->vmPage)) {
tmpPage = OsDumpDirtyPage(fpage);
}
}
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
@ -229,15 +213,15 @@ VOID OsVmmFileRemove(LosVmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T pg
}
return;
}
/// mark a page dirty: when a process modifies data in the page cache, the kernel marks that page dirty
VOID OsMarkPageDirty(LosFilePage *fpage, const LosVmMapRegion *region, INT32 off, INT32 len)
{
if (region != NULL) {
OsSetPageDirty(fpage->vmPage);// set the dirty flag
fpage->dirtyOff = off;// start of the dirty range
fpage->dirtyEnd = len;// end of the dirty range
OsSetPageDirty(fpage->vmPage);
fpage->dirtyOff = off;
fpage->dirtyEnd = len;
} else {
OsSetPageDirty(fpage->vmPage);// set the dirty flag
OsSetPageDirty(fpage->vmPage);
if ((off + len) > fpage->dirtyEnd) {
fpage->dirtyEnd = off + len;
}
@ -274,22 +258,22 @@ STATIC UINT32 GetDirtySize(LosFilePage *fpage, struct Vnode *vnode)
return PAGE_SIZE;
}
/// flush a dirty page back to disk
STATIC INT32 OsFlushDirtyPage(LosFilePage *fpage)
{
UINT32 ret;
size_t len;
char *buff = NULL;
struct Vnode *vnode = fpage->mapping->host;/* owner of this mapping */ // which file this mapping belongs to; <file, page_mapping> is a 1:1 relationship
struct Vnode *vnode = fpage->mapping->host;
if (vnode == NULL) {
VM_ERR("page cache vnode error");
return LOS_NOK;
}
len = fpage->dirtyEnd - fpage->dirtyOff;// length of the dirty range
len = fpage->dirtyEnd - fpage->dirtyOff;
len = (len == 0) ? GetDirtySize(fpage, vnode) : len;
if (len == 0) {// nothing dirty
OsCleanPageDirty(fpage->vmPage);// clear the dirty flag
if (len == 0) {
OsCleanPageDirty(fpage->vmPage);
return LOS_OK;
}
@ -306,7 +290,7 @@ STATIC INT32 OsFlushDirtyPage(LosFilePage *fpage)
return ret;
}
/// duplicate a dirty page; the original page has its dirty flag cleared
LosFilePage *OsDumpDirtyPage(LosFilePage *oldFPage)
{
LosFilePage *newFPage = NULL;
@ -318,11 +302,11 @@ LosFilePage *OsDumpDirtyPage(LosFilePage *oldFPage)
}
OsCleanPageDirty(oldFPage->vmPage);
(VOID)memcpy_s(newFPage, sizeof(LosFilePage), oldFPage, sizeof(LosFilePage));// plain memory copy
(VOID)memcpy_s(newFPage, sizeof(LosFilePage), oldFPage, sizeof(LosFilePage));
return newFPage;
}
/// flush dirty page data back to disk
VOID OsDoFlushDirtyPage(LosFilePage *fpage)
{
if (fpage == NULL) {
@ -344,7 +328,7 @@ STATIC VOID OsReleaseFpage(struct page_mapping *mapping, LosFilePage *fpage)
LOS_SpinUnlockRestore(lruLock, lruSave);
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
}
/// delete the mapping info
VOID OsDelMapInfo(LosVmMapRegion *region, LosVmPgFault *vmf, BOOL cleanDirty)
{
UINT32 intSave;
@ -365,9 +349,9 @@ VOID OsDelMapInfo(LosVmMapRegion *region, LosVmPgFault *vmf, BOOL cleanDirty)
}
if (cleanDirty) {
OsCleanPageDirty(fpage->vmPage);// restore the page to clean
OsCleanPageDirty(fpage->vmPage);
}
info = OsGetMapInfo(fpage, &region->space->archMmu, (vaddr_t)vmf->vaddr);// look up the mapping info by virtual address
info = OsGetMapInfo(fpage, &region->space->archMmu, (vaddr_t)vmf->vaddr);
if (info != NULL) {
fpage->n_maps--;
LOS_ListDelete(&info->node);
@ -378,10 +362,7 @@ VOID OsDelMapInfo(LosVmMapRegion *region, LosVmPgFault *vmf, BOOL cleanDirty)
}
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
}
/*!
Common fault handler for file-backed regions: find or create the page cache entry and read the data in.
Reached through OsDoReadFault(...), OsDoCowFault(...) and OsDoSharedFault(...).
*/
INT32 OsVmmFileFault(LosVmMapRegion *region, LosVmPgFault *vmf)
{
INT32 ret;
@ -393,7 +374,7 @@ INT32 OsVmmFileFault(LosVmMapRegion *region, LosVmPgFault *vmf)
struct page_mapping *mapping = NULL;
LosFilePage *fpage = NULL;
if (!LOS_IsRegionFileValid(region) || (region->unTypeData.rf.vnode == NULL) || (vmf == NULL)) {// is this a valid memory-mapped file?
if (!LOS_IsRegionFileValid(region) || (region->unTypeData.rf.vnode == NULL) || (vmf == NULL)) {
VM_ERR("Input param is NULL");
return LOS_NOK;
}
@ -402,26 +383,26 @@ INT32 OsVmmFileFault(LosVmMapRegion *region, LosVmPgFault *vmf)
/* get or create a new cache node */
LOS_SpinLockSave(&mapping->list_lock, &intSave);
fpage = OsFindGetEntry(mapping, vmf->pgoff);// look up the file page
fpage = OsFindGetEntry(mapping, vmf->pgoff);
TRACE_TRY_CACHE();
if (fpage != NULL) {// hit: the page is already in the page cache
if (fpage != NULL) {
TRACE_HIT_CACHE();
OsPageRefIncLocked(fpage);
} else {// miss: the page is not in the page cache
fpage = OsPageCacheAlloc(mapping, vmf->pgoff);// allocate a file page, including its vmPage (physical page frame)
} else {
fpage = OsPageCacheAlloc(mapping, vmf->pgoff);
if (fpage == NULL) {
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
VM_ERR("Failed to alloc a page frame");
return LOS_NOK;
}
newCache = true;// a new file page was allocated
newCache = true;
}
OsSetPageLocked(fpage->vmPage);// lock the vmPage
OsSetPageLocked(fpage->vmPage);
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
kvaddr = OsVmPageToVaddr(fpage->vmPage);// kernel virtual address of the page frame; the page cache itself lives in kernel space
kvaddr = OsVmPageToVaddr(fpage->vmPage);
/* read file to new page cache */
if (newCache) {// newly allocated cache page
if (newCache) {
ret = vnode->vop->ReadPage(vnode, kvaddr, fpage->pgoff << PAGE_SHIFT);
if (ret == 0) {
VM_ERR("Failed to read from file!");
@ -429,32 +410,32 @@ INT32 OsVmmFileFault(LosVmMapRegion *region, LosVmPgFault *vmf)
return LOS_NOK;
}
LOS_SpinLockSave(&mapping->list_lock, &intSave);
OsAddToPageacheLru(fpage, mapping, vmf->pgoff);// add fpage to the page cache and the LRU cache
OsAddToPageacheLru(fpage, mapping, vmf->pgoff);
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
}
LOS_SpinLockSave(&mapping->list_lock, &intSave);
/* cow fault case no need to save mapinfo */
if (!((vmf->flags & VM_MAP_PF_FLAG_WRITE) && !(region->regionFlags & VM_MAP_REGION_FLAG_SHARED))) {
OsAddMapInfo(fpage, &region->space->archMmu, (vaddr_t)vmf->vaddr);// record the <virtual address, file page> mapping so the process can reach the page through its virtual address
OsAddMapInfo(fpage, &region->space->archMmu, (vaddr_t)vmf->vaddr);
fpage->flags = region->regionFlags;
}
/* share page fault, mark the page dirty */
if ((vmf->flags & VM_MAP_PF_FLAG_WRITE) && (region->regionFlags & VM_MAP_REGION_FLAG_SHARED)) {// write fault on a shared region
OsMarkPageDirty(fpage, region, 0, 0);// mark the page dirty; the kernel writes it back to disk at a suitable time
if ((vmf->flags & VM_MAP_PF_FLAG_WRITE) && (region->regionFlags & VM_MAP_REGION_FLAG_SHARED)) {
OsMarkPageDirty(fpage, region, 0, 0);
}
vmf->pageKVaddr = kvaddr;// hand the page's kernel virtual address back through the fault record
vmf->pageKVaddr = kvaddr;
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
return LOS_OK;
}
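/*
 * Illustrative sketch (plain C, not kernel code) of the lookup-or-fill flow above: a fault
 * first searches the cache for the requested page offset; on a miss a new entry is
 * allocated, filled from the backing store, and added to the cache. The fixed-size array
 * cache and the FillFromFile stub are stand-ins invented for the example.
 */
#include <stdio.h>
#include <string.h>

#define CACHE_SLOTS 8
#define PAGE_SIZE   4096u

typedef struct {
    int used;
    unsigned long pgoff;
    unsigned char data[PAGE_SIZE];
} CachePage;

static CachePage g_cache[CACHE_SLOTS];

static void FillFromFile(unsigned char *buf, unsigned long pgoff)   /* stand-in for a real read */
{
    memset(buf, (int)(pgoff & 0xFF), PAGE_SIZE);
}

static CachePage *CacheGetOrFill(unsigned long pgoff, int *hit)
{
    for (int i = 0; i < CACHE_SLOTS; i++) {
        if (g_cache[i].used && g_cache[i].pgoff == pgoff) {
            *hit = 1;                                   /* already cached */
            return &g_cache[i];
        }
    }
    for (int i = 0; i < CACHE_SLOTS; i++) {
        if (!g_cache[i].used) {                         /* miss: take a free slot */
            g_cache[i].used = 1;
            g_cache[i].pgoff = pgoff;
            FillFromFile(g_cache[i].data, pgoff);       /* read the page in once */
            *hit = 0;
            return &g_cache[i];
        }
    }
    return NULL;                                        /* cache full (eviction omitted) */
}

int main(void)
{
    int hit;
    CacheGetOrFill(3, &hit);
    printf("first access: %s\n", hit ? "hit" : "miss"); /* miss */
    CacheGetOrFill(3, &hit);
    printf("second access: %s\n", hit ? "hit" : "miss");/* hit */
    return 0;
}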
/// flush the file cache: scan every fpage, collect the dirty ones onto dirtyList and write them back; compare with OsFileCacheRemove
VOID OsFileCacheFlush(struct page_mapping *mapping)
{
UINT32 intSave;
UINT32 lruLock;
LOS_DL_LIST_HEAD(dirtyList);//LOS_DL_LIST list = { &(list), &(list) };
LOS_DL_LIST_HEAD(dirtyList);
LosFilePage *ftemp = NULL;
LosFilePage *fpage = NULL;
@ -462,77 +443,70 @@ VOID OsFileCacheFlush(struct page_mapping *mapping)
return;
}
LOS_SpinLockSave(&mapping->list_lock, &intSave);
LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {// iterate over the mapping's page_list
LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {
LOS_SpinLockSave(&fpage->physSeg->lruLock, &lruLock);
if (OsIsPageDirty(fpage->vmPage)) {// dirty page?
ftemp = OsDumpDirtyPage(fpage);// neat trick: copy out a new node while the old page becomes clean and stays in use
if (OsIsPageDirty(fpage->vmPage)) {
ftemp = OsDumpDirtyPage(fpage);
if (ftemp != NULL) {
LOS_ListTailInsert(&dirtyList, &ftemp->node);// queue the copy on the dirty list for write-back
LOS_ListTailInsert(&dirtyList, &ftemp->node);
}
}
LOS_SpinUnlockRestore(&fpage->physSeg->lruLock, lruLock);
}
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, ftemp, &dirtyList, LosFilePage, node) {// note the _SAFE variant: the loop condition is &(item)->member != (list)
OsDoFlushDirtyPage(fpage);// flush right away, which is why dirtyList need not be a global
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, ftemp, &dirtyList, LosFilePage, node) {
OsDoFlushDirtyPage(fpage);
}
}
/******************************************************************************
Remove a file's pages from the page cache: dirty pages are written back to disk
and every cached page belonging to this mapping is released.
******************************************************************************/
VOID OsFileCacheRemove(struct page_mapping *mapping)
{
UINT32 intSave;
UINT32 lruSave;
SPIN_LOCK_S *lruLock = NULL;
LOS_DL_LIST_HEAD(dirtyList);// define and initialize a circular doubly linked list to hold the dirty pages
LOS_DL_LIST_HEAD(dirtyList);
LosFilePage *ftemp = NULL;
LosFilePage *fpage = NULL;
LosFilePage *fnext = NULL;
LOS_SpinLockSave(&mapping->list_lock, &intSave);// several processes may touch the mapping, so take the lock
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &mapping->page_list, LosFilePage, node) {// walk every cached page of the file (e.g. pages 1, 4, 8); they need not be contiguous, depending on access order
LOS_SpinLockSave(&mapping->list_lock, &intSave);
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &mapping->page_list, LosFilePage, node) {
lruLock = &fpage->physSeg->lruLock;
LOS_SpinLockSave(lruLock, &lruSave);// @note_why does the spinlock really need to be taken this early?
if (OsIsPageDirty(fpage->vmPage)) {// a dirty page, i.e. one whose data has been modified
ftemp = OsDumpDirtyPage(fpage);// copy it out so all fpages can be taken offline in one pass; writing back to disk is slow,
if (ftemp != NULL) {// and flushing here would hold mapping->list_lock far too long, hurting the other CPUs
LOS_ListTailInsert(&dirtyList, &ftemp->node);// queue the temporary dirty copy on the dirty list
LOS_SpinLockSave(lruLock, &lruSave);
if (OsIsPageDirty(fpage->vmPage)) {
ftemp = OsDumpDirtyPage(fpage);
if (ftemp != NULL) {
LOS_ListTailInsert(&dirtyList, &ftemp->node);
}
}
OsDeletePageCacheLru(fpage);// remove it from the page cache and take it off the LRU list
OsDeletePageCacheLru(fpage);
LOS_SpinUnlockRestore(lruLock, lruSave);
}
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);// release the spinlock; other CPUs must not wait too long
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &dirtyList, LosFilePage, node) {// now, outside the lock, deal with the dirty data at leisure
OsDoFlushDirtyPage(fpage);// walk the dirty list and flush page by page
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &dirtyList, LosFilePage, node) {
OsDoFlushDirtyPage(fpage);
}
}
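/*
 * Illustrative sketch (C with pthreads, not kernel code) of the pattern above: while the
 * list lock is held, dirty entries are only collected onto a local snapshot; the slow
 * write-back happens after the lock is released, so other CPUs are not held up. The mutex
 * stands in for the spinlock and DoSlowWriteback is an invented stub.
 */
#include <pthread.h>
#include <stdio.h>

#define NPAGES 4

typedef struct { int dirty; int data; } Page;

static Page g_pages[NPAGES] = {{1, 10}, {0, 20}, {1, 30}, {0, 40}};
static pthread_mutex_t g_listLock = PTHREAD_MUTEX_INITIALIZER;

static void DoSlowWriteback(int data)
{
    printf("writing back %d\n", data);      /* stands in for the disk write */
}

static void FlushDirty(void)
{
    int snapshot[NPAGES];
    int count = 0;

    pthread_mutex_lock(&g_listLock);        /* short critical section: just collect */
    for (int i = 0; i < NPAGES; i++) {
        if (g_pages[i].dirty) {
            snapshot[count++] = g_pages[i].data;
            g_pages[i].dirty = 0;           /* the page is clean again and stays usable */
        }
    }
    pthread_mutex_unlock(&g_listLock);

    for (int i = 0; i < count; i++) {       /* the slow part runs outside the lock */
        DoSlowWriteback(snapshot[i]);
    }
}

int main(void)
{
    FlushDirty();
    return 0;
}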
/// implementation of the virtual-memory file operations
LosVmFileOps g_commVmOps = {
LosVmFileOps g_commVmOps = {
.open = NULL,
.close = NULL,
.fault = OsVmmFileFault, // page fault handling
.remove = OsVmmFileRemove,// page removal
.fault = OsVmmFileFault,
.remove = OsVmmFileRemove,
};
// file mapping
INT32 OsVfsFileMmap(struct file *filep, LosVmMapRegion *region)
{
region->unTypeData.rf.vmFOps = &g_commVmOps;// file operations
region->unTypeData.rf.vmFOps = &g_commVmOps;
region->unTypeData.rf.vnode = filep->f_vnode;
region->unTypeData.rf.f_oflags = filep->f_oflags;
return ENOERR;
}
/*!
Named mapping: map a file-backed object into the region.
filep is a file in the broad sense; in the kernel everything is a file, including the devices under /dev/.
*/
STATUS_T OsNamedMMap(struct file *filep, LosVmMapRegion *region)
{
struct Vnode *vnode = NULL;
@ -545,10 +519,10 @@ STATUS_T OsNamedMMap(struct file *filep, LosVmMapRegion *region)
vnode->useCount++;
VnodeDrop();
if (filep->ops != NULL && filep->ops->mmap != NULL) {
if (vnode->type == VNODE_TYPE_CHR || vnode->type == VNODE_TYPE_BLK) {// block or character device, e.g. /dev/..
LOS_SetRegionTypeDev(region);// mark the region as device-backed
if (vnode->type == VNODE_TYPE_CHR || vnode->type == VNODE_TYPE_BLK) {
LOS_SetRegionTypeDev(region);
} else {
LOS_SetRegionTypeFile(region);// mark the region as file-backed
LOS_SetRegionTypeFile(region);
}
int ret = filep->ops->mmap(filep, region);
if (ret != LOS_OK) {
@ -564,21 +538,17 @@ STATUS_T OsNamedMMap(struct file *filep, LosVmMapRegion *region)
return LOS_OK;
}
/**************************************************************************************************
Look up a cached file page by pgoff. mapping->page_list may hold, say, pages 1, 3, 4 and 6;
a lookup for page 5 then misses.
**************************************************************************************************/
LosFilePage *OsFindGetEntry(struct page_mapping *mapping, VM_OFFSET_T pgoff)
{
LosFilePage *fpage = NULL;
LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {// walk the cached file pages
if (fpage->pgoff == pgoff) {// found the requested page
LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {
if (fpage->pgoff == pgoff) {
return fpage;
}
if (fpage->pgoff > pgoff) {// passed the requested offset without a match: the page is not on the list, stop searching
break;// because mapping->page_list is kept sorted by fpage->pgoff in ascending order
if (fpage->pgoff > pgoff) {
break;
}
}
@ -586,11 +556,6 @@ LosFilePage *OsFindGetEntry(struct page_mapping *mapping, VM_OFFSET_T pgoff)
}
/* need mutex & change memory to dma zone. */
/*!
Allocate a LosFilePage together with its backing physical page frame.
(DMA, Direct Memory Access, lets a dedicated "DMA controller" move data without going through the CPU,
which is why the note above mentions moving this memory to the DMA zone.)
*/
LosFilePage *OsPageCacheAlloc(struct page_mapping *mapping, VM_OFFSET_T pgoff)
{
VOID *kvaddr = NULL;
@ -598,39 +563,39 @@ LosFilePage *OsPageCacheAlloc(struct page_mapping *mapping, VM_OFFSET_T pgoff)
LosVmPage *vmPage = NULL;
LosFilePage *fpage = NULL;
vmPage = LOS_PhysPageAlloc(); // allocate a physical page first
vmPage = LOS_PhysPageAlloc();
if (vmPage == NULL) {
VM_ERR("alloc vm page failed");
return NULL;
}
physSeg = OsVmPhysSegGet(vmPage);// find the segment the page belongs to
kvaddr = OsVmPageToVaddr(vmPage);// kernel virtual address of the page; see OsVmPageToVaddr for how the linear mapping works
physSeg = OsVmPhysSegGet(vmPage);
kvaddr = OsVmPageToVaddr(vmPage);
if ((physSeg == NULL) || (kvaddr == NULL)) {
LOS_PhysPageFree(vmPage); // release vmPage on failure
LOS_PhysPageFree(vmPage);
VM_ERR("alloc vm page failed!");
return NULL;
}
fpage = (LosFilePage *)LOS_MemAlloc(m_aucSysMem0, sizeof(LosFilePage));// allocate a LosFilePage from the memory pool
fpage = (LosFilePage *)LOS_MemAlloc(m_aucSysMem0, sizeof(LosFilePage));
if (fpage == NULL) {
LOS_PhysPageFree(vmPage); // release vmPage on failure
LOS_PhysPageFree(vmPage);
VM_ERR("Failed to allocate for page!");
return NULL;
}
(VOID)memset_s((VOID *)fpage, sizeof(LosFilePage), 0, sizeof(LosFilePage));// zero the structure
(VOID)memset_s((VOID *)fpage, sizeof(LosFilePage), 0, sizeof(LosFilePage));
LOS_ListInit(&fpage->i_mmap); // mapping list; LosMapInfo nodes hang here
LOS_ListInit(&fpage->node); // page cache list node
LOS_ListInit(&fpage->lru); // LRU list node
fpage->n_maps = 0; // number of mappings
fpage->dirtyOff = PAGE_SIZE; // defaults to the end of the page, i.e. no dirty data
fpage->dirtyEnd = 0; // end of the dirty range
fpage->physSeg = physSeg; // segment the page frame belongs to (it also owns the LRU lists)
fpage->vmPage = vmPage; // the physical page frame
fpage->mapping = mapping; // the owning page mapping
fpage->pgoff = pgoff; // page index within the file
(VOID)memset_s(kvaddr, PAGE_SIZE, 0, PAGE_SIZE);// zero the page contents
LOS_ListInit(&fpage->i_mmap);
LOS_ListInit(&fpage->node);
LOS_ListInit(&fpage->lru);
fpage->n_maps = 0;
fpage->dirtyOff = PAGE_SIZE;
fpage->dirtyEnd = 0;
fpage->physSeg = physSeg;
fpage->vmPage = vmPage;
fpage->mapping = mapping;
fpage->pgoff = pgoff;
(VOID)memset_s(kvaddr, PAGE_SIZE, 0, PAGE_SIZE);
return fpage;
}
@ -644,4 +609,4 @@ INT32 OsVfsFileMmap(struct file *filep, LosVmMapRegion *region)
}
#endif
#endif
#endif

@ -1,42 +1,3 @@
/*
Background note on DMA (Direct Memory Access):
DMA lets a peripheral exchange data with memory without going through the CPU, so the CPU is free
to do other work while the transfer runs; the device and the DMA controller handshake through
request/acknowledge (DRQ/DACK) signals on the bus.
"Scatter-gather" DMA allows a single transfer to cover several non-contiguous memory regions.
Because the DMA engine accesses memory behind the CPU's back, DMA buffers must either live in a
cache-coherent system or be placed in non-cached memory on a non-coherent system.
*/
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
@ -75,7 +36,7 @@
#include "los_vm_map.h"
#include "los_memory.h"
/// allocate DMA memory
VOID *LOS_DmaMemAlloc(DMA_ADDR_T *dmaAddr, size_t size, size_t align, enum DmaMemType type)
{
VOID *kVaddr = NULL;
@ -92,24 +53,24 @@ VOID *LOS_DmaMemAlloc(DMA_ADDR_T *dmaAddr, size_t size, size_t align, enum DmaMe
#ifdef LOSCFG_KERNEL_VM
kVaddr = LOS_KernelMallocAlign(size, align);
#else
kVaddr = LOS_MemAllocAlign(OS_SYS_MEM_ADDR, size, align);// no kernel VM: allocate aligned memory straight from the system memory region
kVaddr = LOS_MemAllocAlign(OS_SYS_MEM_ADDR, size, align);
#endif
if (kVaddr == NULL) {
VM_ERR("failed, size = %u, align = %u", size, align);// allocation from the memory pool failed
VM_ERR("failed, size = %u, align = %u", size, align);
return NULL;
}
if (dmaAddr != NULL) {
*dmaAddr = (DMA_ADDR_T)LOS_PaddrQuery(kVaddr);// query the physical address; the DMA engine transfers data to physical addresses directly
*dmaAddr = (DMA_ADDR_T)LOS_PaddrQuery(kVaddr);
}
if (type == DMA_NOCACHE) {// uncached mode: compute the uncached virtual address
if (type == DMA_NOCACHE) {
kVaddr = (VOID *)VMM_TO_UNCACHED_ADDR((UINTPTR)kVaddr);
}
return kVaddr;
}
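/*
 * Illustrative sketch (plain C11, not kernel code) of the allocation step above: get a
 * buffer with the alignment the device needs, then record the address that will be handed
 * to the DMA engine. User space has no physical-address query, so the sketch simply reuses
 * the buffer address as a "bus address" placeholder.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
    size_t align = 64;                      /* e.g. a cache-line aligned descriptor ring */
    size_t size  = 1024;                    /* aligned_alloc needs size % align == 0     */
    void *buf = aligned_alloc(align, size);
    if (buf == NULL) {
        return 1;
    }
    uintptr_t busAddr = (uintptr_t)buf;     /* placeholder for a physical-address query  */
    printf("vaddr=%p busAddr=0x%lx aligned=%s\n", buf, (unsigned long)busAddr,
           (busAddr % align == 0) ? "yes" : "no");
    free(buf);
    return 0;
}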
/// free DMA memory
VOID LOS_DmaMemFree(VOID *vaddr)
{
UINTPTR addr;
@ -118,13 +79,13 @@ VOID LOS_DmaMemFree(VOID *vaddr)
return;
}
addr = (UINTPTR)vaddr;
// uncached region
if ((addr >= UNCACHED_VMM_BASE) && (addr < UNCACHED_VMM_BASE + UNCACHED_VMM_SIZE)) {
addr = UNCACHED_TO_VMM_ADDR(addr);// convert back to the cached VMM address
addr = UNCACHED_TO_VMM_ADDR(addr);
#ifdef LOSCFG_KERNEL_VM
LOS_KernelFree((VOID *)addr);
#else
LOS_MemFree(OS_SYS_MEM_ADDR, (VOID *)addr);// free back to the system memory pool
LOS_MemFree(OS_SYS_MEM_ADDR, (VOID *)addr);
#endif
} else if ((addr >= KERNEL_VMM_BASE) && (addr < KERNEL_VMM_BASE + KERNEL_VMM_SIZE)) {
#ifdef LOSCFG_KERNEL_VM

File diff suppressed because it is too large Load Diff

@ -40,26 +40,26 @@
#ifdef LOSCFG_KERNEL_VM
LosVmPage *g_vmPageArray = NULL;// array of physical page frame descriptors
size_t g_vmPageArraySize;// size of the page frame array
// page frame initialization
LosVmPage *g_vmPageArray = NULL;
size_t g_vmPageArraySize;
STATIC VOID OsVmPageInit(LosVmPage *page, paddr_t pa, UINT8 segID)
{
LOS_ListInit(&page->node);// initialize the page's list node
page->flags = FILE_PAGE_FREE;// page flags, initially free
LOS_AtomicSet(&page->refCounts, 0);// reference count starts at 0
page->physAddr = pa;// physical address
page->segID = segID;// ID of the segment that manages this physical address
page->order = VM_LIST_ORDER_MAX;// initial value: not on any buddy free list
LOS_ListInit(&page->node);
page->flags = FILE_PAGE_FREE;
LOS_AtomicSet(&page->refCounts, 0);
page->physAddr = pa;
page->segID = segID;
page->order = VM_LIST_ORDER_MAX;
page->nPages = 0;
#ifdef LOSCFG_PAGE_TABLE_FINE_LOCK
LOS_SpinInit(&page->lock);
#endif
}
/// buddy-system initialization
STATIC INLINE VOID OsVmPageOrderListInit(LosVmPage *page, size_t nPages)
{// @note_why at this point every page has order VM_LIST_ORDER_MAX; can they be linked into the buddy lists?
OsVmPhysPagesFreeContiguous(page, nPages);// release the contiguous physical page frames into the buddy lists
{
OsVmPhysPagesFreeContiguous(page, nPages);
}
#define VMPAGEINIT(page, pa, segID) do { \
@ -68,10 +68,6 @@ STATIC INLINE VOID OsVmPageOrderListInit(LosVmPage *page, size_t nPages)
(pa) += PAGE_SIZE; \
} while (0)
/*!
Bring up the physical page frames:
1. allocate g_vmPageArray, one LosVmPage descriptor per 4K physical page;
2. carve the physical segments into pages and hand them all to the buddy allocator.
*/
VOID OsVmPageStartup(VOID)
{
struct VmPhysSeg *seg = NULL;
@ -80,7 +76,7 @@ VOID OsVmPageStartup(VOID)
UINT32 nPage;
INT32 segID;
OsVmPhysAreaSizeAdjust(ROUNDUP((g_vmBootMemBase - KERNEL_ASPACE_BASE), PAGE_SIZE));// adjust the size of g_physArea
OsVmPhysAreaSizeAdjust(ROUNDUP((g_vmBootMemBase - KERNEL_ASPACE_BASE), PAGE_SIZE));
/*
* Pages getting from OsVmPhysPageNumGet() interface here contain the memory
@ -89,20 +85,20 @@ VOID OsVmPageStartup(VOID)
*/
UINT32 pageNum = OsVmPhysPageNumGet();
nPage = pageNum * PAGE_SIZE / (sizeof(LosVmPage) + PAGE_SIZE);
g_vmPageArraySize = nPage * sizeof(LosVmPage);// total size of the page frame array
g_vmPageArray = (LosVmPage *)OsVmBootMemAlloc(g_vmPageArraySize);// boot-time allocation; the MMU is not initialized yet
g_vmPageArraySize = nPage * sizeof(LosVmPage);
g_vmPageArray = (LosVmPage *)OsVmBootMemAlloc(g_vmPageArraySize);
OsVmPhysAreaSizeAdjust(ROUNDUP(g_vmPageArraySize, PAGE_SIZE));
OsVmPhysSegAdd();// set up the physical segments
OsVmPhysInit();// initialize the free lists and the LRU (least recently used) replacement lists
OsVmPhysSegAdd();
OsVmPhysInit();
#ifdef LOSCFG_KERNEL_PLIMITS
OsMemLimitSetLimit(pageNum * PAGE_SIZE);
#endif
for (segID = 0; segID < g_vmPhysSegNum; segID++) {// walk the physical segments and slice each one into pages
for (segID = 0; segID < g_vmPhysSegNum; segID++) {
seg = &g_vmPhysSeg[segID];
nPage = seg->size >> PAGE_SHIFT;// total pages in this segment
nPage = seg->size >> PAGE_SHIFT;
UINT32 count = nPage >> 3; /* 3: 2 ^ 3, nPage / 8, cycle count */
UINT32 left = nPage & 0x7; /* 0x7: nPage % 8, left page */
@ -120,17 +116,17 @@ VOID OsVmPageStartup(VOID)
for (; left > 0; left--) {
VMPAGEINIT(page, pa, segID);
}
OsVmPageOrderListInit(seg->pageBase, nPage);// buddy-system init: put every page on the free lists, ready for allocation
OsVmPageOrderListInit(seg->pageBase, nPage);
}
}
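/*
 * Illustrative sketch (plain C, not kernel code) of the sizing formula above: the free
 * memory has to hold both the LosVmPage descriptors and the pages they describe, so with
 * B bytes available and a descriptor of D bytes, roughly B / (D + PAGE_SIZE) pages can be
 * managed. The descriptor size below is an assumed example value.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
    unsigned long pageNum = 32512;                 /* e.g. 127 MiB of raw memory, in pages    */
    unsigned long descSize = 32;                   /* assumed sizeof(LosVmPage) for the demo  */
    unsigned long nPage = pageNum * PAGE_SIZE / (descSize + PAGE_SIZE);
    unsigned long arrayBytes = nPage * descSize;   /* memory set aside for the descriptors    */
    printf("manageable pages: %lu, descriptor array: %lu bytes\n", nPage, arrayBytes);
    return 0;
}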
/// get the page frame of a physical address
LosVmPage *LOS_VmPageGet(PADDR_T paddr)
{
INT32 segID;
LosVmPage *page = NULL;
for (segID = 0; segID < g_vmPhysSegNum; segID++) {// physical memory is managed as segments of pages
page = OsVmPhysToPage(paddr, segID);// locate the page frame from the physical address and the segment ID
for (segID = 0; segID < g_vmPhysSegNum; segID++) {
page = OsVmPhysToPage(paddr, segID);
if (page != NULL) {
break;
}

@ -1,29 +1,3 @@
/*!
Physical memory management in LiteOS-A: physical memory is carved into 4 KiB page frames so that
pages are easy to allocate and reclaim, and the memory is laid out as

-----------------------------------------------------
kernel.bin | heap          | page frames
(image)    | (kernel heap) | (managed by the buddy allocator)
-----------------------------------------------------

The free page frames are kept on per-order buddy free lists, from which allocations are split
and into which freed blocks are merged back with their buddies.
*/
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
@ -67,36 +41,36 @@
#define ONE_PAGE 1
/* Physical memory area array */
STATIC struct VmPhysArea g_physArea[] = {///< only one area here, so only one segment gets created
/* Physical memory area array */
STATIC struct VmPhysArea g_physArea[] = {
{
.start = SYS_MEM_BASE, // base address of physical memory; #define SYS_MEM_BASE DDR_MEM_ADDR (0x80000000)
.size = SYS_MEM_SIZE_DEFAULT,// total physical memory size (0x07f00000)
.start = SYS_MEM_BASE,
.size = SYS_MEM_SIZE_DEFAULT,
},
};
struct VmPhysSeg g_vmPhysSeg[VM_PHYS_SEG_MAX]; ///< at most 32 segments
INT32 g_vmPhysSegNum = 0; ///< number of segments
/// get the segment array; a global variable placed in the .bss section
struct VmPhysSeg g_vmPhysSeg[VM_PHYS_SEG_MAX];
INT32 g_vmPhysSegNum = 0;
LosVmPhysSeg *OsGVmPhysSegGet(void)
{
return g_vmPhysSeg;
}
/// initialize the LRU replacement lists
STATIC VOID OsVmPhysLruInit(struct VmPhysSeg *seg)
{
INT32 i;
UINT32 intSave;
LOS_SpinInit(&seg->lruLock);// init the spinlock used for multi-core synchronization
LOS_SpinInit(&seg->lruLock);
LOS_SpinLockSave(&seg->lruLock, &intSave);
for (i = 0; i < VM_NR_LRU_LISTS; i++) { // five circular doubly linked lists
seg->lruSize[i] = 0; // number of nodes on this list
LOS_ListInit(&seg->lruList[i]); // init the LRU list
for (i = 0; i < VM_NR_LRU_LISTS; i++) {
seg->lruSize[i] = 0;
LOS_ListInit(&seg->lruList[i]);
}
LOS_SpinUnlockRestore(&seg->lruLock, intSave);
}
/// create a physical segment: the configured areas are converted into segments for management
STATIC INT32 OsVmPhysSegCreate(paddr_t start, size_t size)
{
struct VmPhysSeg *seg = NULL;
@ -105,8 +79,8 @@ STATIC INT32 OsVmPhysSegCreate(paddr_t start, size_t size)
return -1;
}
seg = &g_vmPhysSeg[g_vmPhysSegNum++];// claim a segment slot
for (; (seg > g_vmPhysSeg) && ((seg - 1)->start > (start + size)); seg--) {// shift entries to keep the segments ordered by start address
seg = &g_vmPhysSeg[g_vmPhysSegNum++];
for (; (seg > g_vmPhysSeg) && ((seg - 1)->start > (start + size)); seg--) {
*seg = *(seg - 1);
}
seg->start = start;
@ -114,21 +88,21 @@ STATIC INT32 OsVmPhysSegCreate(paddr_t start, size_t size)
return 0;
}
/// add the physical segments
VOID OsVmPhysSegAdd(VOID)
{
INT32 i, ret;
LOS_ASSERT(g_vmPhysSegNum < VM_PHYS_SEG_MAX);
for (i = 0; i < (sizeof(g_physArea) / sizeof(g_physArea[0])); i++) {// walk the g_physArea array
ret = OsVmPhysSegCreate(g_physArea[i].start, g_physArea[i].size);// turn each area into a managed segment
for (i = 0; i < (sizeof(g_physArea) / sizeof(g_physArea[0])); i++) {
ret = OsVmPhysSegCreate(g_physArea[i].start, g_physArea[i].size);
if (ret != 0) {
VM_ERR("create phys seg failed");
}
}
}
/// adjust the size of the physical area
VOID OsVmPhysAreaSizeAdjust(size_t size)
{
/*
@ -139,36 +113,35 @@ VOID OsVmPhysAreaSizeAdjust(size_t size)
g_physArea[0].size -= size;
}
/// get the total number of physical pages
UINT32 OsVmPhysPageNumGet(VOID)
{
UINT32 nPages = 0;
INT32 i;
for (i = 0; i < (sizeof(g_physArea) / sizeof(g_physArea[0])); i++) {
nPages += g_physArea[i].size >> PAGE_SHIFT;// shifting right by 12 bits divides by 4K, giving the page count
nPages += g_physArea[i].size >> PAGE_SHIFT;
}
return nPages;// total pages across all physical memory
return nPages;
}
/// initialize the free lists; physical page frames are allocated with the buddy system
STATIC INLINE VOID OsVmPhysFreeListInit(struct VmPhysSeg *seg)
{
int i;
UINT32 intSave;
struct VmFreeList *list = NULL;
LOS_SpinInit(&seg->freeListLock);// init the spinlock that guards allocation
LOS_SpinInit(&seg->freeListLock);
LOS_SpinLockSave(&seg->freeListLock, &intSave);
for (i = 0; i < VM_LIST_ORDER_MAX; i++) {// one free list per buddy order
list = &seg->freeList[i]; // take them one by one
LOS_ListInit(&list->node); // LosVmPage.node entries will hang off list->node
list->listCnt = 0; // the list starts empty
for (i = 0; i < VM_LIST_ORDER_MAX; i++) {
list = &seg->freeList[i];
LOS_ListInit(&list->node);
list->listCnt = 0;
}
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
}
/// initialize the physical segments
VOID OsVmPhysInit(VOID)
{
struct VmPhysSeg *seg = NULL;
@ -177,13 +150,13 @@ VOID OsVmPhysInit(VOID)
for (i = 0; i < g_vmPhysSegNum; i++) {
seg = &g_vmPhysSeg[i];
seg->pageBase = &g_vmPageArray[nPages];// record the first page frame descriptor of this segment
nPages += seg->size >> PAGE_SHIFT;// shift by 12 bits (4K pages) to get this segment's page count
OsVmPhysFreeListInit(seg); // init the buddy free lists used for allocation
OsVmPhysLruInit(seg); // init the LRU replacement lists
seg->pageBase = &g_vmPageArray[nPages];
nPages += seg->size >> PAGE_SHIFT;
OsVmPhysFreeListInit(seg);
OsVmPhysLruInit(seg);
}
}
/// Put a page frame onto a free list; allocation takes frames from these lists
STATIC VOID OsVmPhysFreeListAddUnsafe(LosVmPage *page, UINT8 order)
{
struct VmPhysSeg *seg = NULL;
@ -200,44 +173,36 @@ STATIC VOID OsVmPhysFreeListAddUnsafe(LosVmPage *page, UINT8 order)
LOS_ListTailInsert(&list->node, &page->node);
list->listCnt++;
}
///Remove a page frame from its free list, i.e. when the frame gets allocated
STATIC VOID OsVmPhysFreeListDelUnsafe(LosVmPage *page)
{
struct VmPhysSeg *seg = NULL;
struct VmFreeList *list = NULL;
if ((page->segID >= VM_PHYS_SEG_MAX) || (page->order >= VM_LIST_ORDER_MAX)) {//order == VM_LIST_ORDER_MAX is invalid too: the buddy allocator tops out at 2^8 pages
if ((page->segID >= VM_PHYS_SEG_MAX) || (page->order >= VM_LIST_ORDER_MAX)) {
LOS_Panic("The page segment id(%u) or order(%u) is invalid\n", page->segID, page->order);
}
seg = &g_vmPhysSeg[page->segID]; //segment this page frame belongs to
list = &seg->freeList[page->order]; //free list for the frame's buddy order
list->listCnt--; //one node fewer on the list
LOS_ListDelete(&page->node); //unlink the frame from the list
page->order = VM_LIST_ORDER_MAX; //mark the frame as no longer on a free list; checked by the assert in OsVmPhysPagesSpiltUnsafe
seg = &g_vmPhysSeg[page->segID];
list = &seg->freeList[page->order];
list->listCnt--;
LOS_ListDelete(&page->node);
page->order = VM_LIST_ORDER_MAX;
}
/**
 * @brief Split a free block: carve an order-newOrder block down to order-oldOrder,
 *        returning every trimmed buddy to the matching free list. A worked
 *        example follows the function.
 * @param page first page frame of the block being split
 * @param oldOrder order actually needed by the caller (e.g. 2^2 pages)
 * @param newOrder order of the block taken from the free lists (e.g. 2^8 pages)
 * @return STATIC VOID
 */
STATIC VOID OsVmPhysPagesSpiltUnsafe(LosVmPage *page, UINT8 oldOrder, UINT8 newOrder)
{
UINT32 order;
LosVmPage *buddyPage = NULL;
for (order = newOrder; order > oldOrder;) {//chop the surplus into order 2^7, 2^6, ... blocks,
order--;//smaller each round, and hang each one on its free list
buddyPage = &page[VM_ORDER_TO_PAGES(order)];//the surplus half; LosVmPage lives in one big array, so page[nPages] indexes it directly
LOS_ASSERT(buddyPage->order == VM_LIST_ORDER_MAX);//a frame not on any buddy free list must carry order VM_LIST_ORDER_MAX
OsVmPhysFreeListAddUnsafe(buddyPage, order);//put the split-off block on the list for this order; sets buddyPage->order = order
for (order = newOrder; order > oldOrder;) {
order--;
buddyPage = &page[VM_ORDER_TO_PAGES(order)];
LOS_ASSERT(buddyPage->order == VM_LIST_ORDER_MAX);
OsVmPhysFreeListAddUnsafe(buddyPage, order);
}
}
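/*
 * Illustrative walk-through of the split (assumes VM_ORDER_TO_PAGES(n) == 1 << n,
 * as the loop above uses it): carving an order-4 block (16 pages) down to order-1
 * (2 pages) hands back
 *   order 3: pages [8..15] -> freeList[3]
 *   order 2: pages [4..7]  -> freeList[2]
 *   order 1: pages [2..3]  -> freeList[1]
 * and leaves pages [0..1] with the caller; 8 + 4 + 2 + 2 == 16, nothing is lost.
 */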
///Get the page frame that a physical address belongs to within the given segment
LosVmPage *OsVmPhysToPage(paddr_t pa, UINT8 segID)
{
struct VmPhysSeg *seg = NULL;
@ -251,8 +216,8 @@ LosVmPage *OsVmPhysToPage(paddr_t pa, UINT8 segID)
return NULL;
}
offset = pa - seg->start;//offset of the physical address within the segment
return (seg->pageBase + (offset >> PAGE_SHIFT));//corresponding page frame
offset = pa - seg->start;
return (seg->pageBase + (offset >> PAGE_SHIFT));
}
LosVmPage *OsVmPaddrToPage(paddr_t paddr)
@ -268,37 +233,31 @@ LosVmPage *OsVmPaddrToPage(paddr_t paddr)
}
return NULL;
}
/*!
 * @brief Convert a page frame to its kernel virtual address. The kernel address
 *        space is mapped linearly onto physical memory (see OsArchMmuInit), with
 *        SYS_MEM_BASE (#define SYS_MEM_BASE DDR_MEM_ADDR) as the physical base,
 *        so the conversion is pure arithmetic; a worked example follows the function.
 * @param page page frame to convert
 * @return VOID* kernel virtual address of the frame
 */
VOID *OsVmPageToVaddr(LosVmPage *page)//
VOID *OsVmPageToVaddr(LosVmPage *page)
{
VADDR_T vaddr;
vaddr = KERNEL_ASPACE_BASE + page->physAddr - SYS_MEM_BASE;//the page's offset into physical memory equals its offset into the kernel address space
return (VOID *)(UINTPTR)vaddr;//no mapping table needs to be stored for this; see the page-table analysis notes
vaddr = KERNEL_ASPACE_BASE + page->physAddr - SYS_MEM_BASE;
return (VOID *)(UINTPTR)vaddr;
}
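#if 0 // @note_example: illustrative only, not part of the build; the 0x80000000 base
      // is the SYS_MEM_BASE value annotated at the top of this file.
STATIC VOID VmPageToVaddrExample(VOID)
{
    /* A frame at physical SYS_MEM_BASE + 0x2000 maps to KERNEL_ASPACE_BASE + 0x2000:
     * the offset into physical memory equals the offset into the kernel address
     * space, so the conversion needs no lookup table at all. */
    PADDR_T pa = SYS_MEM_BASE + 0x2000;
    VADDR_T va = KERNEL_ASPACE_BASE + (pa - SYS_MEM_BASE);
    VADDR_T *kva = LOS_PaddrToKVaddr(pa); /* LOS_PaddrToKVaddr below does the same arithmetic */
    (VOID)va;
    (VOID)kva;
}
#endif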
///Find the physical page frame mapped by a virtual address
LosVmPage *OsVmVaddrToPage(VOID *ptr)
{
struct VmPhysSeg *seg = NULL;
PADDR_T pa = LOS_PaddrQuery(ptr);//look up the physical address behind the virtual address
PADDR_T pa = LOS_PaddrQuery(ptr);
UINT32 segID;
for (segID = 0; segID < g_vmPhysSegNum; segID++) {//walk every segment
for (segID = 0; segID < g_vmPhysSegNum; segID++) {
seg = &g_vmPhysSeg[segID];
if ((pa >= seg->start) && (pa < (seg->start + seg->size))) {//segment that contains the physical address
return seg->pageBase + ((pa - seg->start) >> PAGE_SHIFT);//segment base plus page index gives the mapped frame
if ((pa >= seg->start) && (pa < (seg->start + seg->size))) {
return seg->pageBase + ((pa - seg->start) >> PAGE_SHIFT);
}
}
return NULL;
}
/// Reclaim the page frames in the given range
STATIC INLINE VOID OsVmRecycleExtraPages(LosVmPage *page, size_t startPage, size_t endPage)
{
if (startPage >= endPage) {
@ -307,7 +266,7 @@ STATIC INLINE VOID OsVmRecycleExtraPages(LosVmPage *page, size_t startPage, size
OsVmPhysPagesFreeContiguous(page, endPage - startPage);
}
/// Allocate a large block of physical memory
STATIC LosVmPage *OsVmPhysLargeAlloc(struct VmPhysSeg *seg, size_t nPages)
{
struct VmFreeList *list = NULL;
@ -317,11 +276,11 @@ STATIC LosVmPage *OsVmPhysLargeAlloc(struct VmPhysSeg *seg, size_t nPages)
PADDR_T paEnd;
size_t size = nPages << PAGE_SHIFT;
list = &seg->freeList[VM_LIST_ORDER_MAX - 1];//start from the largest buddy blocks
LOS_DL_LIST_FOR_EACH_ENTRY(page, &list->node, LosVmPage, node) {//walk the list
list = &seg->freeList[VM_LIST_ORDER_MAX - 1];
LOS_DL_LIST_FOR_EACH_ENTRY(page, &list->node, LosVmPage, node) {
paStart = page->physAddr;
paEnd = paStart + size;
if (paEnd > (seg->start + seg->size)) {//the range must stay inside the segment
if (paEnd > (seg->start + seg->size)) {
continue;
}
@ -343,7 +302,7 @@ STATIC LosVmPage *OsVmPhysLargeAlloc(struct VmPhysSeg *seg, size_t nPages)
return NULL;
}
/// Allocate physical pages and keep them on the proper free lists
STATIC LosVmPage *OsVmPhysPagesAlloc(struct VmPhysSeg *seg, size_t nPages)
{
struct VmFreeList *list = NULL;
@ -353,13 +312,13 @@ STATIC LosVmPage *OsVmPhysPagesAlloc(struct VmPhysSeg *seg, size_t nPages)
UINT32 newOrder;
order = OsVmPagesToOrder(nPages);
if (order < VM_LIST_ORDER_MAX) {//normal buddy allocation
for (newOrder = order; newOrder < VM_LIST_ORDER_MAX; newOrder++) {//search from small orders up
if (order < VM_LIST_ORDER_MAX) {
for (newOrder = order; newOrder < VM_LIST_ORDER_MAX; newOrder++) {
list = &seg->freeList[newOrder];
if (LOS_ListEmpty(&list->node)) {//nothing free at this order
continue;//keep looking at larger orders
if (LOS_ListEmpty(&list->node)) {
continue;
}
page = LOS_DL_LIST_ENTRY(LOS_DL_LIST_FIRST(&list->node), LosVmPage, node);//found: take the first node
page = LOS_DL_LIST_ENTRY(LOS_DL_LIST_FIRST(&list->node), LosVmPage, node);
goto DONE;
}
} else {
@ -380,7 +339,7 @@ DONE:
return page;
}
/// Free page frames; freeing simply means hanging the pages back on the free lists
VOID OsVmPhysPagesFree(LosVmPage *page, UINT8 order)
{
paddr_t pa;
@ -390,59 +349,51 @@ VOID OsVmPhysPagesFree(LosVmPage *page, UINT8 order)
return;
}
if (order < VM_LIST_ORDER_MAX - 1) {//order in [0,7]
pa = VM_PAGE_TO_PHYS(page);//physical address of the page
do {//XOR walk
pa ^= VM_ORDER_TO_PHYS(order);//flip the order bit to jump to the buddy block's physical address
buddyPage = OsVmPhysToPage(pa, page->segID);//page frame at that physical address
if ((buddyPage == NULL) || (buddyPage->order != order)) {//the buddy must sit on the same order to merge
if (order < VM_LIST_ORDER_MAX - 1) {
pa = VM_PAGE_TO_PHYS(page);
do {
pa ^= VM_ORDER_TO_PHYS(order);
buddyPage = OsVmPhysToPage(pa, page->segID);
if ((buddyPage == NULL) || (buddyPage->order != order)) {
break;
}
OsVmPhysFreeListDelUnsafe(buddyPage);//buddyPage heads a contiguous block, e.g. order 2 means the 4-page block |_|_|_|_|
OsVmPhysFreeListDelUnsafe(buddyPage);
order++;
pa &= ~(VM_ORDER_TO_PHYS(order) - 1);
page = OsVmPhysToPage(pa, page->segID);
} while (order < VM_LIST_ORDER_MAX - 1);
}
OsVmPhysFreeListAddUnsafe(page, order);//buddy algorithm: one more free node
OsVmPhysFreeListAddUnsafe(page, order);
}
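/*
 * Illustrative buddy lookup during a free (assumes VM_ORDER_TO_PHYS(n) == PAGE_SIZE << n,
 * matching its use above). Freeing the order-0 page at physical 0x80003000:
 *   0x80003000 ^ 0x1000  = 0x80002000  -> its order-0 buddy; if that is free, merge
 *   0x80002000 & ~0x1FFF = 0x80002000  -> start of the merged order-1 block
 *   0x80002000 ^ 0x2000  = 0x80000000  -> the order-1 buddy, and so on upwards
 * The XOR flips exactly the bit that separates a block from its buddy at that order.
 */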
///Free contiguous page frames; e.g. 8 adjacent pages are released together
VOID OsVmPhysPagesFreeContiguous(LosVmPage *page, size_t nPages)
{
paddr_t pa;
UINT32 order;
size_t n;
while (TRUE) {//loop until broken out of
pa = VM_PAGE_TO_PHYS(page);//physical address of the page
order = VM_PHYS_TO_ORDER(pa);//buddy order implied by the address alignment
n = VM_ORDER_TO_PAGES(order);//pages in a block of that order (1 << order), e.g. order 3 frees 8 pages at once
if (n > nPages) {//stop once fewer than 2^order pages remain
while (TRUE) {
pa = VM_PAGE_TO_PHYS(page);
order = VM_PHYS_TO_ORDER(pa);
n = VM_ORDER_TO_PAGES(order);
if (n > nPages) {
break;
}
OsVmPhysPagesFree(page, order);//free the whole block of this order
nPages -= n;//fewer pages left to free
page += n;//advance past the freed pages
OsVmPhysPagesFree(page, order);
nPages -= n;
page += n;
}
//e.g. with 7 page frames left, they are released as 2^2, then 2^1, then 2^0
while (nPages > 0) {
order = LOS_HighBitGet(nPages);//release from the highest remaining order down
n = VM_ORDER_TO_PAGES(order);//2^order pages
OsVmPhysPagesFree(page, order);//free that block
order = LOS_HighBitGet(nPages);
n = VM_ORDER_TO_PAGES(order);
OsVmPhysPagesFree(page, order);
nPages -= n;
page += n;//equivalent to &page[n]
page += n;
}
}
/*!
 * @brief Allocate nPages of physically contiguous memory and return the first
 *        LosVmPage; the run length is recorded in that first page's nPages field.
 * @param nPages number of pages requested
 * @return first page frame of the run, or NULL on failure
 */
STATIC LosVmPage *OsVmPhysPagesGet(size_t nPages)
{
UINT32 intSave;
@ -453,11 +404,11 @@ STATIC LosVmPage *OsVmPhysPagesGet(size_t nPages)
for (segID = 0; segID < g_vmPhysSegNum; segID++) {
seg = &g_vmPhysSeg[segID];
LOS_SpinLockSave(&seg->freeListLock, &intSave);
page = OsVmPhysPagesAlloc(seg, nPages);//allocate the requested pages; nPages must not exceed what the buddy allocator can hand out at once
if (page != NULL) {//allocation succeeded
/* */
LOS_AtomicSet(&page->refCounts, 0);//reference count starts at 0
page->nPages = nPages;//record the run length
page = OsVmPhysPagesAlloc(seg, nPages);
if (page != NULL) {
/* the first page of continuous physical addresses holds refCounts */
LOS_AtomicSet(&page->refCounts, 0);
page->nPages = nPages;
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
return page;
}
@ -465,7 +416,7 @@ STATIC LosVmPage *OsVmPhysPagesGet(size_t nPages)
}
return NULL;
}
///Allocate physically contiguous pages
VOID *LOS_PhysPagesAllocContiguous(size_t nPages)
{
LosVmPage *page = NULL;
@ -473,15 +424,15 @@ VOID *LOS_PhysPagesAllocContiguous(size_t nPages)
if (nPages == 0) {
return NULL;
}
//nPages may not exceed 2^8 = 256 pages (1MB); this path is kernel-only, user-mode allocations are not limited this way
page = OsVmPhysPagesGet(nPages);//get physically contiguous pages from the buddy allocator
page = OsVmPhysPagesGet(nPages);
if (page == NULL) {
return NULL;
}
return OsVmPageToVaddr(page);//convert the page frame to a kernel virtual address
return OsVmPageToVaddr(page);
}
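#if 0 // @note_example: hypothetical caller, illustrative only.
STATIC VOID ContiguousAllocExample(VOID)
{
    /* Ask the buddy allocator for 4 physically contiguous pages (16KiB with 4K pages)
     * and receive the kernel virtual address of the first one. */
    VOID *buf = LOS_PhysPagesAllocContiguous(4);
    if (buf != NULL) {
        /* ... use the buffer through its kernel virtual address ... */
        LOS_PhysPagesFreeContiguous(buf, 4); /* give all 4 pages back */
    }
}
#endif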
/// Free the given number of physically contiguous pages
VOID LOS_PhysPagesFreeContiguous(VOID *ptr, size_t nPages)
{
UINT32 intSave;
@ -492,17 +443,17 @@ VOID LOS_PhysPagesFreeContiguous(VOID *ptr, size_t nPages)
return;
}
page = OsVmVaddrToPage(ptr);//find the page frame behind the virtual address
page = OsVmVaddrToPage(ptr);
if (page == NULL) {
VM_ERR("vm page of ptr(%#x) is null", ptr);
return;
}
page->nPages = 0;//mark the run as no longer allocated
page->nPages = 0;
seg = &g_vmPhysSeg[page->segID];
LOS_SpinLockSave(&seg->freeListLock, &intSave);
OsVmPhysPagesFreeContiguous(page, nPages);//do the actual release
OsVmPhysPagesFreeContiguous(page, nPages);
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
#ifdef LOSCFG_KERNEL_PLIMITS
@ -517,7 +468,7 @@ PADDR_T OsKVaddrToPaddr(VADDR_T kvaddr)
}
return (kvaddr - KERNEL_ASPACE_BASE + SYS_MEM_BASE);
}
/// Get the kernel virtual address for a physical address
VADDR_T *LOS_PaddrToKVaddr(PADDR_T paddr)
{
struct VmPhysSeg *seg = NULL;
@ -533,10 +484,10 @@ VADDR_T *LOS_PaddrToKVaddr(PADDR_T paddr)
return (VADDR_T *)(UINTPTR)(paddr - SYS_MEM_BASE + KERNEL_ASPACE_BASE);
}
}
//kernel address space: same linear conversion
return (VADDR_T *)(UINTPTR)(paddr - SYS_MEM_BASE + KERNEL_ASPACE_BASE);
return (VADDR_T *)(UINTPTR)(paddr - SYS_MEM_BASE + KERNEL_ASPACE_BASE);
}
///Free a single physical page frame
VOID LOS_PhysPageFree(LosVmPage *page)
{
UINT32 intSave;
@ -546,12 +497,12 @@ VOID LOS_PhysPageFree(LosVmPage *page)
return;
}
if (LOS_AtomicDecRet(&page->refCounts) <= 0) {//free only once the last reference is dropped
if (LOS_AtomicDecRet(&page->refCounts) <= 0) {
seg = &g_vmPhysSeg[page->segID];
LOS_SpinLockSave(&seg->freeListLock, &intSave);
OsVmPhysPagesFreeContiguous(page, ONE_PAGE);//release one page
LOS_AtomicSet(&page->refCounts, 0);//once the memory is freed the reference count is reset to 0
OsVmPhysPagesFreeContiguous(page, ONE_PAGE);
LOS_AtomicSet(&page->refCounts, 0);
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
}
@ -559,22 +510,12 @@ VOID LOS_PhysPageFree(LosVmPage *page)
OsMemLimitMemFree(PAGE_SIZE);
#endif
}
/// Allocate one physical page
LosVmPage *LOS_PhysPageAlloc(VOID)
{
return OsVmPhysPagesGet(ONE_PAGE);//allocate a single page
return OsVmPhysPagesGet(ONE_PAGE);
}
/*!
 * @brief LOS_PhysPagesAlloc allocates nPages individual page frames and queues
 *        them on the caller-supplied list; the pages need not be physically contiguous.
 * @param nPages number of pages to allocate
 * @param list list the allocated LosVmPage nodes are appended to
 * @return number of pages actually allocated
 */
size_t LOS_PhysPagesAlloc(size_t nPages, LOS_DL_LIST *list)
{
LosVmPage *page = NULL;
@ -585,17 +526,17 @@ size_t LOS_PhysPagesAlloc(size_t nPages, LOS_DL_LIST *list)
}
while (nPages--) {
page = OsVmPhysPagesGet(ONE_PAGE);//allocate page by page through the buddy allocator
page = OsVmPhysPagesGet(ONE_PAGE);
if (page == NULL) {
break;
}
LOS_ListTailInsert(list, &page->node);//append the new page node to the caller's list
LOS_ListTailInsert(list, &page->node);
count++;
}
return count;
}
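#if 0 // @note_example: hypothetical caller, illustrative only.
STATIC VOID PageListAllocExample(VOID)
{
    LOS_DL_LIST pageList;
    size_t got;

    LOS_ListInit(&pageList);
    /* Grab up to 8 single pages; they need not be physically contiguous,
     * each allocated LosVmPage is simply queued on pageList. */
    got = LOS_PhysPagesAlloc(8, &pageList);
    if (got != 0) {
        /* ... map or fill the pages ... */
        (VOID)LOS_PhysPagesFree(&pageList); /* drop the refs and return them to the buddy lists */
    }
}
#endif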
///Copy a shared page
VOID OsPhysSharePageCopy(PADDR_T oldPaddr, PADDR_T *newPaddr, LosVmPage *newPage)
{
UINT32 intSave;
@ -609,43 +550,43 @@ VOID OsPhysSharePageCopy(PADDR_T oldPaddr, PADDR_T *newPaddr, LosVmPage *newPage
return;
}
oldPage = LOS_VmPageGet(oldPaddr);//page frame behind the physical address
oldPage = LOS_VmPageGet(oldPaddr);
if (oldPage == NULL) {
VM_ERR("invalid oldPaddr %p", oldPaddr);
return;
}
seg = &g_vmPhysSeg[oldPage->segID];//physical segment of the old page
seg = &g_vmPhysSeg[oldPage->segID];
LOS_SpinLockSave(&seg->freeListLock, &intSave);
if (LOS_AtomicRead(&oldPage->refCounts) == 1) {//only one process references the page
*newPaddr = oldPaddr;//old and new point at the same physical page
} else {//the page is genuinely shared
newMem = LOS_PaddrToKVaddr(*newPaddr); //kernel virtual address of the new page
oldMem = LOS_PaddrToKVaddr(oldPaddr); //kernel virtual address of the old page
if (LOS_AtomicRead(&oldPage->refCounts) == 1) {
*newPaddr = oldPaddr;
} else {
newMem = LOS_PaddrToKVaddr(*newPaddr);
oldMem = LOS_PaddrToKVaddr(oldPaddr);
if ((newMem == NULL) || (oldMem == NULL)) {
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
return;
}//remember: in protected mode a physical address is only for arithmetic; any access (including copying) goes through a virtual address
if (memcpy_s(newMem, PAGE_SIZE, oldMem, PAGE_SIZE) != EOK) {//copy one page of data from the old page to the new one via their virtual addresses
}
if (memcpy_s(newMem, PAGE_SIZE, oldMem, PAGE_SIZE) != EOK) {
VM_ERR("memcpy_s failed");
}
LOS_AtomicInc(&newPage->refCounts);//atomically increment the new page's reference count
LOS_AtomicDec(&oldPage->refCounts);//atomically decrement the old page's reference count
LOS_AtomicInc(&newPage->refCounts);
LOS_AtomicDec(&oldPage->refCounts);
}
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
return;
}
///Get the segment a page frame belongs to
struct VmPhysSeg *OsVmPhysSegGet(LosVmPage *page)
{
if ((page == NULL) || (page->segID >= VM_PHYS_SEG_MAX)) {
return NULL;
}
return (OsGVmPhysSegGet() + page->segID);//equivalent to OsGVmPhysSegGet()[page->segID]
return (OsGVmPhysSegGet() + page->segID);
}
///Round nPages up to a buddy order, e.g. 7 -> 2^3, returning 3
UINT32 OsVmPagesToOrder(size_t nPages)
{
UINT32 order;
@ -654,7 +595,7 @@ UINT32 OsVmPagesToOrder(size_t nPages)
return order;
}
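/*
 * Illustrative values (the comment above gives 7 -> 3):
 *   nPages = 1 -> order 0 (exactly 1 page)
 *   nPages = 7 -> order 3 (rounded up to 8 pages)
 *   nPages = 9 -> order 4 (rounded up to 16 pages)
 * Requests too large for the per-order free lists are served by the large-block
 * path (see OsVmPhysLargeAlloc earlier in this file).
 */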
///Free every node on the list; the pages go back onto the buddy order lists
size_t LOS_PhysPagesFree(LOS_DL_LIST *list)
{
UINT32 intSave;
@ -667,16 +608,16 @@ size_t LOS_PhysPagesFree(LOS_DL_LIST *list)
return 0;
}
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(page, nPage, list, LosVmPage, node) {//iterate over the list
LOS_ListDelete(&page->node);//unlink the page first
if (LOS_AtomicDecRet(&page->refCounts) <= 0) {//no references left
seg = &g_vmPhysSeg[page->segID];//segment of the page
LOS_SpinLockSave(&seg->freeListLock, &intSave);//lock the free lists
OsVmPhysPagesFreeContiguous(page, ONE_PAGE);//ONE_PAGE is slightly misleading: contiguous frames are released together, e.g. 3 adjacent pages go back at once
LOS_AtomicSet(&page->refCounts, 0);//reset the reference count
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);//unlock
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(page, nPage, list, LosVmPage, node) {
LOS_ListDelete(&page->node);
if (LOS_AtomicDecRet(&page->refCounts) <= 0) {
seg = &g_vmPhysSeg[page->segID];
LOS_SpinLockSave(&seg->freeListLock, &intSave);
OsVmPhysPagesFreeContiguous(page, ONE_PAGE);
LOS_AtomicSet(&page->refCounts, 0);
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
}
count++;//move on to the next node
count++;
}
return count;
@ -691,3 +632,4 @@ VADDR_T *LOS_PaddrToKVaddr(PADDR_T paddr)
return (VADDR_T *)DMA_TO_VMM_ADDR(paddr);
}
#endif

@ -37,10 +37,6 @@
#ifdef LOSCFG_KERNEL_VM
/* unmap a lru page by map record info caller need lru lock */
/**************************************************************************************************
 Unmap a file page from the address space (MMU) recorded in the given map info;
 the caller must hold the LRU lock.
**************************************************************************************************/
VOID OsUnmapPageLocked(LosFilePage *page, LosMapInfo *info)
{
if (page == NULL || info == NULL) {
@ -51,88 +47,88 @@ VOID OsUnmapPageLocked(LosFilePage *page, LosMapInfo *info)
LOS_ListDelete(&info->node);
LOS_AtomicDec(&page->vmPage->refCounts);
LOS_ArchMmuUnmap(info->archMmu, info->vaddr, 1);
LOS_MemFree(m_aucSysMem0, info);//release the map info
LOS_MemFree(m_aucSysMem0, info);
}
///Remove the file page's mappings in every process
VOID OsUnmapAllLocked(LosFilePage *page)
{
LosMapInfo *info = NULL;
LosMapInfo *next = NULL;
LOS_DL_LIST *immap = &page->i_mmap;
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(info, next, immap, LosMapInfo, node) {//walk the immap->info list
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(info, next, immap, LosMapInfo, node) {
OsUnmapPageLocked(page, info);
}
}
/* add a new lru node to lru list, lruType can be file or anon */
VOID OsLruCacheAdd(LosFilePage *fpage, enum OsLruList lruType)//add a new LRU node; lruType is either file or anonymous
VOID OsLruCacheAdd(LosFilePage *fpage, enum OsLruList lruType)
{
UINT32 intSave;
LosVmPhysSeg *physSeg = fpage->physSeg; //segment the page belongs to
LosVmPage *page = fpage->vmPage; //physical page frame
LosVmPhysSeg *physSeg = fpage->physSeg;
LosVmPage *page = fpage->vmPage;
LOS_SpinLockSave(&physSeg->lruLock, &intSave);//spin lock: held by at most one core at a time
OsSetPageActive(page); //mark the page active
OsCleanPageReferenced(page);//clear the referenced bit
physSeg->lruSize[lruType]++; //one more page on the lruType list
LOS_ListTailInsert(&physSeg->lruList[lruType], &fpage->lru);//append to the lruType circular list
LOS_SpinLockSave(&physSeg->lruLock, &intSave);
OsSetPageActive(page);
OsCleanPageReferenced(page);
physSeg->lruSize[lruType]++;
LOS_ListTailInsert(&physSeg->lruList[lruType], &fpage->lru);
LOS_SpinUnlockRestore(&physSeg->lruLock, intSave);//unlock
LOS_SpinUnlockRestore(&physSeg->lruLock, intSave);
}
/* dellete a lru node, caller need hold lru_lock */
VOID OsLruCacheDel(LosFilePage *fpage)//delete an LRU node; the caller must hold the LRU lock
/* delete a lru node, caller need hold lru_lock */
VOID OsLruCacheDel(LosFilePage *fpage)
{
LosVmPhysSeg *physSeg = fpage->physSeg; //segment the page belongs to
int type = OsIsPageActive(fpage->vmPage) ? VM_LRU_ACTIVE_FILE : VM_LRU_INACTIVE_FILE;//LRU list the page is on
LosVmPhysSeg *physSeg = fpage->physSeg;
int type = OsIsPageActive(fpage->vmPage) ? VM_LRU_ACTIVE_FILE : VM_LRU_INACTIVE_FILE;
physSeg->lruSize[type]--; //one page fewer on that list
LOS_ListDelete(&fpage->lru);//unlink the page from the LRU list
physSeg->lruSize[type]--;
LOS_ListDelete(&fpage->lru);
}
///Is the inactive file list shorter than the active file list?
BOOL OsInactiveListIsLow(LosVmPhysSeg *physSeg)
{
return (physSeg->lruSize[VM_LRU_ACTIVE_FILE] >
physSeg->lruSize[VM_LRU_INACTIVE_FILE]) ? TRUE : FALSE;//comparing the sizes directly is cheap
physSeg->lruSize[VM_LRU_INACTIVE_FILE]) ? TRUE : FALSE;
}
/* move a page from inactive list to active list head */
STATIC INLINE VOID OsMoveToActiveList(LosFilePage *fpage)//move a page from the inactive list to the active list
STATIC INLINE VOID OsMoveToActiveList(LosFilePage *fpage)
{
LosVmPhysSeg *physSeg = fpage->physSeg; //segment the page belongs to
LosVmPhysSeg *physSeg = fpage->physSeg;
physSeg->lruSize[VM_LRU_ACTIVE_FILE]++; //active list grows
physSeg->lruSize[VM_LRU_INACTIVE_FILE]--; //inactive list shrinks
LOS_ListDelete(&fpage->lru); //unlink from the current LRU list
LOS_ListTailInsert(&physSeg->lruList[VM_LRU_ACTIVE_FILE], &fpage->lru);//append to the active circular list
physSeg->lruSize[VM_LRU_ACTIVE_FILE]++;
physSeg->lruSize[VM_LRU_INACTIVE_FILE]--;
LOS_ListDelete(&fpage->lru);
LOS_ListTailInsert(&physSeg->lruList[VM_LRU_ACTIVE_FILE], &fpage->lru);
}
/* move a page from active list to inactive list head */
STATIC INLINE VOID OsMoveToInactiveList(LosFilePage *fpage)//move a page from the active list to the inactive list
STATIC INLINE VOID OsMoveToInactiveList(LosFilePage *fpage)
{
LosVmPhysSeg *physSeg = fpage->physSeg; //segment the page belongs to
LosVmPhysSeg *physSeg = fpage->physSeg;
physSeg->lruSize[VM_LRU_ACTIVE_FILE]--; //active list shrinks
physSeg->lruSize[VM_LRU_INACTIVE_FILE]++; //inactive list grows
LOS_ListDelete(&fpage->lru); //unlink from the current LRU list
LOS_ListTailInsert(&physSeg->lruList[VM_LRU_INACTIVE_FILE], &fpage->lru);//append to the inactive circular list
physSeg->lruSize[VM_LRU_ACTIVE_FILE]--;
physSeg->lruSize[VM_LRU_INACTIVE_FILE]++;
LOS_ListDelete(&fpage->lru);
LOS_ListTailInsert(&physSeg->lruList[VM_LRU_INACTIVE_FILE], &fpage->lru);
}
/* move a page to the most active pos in lru list(active head) *///i.e. the head of the active LRU list
/* move a page to the most active pos in lru list(active head) */
STATIC INLINE VOID OsMoveToActiveHead(LosFilePage *fpage)
{
LosVmPhysSeg *physSeg = fpage->physSeg; //segment the page belongs to
LOS_ListDelete(&fpage->lru); //unlink from the current LRU list
LOS_ListTailInsert(&physSeg->lruList[VM_LRU_ACTIVE_FILE], &fpage->lru);//append to the active circular list
LosVmPhysSeg *physSeg = fpage->physSeg;
LOS_ListDelete(&fpage->lru);
LOS_ListTailInsert(&physSeg->lruList[VM_LRU_ACTIVE_FILE], &fpage->lru);
}
/* move a page to the most active pos in lru list(inactive head) */
STATIC INLINE VOID OsMoveToInactiveHead(LosFilePage *fpage)//LiteOS reclaims from the tail of the inactive list, just like Linux
STATIC INLINE VOID OsMoveToInactiveHead(LosFilePage *fpage)
{
LosVmPhysSeg *physSeg = fpage->physSeg; //segment the page belongs to
LOS_ListDelete(&fpage->lru); //unlink from the current LRU list
LOS_ListTailInsert(&physSeg->lruList[VM_LRU_INACTIVE_FILE], &fpage->lru);//append to the inactive circular list
LosVmPhysSeg *physSeg = fpage->physSeg;
LOS_ListDelete(&fpage->lru);
LOS_ListTailInsert(&physSeg->lruList[VM_LRU_INACTIVE_FILE], &fpage->lru);
}
/* page referenced add: (call by page cache get)
@ -142,7 +138,7 @@ ref:0, act:0 --> ref:1, act:0
ref:1, act:0 --> ref:0, act:1
ref:0, act:1 --> ref:1, act:1
*/
VOID OsPageRefIncLocked(LosFilePage *fpage)// ref/act flag transitions
VOID OsPageRefIncLocked(LosFilePage *fpage)
{
BOOL isOrgActive;
UINT32 intSave;
@ -152,16 +148,16 @@ VOID OsPageRefIncLocked(LosFilePage *fpage)// ref/act flag transitions
return;
}
LOS_SpinLockSave(&fpage->physSeg->lruLock, &intSave);//take the LRU lock before touching the lists
LOS_SpinLockSave(&fpage->physSeg->lruLock, &intSave);
page = fpage->vmPage;//physical page frame
isOrgActive = OsIsPageActive(page);//was the page active before?
page = fpage->vmPage;
isOrgActive = OsIsPageActive(page);
if (OsIsPageReferenced(page) && !OsIsPageActive(page)) {//page carries the referenced flag but not the active flag
OsCleanPageReferenced(page);//drop the referenced flag: ref:1, act:0 --> ref:0, act:1
OsSetPageActive(page); //and set the active flag
if (OsIsPageReferenced(page) && !OsIsPageActive(page)) {
OsCleanPageReferenced(page);
OsSetPageActive(page);
} else if (!OsIsPageReferenced(page)) {
OsSetPageReferenced(page);//ref:0, act:0 --> ref:1, act:0
OsSetPageReferenced(page);
}
if (!isOrgActive && OsIsPageActive(page)) {
@ -179,14 +175,14 @@ VOID OsPageRefIncLocked(LosFilePage *fpage)// ref/act flag transitions
LOS_SpinUnlockRestore(&fpage->physSeg->lruLock, intSave);
}
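/*
 * Illustrative life of one file page under the two transition tables here
 * (flags shown as ref/act):
 *   fresh page:              0/0  on the inactive list
 *   1st OsPageRefIncLocked:  1/0  still inactive
 *   2nd OsPageRefIncLocked:  0/1  promoted to the active list
 *   OsPageRefDecNoLock:      1/0  demoted again once the shrinker scans it
 * So a page needs two touches before it is treated as hot.
 */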
/* page referenced dec: (call by thrinker)
/* page referenced dec: (call by shrinker)
----------inactive----------|----------active------------
[ref:0,act:0], [ref:1,act:0]|[ref:0,act:1], [ref:1,act:1]
ref:1, act:1 --> ref:0, act:1
ref:0, act:1 --> ref:1, act:0
ref:1, act:0 --> ref:0, act:0
*/
VOID OsPageRefDecNoLock(LosFilePage *fpage) // ref/act flag transitions
VOID OsPageRefDecNoLock(LosFilePage *fpage)
{
BOOL isOrgActive;
LosVmPage *page = NULL;
@ -198,7 +194,7 @@ VOID OsPageRefDecNoLock(LosFilePage *fpage) // ref/act flag transitions
page = fpage->vmPage;
isOrgActive = OsIsPageActive(page);
if (!OsIsPageReferenced(page) && OsIsPageActive(page)) {//the [ref:0,act:1] case
if (!OsIsPageReferenced(page) && OsIsPageActive(page)) {
OsCleanPageActive(page);
OsSetPageReferenced(page);
} else if (OsIsPageReferenced(page)) {
@ -209,39 +205,39 @@ VOID OsPageRefDecNoLock(LosFilePage *fpage) // ref ,act 标签转换功能
OsMoveToInactiveList(fpage);
}
}
///Shrink the active page list
VOID OsShrinkActiveList(LosVmPhysSeg *physSeg, int nScan)
{
LosFilePage *fpage = NULL;
LosFilePage *fnext = NULL;
LOS_DL_LIST *activeFile = &physSeg->lruList[VM_LRU_ACTIVE_FILE];
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, activeFile, LosFilePage, lru) {//handle one page at a time
if (LOS_SpinTrylock(&fpage->mapping->list_lock) != LOS_OK) {//try to take the page_mapping lock of the file page
continue;//move on to the next file page
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, activeFile, LosFilePage, lru) {
if (LOS_SpinTrylock(&fpage->mapping->list_lock) != LOS_OK) {
continue;
}
/* happend when caller hold cache lock and try reclaim this page */
if (OsIsPageLocked(fpage->vmPage)) {//is the page locked?
LOS_SpinUnlock(&fpage->mapping->list_lock);//must release the page_mapping lock before skipping
continue;//move on to the next file page
/* happened when caller hold cache lock and try reclaim this page */
if (OsIsPageLocked(fpage->vmPage)) {
LOS_SpinUnlock(&fpage->mapping->list_lock);
continue;
}
if (OsIsPageMapped(fpage) && (fpage->flags & VM_MAP_REGION_FLAG_PERM_EXECUTE)) {//is the file page mapped and executable?
LOS_SpinUnlock(&fpage->mapping->list_lock);//if so, release the page_mapping lock
continue;//and move on to the next file page
if (OsIsPageMapped(fpage) && (fpage->flags & VM_MAP_REGION_FLAG_PERM_EXECUTE)) {
LOS_SpinUnlock(&fpage->mapping->list_lock);
continue;
}
//this file page can be shrunk
OsPageRefDecNoLock(fpage); //move the page towards the inactive file list
LOS_SpinUnlock(&fpage->mapping->list_lock); //release the page_mapping lock
OsPageRefDecNoLock(fpage);
LOS_SpinUnlock(&fpage->mapping->list_lock);
if (--nScan <= 0) {
break;
}
}
}
///Shrink the inactive page list
int OsShrinkInactiveList(LosVmPhysSeg *physSeg, int nScan, LOS_DL_LIST *list)
{
UINT32 nrReclaimed = 0;
@ -252,36 +248,36 @@ int OsShrinkInactiveList(LosVmPhysSeg *physSeg, int nScan, LOS_DL_LIST *list)
LosFilePage *ftemp = NULL;
LOS_DL_LIST *inactive_file = &physSeg->lruList[VM_LRU_INACTIVE_FILE];
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, inactive_file, LosFilePage, lru) {//walk the list one page at a time
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, inactive_file, LosFilePage, lru) {
flock = &fpage->mapping->list_lock;
if (LOS_SpinTrylock(flock) != LOS_OK) {//try to take the page_mapping lock of the file page
continue;//move on to the next file page
if (LOS_SpinTrylock(flock) != LOS_OK) {
continue;
}
page = fpage->vmPage;//physical page frame
if (OsIsPageLocked(page)) {//is the page locked?
page = fpage->vmPage;
if (OsIsPageLocked(page)) {
LOS_SpinUnlock(flock);
continue;//move on to the next file page
continue;
}
if (OsIsPageMapped(fpage) && (OsIsPageDirty(page) || (fpage->flags & VM_MAP_REGION_FLAG_PERM_EXECUTE))) {
LOS_SpinUnlock(flock);//is the file page mapped and either dirty or executable?
continue;//if so, skip it and move on to the next file page
LOS_SpinUnlock(flock);
continue;
}
if (OsIsPageDirty(page)) {//dirty page
ftemp = OsDumpDirtyPage(fpage);//make a copy of the dirty page
if (ftemp != NULL) {//copy succeeded
LOS_ListTailInsert(list, &ftemp->node);//queue the dirty copy on the caller's list to take away
if (OsIsPageDirty(page)) {
ftemp = OsDumpDirtyPage(fpage);
if (ftemp != NULL) {
LOS_ListTailInsert(list, &ftemp->node);
}
}
OsDeletePageCacheLru(fpage);//remove the file page from the LRU lists and the page cache
OsDeletePageCacheLru(fpage);
LOS_SpinUnlock(flock);
nrReclaimed++;//one more page reclaimed
nrReclaimed++;
if (--nScan <= 0) {//scanned enough
if (--nScan <= 0) {
break;
}
}
@ -290,48 +286,48 @@ int OsShrinkInactiveList(LosVmPhysSeg *physSeg, int nScan, LOS_DL_LIST *list)
}
#ifdef LOSCFG_FS_VFS
int OsTryShrinkMemory(size_t nPage)//try to shrink the file page cache
int OsTryShrinkMemory(size_t nPage)
{
UINT32 intSave;
size_t totalPages;
size_t nReclaimed = 0;
LosVmPhysSeg *physSeg = NULL;
UINT32 index;
LOS_DL_LIST_HEAD(dirtyList);//list that will collect dirty pages to be written back before reclaim
LOS_DL_LIST_HEAD(dirtyList);
LosFilePage *fpage = NULL;
LosFilePage *fnext = NULL;
if (nPage == 0) {
nPage = VM_FILEMAP_MIN_SCAN;//
nPage = VM_FILEMAP_MIN_SCAN;
}
if (nPage > VM_FILEMAP_MAX_SCAN) {
nPage = VM_FILEMAP_MAX_SCAN;
}
for (index = 0; index < g_vmPhysSegNum; index++) {//walk every physical segment
physSeg = &g_vmPhysSeg[index];//one segment at a time
for (index = 0; index < g_vmPhysSegNum; index++) {
physSeg = &g_vmPhysSeg[index];
LOS_SpinLockSave(&physSeg->lruLock, &intSave);
totalPages = physSeg->lruSize[VM_LRU_ACTIVE_FILE] + physSeg->lruSize[VM_LRU_INACTIVE_FILE];//total number of file pages
if (totalPages < VM_FILEMAP_MIN_SCAN) {//too few file pages in this segment to bother
totalPages = physSeg->lruSize[VM_LRU_ACTIVE_FILE] + physSeg->lruSize[VM_LRU_INACTIVE_FILE];
if (totalPages < VM_FILEMAP_MIN_SCAN) {
LOS_SpinUnlockRestore(&physSeg->lruLock, intSave);
continue;//skip this segment and look at the next one
continue;
}
if (OsInactiveListIsLow(physSeg)) {
OsShrinkActiveList(physSeg, (nPage < VM_FILEMAP_MIN_SCAN) ? VM_FILEMAP_MIN_SCAN : nPage);//shrink the active list first
OsShrinkActiveList(physSeg, (nPage < VM_FILEMAP_MIN_SCAN) ? VM_FILEMAP_MIN_SCAN : nPage);
}
nReclaimed += OsShrinkInactiveList(physSeg, nPage, &dirtyList);//shrink the inactive list, collecting dirty pages
nReclaimed += OsShrinkInactiveList(physSeg, nPage, &dirtyList);
LOS_SpinUnlockRestore(&physSeg->lruLock, intSave);
if (nReclaimed >= nPage) {//reclaimed enough
break;//stop shrinking
if (nReclaimed >= nPage) {
break;
}
}
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &dirtyList, LosFilePage, node) {//handle the collected dirty pages
OsDoFlushDirtyPage(fpage);//write the dirty data back to disk
LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &dirtyList, LosFilePage, node) {
OsDoFlushDirtyPage(fpage);
}
return nReclaimed;
@ -344,4 +340,4 @@ int OsTryShrinkMemory(size_t nPage)
#endif
#endif
#endif
#endif

@ -65,7 +65,7 @@ STATUS_T OsCheckMMapParams(VADDR_T *vaddr, unsigned long flags, size_t len, unsi
return -EINVAL;
}
if ((flags & MAP_SUPPORT_MASK) == 0) {//restrict the mapping flags to the supported set
if ((flags & MAP_SUPPORT_MASK) == 0) {
return -EINVAL;
}
if (((flags & MAP_SHARED_PRIVATE) == 0) || ((flags & MAP_SHARED_PRIVATE) == MAP_SHARED_PRIVATE)) {
@ -95,54 +95,20 @@ STATUS_T OsNamedMmapingPermCheck(struct file *filep, unsigned long flags, unsign
return LOS_OK;
}
///Anonymous mapping
STATUS_T OsAnonMMap(LosVmMapRegion *region)
{
LOS_SetRegionTypeAnon(region);
return LOS_OK;
}
/**
 * mmap: map a file or device into memory so that it can be accessed like ordinary
 * memory instead of through read/write system calls.
 * References:
 *   https://www.cnblogs.com/huxiao-tee/p/4660352.html
 *   http://abcdxyzk.github.io/blog/2015/09/11/kernel-mm-mmap/
 * addr   preferred start address; NULL lets the kernel choose
 * length length of the mapping
 * prot   access protection of the mapping:
 *        PROT_EXEC  pages may be executed
 *        PROT_READ  pages may be read
 *        PROT_WRITE pages may be written
 *        PROT_NONE  pages may not be accessed
 * flags  mapping type:
 *        MAP_FIXED     place the mapping exactly at addr
 *        MAP_SHARED    updates are visible to other processes mapping the same region
 *        MAP_PRIVATE   private copy-on-write mapping
 *        MAP_ANONYMOUS not backed by a file; fd is ignored
 *        MAP_DENYWRITE deny writes to the underlying file
 *        MAP_LOCKED    lock the pages so they are not swapped out
 * fd     file descriptor to map; with MAP_ANONYMOUS it is conventionally -1
 *        (or a descriptor obtained by opening /dev/zero on systems using that idiom)
 * offset offset into the file, usually 0; must be a multiple of PAGE_SIZE
 * On failure (void *)-1 is returned to user space.
 * A usage sketch follows the function body below.
 */
VADDR_T LOS_MMap(VADDR_T vaddr, size_t len, unsigned prot, unsigned long flags, int fd, unsigned long pgoff)
{
STATUS_T status;
VADDR_T resultVaddr;
UINT32 regionFlags;
LosVmMapRegion *newRegion = NULL;//a user allocation maps to a kernel region (linear area)
struct file *filep = NULL;// inode : file = 1:N; one inode can be opened by many processes, each getting its own file that points back to the same inode
LosVmMapRegion *newRegion = NULL;
struct file *filep = NULL;
LosVmSpace *vmSpace = OsCurrProcessGet()->vmSpace;
len = ROUNDUP(len, PAGE_SIZE);
@ -150,9 +116,9 @@ VADDR_T LOS_MMap(VADDR_T vaddr, size_t len, unsigned prot, unsigned long flags,
if (checkRst != LOS_OK) {
return checkRst;
}
if (LOS_IsNamedMapping(flags)) {//file-backed mapping?
status = fs_getfilep(fd, &filep);//get the file pointer and status for the descriptor
if (LOS_IsNamedMapping(flags)) {
status = fs_getfilep(fd, &filep);
if (status < 0) {
return -EBADF;
}
@ -165,30 +131,30 @@ VADDR_T LOS_MMap(VADDR_T vaddr, size_t len, unsigned prot, unsigned long flags,
(VOID)LOS_MuxAcquire(&vmSpace->regionMux);
/* user mode calls mmap to release heap physical memory without releasing heap virtual space */
status = OsUserHeapFree(vmSpace, vaddr, len);//user mode releases heap physical memory without giving up the heap virtual range
if (status == LOS_OK) {//OsUserHeapFree does two things: 1. remove the mappings 2. free the physical pages
status = OsUserHeapFree(vmSpace, vaddr, len);
if (status == LOS_OK) {
resultVaddr = vaddr;
goto MMAP_DONE;
}
//the address is not inside the heap
regionFlags = OsCvtProtFlagsToRegionFlags(prot, flags);//convert the mmap flags into region flags
newRegion = LOS_RegionAlloc(vmSpace, vaddr, len, regionFlags, pgoff);//allocate a region
regionFlags = OsCvtProtFlagsToRegionFlags(prot, flags);
newRegion = LOS_RegionAlloc(vmSpace, vaddr, len, regionFlags, pgoff);
if (newRegion == NULL) {
resultVaddr = (VADDR_T)-ENOMEM;//ENOMEM: out of memory
resultVaddr = (VADDR_T)-ENOMEM;
goto MMAP_DONE;
}
newRegion->regionFlags |= VM_MAP_REGION_FLAG_MMAP;
resultVaddr = newRegion->range.base;//return the base address of the region
resultVaddr = newRegion->range.base;
if (LOS_IsNamedMapping(flags)) {
status = OsNamedMMap(filep, newRegion);//file-backed mapping
status = OsNamedMMap(filep, newRegion);
} else {
status = OsAnonMMap(newRegion);//anonymous mapping
status = OsAnonMMap(newRegion);
}
if (status != LOS_OK) {
LOS_RbDelNode(&vmSpace->regionRbTree, &newRegion->rbNode);//remove the region from the red-black tree and the list
LOS_RegionFree(vmSpace, newRegion);//and free it
LOS_RbDelNode(&vmSpace->regionRbTree, &newRegion->rbNode);
LOS_RegionFree(vmSpace, newRegion);
resultVaddr = (VADDR_T)-ENOMEM;
goto MMAP_DONE;
}
@ -197,7 +163,7 @@ MMAP_DONE:
(VOID)LOS_MuxRelease(&vmSpace->regionMux);
return resultVaddr;
}
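#if 0 // @note_example: hypothetical in-kernel walk-through of the mmap path;
      // PROT_* / MAP_* are the standard POSIX flags referenced in the comment above.
STATIC VOID MmapExample(VOID)
{
    size_t len = 4 * PAGE_SIZE;
    /* 16KiB anonymous shared mapping at a kernel-chosen address; real callers must
     * check the result first, since error paths return a negative errno cast to
     * VADDR_T (user space sees (void *)-1). */
    VADDR_T va = LOS_MMap(0, len, PROT_READ | PROT_WRITE,
                          MAP_ANONYMOUS | MAP_SHARED, -1, 0);
    /* ... use [va, va + len) ... */
    (VOID)LOS_UnMMap(va, len);
}
#endif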
///Remove a mapping
STATUS_T LOS_UnMMap(VADDR_T addr, size_t size)
{
if ((addr <= 0) || (size == 0)) {
@ -206,6 +172,7 @@ STATUS_T LOS_UnMMap(VADDR_T addr, size_t size)
return OsUnMMap(OsCurrProcessGet()->vmSpace, addr, size);
}
STATIC INLINE BOOL OsProtMprotectPermCheck(unsigned long prot, LosVmMapRegion *region)
{
UINT32 protFlags = 0;
@ -219,28 +186,20 @@ STATIC INLINE BOOL OsProtMprotectPermCheck(unsigned long prot, LosVmMapRegion *r
return ((protFlags & permFlags) == protFlags);
}
/// Shrink the heap
VOID *OsShrinkHeap(VOID *addr, LosVmSpace *space)
{
VADDR_T newBrk, oldBrk;
newBrk = LOS_Align((VADDR_T)(UINTPTR)addr, PAGE_SIZE);//new heap top
oldBrk = LOS_Align(space->heapNow, PAGE_SIZE);//old heap top
if (LOS_UnMMap(newBrk, (oldBrk - newBrk)) < 0) {//unmap the range between them
return (void *)(UINTPTR)space->heapNow;//on failure keep the current heap top
newBrk = LOS_Align((VADDR_T)(UINTPTR)addr, PAGE_SIZE);
oldBrk = LOS_Align(space->heapNow, PAGE_SIZE);
if (LOS_UnMMap(newBrk, (oldBrk - newBrk)) < 0) {
return (void *)(UINTPTR)space->heapNow;
}
space->heapNow = (VADDR_T)(UINTPTR)addr;//record the new heap top
space->heapNow = (VADDR_T)(UINTPTR)addr;
return addr;
}
/**
 * Adjust the top of the process heap region (the classic brk). Growing the heap
 * enlarges (or first creates) the heap region; shrinking it unmaps the released
 * range. A short walk-through follows the function body below.
 */
VOID *LOS_DoBrk(VOID *addr)
{
LosVmSpace *space = OsCurrProcessGet()->vmSpace;
@ -250,60 +209,60 @@ VOID *LOS_DoBrk(VOID *addr)
VOID *alignAddr = NULL;
VOID *shrinkAddr = NULL;
if (addr == NULL) {//no address given
return (void *)(UINTPTR)space->heapNow;//just report the current heap top
if (addr == NULL) {
return (void *)(UINTPTR)space->heapNow;
}
if ((UINTPTR)addr < (UINTPTR)space->heapBase) {//heapBase is where the heap starts, so addr may not fall below it
if ((UINTPTR)addr < (UINTPTR)space->heapBase) {
return (VOID *)-ENOMEM;
}
size = (UINTPTR)addr - (UINTPTR)space->heapBase;//requested heap size
size = ROUNDUP(size, PAGE_SIZE); //rounded up to whole pages
alignAddr = (CHAR *)(UINTPTR)(space->heapBase) + size;//end address of the resulting region
size = (UINTPTR)addr - (UINTPTR)space->heapBase;
size = ROUNDUP(size, PAGE_SIZE);
alignAddr = (CHAR *)(UINTPTR)(space->heapBase) + size;
PRINT_INFO("brk addr %p , size 0x%x, alignAddr %p, align %d\n", addr, size, alignAddr, PAGE_SIZE);
(VOID)LOS_MuxAcquire(&space->regionMux);
if (addr < (VOID *)(UINTPTR)space->heapNow) {//the new break is below the current one
shrinkAddr = OsShrinkHeap(addr, space);//so shrink the heap
if (addr < (VOID *)(UINTPTR)space->heapNow) {
shrinkAddr = OsShrinkHeap(addr, space);
(VOID)LOS_MuxRelease(&space->regionMux);
return shrinkAddr;
}
if ((UINTPTR)alignAddr >= space->mapBase) {//the heap would run into the mapping area
VM_ERR("Process heap memory space is insufficient");//not enough heap space for the process
if ((UINTPTR)alignAddr >= space->mapBase) {
VM_ERR("Process heap memory space is insufficient");
ret = (VOID *)-ENOMEM;
goto REGION_ALLOC_FAILED;
}
if (space->heapBase == space->heapNow) {//usually only true on the first call, since heapBase == heapNow after init
region = LOS_RegionAlloc(space, space->heapBase, size,//allocate a readable/writable/user region; this happens only once
VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE |//the region size is given by range.size
if (space->heapBase == space->heapNow) {
region = LOS_RegionAlloc(space, space->heapBase, size,
VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE |
VM_MAP_REGION_FLAG_FIXED | VM_MAP_REGION_FLAG_PERM_USER, 0);
if (region == NULL) {
ret = (VOID *)-ENOMEM;
VM_ERR("LOS_RegionAlloc failed");
goto REGION_ALLOC_FAILED;
}
region->regionFlags |= VM_MAP_REGION_FLAG_HEAP;//tag the region as the heap; a region may carry several tags
space->heap = region;//remember it as the heap region
region->regionFlags |= VM_MAP_REGION_FLAG_HEAP;
space->heap = region;
}
space->heapNow = (VADDR_T)(UINTPTR)alignAddr;//update the heap top
space->heap->range.size = size; //update the heap size; the region grows or shrinks with it
ret = (VOID *)(UINTPTR)space->heapNow;//return the new heap top
space->heapNow = (VADDR_T)(UINTPTR)alignAddr;
space->heap->range.size = size;
ret = (VOID *)(UINTPTR)space->heapNow;
REGION_ALLOC_FAILED:
(VOID)LOS_MuxRelease(&space->regionMux);
return ret;
}
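#if 0 // @note_example: hypothetical walk-through of the brk path, illustrative only.
STATIC VOID BrkExample(VOID)
{
    VOID *cur = LOS_DoBrk(NULL);                       /* NULL only queries heapNow */
    VOID *grown = LOS_DoBrk((CHAR *)cur + PAGE_SIZE);  /* grow the heap by one page */
    VOID *back = LOS_DoBrk(cur);                       /* shrink again; the page is unmapped */
    (VOID)grown;
    (VOID)back;
}
#endif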
/// Inherit the old region's tag
STATIC UINT32 OsInheritOldRegionName(UINT32 oldRegionFlags)
{
UINT32 vmFlags = 0;
if (oldRegionFlags & VM_MAP_REGION_FLAG_HEAP) { //if the range came out of the heap region
vmFlags |= VM_MAP_REGION_FLAG_HEAP; //the new region keeps the heap tag
if (oldRegionFlags & VM_MAP_REGION_FLAG_HEAP) {
vmFlags |= VM_MAP_REGION_FLAG_HEAP;
} else if (oldRegionFlags & VM_MAP_REGION_FLAG_STACK) {
vmFlags |= VM_MAP_REGION_FLAG_STACK;
} else if (oldRegionFlags & VM_MAP_REGION_FLAG_TEXT) {
@ -318,7 +277,7 @@ STATIC UINT32 OsInheritOldRegionName(UINT32 oldRegionFlags)
return vmFlags;
}
///Change the access protection of a memory range
INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
{
LosVmSpace *space = OsCurrProcessGet()->vmSpace;
@ -328,7 +287,7 @@ INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
int ret;
(VOID)LOS_MuxAcquire(&space->regionMux);
region = LOS_RegionFind(space, vaddr);//find the region behind the virtual address
region = LOS_RegionFind(space, vaddr);
if (!IS_ALIGNED(vaddr, PAGE_SIZE) || (region == NULL) || (vaddr > vaddr + len)) {
ret = -EINVAL;
goto OUT_MPROTECT;
@ -338,18 +297,19 @@ INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
ret = -EINVAL;
goto OUT_MPROTECT;
}
//heap and VDSO regions may not have their protection changed
if ((region->regionFlags & VM_MAP_REGION_FLAG_VDSO) || (region->regionFlags & VM_MAP_REGION_FLAG_HEAP)) {
ret = -EPERM;
goto OUT_MPROTECT;
}
//for a shared file mapping the requested protection must pass the permission check
if (LOS_IsRegionTypeFile(region) && (region->regionFlags & VM_MAP_REGION_FLAG_SHARED)) {
if (!OsProtMprotectPermCheck(prot, region)) {
ret = -EACCES;
goto OUT_MPROTECT;
}
}
len = LOS_Align(len, PAGE_SIZE);
/* can't operation cross region */
if ((region->range.base + region->range.size) < (vaddr + len)) {
@ -358,11 +318,11 @@ INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
}
/* if only move some part of region, we need to split first */
if (region->range.size > len) {//if only part of the region changes, split it first
OsVmRegionAdjust(space, vaddr, len);//adjust the region boundaries
if (region->range.size > len) {
OsVmRegionAdjust(space, vaddr, len);
}
vmFlags = OsCvtProtFlagsToRegionFlags(prot, 0);//convert the prot flags into region flags
vmFlags = OsCvtProtFlagsToRegionFlags(prot, 0);
vmFlags |= (region->regionFlags & VM_MAP_REGION_FLAG_SHARED) ? VM_MAP_REGION_FLAG_SHARED : 0;
vmFlags |= OsInheritOldRegionName(region->regionFlags);
region = LOS_RegionFind(space, vaddr);
@ -372,7 +332,7 @@ INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
}
region->regionFlags = vmFlags;
count = len >> PAGE_SHIFT;
ret = LOS_ArchMmuChangeProt(&space->archMmu, vaddr, count, region->regionFlags);//the function that actually changes the protection
ret = LOS_ArchMmuChangeProt(&space->archMmu, vaddr, count, region->regionFlags);
if (ret) {
ret = -ENOMEM;
goto OUT_MPROTECT;
@ -427,7 +387,7 @@ STATUS_T OsMremapCheck(VADDR_T addr, size_t oldLen, VADDR_T newAddr, size_t newL
}
}
/* avoid new region overlaping with the old one */
/* avoid new region overlapping with the old one */
if (flags & MREMAP_FIXED) {
if (((region->range.base + region->range.size) > newAddr) &&
(region->range.base < (newAddr + newLen))) {
@ -441,7 +401,7 @@ STATUS_T OsMremapCheck(VADDR_T addr, size_t oldLen, VADDR_T newAddr, size_t newL
return LOS_OK;
}
///Remap a virtual memory range.
VADDR_T LOS_DoMremap(VADDR_T oldAddress, size_t oldSize, size_t newSize, int flags, VADDR_T newAddr)
{
LosVmMapRegion *regionOld = NULL;
@ -536,7 +496,7 @@ OUT_MREMAP:
(VOID)LOS_MuxRelease(&space->regionMux);
return ret;
}
///Dump the memory region containing an address
VOID LOS_DumpMemRegion(VADDR_T vaddr)
{
LosVmSpace *space = NULL;
@ -546,11 +506,12 @@ VOID LOS_DumpMemRegion(VADDR_T vaddr)
return;
}
if (LOS_IsRangeInSpace(space, ROUNDDOWN(vaddr, MB), MB) == FALSE) {//is the range inside the address space?
if (LOS_IsRangeInSpace(space, ROUNDDOWN(vaddr, MB), MB) == FALSE) {
return;
}
OsDumpPte(vaddr);//dump the L1/L2 page table entries
OsDumpAspace(space);//dump the address space
OsDumpPte(vaddr);
OsDumpAspace(space);
}
#endif
#endif

@ -47,8 +47,8 @@
#ifdef LOSCFG_KERNEL_VM
LITE_OS_SEC_BSS OomCB *g_oomCB = NULL; //global out-of-memory control block
static SPIN_LOCK_INIT(g_oomSpinLock);//spin lock protecting the OOM path
LITE_OS_SEC_BSS OomCB *g_oomCB = NULL;
static SPIN_LOCK_INIT(g_oomSpinLock);
LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomScoreProcess(LosProcessCB *candidateProcess)
{
@ -57,20 +57,20 @@ LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomScoreProcess(LosProcessCB *candidateProc
#ifndef LOSCFG_KERNEL_SMP
(VOID)LOS_MuxAcquire(&candidateProcess->vmSpace->regionMux);
#endif
/* we only consider actual physical memory here. */
/* we only consider actual physical memory here. */
OsUProcessPmUsage(candidateProcess->vmSpace, NULL, &actualPm);
#ifndef LOSCFG_KERNEL_SMP
(VOID)LOS_MuxRelease(&candidateProcess->vmSpace->regionMux);
#endif
return actualPm;
}
///Used as the g_oomCB->processVictimCB callback
LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomKillProcess(UINTPTR param)
{
/* we will not kill process, and do nothing here */
return LOS_OK;
}
///Force memory to be reclaimed
LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomForceShrinkMemory(VOID)
{
UINT32 i;
@ -80,14 +80,13 @@ LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomForceShrinkMemory(VOID)
* TryShrinkMemory maybe reclaim 0 pages in the first time from active list
* to inactive list, and in the second time reclaim memory from inactive list.
*/
for (i = 0; i < MAX_SHRINK_PAGECACHE_TRY; i++) {
reclaimMemPages += OsTryShrinkMemory(0);
}
return reclaimMemPages;
}
///Reclaim page cache when memory is short
LITE_OS_SEC_TEXT_MINOR STATIC BOOL OomReclaimPageCache(VOID)
{
UINT32 totalPm = 0;
@ -97,44 +96,43 @@ LITE_OS_SEC_TEXT_MINOR STATIC BOOL OomReclaimPageCache(VOID)
UINT32 i;
for (i = 0; i < MAX_SHRINK_PAGECACHE_TRY; i++) {
OsVmPhysUsedInfoGet(&usedPm, &totalPm);//get the total and used physical page counts
isReclaimMemory = ((totalPm - usedPm) << PAGE_SHIFT) < g_oomCB->reclaimMemThreshold;//are we below the reclaim threshold?
if (isReclaimMemory) {//time to reclaim
OsVmPhysUsedInfoGet(&usedPm, &totalPm);
isReclaimMemory = ((totalPm - usedPm) << PAGE_SHIFT) < g_oomCB->reclaimMemThreshold;
if (isReclaimMemory) {
/*
* we do force memory reclaim from page cache here.
* if we get memory, we will reclaim pagecache memory again.
* if there is no memory to reclaim, we will return.
*/
//force memory to be reclaimed from the page cache here
reclaimMemPages = OomForceShrinkMemory();//do the forced reclaim
if (reclaimMemPages > 0) {//if we got memory back, try the page cache again
reclaimMemPages = OomForceShrinkMemory();
if (reclaimMemPages > 0) {
continue;
}
}
break;//nothing left to reclaim
break;
}
return isReclaimMemory;//whether reclaim was triggered
return isReclaimMemory;
}
/*
* check is low memory or not, if low memory, try to kill process.
* return is kill process or not.
* check is low memory or not, if low memory, try to kill process.
* return is kill process or not.
*/
LITE_OS_SEC_TEXT_MINOR BOOL OomCheckProcess(VOID)//check whether memory is low and, if so, try to pick a victim process; returns whether one was killed
LITE_OS_SEC_TEXT_MINOR BOOL OomCheckProcess(VOID)
{
UINT32 totalPm;
UINT32 usedPm;
BOOL isLowMemory = FALSE;
/*
* spinlock the current core schedule, make sure oom process atomic
* spinlock other place entering OomCheckProcess, make sure oom process mutex
* spinlock the current core schedule, make sure oom process atomic
* spinlock other place entering OomCheckProcess, make sure oom process mutex
*/
LOS_SpinLock(&g_oomSpinLock);
/* first we will check if we need to reclaim pagecache memory */
if (OomReclaimPageCache() == FALSE) {//
if (OomReclaimPageCache() == FALSE) {
LOS_SpinUnlock(&g_oomSpinLock);
goto NO_VICTIM_PROCESS;
}
@ -142,7 +140,9 @@ LITE_OS_SEC_TEXT_MINOR BOOL OomCheckProcess(VOID)//检查内存是否不足,
/* get free bytes */
OsVmPhysUsedInfoGet(&usedPm, &totalPm);
isLowMemory = ((totalPm - usedPm) << PAGE_SHIFT) < g_oomCB->lowMemThreshold;
LOS_SpinUnlock(&g_oomSpinLock);
if (isLowMemory) {
PRINTK("[oom] OS is in low memory state\n"
"total physical memory: %#x(byte), used: %#x(byte),"
@ -155,14 +155,14 @@ NO_VICTIM_PROCESS:
return isLowMemory;
}
#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK //switch for the periodic OOM monitor task
STATIC VOID OomWriteEvent(VOID) // timer callback created in OomTaskInit
#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK
STATIC VOID OomWriteEvent(VOID)
{
OsWriteResourceEvent(OS_RESOURCE_EVENT_OOM);//broadcast the out-of-memory event
OsWriteResourceEvent(OS_RESOURCE_EVENT_OOM);
}
#endif
//print the low-memory information
LITE_OS_SEC_TEXT_MINOR VOID OomInfodump(VOID) //dump the OOM configuration
LITE_OS_SEC_TEXT_MINOR VOID OomInfodump(VOID)
{
PRINTK("[oom] oom loop task status: %s\n"
" oom low memory threshold: %#x(byte)\n"
@ -172,7 +172,7 @@ LITE_OS_SEC_TEXT_MINOR VOID OomInfodump(VOID) //打印内存溢出信息
g_oomCB->lowMemThreshold, g_oomCB->reclaimMemThreshold,
g_oomCB->checkInterval);
}
///Set the low-memory threshold
LITE_OS_SEC_TEXT_MINOR VOID OomSetLowMemThreashold(UINT32 lowMemThreshold)
{
if ((lowMemThreshold > OOM_DEFAULT_LOW_MEM_THRESHOLD_MAX)) {
@ -186,7 +186,7 @@ LITE_OS_SEC_TEXT_MINOR VOID OomSetLowMemThreashold(UINT32 lowMemThreshold)
g_oomCB->lowMemThreshold);
}
}
///Set the memory-reclaim threshold
LITE_OS_SEC_TEXT_MINOR VOID OomSetReclaimMemThreashold(UINT32 reclaimMemThreshold)
{
UINT32 totalPm = 0;
@ -204,7 +204,7 @@ LITE_OS_SEC_TEXT_MINOR VOID OomSetReclaimMemThreashold(UINT32 reclaimMemThreshol
g_oomCB->reclaimMemThreshold);
}
}
///Set the check interval
LITE_OS_SEC_TEXT_MINOR VOID OomSetCheckInterval(UINT32 checkInterval)
{
if ((checkInterval >= OOM_CHECK_MIN) && (checkInterval <= OOM_CHECK_MAX)) {
@ -216,7 +216,7 @@ LITE_OS_SEC_TEXT_MINOR VOID OomSetCheckInterval(UINT32 checkInterval)
g_oomCB->checkInterval, OOM_CHECK_MIN, OOM_CHECK_MAX);
}
}
///Initialize the OOM monitor; OOM checks memory usage from a software timer
LITE_OS_SEC_TEXT_MINOR UINT32 OomTaskInit(VOID)
{
g_oomCB = (OomCB *)LOS_MemAlloc(m_aucSysMem0, sizeof(OomCB));
@ -225,27 +225,28 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OomTaskInit(VOID)
return LOS_NOK;
}
g_oomCB->lowMemThreshold = OOM_DEFAULT_LOW_MEM_THRESHOLD; //threshold below which the monitor reacts
g_oomCB->reclaimMemThreshold = OOM_DEFAULT_RECLAIM_MEM_THRESHOLD; //threshold below which memory is reclaimed
g_oomCB->checkInterval = OOM_DEFAULT_CHECK_INTERVAL; //check interval, 1s by default
g_oomCB->processVictimCB = (OomFn)OomKillProcess; //callback applied to the victim process
g_oomCB->scoreCB = (OomFn)OomScoreProcess; //callback that scores a process by its physical memory usage
g_oomCB->enabled = FALSE; //monitoring disabled by default
g_oomCB->lowMemThreshold = OOM_DEFAULT_LOW_MEM_THRESHOLD;
g_oomCB->reclaimMemThreshold = OOM_DEFAULT_RECLAIM_MEM_THRESHOLD;
g_oomCB->checkInterval = OOM_DEFAULT_CHECK_INTERVAL;
g_oomCB->processVictimCB = (OomFn)OomKillProcess;
g_oomCB->scoreCB = (OomFn)OomScoreProcess;
g_oomCB->enabled = FALSE;
#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK //switch for the periodic OOM check
#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK
g_oomCB->enabled = TRUE;
UINT32 ret = LOS_SwtmrCreate(g_oomCB->checkInterval, LOS_SWTMR_MODE_PERIOD, (SWTMR_PROC_FUNC)OomWriteEvent,
&g_oomCB->swtmrID, (UINTPTR)g_oomCB);//create the check timer
&g_oomCB->swtmrID, (UINTPTR)g_oomCB);
if (ret != LOS_OK) {
return ret;
}
return LOS_SwtmrStart(g_oomCB->swtmrID);//start the timer
return LOS_SwtmrStart(g_oomCB->swtmrID);
#else
return LOS_OK;
#endif
}
LOS_MODULE_INIT(OomTaskInit, LOS_INIT_LEVEL_KMOD_TASK);//register the OOM monitor module init
LOS_MODULE_INIT(OomTaskInit, LOS_INIT_LEVEL_KMOD_TASK);
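#if 0 // @note_example: hypothetical tuning sequence; thresholds are byte values and
      // must stay inside the OOM_*_MIN/MAX bounds the setters above enforce.
STATIC VOID OomTuningExample(VOID)
{
    OomSetLowMemThreashold(512 * 1024);      /* report low memory below 512KiB free */
    OomSetReclaimMemThreashold(1024 * 1024); /* start page-cache reclaim below 1MiB free */
    OomSetCheckInterval(OOM_DEFAULT_CHECK_INTERVAL);
    OomInfodump();                           /* print the resulting configuration */
}
#endif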
#endif
#endif

@ -1,17 +1,3 @@
/*!
 * Shared memory is the fastest IPC mechanism: cooperating processes map the same
 * physical pages into their own address spaces and then access them like memory
 * obtained from malloc(), without copying data through the kernel. The usual price
 * is that the processes must synchronize their accesses themselves.
 */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
@ -69,9 +55,9 @@
#ifdef LOSCFG_KERNEL_SHM
#define SHM_SEG_FREE 0x2000 //segment is free
#define SHM_SEG_USED 0x4000 //segment is in use
#define SHM_SEG_REMOVE 0x8000 //segment is marked for removal
#define SHM_SEG_FREE 0x2000
#define SHM_SEG_USED 0x4000
#define SHM_SEG_REMOVE 0x8000
#ifndef SHM_M
#define SHM_M 010000
@ -80,17 +66,21 @@
#ifndef SHM_X
#define SHM_X 0100
#endif
#ifndef ACCESSPERMS
#define ACCESSPERMS (S_IRWXU | S_IRWXG | S_IRWXO)//read/write/execute bits for user, group and others
#endif //U: user, G: group, O: other
#define ACCESSPERMS (S_IRWXU | S_IRWXG | S_IRWXO)
#endif
#define SHM_S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
#define SHM_S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)
#define SHM_S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)
#define SHM_GROUPE_TO_USER 3
#define SHM_OTHER_TO_USER 6
#ifndef LOSCFG_IPC_CONTAINER
STATIC LosMux g_sysvShmMux;
/* private data */
STATIC struct shminfo g_shmInfo;
STATIC struct shmIDSource *g_shmSegs = NULL;
@ -106,60 +96,6 @@ STATIC UINT32 g_shmUsedPageCount;
#define SYSV_SHM_LOCK() (VOID)LOS_MuxLock(&IPC_SHM_SYS_VSHM_MUTEX, LOS_WAIT_FOREVER)
#define SYSV_SHM_UNLOCK() (VOID)LOS_MuxUnlock(&IPC_SHM_SYS_VSHM_MUTEX)
#if 0 // @note_#if0
//For every IPC object the kernel keeps an ipc_perm structure describing its permissions and owner
struct ipc_perm {
key_t __ipc_perm_key; //key passed to shmget()
uid_t uid; //effective user ID of the owner
gid_t gid; //effective group ID of the owner
uid_t cuid; //effective user ID of the creator
gid_t cgid; //effective group ID of the creator
mode_t mode; //permissions plus the SHM_DEST / SHM_LOCKED / SHM_HUGETLB flag bits
int __ipc_perm_seq; //sequence number
long __pad1; //reserved for extension
long __pad2;
};
//For every shared memory segment the kernel maintains an internal shmid_ds structure
struct shmid_ds {
struct ipc_perm shm_perm;///< operation permissions, including the owner's user and group IDs
size_t shm_segsz; ///< size of the segment in bytes
time_t shm_atime; ///< time of the last attach
time_t shm_dtime; ///< time of the last detach
time_t shm_ctime; ///< creation time
pid_t shm_cpid; ///< PID of the creating process
pid_t shm_lpid; ///< PID of the last process that operated on the segment
unsigned long shm_nattch; ///< number of processes currently attached
unsigned long __pad1; //reserved for extension
unsigned long __pad2;
};
// configuration limits of the shared memory module
struct shminfo {
unsigned long shmmax, shmmin, shmmni, shmseg, shmall, __unused[4];
};
struct shmIDSource {//shared memory segment descriptor
struct shmid_ds ds; //per-segment data kept by the kernel
UINT32 status; //state: SHM_SEG_FREE, ...
LOS_DL_LIST node; //list node the segment's VmPages hang off
#ifdef LOSCFG_SHELL
CHAR ownerName[OS_PCB_NAME_LEN];
#endif
};
/* private data */
STATIC struct shminfo g_shmInfo = { //global limits of the shared memory range
.shmmax = SHM_MAX,//maximum size of a single segment: 4096 pages, i.e. 16M
.shmmin = SHM_MIN,//minimum size of a single segment: 1 page, i.e. 4K
.shmmni = SHM_MNI,//maximum number of segments, 192 by default
.shmseg = SHM_SEG,//maximum number of segments a single user process may use, 128
.shmall = SHM_ALL,//system-wide limit on shared memory pages, 4096 pages
};
STATIC struct shmIDSource *g_shmSegs = NULL;
STATIC UINT32 g_shmUsedPageCount;
#endif
//initialize the shared memory module
struct shmIDSource *OsShmCBInit(LosMux *sysvShmMux, struct shminfo *shmInfo, UINT32 *shmUsedPageCount)
{
UINT32 ret;
@ -168,6 +104,7 @@ struct shmIDSource *OsShmCBInit(LosMux *sysvShmMux, struct shminfo *shmInfo, UIN
if ((sysvShmMux == NULL) || (shmInfo == NULL) || (shmUsedPageCount == NULL)) {
return NULL;
}
ret = LOS_MuxInit(sysvShmMux, NULL);
if (ret != LOS_OK) {
goto ERROR;
@ -178,6 +115,7 @@ struct shmIDSource *OsShmCBInit(LosMux *sysvShmMux, struct shminfo *shmInfo, UIN
shmInfo->shmmni = SHM_MNI;
shmInfo->shmseg = SHM_SEG;
shmInfo->shmall = SHM_ALL;
struct shmIDSource *shmSegs = LOS_MemAlloc((VOID *)OS_SYS_MEM_ADDR, sizeof(struct shmIDSource) * shmInfo->shmmni);
if (shmSegs == NULL) {
(VOID)LOS_MuxDestroy(sysvShmMux);
@ -187,9 +125,9 @@ struct shmIDSource *OsShmCBInit(LosMux *sysvShmMux, struct shminfo *shmInfo, UIN
0, (sizeof(struct shmIDSource) * shmInfo->shmmni));
for (i = 0; i < shmInfo->shmmni; i++) {
shmSegs[i].status = SHM_SEG_FREE;//every descriptor starts out free
shmSegs[i].ds.shm_perm.seq = i + 1;//the kernel keeps an ipc_perm per IPC object describing its permissions and owner
LOS_ListInit(&shmSegs[i].node);//initialize the page list
shmSegs[i].status = SHM_SEG_FREE;
shmSegs[i].ds.shm_perm.seq = i + 1;
LOS_ListInit(&shmSegs[i].node);
}
*shmUsedPageCount = 0;
@ -199,6 +137,7 @@ ERROR:
VM_ERR("ShmInit fail\n");
return NULL;
}
UINT32 ShmInit(VOID)
{
#ifndef LOSCFG_IPC_CONTAINER
@ -210,9 +149,8 @@ UINT32 ShmInit(VOID)
return LOS_OK;
}
LOS_MODULE_INIT(ShmInit, LOS_INIT_LEVEL_VM_COMPLETE);
LOS_MODULE_INIT(ShmInit, LOS_INIT_LEVEL_VM_COMPLETE);//register the shared memory module init
//deinitialize the shared memory module
UINT32 ShmDeinit(VOID)
{
UINT32 ret;
@ -227,7 +165,7 @@ UINT32 ShmDeinit(VOID)
return 0;
}
///Mark every page frame of the segment as shared
STATIC inline VOID ShmSetSharedFlag(struct shmIDSource *seg)
{
LosVmPage *page = NULL;
@ -236,7 +174,7 @@ STATIC inline VOID ShmSetSharedFlag(struct shmIDSource *seg)
OsSetPageShared(page);
}
}
///Clear the shared flag on every page frame of the segment
STATIC inline VOID ShmClearSharedFlag(struct shmIDSource *seg)
{
LosVmPage *page = NULL;
@ -245,7 +183,7 @@ STATIC inline VOID ShmClearSharedFlag(struct shmIDSource *seg)
OsCleanPageShared(page);
}
}
///Drop one reference on every shared page of the segment
STATIC VOID ShmPagesRefDec(struct shmIDSource *seg)
{
LosVmPage *page = NULL;
@ -255,15 +193,6 @@ STATIC VOID ShmPagesRefDec(struct shmIDSource *seg)
}
}
/**
 * @brief Validate an allocation request and reserve a free segment slot. The size
 *        is rounded up to whole pages, e.g. size = 4097 gives
 *        LOS_Align(size, PAGE_SIZE) = 8192 and size >> PAGE_SHIFT = 2 pages.
 * @param key key the new segment will be bound to
 * @param size requested size, rounded up in place
 * @param segNum index of the reserved segment slot
 * @return 0 on success, a negative errno otherwise
 */
STATIC INT32 ShmAllocSegCheck(key_t key, size_t *size, INT32 *segNum)
{
INT32 i;
@ -272,7 +201,7 @@ STATIC INT32 ShmAllocSegCheck(key_t key, size_t *size, INT32 *segNum)
return -EINVAL;
}
*size = LOS_Align(*size, PAGE_SIZE);//the size must be page aligned
*size = LOS_Align(*size, PAGE_SIZE);
if ((IPC_SHM_USED_PAGE_COUNT + (*size >> PAGE_SHIFT)) > IPC_SHM_INFO.shmall) {
return -ENOMEM;
}
@ -282,10 +211,11 @@ STATIC INT32 ShmAllocSegCheck(key_t key, size_t *size, INT32 *segNum)
return -ENOMEM;
}
#endif
for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {//look for a free segment to bind to the key
if (IPC_SHM_SEGS[i].status & SHM_SEG_FREE) {//found a free one
IPC_SHM_SEGS[i].status &= ~SHM_SEG_FREE;//mark it as no longer free
*segNum = i;//remember its index
for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {
if (IPC_SHM_SEGS[i].status & SHM_SEG_FREE) {
IPC_SHM_SEGS[i].status &= ~SHM_SEG_FREE;
*segNum = i;
break;
}
}
@ -306,47 +236,49 @@ STATIC INT32 ShmAllocSeg(key_t key, size_t size, INT32 shmflg)
if (ret < 0) {
return ret;
}
seg = &IPC_SHM_SEGS[segNum];
count = LOS_PhysPagesAlloc(size >> PAGE_SHIFT, &seg->node);//allocate the shared pages; they are queued on seg->node
if (count != (size >> PAGE_SHIFT)) {//if we could not get all of them, give the partial allocation back
(VOID)LOS_PhysPagesFree(&seg->node);//free the pages already on the list
seg->status = SHM_SEG_FREE;//and return the segment to the free state
count = LOS_PhysPagesAlloc(size >> PAGE_SHIFT, &seg->node);
if (count != (size >> PAGE_SHIFT)) {
(VOID)LOS_PhysPagesFree(&seg->node);
seg->status = SHM_SEG_FREE;
#ifdef LOSCFG_KERNEL_IPC_PLIMIT
OsIPCLimitShmFree(size);
#endif
return -ENOMEM;
}
ShmSetSharedFlag(seg);//mark every page on the node list as shared
ShmSetSharedFlag(seg);
IPC_SHM_USED_PAGE_COUNT += size >> PAGE_SHIFT;
seg->status |= SHM_SEG_USED; //the segment is now in use
seg->status |= SHM_SEG_USED;
seg->ds.shm_perm.mode = (UINT32)shmflg & ACCESSPERMS;
seg->ds.shm_perm.key = key;//remember the key, binding it to this shared memory ID
seg->ds.shm_segsz = size; //size of the segment
seg->ds.shm_perm.cuid = LOS_GetUserID(); //creator user ID
seg->ds.shm_perm.uid = LOS_GetUserID(); //owner user ID
seg->ds.shm_perm.cgid = LOS_GetGroupID(); //creator group ID
seg->ds.shm_perm.gid = LOS_GetGroupID(); //owner group ID
seg->ds.shm_lpid = 0; //last process that operated on the segment
seg->ds.shm_nattch = 0; //number of attached processes
seg->ds.shm_cpid = LOS_GetCurrProcessID(); //creator process ID
seg->ds.shm_atime = 0; //attach time
seg->ds.shm_dtime = 0; //detach time; detaching removes the segment from the process address space without deleting it, it merely becomes unavailable to that process
seg->ds.shm_ctime = time(NULL);//creation time
seg->ds.shm_perm.key = key;
seg->ds.shm_segsz = size;
seg->ds.shm_perm.cuid = LOS_GetUserID();
seg->ds.shm_perm.uid = LOS_GetUserID();
seg->ds.shm_perm.cgid = LOS_GetGroupID();
seg->ds.shm_perm.gid = LOS_GetGroupID();
seg->ds.shm_lpid = 0;
seg->ds.shm_nattch = 0;
seg->ds.shm_cpid = LOS_GetCurrProcessID();
seg->ds.shm_atime = 0;
seg->ds.shm_dtime = 0;
seg->ds.shm_ctime = time(NULL);
#ifdef LOSCFG_SHELL
(VOID)memcpy_s(seg->ownerName, OS_PCB_NAME_LEN, OsCurrProcessGet()->processName, OS_PCB_NAME_LEN);
#endif
return segNum;
}
///Release the physical pages on seg->node and reset the segment itself
STATIC INLINE VOID ShmFreeSeg(struct shmIDSource *seg, UINT32 *shmUsedPageCount)
{
UINT32 count;
ShmClearSharedFlag(seg);//first clear the shared flag on every page of seg->node
count = LOS_PhysPagesFree(&seg->node);//then free the page frames one by one
if (count != (seg->ds.shm_segsz >> PAGE_SHIFT)) {//the counts must match, anything else is an error
ShmClearSharedFlag(seg);
count = LOS_PhysPagesFree(&seg->node);
if (count != (seg->ds.shm_segsz >> PAGE_SHIFT)) {
VM_ERR("free physical pages failed, count = %d, size = %d", count, seg->ds.shm_segsz >> PAGE_SHIFT);
return;
}
@ -356,31 +288,31 @@ STATIC INLINE VOID ShmFreeSeg(struct shmIDSource *seg, UINT32 *shmUsedPageCount)
if (shmUsedPageCount != NULL) {
(*shmUsedPageCount) -= seg->ds.shm_segsz >> PAGE_SHIFT;
}
seg->status = SHM_SEG_FREE;//the segment is free again
LOS_ListInit(&seg->node);//reset the page list
seg->status = SHM_SEG_FREE;
LOS_ListInit(&seg->node);
}
///Look up a segment index by key
STATIC INT32 ShmFindSegByKey(key_t key)
{
INT32 i;
struct shmIDSource *seg = NULL;
for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {//walk the segment pool looking for the ID bound to the key
for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {
seg = &IPC_SHM_SEGS[i];
if ((seg->status & SHM_SEG_USED) &&
(seg->ds.shm_perm.key == key)) {//both conditions met: return the index
(seg->ds.shm_perm.key == key)) {
return i;
}
}
return -1;
}
///Validity check for a shared memory segment
STATIC INT32 ShmSegValidCheck(INT32 segNum, size_t size, INT32 shmFlg)
{
struct shmIDSource *seg = &IPC_SHM_SEGS[segNum];//segment descriptor
struct shmIDSource *seg = &IPC_SHM_SEGS[segNum];
if (size > seg->ds.shm_segsz) {//requested size exceeds the segment size
if (size > seg->ds.shm_segsz) {
return -EINVAL;
}
@ -391,7 +323,7 @@ STATIC INT32 ShmSegValidCheck(INT32 segNum, size_t size, INT32 shmFlg)
return segNum;
}
///Find the shared memory segment by ID
STATIC struct shmIDSource *ShmFindSeg(int shmid)
{
struct shmIDSource *seg = NULL;
@ -409,7 +341,7 @@ STATIC struct shmIDSource *ShmFindSeg(int shmid)
return seg;
}
///Map the shared memory pages into an address space
STATIC VOID ShmVmmMapping(LosVmSpace *space, LOS_DL_LIST *pageList, VADDR_T vaddr, UINT32 regionFlags)
{
LosVmPage *vmPage = NULL;
@ -417,64 +349,64 @@ STATIC VOID ShmVmmMapping(LosVmSpace *space, LOS_DL_LIST *pageList, VADDR_T vadd
PADDR_T pa;
STATUS_T ret;
LOS_DL_LIST_FOR_EACH_ENTRY(vmPage, pageList, LosVmPage, node) {//map the pages one by one
pa = VM_PAGE_TO_PHYS(vmPage);//physical address of the page
LOS_AtomicInc(&vmPage->refCounts);//one more reference
ret = LOS_ArchMmuMap(&space->archMmu, va, pa, 1, regionFlags);//map virtual to physical
LOS_DL_LIST_FOR_EACH_ENTRY(vmPage, pageList, LosVmPage, node) {
pa = VM_PAGE_TO_PHYS(vmPage);
LOS_AtomicInc(&vmPage->refCounts);
ret = LOS_ArchMmuMap(&space->archMmu, va, pa, 1, regionFlags);
if (ret != 1) {
VM_ERR("LOS_ArchMmuMap failed, ret = %d", ret);
}
va += PAGE_SIZE;
}
}
///Fork a shared region
VOID OsShmFork(LosVmSpace *space, LosVmMapRegion *oldRegion, LosVmMapRegion *newRegion)
{
struct shmIDSource *seg = NULL;
SYSV_SHM_LOCK();
seg = ShmFindSeg(oldRegion->shmid);//find the shared segment behind the old region's ID
seg = ShmFindSeg(oldRegion->shmid);
if (seg == NULL) {
SYSV_SHM_UNLOCK();
VM_ERR("shm fork failed!");
return;
}
newRegion->shmid = oldRegion->shmid;//same shared memory ID
newRegion->forkFlags = oldRegion->forkFlags;//and the same fork flags
ShmVmmMapping(space, &seg->node, newRegion->range.base, newRegion->regionFlags);//map the shared memory into the new region
seg->ds.shm_nattch++;//one more process attached to the segment
newRegion->shmid = oldRegion->shmid;
newRegion->forkFlags = oldRegion->forkFlags;
ShmVmmMapping(space, &seg->node, newRegion->range.base, newRegion->regionFlags);
seg->ds.shm_nattch++;
SYSV_SHM_UNLOCK();
}
///Free a shared region
VOID OsShmRegionFree(LosVmSpace *space, LosVmMapRegion *region)
{
struct shmIDSource *seg = NULL;
SYSV_SHM_LOCK();
seg = ShmFindSeg(region->shmid);//find the shared segment behind the region's ID
seg = ShmFindSeg(region->shmid);
if (seg == NULL) {
SYSV_SHM_UNLOCK();
return;
}
LOS_ArchMmuUnmap(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);//remove the region's mappings
ShmPagesRefDec(seg);//drop the page references
seg->ds.shm_nattch--;//one process fewer attached to the segment
LOS_ArchMmuUnmap(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);
ShmPagesRefDec(seg);
seg->ds.shm_nattch--;
if (seg->ds.shm_nattch <= 0 && (seg->status & SHM_SEG_REMOVE)) {
ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);//release the physical memory (note: the physical pages themselves)
ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);
} else {
seg->ds.shm_dtime = time(NULL);
seg->ds.shm_lpid = LOS_GetCurrProcessID(); /* may not be the space's PID. */
}
SYSV_SHM_UNLOCK();
}
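/*
 * Illustrative segment lifecycle driven by the functions above (the user-space
 * shmget/shmat/shmdt/shmctl(IPC_RMID) sequence is assumed, not shown here):
 *   ShmGet -> ShmAllocSeg       pages allocated, status = SHM_SEG_USED, nattch = 0
 *   each attach (shmat / fork)  ShmatVmmAlloc / OsShmFork map the pages, nattch++
 *   each detach                 OsShmRegionFree unmaps, drops the refs, nattch--
 *   removal request             status gains SHM_SEG_REMOVE, pages kept while attached
 *   last detach with SHM_SEG_REMOVE set -> ShmFreeSeg releases the physical pages
 */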
///Is this a shared region, i.e. does it carry the SHM flag?
BOOL OsIsShmRegion(LosVmMapRegion *region)
{
return (region->regionFlags & VM_MAP_REGION_FLAG_SHM) ? TRUE : FALSE;
}
///Count the segments in the pool that are currently in use
STATIC INT32 ShmSegUsedCount(VOID)
{
INT32 i;
@@ -483,16 +415,16 @@ STATIC INT32 ShmSegUsedCount(VOID)
for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {
seg = &IPC_SHM_SEGS[i];
if (seg->status & SHM_SEG_USED) {// found one in use
if (seg->status & SHM_SEG_USED) {
count++;
}
}
return count;
}
/// Permission check on a shared memory segment
STATIC INT32 ShmPermCheck(struct shmIDSource *seg, mode_t mode)
{
INT32 uid = LOS_GetUserID();// user ID of the current process
INT32 uid = LOS_GetUserID();
UINT32 tmpMode = 0;
mode_t privMode = seg->ds.shm_perm.mode;
mode_t accMode;
@@ -534,22 +466,6 @@ STATIC INT32 ShmPermCheck(struct shmIDSource *seg, mode_t mode)
}
}
/*!
* @brief ShmGet
* Get the identifier of the shared memory segment bound to key, creating the
* segment if necessary.
* @param key IPC key used to locate (or create) the segment. msgget, semget and
* shmget all take such a key; it is normally generated with ftok(), or
* IPC_PRIVATE can be passed to create a segment private to the caller.
* @param shmflg Flags such as IPC_CREAT and IPC_EXCL:
* IPC_CREAT - if no segment is bound to key (and key is not IPC_PRIVATE),
* create a new one; otherwise return the id of the existing segment.
* IPC_EXCL - only meaningful together with IPC_CREAT; if the segment already
* exists, fail with EEXIST, analogous to open() with O_CREAT | O_EXCL.
* @param size Requested segment size in bytes.
* @return Non-negative shared memory identifier on success, a negative errno
* value on failure.
* @see ShmFindSegByKey | ShmAllocSeg
*/
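// Editor's illustrative sketch, not part of this kernel source: how the
// semantics documented above are reached from user space through the
// standard <sys/ipc.h>/<sys/shm.h> wrappers. The path "/tmp" and project
// id 'L' are arbitrary example values.
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

static int demo_shmget(void)
{
    key_t key = ftok("/tmp", 'L');       /* derive an IPC key from a path */
    /* create a 4 KiB segment; with IPC_EXCL this fails (EEXIST) if the key is taken */
    int id = shmget(key, 4096, IPC_CREAT | IPC_EXCL | 0600);
    if (id == -1) {
        perror("shmget");
        return -1;
    }
    printf("created segment, shmid = %d\n", id);
    return id;
}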
INT32 ShmGet(key_t key, size_t size, INT32 shmflg)
{
INT32 ret;
@@ -560,13 +476,13 @@ INT32 ShmGet(key_t key, size_t size, INT32 shmflg)
if (key == IPC_PRIVATE) {
ret = ShmAllocSeg(key, size, shmflg);
} else {
ret = ShmFindSegByKey(key);// look up the segment ID by key
ret = ShmFindSegByKey(key);
if (ret < 0) {
if (((UINT32)shmflg & IPC_CREAT) == 0) {// no segment bound to key and IPC_CREAT not requested
if (((UINT32)shmflg & IPC_CREAT) == 0) {
ret = -ENOENT;
goto ERROR;
} else {
ret = ShmAllocSeg(key, size, shmflg);// allocate a new shared memory segment
ret = ShmAllocSeg(key, size, shmflg);
}
} else {
shmid = ret;
@@ -575,7 +491,7 @@ INT32 ShmGet(key_t key, size_t size, INT32 shmflg)
ret = -EEXIST;
goto ERROR;
}
ret = ShmPermCheck(ShmFindSeg(shmid), (UINT32)shmflg & ACCESSPERMS);// check access permissions on the shared memory segment
ret = ShmPermCheck(ShmFindSeg(shmid), (UINT32)shmflg & ACCESSPERMS);
if (ret != 0) {
ret = -ret;
goto ERROR;
@@ -610,13 +526,13 @@ INT32 ShmatParamCheck(const VOID *shmaddr, INT32 shmflg)
return 0;
}
/// Allocate a shared memory region and map it
LosVmMapRegion *ShmatVmmAlloc(struct shmIDSource *seg, const VOID *shmaddr,
INT32 shmflg, UINT32 prot)
{
LosVmSpace *space = OsCurrProcessGet()->vmSpace;
LosVmMapRegion *region = NULL;
UINT32 flags = MAP_ANONYMOUS | MAP_SHARED;// this region is tagged shared + anonymous
UINT32 flags = MAP_ANONYMOUS | MAP_SHARED;
UINT32 mapFlags = flags | MAP_FIXED;
VADDR_T vaddr;
UINT32 regionFlags;
@@ -627,29 +543,29 @@ LosVmMapRegion *ShmatVmmAlloc(struct shmIDSource *seg, const VOID *shmaddr,
}
regionFlags = OsCvtProtFlagsToRegionFlags(prot, flags);
(VOID)LOS_MuxAcquire(&space->regionMux);
if (shmaddr == NULL) {// no attach address specified, let the kernel choose where to map the segment
region = LOS_RegionAlloc(space, 0, seg->ds.shm_segsz, regionFlags, 0);// allocate a region
} else {// an address was specified, so locate the region containing it first
if (shmaddr == NULL) {
region = LOS_RegionAlloc(space, 0, seg->ds.shm_segsz, regionFlags, 0);
} else {
if ((UINT32)shmflg & SHM_RND) {
vaddr = ROUNDDOWN((VADDR_T)(UINTPTR)shmaddr, SHMLBA);
} else {
vaddr = (VADDR_T)(UINTPTR)shmaddr;
}// when an attach address is given, remapping over an existing region requires the SHM_REMAP flag
}
if (!((UINT32)shmflg & SHM_REMAP) && (LOS_RegionFind(space, vaddr) ||
LOS_RegionFind(space, vaddr + seg->ds.shm_segsz - 1) ||
LOS_RegionRangeFind(space, vaddr, seg->ds.shm_segsz - 1))) {
ret = EINVAL;
goto ERROR;
}
vaddr = (VADDR_T)LOS_MMap(vaddr, seg->ds.shm_segsz, prot, mapFlags, -1, 0);// set up the mapping
region = LOS_RegionFind(space, vaddr);// look the region up again so it can be returned
vaddr = (VADDR_T)LOS_MMap(vaddr, seg->ds.shm_segsz, prot, mapFlags, -1, 0);
region = LOS_RegionFind(space, vaddr);
}
if (region == NULL) {
ret = ENOMEM;
goto ERROR;
}
ShmVmmMapping(space, &seg->node, region->range.base, regionFlags);// map the shared memory pages into the region
ShmVmmMapping(space, &seg->node, region->range.base, regionFlags);
(VOID)LOS_MuxRelease(&space->regionMux);
return region;
ERROR:
@@ -658,17 +574,6 @@ ERROR:
return NULL;
}
/*!
* @brief ShmAt
* Attach the shared memory segment identified by shmid to the calling
* process's address space so that it can be accessed directly.
* @param shmid   Identifier returned by shmget().
* @param shmaddr Preferred attach address; NULL lets the kernel choose one.
* @param shmflg  0, or flags such as SHM_RDONLY / SHM_RND / SHM_EXEC.
* @return Attach address on success, (VOID *)-1 on failure.
* A successful shmat increments the shm_nattch field of the segment's
* shmid_ds by 1. The shmid is recorded in the attached region and indexes the
* global segment array (g_shmSegs[shmid]), so it cannot exceed the configured
* limit (192 here).
* @see ShmatParamCheck | ShmatVmmAlloc
*/
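// Editor's illustrative sketch, not part of this kernel source: attaching an
// existing segment from user space with shmat(). The shmid parameter is
// assumed to come from a successful shmget() call such as the sketch above.
#include <stdio.h>
#include <string.h>
#include <sys/shm.h>

static char *demo_shmat(int shmid)
{
    /* NULL lets the kernel choose the attach address (the usual case);
     * passing an address plus SHM_RND would round it down to SHMLBA. */
    void *addr = shmat(shmid, NULL, 0);
    if (addr == (void *)-1) {
        perror("shmat");
        return NULL;
    }
    /* the mapping is shared: other attached processes observe this write */
    strcpy((char *)addr, "hello from shm");
    return (char *)addr;
}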
VOID *ShmAt(INT32 shmid, const VOID *shmaddr, INT32 shmflg)
{
INT32 ret;
@@ -677,13 +582,13 @@ VOID *ShmAt(INT32 shmid, const VOID *shmaddr, INT32 shmflg)
struct shmIDSource *seg = NULL;
LosVmMapRegion *r = NULL;
ret = ShmatParamCheck(shmaddr, shmflg);// parameter check
ret = ShmatParamCheck(shmaddr, shmflg);
if (ret != 0) {
set_errno(ret);
return (VOID *)-1;
}
if ((UINT32)shmflg & SHM_EXEC) {// convert shm flags into protection flags
if ((UINT32)shmflg & SHM_EXEC) {
prot |= PROT_EXEC;
acc_mode |= SHM_S_IXUGO;
} else if (((UINT32)shmflg & SHM_RDONLY) == 0) {
@@ -692,7 +597,7 @@ VOID *ShmAt(INT32 shmid, const VOID *shmaddr, INT32 shmflg)
}
SYSV_SHM_LOCK();
seg = ShmFindSeg(shmid);// find the segment
seg = ShmFindSeg(shmid);
if (seg == NULL) {
SYSV_SHM_UNLOCK();
return (VOID *)-1;
@@ -703,18 +608,18 @@ VOID *ShmAt(INT32 shmid, const VOID *shmaddr, INT32 shmflg)
goto ERROR;
}
seg->ds.shm_nattch++;// record in ds that one more process is attached
r = ShmatVmmAlloc(seg, shmaddr, shmflg, prot);// allocate a region in the current process space and map it to the shared memory
seg->ds.shm_nattch++;
r = ShmatVmmAlloc(seg, shmaddr, shmflg, prot);
if (r == NULL) {
seg->ds.shm_nattch--;
SYSV_SHM_UNLOCK();
return (VOID *)-1;
}
r->shmid = shmid;// hand the shmid over to the region
r->regionFlags |= VM_MAP_REGION_FLAG_SHM;// mark it as a shared memory region
seg->ds.shm_atime = time(NULL);// last attach time
seg->ds.shm_lpid = LOS_GetCurrProcessID();// PID of the last operating process
r->shmid = shmid;
r->regionFlags |= VM_MAP_REGION_FLAG_SHM;
seg->ds.shm_atime = time(NULL);
seg->ds.shm_lpid = LOS_GetCurrProcessID();
SYSV_SHM_UNLOCK();
return (VOID *)(UINTPTR)r->range.base;
@@ -725,19 +630,6 @@ ERROR:
return (VOID *)-1;
}
/*!
* @brief ShmCtl
* Perform the control operation cmd on the shared memory segment identified by shmid.
* @param shmid Identifier returned by shmget().
* @param cmd   Control command:
* IPC_STAT - copy the segment's shmid_ds into the user buffer buf;
* IPC_SET  - update the segment's shmid_ds (owner, permissions) from buf;
* IPC_RMID - mark the segment for removal; it is destroyed once the last
* attached process detaches.
* @param buf   User-space shmid_ds buffer used to exchange data with the kernel.
* @return 0 on success for most commands (SHM_STAT returns an identifier),
* -1 on failure with errno set.
* @see ShmPermCheck | ShmFreeSeg
*/
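// Editor's illustrative sketch, not part of this kernel source: querying and
// then removing a segment from user space with shmctl(). The shmid parameter
// is assumed to come from shmget().
#include <stdio.h>
#include <sys/shm.h>

static int demo_shmctl(int shmid)
{
    struct shmid_ds ds;

    if (shmctl(shmid, IPC_STAT, &ds) == -1) {   /* copy shmid_ds out of the kernel */
        perror("shmctl(IPC_STAT)");
        return -1;
    }
    printf("size = %zu bytes, attached processes = %lu\n",
           (size_t)ds.shm_segsz, (unsigned long)ds.shm_nattch);

    /* mark for removal; the segment is freed once the last process detaches */
    return shmctl(shmid, IPC_RMID, NULL);
}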
INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
{
struct shmIDSource *seg = NULL;
@@ -750,7 +642,7 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
SYSV_SHM_LOCK();
if ((cmd != IPC_INFO) && (cmd != SHM_INFO)) {
seg = ShmFindSeg(shmid);// find the seg by its index ID
seg = ShmFindSeg(shmid);
if (seg == NULL) {
SYSV_SHM_UNLOCK();
return -1;
@@ -764,13 +656,13 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
switch (cmd) {
case IPC_STAT:
case SHM_STAT:// fetch the segment structure
case SHM_STAT:
ret = ShmPermCheck(seg, SHM_S_IRUGO);
if (ret != 0) {
goto ERROR;
}
ret = LOS_ArchCopyToUser(buf, &seg->ds, sizeof(struct shmid_ds));// copy the segment data from kernel space to user space
ret = LOS_ArchCopyToUser(buf, &seg->ds, sizeof(struct shmid_ds));
if (ret != 0) {
ret = EFAULT;
goto ERROR;
@@ -779,13 +671,13 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
ret = (unsigned int)((unsigned int)seg->ds.shm_perm.seq << 16) | (unsigned int)((unsigned int)shmid & 0xffff); /* 16: use the seq as the upper 16 bits */
}
break;
case IPC_SET:// reconfigure the shared segment
case IPC_SET:
ret = ShmPermCheck(seg, SHM_M);
if (ret != 0) {
ret = EPERM;
goto ERROR;
}
// copy data from user space to kernel space
ret = LOS_ArchCopyFromUser(&shm_perm, &buf->shm_perm, sizeof(struct ipc_perm));
if (ret != 0) {
ret = EFAULT;
@@ -794,14 +686,14 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
seg->ds.shm_perm.uid = shm_perm.uid;
seg->ds.shm_perm.gid = shm_perm.gid;
seg->ds.shm_perm.mode = (seg->ds.shm_perm.mode & ~ACCESSPERMS) |
(shm_perm.mode & ACCESSPERMS);// keep only the access permission bits
(shm_perm.mode & ACCESSPERMS);
seg->ds.shm_ctime = time(NULL);
#ifdef LOSCFG_SHELL
(VOID)memcpy_s(seg->ownerName, OS_PCB_NAME_LEN, OS_PCB_FROM_PID(shm_perm.uid)->processName,
OS_PCB_NAME_LEN);
#endif
break;
case IPC_RMID:// remove the shared segment
case IPC_RMID:
ret = ShmPermCheck(seg, SHM_M);
if (ret != 0) {
ret = EPERM;
@@ -809,11 +701,11 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
}
seg->status |= SHM_SEG_REMOVE;
if (seg->ds.shm_nattch <= 0) {// no process is using it any more
if (seg->ds.shm_nattch <= 0) {
ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);
}
break;
case IPC_INFO:// copy the kernel-space shm info to user space
case IPC_INFO:
ret = LOS_ArchCopyToUser(buf, &IPC_SHM_INFO, sizeof(struct shminfo));
if (ret != 0) {
ret = EFAULT;
@@ -827,8 +719,8 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
shmInfo.shm_tot = 0;
shmInfo.swap_attempts = 0;
shmInfo.swap_successes = 0;
shmInfo.used_ids = ShmSegUsedCount();// number of segments in use
ret = LOS_ArchCopyToUser(buf, &shmInfo, sizeof(struct shm_info));// copy the kernel-space shm info to user space
shmInfo.used_ids = ShmSegUsedCount();
ret = LOS_ArchCopyToUser(buf, &shmInfo, sizeof(struct shm_info));
if (ret != 0) {
ret = EFAULT;
goto ERROR;
@@ -851,63 +743,55 @@ ERROR:
return -1;
}
/**
* @brief ShmDt
* Detach the shared memory segment that was attached at shmaddr by shmat();
* on success the shm_nattch field of the segment's shmid_ds is decremented by 1.
* @attention If the segment has already been marked for removal with
* shmctl(IPC_RMID) and this was the last attachment, the segment and its
* physical pages are freed here.
* @param shmaddr Address returned by a previous shmat() call.
* @return INT32 0 on success, -1 on failure with errno set.
*/
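// Editor's illustrative sketch, not part of this kernel source: detaching from
// user space with shmdt(). The addr parameter is assumed to be the address
// returned by a previous shmat(); if the segment was already marked with
// IPC_RMID, the last detach destroys it.
#include <stdio.h>
#include <sys/shm.h>

static int demo_shmdt(const void *addr)
{
    if (shmdt(addr) == -1) {
        perror("shmdt");
        return -1;
    }
    return 0;
}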
INT32 ShmDt(const VOID *shmaddr)
{
LosVmSpace *space = OsCurrProcessGet()->vmSpace;// get the current process's address space
LosVmSpace *space = OsCurrProcessGet()->vmSpace;
struct shmIDSource *seg = NULL;
LosVmMapRegion *region = NULL;
INT32 shmid;
INT32 ret;
if (IS_PAGE_ALIGNED(shmaddr) == 0) {// is the address page-aligned?
if (IS_PAGE_ALIGNED(shmaddr) == 0) {
ret = EINVAL;
goto ERROR;
}
(VOID)LOS_MuxAcquire(&space->regionMux);
region = LOS_RegionFind(space, (VADDR_T)(UINTPTR)shmaddr);// find the region
region = LOS_RegionFind(space, (VADDR_T)(UINTPTR)shmaddr);
if (region == NULL) {
ret = EINVAL;
goto ERROR_WITH_LOCK;
}
shmid = region->shmid;// the region's shared memory ID
shmid = region->shmid;
if (region->range.base != (VADDR_T)(UINTPTR)shmaddr) {// this is a detach of user space from the shared memory
ret = EINVAL; // shmaddr must be exactly region->range.base
if (region->range.base != (VADDR_T)(UINTPTR)shmaddr) {
ret = EINVAL;
goto ERROR_WITH_LOCK;
}
/* remove it from aspace */
LOS_RbDelNode(&space->regionRbTree, &region->rbNode);// remove the node from the red-black tree and the list
LOS_ArchMmuUnmap(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);// unmap the region
LOS_RbDelNode(&space->regionRbTree, &region->rbNode);
LOS_ArchMmuUnmap(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);
(VOID)LOS_MuxRelease(&space->regionMux);
/* free it */
free(region);// give the region's memory back to the memory pool
free(region);
SYSV_SHM_LOCK();
seg = ShmFindSeg(shmid);// find the seg; one shared segment can back many regions, including regions from other address spaces
seg = ShmFindSeg(shmid);
if (seg == NULL) {
ret = EINVAL;
SYSV_SHM_UNLOCK();
goto ERROR;
}
ShmPagesRefDec(seg);// drop the page reference counts
seg->ds.shm_nattch--;// one less process is using the shared memory
if ((seg->ds.shm_nattch <= 0) && // no process uses the shared memory any more
(seg->status & SHM_SEG_REMOVE)) {// and it is marked for removal, so the physical pages can be freed; otherwise other processes keep using them
ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);// free the page frames on the seg's page list and reset the seg state
ShmPagesRefDec(seg);
seg->ds.shm_nattch--;
if ((seg->ds.shm_nattch <= 0) &&
(seg->status & SHM_SEG_REMOVE)) {
ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);
} else {
seg->ds.shm_dtime = time(NULL);// record the detach time
seg->ds.shm_lpid = LOS_GetCurrProcessID();// record the PID of the operating process
seg->ds.shm_dtime = time(NULL);
seg->ds.shm_lpid = LOS_GetCurrProcessID();
}
SYSV_SHM_UNLOCK();
@@ -963,6 +847,7 @@ STATIC VOID OsShmInfoCmd(VOID)
}
SYSV_SHM_UNLOCK();
}
STATIC VOID OsShmDeleteCmd(INT32 shmid)
{
struct shmIDSource *seg = NULL;
@@ -991,7 +876,7 @@ STATIC VOID OsShmCmdUsage(VOID)
"\t-r [shmid], Recycle the specified shared memory about shmid\n"
"\t-h | --help, print shm command usage\n");
}
/// Shared memory shell command
UINT32 OsShellCmdShm(INT32 argc, const CHAR *argv[])
{
INT32 shmid;
@@ -1026,3 +911,4 @@ DONE:
SHELLCMD_ENTRY(shm_shellcmd, CMD_TYPE_SHOW, "shm", 2, (CmdCallBackFunc)OsShellCmdShm);
#endif
#endif
