diff --git a/src/kernel_liteos_a/kernel/base/core/los_bitmap.c b/src/kernel_liteos_a/kernel/base/core/los_bitmap.c
index 4e8b8600..039b6637 100644
--- a/src/kernel_liteos_a/kernel/base/core/los_bitmap.c
+++ b/src/kernel_liteos_a/kernel/base/core/los_bitmap.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2013-2019, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
@@ -31,54 +31,78 @@
#include "los_bitmap.h"
#include "los_printf.h"
-#include "los_toolchain.h"
-
-
-#define OS_BITMAP_MASK 0x1FU
+#include "los_toolchain.h" //GCC 编译器的内置函数
+
+/**
+ * @brief
+ * @verbatim
+ 基本概念
+ 位操作是指对二进制数的bit位进行操作。程序可以设置某一变量为状态字,状态字中的
+ 每一bit位(标志位)可以具有自定义的含义。
+
+ 使用场景
+ 系统提供标志位的置1和清0操作,可以改变标志位的内容,同时还提供获取状态字中标志位
+ 为1的最高位和最低位的功能。用户也可以对系统的寄存器进行位操作。
+
+ 参考
+ https://www.geeksforgeeks.org/builtin-functions-gcc-compiler/
+ * @endverbatim
+ */
+#define OS_BITMAP_MASK 0x1FU //位索引掩码(0x1F=31),将pos限制在一个32位字的0~31位范围内
#define OS_BITMAP_WORD_MASK ~0UL
-/* find first zero bit starting from LSB */
+/*! find first zero bit starting from LSB */
STATIC INLINE UINT16 Ffz(UINTPTR x)
-{
- return __builtin_ffsl(~x) - 1;
+{//__builtin_ffsl 是 GCC 内置函数(builtin,并非 glibc 库函数),返回参数从最低位起第一个1的位置(从1开始计),全0时返回0
+ return __builtin_ffsl(~x) - 1;//对~x求ffs再减1,即得到x中从LSB起第一个为0的位的索引(从0开始计)
}
-
+///对状态字的某一标志位进行置1操作
VOID LOS_BitmapSet(UINT32 *bitmap, UINT16 pos)
{
if (bitmap == NULL) {
return;
}
- *bitmap |= 1U << (pos & OS_BITMAP_MASK);
+ *bitmap |= 1U << (pos & OS_BITMAP_MASK);//在对应位上置1
}
-
+///对状态字的某一标志位进行清0操作
VOID LOS_BitmapClr(UINT32 *bitmap, UINT16 pos)
{
if (bitmap == NULL) {
return;
}
- *bitmap &= ~(1U << (pos & OS_BITMAP_MASK));
+ *bitmap &= ~(1U << (pos & OS_BITMAP_MASK));//在对应位上置0
}
+/**
+ * @brief 获取参数位图中最高位为1的索引位 例如: 00110110 返回 5
+ * @verbatim
+ CLZ 用于计算操作数最高端0的个数,这条指令主要用于以下两个场合
+ 1.计算操作数规范化(使其最高位为1)时需要左移的位数
+ 2.确定一个优先级掩码中最高优先级
+ * @endverbatim
+ * @param bitmap
+ * @return UINT16
+ */
UINT16 LOS_HighBitGet(UINT32 bitmap)
{
if (bitmap == 0) {
return LOS_INVALID_BIT_INDEX;
}
- return (OS_BITMAP_MASK - CLZ(bitmap));
+ return (OS_BITMAP_MASK - CLZ(bitmap));//CLZ = count leading zeros 用于计算整数的前导零
}
-
+/// 获取参数位图中最低位为1的索引位, 例如: 00110110 返回 1
UINT16 LOS_LowBitGet(UINT32 bitmap)
{
if (bitmap == 0) {
return LOS_INVALID_BIT_INDEX;
}
- return CTZ(bitmap);
+ return CTZ(bitmap);// CTZ = count trailing zeros 用于计算给定整数的尾随零
}
-
+/// 从start位置开始设置numsSet个bit位 置1
VOID LOS_BitmapSetNBits(UINTPTR *bitmap, UINT32 start, UINT32 numsSet)
{
UINTPTR *p = bitmap + BITMAP_WORD(start);
@@ -98,7 +122,7 @@ VOID LOS_BitmapSetNBits(UINTPTR *bitmap, UINT32 start, UINT32 numsSet)
*p |= maskToSet;
}
}
-
+///从start位置开始将numsClear个bit位清0,对状态字的连续标志位进行清0操作
VOID LOS_BitmapClrNBits(UINTPTR *bitmap, UINT32 start, UINT32 numsClear)
{
UINTPTR *p = bitmap + BITMAP_WORD(start);
@@ -118,7 +142,7 @@ VOID LOS_BitmapClrNBits(UINTPTR *bitmap, UINT32 start, UINT32 numsClear)
*p &= ~maskToClear;
}
}
-
+///在位图的前numBits个bit位范围内查找第一个为0的位,返回其索引
INT32 LOS_BitmapFfz(UINTPTR *bitmap, UINT32 numBits)
{
INT32 bit, i;
diff --git a/src/kernel_liteos_a/kernel/base/core/los_info.c b/src/kernel_liteos_a/kernel/base/core/los_info.c
index f03bac81..85c79be3 100644
--- a/src/kernel_liteos_a/kernel/base/core/los_info.c
+++ b/src/kernel_liteos_a/kernel/base/core/los_info.c
@@ -31,15 +31,15 @@
#include "los_info_pri.h"
#include "los_task_pri.h"
#include "los_vm_dump.h"
-
+//获取当前进程的父进程ID
STATIC UINT32 GetCurrParentPid(UINT32 pid, const LosProcessCB *processCB)
{
if (processCB->parentProcess == NULL) {
return 0;
}
-#ifdef LOSCFG_PID_CONTAINER
- if (pid == OS_USER_ROOT_PROCESS_ID) {
+#ifdef LOSCFG_PID_CONTAINER //开启PID容器特性时
+ if (pid == OS_USER_ROOT_PROCESS_ID) {//用户态根进程的父进程ID按约定返回0
return 0;
}
@@ -49,7 +49,7 @@ STATIC UINT32 GetCurrParentPid(UINT32 pid, const LosProcessCB *processCB)
#endif
return processCB->parentProcess->processID;
}
-
+//获取当前任务ID
STATIC INLINE UINT32 GetCurrTid(const LosTaskCB *taskCB)
{
#ifdef LOSCFG_PID_CONTAINER
diff --git a/src/kernel_liteos_a/kernel/base/core/los_process.c b/src/kernel_liteos_a/kernel/base/core/los_process.c
index 724ecf66..632190a6 100644
--- a/src/kernel_liteos_a/kernel/base/core/los_process.c
+++ b/src/kernel_liteos_a/kernel/base/core/los_process.c
@@ -1,6 +1,25 @@
+/*!
+ * @file los_process.c
+ * @brief 进程模块主文件
+ * @link
+ @verbatim
+
+ 并发(Concurrent):多个线程在单个核心运行,同一时间只能一个线程运行,内核不停切换线程,
+ 看起来像同时运行,实际上是线程不停切换
+ 并行(Parallel)每个线程分配给独立的CPU核心,线程同时运行
+ 单核CPU多个进程或多个线程内能实现并发(微观上的串行,宏观上的并行)
+ 多核CPU线程间可以实现宏观和微观上的并行
+ LITE_OS_SEC_BSS 和 LITE_OS_SEC_DATA_INIT 是告诉编译器这些全局变量放在哪个数据段
+
+ @endverbatim
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-12-15
+ */
+
/*
- * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2013-2019, Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -59,26 +78,33 @@
#include "los_vm_phys.h"
#include "los_vm_syscall.h"
-LITE_OS_SEC_BSS LosProcessCB *g_processCBArray = NULL;
-LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_freeProcess;
-LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_processRecycleList;
-LITE_OS_SEC_BSS UINT32 g_processMaxNum;
+LITE_OS_SEC_BSS LosProcessCB *g_processCBArray = NULL; ///< 进程池数组
+LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_freeProcess;///< 空闲状态下的进程链表, .个人觉得应该取名为 g_freeProcessList @note_thinking
+LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_processRecycleList;///< 需要回收的进程列表
+LITE_OS_SEC_BSS UINT32 g_processMaxNum;///< 进程最大数量,默认64个
#ifndef LOSCFG_PID_CONTAINER
-LITE_OS_SEC_BSS ProcessGroup *g_processGroup = NULL;
+LITE_OS_SEC_BSS ProcessGroup *g_processGroup = NULL;///< 全局进程组,负责管理所有进程组
#define OS_ROOT_PGRP(processCB) (g_processGroup)
#endif
-
+/*
+ * @brief 将进程插入到空闲链表中
+ * @details 清空PCB内容(保留进程ID),状态置为未使用后挂入g_freeProcess供再次分配
+ * @param[in] processCB 指定进程控制块
+ * @note 调用上下文通常已持有调度器锁(参见OsProcessCBRecycleToFree中的调用)
+ * @return 函数执行结果
+ * - VOID 无
+*/
STATIC INLINE VOID OsInsertPCBToFreeList(LosProcessCB *processCB)
{
#ifdef LOSCFG_PID_CONTAINER
OsPidContainerDestroy(processCB->container, processCB);
#endif
- UINT32 pid = processCB->processID;
- (VOID)memset_s(processCB, sizeof(LosProcessCB), 0, sizeof(LosProcessCB));
- processCB->processID = pid;
- processCB->processStatus = OS_PROCESS_FLAG_UNUSED;
- processCB->timerID = (timer_t)(UINTPTR)MAX_INVALID_TIMER_VID;
- LOS_ListTailInsert(&g_freeProcess, &processCB->pendList);
+ UINT32 pid = processCB->processID;//获取进程ID
+ (VOID)memset_s(processCB, sizeof(LosProcessCB), 0, sizeof(LosProcessCB));//进程描述符数据清0
+ processCB->processID = pid;//进程ID
+ processCB->processStatus = OS_PROCESS_FLAG_UNUSED;//设置为进程未使用
+ processCB->timerID = (timer_t)(UINTPTR)MAX_INVALID_TIMER_VID;//timeID初始化值
+ LOS_ListTailInsert(&g_freeProcess, &processCB->pendList);//进程节点挂入g_freeProcess以分配给后续进程使用
}
VOID OsDeleteTaskFromProcess(LosTaskCB *taskCB)
@@ -132,21 +158,28 @@ UINT32 OsProcessAddNewTask(UINTPTR processID, LosTaskCB *taskCB, SchedParam *par
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
-
+/**
+ * @brief 创建进程组
+ * @details 分配一个进程组并以参数进程为组长,初始化组内进程链表与僵死进程链表
+ * @param[in] processCB 作为组长的进程控制块
+ * @note 内存分配失败时返回NULL
+ * @return 函数执行结果
+ * - ProcessGroup 返回进程组
+*/
ProcessGroup *OsCreateProcessGroup(LosProcessCB *processCB)
{
- ProcessGroup *pgroup = LOS_MemAlloc(m_aucSysMem1, sizeof(ProcessGroup));
+ ProcessGroup *pgroup = LOS_MemAlloc(m_aucSysMem1, sizeof(ProcessGroup));//分配一个进程组
if (pgroup == NULL) {
return NULL;
}
- pgroup->pgroupLeader = (UINTPTR)processCB;
- LOS_ListInit(&pgroup->processList);
- LOS_ListInit(&pgroup->exitProcessList);
+ pgroup->pgroupLeader = (UINTPTR)processCB;//指定进程组负责人
+ LOS_ListInit(&pgroup->processList);//初始化组员链表
+ LOS_ListInit(&pgroup->exitProcessList);//初始化僵死进程链表
LOS_ListTailInsert(&pgroup->processList, &processCB->subordinateGroupList);
processCB->pgroup = pgroup;
- processCB->processStatus |= OS_PROCESS_FLAG_GROUP_LEADER;
+ processCB->processStatus |= OS_PROCESS_FLAG_GROUP_LEADER;//进程状态贴上当老大的标签
ProcessGroup *rootPGroup = OS_ROOT_PGRP(processCB);
if (rootPGroup == NULL) {
@@ -158,29 +191,32 @@ ProcessGroup *OsCreateProcessGroup(LosProcessCB *processCB)
return pgroup;
}
+/*! 退出进程组,参数是进程地址和进程组地址的地址 */
STATIC VOID ExitProcessGroup(LosProcessCB *processCB, ProcessGroup **pgroup)
{
LosProcessCB *pgroupCB = OS_GET_PGROUP_LEADER(processCB->pgroup);
- LOS_ListDelete(&processCB->subordinateGroupList);
- if (LOS_ListEmpty(&processCB->pgroup->processList) && LOS_ListEmpty(&processCB->pgroup->exitProcessList)) {
+
+ LOS_ListDelete(&processCB->subordinateGroupList);//从进程组进程链表上摘出去
+ if (LOS_ListEmpty(&processCB->pgroup->processList) && LOS_ListEmpty(&processCB->pgroup->exitProcessList)) {//进程组进程链表和退出进程链表都为空时
#ifdef LOSCFG_PID_CONTAINER
if (processCB->pgroup != OS_ROOT_PGRP(processCB)) {
#endif
- LOS_ListDelete(&processCB->pgroup->groupList);
- *pgroup = processCB->pgroup;
+ LOS_ListDelete(&processCB->pgroup->groupList);//从全局进程组链表上把自己摘出去 记住它是 LOS_ListTailInsert(&g_processGroup->groupList, &group->groupList) 挂上去的
+ *pgroup = processCB->pgroup;//通过出参把进程组指针带回调用方,由调用方(如OsChildProcessResourcesFree)统一LOS_MemFree释放该内存
#ifdef LOSCFG_PID_CONTAINER
}
#endif
pgroupCB->processStatus &= ~OS_PROCESS_FLAG_GROUP_LEADER;
- if (OsProcessIsUnused(pgroupCB) && !(pgroupCB->processStatus & OS_PROCESS_FLAG_EXIT)) {
- LOS_ListDelete(&pgroupCB->pendList);
- OsInsertPCBToFreeList(pgroupCB);
+ if (OsProcessIsUnused(pgroupCB) && !(pgroupCB->processStatus & OS_PROCESS_FLAG_EXIT)) {//组长进程已是未使用状态且未带退出标签时
+ LOS_ListDelete(&pgroupCB->pendList);//进程从全局进程链表上摘除
+ OsInsertPCBToFreeList(pgroupCB);//释放进程的资源,回到freelist再利用
}
}
processCB->pgroup = NULL;
}
+/*! 通过指定组ID找到进程组 */
STATIC ProcessGroup *OsFindProcessGroup(UINT32 gid)
{
ProcessGroup *pgroup = NULL;
@@ -201,6 +237,7 @@ STATIC ProcessGroup *OsFindProcessGroup(UINT32 gid)
return NULL;
}
+/*! 给指定进程组发送信号 */
STATIC INT32 OsSendSignalToSpecifyProcessGroup(ProcessGroup *pgroup, siginfo_t *info, INT32 permission)
{
INT32 ret, success, err;
@@ -294,15 +331,17 @@ STATIC LosProcessCB *OsFindExitChildProcess(const LosProcessCB *processCB, const
return NULL;
}
+/*! 唤醒等待wakePID结束的任务 */
VOID OsWaitWakeTask(LosTaskCB *taskCB, UINTPTR wakePID)
{
taskCB->waitID = wakePID;
taskCB->ops->wake(taskCB);
#ifdef LOSCFG_KERNEL_SMP
- LOS_MpSchedule(OS_MP_CPU_ALL);
+ LOS_MpSchedule(OS_MP_CPU_ALL);//向所有cpu发送调度指令
#endif
}
+/*! 唤醒等待参数进程结束的任务 */
STATIC BOOL OsWaitWakeSpecifiedProcess(LOS_DL_LIST *head, const LosProcessCB *processCB, LOS_DL_LIST **anyList)
{
LOS_DL_LIST *list = head;
@@ -310,17 +349,17 @@ STATIC BOOL OsWaitWakeSpecifiedProcess(LOS_DL_LIST *head, const LosProcessCB *pr
UINTPTR processID = 0;
BOOL find = FALSE;
- while (list->pstNext != head) {
- taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(list));
+ while (list->pstNext != head) {//遍历等待链表 processCB->waitList
+ taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(list));//一个一个来
if ((taskCB->waitFlag == OS_PROCESS_WAIT_PRO) && (taskCB->waitID == (UINTPTR)processCB)) {
if (processID == 0) {
processID = taskCB->waitID;
- find = TRUE;
- } else {
+ find = TRUE;//找到了
+ } else {//同一进程存在多个等待任务时,第二个及之后的任务以无效ID(OS_INVALID_VALUE)被唤醒
processID = OS_INVALID_VALUE;
}
- OsWaitWakeTask(taskCB, processID);
+ OsWaitWakeTask(taskCB, processID);//唤醒这个任务,此时会切到 LOS_Wait runTask->waitFlag = 0;处运行
continue;
}
@@ -334,6 +373,7 @@ STATIC BOOL OsWaitWakeSpecifiedProcess(LOS_DL_LIST *head, const LosProcessCB *pr
return find;
}
+/*! 检查父进程的等待任务并唤醒父进程去处理等待任务 */
STATIC VOID OsWaitCheckAndWakeParentProcess(LosProcessCB *parentCB, const LosProcessCB *processCB)
{
LOS_DL_LIST *head = &parentCB->waitList;
@@ -341,30 +381,30 @@ STATIC VOID OsWaitCheckAndWakeParentProcess(LosProcessCB *parentCB, const LosPro
LosTaskCB *taskCB = NULL;
BOOL findSpecified = FALSE;
- if (LOS_ListEmpty(&parentCB->waitList)) {
- return;
+ if (LOS_ListEmpty(&parentCB->waitList)) {//父进程中是否有在等待子进程退出的任务?
+ return;//没有就退出
}
-
- findSpecified = OsWaitWakeSpecifiedProcess(head, processCB, &list);
+ //先处理等待指定子进程(waitFlag == OS_PROCESS_WAIT_PRO)结束的任务
+ findSpecified = OsWaitWakeSpecifiedProcess(head, processCB, &list);//找到指定的任务
if (findSpecified == TRUE) {
/* No thread is waiting for any child process to finish */
- if (LOS_ListEmpty(&parentCB->waitList)) {
- return;
+ if (LOS_ListEmpty(&parentCB->waitList)) {//没有线程正在等待任何子进程结束
+ return;//已经处理完了,注意在OsWaitWakeSpecifiedProcess中做了频繁的任务切换
} else if (!LOS_ListEmpty(&parentCB->childrenList)) {
/* Other child processes exist, and other threads that are waiting
* for the child to finish continue to wait
- */
+ *///存在其他子进程,正在等待它们的子进程结束而将继续等待
return;
}
}
/* Waiting threads are waiting for a specified child process to finish */
- if (list == NULL) {
+ if (list == NULL) {//等待线程正在等待指定的子进程结束
return;
}
/* No child processes exist and all waiting threads are awakened */
- if (findSpecified == TRUE) {
+ if (findSpecified == TRUE) {//所有等待的任务都被一一唤醒
while (list->pstNext != head) {
taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(list));
OsWaitWakeTask(taskCB, OS_INVALID_VALUE);
@@ -372,7 +412,7 @@ STATIC VOID OsWaitCheckAndWakeParentProcess(LosProcessCB *parentCB, const LosPro
return;
}
- while (list->pstNext != head) {
+ while (list->pstNext != head) {//处理 OS_PROCESS_WAIT_GID 标签
taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(list));
if (taskCB->waitFlag == OS_PROCESS_WAIT_GID) {
if (taskCB->waitID != (UINTPTR)OS_GET_PGROUP_LEADER(processCB->pgroup)) {
@@ -396,6 +436,7 @@ STATIC VOID OsWaitCheckAndWakeParentProcess(LosProcessCB *parentCB, const LosPro
return;
}
+/*! 回收指定进程的资源 */
LITE_OS_SEC_TEXT VOID OsProcessResourcesToFree(LosProcessCB *processCB)
{
#ifdef LOSCFG_KERNEL_VM
@@ -404,10 +445,10 @@ LITE_OS_SEC_TEXT VOID OsProcessResourcesToFree(LosProcessCB *processCB)
}
#endif
-#ifdef LOSCFG_SECURITY_CAPABILITY
+#ifdef LOSCFG_SECURITY_CAPABILITY //安全开关
if (processCB->user != NULL) {
- (VOID)LOS_MemFree(m_aucSysMem1, processCB->user);
- processCB->user = NULL;
+ (VOID)LOS_MemFree(m_aucSysMem1, processCB->user);//删除用户
+ processCB->user = NULL; //重置指针为空
}
#endif
@@ -465,26 +506,28 @@ LITE_OS_SEC_TEXT VOID OsProcessResourcesToFree(LosProcessCB *processCB)
}
}
+/*! 回收僵死状态进程的资源 */
STATIC VOID OsRecycleZombiesProcess(LosProcessCB *childCB, ProcessGroup **pgroup)
{
- ExitProcessGroup(childCB, pgroup);
- LOS_ListDelete(&childCB->siblingList);
+ ExitProcessGroup(childCB, pgroup);//退出进程组
+ LOS_ListDelete(&childCB->siblingList);//从父亲大人的子孙链表上摘除
if (OsProcessIsDead(childCB)) {
OsDeleteTaskFromProcess(childCB->threadGroup);
- childCB->processStatus &= ~OS_PROCESS_STATUS_ZOMBIES;
- childCB->processStatus |= OS_PROCESS_FLAG_UNUSED;
+ childCB->processStatus &= ~OS_PROCESS_STATUS_ZOMBIES;//去掉僵死标签
+ childCB->processStatus |= OS_PROCESS_FLAG_UNUSED;//贴上没使用标签,进程由进程池分配,进程退出后重新回到空闲进程池
}
- LOS_ListDelete(&childCB->pendList);
- if (childCB->processStatus & OS_PROCESS_FLAG_EXIT) {
- LOS_ListHeadInsert(&g_processRecycleList, &childCB->pendList);
+ LOS_ListDelete(&childCB->pendList);//将自己从阻塞链表上摘除,注意有很多原因引起阻塞,pendList挂在哪里就以为这属于哪类阻塞
+ if (childCB->processStatus & OS_PROCESS_FLAG_EXIT) {//如果有退出标签
+ LOS_ListHeadInsert(&g_processRecycleList, &childCB->pendList);//从头部插入,注意g_processRecyleList挂的是pendList节点,所以要通过OS_PCB_FROM_PENDLIST找.
} else if (OsProcessIsPGroupLeader(childCB)) {
- LOS_ListTailInsert(&g_processRecycleList, &childCB->pendList);
+ LOS_ListTailInsert(&g_processRecycleList, &childCB->pendList);//从尾部插入,意思就是组长尽量最后一个处理
} else {
- OsInsertPCBToFreeList(childCB);
+ OsInsertPCBToFreeList(childCB);//直接插到freeList中去,可用于重新分配了。
}
}
+/*! 当一个进程自然退出的时候,它的孩子进程由两位老祖宗收养 */
STATIC VOID OsDealAliveChildProcess(LosProcessCB *processCB)
{
LosProcessCB *childCB = NULL;
@@ -497,67 +540,70 @@ STATIC VOID OsDealAliveChildProcess(LosProcessCB *processCB)
return;
}
#endif
-
- if (!LOS_ListEmpty(&processCB->childrenList)) {
- childHead = processCB->childrenList.pstNext;
- LOS_ListDelete(&(processCB->childrenList));
- if (OsProcessIsUserMode(processCB)) {
+ if (!LOS_ListEmpty(&processCB->childrenList)) {//如果存在孩子进程
+ childHead = processCB->childrenList.pstNext;//获取孩子链表
+ LOS_ListDelete(&(processCB->childrenList));//清空自己的孩子链表
+ if (OsProcessIsUserMode(processCB)) {//是用户态进程
parentCB = OS_PCB_FROM_PID(OS_USER_ROOT_PROCESS_ID);
} else {
parentCB = OsGetKernelInitProcess();
}
- for (nextList = childHead; ;) {
- childCB = OS_PCB_FROM_SIBLIST(nextList);
+ for (nextList = childHead; ;) {//遍历孩子链表
+ childCB = OS_PCB_FROM_SIBLIST(nextList);//找到孩子的真身
childCB->parentProcess = parentCB;
- nextList = nextList->pstNext;
- if (nextList == childHead) {
+ nextList = nextList->pstNext;//找下一个孩子进程
+ if (nextList == childHead) {//一圈下来,孩子们都磕完头了
break;
}
}
- LOS_ListTailInsertList(&parentCB->childrenList, childHead);
+ LOS_ListTailInsertList(&parentCB->childrenList, childHead);//挂到老祖宗的孩子链表上
}
return;
}
+/*! 回收指定进程的已经退出(死亡)的孩子进程所占资源 */
STATIC VOID OsChildProcessResourcesFree(const LosProcessCB *processCB)
{
LosProcessCB *childCB = NULL;
ProcessGroup *pgroup = NULL;
- while (!LOS_ListEmpty(&((LosProcessCB *)processCB)->exitChildList)) {
- childCB = LOS_DL_LIST_ENTRY(processCB->exitChildList.pstNext, LosProcessCB, siblingList);
- OsRecycleZombiesProcess(childCB, &pgroup);
+ while (!LOS_ListEmpty(&((LosProcessCB *)processCB)->exitChildList)) {//遍历直到没有了退出(死亡)的孩子进程
+ childCB = LOS_DL_LIST_ENTRY(processCB->exitChildList.pstNext, LosProcessCB, siblingList);//获取孩子进程,
+ OsRecycleZombiesProcess(childCB, &pgroup);//其中会将childCB从exitChildList链表上摘出去
(VOID)LOS_MemFree(m_aucSysMem1, pgroup);
}
}
+/*! 一个进程的自然消亡过程,参数是当前运行的任务*/
VOID OsProcessNaturalExit(LosProcessCB *processCB, UINT32 status)
{
- OsChildProcessResourcesFree(processCB);
+ OsChildProcessResourcesFree(processCB);//释放孩子进程的资源
+
/* is a child process */
if (processCB->parentProcess != NULL) {
LosProcessCB *parentCB = processCB->parentProcess;
- LOS_ListDelete(&processCB->siblingList);
- if (!OsProcessExitCodeSignalIsSet(processCB)) {
- OsProcessExitCodeSet(processCB, status);
+ LOS_ListDelete(&processCB->siblingList);//将自己从兄弟链表中摘除,家人们,永别了!
+ if (!OsProcessExitCodeSignalIsSet(processCB)) {//是否设置了退出码?
+ OsProcessExitCodeSet(processCB, status);//将进程状态设为退出码
}
- LOS_ListTailInsert(&parentCB->exitChildList, &processCB->siblingList);
- LOS_ListDelete(&processCB->subordinateGroupList);
- LOS_ListTailInsert(&processCB->pgroup->exitProcessList, &processCB->subordinateGroupList);
+ LOS_ListTailInsert(&parentCB->exitChildList, &processCB->siblingList);//挂到父进程的孩子消亡链表,家人中,永别的可不止我一个.
+ LOS_ListDelete(&processCB->subordinateGroupList);//和志同道合的朋友们永别了,注意家里可不一定是朋友的,所有各有链表.
+ LOS_ListTailInsert(&processCB->pgroup->exitProcessList, &processCB->subordinateGroupList);//挂到进程组消亡链表,朋友中,永别的可不止我一个.
- OsWaitCheckAndWakeParentProcess(parentCB, processCB);
+ OsWaitCheckAndWakeParentProcess(parentCB, processCB);//检查父进程的等待任务链表并唤醒对应的任务,此处将会频繁的切到其他任务运行.
- OsDealAliveChildProcess(processCB);
+ OsDealAliveChildProcess(processCB);//孩子们要怎么处理,移交给(用户态和内核态)根进程
+
+ processCB->processStatus |= OS_PROCESS_STATUS_ZOMBIES;//贴上僵死进程的标签
- processCB->processStatus |= OS_PROCESS_STATUS_ZOMBIES;
#ifdef LOSCFG_KERNEL_VM
(VOID)OsSendSigToProcess(parentCB, SIGCHLD, OS_KERNEL_KILL_PERMISSION);
#endif
- LOS_ListHeadInsert(&g_processRecycleList, &processCB->pendList);
+ LOS_ListHeadInsert(&g_processRecycleList, &processCB->pendList);//将进程通过其阻塞节点挂入全局进程回收链表
return;
}
@@ -571,33 +617,33 @@ STATIC VOID SystemProcessEarlyInit(LosProcessCB *processCB)
#ifdef LOSCFG_KERNEL_CONTAINER
OsContainerInitSystemProcess(processCB);
#endif
- if (processCB == OsGetKernelInitProcess()) {
- OsSetMainTaskProcess((UINTPTR)processCB);
+ if (processCB == OsGetKernelInitProcess()) {//2号进程
+ OsSetMainTaskProcess((UINTPTR)processCB);//将内核根进程设为主任务所属进程
}
}
-
+/*! 进程模块初始化,被编译放在代码段 .init 中*/
UINT32 OsProcessInit(VOID)
{
UINT32 index;
UINT32 size;
UINT32 ret;
- g_processMaxNum = LOSCFG_BASE_CORE_PROCESS_LIMIT;
+ g_processMaxNum = LOSCFG_BASE_CORE_PROCESS_LIMIT;//默认支持64个进程
size = (g_processMaxNum + 1) * sizeof(LosProcessCB);
- g_processCBArray = (LosProcessCB *)LOS_MemAlloc(m_aucSysMem1, size);
+ g_processCBArray = (LosProcessCB *)LOS_MemAlloc(m_aucSysMem1, size);// 进程池,占用内核堆,内存池分配
if (g_processCBArray == NULL) {
return LOS_NOK;
}
- (VOID)memset_s(g_processCBArray, size, 0, size);
+ (VOID)memset_s(g_processCBArray, size, 0, size);//安全方式重置清0
- LOS_ListInit(&g_freeProcess);
- LOS_ListInit(&g_processRecycleList);
+ LOS_ListInit(&g_freeProcess);//进程空闲链表初始化,创建一个进程时从g_freeProcess中申请一个进程描述符使用
+ LOS_ListInit(&g_processRecycleList);//进程回收链表初始化,回收完成后进入g_freeProcess等待再次被申请使用
- for (index = 0; index < g_processMaxNum; index++) {
- g_processCBArray[index].processID = index;
- g_processCBArray[index].processStatus = OS_PROCESS_FLAG_UNUSED;
- LOS_ListTailInsert(&g_freeProcess, &g_processCBArray[index].pendList);
+ for (index = 0; index < g_processMaxNum; index++) {//进程池循环创建
+ g_processCBArray[index].processID = index;//进程ID[0-g_processMaxNum-1]赋值
+ g_processCBArray[index].processStatus = OS_PROCESS_FLAG_UNUSED;// 默认都是白纸一张,贴上未使用标签
+ LOS_ListTailInsert(&g_freeProcess, &g_processCBArray[index].pendList);//注意g_freeProcess挂的是pendList节点,所以使用要通过OS_PCB_FROM_PENDLIST找到进程实体.
}
/* Default process to prevent thread PCB from being empty */
@@ -616,21 +662,23 @@ UINT32 OsProcessInit(VOID)
#ifdef LOSCFG_KERNEL_PLIMITS
OsProcLimiterSetInit();
#endif
- SystemProcessEarlyInit(OsGetIdleProcess());
+ SystemProcessEarlyInit(OsGetIdleProcess());//初始化 0,1,2号进程
SystemProcessEarlyInit(OsGetUserInitProcess());
SystemProcessEarlyInit(OsGetKernelInitProcess());
return LOS_OK;
}
+/*! 进程回收再利用过程*/
LITE_OS_SEC_TEXT VOID OsProcessCBRecycleToFree(VOID)
{
UINT32 intSave;
LosProcessCB *processCB = NULL;
SCHEDULER_LOCK(intSave);
- while (!LOS_ListEmpty(&g_processRecycleList)) {
- processCB = OS_PCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_processRecycleList));
- if (!(processCB->processStatus & OS_PROCESS_FLAG_EXIT)) {
+ while (!LOS_ListEmpty(&g_processRecycleList)) {//循环任务回收链表,直到为空
+ processCB = OS_PCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_processRecycleList));//找到回收链表中第一个进程实体
+ //OS_PCB_FROM_PENDLIST 代表的是通过pendlist节点找到 PCB实体,因为g_processRecyleList上面挂的是pendlist节点位置
+ if (!(processCB->processStatus & OS_PROCESS_FLAG_EXIT)) {//进程没有退出标签
break;
}
SCHEDULER_UNLOCK(intSave);
@@ -638,27 +686,27 @@ LITE_OS_SEC_TEXT VOID OsProcessCBRecycleToFree(VOID)
OsTaskCBRecycleToFree();
SCHEDULER_LOCK(intSave);
- processCB->processStatus &= ~OS_PROCESS_FLAG_EXIT;
+ processCB->processStatus &= ~OS_PROCESS_FLAG_EXIT;//给进程撕掉退出标签,(可能进程并没有这个标签)
#ifdef LOSCFG_KERNEL_VM
LosVmSpace *space = NULL;
- if (OsProcessIsUserMode(processCB)) {
- space = processCB->vmSpace;
+ if (OsProcessIsUserMode(processCB)) {//进程是否是用户态进程
+ space = processCB->vmSpace;//只有用户态的进程才需要释放虚拟内存空间
}
processCB->vmSpace = NULL;
#endif
/* OS_PROCESS_FLAG_GROUP_LEADER: The lead process group cannot be recycled without destroying the PCB.
* !OS_PROCESS_FLAG_UNUSED: Parent process does not reclaim child process resources.
*/
- LOS_ListDelete(&processCB->pendList);
+ LOS_ListDelete(&processCB->pendList);//将进程从进程链表上摘除
if (OsProcessIsPGroupLeader(processCB) || OsProcessIsDead(processCB)) {
- LOS_ListTailInsert(&g_processRecycleList, &processCB->pendList);
+ LOS_ListTailInsert(&g_processRecycleList, &processCB->pendList);//将进程挂到进程回收链表上,因为组长不能走啊
} else {
/* Clear the bottom 4 bits of process status */
- OsInsertPCBToFreeList(processCB);
+ OsInsertPCBToFreeList(processCB);//进程回到可分配池中,再分配利用
}
#ifdef LOSCFG_KERNEL_VM
SCHEDULER_UNLOCK(intSave);
- (VOID)LOS_VmSpaceFree(space);
+ (VOID)LOS_VmSpaceFree(space);//释放用户态进程的虚拟内存空间,因为内核只有一个虚拟空间,因此不需要释放虚拟空间.
SCHEDULER_LOCK(intSave);
#endif
}
@@ -666,6 +714,7 @@ LITE_OS_SEC_TEXT VOID OsProcessCBRecycleToFree(VOID)
SCHEDULER_UNLOCK(intSave);
}
+/*! 删除PCB块 其实是 PCB块回归进程池,先进入回收链表*/
STATIC VOID OsDeInitPCB(LosProcessCB *processCB)
{
UINT32 intSave;
@@ -680,12 +729,11 @@ STATIC VOID OsDeInitPCB(LosProcessCB *processCB)
return;
}
#endif
-
- OsProcessResourcesToFree(processCB);
+ OsProcessResourcesToFree(processCB);//释放进程所占用的资源
SCHEDULER_LOCK(intSave);
if (processCB->parentProcess != NULL) {
- LOS_ListDelete(&processCB->siblingList);
+ LOS_ListDelete(&processCB->siblingList);//将进程从兄弟链表中摘除
processCB->parentProcess = NULL;
}
@@ -693,8 +741,8 @@ STATIC VOID OsDeInitPCB(LosProcessCB *processCB)
ExitProcessGroup(processCB, &pgroup);
}
- processCB->processStatus &= ~OS_PROCESS_STATUS_INIT;
- processCB->processStatus |= OS_PROCESS_FLAG_EXIT;
+ processCB->processStatus &= ~OS_PROCESS_STATUS_INIT;//设置进程状态为非初始化
+ processCB->processStatus |= OS_PROCESS_FLAG_EXIT; //设置进程状态为退出
LOS_ListHeadInsert(&g_processRecycleList, &processCB->pendList);
SCHEDULER_UNLOCK(intSave);
@@ -703,6 +751,7 @@ STATIC VOID OsDeInitPCB(LosProcessCB *processCB)
return;
}
+/*! 设置进程的名字*/
UINT32 OsSetProcessName(LosProcessCB *processCB, const CHAR *name)
{
errno_t errRet;
@@ -735,30 +784,31 @@ UINT32 OsSetProcessName(LosProcessCB *processCB, const CHAR *name)
return LOS_OK;
}
+/*! 初始化PCB(进程控制块)*/
STATIC UINT32 OsInitPCB(LosProcessCB *processCB, UINT32 mode, const CHAR *name)
{
- processCB->processMode = mode;
- processCB->processStatus = OS_PROCESS_STATUS_INIT;
+ processCB->processMode = mode; //用户态进程还是内核态进程
+ processCB->processStatus = OS_PROCESS_STATUS_INIT; //进程初始状态
processCB->parentProcess = NULL;
processCB->threadGroup = NULL;
- processCB->umask = OS_PROCESS_DEFAULT_UMASK;
+ processCB->umask = OS_PROCESS_DEFAULT_UMASK; //掩码
processCB->timerID = (timer_t)(UINTPTR)MAX_INVALID_TIMER_VID;
- LOS_ListInit(&processCB->threadSiblingList);
- LOS_ListInit(&processCB->childrenList);
- LOS_ListInit(&processCB->exitChildList);
- LOS_ListInit(&(processCB->waitList));
+ LOS_ListInit(&processCB->threadSiblingList);//初始化孩子任务/线程链表,上面挂的都是由此fork的孩子线程 见于 OsTaskCBInit LOS_ListTailInsert(&(processCB->threadSiblingList), &(taskCB->threadList));
+ LOS_ListInit(&processCB->childrenList); //初始化孩子进程链表,上面挂的都是由此fork的孩子进程 见于 OsCopyParent LOS_ListTailInsert(&parentProcessCB->childrenList, &childProcessCB->siblingList);
+ LOS_ListInit(&processCB->exitChildList); //初始化记录退出孩子进程链表,上面挂的是哪些exit 见于 OsProcessNaturalExit LOS_ListTailInsert(&parentCB->exitChildList, &processCB->siblingList);
+ LOS_ListInit(&(processCB->waitList)); //初始化等待任务链表 上面挂的是处于等待的 见于 OsWaitInsertWaitLIstInOrder LOS_ListHeadInsert(&processCB->waitList, &runTask->pendList);
-#ifdef LOSCFG_KERNEL_VM
- if (OsProcessIsUserMode(processCB)) {
- processCB->vmSpace = OsCreateUserVmSpace();
+#ifdef LOSCFG_KERNEL_VM
+ if (OsProcessIsUserMode(processCB)) {//如果是用户态进程
+ processCB->vmSpace = OsCreateUserVmSpace();//创建用户空间
if (processCB->vmSpace == NULL) {
processCB->processStatus = OS_PROCESS_FLAG_UNUSED;
return LOS_ENOMEM;
}
} else {
- processCB->vmSpace = LOS_GetKVmSpace();
- }
+ processCB->vmSpace = LOS_GetKVmSpace();//从这里也可以看出,所有内核态进程是共享一个进程空间的
+ }//在鸿蒙内核态进程只有kprocess 和 kidle 两个
#endif
#ifdef LOSCFG_KERNEL_CPUP
@@ -768,16 +818,14 @@ STATIC UINT32 OsInitPCB(LosProcessCB *processCB, UINT32 mode, const CHAR *name)
}
(VOID)memset_s(processCB->processCpup, sizeof(OsCpupBase), 0, sizeof(OsCpupBase));
#endif
-
#ifdef LOSCFG_SECURITY_VID
status_t status = VidMapListInit(processCB);
if (status != LOS_OK) {
return LOS_ENOMEM;
}
#endif
-
#ifdef LOSCFG_SECURITY_CAPABILITY
- OsInitCapability(processCB);
+ OsInitCapability(processCB);//初始化进程安全相关功能
#endif
if (OsSetProcessName(processCB, name) != LOS_OK) {
@@ -786,10 +834,10 @@ STATIC UINT32 OsInitPCB(LosProcessCB *processCB, UINT32 mode, const CHAR *name)
return LOS_OK;
}
-
+//创建用户
#ifdef LOSCFG_SECURITY_CAPABILITY
-STATIC User *OsCreateUser(UINT32 userID, UINT32 gid, UINT32 size)
-{
+STATIC User *OsCreateUser(UINT32 userID, UINT32 gid, UINT32 size)//参数size 表示组数量
+{ //(size - 1) * sizeof(UINT32) 用于 user->groups[..],这种设计节约了内存,不造成不需要的浪费
User *user = LOS_MemAlloc(m_aucSysMem1, sizeof(User) + (size - 1) * sizeof(UINT32));
if (user == NULL) {
return NULL;
@@ -799,11 +847,12 @@ STATIC User *OsCreateUser(UINT32 userID, UINT32 gid, UINT32 size)
user->effUserID = userID;
user->gid = gid;
user->effGid = gid;
- user->groupNumber = size;
- user->groups[0] = gid;
+ user->groupNumber = size;//用户组数量
+ user->groups[0] = gid; //用户组列表,一个用户可以属于多个用户组
return user;
}
+/*! 检查参数群组ID是否在当前用户所属群组中*/
LITE_OS_SEC_TEXT BOOL LOS_CheckInGroups(UINT32 gid)
{
UINT32 intSave;
@@ -811,8 +860,8 @@ LITE_OS_SEC_TEXT BOOL LOS_CheckInGroups(UINT32 gid)
User *user = NULL;
SCHEDULER_LOCK(intSave);
- user = OsCurrUserGet();
- for (count = 0; count < user->groupNumber; count++) {
+ user = OsCurrUserGet();//当前进程所属用户
+ for (count = 0; count < user->groupNumber; count++) {//循环对比
if (user->groups[count] == gid) {
SCHEDULER_UNLOCK(intSave);
return TRUE;
@@ -824,6 +873,7 @@ LITE_OS_SEC_TEXT BOOL LOS_CheckInGroups(UINT32 gid)
}
#endif
+/*! 获取当前进程的用户ID*/
LITE_OS_SEC_TEXT INT32 LOS_GetUserID(VOID)
{
#ifdef LOSCFG_SECURITY_CAPABILITY
@@ -843,6 +893,7 @@ LITE_OS_SEC_TEXT INT32 LOS_GetUserID(VOID)
#endif
}
+/*! 获取当前进程的用户组ID*/
LITE_OS_SEC_TEXT INT32 LOS_GetGroupID(VOID)
{
#ifdef LOSCFG_SECURITY_CAPABILITY
@@ -863,6 +914,7 @@ LITE_OS_SEC_TEXT INT32 LOS_GetGroupID(VOID)
#endif
}
+/*! 进程创建初始化*/
STATIC UINT32 OsSystemProcessInit(LosProcessCB *processCB, UINT32 flags, const CHAR *name)
{
UINT32 ret = OsInitPCB(processCB, flags, name);
@@ -871,7 +923,7 @@ STATIC UINT32 OsSystemProcessInit(LosProcessCB *processCB, UINT32 flags, const C
}
#ifdef LOSCFG_FS_VFS
- processCB->files = alloc_files();
+ processCB->files = alloc_files();//分配进程的文件的管理器
if (processCB->files == NULL) {
ret = LOS_ENOMEM;
goto EXIT;
@@ -884,8 +936,8 @@ STATIC UINT32 OsSystemProcessInit(LosProcessCB *processCB, UINT32 flags, const C
goto EXIT;
}
-#ifdef LOSCFG_SECURITY_CAPABILITY
- processCB->user = OsCreateUser(0, 0, 1);
+#ifdef LOSCFG_SECURITY_CAPABILITY //用户安全宏
+ processCB->user = OsCreateUser(0, 0, 1);//创建用户
if (processCB->user == NULL) {
ret = LOS_ENOMEM;
goto EXIT;
@@ -902,10 +954,10 @@ STATIC UINT32 OsSystemProcessInit(LosProcessCB *processCB, UINT32 flags, const C
return LOS_OK;
EXIT:
- OsDeInitPCB(processCB);
+ OsDeInitPCB(processCB);//删除进程控制块,归还内存
return ret;
}
-
+/*! 创建2,0号进程,即内核态进程的老祖宗*/
LITE_OS_SEC_TEXT_INIT UINT32 OsSystemProcessCreate(VOID)
{
LosProcessCB *kerInitProcess = OsGetKernelInitProcess();
@@ -913,22 +965,22 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSystemProcessCreate(VOID)
if (ret != LOS_OK) {
return ret;
}
- kerInitProcess->processStatus &= ~OS_PROCESS_STATUS_INIT;
+ kerInitProcess->processStatus &= ~OS_PROCESS_STATUS_INIT;//去掉初始化标签
LosProcessCB *idleProcess = OsGetIdleProcess();
- ret = OsInitPCB(idleProcess, OS_KERNEL_MODE, "KIdle");
+ ret = OsInitPCB(idleProcess, OS_KERNEL_MODE, "KIdle");//创建内核态0号进程
if (ret != LOS_OK) {
return ret;
}
idleProcess->parentProcess = kerInitProcess;
- LOS_ListTailInsert(&kerInitProcess->childrenList, &idleProcess->siblingList);
+ LOS_ListTailInsert(&kerInitProcess->childrenList, &idleProcess->siblingList);//挂到内核态祖宗进程的子孙链接上
idleProcess->pgroup = kerInitProcess->pgroup;
LOS_ListTailInsert(&kerInitProcess->pgroup->processList, &idleProcess->subordinateGroupList);
#ifdef LOSCFG_SECURITY_CAPABILITY
- idleProcess->user = kerInitProcess->user;
+ idleProcess->user = kerInitProcess->user;//共享用户
#endif
#ifdef LOSCFG_FS_VFS
- idleProcess->files = kerInitProcess->files;
+ idleProcess->files = kerInitProcess->files;//共享文件
#endif
idleProcess->processStatus &= ~OS_PROCESS_STATUS_INIT;
@@ -938,7 +990,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSystemProcessCreate(VOID)
}
return LOS_OK;
}
-
+/// 进程调度参数检查
INT32 OsSchedulerParamCheck(UINT16 policy, BOOL isThread, const LosSchedParam *param)
{
if (param == NULL) {
@@ -971,24 +1023,24 @@ INT32 OsSchedulerParamCheck(UINT16 policy, BOOL isThread, const LosSchedParam *p
STATIC INLINE INT32 ProcessSchedulerParamCheck(INT32 which, INT32 pid, UINT16 policy, const LosSchedParam *param)
{
- if (OS_PID_CHECK_INVALID(pid)) {
+ if (OS_PID_CHECK_INVALID(pid)) {//进程ID是否有效,默认 g_processMaxNum = 64
return LOS_EINVAL;
}
- if (which != LOS_PRIO_PROCESS) {
+ if (which != LOS_PRIO_PROCESS) {//进程标识
return LOS_EINVAL;
}
return OsSchedulerParamCheck(policy, FALSE, param);
}
-#ifdef LOSCFG_SECURITY_CAPABILITY
+#ifdef LOSCFG_SECURITY_CAPABILITY //检查进程的安全许可证
STATIC BOOL OsProcessCapPermitCheck(const LosProcessCB *processCB, const SchedParam *param, UINT16 policy, UINT16 prio)
{
- LosProcessCB *runProcess = OsCurrProcessGet();
+ LosProcessCB *runProcess = OsCurrProcessGet();//获得当前进程
/* always trust kernel process */
- if (!OsProcessIsUserMode(runProcess)) {
+ if (!OsProcessIsUserMode(runProcess)) {//进程必须在内核模式下,也就是说在内核模式下是安全的.
return TRUE;
}
@@ -1004,7 +1056,7 @@ STATIC BOOL OsProcessCapPermitCheck(const LosProcessCB *processCB, const SchedPa
return FALSE;
}
#endif
-
+/// 设置进程调度计划
LITE_OS_SEC_TEXT INT32 OsSetProcessScheduler(INT32 which, INT32 pid, UINT16 policy, const LosSchedParam *schedParam)
{
SchedParam param = { 0 };
@@ -1017,8 +1069,8 @@ LITE_OS_SEC_TEXT INT32 OsSetProcessScheduler(INT32 which, INT32 pid, UINT16 poli
}
LosProcessCB *processCB = OS_PCB_FROM_PID(pid);
- SCHEDULER_LOCK(intSave);
- if (OsProcessIsInactive(processCB)) {
+ SCHEDULER_LOCK(intSave);//持有调度自旋锁,多CPU情况下调度期间需要原子处理
+ if (OsProcessIsInactive(processCB)) {//进程未活动的处理
SCHEDULER_UNLOCK(intSave);
return -LOS_ESRCH;
}
@@ -1058,20 +1110,20 @@ LITE_OS_SEC_TEXT INT32 OsSetProcessScheduler(INT32 which, INT32 pid, UINT16 poli
needSched = taskCB->ops->schedParamModify(taskCB, ¶m);
TO_SCHED:
- SCHEDULER_UNLOCK(intSave);
+ SCHEDULER_UNLOCK(intSave);//还锁
- LOS_MpSchedule(OS_MP_CPU_ALL);
+ LOS_MpSchedule(OS_MP_CPU_ALL);//核间中断
if (needSched && OS_SCHEDULER_ACTIVE) {
- LOS_Schedule();
+ LOS_Schedule();//发起调度
}
return LOS_OK;
}
-
+/// 设置指定进程的调度参数,包括优先级和调度策略
LITE_OS_SEC_TEXT INT32 LOS_SetProcessScheduler(INT32 pid, UINT16 policy, const LosSchedParam *schedParam)
{
return OsSetProcessScheduler(LOS_PRIO_PROCESS, pid, policy, schedParam);
}
-
+/// 获得指定进程的调度策略
LITE_OS_SEC_TEXT INT32 LOS_GetProcessScheduler(INT32 pid, INT32 *policy, LosSchedParam *schedParam)
{
UINT32 intSave;
@@ -1115,7 +1167,7 @@ LITE_OS_SEC_TEXT INT32 LOS_GetProcessScheduler(INT32 pid, INT32 *policy, LosSche
}
return LOS_OK;
}
-
+/// 接口封装 - 设置进程优先级
LITE_OS_SEC_TEXT INT32 LOS_SetProcessPriority(INT32 pid, INT32 prio)
{
INT32 ret;
@@ -1135,7 +1187,7 @@ LITE_OS_SEC_TEXT INT32 LOS_SetProcessPriority(INT32 pid, INT32 prio)
return OsSetProcessScheduler(LOS_PRIO_PROCESS, pid, (UINT16)policy, ¶m);
}
-
+/// 接口封装 - 获取进程优先级 which:标识进程,进程组,用户
LITE_OS_SEC_TEXT INT32 OsGetProcessPriority(INT32 which, INT32 pid)
{
UINT32 intSave;
@@ -1168,12 +1220,16 @@ LITE_OS_SEC_TEXT INT32 OsGetProcessPriority(INT32 which, INT32 pid)
SCHEDULER_UNLOCK(intSave);
return param.basePrio;
}
-
+/// 接口封装 - 获取指定进程优先级
LITE_OS_SEC_TEXT INT32 LOS_GetProcessPriority(INT32 pid)
{
return OsGetProcessPriority(LOS_PRIO_PROCESS, pid);
}
+/*!
+* 将任务挂入进程的waitList链表,表示这个任务在等待某个进程的退出
+* 当被等待进程退出时候会将自己挂到父进程的退出子进程链表和进程组的退出进程链表.
+*/
STATIC VOID OsWaitInsertWaitListInOrder(LosTaskCB *runTask, LosProcessCB *processCB)
{
LOS_DL_LIST *head = &processCB->waitList;
@@ -1205,7 +1261,7 @@ STATIC VOID OsWaitInsertWaitListInOrder(LosTaskCB *runTask, LosProcessCB *proces
(VOID)runTask->ops->wait(runTask, list->pstNext, LOS_WAIT_FOREVER);
return;
}
-
+/// 设置等待子进程退出方式方法
STATIC UINT32 WaitFindSpecifiedProcess(UINT32 pid, LosTaskCB *runTask,
const LosProcessCB *processCB, LosProcessCB **childCB)
{
@@ -1226,16 +1282,15 @@ STATIC UINT32 WaitFindSpecifiedProcess(UINT32 pid, LosTaskCB *runTask,
#endif
/* Wait for the child process whose process number is pid. */
*childCB = OsFindExitChildProcess(processCB, waitProcess);
- if (*childCB != NULL) {
+ if (*childCB != NULL) {//找到了,确实有一个已经退出的PID,注意一个进程退出时会挂到父进程的exitChildList上
return LOS_OK;
- }
-
- if (OsFindChildProcess(processCB, waitProcess) != LOS_OK) {
- return LOS_ECHILD;
- }
+ }
- runTask->waitFlag = OS_PROCESS_WAIT_PRO;
- runTask->waitID = (UINTPTR)waitProcess;
+ if (OsFindChildProcess(processCB, waitProcess) != LOS_OK) {
+ return LOS_ECHILD;
+ }
+ runTask->waitFlag = OS_PROCESS_WAIT_PRO;//设置当前任务的等待类型
+ runTask->waitID = (UINTPTR)waitProcess;
return LOS_OK;
}
@@ -1253,23 +1308,23 @@ STATIC UINT32 OsWaitSetFlag(const LosProcessCB *processCB, INT32 pid, LosProcess
if (childCB != NULL) {
goto WAIT_BACK;
}
- } else if (pid == 0) {
+ } else if (pid == 0) {//等待同一进程组中的任何子进程
/* Wait for any child process in the same process group */
childCB = OsFindGroupExitProcess(processCB->pgroup, OS_INVALID_VALUE);
- if (childCB != NULL) {
- goto WAIT_BACK;
+ if (childCB != NULL) {//找到了,确实有一个已经退出的PID
+ goto WAIT_BACK;//直接成功返回
}
runTask->waitID = (UINTPTR)OS_GET_PGROUP_LEADER(processCB->pgroup);
- runTask->waitFlag = OS_PROCESS_WAIT_GID;
- } else if (pid == -1) {
+ runTask->waitFlag = OS_PROCESS_WAIT_GID;//设置当前任务的等待类型
+ } else if (pid == -1) {//等待任意子进程
/* Wait for any child process */
childCB = OsFindExitChildProcess(processCB, NULL);
- if (childCB != NULL) {
+ if (childCB != NULL) {//找到了,确实有一个已经退出的PID
goto WAIT_BACK;
}
- runTask->waitID = pid;
- runTask->waitFlag = OS_PROCESS_WAIT_ANY;
- } else { /* pid < -1 */
+ runTask->waitID = pid;//等待PID,这个PID可以和当前进程没有任何关系
+ runTask->waitFlag = OS_PROCESS_WAIT_ANY;//设置当前任务的等待类型
+ } else { /* pid < -1 */ //等待指定进程组内为|pid|的所有子进程
/* Wait for any child process whose group number is the pid absolute value. */
ProcessGroup *pgroup = OsFindProcessGroup(-pid);
if (pgroup == NULL) {
@@ -1282,14 +1337,14 @@ STATIC UINT32 OsWaitSetFlag(const LosProcessCB *processCB, INT32 pid, LosProcess
}
runTask->waitID = (UINTPTR)OS_GET_PGROUP_LEADER(pgroup);
- runTask->waitFlag = OS_PROCESS_WAIT_GID;
+ runTask->waitFlag = OS_PROCESS_WAIT_GID;//设置当前任务的等待类型
}
WAIT_BACK:
*child = childCB;
return LOS_OK;
}
-
+/// 等待回收孩子进程 @note_thinking 这样写Process不太好吧
STATIC UINT32 OsWaitRecycleChildProcess(const LosProcessCB *childCB, UINT32 intSave, INT32 *status, siginfo_t *info)
{
ProcessGroup *pgroup = NULL;
@@ -1308,8 +1363,8 @@ STATIC UINT32 OsWaitRecycleChildProcess(const LosProcessCB *childCB, UINT32 intS
SCHEDULER_UNLOCK(intSave);
if (status != NULL) {
- if (mode == OS_USER_MODE) {
- (VOID)LOS_ArchCopyToUser((VOID *)status, (const VOID *)(&(exitCode)), sizeof(INT32));
+ if (mode == OS_USER_MODE) {//孩子为用户态进程
+ (VOID)LOS_ArchCopyToUser((VOID *)status, (const VOID *)(&(exitCode)), sizeof(INT32));//从内核空间拷贝退出码
} else {
*status = exitCode;
}
@@ -1344,14 +1399,14 @@ STATIC UINT32 OsWaitRecycleChildProcess(const LosProcessCB *childCB, UINT32 intS
(VOID)LOS_MemFree(m_aucSysMem1, pgroup);
return pid;
}
-
+/// 检查要等待的孩子进程
STATIC UINT32 OsWaitChildProcessCheck(LosProcessCB *processCB, INT32 pid, LosProcessCB **childCB)
-{
+{ //当进程没有孩子且没有退出的孩子进程
if (LOS_ListEmpty(&(processCB->childrenList)) && LOS_ListEmpty(&(processCB->exitChildList))) {
return LOS_ECHILD;
}
- return OsWaitSetFlag(processCB, pid, childCB);
+ return OsWaitSetFlag(processCB, pid, childCB);//设置等待子进程退出方式方法
}
STATIC UINT32 OsWaitOptionsCheck(UINT32 options)
@@ -1359,21 +1414,21 @@ STATIC UINT32 OsWaitOptionsCheck(UINT32 options)
UINT32 flag = LOS_WAIT_WNOHANG | LOS_WAIT_WUNTRACED | LOS_WAIT_WCONTINUED;
flag = ~flag & options;
- if (flag != 0) {
- return LOS_EINVAL;
+    if (flag != 0) {//options 中含有三种支持标志位之外的无效位
+ return LOS_EINVAL;//无效参数
}
- if ((options & (LOS_WAIT_WCONTINUED | LOS_WAIT_WUNTRACED)) != 0) {
- return LOS_EOPNOTSUPP;
+ if ((options & (LOS_WAIT_WCONTINUED | LOS_WAIT_WUNTRACED)) != 0) {//暂不支持这两种方式.
+ return LOS_EOPNOTSUPP;//不支持
}
- if (OS_INT_ACTIVE) {
- return LOS_EINTR;
+ if (OS_INT_ACTIVE) {//中断发生期间
+ return LOS_EINTR;//中断提示
}
return LOS_OK;
-}
-
+}
+///等待子进程结束并回收子进程,返回已经终止的子进程的进程ID号,并清除僵死进程。
STATIC INT32 OsWait(INT32 pid, USER INT32 *status, USER siginfo_t *info, UINT32 options, VOID *rusage)
{
(VOID)rusage;
@@ -1384,13 +1439,13 @@ STATIC INT32 OsWait(INT32 pid, USER INT32 *status, USER siginfo_t *info, UINT32
LosProcessCB *processCB = OsCurrProcessGet();
LosTaskCB *runTask = OsCurrTaskGet();
SCHEDULER_LOCK(intSave);
- ret = OsWaitChildProcessCheck(processCB, pid, &childCB);
+ ret = OsWaitChildProcessCheck(processCB, pid, &childCB);//先检查下看能不能找到参数要求的退出子进程
if (ret != LOS_OK) {
pid = -ret;
goto ERROR;
}
- if (childCB != NULL) {
+ if (childCB != NULL) {//找到了进程
#ifdef LOSCFG_PID_CONTAINER
if (childCB == processCB) {
SCHEDULER_UNLOCK(intSave);
@@ -1402,18 +1457,19 @@ STATIC INT32 OsWait(INT32 pid, USER INT32 *status, USER siginfo_t *info, UINT32
#endif
return (INT32)OsWaitRecycleChildProcess(childCB, intSave, status, info);
}
-
- if ((options & LOS_WAIT_WNOHANG) != 0) {
- runTask->waitFlag = 0;
- pid = 0;
+ //没有找到,看是否要返回还是去做个登记
+ if ((options & LOS_WAIT_WNOHANG) != 0) {//有LOS_WAIT_WNOHANG标签
+ runTask->waitFlag = 0;//等待标识置0
+ pid = 0;//这里置0,是为了 return 0
goto ERROR;
}
-
- OsWaitInsertWaitListInOrder(runTask, processCB);
-
+ //等待孩子进程退出
+ OsWaitInsertWaitListInOrder(runTask, processCB);//将当前任务挂入进程waitList链表
+ //发起调度的目的是为了让出CPU,让其他进程/任务运行
+
runTask->waitFlag = 0;
if (runTask->waitID == OS_INVALID_VALUE) {
- pid = -LOS_ECHILD;
+ pid = -LOS_ECHILD;//没有此子进程
goto ERROR;
}
@@ -1422,7 +1478,7 @@ STATIC INT32 OsWait(INT32 pid, USER INT32 *status, USER siginfo_t *info, UINT32
pid = -LOS_ESRCH;
goto ERROR;
}
-
+ //回收僵死进程
return (INT32)OsWaitRecycleChildProcess(childCB, intSave, status, info);
ERROR:
@@ -1503,12 +1559,12 @@ UINT32 OsGetProcessGroupCB(UINT32 pid, UINTPTR *ppgroupLeader)
STATIC UINT32 OsSetProcessGroupCheck(const LosProcessCB *processCB, LosProcessCB *pgroupCB)
{
- LosProcessCB *runProcessCB = OsCurrProcessGet();
+ LosProcessCB *runProcessCB = OsCurrProcessGet();//拿到当前运行进程
- if (OsProcessIsInactive(processCB)) {
+ if (OsProcessIsInactive(processCB)) {//进程是否活动
return LOS_ESRCH;
}
-
+    //权限检查:不允许操作用户态根进程,也不允许跨PID容器操作
#ifdef LOSCFG_PID_CONTAINER
if ((processCB->processID == OS_USER_ROOT_PROCESS_ID) || OS_PROCESS_CONTAINER_CHECK(processCB, runProcessCB)) {
return LOS_EPERM;
@@ -1625,12 +1681,12 @@ EXIT:
SCHEDULER_UNLOCK(intSave);
return gid;
}
-
+/// 获取当前进程的组ID
LITE_OS_SEC_TEXT INT32 LOS_GetCurrProcessGroupID(VOID)
{
return LOS_GetProcessGroupID(OsCurrProcessGet()->processID);
}
-
+/// 从进程池中获取一个空闲的进程控制块(PCB)
#ifdef LOSCFG_KERNEL_VM
STATIC LosProcessCB *OsGetFreePCB(VOID)
{
@@ -1654,19 +1710,19 @@ STATIC LosProcessCB *OsGetFreePCB(VOID)
STATIC VOID *OsUserInitStackAlloc(LosProcessCB *processCB, UINT32 *size)
{
LosVmMapRegion *region = NULL;
- UINT32 stackSize = ALIGN(OS_USER_TASK_STACK_SIZE, PAGE_SIZE);
-
+ UINT32 stackSize = ALIGN(OS_USER_TASK_STACK_SIZE, PAGE_SIZE);//1M栈空间 按页对齐
+ //线性区分配虚拟内存
region = LOS_RegionAlloc(processCB->vmSpace, 0, stackSize,
VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ |
- VM_MAP_REGION_FLAG_PERM_WRITE, 0);
+ VM_MAP_REGION_FLAG_PERM_WRITE, 0);//可使用可读可写区
if (region == NULL) {
return NULL;
}
- LOS_SetRegionTypeAnon(region);
- region->regionFlags |= VM_MAP_REGION_FLAG_STACK;
+ LOS_SetRegionTypeAnon(region);//匿名映射
+ region->regionFlags |= VM_MAP_REGION_FLAG_STACK;//标记该线性区为栈区
- *size = stackSize;
+ *size = stackSize;//记录栈大小
return (VOID *)(UINTPTR)region->range.base;
}
@@ -1701,6 +1757,14 @@ LITE_OS_SEC_TEXT LosVmSpace *OsExecProcessVmSpaceReplace(LosVmSpace *newSpace, U
return oldSpace;
}
+/**
+ * @brief 进程的回收再利用,被LOS_DoExecveFile调用
+ * @param processCB
+ * @param name
+ * @param oldSpace
+ * @param oldFiles
+ * @return LITE_OS_SEC_TEXT
+ */
LITE_OS_SEC_TEXT UINT32 OsExecRecycleAndInit(LosProcessCB *processCB, const CHAR *name,
LosVmSpace *oldSpace, UINTPTR oldFiles)
{
@@ -1745,12 +1809,12 @@ LITE_OS_SEC_TEXT UINT32 OsExecRecycleAndInit(LosProcessCB *processCB, const CHAR
}
#endif
- processCB->processStatus &= ~OS_PROCESS_FLAG_EXIT;
- processCB->processStatus |= OS_PROCESS_FLAG_ALREADY_EXEC;
+ processCB->processStatus &= ~OS_PROCESS_FLAG_EXIT; //去掉进程退出标签
+ processCB->processStatus |= OS_PROCESS_FLAG_ALREADY_EXEC;//加上进程运行elf标签
return LOS_OK;
}
-
+/// 执行用户态任务, entry为入口函数, 其中创建好task,task上下文等待调度真正执行; sp:用户态栈指针(指向高地址端) mapBase:用户栈映射区基地址(低地址) mapSize:栈映射区大小
LITE_OS_SEC_TEXT UINT32 OsExecStart(const TSK_ENTRY_FUNC entry, UINTPTR sp, UINTPTR mapBase, UINT32 mapSize)
{
UINT32 intSave;
@@ -1759,29 +1823,30 @@ LITE_OS_SEC_TEXT UINT32 OsExecStart(const TSK_ENTRY_FUNC entry, UINTPTR sp, UINT
return LOS_NOK;
}
- if ((sp == 0) || (LOS_Align(sp, LOSCFG_STACK_POINT_ALIGN_SIZE) != sp)) {
+ if ((sp == 0) || (LOS_Align(sp, LOSCFG_STACK_POINT_ALIGN_SIZE) != sp)) {//对齐
return LOS_NOK;
}
-
- if ((mapBase == 0) || (mapSize == 0) || (sp <= mapBase) || (sp > (mapBase + mapSize))) {
+ //注意 sp此时指向栈底,栈底地址要大于栈顶
+ if ((mapBase == 0) || (mapSize == 0) || (sp <= mapBase) || (sp > (mapBase + mapSize))) {//参数检查
return LOS_NOK;
}
- LosTaskCB *taskCB = OsCurrTaskGet();
-
- SCHEDULER_LOCK(intSave);
- taskCB->userMapBase = mapBase;
- taskCB->userMapSize = mapSize;
- taskCB->taskEntry = (TSK_ENTRY_FUNC)entry;
+ LosTaskCB *taskCB = OsCurrTaskGet();//获取当前任务
+ SCHEDULER_LOCK(intSave);//拿自旋锁
+    taskCB->userMapBase = mapBase;//用户态栈映射区基地址(低地址)
+    taskCB->userMapSize = mapSize;//用户态栈映射区大小
+ taskCB->taskEntry = (TSK_ENTRY_FUNC)entry;//任务的入口函数
+ //初始化内核态栈
TaskContext *taskContext = (TaskContext *)OsTaskStackInit(taskCB->taskID, taskCB->stackSize,
(VOID *)taskCB->topOfStack, FALSE);
- OsUserTaskStackInit(taskContext, (UINTPTR)taskCB->taskEntry, sp);
- SCHEDULER_UNLOCK(intSave);
+ OsUserTaskStackInit(taskContext, (UINTPTR)taskCB->taskEntry, sp);//初始化用户栈,将内核栈中上下文的 context->R[0] = sp ,context->sp = sp
+ //这样做的目的是将用户栈SP保存到内核栈中,
+ SCHEDULER_UNLOCK(intSave);//解锁
return LOS_OK;
}
#endif
-
+/// 用户进程开始初始化
STATIC UINT32 OsUserInitProcessStart(LosProcessCB *processCB, TSK_INIT_PARAM_S *param)
{
UINT32 intSave;
@@ -1802,7 +1867,7 @@ STATIC UINT32 OsUserInitProcessStart(LosProcessCB *processCB, TSK_INIT_PARAM_S *
processCB->processStatus &= ~OS_PROCESS_STATUS_INIT;
SCHEDULER_UNLOCK(intSave);
- ret = LOS_SetTaskScheduler(taskID, LOS_SCHED_RR, OS_TASK_PRIORITY_LOWEST);
+ ret = LOS_SetTaskScheduler(taskID, LOS_SCHED_RR, OS_TASK_PRIORITY_LOWEST);//调度器:设置为抢占式调度和最低任务优先级(31级)
if (ret != LOS_OK) {
PRINT_ERR("User init process set scheduler failed! ERROR:%d \n", ret);
goto EXIT;
@@ -1921,7 +1986,7 @@ ERROR:
OsDeInitPCB(processCB);
return ret;
}
-
+/// 拷贝用户信息 直接用memcpy_s
STATIC UINT32 OsCopyUser(LosProcessCB *childCB, LosProcessCB *parentCB)
{
#ifdef LOSCFG_SECURITY_CAPABILITY
@@ -1936,6 +2001,7 @@ STATIC UINT32 OsCopyUser(LosProcessCB *childCB, LosProcessCB *parentCB)
return LOS_OK;
}
+//拷贝一个Task过程
STATIC VOID GetCopyTaskParam(LosProcessCB *childProcessCB, UINTPTR entry, UINT32 size,
TSK_INIT_PARAM_S *taskParam, SchedParam *param)
{
@@ -1943,15 +2009,15 @@ STATIC VOID GetCopyTaskParam(LosProcessCB *childProcessCB, UINTPTR entry, UINT32
LosTaskCB *runTask = OsCurrTaskGet();
SCHEDULER_LOCK(intSave);
- if (OsProcessIsUserMode(childProcessCB)) {
- taskParam->pfnTaskEntry = runTask->taskEntry;
- taskParam->uwStackSize = runTask->stackSize;
- taskParam->userParam.userArea = runTask->userArea;
- taskParam->userParam.userMapBase = runTask->userMapBase;
- taskParam->userParam.userMapSize = runTask->userMapSize;
- } else {
- taskParam->pfnTaskEntry = (TSK_ENTRY_FUNC)entry;
- taskParam->uwStackSize = size;
+ if (OsProcessIsUserMode(childProcessCB)) {//用户态进程
+ taskParam->pfnTaskEntry = runTask->taskEntry;//拷贝当前任务入口地址
+ taskParam->uwStackSize = runTask->stackSize; //栈空间大小
+ taskParam->userParam.userArea = runTask->userArea; //用户态栈区栈顶位置
+ taskParam->userParam.userMapBase = runTask->userMapBase; //用户态栈底
+ taskParam->userParam.userMapSize = runTask->userMapSize; //用户态栈大小
+ } else {//注意内核态进程创建任务的入口由外界指定,例如 OsCreateIdleProcess 指定了OsIdleTask
+ taskParam->pfnTaskEntry = (TSK_ENTRY_FUNC)entry;//参数(sp)为内核态入口地址
+ taskParam->uwStackSize = size;//参数(size)为内核态栈大小
}
if (runTask->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) {
taskParam->uwResved = LOS_TASK_ATTR_JOINABLE;
@@ -1987,25 +2053,25 @@ STATIC UINT32 OsCopyTask(UINT32 flags, LosProcessCB *childProcessCB, const CHAR
}
LosTaskCB *childTaskCB = childProcessCB->threadGroup;
- childTaskCB->taskStatus = runTask->taskStatus;
+ childTaskCB->taskStatus = runTask->taskStatus;//任务状态先同步,注意这里是赋值操作. ...01101001
childTaskCB->ops->schedParamModify(childTaskCB, ¶m);
- if (childTaskCB->taskStatus & OS_TASK_STATUS_RUNNING) {
- childTaskCB->taskStatus &= ~OS_TASK_STATUS_RUNNING;
- } else {
- if (OS_SCHEDULER_ACTIVE) {
+ if (childTaskCB->taskStatus & OS_TASK_STATUS_RUNNING) {//因只能有一个运行的task,所以如果一样要改4号位
+ childTaskCB->taskStatus &= ~OS_TASK_STATUS_RUNNING;//将四号位清0 ,变成 ...01100001
+ } else {//非运行状态下会发生什么?
+ if (OS_SCHEDULER_ACTIVE) {//克隆线程发生错误未运行
LOS_Panic("Clone thread status not running error status: 0x%x\n", childTaskCB->taskStatus);
}
- childTaskCB->taskStatus &= ~OS_TASK_STATUS_UNUSED;
+ childTaskCB->taskStatus &= ~OS_TASK_STATUS_UNUSED;//干净的Task
}
- if (OsProcessIsUserMode(childProcessCB)) {
+ if (OsProcessIsUserMode(childProcessCB)) {//是否是用户进程
SCHEDULER_LOCK(intSave);
OsUserCloneParentStack(childTaskCB->stackPointer, entry, runTask->topOfStack, runTask->stackSize);
SCHEDULER_UNLOCK(intSave);
}
return LOS_OK;
}
-
+//拷贝父亲大人的遗传基因信息
STATIC UINT32 OsCopyParent(UINT32 flags, LosProcessCB *childProcessCB, LosProcessCB *runProcessCB)
{
UINT32 intSave;
@@ -2013,15 +2079,15 @@ STATIC UINT32 OsCopyParent(UINT32 flags, LosProcessCB *childProcessCB, LosProces
SCHEDULER_LOCK(intSave);
if (childProcessCB->parentProcess == NULL) {
- if (flags & CLONE_PARENT) {
- parentProcessCB = runProcessCB->parentProcess;
- } else {
- parentProcessCB = runProcessCB;
- }
- childProcessCB->parentProcess = parentProcessCB;
- LOS_ListTailInsert(&parentProcessCB->childrenList, &childProcessCB->siblingList);
+ if (flags & CLONE_PARENT) { //这里指明 childProcessCB 和 runProcessCB 有同一个父亲,是兄弟关系
+ parentProcessCB = runProcessCB->parentProcess;
+ } else {
+ parentProcessCB = runProcessCB;
+ }
+ childProcessCB->parentProcess = parentProcessCB;//指认父亲,这个赋值代表从此是你儿了
+ LOS_ListTailInsert(&parentProcessCB->childrenList, &childProcessCB->siblingList);//通过我的兄弟姐妹节点,挂到父亲的孩子链表上,于我而言,父亲的这个链表上挂的都是我的兄弟姐妹
+ //不会被排序,老大,老二,老三 老天爷指定了。
}
-
if (childProcessCB->pgroup == NULL) {
childProcessCB->pgroup = parentProcessCB->pgroup;
LOS_ListTailInsert(&parentProcessCB->pgroup->processList, &childProcessCB->subordinateGroupList);
@@ -2029,31 +2095,31 @@ STATIC UINT32 OsCopyParent(UINT32 flags, LosProcessCB *childProcessCB, LosProces
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
-
+//拷贝虚拟空间
STATIC UINT32 OsCopyMM(UINT32 flags, LosProcessCB *childProcessCB, LosProcessCB *runProcessCB)
{
status_t status;
UINT32 intSave;
- if (!OsProcessIsUserMode(childProcessCB)) {
+ if (!OsProcessIsUserMode(childProcessCB)) {//不是用户模式,直接返回,什么意思?内核虚拟空间只有一个,无需COPY !!!
return LOS_OK;
}
- if (flags & CLONE_VM) {
+ if (flags & CLONE_VM) {//贴有虚拟内存的标签
SCHEDULER_LOCK(intSave);
- childProcessCB->vmSpace->archMmu.virtTtb = runProcessCB->vmSpace->archMmu.virtTtb;
- childProcessCB->vmSpace->archMmu.physTtb = runProcessCB->vmSpace->archMmu.physTtb;
+ childProcessCB->vmSpace->archMmu.virtTtb = runProcessCB->vmSpace->archMmu.virtTtb;//TTB虚拟地址基地址,即L1表存放位置,virtTtb是个指针,进程的虚拟空间是指定的范围的
+ childProcessCB->vmSpace->archMmu.physTtb = runProcessCB->vmSpace->archMmu.physTtb;//TTB物理地址基地址,physTtb是个值,取决于运行时映射到物理内存的具体哪个位置.
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
- status = LOS_VmSpaceClone(flags, runProcessCB->vmSpace, childProcessCB->vmSpace);
+ status = LOS_VmSpaceClone(flags, runProcessCB->vmSpace, childProcessCB->vmSpace);//虚拟空间clone
if (status != LOS_OK) {
return LOS_ENOMEM;
}
return LOS_OK;
}
-
+/// 拷贝进程文件描述符(proc_fd)信息
STATIC UINT32 OsCopyFile(UINT32 flags, LosProcessCB *childProcessCB, LosProcessCB *runProcessCB)
{
#ifdef LOSCFG_FS_VFS
@@ -2083,7 +2149,7 @@ STATIC UINT32 OsCopyFile(UINT32 flags, LosProcessCB *childProcessCB, LosProcessC
#endif
#endif
- childProcessCB->consoleID = runProcessCB->consoleID;
+ childProcessCB->consoleID = runProcessCB->consoleID;//控制台也是文件
childProcessCB->umask = runProcessCB->umask;
return LOS_OK;
}
@@ -2091,16 +2157,16 @@ STATIC UINT32 OsCopyFile(UINT32 flags, LosProcessCB *childProcessCB, LosProcessC
STATIC UINT32 OsForkInitPCB(UINT32 flags, LosProcessCB *child, const CHAR *name, UINTPTR sp, UINT32 size)
{
UINT32 ret;
- LosProcessCB *run = OsCurrProcessGet();
+ LosProcessCB *run = OsCurrProcessGet();//获取当前进程
- ret = OsCopyParent(flags, child, run);
+ ret = OsCopyParent(flags, child, run);//拷贝父亲大人的基因信息
if (ret != LOS_OK) {
return ret;
}
- return OsCopyTask(flags, child, name, sp, size);
+ return OsCopyTask(flags, child, name, sp, size);//拷贝任务,设置任务入口函数,栈大小
}
-
+//设置进程组和加入进程调度就绪队列
STATIC UINT32 OsChildSetProcessGroupAndSched(LosProcessCB *child, LosProcessCB *run)
{
UINT32 intSave;
@@ -2124,7 +2190,7 @@ STATIC UINT32 OsChildSetProcessGroupAndSched(LosProcessCB *child, LosProcessCB *
(VOID)LOS_MemFree(m_aucSysMem1, pgroup);
return LOS_OK;
}
-
+/// 拷贝进程资源
STATIC UINT32 OsCopyProcessResources(UINT32 flags, LosProcessCB *child, LosProcessCB *run)
{
UINT32 ret;
@@ -2134,37 +2200,38 @@ STATIC UINT32 OsCopyProcessResources(UINT32 flags, LosProcessCB *child, LosProce
return ret;
}
- ret = OsCopyMM(flags, child, run);
+ ret = OsCopyMM(flags, child, run);//拷贝虚拟空间
if (ret != LOS_OK) {
return ret;
}
- ret = OsCopyFile(flags, child, run);
+ ret = OsCopyFile(flags, child, run);//拷贝文件信息
if (ret != LOS_OK) {
return ret;
}
#ifdef LOSCFG_KERNEL_LITEIPC
- if (run->ipcInfo != NULL) {
- child->ipcInfo = LiteIpcPoolReInit((const ProcIpcInfo *)(run->ipcInfo));
- if (child->ipcInfo == NULL) {
- return LOS_ENOMEM;
+ if (run->ipcInfo != NULL) { //重新初始化IPC池
+ child->ipcInfo = LiteIpcPoolReInit((const ProcIpcInfo *)(run->ipcInfo));//@note_good 将沿用用户态空间地址(即线性区地址)
+ if (child->ipcInfo == NULL) {//因为整个进程虚拟空间都是拷贝的,ipc的用户态虚拟地址当然可以拷贝,但因进程不同了,所以需要重新申请ipc池和重新
+ return LOS_ENOMEM;//映射池中两个地址.
}
}
#endif
#ifdef LOSCFG_SECURITY_CAPABILITY
- OsCopyCapability(run, child);
+ OsCopyCapability(run, child);//拷贝安全能力
#endif
+
return LOS_OK;
}
-
+/// 拷贝进程
STATIC INT32 OsCopyProcess(UINT32 flags, const CHAR *name, UINTPTR sp, UINT32 size)
{
UINT32 ret, processID;
- LosProcessCB *run = OsCurrProcessGet();
+ LosProcessCB *run = OsCurrProcessGet();//获取当前进程
- LosProcessCB *child = OsGetFreePCB();
+ LosProcessCB *child = OsGetFreePCB();//从进程池中申请一个进程控制块,鸿蒙进程池默认64
if (child == NULL) {
return -LOS_EAGAIN;
}
@@ -2180,7 +2247,6 @@ STATIC INT32 OsCopyProcess(UINT32 flags, const CHAR *name, UINTPTR sp, UINT32 si
if (ret != LOS_OK) {
goto ERROR_INIT;
}
-
#ifdef LOSCFG_KERNEL_PLIMITS
ret = OsPLimitsAddProcess(run->plimits, child);
if (ret != LOS_OK) {
@@ -2188,25 +2254,24 @@ STATIC INT32 OsCopyProcess(UINT32 flags, const CHAR *name, UINTPTR sp, UINT32 si
}
#endif
#endif
-
- ret = OsForkInitPCB(flags, child, name, sp, size);
+ ret = OsForkInitPCB(flags, child, name, sp, size);//初始化进程控制块
if (ret != LOS_OK) {
goto ERROR_INIT;
}
- ret = OsCopyProcessResources(flags, child, run);
+ ret = OsCopyProcessResources(flags, child, run);//拷贝进程的资源,包括虚拟空间,文件,安全,IPC ==
if (ret != LOS_OK) {
goto ERROR_TASK;
}
- ret = OsChildSetProcessGroupAndSched(child, run);
+ ret = OsChildSetProcessGroupAndSched(child, run);//设置进程组和加入进程调度就绪队列
if (ret != LOS_OK) {
goto ERROR_TASK;
}
- LOS_MpSchedule(OS_MP_CPU_ALL);
- if (OS_SCHEDULER_ACTIVE) {
- LOS_Schedule();
+ LOS_MpSchedule(OS_MP_CPU_ALL);//给各CPU发送准备接受调度信号
+ if (OS_SCHEDULER_ACTIVE) {//当前CPU core处于活动状态
+ LOS_Schedule();// 申请调度
}
return processID;
@@ -2218,6 +2283,16 @@ ERROR_INIT:
return -ret;
}
+/*!
+ * @brief OsClone 进程克隆
+ *
+ * @param flags
+ * @param size 进程主任务内核栈大小
+ * @param sp 进程主任务的用户态栈指针 — TODO: 确认语义(LOS_Fork 路径下该参数槽位承载的是入口函数)
+ * @return
+ *
+ * @see
+ */
LITE_OS_SEC_TEXT INT32 OsClone(UINT32 flags, UINTPTR sp, UINT32 size)
{
UINT32 cloneFlag = CLONE_PARENT | CLONE_THREAD | SIGCHLD;
@@ -2266,7 +2341,7 @@ LITE_OS_SEC_TEXT INT32 OsClone(UINT32 flags, UINTPTR sp, UINT32 size)
return OsCopyProcess(cloneFlag & flags, NULL, sp, size);
}
-
+//著名的 fork 函数 记得前往 https://gitee.com/weharmony/kernel_liteos_a_note fork一下 :)
LITE_OS_SEC_TEXT INT32 LOS_Fork(UINT32 flags, const CHAR *name, const TSK_ENTRY_FUNC entry, UINT32 stackSize)
{
UINT32 cloneFlag = CLONE_PARENT | CLONE_THREAD | CLONE_VFORK | CLONE_FILES;
@@ -2276,7 +2351,7 @@ LITE_OS_SEC_TEXT INT32 LOS_Fork(UINT32 flags, const CHAR *name, const TSK_ENTRY_
}
flags |= CLONE_FILES;
- return OsCopyProcess(cloneFlag & flags, name, (UINTPTR)entry, stackSize);
+ return OsCopyProcess(cloneFlag & flags, name, (UINTPTR)entry, stackSize);//拷贝一个进程
}
#else
LITE_OS_SEC_TEXT_INIT UINT32 OsUserInitProcess(VOID)
@@ -2285,16 +2360,23 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsUserInitProcess(VOID)
}
#endif
+/*!
+ * @brief LOS_Exit
+ * 进程退出
+ * @param status
+ * @return
+ *
+ * @see
+ */
LITE_OS_SEC_TEXT VOID LOS_Exit(INT32 status)
{
UINT32 intSave;
(void)status;
-
/* The exit of a kernel - state process must be kernel - state and all threads must actively exit */
LosProcessCB *processCB = OsCurrProcessGet();
SCHEDULER_LOCK(intSave);
- if (!OsProcessIsUserMode(processCB) && (processCB->threadNumber != 1)) {
+ if (!OsProcessIsUserMode(processCB) && (processCB->threadNumber != 1)) {//内核态下进程的退出方式,必须是所有的任务都退出了
SCHEDULER_UNLOCK(intSave);
PRINT_ERR("Kernel-state processes with multiple threads are not allowed to exit directly\n");
return;
@@ -2305,6 +2387,16 @@ LITE_OS_SEC_TEXT VOID LOS_Exit(INT32 status)
OsRunningTaskToExit(OsCurrTaskGet(), OS_PRO_EXIT_OK);
}
+
+/*!
+ * @brief LOS_GetUsedPIDList
+ * 获取使用中的进程列表
+ * @param pidList
+ * @param pidMaxNum
+ * @return
+ *
+ * @see
+ */
LITE_OS_SEC_TEXT INT32 LOS_GetUsedPIDList(UINT32 *pidList, INT32 pidMaxNum)
{
LosProcessCB *pcb = NULL;
@@ -2316,13 +2408,13 @@ LITE_OS_SEC_TEXT INT32 LOS_GetUsedPIDList(UINT32 *pidList, INT32 pidMaxNum)
return 0;
}
SCHEDULER_LOCK(intSave);
- while (OsProcessIDUserCheckInvalid(pid) == false) {
+ while (OsProcessIDUserCheckInvalid(pid) == false) {//遍历进程池
pcb = OS_PCB_FROM_PID(pid);
pid++;
- if (OsProcessIsUnused(pcb)) {
+ if (OsProcessIsUnused(pcb)) {//未使用的不算
continue;
}
- pidList[num] = pcb->processID;
+ pidList[num] = pcb->processID;//由参数带走
num++;
if (num >= pidMaxNum) {
break;
@@ -2348,12 +2440,12 @@ LITE_OS_SEC_TEXT struct fd_table_s *LOS_GetFdTable(UINT32 pid)
return files->fdt;
}
#endif
-
+/// 获取当前进程的进程ID
LITE_OS_SEC_TEXT UINT32 LOS_GetCurrProcessID(VOID)
{
return OsCurrProcessGet()->processID;
}
-
+/// 终止线程组中仍处于活动状态的任务
#ifdef LOSCFG_KERNEL_VM
STATIC VOID ThreadGroupActiveTaskKilled(LosTaskCB *taskCB)
{
@@ -2426,12 +2518,12 @@ LITE_OS_SEC_TEXT VOID OsProcessThreadGroupDestroy(VOID)
#endif
return;
}
-
+/// 获取系统支持的最大进程数目
LITE_OS_SEC_TEXT UINT32 LOS_GetSystemProcessMaximum(VOID)
{
return g_processMaxNum;
}
-
+/// 获取用户态进程的根进程,所有用户进程都是g_processCBArray[g_userInitProcess] fork来的
LITE_OS_SEC_TEXT LosProcessCB *OsGetUserInitProcess(VOID)
{
return &g_processCBArray[OS_USER_ROOT_PROCESS_ID];
@@ -2441,7 +2533,7 @@ LITE_OS_SEC_TEXT LosProcessCB *OsGetKernelInitProcess(VOID)
{
return &g_processCBArray[OS_KERNEL_ROOT_PROCESS_ID];
}
-
+/// 获取空闲进程,0号进程为空闲进程,该进程不干活,专给CPU休息的。
LITE_OS_SEC_TEXT LosProcessCB *OsGetIdleProcess(VOID)
{
return &g_processCBArray[OS_KERNEL_IDLE_PROCESS_ID];
diff --git a/src/kernel_liteos_a/kernel/base/core/los_smp.c b/src/kernel_liteos_a/kernel/base/core/los_smp.c
index bbeb9c22..abab4853 100644
--- a/src/kernel_liteos_a/kernel/base/core/los_smp.c
+++ b/src/kernel_liteos_a/kernel/base/core/los_smp.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -40,7 +40,7 @@
#ifdef LOSCFG_KERNEL_SMP
STATIC struct SmpOps *g_smpOps = NULL;
-
+/// 多核中次级CPU核初始化,每个核都会调用一次
STATIC VOID OsSmpSecondaryInit(VOID *arg)
{
UNUSED(arg);
@@ -56,7 +56,7 @@ STATIC VOID OsSmpSecondaryInit(VOID *arg)
OsSchedStart();
}
-
+/// 设置多核操作接口, 通过外部注册
VOID LOS_SmpOpsSet(struct SmpOps *ops)
{
g_smpOps = ops;
diff --git a/src/kernel_liteos_a/kernel/base/core/los_swtmr.c b/src/kernel_liteos_a/kernel/base/core/los_swtmr.c
index 285753c3..2610e582 100644
--- a/src/kernel_liteos_a/kernel/base/core/los_swtmr.c
+++ b/src/kernel_liteos_a/kernel/base/core/los_swtmr.c
@@ -1,6 +1,70 @@
+/*!
+* @file los_swtmr.c
+* @brief 软定时器主文件
+* @details
+* @attention @verbatim
+基本概念
+ 软件定时器,是基于系统Tick时钟中断且由软件来模拟的定时器。当经过设定的Tick数后,会触发用户自定义的回调函数。
+ 硬件定时器受硬件的限制,数量上不足以满足用户的实际需求。因此为了满足用户需求,提供更多的定时器,
+ 软件定时器功能,支持如下特性:
+ 创建软件定时器。
+ 启动软件定时器。
+ 停止软件定时器。
+ 删除软件定时器。
+ 获取软件定时器剩余Tick数。
+ 可配置支持的软件定时器个数。
+
+ 运作机制
+ 软件定时器是系统资源,在模块初始化的时候已经分配了一块连续内存。
+ 软件定时器使用了系统的一个队列和一个任务资源,软件定时器的触发遵循队列规则,
+ 先进先出。定时时间短的定时器总是比定时时间长的靠近队列头,满足优先触发的准则。
+ 软件定时器以Tick为基本计时单位,当创建并启动一个软件定时器时,Huawei LiteOS会根据
+ 当前系统Tick时间及设置的定时时长确定该定时器的到期Tick时间,并将该定时器控制结构挂入计时全局链表。
+ 当Tick中断到来时,在Tick中断处理函数中扫描软件定时器的计时全局链表,检查是否有定时器超时,
+ 若有则将超时的定时器记录下来。Tick中断处理函数结束后,软件定时器任务(优先级为最高)
+ 被唤醒,在该任务中调用已经记录下来的定时器的回调函数。
+
+ 定时器状态
+ OS_SWTMR_STATUS_UNUSED(定时器未使用)
+ 系统在定时器模块初始化时,会将系统中所有定时器资源初始化成该状态。
+
+ OS_SWTMR_STATUS_TICKING(定时器处于计数状态)
+ 在定时器创建后调用LOS_SwtmrStart接口启动,定时器将变成该状态,是定时器运行时的状态。
+
+ OS_SWTMR_STATUS_CREATED(定时器创建后未启动,或已停止)
+ 定时器创建后,不处于计数状态时,定时器将变成该状态。
+
+ 软件定时器提供了三类模式:
+ 单次触发定时器,这类定时器在启动后只会触发一次定时器事件,然后定时器自动删除。
+ 周期触发定时器,这类定时器会周期性的触发定时器事件,直到用户手动停止定时器,否则将永远持续执行下去。
+ 单次触发定时器,但这类定时器超时触发后不会自动删除,需要调用定时器删除接口删除定时器。
+
+ 使用场景
+ 创建一个单次触发的定时器,超时后执行用户自定义的回调函数。
+ 创建一个周期性触发的定时器,超时后执行用户自定义的回调函数。
+
+ 软件定时器的典型开发流程
+ 通过make menuconfig配置软件定时器
+ 创建定时器LOS_SwtmrCreate,设置定时器的定时时长、定时器模式、超时后的回调函数。
+ 启动定时器LOS_SwtmrStart。
+ 获得软件定时器剩余Tick数LOS_SwtmrTimeGet。
+ 停止定时器LOS_SwtmrStop。
+ 删除定时器LOS_SwtmrDelete。
+
+ 注意事项
+ 软件定时器的回调函数中不应执行过多操作,不建议使用可能引起任务挂起或者阻塞的接口或操作,
+ 如果使用会导致软件定时器响应不及时,造成的影响无法确定。
+ 软件定时器使用了系统的一个队列和一个任务资源。软件定时器任务的优先级设定为0,且不允许修改 。
+ 系统可配置的软件定时器个数是指:整个系统可使用的软件定时器总个数,并非用户可使用的软件定时器个数。
+ 例如:系统多占用一个软件定时器,那么用户能使用的软件定时器资源就会减少一个。
+ 创建单次不自删除属性的定时器,用户需要自行调用定时器删除接口删除定时器,回收定时器资源,避免资源泄露。
+ 软件定时器的定时精度与系统Tick时钟的周期有关。
+ @endverbatim
+*/
+
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -45,20 +109,19 @@
STATIC INLINE VOID SwtmrDelete(SWTMR_CTRL_S *swtmr);
STATIC INLINE UINT64 SwtmrToStart(SWTMR_CTRL_S *swtmr, UINT16 cpuid);
-
-LITE_OS_SEC_BSS SWTMR_CTRL_S *g_swtmrCBArray = NULL; /* First address in Timer memory space */
-LITE_OS_SEC_BSS UINT8 *g_swtmrHandlerPool = NULL; /* Pool of Swtmr Handler */
-LITE_OS_SEC_BSS LOS_DL_LIST g_swtmrFreeList; /* Free list of Software Timer */
+LITE_OS_SEC_BSS SWTMR_CTRL_S *g_swtmrCBArray = NULL; /**< First address in Timer memory space \n 定时器池 */
+LITE_OS_SEC_BSS UINT8 *g_swtmrHandlerPool = NULL; /**< Pool of Swtmr Handler \n 用于注册软时钟的回调函数 */
+LITE_OS_SEC_BSS LOS_DL_LIST g_swtmrFreeList; /**< Free list of Software Timer \n 空闲定时器链表 */
/* spinlock for swtmr module, only available on SMP mode */
-LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_swtmrSpin);
-#define SWTMR_LOCK(state) LOS_SpinLockSave(&g_swtmrSpin, &(state))
-#define SWTMR_UNLOCK(state) LOS_SpinUnlockRestore(&g_swtmrSpin, (state))
+LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_swtmrSpin);///< 初始化软时钟自旋锁,只有SMP情况才需要,只要是自旋锁都是用于CPU多核的同步
+#define SWTMR_LOCK(state) LOS_SpinLockSave(&g_swtmrSpin, &(state))///< 持有软时钟自旋锁
+#define SWTMR_UNLOCK(state) LOS_SpinUnlockRestore(&g_swtmrSpin, (state))///< 释放软时钟自旋锁
typedef struct {
SortLinkAttribute swtmrSortLink;
- LosTaskCB *swtmrTask; /* software timer task id */
- LOS_DL_LIST swtmrHandlerQueue; /* software timer timeout queue id */
+ LosTaskCB *swtmrTask; /* software timer task id | 定时器任务ID */
+ LOS_DL_LIST swtmrHandlerQueue; /* software timer timeout queue id | 定时器超时队列*/
} SwtmrRunqueue;
STATIC SwtmrRunqueue g_swtmrRunqueue[LOSCFG_KERNEL_CORE_NUM];
@@ -254,7 +317,11 @@ STATIC INLINE VOID ScanSwtmrTimeList(SwtmrRunqueue *srq)
LOS_SpinUnlockRestore(&swtmrSortLink->spinLock, intSave);
return;
}
-
+/**
+ * @brief 软时钟的入口函数,拥有任务的最高优先级 0 级!
+ *
+ * @return LITE_OS_SEC_TEXT
+ */
STATIC VOID SwtmrTask(VOID)
{
SwtmrHandlerItem swtmrHandle;
@@ -263,7 +330,7 @@ STATIC VOID SwtmrTask(VOID)
SwtmrRunqueue *srq = &g_swtmrRunqueue[ArchCurrCpuid()];
LOS_DL_LIST *head = &srq->swtmrHandlerQueue;
- for (;;) {
+ for (;;) {//死循环获取队列item,一直读干净为止
waitTime = OsSortLinkGetNextExpireTime(OsGetCurrSchedTimeCycle(), &srq->swtmrSortLink);
if (waitTime != 0) {
SCHEDULER_LOCK(intSave);
@@ -279,29 +346,30 @@ STATIC VOID SwtmrTask(VOID)
LOS_ListDelete(&swtmrHandlePtr->node);
(VOID)memcpy_s(&swtmrHandle, sizeof(SwtmrHandlerItem), swtmrHandlePtr, sizeof(SwtmrHandlerItem));
- (VOID)LOS_MemboxFree(g_swtmrHandlerPool, swtmrHandlePtr);
+ (VOID)LOS_MemboxFree(g_swtmrHandlerPool, swtmrHandlePtr);//静态释放内存,注意在鸿蒙内核只有软时钟注册用到了静态内存
SwtmrHandler(&swtmrHandle);
+ }
}
}
-}
+///创建软时钟任务,每个cpu core都可以拥有自己的软时钟任务
STATIC UINT32 SwtmrTaskCreate(UINT16 cpuid, UINT32 *swtmrTaskID)
{
UINT32 ret;
TSK_INIT_PARAM_S swtmrTask;
- (VOID)memset_s(&swtmrTask, sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));
- swtmrTask.pfnTaskEntry = (TSK_ENTRY_FUNC)SwtmrTask;
- swtmrTask.uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
- swtmrTask.pcName = "Swt_Task";
- swtmrTask.usTaskPrio = 0;
- swtmrTask.uwResved = LOS_TASK_STATUS_DETACHED;
+ (VOID)memset_s(&swtmrTask, sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));//清0
+ swtmrTask.pfnTaskEntry = (TSK_ENTRY_FUNC)SwtmrTask;//入口函数
+ swtmrTask.uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;//16K默认内核任务栈
+ swtmrTask.pcName = "Swt_Task";//任务名称
+ swtmrTask.usTaskPrio = 0;//哇塞! 逮到一个最高优先级的任务 @note_thinking 这里应该用 OS_TASK_PRIORITY_HIGHEST 表示
+ swtmrTask.uwResved = LOS_TASK_STATUS_DETACHED;//分离模式
#ifdef LOSCFG_KERNEL_SMP
- swtmrTask.usCpuAffiMask = CPUID_TO_AFFI_MASK(cpuid);
+ swtmrTask.usCpuAffiMask = CPUID_TO_AFFI_MASK(cpuid);//交给当前CPU执行这个任务
#endif
- ret = LOS_TaskCreate(swtmrTaskID, &swtmrTask);
+ ret = LOS_TaskCreate(swtmrTaskID, &swtmrTask);//创建任务并申请调度
if (ret == LOS_OK) {
- OS_TCB_FROM_TID(*swtmrTaskID)->taskStatus |= OS_TASK_FLAG_SYSTEM_TASK;
+ OS_TCB_FROM_TID(*swtmrTaskID)->taskStatus |= OS_TASK_FLAG_SYSTEM_TASK;//告知这是一个系统任务
}
return ret;
@@ -319,16 +387,16 @@ BOOL OsIsSwtmrTask(const LosTaskCB *taskCB)
}
return FALSE;
}
-
+///回收指定进程的软时钟
LITE_OS_SEC_TEXT_INIT VOID OsSwtmrRecycle(UINTPTR ownerID)
{
- for (UINT16 index = 0; index < LOSCFG_BASE_CORE_SWTMR_LIMIT; index++) {
+ for (UINT16 index = 0; index < LOSCFG_BASE_CORE_SWTMR_LIMIT; index++) {//一个进程往往会有多个定时器
if (g_swtmrCBArray[index].uwOwnerPid == ownerID) {
- LOS_SwtmrDelete(index);
+ LOS_SwtmrDelete(index);//删除定时器
}
}
}
-
+///软时钟初始化 ,注意函数在多CPU情况下会执行多次
STATIC UINT32 SwtmrBaseInit(VOID)
{
UINT32 ret;
@@ -338,15 +406,15 @@ STATIC UINT32 SwtmrBaseInit(VOID)
return LOS_ERRNO_SWTMR_NO_MEMORY;
}
- (VOID)memset_s(swtmr, size, 0, size);
- g_swtmrCBArray = swtmr;
- LOS_ListInit(&g_swtmrFreeList);
+ (VOID)memset_s(swtmr, size, 0, size);//清0
+ g_swtmrCBArray = swtmr;//软时钟
+ LOS_ListInit(&g_swtmrFreeList);//初始化空闲链表
for (UINT16 index = 0; index < LOSCFG_BASE_CORE_SWTMR_LIMIT; index++, swtmr++) {
- swtmr->usTimerID = index;
- LOS_ListTailInsert(&g_swtmrFreeList, &swtmr->stSortList.sortLinkNode);
+ swtmr->usTimerID = index;//按顺序赋值
+ LOS_ListTailInsert(&g_swtmrFreeList, &swtmr->stSortList.sortLinkNode);//通过sortLinkNode将节点挂到空闲链表
}
-
- size = LOS_MEMBOX_SIZE(sizeof(SwtmrHandlerItem), OS_SWTMR_HANDLE_QUEUE_SIZE);
+ //想要用静态内存池管理,就必须要使用LOS_MEMBOX_SIZE来计算申请的内存大小,因为需要点前缀内存承载头部信息.
+ size = LOS_MEMBOX_SIZE(sizeof(SwtmrHandlerItem), OS_SWTMR_HANDLE_QUEUE_SIZE);//规划一片内存区域作为软时钟处理函数的静态内存池。
g_swtmrHandlerPool = (UINT8 *)LOS_MemAlloc(m_aucSysMem1, size); /* system resident resource */
if (g_swtmrHandlerPool == NULL) {
return LOS_ERRNO_SWTMR_NO_MEMORY;
@@ -474,12 +542,12 @@ STATIC UINT64 SwtmrToStart(SWTMR_CTRL_S *swtmr, UINT16 cpuid)
if ((swtmr->uwOverrun == 0) && ((swtmr->ucMode == LOS_SWTMR_MODE_ONCE) ||
(swtmr->ucMode == LOS_SWTMR_MODE_OPP) ||
- (swtmr->ucMode == LOS_SWTMR_MODE_NO_SELFDELETE))) {
- ticks = swtmr->uwExpiry;
+ (swtmr->ucMode == LOS_SWTMR_MODE_NO_SELFDELETE))) {//如果是一次性的定时器
+ ticks = swtmr->uwExpiry; //获取时间间隔
} else {
- ticks = swtmr->uwInterval;
+ ticks = swtmr->uwInterval;//获取周期性定时器时间间隔
}
- swtmr->ucState = OS_SWTMR_STATUS_TICKING;
+ swtmr->ucState = OS_SWTMR_STATUS_TICKING;//计数状态
UINT64 period = (UINT64)ticks * OS_CYCLE_PER_TICK;
UINT64 responseTime = swtmr->startTime + period;
@@ -522,10 +590,9 @@ STATIC INLINE VOID SwtmrStart(SWTMR_CTRL_S *swtmr)
STATIC INLINE VOID SwtmrDelete(SWTMR_CTRL_S *swtmr)
{
/* insert to free list */
- LOS_ListTailInsert(&g_swtmrFreeList, &swtmr->stSortList.sortLinkNode);
- swtmr->ucState = OS_SWTMR_STATUS_UNUSED;
+ LOS_ListTailInsert(&g_swtmrFreeList, &swtmr->stSortList.sortLinkNode);//直接插入空闲链表中,回收再利用
+ swtmr->ucState = OS_SWTMR_STATUS_UNUSED;//又干净着呢
swtmr->uwOwnerPid = OS_INVALID_VALUE;
-
SwtmrDebugDataClear(swtmr->usTimerID);
}
@@ -641,7 +708,7 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsSwtmrTimeGet(const SWTMR_CTRL_S *swtmr)
}
return (UINT32)time;
}
-
+///创建定时器,设置定时器的定时时长、定时器模式、回调函数,并返回定时器ID
LITE_OS_SEC_TEXT_INIT UINT32 LOS_SwtmrCreate(UINT32 interval,
UINT8 mode,
SWTMR_PROC_FUNC handler,
@@ -670,30 +737,30 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SwtmrCreate(UINT32 interval,
}
SWTMR_LOCK(intSave);
- if (LOS_ListEmpty(&g_swtmrFreeList)) {
+ if (LOS_ListEmpty(&g_swtmrFreeList)) {//空闲链表不能为空
SWTMR_UNLOCK(intSave);
return LOS_ERRNO_SWTMR_MAXSIZE;
}
sortList = LOS_DL_LIST_ENTRY(g_swtmrFreeList.pstNext, SortLinkList, sortLinkNode);
swtmr = LOS_DL_LIST_ENTRY(sortList, SWTMR_CTRL_S, stSortList);
- LOS_ListDelete(LOS_DL_LIST_FIRST(&g_swtmrFreeList));
+ LOS_ListDelete(LOS_DL_LIST_FIRST(&g_swtmrFreeList));//从空闲链表中摘除该节点
SWTMR_UNLOCK(intSave);
swtmr->uwOwnerPid = (UINTPTR)OsCurrProcessGet();
- swtmr->pfnHandler = handler;
- swtmr->ucMode = mode;
+ swtmr->pfnHandler = handler;//时间到了的回调函数
+ swtmr->ucMode = mode; //定时器模式
swtmr->uwOverrun = 0;
- swtmr->uwInterval = interval;
- swtmr->uwExpiry = interval;
- swtmr->uwArg = arg;
- swtmr->ucState = OS_SWTMR_STATUS_CREATED;
+ swtmr->uwInterval = interval; //周期性超时间隔
+ swtmr->uwExpiry = interval; //一次性超时间隔
+ swtmr->uwArg = arg; //回调函数的参数
+ swtmr->ucState = OS_SWTMR_STATUS_CREATED; //已创建状态
SET_SORTLIST_VALUE(&swtmr->stSortList, OS_SORT_LINK_INVALID_TIME);
*swtmrID = swtmr->usTimerID;
OsHookCall(LOS_HOOK_TYPE_SWTMR_CREATE, swtmr);
return LOS_OK;
}
-
+///接口函数 启动定时器 参数定时任务ID
LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStart(UINT16 swtmrID)
{
SWTMR_CTRL_S *swtmr = NULL;
@@ -705,27 +772,27 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStart(UINT16 swtmrID)
return LOS_ERRNO_SWTMR_ID_INVALID;
}
- swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;
- swtmr = g_swtmrCBArray + swtmrCBID;
+ swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;//取模
+ swtmr = g_swtmrCBArray + swtmrCBID;//获取定时器控制结构体
SWTMR_LOCK(intSave);
- if (swtmr->usTimerID != swtmrID) {
+ if (swtmr->usTimerID != swtmrID) {//ID必须一样
SWTMR_UNLOCK(intSave);
return LOS_ERRNO_SWTMR_ID_INVALID;
}
- switch (swtmr->ucState) {
+ switch (swtmr->ucState) {//判断定时器状态
case OS_SWTMR_STATUS_UNUSED:
ret = LOS_ERRNO_SWTMR_NOT_CREATED;
break;
- /*
+ /* 如果定时器的状态为启动中,应先停止定时器再重新启动
* If the status of swtmr is timing, it should stop the swtmr first,
* then start the swtmr again.
*/
- case OS_SWTMR_STATUS_TICKING:
- SwtmrStop(swtmr);
+ case OS_SWTMR_STATUS_TICKING://正在计数的定时器
+ SwtmrStop(swtmr);//先停止定时器,注意这里没有break;,在OsSwtmrStop中状态将会回到了OS_SWTMR_STATUS_CREATED 接下来就是执行启动了
/* fall-through */
- case OS_SWTMR_STATUS_CREATED:
+ case OS_SWTMR_STATUS_CREATED://已经创建好了
SwtmrStart(swtmr);
break;
default:
@@ -737,7 +804,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStart(UINT16 swtmrID)
OsHookCall(LOS_HOOK_TYPE_SWTMR_START, swtmr);
return ret;
}
-
+///接口函数 停止定时器 参数定时任务ID
LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStop(UINT16 swtmrID)
{
SWTMR_CTRL_S *swtmr = NULL;
@@ -749,24 +816,24 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStop(UINT16 swtmrID)
return LOS_ERRNO_SWTMR_ID_INVALID;
}
- swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;
- swtmr = g_swtmrCBArray + swtmrCBID;
+ swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;//取模
+ swtmr = g_swtmrCBArray + swtmrCBID;//获取定时器控制结构体
SWTMR_LOCK(intSave);
- if (swtmr->usTimerID != swtmrID) {
+ if (swtmr->usTimerID != swtmrID) {//ID必须一样
SWTMR_UNLOCK(intSave);
return LOS_ERRNO_SWTMR_ID_INVALID;
}
- switch (swtmr->ucState) {
+ switch (swtmr->ucState) {//判断定时器状态
case OS_SWTMR_STATUS_UNUSED:
- ret = LOS_ERRNO_SWTMR_NOT_CREATED;
+ ret = LOS_ERRNO_SWTMR_NOT_CREATED;//返回没有创建
break;
case OS_SWTMR_STATUS_CREATED:
- ret = LOS_ERRNO_SWTMR_NOT_STARTED;
+ ret = LOS_ERRNO_SWTMR_NOT_STARTED;//返回没有开始
break;
- case OS_SWTMR_STATUS_TICKING:
- SwtmrStop(swtmr);
+ case OS_SWTMR_STATUS_TICKING://正在计数
+ SwtmrStop(swtmr);//执行正在停止定时器操作
break;
default:
ret = LOS_ERRNO_SWTMR_STATUS_INVALID;
@@ -777,7 +844,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStop(UINT16 swtmrID)
OsHookCall(LOS_HOOK_TYPE_SWTMR_STOP, swtmr);
return ret;
}
-
+///接口函数 获得软件定时器剩余Tick数 通过 *tick 带走
LITE_OS_SEC_TEXT UINT32 LOS_SwtmrTimeGet(UINT16 swtmrID, UINT32 *tick)
{
SWTMR_CTRL_S *swtmr = NULL;
@@ -793,11 +860,11 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrTimeGet(UINT16 swtmrID, UINT32 *tick)
return LOS_ERRNO_SWTMR_TICK_PTR_NULL;
}
- swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;
- swtmr = g_swtmrCBArray + swtmrCBID;
+ swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;//取模
+ swtmr = g_swtmrCBArray + swtmrCBID;//获取定时器控制结构体
SWTMR_LOCK(intSave);
- if (swtmr->usTimerID != swtmrID) {
+ if (swtmr->usTimerID != swtmrID) {//ID必须一样
SWTMR_UNLOCK(intSave);
return LOS_ERRNO_SWTMR_ID_INVALID;
}
@@ -808,8 +875,8 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrTimeGet(UINT16 swtmrID, UINT32 *tick)
case OS_SWTMR_STATUS_CREATED:
ret = LOS_ERRNO_SWTMR_NOT_STARTED;
break;
- case OS_SWTMR_STATUS_TICKING:
- *tick = OsSwtmrTimeGet(swtmr);
+ case OS_SWTMR_STATUS_TICKING://正在计数的定时器
+ *tick = OsSwtmrTimeGet(swtmr);//获取
break;
default:
ret = LOS_ERRNO_SWTMR_STATUS_INVALID;
@@ -818,7 +885,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrTimeGet(UINT16 swtmrID, UINT32 *tick)
SWTMR_UNLOCK(intSave);
return ret;
}
-
+///接口函数 删除定时器
LITE_OS_SEC_TEXT UINT32 LOS_SwtmrDelete(UINT16 swtmrID)
{
SWTMR_CTRL_S *swtmr = NULL;
@@ -830,11 +897,11 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrDelete(UINT16 swtmrID)
return LOS_ERRNO_SWTMR_ID_INVALID;
}
- swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;
- swtmr = g_swtmrCBArray + swtmrCBID;
+ swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;//取模
+ swtmr = g_swtmrCBArray + swtmrCBID;//获取定时器控制结构体
SWTMR_LOCK(intSave);
- if (swtmr->usTimerID != swtmrID) {
+ if (swtmr->usTimerID != swtmrID) {//ID必须一样
SWTMR_UNLOCK(intSave);
return LOS_ERRNO_SWTMR_ID_INVALID;
}
@@ -843,10 +910,10 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrDelete(UINT16 swtmrID)
case OS_SWTMR_STATUS_UNUSED:
ret = LOS_ERRNO_SWTMR_NOT_CREATED;
break;
- case OS_SWTMR_STATUS_TICKING:
+ case OS_SWTMR_STATUS_TICKING://正在计数就先停止再删除,这里没有break;
SwtmrStop(swtmr);
/* fall-through */
- case OS_SWTMR_STATUS_CREATED:
+ case OS_SWTMR_STATUS_CREATED://再删除定时器
SwtmrDelete(swtmr);
break;
default:
@@ -860,3 +927,4 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrDelete(UINT16 swtmrID)
}
#endif /* LOSCFG_BASE_CORE_SWTMR_ENABLE */
+
diff --git a/src/kernel_liteos_a/kernel/base/core/los_sys.c b/src/kernel_liteos_a/kernel/base/core/los_sys.c
index 8a9d373a..593dc25b 100644
--- a/src/kernel_liteos_a/kernel/base/core/los_sys.c
+++ b/src/kernel_liteos_a/kernel/base/core/los_sys.c
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2013-2019, Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020, Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -32,19 +32,72 @@
#include "los_sys_pri.h"
#include "los_sched_pri.h"
+/*!
+ * @file los_sys.c
+ * @brief 系统时间转化
+ * @details
+ * @link kernel-small-basic-time http://weharmonyos.com/openharmony/zh-cn/device-dev/kernel/kernel-small-basic-time.html @endlink
+ * @verbatim
+ 基本概念
+ 时间管理以系统时钟为基础,给应用程序提供所有和时间有关的服务。
+
+ 系统时钟是由定时器/计数器产生的输出脉冲触发中断产生的,一般定义为整数或长整数。
+ 输出脉冲的周期叫做一个“时钟滴答”。系统时钟也称为时标或者Tick。
+
+ 用户以秒、毫秒为单位计时,而操作系统以Tick为单位计时,当用户需要对系统进行操作时,
+ 例如任务挂起、延时等,此时需要时间管理模块对Tick和秒/毫秒进行转换。
+ 时间管理模块提供时间转换、统计、延迟功能
+
+ 相关概念
+ Cycle
+ 系统最小的计时单位。Cycle的时长由系统主时钟频率决定,系统主时钟频率就是每秒钟的Cycle数。
+
+ Tick
+ Tick是操作系统的基本时间单位,由用户配置的每秒Tick数决定。
+
+ 使用场景
+ 用户需要了解当前系统运行的时间以及Tick与秒、毫秒之间的转换关系等。
+
+ 时间管理的典型开发流程
+ 根据实际需求,在板级配置适配时确认是否使能LOSCFG_BASE_CORE_TICK_HW_TIME宏选择外部定时器,
+ 并配置系统主时钟频率OS_SYS_CLOCK(单位Hz)。OS_SYS_CLOCK的默认值基于硬件平台配置。
+ 通过make menuconfig配置LOSCFG_BASE_CORE_TICK_PER_SECOND。
+
+ 参考
+ http://weharmonyos.com/openharmony/zh-cn/device-dev/kernel/kernel-small-basic-time.html
+ @endverbatim
+ * @attention
+ 获取系统Tick数需要在系统时钟使能之后。
+ 时间管理不是单独的功能模块,依赖于los_config.h中的OS_SYS_CLOCK和LOSCFG_BASE_CORE_TICK_PER_SECOND两个配置选项。
+ 系统的Tick数在关中断的情况下不进行计数,故系统Tick数不能作为准确时间计算。
+ */
#define OS_MAX_VALUE 0xFFFFFFFFUL
+/**
+ * @brief 获取自系统启动以来的Tick数
+ *
+ * @return LITE_OS_SEC_TEXT_MINOR
+ */
LITE_OS_SEC_TEXT_MINOR UINT64 LOS_TickCountGet(VOID)
{
return OsGetCurrSchedTimeCycle() / OS_CYCLE_PER_TICK;
}
-
+/**
+ * @brief 每个Tick多少Cycle数
+ *
+ * @return LITE_OS_SEC_TEXT_MINOR
+ */
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_CyclePerTickGet(VOID)
{
return g_sysClock / LOSCFG_BASE_CORE_TICK_PER_SECOND;
}
-
+/**
+ * @brief 毫秒转换成Tick
+ *
+ * @param millisec
+ * @return LITE_OS_SEC_TEXT_MINOR
+ */
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_MS2Tick(UINT32 millisec)
{
if (millisec == OS_MAX_VALUE) {
@@ -53,12 +106,22 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_MS2Tick(UINT32 millisec)
return ((UINT64)millisec * LOSCFG_BASE_CORE_TICK_PER_SECOND) / OS_SYS_MS_PER_SECOND;
}
-
+/**
+ * @brief Tick转化为毫秒
+ *
+ * @param tick
+ * @return LITE_OS_SEC_TEXT_MINOR
+ */
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_Tick2MS(UINT32 tick)
{
return ((UINT64)tick * OS_SYS_MS_PER_SECOND) / LOSCFG_BASE_CORE_TICK_PER_SECOND;
}
-
+/**
+ * @brief 纳秒转化成 tick
+ *
+ * @param nanoseconds
+ * @return LITE_OS_SEC_TEXT_MINOR
+ */
LITE_OS_SEC_TEXT_MINOR UINT32 OsNS2Tick(UINT64 nanoseconds)
{
const UINT32 nsPerTick = OS_SYS_NS_PER_SECOND / LOSCFG_BASE_CORE_TICK_PER_SECOND;
diff --git a/src/kernel_liteos_a/kernel/base/core/los_task.c b/src/kernel_liteos_a/kernel/base/core/los_task.c
index 2bd80726..7509eddd 100644
--- a/src/kernel_liteos_a/kernel/base/core/los_task.c
+++ b/src/kernel_liteos_a/kernel/base/core/los_task.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -64,17 +64,94 @@
#ifdef LOSCFG_KERNEL_CONTAINER
#include "los_container_pri.h"
#endif
+/**
+ * @file los_task.c
+ * @brief
+ * @verbatim
+ 基本概念
+ 从系统角度看,任务是竞争系统资源的最小运行单元。任务可以使用或等待CPU、
+ 使用内存空间等系统资源,并独立于其它任务运行。
+ 任务模块可以给用户提供多个任务,实现任务间的切换,帮助用户管理业务程序流程。具有如下特性:
+ 支持多任务。
+ 一个任务表示一个线程。
+ 抢占式调度机制,高优先级的任务可打断低优先级任务,低优先级任务必须在高优先级任务阻塞或结束后才能得到调度。
+ 相同优先级任务支持时间片轮转调度方式。
+ 共有32个优先级[0-31],最高优先级为0,最低优先级为31。
+
+ 任务状态通常分为以下四种:
+ 就绪(Ready):该任务在就绪队列中,只等待CPU。
+ 运行(Running):该任务正在执行。
+ 阻塞(Blocked):该任务不在就绪队列中。包含任务被挂起(suspend状态)、任务被延时(delay状态)、
+ 任务正在等待信号量、读写队列或者等待事件等。
+ 退出态(Dead):该任务运行结束,等待系统回收资源。
+
+ 任务状态迁移说明
+ 就绪态→运行态
+ 任务创建后进入就绪态,发生任务切换时,就绪队列中最高优先级的任务被执行,
+ 从而进入运行态,但此刻该任务依旧在就绪队列中。
+ 运行态→阻塞态
+ 正在运行的任务发生阻塞(挂起、延时、读信号量等)时,该任务会从就绪队列中删除,
+ 任务状态由运行态变成阻塞态,然后发生任务切换,运行就绪队列中最高优先级任务。
+ 阻塞态→就绪态(阻塞态→运行态)
+ 阻塞的任务被恢复后(任务恢复、延时时间超时、读信号量超时或读到信号量等),此时被
+ 恢复的任务会被加入就绪队列,从而由阻塞态变成就绪态;此时如果被恢复任务的优先级高于
+ 正在运行任务的优先级,则会发生任务切换,该任务由就绪态变成运行态。
+ 就绪态→阻塞态
+ 任务也有可能在就绪态时被阻塞(挂起),此时任务状态由就绪态变为阻塞态,该任务
+ 从就绪队列中删除,不会参与任务调度,直到该任务被恢复。
+ 运行态→就绪态
+ 有更高优先级任务创建或者恢复后,会发生任务调度,此刻就绪队列中最高优先级任务
+ 变为运行态,那么原先运行的任务由运行态变为就绪态,依然在就绪队列中。
+ 运行态→退出态
+ 运行中的任务运行结束,任务状态由运行态变为退出态。退出态包含任务运行结束的正常退出状态
+ 以及Invalid状态。例如,任务运行结束但是没有自删除,对外呈现的就是Invalid状态,即退出态。
+ 阻塞态→退出态
+ 阻塞的任务调用删除接口,任务状态由阻塞态变为退出态。
+
+ 主要术语
+ 任务ID
+ 任务ID,在任务创建时通过参数返回给用户,是任务的重要标识。系统中的ID号是唯一的。用户可以
+ 通过任务ID对指定任务进行任务挂起、任务恢复、查询任务名等操作。
+
+ 任务优先级
+ 优先级表示任务执行的优先顺序。任务的优先级决定了在发生任务切换时即将要执行的任务,
+ 就绪队列中最高优先级的任务将得到执行。
+
+ 任务入口函数
+ 新任务得到调度后将执行的函数。该函数由用户实现,在任务创建时,通过任务创建结构体设置。
+
+ 任务栈
+ 每个任务都拥有一个独立的栈空间,我们称为任务栈。栈空间里保存的信息包含局部变量、寄存器、函数参数、函数返回地址等。
+
+ 任务上下文
+ 任务在运行过程中使用的一些资源,如寄存器等,称为任务上下文。当这个任务挂起时,其他任务继续执行,
+ 可能会修改寄存器等资源中的值。如果任务切换时没有保存任务上下文,可能会导致任务恢复后出现未知错误。
+ 因此,Huawei LiteOS在任务切换时会将切出任务的任务上下文信息,保存在自身的任务栈中,以便任务恢复后,
+ 从栈空间中恢复挂起时的上下文信息,从而继续执行挂起时被打断的代码。
+ 任务控制块TCB
+ 每个任务都含有一个任务控制块(TCB)。TCB包含了任务上下文栈指针(stack pointer)、任务状态、
+ 任务优先级、任务ID、任务名、任务栈大小等信息。TCB可以反映出每个任务运行情况。
+ 任务切换
+ 任务切换包含获取就绪队列中最高优先级任务、切出任务上下文保存、切入任务上下文恢复等动作。
+
+ 运作机制
+ 用户创建任务时,系统会初始化任务栈,预置上下文。此外,系统还会将“任务入口函数”
+ 地址放在相应位置。这样在任务第一次启动进入运行态时,将会执行“任务入口函数”。
+ * @endverbatim
+ * @param pathname
+ * @return int
+ */
#if (LOSCFG_BASE_CORE_TSK_LIMIT <= 0)
#error "task maxnum cannot be zero"
#endif /* LOSCFG_BASE_CORE_TSK_LIMIT <= 0 */
-LITE_OS_SEC_BSS LosTaskCB *g_taskCBArray;
-LITE_OS_SEC_BSS LOS_DL_LIST g_losFreeTask;
-LITE_OS_SEC_BSS LOS_DL_LIST g_taskRecycleList;
-LITE_OS_SEC_BSS UINT32 g_taskMaxNum;
-LITE_OS_SEC_BSS UINT32 g_taskScheduled; /* one bit for each cores */
-LITE_OS_SEC_BSS EVENT_CB_S g_resourceEvent;
+LITE_OS_SEC_BSS LosTaskCB *g_taskCBArray;//任务池 128个
+LITE_OS_SEC_BSS LOS_DL_LIST g_losFreeTask;//空闲任务链表
+LITE_OS_SEC_BSS LOS_DL_LIST g_taskRecycleList;//回收任务链表
+LITE_OS_SEC_BSS UINT32 g_taskMaxNum;//任务最大个数
+LITE_OS_SEC_BSS UINT32 g_taskScheduled; /* one bit for each cores *///任务调度器,每个CPU都有对应位
+LITE_OS_SEC_BSS EVENT_CB_S g_resourceEvent;//资源的事件
/* spinlock for task module, only available on SMP mode */
LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_taskSpin);
@@ -82,7 +159,7 @@ STATIC VOID OsConsoleIDSetHook(UINT32 param1,
UINT32 param2) __attribute__((weakref("OsSetConsoleID")));
/* temp task blocks for booting procedure */
-LITE_OS_SEC_BSS STATIC LosTaskCB g_mainTask[LOSCFG_KERNEL_CORE_NUM];
+LITE_OS_SEC_BSS STATIC LosTaskCB g_mainTask[LOSCFG_KERNEL_CORE_NUM];//启动引导过程中使用的临时任务
LosTaskCB *OsGetMainTask(VOID)
{
@@ -92,27 +169,26 @@ LosTaskCB *OsGetMainTask(VOID)
VOID OsSetMainTask(VOID)
{
UINT32 i;
- CHAR *name = "osMain";
+ CHAR *name = "osMain";//任务名称
SchedParam schedParam = { 0 };
schedParam.policy = LOS_SCHED_RR;
schedParam.basePrio = OS_PROCESS_PRIORITY_HIGHEST;
schedParam.priority = OS_TASK_PRIORITY_LOWEST;
-
+ //为每个CPU core 设置mainTask
for (i = 0; i < LOSCFG_KERNEL_CORE_NUM; i++) {
g_mainTask[i].taskStatus = OS_TASK_STATUS_UNUSED;
- g_mainTask[i].taskID = LOSCFG_BASE_CORE_TSK_LIMIT;
+ g_mainTask[i].taskID = LOSCFG_BASE_CORE_TSK_LIMIT;//128
g_mainTask[i].processCB = OS_KERNEL_PROCESS_GROUP;
#ifdef LOSCFG_KERNEL_SMP_LOCKDEP
g_mainTask[i].lockDep.lockDepth = 0;
g_mainTask[i].lockDep.waitLock = NULL;
#endif
(VOID)strncpy_s(g_mainTask[i].taskName, OS_TCB_NAME_LEN, name, OS_TCB_NAME_LEN - 1);
- LOS_ListInit(&g_mainTask[i].lockList);
+ LOS_ListInit(&g_mainTask[i].lockList);//初始化任务锁链表,上面挂的是任务已申请到的互斥锁
(VOID)OsSchedParamInit(&g_mainTask[i], schedParam.policy, &schedParam, NULL);
}
}
-
VOID OsSetMainTaskProcess(UINTPTR processCB)
{
for (UINT32 i = 0; i < LOSCFG_KERNEL_CORE_NUM; i++) {
@@ -122,31 +198,40 @@ VOID OsSetMainTaskProcess(UINTPTR processCB)
#endif
}
}
-
+///空闲任务,每个CPU都有自己的空闲任务
LITE_OS_SEC_TEXT WEAK VOID OsIdleTask(VOID)
{
- while (1) {
- WFI;
+ while (1) {//只有一个死循环
+ WFI;//WFI指令:arm core 立即进入low-power standby state,进入休眠模式,等待中断。
}
}
VOID OsTaskInsertToRecycleList(LosTaskCB *taskCB)
{
- LOS_ListTailInsert(&g_taskRecycleList, &taskCB->pendList);
+ LOS_ListTailInsert(&g_taskRecycleList, &taskCB->pendList);//将任务挂入回收链表,等待回收
}
+/*!
+ * @brief OsTaskJoinPostUnsafe
+ * 查找task 通过 OS_TCB_FROM_PENDLIST 来完成,相当于由LOS_DL_LIST找到LosTaskCB,
+ * 将那些和参数任务绑在一起的task唤醒.
+ * @param taskCB
+ * @return
+ *
+ * @see
+ */
LITE_OS_SEC_TEXT_INIT VOID OsTaskJoinPostUnsafe(LosTaskCB *taskCB)
{
- if (taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) {
- if (!LOS_ListEmpty(&taskCB->joinList)) {
- LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(taskCB->joinList)));
- OsTaskWakeClearPendMask(resumedTask);
+ if (taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) {//join任务处理
+ if (!LOS_ListEmpty(&taskCB->joinList)) {//注意到了这里 joinList中的节点身上都有阻塞标签
+ LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(taskCB->joinList)));//通过贴有JOIN标签链表的第一个节点找到Task
+ OsTaskWakeClearPendMask(resumedTask);//清除任务的挂起标记
resumedTask->ops->wake(resumedTask);
}
}
- taskCB->taskStatus |= OS_TASK_STATUS_EXIT;
+ taskCB->taskStatus |= OS_TASK_STATUS_EXIT;//贴上任务退出标签
}
-
+/// 挂起任务,任务进入等待链表,Join代表是支持通过一个任务去唤醒其他的任务
LITE_OS_SEC_TEXT UINT32 OsTaskJoinPendUnsafe(LosTaskCB *taskCB)
{
if (taskCB->taskStatus & OS_TASK_STATUS_INIT) {
@@ -158,20 +243,20 @@ LITE_OS_SEC_TEXT UINT32 OsTaskJoinPendUnsafe(LosTaskCB *taskCB)
}
if ((taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) && LOS_ListEmpty(&taskCB->joinList)) {
- OsTaskWaitSetPendMask(OS_TASK_WAIT_JOIN, taskCB->taskID, LOS_WAIT_FOREVER);
+ OsTaskWaitSetPendMask(OS_TASK_WAIT_JOIN, taskCB->taskID, LOS_WAIT_FOREVER);//设置任务的等待标记
LosTaskCB *runTask = OsCurrTaskGet();
return runTask->ops->wait(runTask, &taskCB->joinList, LOS_WAIT_FOREVER);
}
return LOS_EINVAL;
}
-
+///任务设置分离模式 Deatch和JOIN是一对有你没我的状态
LITE_OS_SEC_TEXT UINT32 OsTaskSetDetachUnsafe(LosTaskCB *taskCB)
{
- if (taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) {
- if (LOS_ListEmpty(&(taskCB->joinList))) {
- LOS_ListDelete(&(taskCB->joinList));
- taskCB->taskStatus &= ~OS_TASK_FLAG_PTHREAD_JOIN;
+ if (taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) {//join状态时
+ if (LOS_ListEmpty(&(taskCB->joinList))) {//joinlist中没有数据了
+ LOS_ListDelete(&(taskCB->joinList));//所谓删除就是自己指向自己
+ taskCB->taskStatus &= ~OS_TASK_FLAG_PTHREAD_JOIN;//去掉JOIN标签
return LOS_OK;
}
/* This error code has a special purpose and is not allowed to appear again on the interface */
@@ -181,39 +266,39 @@ LITE_OS_SEC_TEXT UINT32 OsTaskSetDetachUnsafe(LosTaskCB *taskCB)
return LOS_EINVAL;
}
+//初始化任务模块
LITE_OS_SEC_TEXT_INIT UINT32 OsTaskInit(UINTPTR processCB)
{
UINT32 index;
UINT32 size;
UINT32 ret;
- g_taskMaxNum = LOSCFG_BASE_CORE_TSK_LIMIT;
- size = (g_taskMaxNum + 1) * sizeof(LosTaskCB);
+ g_taskMaxNum = LOSCFG_BASE_CORE_TSK_LIMIT;//任务池中最多默认128个,可谓铁打的任务池流水的线程
+ size = (g_taskMaxNum + 1) * sizeof(LosTaskCB);//计算需分配内存总大小
/*
* This memory is resident memory and is used to save the system resources
* of task control block and will not be freed.
*/
- g_taskCBArray = (LosTaskCB *)LOS_MemAlloc(m_aucSysMem0, size);
+ g_taskCBArray = (LosTaskCB *)LOS_MemAlloc(m_aucSysMem0, size);//任务池常驻内存,不被释放
if (g_taskCBArray == NULL) {
ret = LOS_ERRNO_TSK_NO_MEMORY;
goto EXIT;
}
(VOID)memset_s(g_taskCBArray, size, 0, size);
- LOS_ListInit(&g_losFreeTask);
- LOS_ListInit(&g_taskRecycleList);
- for (index = 0; index < g_taskMaxNum; index++) {
- g_taskCBArray[index].taskStatus = OS_TASK_STATUS_UNUSED;
- g_taskCBArray[index].taskID = index;
+ LOS_ListInit(&g_losFreeTask);//初始化空闲任务链表
+ LOS_ListInit(&g_taskRecycleList);//初始化回收任务链表
+ for (index = 0; index < g_taskMaxNum; index++) {//任务挨个初始化
+ g_taskCBArray[index].taskStatus = OS_TASK_STATUS_UNUSED;//默认未使用,干净.
+ g_taskCBArray[index].taskID = index;//任务ID [0 ~ g_taskMaxNum - 1]
g_taskCBArray[index].processCB = processCB;
- LOS_ListTailInsert(&g_losFreeTask, &g_taskCBArray[index].pendList);
- }
-
+ LOS_ListTailInsert(&g_losFreeTask, &g_taskCBArray[index].pendList);//通过pendList节点插入空闲任务列表
+ }//注意:这里挂的是pendList节点,所以取TCB也要通过 OS_TCB_FROM_PENDLIST 取.
g_taskCBArray[index].taskStatus = OS_TASK_STATUS_UNUSED;
g_taskCBArray[index].taskID = index;
g_taskCBArray[index].processCB = processCB;
- ret = OsSchedInit();
+ ret = OsSchedInit();//调度器初始化
EXIT:
if (ret != LOS_OK) {
@@ -221,41 +306,41 @@ EXIT:
}
return ret;
}
-
+///获取IdletaskId,每个CPU核都对Task进行了内部管理,做到真正的并行处理
UINT32 OsGetIdleTaskId(VOID)
{
return OsSchedRunqueueIdleGet()->taskID;
}
-
+///创建一个空闲任务
LITE_OS_SEC_TEXT_INIT UINT32 OsIdleTaskCreate(UINTPTR processID)
{
UINT32 ret;
TSK_INIT_PARAM_S taskInitParam;
UINT32 idleTaskID;
- (VOID)memset_s((VOID *)(&taskInitParam), sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));
- taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)OsIdleTask;
- taskInitParam.uwStackSize = LOSCFG_BASE_CORE_TSK_IDLE_STACK_SIZE;
- taskInitParam.pcName = "Idle";
+ (VOID)memset_s((VOID *)(&taskInitParam), sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));//任务初始参数清0
+ taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)OsIdleTask;//入口函数
+ taskInitParam.uwStackSize = LOSCFG_BASE_CORE_TSK_IDLE_STACK_SIZE;//任务栈大小 2K
+ taskInitParam.pcName = "Idle";//任务名称
taskInitParam.policy = LOS_SCHED_IDLE;
- taskInitParam.usTaskPrio = OS_TASK_PRIORITY_LOWEST;
+ taskInitParam.usTaskPrio = OS_TASK_PRIORITY_LOWEST;//默认最低优先级 31
taskInitParam.processID = processID;
#ifdef LOSCFG_KERNEL_SMP
- taskInitParam.usCpuAffiMask = CPUID_TO_AFFI_MASK(ArchCurrCpuid());
+ taskInitParam.usCpuAffiMask = CPUID_TO_AFFI_MASK(ArchCurrCpuid());//每个idle任务只在单独的cpu上运行
#endif
ret = LOS_TaskCreateOnly(&idleTaskID, &taskInitParam);
if (ret != LOS_OK) {
return ret;
}
LosTaskCB *idleTask = OS_TCB_FROM_TID(idleTaskID);
- idleTask->taskStatus |= OS_TASK_FLAG_SYSTEM_TASK;
+ idleTask->taskStatus |= OS_TASK_FLAG_SYSTEM_TASK; //标记为系统任务,idle任务是给CPU休息用的,当然是个系统任务
OsSchedRunqueueIdleInit(idleTask);
return LOS_TaskResume(idleTaskID);
}
/*
- * Description : get id of current running task.
+ * Description : get id of current running task. | 获取当前CPU正在执行的任务ID
* Return : task id
*/
LITE_OS_SEC_TEXT UINT32 LOS_CurTaskIDGet(VOID)
@@ -267,7 +352,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_CurTaskIDGet(VOID)
}
return runTask->taskID;
}
-
+/// 创建指定任务同步信号量
STATIC INLINE UINT32 TaskSyncCreate(LosTaskCB *taskCB)
{
#ifdef LOSCFG_KERNEL_SMP_TASK_SYNC
@@ -280,7 +365,7 @@ STATIC INLINE UINT32 TaskSyncCreate(LosTaskCB *taskCB)
#endif
return LOS_OK;
}
-
+/// 销毁指定任务同步信号量
STATIC INLINE VOID OsTaskSyncDestroy(UINT32 syncSignal)
{
#ifdef LOSCFG_KERNEL_SMP_TASK_SYNC
@@ -291,6 +376,14 @@ STATIC INLINE VOID OsTaskSyncDestroy(UINT32 syncSignal)
}
#ifdef LOSCFG_KERNEL_SMP
+/*!
+ * @brief OsTaskSyncWait
+ * 任务同步等待,通过信号量保持同步
+ * @param taskCB
+ * @return
+ *
+ * @see
+ */
STATIC INLINE UINT32 OsTaskSyncWait(const LosTaskCB *taskCB)
{
#ifdef LOSCFG_KERNEL_SMP_TASK_SYNC
@@ -301,9 +394,9 @@ STATIC INLINE UINT32 OsTaskSyncWait(const LosTaskCB *taskCB)
/*
* gc soft timer works every OS_MP_GC_PERIOD period, to prevent this timer
* triggered right at the timeout has reached, we set the timeout as double
- * of the gc period.
+ * of the gc period.
*/
- if (LOS_SemPend(taskCB->syncSignal, OS_MP_GC_PERIOD * 2) != LOS_OK) { /* 2: Wait 200 ms */
+ if (LOS_SemPend(taskCB->syncSignal, OS_MP_GC_PERIOD * 2) != LOS_OK) {
ret = LOS_ERRNO_TSK_MP_SYNC_FAILED;
}
@@ -316,7 +409,7 @@ STATIC INLINE UINT32 OsTaskSyncWait(const LosTaskCB *taskCB)
#endif
}
#endif
-
+/// 同步唤醒
STATIC INLINE VOID OsTaskSyncWake(const LosTaskCB *taskCB)
{
#ifdef LOSCFG_KERNEL_SMP_TASK_SYNC
@@ -338,14 +431,14 @@ STATIC INLINE VOID OsInsertTCBToFreeList(LosTaskCB *taskCB)
taskCB->taskStatus = OS_TASK_STATUS_UNUSED;
LOS_ListAdd(&g_losFreeTask, &taskCB->pendList);
}
-
+//释放任务在内核态下占用的资源
STATIC VOID OsTaskKernelResourcesToFree(UINT32 syncSignal, UINTPTR topOfStack)
{
- OsTaskSyncDestroy(syncSignal);
+ OsTaskSyncDestroy(syncSignal);//任务销毁,同步信息
- (VOID)LOS_MemFree((VOID *)m_aucSysMem1, (VOID *)topOfStack);
+ (VOID)LOS_MemFree((VOID *)m_aucSysMem1, (VOID *)topOfStack);//释放内核态空间
}
-
+//释放任务资源
STATIC VOID OsTaskResourcesToFree(LosTaskCB *taskCB)
{
UINT32 syncSignal = LOSCFG_BASE_IPC_SEM_LIMIT;
@@ -353,7 +446,7 @@ STATIC VOID OsTaskResourcesToFree(LosTaskCB *taskCB)
UINTPTR topOfStack;
#ifdef LOSCFG_KERNEL_VM
- if ((taskCB->taskStatus & OS_TASK_FLAG_USER_MODE) && (taskCB->userMapBase != 0)) {
+ if ((taskCB->taskStatus & OS_TASK_FLAG_USER_MODE) && (taskCB->userMapBase != 0)) {//释放用户态栈
SCHEDULER_LOCK(intSave);
UINT32 mapBase = (UINTPTR)taskCB->userMapBase;
UINT32 mapSize = taskCB->userMapSize;
@@ -363,48 +456,48 @@ STATIC VOID OsTaskResourcesToFree(LosTaskCB *taskCB)
LosProcessCB *processCB = OS_PCB_FROM_TCB(taskCB);
LOS_ASSERT(!(OsProcessVmSpaceGet(processCB) == NULL));
- UINT32 ret = OsUnMMap(OsProcessVmSpaceGet(processCB), (UINTPTR)mapBase, mapSize);
+ UINT32 ret = OsUnMMap(OsProcessVmSpaceGet(processCB), (UINTPTR)mapBase, mapSize);//解除映射
if ((ret != LOS_OK) && (mapBase != 0) && !OsProcessIsInit(processCB)) {
PRINT_ERR("process(%u) unmmap user task(%u) stack failed! mapbase: 0x%x size :0x%x, error: %d\n",
processCB->processID, taskCB->taskID, mapBase, mapSize, ret);
}
#ifdef LOSCFG_KERNEL_LITEIPC
- LiteIpcRemoveServiceHandle(taskCB->taskID);
+ LiteIpcRemoveServiceHandle(taskCB->taskID);//详见百篇博客之IPC篇
#endif
}
#endif
- if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
+ if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {//任务还没有使用情况
topOfStack = taskCB->topOfStack;
taskCB->topOfStack = 0;
#ifdef LOSCFG_KERNEL_SMP_TASK_SYNC
syncSignal = taskCB->syncSignal;
taskCB->syncSignal = LOSCFG_BASE_IPC_SEM_LIMIT;
#endif
- OsTaskKernelResourcesToFree(syncSignal, topOfStack);
+ OsTaskKernelResourcesToFree(syncSignal, topOfStack);//释放内核态所占内存,即内核态的栈空间
SCHEDULER_LOCK(intSave);
#ifdef LOSCFG_KERNEL_VM
- OsClearSigInfoTmpList(&(taskCB->sig));
+ OsClearSigInfoTmpList(&(taskCB->sig));//归还信号控制块的内存
#endif
OsInsertTCBToFreeList(taskCB);
SCHEDULER_UNLOCK(intSave);
}
return;
}
-
+//批量回收任务
LITE_OS_SEC_TEXT VOID OsTaskCBRecycleToFree(void)
{
UINT32 intSave;
SCHEDULER_LOCK(intSave);
- while (!LOS_ListEmpty(&g_taskRecycleList)) {
- LosTaskCB *taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_taskRecycleList));
- LOS_ListDelete(&taskCB->pendList);
+ while (!LOS_ListEmpty(&g_taskRecycleList)) {//遍历回收链表
+ LosTaskCB *taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_taskRecycleList));//取出任务
+ LOS_ListDelete(&taskCB->pendList);//重置节点
SCHEDULER_UNLOCK(intSave);
- OsTaskResourcesToFree(taskCB);
+ OsTaskResourcesToFree(taskCB);//释放任务所占资源
SCHEDULER_LOCK(intSave);
}
@@ -414,7 +507,7 @@ LITE_OS_SEC_TEXT VOID OsTaskCBRecycleToFree(void)
/*
* Description : All task entry
* Input : taskID --- The ID of the task to be run
- */
+ *///所有任务的入口函数,OsTaskEntry是在new task OsTaskStackInit 时指定的
LITE_OS_SEC_TEXT_INIT VOID OsTaskEntry(UINT32 taskID)
{
LOS_ASSERT(!OS_TID_CHECK_INVALID(taskID));
@@ -424,19 +517,19 @@ LITE_OS_SEC_TEXT_INIT VOID OsTaskEntry(UINT32 taskID)
* from interrupt and other cores. release task spinlock and enable
* interrupt in sequence at the task entry.
*/
- LOS_SpinUnlock(&g_taskSpin);
- (VOID)LOS_IntUnLock();
+ LOS_SpinUnlock(&g_taskSpin);//释放任务自旋锁
+ (VOID)LOS_IntUnLock();//恢复中断
LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID);
- taskCB->joinRetval = taskCB->taskEntry(taskCB->args[0], taskCB->args[1],
+ taskCB->joinRetval = taskCB->taskEntry(taskCB->args[0], taskCB->args[1],//调用任务的入口函数
taskCB->args[2], taskCB->args[3]); /* 2 & 3: just for args array index */
if (!(taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN)) {
- taskCB->joinRetval = 0;
+ taskCB->joinRetval = 0;//结合数为0
}
-
+
OsRunningTaskToExit(taskCB, 0);
}
-
+///任务创建参数检查
STATIC UINT32 TaskCreateParamCheck(const UINT32 *taskID, TSK_INIT_PARAM_S *initParam)
{
UINT32 poolSize = OS_SYS_MEM_SIZE;
@@ -455,30 +548,30 @@ STATIC UINT32 TaskCreateParamCheck(const UINT32 *taskID, TSK_INIT_PARAM_S *initP
}
}
- if (initParam->pfnTaskEntry == NULL) {
+ if (initParam->pfnTaskEntry == NULL) {//入口函数不能为空
return LOS_ERRNO_TSK_ENTRY_NULL;
}
- if (initParam->usTaskPrio > OS_TASK_PRIORITY_LOWEST) {
+ if (initParam->usTaskPrio > OS_TASK_PRIORITY_LOWEST) {//优先级数值不能大于最低优先级31
return LOS_ERRNO_TSK_PRIOR_ERROR;
}
- if (initParam->uwStackSize > poolSize) {
+ if (initParam->uwStackSize > poolSize) {//希望申请的栈大小不能大于总池子
return LOS_ERRNO_TSK_STKSZ_TOO_LARGE;
}
- if (initParam->uwStackSize == 0) {
+ if (initParam->uwStackSize == 0) {//任何任务都必须有内核态栈,uwStackSize为0时使用默认栈大小
initParam->uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;
}
initParam->uwStackSize = (UINT32)ALIGN(initParam->uwStackSize, LOSCFG_STACK_POINT_ALIGN_SIZE);
- if (initParam->uwStackSize < LOS_TASK_MIN_STACK_SIZE) {
+ if (initParam->uwStackSize < LOS_TASK_MIN_STACK_SIZE) {//运行栈空间不能低于最低值
return LOS_ERRNO_TSK_STKSZ_TOO_SMALL;
}
return LOS_OK;
}
-
+///任务控制块反初始化,释放任务占用的相关资源
STATIC VOID TaskCBDeInit(LosTaskCB *taskCB)
{
UINT32 intSave;
@@ -531,13 +624,13 @@ STATIC VOID TaskCBBaseInit(LosTaskCB *taskCB, const TSK_INIT_PARAM_S *initParam)
LOS_ListInit(&taskCB->joinList);
}
- LOS_ListInit(&taskCB->lockList);
+ LOS_ListInit(&taskCB->lockList);//初始化互斥锁链表
SET_SORTLIST_VALUE(&taskCB->sortList, OS_SORT_LINK_INVALID_TIME);
#ifdef LOSCFG_KERNEL_VM
taskCB->futex.index = OS_INVALID_VALUE;
#endif
}
-
+///任务初始化
STATIC UINT32 TaskCBInit(LosTaskCB *taskCB, const TSK_INIT_PARAM_S *initParam)
{
UINT32 ret;
@@ -546,7 +639,8 @@ STATIC UINT32 TaskCBInit(LosTaskCB *taskCB, const TSK_INIT_PARAM_S *initParam)
LosSchedParam initSchedParam = {0};
UINT16 policy = (initParam->policy == LOS_SCHED_NORMAL) ? LOS_SCHED_RR : initParam->policy;
- TaskCBBaseInit(taskCB, initParam);
+ TaskCBBaseInit(taskCB, initParam);//初始化任务的基本信息,
+ //taskCB->stackPointer指向内核态栈 sp位置,该位置存着 任务初始上下文
schedParam.policy = policy;
ret = OsProcessAddNewTask(initParam->processID, taskCB, &schedParam, &numCount);
@@ -598,25 +692,34 @@ STATIC UINT32 TaskStackInit(LosTaskCB *taskCB, const TSK_INIT_PARAM_S *initParam
#endif
return LOS_OK;
}
-
+///获取一个空闲TCB
STATIC LosTaskCB *GetFreeTaskCB(VOID)
{
UINT32 intSave;
SCHEDULER_LOCK(intSave);
- if (LOS_ListEmpty(&g_losFreeTask)) {
+ if (LOS_ListEmpty(&g_losFreeTask)) {//全局空闲task为空
SCHEDULER_UNLOCK(intSave);
PRINT_ERR("No idle TCB in the system!\n");
return NULL;
}
LosTaskCB *taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_losFreeTask));
- LOS_ListDelete(LOS_DL_LIST_FIRST(&g_losFreeTask));
+ LOS_ListDelete(LOS_DL_LIST_FIRST(&g_losFreeTask));//从g_losFreeTask链表中摘除自己
SCHEDULER_UNLOCK(intSave);
return taskCB;
}
+/*!
+ * @brief LOS_TaskCreateOnly
+ * 创建任务,并使该任务进入suspend状态,不对该任务进行调度。如果需要调度,可以调用LOS_TaskResume使该任务进入ready状态
+ * @param initParam
+ * @param taskID
+ * @return
+ *
+ * @see
+ */
LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskCreateOnly(UINT32 *taskID, TSK_INIT_PARAM_S *initParam)
{
UINT32 errRet = TaskCreateParamCheck(taskID, initParam);
@@ -656,7 +759,7 @@ DEINIT_TCB:
TaskCBDeInit(taskCB);
return errRet;
}
-
+///创建任务,并使该任务进入ready状态,如果就绪队列中没有更高优先级的任务,则运行该任务
LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskCreate(UINT32 *taskID, TSK_INIT_PARAM_S *initParam)
{
UINT32 ret;
@@ -670,7 +773,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskCreate(UINT32 *taskID, TSK_INIT_PARAM_S *in
return LOS_ERRNO_TSK_YIELD_IN_INT;
}
- if (OsProcessIsUserMode(OsCurrProcessGet())) {
+ if (OsProcessIsUserMode(OsCurrProcessGet())) { //当前进程为用户进程
initParam->processID = (UINTPTR)OsGetKernelInitProcess();
} else {
initParam->processID = (UINTPTR)OsCurrProcessGet();
@@ -696,7 +799,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskCreate(UINT32 *taskID, TSK_INIT_PARAM_S *in
return LOS_OK;
}
-
+///恢复挂起的任务,使该任务进入ready状态
LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskResume(UINT32 taskID)
{
UINT32 intSave;
@@ -711,7 +814,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskResume(UINT32 taskID)
SCHEDULER_LOCK(intSave);
/* clear pending signal */
- taskCB->signal &= ~SIGNAL_SUSPEND;
+ taskCB->signal &= ~SIGNAL_SUSPEND;//清除挂起信号
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
errRet = LOS_ERRNO_TSK_NOT_CREATED;
@@ -737,13 +840,13 @@ LOS_ERREND:
}
/*
- * Check if needs to do the suspend operation on the running task.
- * Return TRUE, if needs to do the suspension.
- * Return FALSE, if meets following circumstances:
- * 1. Do the suspension across cores, if SMP is enabled
- * 2. Do the suspension when preemption is disabled
- * 3. Do the suspension in hard-irq
- * then LOS_TaskSuspend will directly return with 'ret' value.
+ * Check if needs to do the suspend operation on the running task. //检查是否需要对正在运行的任务执行挂起操作。
+ * Return TRUE, if needs to do the suspension. //如果需要暂停,返回TRUE。
+ * Return FALSE, if meets following circumstances: //如果满足以下情况,则返回FALSE:
+ * 1. Do the suspension across cores, if SMP is enabled //1.如果启用了SMP,则跨CPU核执行挂起操作
+ * 2. Do the suspension when preemption is disabled //2.当禁用抢占时则挂起
+ * 3. Do the suspension in hard-irq //3.在硬中断时则挂起
+ * then LOS_TaskSuspend will directly return with 'ret' value. //那么LOS_taskssuspend将直接返回ret值。
*/
LITE_OS_SEC_TEXT_INIT STATIC BOOL OsTaskSuspendCheckOnRun(LosTaskCB *taskCB, UINT32 *ret)
{
@@ -752,20 +855,20 @@ LITE_OS_SEC_TEXT_INIT STATIC BOOL OsTaskSuspendCheckOnRun(LosTaskCB *taskCB, UIN
#ifdef LOSCFG_KERNEL_SMP
/* ASYNCHRONIZED. No need to do task lock checking */
- if (taskCB->currCpu != ArchCurrCpuid()) {
+ if (taskCB->currCpu != ArchCurrCpuid()) {//跨CPU核的情况
taskCB->signal = SIGNAL_SUSPEND;
- LOS_MpSchedule(taskCB->currCpu);
+ LOS_MpSchedule(taskCB->currCpu);//task所属CPU执行调度
return FALSE;
}
#endif
- if (!OsPreemptableInSched()) {
+ if (!OsPreemptableInSched()) {//不能抢占时
/* Suspending the current core's running task */
*ret = LOS_ERRNO_TSK_SUSPEND_LOCKED;
return FALSE;
}
- if (OS_INT_ACTIVE) {
+ if (OS_INT_ACTIVE) {//正在硬中断中
/* suspend running task in interrupt */
taskCB->signal = SIGNAL_SUSPEND;
return FALSE;
@@ -773,7 +876,7 @@ LITE_OS_SEC_TEXT_INIT STATIC BOOL OsTaskSuspendCheckOnRun(LosTaskCB *taskCB, UIN
return TRUE;
}
-
+///任务暂停,参数可以不是当前任务,也就是说 A任务可以让B任务处于阻塞状态,挂起指定的任务,然后切换任务
LITE_OS_SEC_TEXT STATIC UINT32 OsTaskSuspend(LosTaskCB *taskCB)
{
UINT32 errRet;
@@ -786,14 +889,14 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsTaskSuspend(LosTaskCB *taskCB)
return LOS_ERRNO_TSK_ALREADY_SUSPENDED;
}
- if ((tempStatus & OS_TASK_STATUS_RUNNING) &&
- !OsTaskSuspendCheckOnRun(taskCB, &errRet)) {
+ if ((tempStatus & OS_TASK_STATUS_RUNNING) && //如果参数任务正在运行,注意多Cpu core情况下,贴着正在运行标签的任务并不一定是当前CPU的执行任务,
+ !OsTaskSuspendCheckOnRun(taskCB, &errRet)) {//很有可能是别的CPU core在跑的任务
return errRet;
}
return taskCB->ops->suspend(taskCB);
}
-
+///外部接口,对OsTaskSuspend的封装
LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskSuspend(UINT32 taskID)
{
UINT32 intSave;
@@ -813,7 +916,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskSuspend(UINT32 taskID)
SCHEDULER_UNLOCK(intSave);
return errRet;
}
-
+///设置任务为不使用状态
STATIC INLINE VOID OsTaskStatusUnusedSet(LosTaskCB *taskCB)
{
taskCB->taskStatus |= OS_TASK_STATUS_UNUSED;
@@ -925,7 +1028,6 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskDelete(UINT32 taskID)
if (!OsPreemptable()) {
return LOS_ERRNO_TSK_DELETE_LOCKED;
}
-
OsRunningTaskToExit(taskCB, OS_PRO_EXIT_OK);
return LOS_NOK;
}
@@ -959,7 +1061,7 @@ LOS_ERREND:
}
return ret;
}
-
+///任务延时等待,释放CPU,等待时间到期后该任务会重新进入ready状态
LITE_OS_SEC_TEXT UINT32 LOS_TaskDelay(UINT32 tick)
{
UINT32 intSave;
@@ -989,7 +1091,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_TaskDelay(UINT32 tick)
SCHEDULER_UNLOCK(intSave);
return ret;
}
-
+///获取任务的优先级
LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskPriGet(UINT32 taskID)
{
UINT32 intSave;
@@ -1001,7 +1103,7 @@ LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskPriGet(UINT32 taskID)
LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID);
SCHEDULER_LOCK(intSave);
- if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
+ if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {//持自旋锁读取任务状态,防止与其他核并发修改
SCHEDULER_UNLOCK(intSave);
return (UINT16)OS_INVALID;
}
@@ -1010,7 +1112,7 @@ LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskPriGet(UINT32 taskID)
SCHEDULER_UNLOCK(intSave);
return param.priority;
}
-
+///设置指定任务的优先级
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskPriSet(UINT32 taskID, UINT16 taskPrio)
{
UINT32 intSave;
@@ -1048,12 +1150,13 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskPriSet(UINT32 taskID, UINT16 taskPrio)
}
return LOS_OK;
}
-
+///设置当前任务的优先级
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_CurTaskPriSet(UINT16 taskPrio)
{
return LOS_TaskPriSet(OsCurrTaskGet()->taskID, taskPrio);
}
+//当前任务释放CPU,并将其移到具有相同优先级的就绪任务队列的末尾. 读懂这个函数 你就彻底搞懂了 yield
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskYield(VOID)
{
UINT32 intSave;
@@ -1099,7 +1202,7 @@ LITE_OS_SEC_TEXT_MINOR VOID LOS_TaskUnlock(VOID)
LOS_Schedule();
}
}
-
+//获取任务信息,给shell使用的
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskInfoGet(UINT32 taskID, TSK_INFO_S *taskInfo)
{
UINT32 intSave;
@@ -1129,8 +1232,8 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskInfoGet(UINT32 taskID, TSK_INFO_S *taskInf
taskCB->ops->schedParamGet(taskCB, ¶m);
taskInfo->usTaskStatus = taskCB->taskStatus;
taskInfo->usTaskPrio = param.priority;
- taskInfo->uwStackSize = taskCB->stackSize;
- taskInfo->uwTopOfStack = taskCB->topOfStack;
+ taskInfo->uwStackSize = taskCB->stackSize; //内核态栈大小
+ taskInfo->uwTopOfStack = taskCB->topOfStack;//内核态栈顶位置
taskInfo->uwEventMask = taskCB->eventMask;
taskInfo->taskEvent = taskCB->taskEvent;
taskInfo->pTaskMux = taskCB->taskMux;
@@ -1141,16 +1244,16 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskInfoGet(UINT32 taskID, TSK_INFO_S *taskInf
}
taskInfo->acName[LOS_TASK_NAMELEN - 1] = '\0';
- taskInfo->uwBottomOfStack = TRUNCATE(((UINTPTR)taskCB->topOfStack + taskCB->stackSize),
+ taskInfo->uwBottomOfStack = TRUNCATE(((UINTPTR)taskCB->topOfStack + taskCB->stackSize),//这里可以看出栈底地址是高于栈顶
OS_TASK_STACK_ADDR_ALIGN);
- taskInfo->uwCurrUsed = (UINT32)(taskInfo->uwBottomOfStack - taskInfo->uwSP);
+ taskInfo->uwCurrUsed = (UINT32)(taskInfo->uwBottomOfStack - taskInfo->uwSP);//当前任务栈已使用了多少
- taskInfo->bOvf = OsStackWaterLineGet((const UINTPTR *)taskInfo->uwBottomOfStack,
+ taskInfo->bOvf = OsStackWaterLineGet((const UINTPTR *)taskInfo->uwBottomOfStack,//获取栈的使用情况
(const UINTPTR *)taskInfo->uwTopOfStack, &taskInfo->uwPeakUsed);
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
-
+///CPU亲和性(affinity)将任务绑在指定CPU上,用于多核CPU情况,(该函数仅在SMP模式下支持)
LITE_OS_SEC_TEXT BOOL OsTaskCpuAffiSetUnsafe(UINT32 taskID, UINT16 newCpuAffiMask, UINT16 *oldCpuAffiMask)
{
#ifdef LOSCFG_KERNEL_SMP
@@ -1176,31 +1279,32 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskCpuAffiSet(UINT32 taskID, UINT16 cpuAffiMa
UINT32 intSave;
UINT16 currCpuMask;
- if (OS_TID_CHECK_INVALID(taskID)) {
+ if (OS_TID_CHECK_INVALID(taskID)) {//检测taskid是否有效,task由task池分配,鸿蒙默认128个任务 ID范围[0:127]
return LOS_ERRNO_TSK_ID_INVALID;
}
- if (!(cpuAffiMask & LOSCFG_KERNEL_CPU_MASK)) {
+ if (!(cpuAffiMask & LOSCFG_KERNEL_CPU_MASK)) {//检测cpu亲和力
return LOS_ERRNO_TSK_CPU_AFFINITY_MASK_ERR;
}
LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID);
SCHEDULER_LOCK(intSave);
- if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
+ if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {//贴有未使用标签的处理
SCHEDULER_UNLOCK(intSave);
return LOS_ERRNO_TSK_NOT_CREATED;
}
needSched = OsTaskCpuAffiSetUnsafe(taskID, cpuAffiMask, &currCpuMask);
SCHEDULER_UNLOCK(intSave);
+
if (needSched && OS_SCHEDULER_ACTIVE) {
- LOS_MpSchedule(currCpuMask);
- LOS_Schedule();
+ LOS_MpSchedule(currCpuMask);//发送信号调度信号给目标CPU
+ LOS_Schedule();//申请调度
}
return LOS_OK;
}
-
+///查询任务被绑在哪个CPU上
LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskCpuAffiGet(UINT32 taskID)
{
#ifdef LOSCFG_KERNEL_SMP
@@ -1214,28 +1318,32 @@ LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskCpuAffiGet(UINT32 taskID)
LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID);
SCHEDULER_LOCK(intSave);
- if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
+ if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) { //任务必须在使用
SCHEDULER_UNLOCK(intSave);
return INVALID_CPU_AFFI_MASK;
}
- cpuAffiMask = taskCB->cpuAffiMask;
+ cpuAffiMask = taskCB->cpuAffiMask; //获取亲和力掩码
SCHEDULER_UNLOCK(intSave);
return cpuAffiMask;
#else
(VOID)taskID;
- return 1;
+ return 1;//单核情况直接返回1 ,0号cpu对应0x01
#endif
}
/*
* Description : Process pending signals tagged by others cores
*/
+ /*!
+ 由其他CPU核触发阻塞进程的信号
+ 函数由汇编代码调用 ..\arch\arm\arm\src\los_dispatch.S
+*/
LITE_OS_SEC_TEXT_MINOR VOID OsTaskProcSignal(VOID)
{
UINT32 ret;
-
+ //私有且不可中断,无需保护。这个任务在其他CPU核看到它时总是在运行,所以它在执行代码的同时也可以继续接收信号
/*
* private and uninterruptable, no protection needed.
* while this task is always running when others cores see it,
@@ -1246,27 +1354,27 @@ LITE_OS_SEC_TEXT_MINOR VOID OsTaskProcSignal(VOID)
return;
}
- if (runTask->signal & SIGNAL_KILL) {
+ if (runTask->signal & SIGNAL_KILL) {//意思是其他cpu发起了要干掉你的信号
/*
* clear the signal, and do the task deletion. if the signaled task has been
* scheduled out, then this deletion will wait until next run.
- */
- runTask->signal = SIGNAL_NONE;
+ *///如果发出信号的任务已出调度就绪队列,则此删除将等待下次运行
+ runTask->signal = SIGNAL_NONE;//清除信号,
ret = LOS_TaskDelete(runTask->taskID);
if (ret != LOS_OK) {
PRINT_ERR("Task proc signal delete task(%u) failed err:0x%x\n", runTask->taskID, ret);
}
- } else if (runTask->signal & SIGNAL_SUSPEND) {
- runTask->signal &= ~SIGNAL_SUSPEND;
+ } else if (runTask->signal & SIGNAL_SUSPEND) {//意思是其他cpu发起了要挂起你的信号
+ runTask->signal &= ~SIGNAL_SUSPEND;//清除其他CPU发来的挂起信号标记
/* suspend killed task may fail, ignore the result */
(VOID)LOS_TaskSuspend(runTask->taskID);
#ifdef LOSCFG_KERNEL_SMP
- } else if (runTask->signal & SIGNAL_AFFI) {
- runTask->signal &= ~SIGNAL_AFFI;
+ } else if (runTask->signal & SIGNAL_AFFI) {//其他cpu发起了设置CPU亲和性的信号
+ runTask->signal &= ~SIGNAL_AFFI;//清除亲和性信号标记
- /* priority queue has updated, notify the target cpu */
- LOS_MpSchedule((UINT32)runTask->cpuAffiMask);
+ /* priority queue has updated, notify the target cpu */
+ LOS_MpSchedule((UINT32)runTask->cpuAffiMask);//发起调度,此任务将迁移到亲和性掩码指定的CPU上运行
#endif
}
}
@@ -1339,7 +1447,7 @@ INT32 OsUserProcessOperatePermissionsCheck(const LosTaskCB *taskCB, UINTPTR proc
return LOS_OK;
}
-
+///创建任务之前,检查用户态任务栈的参数,是否地址在用户空间
LITE_OS_SEC_TEXT_INIT STATIC UINT32 OsCreateUserTaskParamCheck(UINT32 processID, TSK_INIT_PARAM_S *param)
{
UserTaskParam *userParam = NULL;
@@ -1349,25 +1457,25 @@ LITE_OS_SEC_TEXT_INIT STATIC UINT32 OsCreateUserTaskParamCheck(UINT32 processID,
}
userParam = ¶m->userParam;
- if ((processID == OS_INVALID_VALUE) && !LOS_IsUserAddress(userParam->userArea)) {
+ if ((processID == OS_INVALID_VALUE) && !LOS_IsUserAddress(userParam->userArea)) {//堆地址必须在用户空间
return OS_INVALID_VALUE;
}
- if (!LOS_IsUserAddress((UINTPTR)param->pfnTaskEntry)) {
+ if (!LOS_IsUserAddress((UINTPTR)param->pfnTaskEntry)) {//入口函数必须在用户空间
return OS_INVALID_VALUE;
}
-
+ //堆栈必须在用户空间
if (userParam->userMapBase && !LOS_IsUserAddressRange(userParam->userMapBase, userParam->userMapSize)) {
return OS_INVALID_VALUE;
}
-
+ //检查堆,栈范围
if (!LOS_IsUserAddress(userParam->userSP)) {
return OS_INVALID_VALUE;
}
return LOS_OK;
}
-
+///创建一个用户态任务
LITE_OS_SEC_TEXT_INIT UINT32 OsCreateUserTask(UINTPTR processID, TSK_INIT_PARAM_S *initParam)
{
UINT32 taskID;
@@ -1376,18 +1484,18 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsCreateUserTask(UINTPTR processID, TSK_INIT_PARAM_
INT32 policy;
SchedParam param;
- ret = OsCreateUserTaskParamCheck(processID, initParam);
+ ret = OsCreateUserTaskParamCheck(processID, initParam);//检查参数,堆栈,入口地址必须在用户空间
if (ret != LOS_OK) {
return ret;
}
-
+ //这里可看出一个任务有两个栈,内核态栈(内核指定栈大小)和用户态栈(用户指定栈大小)
initParam->uwStackSize = OS_USER_TASK_SYSCALL_STACK_SIZE;
- initParam->usTaskPrio = OS_TASK_PRIORITY_LOWEST;
- if (processID == OS_INVALID_VALUE) {
+ initParam->usTaskPrio = OS_TASK_PRIORITY_LOWEST;//设置最低优先级 31级
+ if (processID == OS_INVALID_VALUE) {//外面没指定进程ID的处理
SCHEDULER_LOCK(intSave);
LosProcessCB *processCB = OsCurrProcessGet();
initParam->processID = (UINTPTR)processCB;
- initParam->consoleID = processCB->consoleID;
+ initParam->consoleID = processCB->consoleID;//任务控制台ID归属
SCHEDULER_UNLOCK(intSave);
ret = LOS_GetProcessScheduler(processCB->processID, &policy, NULL);
if (ret != LOS_OK) {
@@ -1400,20 +1508,20 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsCreateUserTask(UINTPTR processID, TSK_INIT_PARAM_
initParam->deadlineUs = param.deadlineUs;
initParam->periodUs = param.periodUs;
}
- } else {
- initParam->policy = LOS_SCHED_RR;
- initParam->processID = processID;
- initParam->consoleID = 0;
+ } else {//进程已经创建
+ initParam->policy = LOS_SCHED_RR;//调度方式为抢占式,注意鸿蒙不仅仅只支持抢占式调度方式
+ initParam->processID = processID;//进程ID赋值
+ initParam->consoleID = 0;//默认0号控制台
}
- ret = LOS_TaskCreateOnly(&taskID, initParam);
+ ret = LOS_TaskCreateOnly(&taskID, initParam);//只创建task实体,不申请调度
if (ret != LOS_OK) {
return OS_INVALID_VALUE;
}
return taskID;
}
-
+///获取任务的调度方式
LITE_OS_SEC_TEXT INT32 LOS_GetTaskScheduler(INT32 taskID)
{
UINT32 intSave;
@@ -1426,7 +1534,7 @@ LITE_OS_SEC_TEXT INT32 LOS_GetTaskScheduler(INT32 taskID)
LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID);
SCHEDULER_LOCK(intSave);
- if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
+ if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {//状态不能是没有在使用
policy = -LOS_EINVAL;
OS_GOTO_ERREND();
}
@@ -1439,6 +1547,7 @@ LOS_ERREND:
return policy;
}
+//设置任务的调度信息
LITE_OS_SEC_TEXT INT32 LOS_SetTaskScheduler(INT32 taskID, UINT16 policy, UINT16 priority)
{
SchedParam param = { 0 };
@@ -1463,7 +1572,7 @@ LITE_OS_SEC_TEXT INT32 LOS_SetTaskScheduler(INT32 taskID, UINT16 policy, UINT16
SCHEDULER_LOCK(intSave);
if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
- SCHEDULER_UNLOCK(intSave);
+ SCHEDULER_UNLOCK(intSave);
return LOS_EINVAL;
}
@@ -1582,12 +1691,12 @@ UINT32 LOS_TaskDetach(UINT32 taskID)
SCHEDULER_UNLOCK(intSave);
return errRet;
}
-
+//获取最大任务数
LITE_OS_SEC_TEXT UINT32 LOS_GetSystemTaskMaximum(VOID)
{
return g_taskMaxNum;
}
-
+/// 任务池中最后一个
LosTaskCB *OsGetDefaultTaskCB(VOID)
{
return &g_taskCBArray[g_taskMaxNum];
@@ -1602,44 +1711,43 @@ LITE_OS_SEC_TEXT VOID OsWriteResourceEventUnsafe(UINT32 events)
{
(VOID)OsEventWriteUnsafe(&g_resourceEvent, events, FALSE, NULL);
}
-
+///资源回收任务
STATIC VOID OsResourceRecoveryTask(VOID)
{
UINT32 ret;
- while (1) {
+ while (1) {//死循环,回收资源不存在退出情况,只要系统在运行资源就需要回收
ret = LOS_EventRead(&g_resourceEvent, OS_RESOURCE_EVENT_MASK,
- LOS_WAITMODE_OR | LOS_WAITMODE_CLR, LOS_WAIT_FOREVER);
- if (ret & (OS_RESOURCE_EVENT_FREE | OS_RESOURCE_EVENT_OOM)) {
+ LOS_WAITMODE_OR | LOS_WAITMODE_CLR, LOS_WAIT_FOREVER);//读取资源事件
+ if (ret & (OS_RESOURCE_EVENT_FREE | OS_RESOURCE_EVENT_OOM)) {//收到资源释放或内存异常情况
OsTaskCBRecycleToFree();
-
- OsProcessCBRecycleToFree();
+ OsProcessCBRecycleToFree();//回收进程到空闲进程池
}
-#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK
- if (ret & OS_RESOURCE_EVENT_OOM) {
- (VOID)OomCheckProcess();
+#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK //内存溢出监测任务开关
+ if (ret & OS_RESOURCE_EVENT_OOM) {//触发了这个事件
+ (VOID)OomCheckProcess();//检查进程的内存溢出情况
}
#endif
}
}
-
+///创建一个回收资源的任务
LITE_OS_SEC_TEXT UINT32 OsResourceFreeTaskCreate(VOID)
{
UINT32 ret;
UINT32 taskID;
TSK_INIT_PARAM_S taskInitParam;
- ret = LOS_EventInit((PEVENT_CB_S)&g_resourceEvent);
+ ret = LOS_EventInit((PEVENT_CB_S)&g_resourceEvent);//初始化资源事件
if (ret != LOS_OK) {
return LOS_NOK;
}
(VOID)memset_s((VOID *)(&taskInitParam), sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));
- taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)OsResourceRecoveryTask;
+ taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)OsResourceRecoveryTask;//入口函数
taskInitParam.uwStackSize = OS_TASK_RESOURCE_STATIC_SIZE;
taskInitParam.pcName = "ResourcesTask";
- taskInitParam.usTaskPrio = OS_TASK_RESOURCE_FREE_PRIORITY;
+ taskInitParam.usTaskPrio = OS_TASK_RESOURCE_FREE_PRIORITY;// 5 ,优先级很高
ret = LOS_TaskCreate(&taskID, &taskInitParam);
if (ret == LOS_OK) {
OS_TCB_FROM_TID(taskID)->taskStatus |= OS_TASK_FLAG_NO_DELETE;
@@ -1647,5 +1755,4 @@ LITE_OS_SEC_TEXT UINT32 OsResourceFreeTaskCreate(VOID)
return ret;
}
-LOS_MODULE_INIT(OsResourceFreeTaskCreate, LOS_INIT_LEVEL_KMOD_TASK);
-
+LOS_MODULE_INIT(OsResourceFreeTaskCreate, LOS_INIT_LEVEL_KMOD_TASK);//资源回收任务初始化
diff --git a/src/kernel_liteos_a/kernel/base/core/los_tick.c b/src/kernel_liteos_a/kernel/base/core/los_tick.c
index b5c2794e..0584b7ea 100644
--- a/src/kernel_liteos_a/kernel/base/core/los_tick.c
+++ b/src/kernel_liteos_a/kernel/base/core/los_tick.c
@@ -37,30 +37,30 @@
#endif
-LITE_OS_SEC_DATA_INIT UINT32 g_sysClock;
-LITE_OS_SEC_DATA_INIT UINT32 g_tickPerSecond;
-LITE_OS_SEC_BSS DOUBLE g_cycle2NsScale;
+LITE_OS_SEC_DATA_INIT UINT32 g_sysClock; ///< 系统时钟,是绝大部分部件工作的时钟源,也是其他所有外设的时钟的来源
+LITE_OS_SEC_DATA_INIT UINT32 g_tickPerSecond; ///< 每秒Tick数,鸿蒙默认是每秒100次,即:10ms
+LITE_OS_SEC_BSS DOUBLE g_cycle2NsScale; ///< 周期转纳秒级
/* spinlock for task module */
-LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_tickSpin);
+LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_tickSpin); ///< 节拍器自旋锁
/*
- * Description : Tick interruption handler
+ * Description : Tick interruption handler | 节拍中断处理函数 ,鸿蒙默认1ms触发一次
*/
LITE_OS_SEC_TEXT VOID OsTickHandler(VOID)
{
-#ifdef LOSCFG_SCHED_TICK_DEBUG
+#ifdef LOSCFG_SCHED_TICK_DEBUG
OsSchedDebugRecordData();
#endif
#ifdef LOSCFG_KERNEL_VDSO
- OsVdsoTimevalUpdate();
+ OsVdsoTimevalUpdate();//更新vdso数据页时间,vdso可以直接在用户进程空间绕过系统调用获取系统时间(例如:gettimeofday)
#endif
#ifdef LOSCFG_BASE_CORE_TICK_HW_TIME
HalClockIrqClear(); /* diff from every platform */
#endif
- OsSchedTick();
+ OsSchedTick();//由时钟发起的调度
}
diff --git a/src/kernel_liteos_a/kernel/base/include/los_container_pri.h b/src/kernel_liteos_a/kernel/base/include/los_container_pri.h
index d7b72c70..db10f529 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_container_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_container_pri.h
@@ -57,10 +57,10 @@
typedef enum {
CONTAINER = 0,
- PID_CONTAINER,
- PID_CHILD_CONTAINER,
- UTS_CONTAINER,
- MNT_CONTAINER,
+ PID_CONTAINER, //进程容器
+ PID_CHILD_CONTAINER, //子进程容器
+ UTS_CONTAINER, //UTS容器(主机名等系统标识隔离)
+ MNT_CONTAINER, //挂载容器
IPC_CONTAINER,
USER_CONTAINER,
TIME_CONTAINER,
@@ -70,29 +70,29 @@ typedef enum {
} ContainerType;
typedef struct Container {
- Atomic rc;
+ Atomic rc; //原子操作
#ifdef LOSCFG_PID_CONTAINER
- struct PidContainer *pidContainer;
- struct PidContainer *pidForChildContainer;
+ struct PidContainer *pidContainer; //进程容器
+ struct PidContainer *pidForChildContainer;//进程的孩子容器
#endif
#ifdef LOSCFG_UTS_CONTAINER
- struct UtsContainer *utsContainer;
+ struct UtsContainer *utsContainer; //UTS容器
#endif
#ifdef LOSCFG_MNT_CONTAINER
- struct MntContainer *mntContainer;
+ struct MntContainer *mntContainer; //挂载容器
#endif
#ifdef LOSCFG_IPC_CONTAINER
- struct IpcContainer *ipcContainer;
+ struct IpcContainer *ipcContainer; //IPC容器
#endif
#ifdef LOSCFG_TIME_CONTAINER
- struct TimeContainer *timeContainer;
- struct TimeContainer *timeForChildContainer;
+ struct TimeContainer *timeContainer; //时间容器
+ struct TimeContainer *timeForChildContainer;
#endif
#ifdef LOSCFG_NET_CONTAINER
- struct NetContainer *netContainer;
+ struct NetContainer *netContainer; //网络容器
#endif
} Container;
-
+//容器数量上限
typedef struct TagContainerLimit {
#ifdef LOSCFG_PID_CONTAINER
UINT32 pidLimit;
diff --git a/src/kernel_liteos_a/kernel/base/include/los_futex_pri.h b/src/kernel_liteos_a/kernel/base/include/los_futex_pri.h
index 7671115b..a315013d 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_futex_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_futex_pri.h
@@ -1,3 +1,31 @@
+/*!
+ * @file los_futex_pri.h
+ * @brief
+ * @link
+ @verbatim
+ FUTEX_WAIT
+ 这个操作用来检测有uaddr指向的futex是否包含关心的数值val,如果是,则继续sleep直到FUTEX_WAKE操作触发。
+ 加载futex的操作是原子的。这个加载,从比较关心的数值,到开始sleep,都是原子的,与另外一个对于同一个
+ futex的操作是线性的,串行的,严格按照顺序来执行的。如果线程开始sleep,就表示有一个waiter在futex上。
+ 如果futex的值不匹配,回调直接返回失败,错误代码是EAGAIN。
+
+ 与期望值对比的目的是为了防止丢失唤醒的操作。如果另一个线程在基于前面的数值阻塞调用之后,修改了这个值,
+ 另一个线程在数值改变之后,调用FUTEX_WAIT之前执行了FUTEX_WAKE操作,这个调用的线程就会观察到数值变换并且无法唤醒。
+ 这里的意思是,调用FUTEX_WAIT需要做上面的一个操作,就是检测一下这个值是不是我们需要的,如果不是就等待,
+ 如果是就直接运行下去。之所以检测是为了避免丢失唤醒,也就是防止一直等待下去,比如我们在调用FUTEX_WAIT之前,
+ 另一个线程已经调用了FUTEX_WAKE,那么就不会有线程调用FUTEX_WAKE,调用FUTEX_WAIT的线程就永远等不到信号了,也就永远唤醒不了了。
+
+ 如果timeout不是NULL,就表示指向了一个特定的超时时钟。这个超时间隔使用系统时钟的颗粒度四舍五入,
+ 可以保证触发不会比定时的时间早。默认情况通过CLOCK_MONOTONIC测量,但是从Linux 4.5开始,可以在futex_op中设置
+ FUTEX_CLOCK_REALTIME使用CLOCK_REALTIME测量。如果timeout是NULL,将会永远阻塞。
+
+ 注意:对于FUTEX_WAIT,timeout是一个关联的值。与其他的futex设置不同,timeout被认为是一个绝对值。
+ 使用通过FUTEX_BITSET_MATCH_ANY特殊定义的val3传入FUTEX_WAIT_BITSET可以获得附带timeout的FUTEX_WAIT的值。
+ @endverbatim
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-11-24
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
@@ -33,25 +61,27 @@
#define _LOS_FUTEX_PRI_H
#include "los_list.h"
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-#define FUTEX_REQUEUE 3
-#define FUTEX_WAKE_OP 5
+#define FUTEX_WAIT 0 ///< 原子性的检查 uaddr 中计数器的值是否为 val,如果是则让任务休眠,直到 FUTEX_WAKE 或者超时(time-out)。
+ //也就是把任务挂到 uaddr 相对应的等待队列上去。
+#define FUTEX_WAKE 1 ///< 最多唤醒 val 个等待在 uaddr 上任务。
+#define FUTEX_REQUEUE 3 ///< 调整指定锁在Futex表中的位置
+#define FUTEX_WAKE_OP 5
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_WAIT_BITSET 9
-#define FUTEX_PRIVATE 128
+#define FUTEX_PRIVATE 128 //私有快锁(以虚拟地址进行哈希)
#define FUTEX_MASK 0x3U
-
+/// 每个futex node对应一个被挂起的task ,key值唯一标识一把用户态锁,具有相同key值的node被queue_list串联起来表示被同一把锁阻塞的task队列。
typedef struct {
- UINTPTR key; /* private:uvaddr shared:paddr */
- UINT32 index; /* hash bucket index */
- UINT32 pid; /* private:process id shared:OS_INVALID(-1) */
- LOS_DL_LIST pendList; /* point to pendList in TCB struct */
- LOS_DL_LIST queueList; /* thread list blocked by this lock */
- LOS_DL_LIST futexList; /* point to the next FutexNode */
+ UINTPTR key; /* private:uvaddr | 私有锁,用虚拟地址 shared:paddr | 共享锁,用物理地址*/
+ UINT32 index; /* hash bucket index | 哈希桶索引 OsFutexKeyToIndex */
+ UINT32 pid; /* private:process id shared:OS_INVALID(-1) | 私有锁:进程ID , 共享锁为 -1 */
+ LOS_DL_LIST pendList; /* point to pendList in TCB struct | 指向 TCB 结构中的 pendList, 通过它找到任务*/
+ LOS_DL_LIST queueList; /* thread list blocked by this lock | 挂等待这把锁的任务,其实这里挂到是FutexNode.queueList ,
+ 通过 queueList 可以找到 pendList ,通过 pendList又可以找到真正的任务*/
+ LOS_DL_LIST futexList; /* point to the next FutexNode | 下一把Futex锁*/
} FutexNode;
extern UINT32 OsFutexInit(VOID);
diff --git a/src/kernel_liteos_a/kernel/base/include/los_ipc_container_pri.h b/src/kernel_liteos_a/kernel/base/include/los_ipc_container_pri.h
index 42ccbf3e..d46bd84b 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_ipc_container_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_ipc_container_pri.h
@@ -42,13 +42,13 @@ typedef struct TagQueueCB LosQueueCB;
typedef struct OsMux LosMux;
typedef LosMux pthread_mutex_t;
typedef struct ProcessCB LosProcessCB;
-
+//IPC容器
typedef struct IpcContainer {
Atomic rc;
- LosQueueCB *allQueue;
- LOS_DL_LIST freeQueueList;
+ LosQueueCB *allQueue; //队列控制块(读写分离模式)
+ LOS_DL_LIST freeQueueList;//空闲队列链表
fd_set queueFdSet;
- struct mqarray queueTable[LOSCFG_BASE_IPC_QUEUE_LIMIT];
+ struct mqarray queueTable[LOSCFG_BASE_IPC_QUEUE_LIMIT];//队列池
pthread_mutex_t mqueueMutex;
struct mqpersonal *mqPrivBuf[MAX_MQ_FD];
struct shminfo shmInfo;
diff --git a/src/kernel_liteos_a/kernel/base/include/los_memstat_pri.h b/src/kernel_liteos_a/kernel/base/include/los_memstat_pri.h
index a2a9f581..7f3aee4b 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_memstat_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_memstat_pri.h
@@ -42,7 +42,7 @@ extern "C" {
#endif /* __cplusplus */
typedef struct {
- UINT32 memUsed;
+ UINT32 memUsed; ///< 记录任务内存使用量
} TskMemUsedInfo;
extern VOID OsTaskMemUsedInc(UINT32 usedSize, UINT32 taskID);
@@ -53,7 +53,7 @@ extern VOID OsTaskMemClear(UINT32 taskID);
#ifdef LOS_MEM_SLAB
typedef struct {
- UINT32 slabUsed;
+ UINT32 slabUsed; ///< 任务占用以slab分配方式内存量
} TskSlabUsedInfo;
extern VOID OsTaskSlabUsedInc(UINT32 usedSize, UINT32 taskID);
diff --git a/src/kernel_liteos_a/kernel/base/include/los_mux_pri.h b/src/kernel_liteos_a/kernel/base/include/los_mux_pri.h
index 8837114c..ebab2648 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_mux_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_mux_pri.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
diff --git a/src/kernel_liteos_a/kernel/base/include/los_percpu_pri.h b/src/kernel_liteos_a/kernel/base/include/los_percpu_pri.h
index 8a824ffd..e561c03d 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_percpu_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_percpu_pri.h
@@ -43,9 +43,9 @@ extern "C" {
#ifdef LOSCFG_KERNEL_SMP
typedef enum {
- CPU_RUNNING = 0, /* cpu is running */
- CPU_HALT, /* cpu in the halt */
- CPU_EXC /* cpu in the exc */
+ CPU_RUNNING = 0, ///< cpu is running | CPU正在运行状态
+ CPU_HALT, ///< cpu in the halt | CPU处于暂停状态
+ CPU_EXC ///< cpu in the exc | CPU处于异常状态
} ExcFlag;
typedef struct {
@@ -55,14 +55,14 @@ typedef struct {
#endif
} Percpu;
-/* the kernel per-cpu structure */
+/*! the kernel per-cpu structure | 每个cpu的内核描述符 */
extern Percpu g_percpu[LOSCFG_KERNEL_CORE_NUM];
-
+/*! 获得当前运行CPU的信息 */
STATIC INLINE Percpu *OsPercpuGet(VOID)
{
- return &g_percpu[ArchCurrCpuid()];
+ return &g_percpu[ArchCurrCpuid()];
}
-
+/*! 获得参数CPU的信息 */
STATIC INLINE Percpu *OsPercpuGetByID(UINT32 cpuid)
{
return &g_percpu[cpuid];
diff --git a/src/kernel_liteos_a/kernel/base/include/los_pid_container_pri.h b/src/kernel_liteos_a/kernel/base/include/los_pid_container_pri.h
index cc047e68..eed94630 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_pid_container_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_pid_container_pri.h
@@ -38,29 +38,29 @@ typedef struct TagTaskCB LosTaskCB;
typedef struct ProcessCB LosProcessCB;
struct ProcessGroup;
struct Container;
-
+//虚拟进程/任务 信息
typedef struct {
- UINT32 vid; /* Virtual ID */
- UINT32 vpid; /* Virtual parent ID */
- UINTPTR cb; /* Control block */
- LosProcessCB *realParent; /* process real parent */
- LOS_DL_LIST node;
+ UINT32 vid; /* Virtual ID | 虚拟ID*/
+ UINT32 vpid; /* Virtual parent ID | 父进程虚拟ID*/
+ UINTPTR cb; /* Control block | 控制块*/
+ LosProcessCB *realParent; /* process real parent | 进程真实的父进程 */
+ LOS_DL_LIST node;//用于挂入 PidContainer.pidFreeList | tidFreeList
} ProcessVid;
#define PID_CONTAINER_LEVEL_LIMIT 3
-
+//进程容器
typedef struct PidContainer {
- Atomic rc;
- Atomic level;
- Atomic lock;
- BOOL referenced;
- UINT32 containerID;
- struct PidContainer *parent;
- struct ProcessGroup *rootPGroup;
- LOS_DL_LIST tidFreeList;
- ProcessVid tidArray[LOSCFG_BASE_CORE_TSK_LIMIT];
- LOS_DL_LIST pidFreeList;
- ProcessVid pidArray[LOSCFG_BASE_CORE_PROCESS_LIMIT];
+ Atomic rc; //原子操作
+ Atomic level; //等级,0为最高级,父比子高一级
+ Atomic lock; //锁
+ BOOL referenced; //是否被引用
+ UINT32 containerID; //容器ID
+ struct PidContainer *parent; //父进程容器
+ struct ProcessGroup *rootPGroup; //进程组
+ LOS_DL_LIST tidFreeList; //任务空闲链表
+ ProcessVid tidArray[LOSCFG_BASE_CORE_TSK_LIMIT];//虚拟任务池
+ LOS_DL_LIST pidFreeList; //进程空闲链表
+ ProcessVid pidArray[LOSCFG_BASE_CORE_PROCESS_LIMIT];//虚拟进程池
} PidContainer;
#define OS_PID_CONTAINER_FROM_PCB(processCB) ((processCB)->container->pidContainer)
diff --git a/src/kernel_liteos_a/kernel/base/include/los_process_pri.h b/src/kernel_liteos_a/kernel/base/include/los_process_pri.h
index accd618c..d8152854 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_process_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_process_pri.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -65,82 +65,86 @@ extern "C" {
#ifdef LOSCFG_SECURITY_CAPABILITY
#define OS_GROUPS_NUMBER_MAX 256
+/*! 用户描述体*/
typedef struct {
- UINT32 userID;
+ UINT32 userID; ///<用户ID [0,60000],0为root用户
UINT32 effUserID;
- UINT32 gid;
+ UINT32 gid; ///<用户组ID [0,60000],0为root用户组
UINT32 effGid;
- UINT32 groupNumber;
- UINT32 groups[1];
+ UINT32 groupNumber;///< 用户组数量
+ UINT32 groups[1]; //所属用户组列表,一个用户可属多个用户组
} User;
#endif
-
+/*! 进程组结构体*/
typedef struct ProcessGroup {
- UINTPTR pgroupLeader; /**< Process group leader is the the process that created the group */
- LOS_DL_LIST processList; /**< List of processes under this process group */
- LOS_DL_LIST exitProcessList; /**< List of closed processes (zombie processes) under this group */
- LOS_DL_LIST groupList; /**< Process group list */
+ UINTPTR pgroupLeader; /**< Process group leader is the the process that created the group | 负责创建进程组的进程首地址*/
+ LOS_DL_LIST processList; /**< List of processes under this process group | 属于该进程组的进程链表*/
+ LOS_DL_LIST exitProcessList; /**< List of closed processes (zombie processes) under this group | 进程组的僵死进程链表*/
+ LOS_DL_LIST groupList; /**< Process group list | 进程组链表,上面挂的都是进程组*/
} ProcessGroup;
+/**
+ * 进程控制块.
+ */
typedef struct ProcessCB {
- CHAR processName[OS_PCB_NAME_LEN]; /**< Process name */
- UINT32 processID; /**< Process ID */
+ CHAR processName[OS_PCB_NAME_LEN]; /**< Process name | 进程名称 */
+ UINT32 processID; /**< Process ID = leader thread ID | 进程ID,由进程池分配,范围[0,64] */
UINT16 processStatus; /**< [15:4] Process Status; [3:0] The number of threads currently
- running in the process */
- UINT16 consoleID; /**< The console id of task belongs */
- UINT16 processMode; /**< Kernel Mode:0; User Mode:1; */
+ running in the process | 这里设计很巧妙.用一个变量表示了两层逻辑 数量和状态,点赞! @note_good 从这里也可以看出一个进程可以有多个正在运行的任务*/
+ UINT16 consoleID; /**< The console id of task belongs | 任务的控制台id归属 */
+ UINT16 processMode; /**< Kernel Mode:0; User Mode:1; | 模式指定为内核还是用户进程 */
struct ProcessCB *parentProcess; /**< Parent process */
- UINT32 exitCode; /**< Process exit status */
- LOS_DL_LIST pendList; /**< Block list to which the process belongs */
- LOS_DL_LIST childrenList; /**< Children process list */
- LOS_DL_LIST exitChildList; /**< Exit children process list */
- LOS_DL_LIST siblingList; /**< Linkage in parent's children list */
- ProcessGroup *pgroup; /**< Process group to which a process belongs */
- LOS_DL_LIST subordinateGroupList; /**< Linkage in group list */
+ UINT32 exitCode; /**< Process exit status | 进程退出状态码*/
+ LOS_DL_LIST pendList; /**< Block list to which the process belongs | 进程所在的阻塞列表,进程因阻塞挂入相应的链表.*/
+ LOS_DL_LIST childrenList; /**< Children process list | 孩子进程都挂到这里,形成双循环链表*/
+ LOS_DL_LIST exitChildList; /**< Exit children process list | 要退出的孩子进程链表,白发人要送黑发人.*/
+ LOS_DL_LIST siblingList; /**< Linkage in parent's children list | 兄弟进程链表, 56个民族是一家,来自同一个父进程.*/
+ ProcessGroup *pgroup; /**< Process group to which a process belongs | 所属进程组*/
+ LOS_DL_LIST subordinateGroupList; /**< Linkage in group list | 进程组员链表*/
LosTaskCB *threadGroup;
- LOS_DL_LIST threadSiblingList; /**< List of threads under this process */
- volatile UINT32 threadNumber; /**< Number of threads alive under this process */
- UINT32 threadCount; /**< Total number of threads created under this process */
- LOS_DL_LIST waitList; /**< The process holds the waitLits to support wait/waitpid */
+ LOS_DL_LIST threadSiblingList; /**< List of threads under this process | 进程的线程(任务)列表 */
+ volatile UINT32 threadNumber; /**< Number of threads alive under this process | 此进程下的活动线程数*/
+ UINT32 threadCount; /**< Total number of threads created under this process | 在此进程下创建的线程总数*/ //
+ LOS_DL_LIST waitList; /**< The process holds the waitLits to support wait/waitpid | 父进程通过进程等待的方式,回收子进程资源,获取子进程退出信息*/
#ifdef LOSCFG_KERNEL_SMP
- UINT32 timerCpu; /**< CPU core number of this task is delayed or pended */
+ UINT32 timerCpu; /**< CPU core number of this task is delayed or pended | 此任务被延期或阻塞时所在的CPU核编号*/
#endif
- UINTPTR sigHandler; /**< Signal handler */
- sigset_t sigShare; /**< Signal share bit */
+ UINTPTR sigHandler; /**< Signal handler | 信号处理函数,处理如 SIGSYS 等信号*/
+ sigset_t sigShare; /**< Signal share bit | 信号共享位 sigset_t是个64位的变量,对应64种信号*/
#ifdef LOSCFG_KERNEL_LITEIPC
- ProcIpcInfo *ipcInfo; /**< Memory pool for lite ipc */
+ ProcIpcInfo *ipcInfo; /**< Memory pool for lite ipc | 用于进程间通讯的虚拟设备文件系统,设备装载点为 /dev/lite_ipc*/
#endif
#ifdef LOSCFG_KERNEL_VM
- LosVmSpace *vmSpace; /**< VMM space for processes */
+ LosVmSpace *vmSpace; /**< VMM space for processes | 虚拟空间,描述进程虚拟内存的数据结构,linux称为内存描述符 */
#endif
#ifdef LOSCFG_FS_VFS
- struct files_struct *files; /**< Files held by the process */
-#endif
- timer_t timerID; /**< ITimer */
+ struct files_struct *files; /**< Files held by the process | 进程所持有的所有文件,注者称之为进程的文件管理器*/
+#endif //每个进程都有属于自己的文件管理器,记录对文件的操作. 注意:一个文件可以被多个进程操作
+ timer_t timerID; /**< ITimer */
-#ifdef LOSCFG_SECURITY_CAPABILITY
- User *user;
- UINT32 capability;
+#ifdef LOSCFG_SECURITY_CAPABILITY //安全能力
+ User *user; ///< 进程的拥有者
+ UINT32 capability; ///< 安全能力范围 对应 CAP_SETGID
#endif
-#ifdef LOSCFG_SECURITY_VID
- TimerIdMap timerIdMap;
+#ifdef LOSCFG_SECURITY_VID //虚拟ID映射功能
+ TimerIdMap timerIdMap;
#endif
#ifdef LOSCFG_DRIVERS_TZDRIVER
- struct Vnode *execVnode; /**< Exec bin of the process */
+ struct Vnode *execVnode; /**< Exec bin of the process | 进程的可执行文件 */
#endif
- mode_t umask;
+ mode_t umask; ///< umask(user file-creation mode mask)为用户文件创建掩码,是创建文件或文件夹时默认权限的基础。
#ifdef LOSCFG_KERNEL_CPUP
- OsCpupBase *processCpup; /**< Process cpu usage */
+ OsCpupBase *processCpup; /**< Process cpu usage | 进程占用CPU情况统计*/
#endif
- struct rlimit *resourceLimit;
+ struct rlimit *resourceLimit; ///< 每个进程在运行时系统不会无限制的允许单个进程不断的消耗资源,因此都会设置资源限制。
#ifdef LOSCFG_KERNEL_CONTAINER
- Container *container;
+ Container *container; ///< 内核容器
#ifdef LOSCFG_USER_CONTAINER
- struct Credentials *credentials;
+ struct Credentials *credentials; ///< 用户身份证
#endif
#endif
#ifdef LOSCFG_PROC_PROCESS_DIR
- struct ProcDirEntry *procDir;
+ struct ProcDirEntry *procDir; ///< 目录文件项
#endif
#ifdef LOSCFG_KERNEL_PLIMITS
ProcLimiterSet *plimits;
@@ -161,8 +165,8 @@ extern UINT32 g_processMaxNum;
#define OS_PCB_FROM_TCB(taskCB) ((LosProcessCB *)((taskCB)->processCB))
#define OS_PCB_FROM_TID(taskID) ((LosProcessCB *)(OS_TCB_FROM_TID(taskID)->processCB))
#define OS_GET_PGROUP_LEADER(pgroup) ((LosProcessCB *)((pgroup)->pgroupLeader))
-#define OS_PCB_FROM_SIBLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, siblingList)
-#define OS_PCB_FROM_PENDLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, pendList)
+#define OS_PCB_FROM_SIBLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, siblingList)///< 通过siblingList节点找到 LosProcessCB
+#define OS_PCB_FROM_PENDLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, pendList) ///< 通过pendlist节点找到 LosProcessCB
/**
* @ingroup los_process
@@ -202,7 +206,7 @@ extern UINT32 g_processMaxNum;
*
* The process is run out but the resources occupied by the process are not recovered.
*/
-#define OS_PROCESS_STATUS_ZOMBIES 0x0100U
+#define OS_PROCESS_STATUS_ZOMBIES 0x0100U ///< 进程状态: 僵死
/**
* @ingroup los_process
@@ -211,7 +215,7 @@ extern UINT32 g_processMaxNum;
* The process status equal this is process control block unused,
* coexisting with OS_PROCESS_STATUS_ZOMBIES means that the control block is not recovered.
*/
-#define OS_PROCESS_FLAG_UNUSED 0x0200U
+#define OS_PROCESS_FLAG_UNUSED 0x0200U ///< 进程未使用标签,一般用于进程的初始状态 freelist里面都是这种标签
/**
* @ingroup los_process
@@ -219,7 +223,7 @@ extern UINT32 g_processMaxNum;
*
* The process has been call exit, it only works with multiple cores.
*/
-#define OS_PROCESS_FLAG_EXIT 0x0400U
+#define OS_PROCESS_FLAG_EXIT 0x0400U ///< 进程退出标签,退出的进程进入回收链表等待回收资源
/**
* @ingroup los_process
@@ -227,7 +231,7 @@ extern UINT32 g_processMaxNum;
*
* The process is the leader of the process group.
*/
-#define OS_PROCESS_FLAG_GROUP_LEADER 0x0800U
+#define OS_PROCESS_FLAG_GROUP_LEADER 0x0800U ///< 进程当了进程组领导标签
/**
* @ingroup los_process
@@ -235,21 +239,21 @@ extern UINT32 g_processMaxNum;
*
* The process has performed the exec operation.
*/
-#define OS_PROCESS_FLAG_ALREADY_EXEC 0x1000U
+#define OS_PROCESS_FLAG_ALREADY_EXEC 0x1000U ///< 进程已执行exec操作 load elf时使用
/**
* @ingroup los_process
* Flag that indicates the process or process control block status.
*
* The process is dying or already dying.
- */
-#define OS_PROCESS_STATUS_INACTIVE (OS_PROCESS_FLAG_EXIT | OS_PROCESS_STATUS_ZOMBIES)
+ */ /// 进程不活跃状态定义: 身上贴有退出标签或状态为僵死的进程
+#define OS_PROCESS_STATUS_INACTIVE (OS_PROCESS_FLAG_EXIT | OS_PROCESS_STATUS_ZOMBIES)
/**
* @ingroup los_process
* Used to check if the process control block is unused.
*/
-STATIC INLINE BOOL OsProcessIsUnused(const LosProcessCB *processCB)
+STATIC INLINE BOOL OsProcessIsUnused(const LosProcessCB *processCB)//检查进程控制块是否未被使用?
{
return ((processCB->processStatus & OS_PROCESS_FLAG_UNUSED) != 0);
}
@@ -257,8 +261,8 @@ STATIC INLINE BOOL OsProcessIsUnused(const LosProcessCB *processCB)
/**
* @ingroup los_process
* Used to check if the process is inactive.
- */
-STATIC INLINE BOOL OsProcessIsInactive(const LosProcessCB *processCB)
+ */ /// 进程不活跃判断:身上贴有不使用或不活跃标签的进程
+STATIC INLINE BOOL OsProcessIsInactive(const LosProcessCB *processCB)//查下进程是否不活跃?
{
return ((processCB->processStatus & (OS_PROCESS_FLAG_UNUSED | OS_PROCESS_STATUS_INACTIVE)) != 0);
}
@@ -266,8 +270,8 @@ STATIC INLINE BOOL OsProcessIsInactive(const LosProcessCB *processCB)
/**
* @ingroup los_process
* Used to check if the process is dead.
- */
-STATIC INLINE BOOL OsProcessIsDead(const LosProcessCB *processCB)
+ */ /// 进程死亡的定义: 状态为僵死(ZOMBIES)的进程
+STATIC INLINE BOOL OsProcessIsDead(const LosProcessCB *processCB)//检查进程是否已死亡(僵死)?
{
return ((processCB->processStatus & OS_PROCESS_STATUS_ZOMBIES) != 0);
}
@@ -286,64 +290,64 @@ STATIC INLINE BOOL OsProcessIsPGroupLeader(const LosProcessCB *processCB)
* @ingroup los_process
* The highest priority of a kernel mode process.
*/
-#define OS_PROCESS_PRIORITY_HIGHEST 0
+#define OS_PROCESS_PRIORITY_HIGHEST 0 ///< 进程最高优先级
/**
* @ingroup los_process
* The lowest priority of a kernel mode process
*/
-#define OS_PROCESS_PRIORITY_LOWEST 31
+#define OS_PROCESS_PRIORITY_LOWEST 31 ///< 进程最低优先级
/**
* @ingroup los_process
* The highest priority of a user mode process.
*/
-#define OS_USER_PROCESS_PRIORITY_HIGHEST 10
+#define OS_USER_PROCESS_PRIORITY_HIGHEST 10 ///< 内核模式和用户模式的优先级分割线 10-31 用户级, 0-9内核级
/**
* @ingroup los_process
* The lowest priority of a user mode process
*/
-#define OS_USER_PROCESS_PRIORITY_LOWEST OS_PROCESS_PRIORITY_LOWEST
+#define OS_USER_PROCESS_PRIORITY_LOWEST OS_PROCESS_PRIORITY_LOWEST ///< 用户进程的最低优先级
/**
* @ingroup los_process
* User state root process default priority
*/
-#define OS_PROCESS_USERINIT_PRIORITY 28
+#define OS_PROCESS_USERINIT_PRIORITY 28 ///< 用户进程默认的优先级,28级好低啊
/**
* @ingroup los_process
* ID of the kernel idle process
*/
-#define OS_KERNEL_IDLE_PROCESS_ID 0U
+#define OS_KERNEL_IDLE_PROCESS_ID 0U //0号进程为空闲进程
/**
* @ingroup los_process
* ID of the user root process
*/
-#define OS_USER_ROOT_PROCESS_ID 1U
+#define OS_USER_ROOT_PROCESS_ID 1U //1号为用户态根进程
/**
* @ingroup los_process
* ID of the kernel root process
*/
-#define OS_KERNEL_ROOT_PROCESS_ID 2U
+#define OS_KERNEL_ROOT_PROCESS_ID 2U //2号为内核态根进程
+#define OS_TASK_DEFAULT_STACK_SIZE 0x2000 ///< task默认栈大小 8K
+#define OS_USER_TASK_SYSCALL_STACK_SIZE 0x3000 ///< 用户通过系统调用的栈大小 12K ,这时是运行在内核模式下
+#define OS_USER_TASK_STACK_SIZE 0x100000 ///< 用户任务运行在用户空间的栈大小 1M
-#define OS_TASK_DEFAULT_STACK_SIZE 0x2000
-#define OS_USER_TASK_SYSCALL_STACK_SIZE 0x3000
-#define OS_USER_TASK_STACK_SIZE 0x100000
-
-#define OS_KERNEL_MODE 0x0U
-#define OS_USER_MODE 0x1U
+#define OS_KERNEL_MODE 0x0U ///< 内核态
+#define OS_USER_MODE 0x1U ///< 用户态
+/*! 用户态进程*/
STATIC INLINE BOOL OsProcessIsUserMode(const LosProcessCB *processCB)
{
return (processCB->processMode == OS_USER_MODE);
}
-#define LOS_PRIO_PROCESS 0U
-#define LOS_PRIO_PGRP 1U
-#define LOS_PRIO_USER 2U
+#define LOS_PRIO_PROCESS 0U ///< 进程标识
+#define LOS_PRIO_PGRP 1U ///< 进程组标识
+#define LOS_PRIO_USER 2U ///< 用户标识
#define OS_USER_PRIVILEGE_PROCESS_GROUP ((UINTPTR)OsGetUserInitProcess())
#define OS_KERNEL_PROCESS_GROUP ((UINTPTR)OsGetKernelInitProcess())
@@ -353,40 +357,40 @@ STATIC INLINE BOOL OsProcessIsUserMode(const LosProcessCB *processCB)
* 31 15 8 7 0
* | | exit code | core dump | signal |
*/
-#define OS_PRO_EXIT_OK 0
-
+#define OS_PRO_EXIT_OK 0 ///< 进程正常退出
+/// 置进程退出码第七位为1
STATIC INLINE VOID OsProcessExitCodeCoreDumpSet(LosProcessCB *processCB)
{
- processCB->exitCode |= 0x80U;
+ processCB->exitCode |= 0x80U; // 0b10000000
}
-
+/// 设置进程退出信号(0 ~ 7)
STATIC INLINE VOID OsProcessExitCodeSignalSet(LosProcessCB *processCB, UINT32 signal)
{
- processCB->exitCode |= signal & 0x7FU;
+ processCB->exitCode |= signal & 0x7FU;// 0b01111111
}
-
+/// 清除进程退出信号(0 ~ 7)
STATIC INLINE VOID OsProcessExitCodeSignalClear(LosProcessCB *processCB)
{
- processCB->exitCode &= (~0x7FU);
+ processCB->exitCode &= (~0x7FU);// 低7位全部清0
}
-
+/// 进程退出信号是否被设置过,默认是 0 ,如果 & 0x7FU 还是 0 ,说明没有被设置过.
STATIC INLINE BOOL OsProcessExitCodeSignalIsSet(LosProcessCB *processCB)
{
return (processCB->exitCode) & 0x7FU;
}
-
+/// 设置进程退出号(8 ~ 15)
STATIC INLINE VOID OsProcessExitCodeSet(LosProcessCB *processCB, UINT32 code)
{
processCB->exitCode |= ((code & 0x000000FFU) << 8U) & 0x0000FF00U; /* 8: Move 8 bits to the left, exitCode */
}
#define OS_PID_CHECK_INVALID(pid) (((UINT32)(pid)) >= g_processMaxNum)
-
+/*! 内联函数 进程ID是否有效 */
STATIC INLINE BOOL OsProcessIDUserCheckInvalid(UINT32 pid)
{
return ((pid >= g_processMaxNum) || (pid == 0));
}
-
+/*! 获取当前进程PCB */
STATIC INLINE LosProcessCB *OsCurrProcessGet(VOID)
{
UINT32 intSave;
@@ -398,6 +402,7 @@ STATIC INLINE LosProcessCB *OsCurrProcessGet(VOID)
}
#ifdef LOSCFG_SECURITY_CAPABILITY
+/*! 获取当前进程的所属用户 */
STATIC INLINE User *OsCurrUserGet(VOID)
{
User *user = NULL;
@@ -469,14 +474,14 @@ STATIC INLINE UINT32 OsGetRootPid(const LosProcessCB *processCB)
/*
* return immediately if no child has exited.
*/
-#define LOS_WAIT_WNOHANG (1 << 0U)
+#define LOS_WAIT_WNOHANG (1 << 0U) ///< 如果没有孩子进程退出,则立即返回,而不是阻塞在这个函数上等待;如果结束了,则返回该子进程的进程号。
/*
* return if a child has stopped (but not traced via ptrace(2)).
* Status for traced children which have stopped is provided even
* if this option is not specified.
*/
-#define LOS_WAIT_WUNTRACED (1 << 1U)
+#define LOS_WAIT_WUNTRACED (1 << 1U) ///< 如果子进程进入暂停情况则马上返回,不予以理会结束状态。untraced
#define LOS_WAIT_WSTOPPED (1 << 1U)
/*
@@ -488,7 +493,7 @@ STATIC INLINE UINT32 OsGetRootPid(const LosProcessCB *processCB)
* return if a stopped child has been resumed by delivery of SIGCONT.
* (For Linux-only options, see below.)
*/
-#define LOS_WAIT_WCONTINUED (1 << 3U)
+#define LOS_WAIT_WCONTINUED (1 << 3U) ///< 可获取子进程恢复执行的状态,也就是可获取continued状态 continued
/*
* Leave the child in a waitable state;
@@ -499,30 +504,30 @@ STATIC INLINE UINT32 OsGetRootPid(const LosProcessCB *processCB)
/*
* Indicates that you are already in a wait state
*/
-#define OS_PROCESS_WAIT (1 << 15U)
+#define OS_PROCESS_WAIT (1 << 15U) ///< 表示已经处于等待状态
/*
* Wait for any child process to finish
*/
-#define OS_PROCESS_WAIT_ANY OS_TASK_WAIT_ANYPROCESS
+#define OS_PROCESS_WAIT_ANY OS_TASK_WAIT_ANYPROCESS ///< 等待任意子进程完成
/*
* Wait for the child process specified by the pid to finish
*/
-#define OS_PROCESS_WAIT_PRO OS_TASK_WAIT_PROCESS
+#define OS_PROCESS_WAIT_PRO OS_TASK_WAIT_PROCESS ///< 等待pid指定的子进程完成
/*
* Waits for any child process in the specified process group to finish.
*/
-#define OS_PROCESS_WAIT_GID OS_TASK_WAIT_GID
+#define OS_PROCESS_WAIT_GID OS_TASK_WAIT_GID ///< 等待指定进程组中的任意子进程完成
#define OS_PROCESS_INFO_ALL 1
-#define OS_PROCESS_DEFAULT_UMASK 0022
-
-extern UINTPTR __user_init_entry;
-extern UINTPTR __user_init_bss;
-extern UINTPTR __user_init_end;
-extern UINTPTR __user_init_load_addr;
+#define OS_PROCESS_DEFAULT_UMASK 0022 ///< 系统默认的用户掩码(umask),大多数的Linux系统的默认掩码为022。
+//用户掩码的作用是用户在创建文件时从文件的默认权限中去除掩码中的权限。所以文件创建之后的权限实际为:创建文件的权限为:0666-0022=0644。创建文件夹的权限为:0777-0022=0755
+extern UINTPTR __user_init_entry; ///< 第一个用户态进程(init)的入口地址 查看 LITE_USER_SEC_ENTRY
+extern UINTPTR __user_init_bss; ///< 查看 LITE_USER_SEC_BSS ,赋值由liteos.ld完成
+extern UINTPTR __user_init_end; ///< init 进程的用户空间初始化结束地址
+extern UINTPTR __user_init_load_addr;///< init 进程的加载地址 ,由链接器赋值
extern UINT32 OsProcessInit(VOID);
extern UINT32 OsSystemProcessCreate(VOID);
extern VOID OsProcessNaturalExit(LosProcessCB *processCB, UINT32 status);
diff --git a/src/kernel_liteos_a/kernel/base/include/los_queue_pri.h b/src/kernel_liteos_a/kernel/base/include/los_queue_pri.h
index 4efafe23..4b035457 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_queue_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_queue_pri.h
@@ -39,16 +39,35 @@
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+/**
+ * @brief @note_pic
+ * @verbatim
+ 鸿蒙对消息队列图
+ |<-----消息内容区,有2个消息---->|
++------------+------------------------------------------------------------+
+| | |---------------|---------------| |
+| | |---------------|---------------| |
+| | |---------------|---------------| |
++-------------------------------------------------------------------------+
+| | ^ ^ |
+|<消息大小> | | | |
+| | |head |tail |
+| + +任务读消息 +任务写消息 |
+| |
+| |
++<-------------+ 队列长度,消息节点个数, +------------->+
+ * @endverbatim
+ */
typedef enum {
- OS_QUEUE_READ = 0,
- OS_QUEUE_WRITE = 1,
+ OS_QUEUE_READ = 0, ///< 读队列
+ OS_QUEUE_WRITE = 1, ///< 写队列
OS_QUEUE_N_RW = 2
} QueueReadWrite;
typedef enum {
- OS_QUEUE_HEAD = 0,
- OS_QUEUE_TAIL = 1
+ OS_QUEUE_HEAD = 0, ///< 队列头部标识
+ OS_QUEUE_TAIL = 1 ///< 队列尾部标识
} QueueHeadTail;
#define OS_QUEUE_OPERATE_TYPE(ReadOrWrite, HeadOrTail) (((UINT32)(HeadOrTail) << 1) | (ReadOrWrite))
@@ -64,18 +83,21 @@ typedef enum {
/**
* @ingroup los_queue
* Queue information block structure
+ * @attention 读写队列分离
*/
-typedef struct TagQueueCB {
- UINT8 *queueHandle; /**< Pointer to a queue handle */
- UINT16 queueState; /**< Queue state */
- UINT16 queueLen; /**< Queue length */
- UINT16 queueSize; /**< Node size */
- UINT32 queueID; /**< queueID */
- UINT16 queueHead; /**< Node head */
- UINT16 queueTail; /**< Node tail */
- UINT16 readWriteableCnt[OS_QUEUE_N_RW]; /**< Count of readable or writable resources, 0:readable, 1:writable */
- LOS_DL_LIST readWriteList[OS_QUEUE_N_RW]; /**< the linked list to be read or written, 0:readlist, 1:writelist */
- LOS_DL_LIST memList; /**< Pointer to the memory linked list */
+typedef struct TagQueueCB {
+ UINT8 *queueHandle; /**< Pointer to a queue handle | 队列消息内存空间的指针*/
+ UINT16 queueState; /**< Queue state | 队列状态*/
+ UINT16 queueLen; /**< Queue length | 队列中消息节点个数,即队列长度,由创建时确定,不再改变*/
+ UINT16 queueSize; /**< Node size | 消息节点大小,由创建时确定,不再改变,即定义了每个消息长度的上限.*/
+ UINT32 queueID; /**< queueID | 队列ID*/
+ UINT16 queueHead; /**< Node head | 消息头节点位置(数组下标)*/
+ UINT16 queueTail; /**< Node tail | 消息尾节点位置(数组下标)*/
+ UINT16 readWriteableCnt[OS_QUEUE_N_RW]; /**< Count of readable or writable resources, 0:readable, 1:writable
+ | 队列中可写或可读消息数,0表示可读,1表示可写*/
+ LOS_DL_LIST readWriteList[OS_QUEUE_N_RW]; /**< the linked list to be read or written, 0:readlist, 1:writelist
+ | 挂的都是等待读/写消息的任务链表,0表示读消息的链表,1表示写消息的任务链表*/
+ LOS_DL_LIST memList; /**< Pointer to the memory linked list | 内存块链表*/
} LosQueueCB;
/* queue state */
@@ -83,13 +105,13 @@ typedef struct TagQueueCB {
* @ingroup los_queue
* Message queue state: not in use.
*/
-#define OS_QUEUE_UNUSED 0
+#define OS_QUEUE_UNUSED 0 ///< 队列没有使用
/**
* @ingroup los_queue
* Message queue state: used.
*/
-#define OS_QUEUE_INUSED 1
+#define OS_QUEUE_INUSED 1 ///< 队列被使用
/**
* @ingroup los_queue
diff --git a/src/kernel_liteos_a/kernel/base/include/los_rwlock_pri.h b/src/kernel_liteos_a/kernel/base/include/los_rwlock_pri.h
index 4b0711f7..e0536bb2 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_rwlock_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_rwlock_pri.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -41,14 +41,14 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
-#define OS_RWLOCK_MAGIC 0xEFDCAU
+#define OS_RWLOCK_MAGIC 0xEFDCAU ///< 读写锁魔法数字
enum RwlockMode {
- RWLOCK_NONE_MODE,
- RWLOCK_READ_MODE,
- RWLOCK_WRITE_MODE,
- RWLOCK_READFIRST_MODE,
- RWLOCK_WRITEFIRST_MODE
+ RWLOCK_NONE_MODE, ///< 自由模式: 读写链表都没有内容
+ RWLOCK_READ_MODE, ///< 读模式: 读链表有数据,写链表没有数据
+ RWLOCK_WRITE_MODE, ///< 写模式: 写链表有数据,读链表没有数据
+ RWLOCK_READFIRST_MODE, ///< 读优先模式: 读链表中的任务最高优先级高于写链表中任务最高优先级
+ RWLOCK_WRITEFIRST_MODE ///< 写优先模式: 写链表中的任务最高优先级高于读链表中任务最高优先级
};
extern UINT32 OsRwlockRdUnsafe(LosRwlock *rwlock, UINT32 timeout);
diff --git a/src/kernel_liteos_a/kernel/base/include/los_sched_pri.h b/src/kernel_liteos_a/kernel/base/include/los_sched_pri.h
index 1bbf0070..d86bf5b7 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_sched_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_sched_pri.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -62,8 +62,8 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
-#define OS_SCHED_MINI_PERIOD (OS_SYS_CLOCK / LOSCFG_BASE_CORE_TICK_PER_SECOND_MINI)
-#define OS_TICK_RESPONSE_PRECISION (UINT32)((OS_SCHED_MINI_PERIOD * 75) / 100)
+#define OS_SCHED_MINI_PERIOD (OS_SYS_CLOCK / LOSCFG_BASE_CORE_TICK_PER_SECOND_MINI) ///< 1毫秒的时钟周期
+#define OS_TICK_RESPONSE_PRECISION (UINT32)((OS_SCHED_MINI_PERIOD * 75) / 100) ///< 不明白为啥是 * 75 就精确了??? @note_thinking
#define OS_SCHED_MAX_RESPONSE_TIME OS_SORT_LINK_INVALID_TIME
#define OS_SCHED_TICK_TO_CYCLE(ticks) ((UINT64)ticks * OS_CYCLE_PER_TICK)
#define AFFI_MASK_TO_CPUID(mask) ((UINT16)((mask) - 1))
@@ -78,30 +78,30 @@ extern UINT32 g_taskScheduled;
typedef struct TagTaskCB LosTaskCB;
typedef BOOL (*SCHED_TL_FIND_FUNC)(UINTPTR, UINTPTR);
-
+//获取当前调度经历了多少个时间周期
STATIC INLINE UINT64 OsGetCurrSchedTimeCycle(VOID)
{
return HalClockGetCycles();
}
typedef enum {
- INT_NO_RESCH = 0x0, /* no needs to schedule */
- INT_PEND_RESCH = 0x1, /* pending schedule flag */
- INT_PEND_TICK = 0x2, /* pending tick */
+ INT_NO_RESCH = 0x0, /* no needs to schedule | 无需调度*/
+ INT_PEND_RESCH = 0x1, /* pending schedule flag | 因阻塞而引起的调度*/
+ INT_PEND_TICK = 0x2, /* pending tick | 因Tick而引起的调度*/
} SchedFlag;
-#define OS_PRIORITY_QUEUE_NUM 32
+#define OS_PRIORITY_QUEUE_NUM 32 //优先级队列数量(优先级0~31)
typedef struct {
- LOS_DL_LIST priQueList[OS_PRIORITY_QUEUE_NUM];
- UINT32 readyTasks[OS_PRIORITY_QUEUE_NUM];
- UINT32 queueBitmap;
+ LOS_DL_LIST priQueList[OS_PRIORITY_QUEUE_NUM]; //任务
+ UINT32 readyTasks[OS_PRIORITY_QUEUE_NUM]; //各优先级的就绪任务数
+ UINT32 queueBitmap; //位图
} HPFQueue;
typedef struct {
- HPFQueue queueList[OS_PRIORITY_QUEUE_NUM];
+ HPFQueue queueList[OS_PRIORITY_QUEUE_NUM]; //
UINT32 queueBitmap;
} HPFRunqueue;
-
+//调度运行队列
typedef struct {
LOS_DL_LIST root;
LOS_DL_LIST waitList;
@@ -119,10 +119,10 @@ typedef struct {
UINT32 schedFlag; /* pending scheduler flag */
} SchedRunqueue;
-extern SchedRunqueue g_schedRunqueue[LOSCFG_KERNEL_CORE_NUM];
+extern SchedRunqueue g_schedRunqueue[LOSCFG_KERNEL_CORE_NUM];//每个CPU核都有一个属于自己的调度队列
VOID OsSchedExpireTimeUpdate(VOID);
-
+//获取当前CPU的调度运行队列
STATIC INLINE SchedRunqueue *OsSchedRunqueue(VOID)
{
return &g_schedRunqueue[ArchCurrCpuid()];
@@ -223,10 +223,10 @@ STATIC INLINE VOID OsSchedRunqueuePendingSet(VOID)
OsSchedRunqueue()->schedFlag |= INT_PEND_RESCH;
}
-#define LOS_SCHED_NORMAL 0U
-#define LOS_SCHED_FIFO 1U
-#define LOS_SCHED_RR 2U
-#define LOS_SCHED_IDLE 3U
+#define LOS_SCHED_NORMAL 0U
+#define LOS_SCHED_FIFO 1U
+#define LOS_SCHED_RR 2U
+#define LOS_SCHED_IDLE 3U
#define LOS_SCHED_DEADLINE 6U
typedef struct {
@@ -242,19 +242,21 @@ typedef struct {
UINT32 periodUs;
} SchedParam;
-typedef struct {
- UINT16 policy; /* This field must be present for all scheduling policies and must be the first in the structure */
- UINT16 basePrio;
- UINT16 priority;
- UINT32 initTimeSlice; /* cycle */
- UINT32 priBitmap; /* Bitmap for recording the change of task priority, the priority can not be greater than 31 */
+typedef struct {//记录任务调度信息
+ UINT16 policy; /* This field must be present for all scheduling policies and must be the first in the structure
+ | 所有调度策略都必须存在此字段,并且必须是结构中的第一个字段*/
+ UINT16 basePrio; ///< 起始优先级
+ UINT16 priority; ///< 当前优先级
+ UINT32 initTimeSlice;///< 初始化时间片
+ UINT32 priBitmap; /**< Bitmap for recording the change of task priority, the priority can not be greater than 31
+ | 记录任务优先级变化的位图,优先级不能大于31 */
} SchedHPF;
#define EDF_UNUSED 0
#define EDF_NEXT_PERIOD 1
#define EDF_WAIT_FOREVER 2
#define EDF_INIT 3
-typedef struct {
+typedef struct { //调度策略
UINT16 policy;
UINT16 cpuid;
UINT32 flags;
@@ -267,29 +269,29 @@ typedef struct {
typedef struct {
union {
SchedEDF edf;
- SchedHPF hpf;
+ SchedHPF hpf; // 目前只支持 优先级策略(Highest-Priority-First,HPF)
};
} SchedPolicy;
-typedef struct {
- VOID (*dequeue)(SchedRunqueue *rq, LosTaskCB *taskCB);
- VOID (*enqueue)(SchedRunqueue *rq, LosTaskCB *taskCB);
- VOID (*start)(SchedRunqueue *rq, LosTaskCB *taskCB);
- VOID (*exit)(LosTaskCB *taskCB);
+typedef struct {//调度接口函数
+ VOID (*dequeue)(SchedRunqueue *rq, LosTaskCB *taskCB); ///< 出队列
+ VOID (*enqueue)(SchedRunqueue *rq, LosTaskCB *taskCB); ///< 入队列
+ VOID (*start)(SchedRunqueue *rq, LosTaskCB *taskCB); ///< 开始执行任务
+ VOID (*exit)(LosTaskCB *taskCB); ///< 任务退出
UINT64 (*waitTimeGet)(LosTaskCB *taskCB);
- UINT32 (*wait)(LosTaskCB *runTask, LOS_DL_LIST *list, UINT32 timeout);
- VOID (*wake)(LosTaskCB *taskCB);
- BOOL (*schedParamModify)(LosTaskCB *taskCB, const SchedParam *param);
- UINT32 (*schedParamGet)(const LosTaskCB *taskCB, SchedParam *param);
- UINT32 (*delay)(LosTaskCB *taskCB, UINT64 waitTime);
- VOID (*yield)(LosTaskCB *taskCB);
- UINT32 (*suspend)(LosTaskCB *taskCB);
- UINT32 (*resume)(LosTaskCB *taskCB, BOOL *needSched);
- UINT64 (*deadlineGet)(const LosTaskCB *taskCB);
- VOID (*timeSliceUpdate)(SchedRunqueue *rq, LosTaskCB *taskCB, UINT64 currTime);
- INT32 (*schedParamCompare)(const SchedPolicy *sp1, const SchedPolicy *sp2);
- VOID (*priorityInheritance)(LosTaskCB *owner, const SchedParam *param);
- VOID (*priorityRestore)(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param);
+ UINT32 (*wait)(LosTaskCB *runTask, LOS_DL_LIST *list, UINT32 timeout); ///< 任务等待
+ VOID (*wake)(LosTaskCB *taskCB);///< 任务唤醒
+ BOOL (*schedParamModify)(LosTaskCB *taskCB, const SchedParam *param);///< 修改调度参数
+ UINT32 (*schedParamGet)(const LosTaskCB *taskCB, SchedParam *param);///< 获取调度参数
+ UINT32 (*delay)(LosTaskCB *taskCB, UINT64 waitTime);///< 延时执行
+ VOID (*yield)(LosTaskCB *taskCB);///< 让出控制权
+ UINT32 (*suspend)(LosTaskCB *taskCB);///< 挂起任务
+ UINT32 (*resume)(LosTaskCB *taskCB, BOOL *needSched);///< 恢复任务
+ UINT64 (*deadlineGet)(const LosTaskCB *taskCB);///< 获取最后期限
+ VOID (*timeSliceUpdate)(SchedRunqueue *rq, LosTaskCB *taskCB, UINT64 currTime);///< 更新时间片
+ INT32 (*schedParamCompare)(const SchedPolicy *sp1, const SchedPolicy *sp2); ///< 比较调度参数
+ VOID (*priorityInheritance)(LosTaskCB *owner, const SchedParam *param);///< 优先级继承
+ VOID (*priorityRestore)(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param);///< 优先级恢复
} SchedOps;
/**
@@ -298,7 +300,7 @@ typedef struct {
*
* Highest task priority.
*/
-#define OS_TASK_PRIORITY_HIGHEST 0
+#define OS_TASK_PRIORITY_HIGHEST 0 /// 任务最高优先级
/**
* @ingroup los_sched
@@ -306,7 +308,7 @@ typedef struct {
*
* Lowest task priority.
*/
-#define OS_TASK_PRIORITY_LOWEST 31
+#define OS_TASK_PRIORITY_LOWEST 31 /// 任务最低优先级
/**
* @ingroup los_sched
@@ -314,7 +316,7 @@ typedef struct {
*
* The task is init.
*/
-#define OS_TASK_STATUS_INIT 0x0001U
+#define OS_TASK_STATUS_INIT 0x0001U /// 任务初始状态
/**
* @ingroup los_sched
@@ -390,80 +392,80 @@ typedef struct {
* The delayed operation of this task is frozen.
*/
#define OS_TASK_STATUS_FROZEN 0x0200U
-
#define OS_TCB_NAME_LEN 32
typedef struct TagTaskCB {
- VOID *stackPointer; /**< Task stack pointer */
- UINT16 taskStatus; /**< Task status */
-
- UINT64 startTime; /**< The start time of each phase of task */
- UINT64 waitTime; /**< Task delay time, tick number */
- UINT64 irqStartTime; /**< Interrupt start time */
- UINT32 irqUsedTime; /**< Interrupt consumption time */
- INT32 timeSlice; /**< Task remaining time slice */
- SortLinkList sortList; /**< Task sortlink node */
+ VOID *stackPointer; /**< Task stack pointer | 内核栈指针位置(SP) */
+ UINT16 taskStatus; /**< Task status | 各种状态标签,可以拥有多种标签,按位标识 */
+
+ UINT64 startTime; /**< The start time of each phase of task | 任务开始时间 */
+ UINT64 waitTime; /**< Task delay time, tick number | 设置任务调度延期时间 */
+ UINT64 irqStartTime; /**< Interrupt start time | 任务中断开始时间 */
+ UINT32 irqUsedTime; /**< Interrupt consumption time | 任务中断消耗时间 */
+ INT32 timeSlice; /**< Task remaining time slice | 任务剩余时间片 */
+ SortLinkList sortList; /**< Task sortlink node | 跟CPU捆绑的任务排序链表节点,上面挂的是就绪队列的下一个阶段,进入CPU要执行的任务队列 */
const SchedOps *ops;
SchedPolicy sp;
- UINT32 stackSize; /**< Task stack size */
- UINTPTR topOfStack; /**< Task stack top */
- UINT32 taskID; /**< Task ID */
- TSK_ENTRY_FUNC taskEntry; /**< Task entrance function */
- VOID *joinRetval; /**< pthread adaption */
- VOID *taskMux; /**< Task-held mutex */
- VOID *taskEvent; /**< Task-held event */
- UINTPTR args[4]; /**< Parameter, of which the maximum number is 4 */
- CHAR taskName[OS_TCB_NAME_LEN]; /**< Task name */
- LOS_DL_LIST pendList; /**< Task pend node */
- LOS_DL_LIST threadList; /**< thread list */
- UINT32 eventMask; /**< Event mask */
- UINT32 eventMode; /**< Event mode */
+ UINT32 stackSize; /**< Task stack size | 内核态栈大小,内存来自内核空间 */
+ UINTPTR topOfStack; /**< Task stack top | 内核态栈顶 bottom = top + size */
+ UINT32 taskID; /**< Task ID | 任务ID,任务池本质是一个大数组,ID就是数组的索引,默认 < 128 */
+ TSK_ENTRY_FUNC taskEntry; /**< Task entrance function | 任务执行入口地址 */
+ VOID *joinRetval; /**< pthread adaption | 用来存储join线程的入口地址 */
+ VOID *taskMux; /**< Task-held mutex | task在等哪把锁 */
+ VOID *taskEvent; /**< Task-held event | task在等哪个事件 */
+ UINTPTR args[4]; /**< Parameter, of which the maximum number is 4 | 入口函数的参数 例如 main (int argc,char *argv[]) */
+ CHAR taskName[OS_TCB_NAME_LEN]; /**< Task name | 任务的名称 */
+ LOS_DL_LIST pendList; /**< Task pend node | 如果任务阻塞时就通过它挂到各种阻塞情况的链表上,比如OsTaskWait时 */
+ LOS_DL_LIST threadList; /**< thread list | 挂到所属进程的线程链表上 */
+ UINT32 eventMask; /**< Event mask | 任务对哪些事件进行屏蔽 */
+ UINT32 eventMode; /**< Event mode | 事件三种模式(LOS_WAITMODE_AND,LOS_WAITMODE_OR,LOS_WAITMODE_CLR) */
#ifdef LOSCFG_KERNEL_CPUP
- OsCpupBase taskCpup; /**< task cpu usage */
+ OsCpupBase taskCpup; /**< task cpu usage | CPU 使用统计 */
#endif
- INT32 errorNo; /**< Error Num */
- UINT32 signal; /**< Task signal */
- sig_cb sig;
+ INT32 errorNo; /**< Error Num | 错误序号 */
+ UINT32 signal; /**< Task signal | 任务信号类型,(SIGNAL_NONE,SIGNAL_KILL,SIGNAL_SUSPEND,SIGNAL_AFFI) */
+ sig_cb sig; ///< 信号控制块,用于异步通信,类似于 linux singal模块
#ifdef LOSCFG_KERNEL_SMP
- UINT16 currCpu; /**< CPU core number of this task is running on */
- UINT16 lastCpu; /**< CPU core number of this task is running on last time */
- UINT16 cpuAffiMask; /**< CPU affinity mask, support up to 16 cores */
-#ifdef LOSCFG_KERNEL_SMP_TASK_SYNC
- UINT32 syncSignal; /**< Synchronization for signal handling */
+ UINT16 currCpu; /**< CPU core number of this task is running on | 正在运行此任务的CPU内核号 */
+ UINT16 lastCpu; /**< CPU core number of this task is running on last time | 上次运行此任务的CPU内核号 */
+ UINT16 cpuAffiMask; /**< CPU affinity mask, support up to 16 cores | CPU亲和力掩码,最多支持16核,亲和力很重要,多核情况下尽量一个任务在一个CPU核上运行,提高效率 */
+#ifdef LOSCFG_KERNEL_SMP_TASK_SYNC //多核情况下的任务同步开关,采用信号量实现
+ UINT32 syncSignal; /**< Synchronization for signal handling | 用于CPU之间同步信号量 */
#endif
-#ifdef LOSCFG_KERNEL_SMP_LOCKDEP
- LockDep lockDep;
+#ifdef LOSCFG_KERNEL_SMP_LOCKDEP //SMP死锁检测开关
+ LockDep lockDep; ///< 死锁依赖检测
#endif
#endif
-#ifdef LOSCFG_SCHED_DEBUG
- SchedStat schedStat; /**< Schedule statistics */
+#ifdef LOSCFG_SCHED_DEBUG //调试调度开关
+ SchedStat schedStat; /**< Schedule statistics | 调度统计 */
#endif
#ifdef LOSCFG_KERNEL_VM
UINTPTR archMmu;
- UINTPTR userArea;
- UINTPTR userMapBase;
- UINT32 userMapSize; /**< user thread stack size ,real size : userMapSize + USER_STACK_MIN_SIZE */
- FutexNode futex;
+ UINTPTR userArea; ///< 用户空间的堆区开始位置
+ UINTPTR userMapBase; ///< 用户空间的栈顶位置,内存来自用户空间,和topOfStack有本质的区别.
+ UINT32 userMapSize; /**< user thread stack size ,real size : userMapSize + USER_STACK_MIN_SIZE | 用户栈大小 */
+ FutexNode futex; ///< 指明任务在等待哪把快锁,一次只等一锁,锁和任务的关系是(1:N)关系
#endif
UINTPTR processCB; /**< Which belong process */
- LOS_DL_LIST joinList; /**< join list */
- LOS_DL_LIST lockList; /**< Hold the lock list */
- UINTPTR waitID; /**< Wait for the PID or GID of the child process */
+ LOS_DL_LIST joinList; /**< join list | 联结链表,允许任务之间相互释放彼此 */
+ LOS_DL_LIST lockList; /**< Hold the lock list | 该链表上挂的都是已持有的锁 */
+ UINTPTR waitID; /**< Wait for the PID or GID of the child process | 等待子进程的PID或GID */
UINT16 waitFlag; /**< The type of child process that is waiting, belonging to a group or parent,
- a specific child process, or any child process */
-#ifdef LOSCFG_KERNEL_LITEIPC
- IpcTaskInfo *ipcTaskInfo;
+ a specific child process, or any child process | 任务在等待什么信息 ? (OS_TASK_WAIT_PROCESS | OS_TASK_WAIT_GID | OS_TASK_WAIT_LITEIPC ..)
+ 往往用于被其他任务查看该任务在等待什么事件,如果事件到了就可以唤醒任务*/
+#ifdef LOSCFG_KERNEL_LITEIPC //轻量级进程间通信开关
+ IpcTaskInfo *ipcTaskInfo; ///< 任务间通讯信息结构体
#endif
#ifdef LOSCFG_KERNEL_PERF
- UINTPTR pc;
- UINTPTR fp;
+ UINTPTR pc; ///< pc寄存器
+ UINTPTR fp; ///< fp寄存器
#endif
#ifdef LOSCFG_PID_CONTAINER
- PidContainer *pidContainer;
+ PidContainer *pidContainer;//进程容器
#endif
#ifdef LOSCFG_IPC_CONTAINER
- BOOL cloneIpc;
+ BOOL cloneIpc;//是否克隆过IPC (flags & CLONE_NEWIPC)
#endif
} LosTaskCB;
@@ -507,7 +509,7 @@ STATIC INLINE LosTaskCB *OsCurrTaskGet(VOID)
{
return (LosTaskCB *)ArchCurrTaskGet();
}
-
+/// 注意任务地址由硬件保存,见于 CP15 | TPIDRPRW
STATIC INLINE VOID OsCurrTaskSet(LosTaskCB *task)
{
ArchCurrTaskSet(task);
@@ -521,13 +523,13 @@ STATIC INLINE VOID OsCurrUserTaskSet(UINTPTR thread)
STATIC INLINE VOID OsSchedIrqUsedTimeUpdate(VOID)
{
LosTaskCB *runTask = OsCurrTaskGet();
- runTask->irqUsedTime = OsGetCurrSchedTimeCycle() - runTask->irqStartTime;
+ runTask->irqUsedTime = OsGetCurrSchedTimeCycle() - runTask->irqStartTime;//获取时间差
}
-
+/// 获取中断开始时间
STATIC INLINE VOID OsSchedIrqStartTime(VOID)
{
LosTaskCB *runTask = OsCurrTaskGet();
- runTask->irqStartTime = OsGetCurrSchedTimeCycle();
+ runTask->irqStartTime = OsGetCurrSchedTimeCycle(); //获取当前时间
}
#ifdef LOSCFG_KERNEL_SMP
@@ -654,11 +656,11 @@ STATIC INLINE VOID SchedTaskUnfreeze(LosTaskCB *taskCB)
#define OS_SCHEDULER_SET(cpuid) do { \
g_taskScheduled |= (1U << (cpuid)); \
} while (0);
-
+//清除调度标识位,对应位设置为0
#define OS_SCHEDULER_CLR(cpuid) do { \
g_taskScheduled &= ~(1U << (cpuid)); \
} while (0);
-
+//调度限制:检查任务运行时间是否超出限制(LOSCFG_KERNEL_SCHED_PLIMIT)
#ifdef LOSCFG_KERNEL_SCHED_PLIMIT
BOOL OsSchedLimitCheckTime(LosTaskCB *task);
#endif
diff --git a/src/kernel_liteos_a/kernel/base/include/los_sem_debug_pri.h b/src/kernel_liteos_a/kernel/base/include/los_sem_debug_pri.h
index f58d4bca..36bc7ace 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_sem_debug_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_sem_debug_pri.h
@@ -46,7 +46,7 @@ extern UINT32 OsSemDbgInit(VOID);
STATIC INLINE UINT32 OsSemDbgInitHook(VOID)
{
#ifdef LOSCFG_DEBUG_SEMAPHORE
- return OsSemDbgInit();
+ return OsSemDbgInit();//信号量debug初始化
#else
return LOS_OK;
#endif
diff --git a/src/kernel_liteos_a/kernel/base/include/los_sem_pri.h b/src/kernel_liteos_a/kernel/base/include/los_sem_pri.h
index 8476a1bc..568d24a0 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_sem_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_sem_pri.h
@@ -45,11 +45,11 @@ extern "C" {
* Semaphore control structure.
*/
typedef struct {
- UINT8 semStat; /**< Semaphore state */
- UINT16 semCount; /**< Number of available semaphores */
- UINT16 maxSemCount; /**< Max number of available semaphores */
- UINT32 semID; /**< Semaphore control structure ID */
- LOS_DL_LIST semList; /**< Queue of tasks that are waiting on a semaphore */
+ UINT8 semStat; /**< Semaphore state | 信号量的状态 */
+ UINT16 semCount; /**< Number of available semaphores | 有效信号量的数量 */
+ UINT16 maxSemCount; /**< Max number of available semaphores | 有效信号量的最大数量 */
+ UINT32 semID; /**< Semaphore control structure ID | 信号量索引号 */
+ LOS_DL_LIST semList; /**< Queue of tasks that are waiting on a semaphore | 挂接阻塞于该信号量的任务 */
} LosSemCB;
/**
diff --git a/src/kernel_liteos_a/kernel/base/include/los_signal.h b/src/kernel_liteos_a/kernel/base/include/los_signal.h
index ce39c484..17341dcf 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_signal.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_signal.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -43,15 +43,76 @@
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+/**
+ * @file los_signal.h
+ * @brief
+ * @verbatim
+ 信号是Linux系统中用于进程间互相通信或者操作的一种机制,信号可以在任何时候发给某一进程,而无需知道该进程的状态。
+ 如果该进程当前并未处于执行状态,则该信号就由内核保存起来,直到该进程被调度执行并传递给它为止。
+ 如果一个信号被进程设置为阻塞,则该信号的传递被延迟,直到其阻塞被取消时才被传递给进程。
-#define LOS_BIT_SET(val, bit) ((val) = (val) | (1ULL << (UINT32)(bit)))
-#define LOS_BIT_CLR(val, bit) ((val) = (val) & ~(1ULL << (UINT32)(bit)))
-#define LOS_IS_BIT_SET(val, bit) (bool)((((val) >> (UINT32)(bit)) & 1ULL))
+ 软中断信号(signal,又简称为信号)用来通知进程发生了异步事件。在软件层次上是对中断机制的一种模拟,
+ 在原理上,一个进程收到一个信号与处理器收到一个中断请求可以说是一样的。信号是进程间通信机制中异步通信机制,
+ 一个进程不必通过任何操作来等待信号的到达,事实上,进程也不知道信号到底什么时候到达。
+ 进程之间可以互相通过系统调用kill发送软中断信号。内核也可以因为内部事件而给进程发送信号,通知进程
+ 发生了某个事件。信号机制除了基本通知功能外,还可以传递附加信息。
+
+ 信号量定义如下: 见于..\third_party\musl\arch\aarch64\bits\signal.h
+ #define SIGHUP 1 //终端挂起或者控制进程终止
+ #define SIGINT 2 //键盘中断(如break键被按下)
+ #define SIGQUIT 3 //键盘的退出键被按下
+ #define SIGILL 4 //非法指令
+ #define SIGTRAP 5 //跟踪陷阱(trace trap),启动进程,跟踪代码的执行
+ #define SIGABRT 6 //由abort(3)发出的退出指令
+ #define SIGIOT SIGABRT //abort发出的信号
+ #define SIGBUS 7 //总线错误
+ #define SIGFPE 8 //浮点异常
+ #define SIGKILL 9 //常用的命令 kill 9 123 | 不能被忽略、处理和阻塞
+ #define SIGUSR1 10 //用户自定义信号1
+ #define SIGSEGV 11 //无效的内存引用, 段违例(segmentation violation),进程试图去访问其虚地址空间以外的位置
+ #define SIGUSR2 12 //用户自定义信号2
+ #define SIGPIPE 13 //向某个非读管道中写入数据
+ #define SIGALRM 14 //由alarm(2)发出的信号,默认行为为进程终止
+ #define SIGTERM 15 //终止信号, kill不带参数时默认发送这个信号
+ #define SIGSTKFLT 16 //协处理器栈错误(已废弃,注意不是栈溢出)
+ #define SIGCHLD 17 //子进程结束信号
+ #define SIGCONT 18 //进程继续(曾被停止的进程)
+ #define SIGSTOP 19 //停止(暂停)进程 | 不能被忽略、处理和阻塞
+ #define SIGTSTP 20 //控制终端(tty)上 按下停止键
+ #define SIGTTIN 21 //进程停止,后台进程企图从控制终端读
+ #define SIGTTOU 22 //进程停止,后台进程企图从控制终端写
+ #define SIGURG 23 //I/O有紧急数据到达当前进程
+ #define SIGXCPU 24 //进程的CPU时间片到期
+ #define SIGXFSZ 25 //文件大小的超出上限
+ #define SIGVTALRM 26 //虚拟时钟超时
+ #define SIGPROF 27 //profile时钟超时
+ #define SIGWINCH 28 //窗口大小改变
+ #define SIGIO 29 //I/O相关
+ #define SIGPOLL 29 //
+ #define SIGPWR 30 //电源故障,关机
+ #define SIGSYS 31 //系统调用中参数错,如系统调用号非法
+ #define SIGUNUSED SIGSYS //系统调用异常
+
+ #define _NSIG 65 //信号范围,不超过_NSIG
+
+ https://www.cnblogs.com/hoys/archive/2012/08/19/2646377.html
+
+ Musl官网 http://musl.libc.org/
+ musl是构建在Linux系统调用API之上的C标准库的实现,包括在基本语言标准POSIX中定义的接口,
+ 以及广泛认可的扩展。musl是轻量级的,快速的,简单的,自由的.
+ * @endverbatim
+ * @param pathname
+ * @return int
+ */
+
+#define LOS_BIT_SET(val, bit) ((val) = (val) | (1ULL << (UINT32)(bit))) ///< 按位设置
+#define LOS_BIT_CLR(val, bit) ((val) = (val) & ~(1ULL << (UINT32)(bit))) ///< 按位清除
+#define LOS_IS_BIT_SET(val, bit) (bool)((((val) >> (UINT32)(bit)) & 1ULL)) ///< 位是否设置为1
#define SIG_STOP_VISIT 1
-#define OS_KERNEL_KILL_PERMISSION 0U
-#define OS_USER_KILL_PERMISSION 3U
+#define OS_KERNEL_KILL_PERMISSION 0U ///< 内核态 kill 权限
+#define OS_USER_KILL_PERMISSION 3U ///< 用户态 kill 权限
#define OS_RETURN_IF(expr, errcode) \
if ((expr)) { \
@@ -91,20 +152,37 @@ typedef void (*sa_sighandler_t)(int);
typedef void (*sa_siginfoaction_t)(int, siginfo_t *, void *);
#define SIGNO2SET(s) ((sigset_t)1ULL << (s))
-#define NULL_SIGNAL_SET ((sigset_t)0ULL)
-#define FULL_SIGNAL_SET ((sigset_t)~0ULL)
-
+#define NULL_SIGNAL_SET ((sigset_t)0ULL) ///< 信号集全部清0
+#define FULL_SIGNAL_SET ((sigset_t)~0ULL) ///< 信号集全部置1
+///信号ID是否有效
static inline int GOOD_SIGNO(unsigned int sig)
{
- return (sig < _NSIG) ? 1 : 0;
+ return (sig < _NSIG) ? 1 : 0;//
}
-#define MAX_SIG_ARRAY_IN_MUSL 128
+#define MAX_SIG_ARRAY_IN_MUSL 128 ///< 128个信号
typedef struct {
unsigned long sig[MAX_SIG_ARRAY_IN_MUSL / sizeof(unsigned long)];
} sigset_t_l;
+/***************************************************
+struct sigaction {
+ union {
+ void (*sa_handler)(int); //信号处理函数——普通版
+ void (*sa_sigaction)(int, siginfo_t *, void *);//信号处理函数——高级版
+ } __sa_handler;
+ sigset_t sa_mask;//指定信号处理程序执行过程中需要阻塞的信号;
+ int sa_flags; //标示位
+ // SA_RESTART:使被信号打断的syscall重新发起。
+ // SA_NOCLDSTOP:使父进程在它的子进程暂停或继续运行时不会收到 SIGCHLD 信号。
+ // SA_NOCLDWAIT:使父进程在它的子进程退出时不会收到SIGCHLD信号,这时子进程如果退出也不会成为僵 尸进程。
+ // SA_NODEFER:使对信号的屏蔽无效,即在信号处理函数执行期间仍能发出这个信号。
+ // SA_RESETHAND:信号处理之后重新设置为默认的处理方式。
+ // SA_SIGINFO:使用sa_sigaction成员而不是sa_handler作为信号处理函数。
+ void (*sa_restorer)(void);
+};
+****************************************************/
typedef struct sigaction sigaction_t;
struct sigactq {
@@ -126,7 +204,7 @@ struct sigpendq {
};
typedef struct sigpendq sigpendq_t;
-struct sq_queue_s {
+struct sq_queue_s {//信号队列
sq_entry_t *head;
sq_entry_t *tail;
};
@@ -136,23 +214,24 @@ typedef struct SigInfoListNode {
struct SigInfoListNode *next;
siginfo_t info;
} SigInfoListNode;
-
+/**
+ * @brief 信号控制块(描述符)
+ */
typedef struct {
- sigset_t sigFlag;
- sigset_t sigPendFlag;
- sigset_t sigprocmask; /* Signals that are blocked */
- sq_queue_t sigactionq;
- LOS_DL_LIST waitList;
- sigset_t sigwaitmask; /* Waiting for pending signals */
- siginfo_t sigunbinfo; /* Signal info when task unblocked */
- SigInfoListNode *tmpInfoListHead; /* Signal info List */
- unsigned int sigIntLock;
- void *sigContext;
- unsigned int count;
+ sigset_t sigFlag; ///< 不屏蔽的信号集
+    sigset_t sigPendFlag;   ///< 信号阻塞标签集,记录哪些信号来过,任务依然阻塞的集合.即:这些信号不能唤醒任务
+ sigset_t sigprocmask; ///< Signals that are blocked | 任务屏蔽了哪些信号
+ sq_queue_t sigactionq; ///< 信号捕捉队列
+ LOS_DL_LIST waitList; ///< 待链表,上面挂的是等待信号到来的任务, 请查找 OsTaskWait(&sigcb->waitList, timeout, TRUE) 理解
+ sigset_t sigwaitmask; /*! Waiting for pending signals | 任务在等待哪些信号的到来 */
+ siginfo_t sigunbinfo; /*! Signal info when task unblocked | 任务解锁时的信号信息 */
+ SigInfoListNode *tmpInfoListHead; /*! Signal info List */
+ unsigned int sigIntLock;///< 信号中断锁
+ void *sigContext; ///< 信号上下文
+ unsigned int count;///< 信号数量
} sig_cb;
typedef struct ProcessCB LosProcessCB;
-
#define SIGEV_THREAD_ID 4
int sys_sigqueue(pid_t, int, const union sigval);
diff --git a/src/kernel_liteos_a/kernel/base/include/los_sortlink_pri.h b/src/kernel_liteos_a/kernel/base/include/los_sortlink_pri.h
index 788947d3..e0aa4522 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_sortlink_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_sortlink_pri.h
@@ -42,17 +42,24 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+
+/*! \struct SortLinkList
+*
+*/
typedef struct {
- LOS_DL_LIST sortLinkNode;
- UINT64 responseTime;
+ LOS_DL_LIST sortLinkNode; ///< 排序链表,注意上面挂的是一个个等待被执行的任务/软件定时器
+ UINT64 responseTime; ///< 响应时间,这里提取了最近需要触发的定时器/任务的时间,见于 OsAddNode2SortLink 的实现
#ifdef LOSCFG_KERNEL_SMP
- UINT32 cpuid;
+ UINT32 cpuid; ///< 需要哪个CPU处理
#endif
} SortLinkList;
+/*! \struct SortLinkAttribute
+* @brief 排序链表属性
+*/
typedef struct {
- LOS_DL_LIST sortLink;
- UINT32 nodeNum;
+ LOS_DL_LIST sortLink; ///< 排序链表,上面挂的任务/软件定时器
+ UINT32 nodeNum; ///< 链表结点数量
SPIN_LOCK_S spinLock; /* swtmr sort link spin lock */
} SortLinkAttribute;
diff --git a/src/kernel_liteos_a/kernel/base/include/los_stackinfo_pri.h b/src/kernel_liteos_a/kernel/base/include/los_stackinfo_pri.h
index 74eb7ee9..b89eeb35 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_stackinfo_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_stackinfo_pri.h
@@ -41,14 +41,17 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+/*! \struct StackInfo
+* 栈信息
+*/
typedef struct {
- VOID *stackTop;
- UINT32 stackSize;
- CHAR *stackName;
+ VOID *stackTop; ///< 栈顶
+ UINT32 stackSize; ///< 栈大小
+ CHAR *stackName; ///< 栈名称
} StackInfo;
#define OS_INVALID_WATERLINE 0xFFFFFFFF
-#define OS_STACK_MAGIC_CHECK(topstack) (*(UINTPTR *)(topstack) == OS_STACK_MAGIC_WORD) /* 1:magic valid 0:invalid */
+#define OS_STACK_MAGIC_CHECK(topstack) (*(UINTPTR *)(topstack) == OS_STACK_MAGIC_WORD) /* 1:magic valid 0:invalid */
extern VOID OsExcStackInfo(VOID);
extern VOID OsExcStackInfoReg(const StackInfo *stackInfo, UINT32 stackNum);
@@ -81,4 +84,4 @@ extern UINT32 OsStackWaterLineGet(const UINTPTR *stackBottom, const UINTPTR *sta
#endif /* __cplusplus */
#endif /* __cplusplus */
-#endif /* _LOS_STACK_INFO_PRI_H */
+#endif /* _LOS_STACK_INFO_PRI_H */
\ No newline at end of file
diff --git a/src/kernel_liteos_a/kernel/base/include/los_swtmr_pri.h b/src/kernel_liteos_a/kernel/base/include/los_swtmr_pri.h
index 02d6c0db..f2df6dbd 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_swtmr_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_swtmr_pri.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -39,7 +39,7 @@
#ifdef LOSCFG_SECURITY_VID
#include "vid_api.h"
#else
-#define MAX_INVALID_TIMER_VID OS_SWTMR_MAX_TIMERID
+#define MAX_INVALID_TIMER_VID OS_SWTMR_MAX_TIMERID //最大支持的软件定时器数量 < 65535
#endif
#ifdef __cplusplus
@@ -52,21 +52,21 @@ extern "C" {
* @ingroup los_swtmr_pri
* Software timer state
*/
-enum SwtmrState {
- OS_SWTMR_STATUS_UNUSED, /**< The software timer is not used. */
- OS_SWTMR_STATUS_CREATED, /**< The software timer is created. */
- OS_SWTMR_STATUS_TICKING /**< The software timer is timing. */
+enum SwtmrState { //定时器状态
+ OS_SWTMR_STATUS_UNUSED, /**< The software timer is not used. | 定时器未使用,系统在定时器模块初始化时,会将系统中所有定时器资源初始化成该状态。 */
+ OS_SWTMR_STATUS_CREATED, /**< The software timer is created. | 定时器创建后未启动,或已停止.定时器创建后,不处于计数状态时,定时器将变成该状态。 */
+ OS_SWTMR_STATUS_TICKING /**< The software timer is timing. | 定时器处于计数状态,在定时器创建后调用LOS_SwtmrStart接口启动,定时器将变成该状态,是定时器运行时的状态。 */
};
/**
* @ingroup los_swtmr_pri
* Structure of the callback function that handles software timer timeout
*/
-typedef struct {
- SWTMR_PROC_FUNC handler; /**< Callback function that handles software timer timeout */
+typedef struct {//处理软件定时器超时的回调函数的结构体
+ SWTMR_PROC_FUNC handler; /**< Callback function that handles software timer timeout | 处理软件定时器超时的回调函数*/
UINTPTR arg; /**< Parameter passed in when the callback function
- that handles software timer timeout is called */
- LOS_DL_LIST node;
+ that handles software timer timeout is called | 调用处理软件计时器超时的回调函数时传入的参数*/
+ LOS_DL_LIST node; ///< 挂入定时器超时队列,详见 SwtmrWake( ... )
#ifdef LOSCFG_SWTMR_DEBUG
UINT32 swtmrID;
#endif
@@ -75,11 +75,12 @@ typedef struct {
/**
* @ingroup los_swtmr_pri
* Type of the pointer to the structure of the callback function that handles software timer timeout
- */
+ */ //指向处理软件计时器超时的回调函数结构的指针的类型
typedef SwtmrHandlerItem *SwtmrHandlerItemPtr;
-extern SWTMR_CTRL_S *g_swtmrCBArray;
+extern SWTMR_CTRL_S *g_swtmrCBArray;//软件定时器数组,后续统一注解为定时器池
+//通过参数ID找到对应定时器描述体
#define OS_SWT_FROM_SID(swtmrID) ((SWTMR_CTRL_S *)g_swtmrCBArray + ((swtmrID) % LOSCFG_BASE_CORE_SWTMR_LIMIT))
/**
diff --git a/src/kernel_liteos_a/kernel/base/include/los_sys_pri.h b/src/kernel_liteos_a/kernel/base/include/los_sys_pri.h
index 01a90e8b..75ab6ddb 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_sys_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_sys_pri.h
@@ -43,63 +43,62 @@ extern "C" {
/**
* @ingroup los_sys
- * Number of milliseconds in one second.
+ * Number of milliseconds in one second.
*/
-#define OS_SYS_MS_PER_SECOND 1000
+#define OS_SYS_MS_PER_SECOND 1000 ///< 一秒多少毫秒
/**
* @ingroup los_sys
* Number of microseconds in one second.
*/
-#define OS_SYS_US_PER_SECOND 1000000
+#define OS_SYS_US_PER_SECOND 1000000 ///< 一秒多少微秒
/**
* @ingroup los_sys
* Number of nanoseconds in one second.
*/
-#define OS_SYS_NS_PER_SECOND 1000000000
+#define OS_SYS_NS_PER_SECOND 1000000000 ///< 一秒多少纳秒
/**
* @ingroup los_sys
* Number of microseconds in one milliseconds.
*/
-#define OS_SYS_US_PER_MS 1000
-
+#define OS_SYS_US_PER_MS 1000 ///< 一毫秒的微秒数
/**
* @ingroup los_sys
* Number of nanoseconds in one milliseconds.
*/
-#define OS_SYS_NS_PER_MS 1000000
+#define OS_SYS_NS_PER_MS 1000000 ///< 一毫秒的纳秒数
/**
* @ingroup los_sys
* Number of nanoseconds in one microsecond.
*/
-#define OS_SYS_NS_PER_US 1000
+#define OS_SYS_NS_PER_US 1000 ///< 一微秒的纳秒数
/**
* @ingroup los_sys
* Number of cycle in one tick.
*/
-#define OS_CYCLE_PER_TICK (OS_SYS_CLOCK / LOSCFG_BASE_CORE_TICK_PER_SECOND)
+#define OS_CYCLE_PER_TICK (OS_SYS_CLOCK / LOSCFG_BASE_CORE_TICK_PER_SECOND) ///< 一个节拍的周期数
/**
* @ingroup los_sys
* Number of nanoseconds in one cycle.
*/
-#define OS_NS_PER_CYCLE (OS_SYS_NS_PER_SECOND / OS_SYS_CLOCK)
+#define OS_NS_PER_CYCLE (OS_SYS_NS_PER_SECOND / OS_SYS_CLOCK) ///< 一周期的纳秒数
/**
* @ingroup los_sys
* Number of microseconds in one tick.
*/
-#define OS_US_PER_TICK (OS_SYS_US_PER_SECOND / LOSCFG_BASE_CORE_TICK_PER_SECOND)
+#define OS_US_PER_TICK (OS_SYS_US_PER_SECOND / LOSCFG_BASE_CORE_TICK_PER_SECOND) ///< 一个tick的微秒数
/**
* @ingroup los_sys
* Number of nanoseconds in one tick.
*/
-#define OS_NS_PER_TICK (OS_SYS_NS_PER_SECOND / LOSCFG_BASE_CORE_TICK_PER_SECOND)
+#define OS_NS_PER_TICK (OS_SYS_NS_PER_SECOND / LOSCFG_BASE_CORE_TICK_PER_SECOND) ///< 一个tick的纳秒数
#define OS_US_TO_CYCLE(time, freq) ((((time) / OS_SYS_US_PER_SECOND) * (freq)) + \
(((time) % OS_SYS_US_PER_SECOND) * (freq) / OS_SYS_US_PER_SECOND))
@@ -115,19 +114,19 @@ extern "C" {
* @ingroup los_sys
* The maximum length of name.
*/
-#define OS_SYS_APPVER_NAME_MAX 64
+#define OS_SYS_APPVER_NAME_MAX 64 ///< 名字的最大长度
/**
* @ingroup los_sys
* The magic word.
*/
-#define OS_SYS_MAGIC_WORD 0xAAAAAAAA
+#define OS_SYS_MAGIC_WORD 0xAAAAAAAA ///< 魔法数字,还记得栈顶的魔法数字是多少吗? 0xCCCCCCCC
/**
* @ingroup los_sys
* The initialization value of stack space.
*/
-#define OS_SYS_EMPTY_STACK 0xCACACACA
+#define OS_SYS_EMPTY_STACK 0xCACACACA ///< 栈的填充内容魔法数字
/**
* @ingroup los_sys
diff --git a/src/kernel_liteos_a/kernel/base/include/los_task_pri.h b/src/kernel_liteos_a/kernel/base/include/los_task_pri.h
index fd487a0c..5cf54fa7 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_task_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_task_pri.h
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -43,17 +43,17 @@ extern "C" {
/**
* @ingroup los_task
- * Define task signal types.
+ * Define task signal types.
*
- * Task signal types.
+ * Task signal types.
*/
-#define SIGNAL_NONE 0U
-#define SIGNAL_KILL (1U << 0)
-#define SIGNAL_SUSPEND (1U << 1)
-#define SIGNAL_AFFI (1U << 2)
+#define SIGNAL_NONE 0U ///< 无信号
+#define SIGNAL_KILL (1U << 0) ///< 干掉
+#define SIGNAL_SUSPEND (1U << 1) ///< 挂起
+#define SIGNAL_AFFI (1U << 2) ///< CPU 亲和性,一个任务被切换后被同一个CPU再次执行,则亲和力高
/* scheduler lock */
-extern SPIN_LOCK_S g_taskSpin;
+extern SPIN_LOCK_S g_taskSpin;//任务自旋锁
#define SCHEDULER_HELD() LOS_SpinHeld(&g_taskSpin)
#define SCHEDULER_LOCK(state) LOS_SpinLockSave(&g_taskSpin, &(state))
#define SCHEDULER_UNLOCK(state) LOS_SpinUnlockRestore(&g_taskSpin, state)
@@ -66,7 +66,7 @@ extern SPIN_LOCK_S g_taskSpin;
* Null task ID
*
*/
-#define OS_TASK_ERRORID 0xFFFFFFFF
+#define OS_TASK_ERRORID 0xFFFFFFFF
/**
* @ingroup los_task
@@ -74,7 +74,7 @@ extern SPIN_LOCK_S g_taskSpin;
*
* The task control block is unused.
*/
-#define OS_TASK_STATUS_UNUSED 0x0400U
+#define OS_TASK_STATUS_UNUSED 0x0400U ///< 任务状态:未使用
/**
* @ingroup los_task
@@ -82,7 +82,7 @@ extern SPIN_LOCK_S g_taskSpin;
*
* The task is joinable.
*/
-#define OS_TASK_FLAG_PTHREAD_JOIN 0x0800U
+#define OS_TASK_FLAG_PTHREAD_JOIN 0x0800U ///< 主task和子task连在一块不分离
/**
* @ingroup los_task
@@ -98,15 +98,15 @@ extern SPIN_LOCK_S g_taskSpin;
*
* The task is system-level task, like idle, swtmr and etc.
*/
-#define OS_TASK_FLAG_SYSTEM_TASK 0x2000U
+#define OS_TASK_FLAG_SYSTEM_TASK 0x2000U ///< 系统任务
/**
* @ingroup los_task
* Flag that indicates the task property.
*
- * The task is no-delete system task, like resourceTask.
+ * The task is no-delete system task, like resourceTask.
*/
-#define OS_TASK_FLAG_NO_DELETE 0x4000U
+#define OS_TASK_FLAG_NO_DELETE 0x4000U ///< 该任务是不可删除的系统任务,如资源回收任务
/**
* @ingroup los_task
@@ -114,7 +114,7 @@ extern SPIN_LOCK_S g_taskSpin;
*
* Kills the thread during process exit.
*/
-#define OS_TASK_FLAG_EXIT_KILL 0x8000U
+#define OS_TASK_FLAG_EXIT_KILL 0x8000U ///< 在进程退出期间一同被干掉的任务
/**
* @ingroup los_task
@@ -122,25 +122,25 @@ extern SPIN_LOCK_S g_taskSpin;
*
* Specifies the process creation task.
*/
-#define OS_TASK_FLAG_SPECIFIES_PROCESS 0x0U
+#define OS_TASK_FLAG_SPECIFIES_PROCESS 0x0U ///< 创建指定任务 例如: cat weharmony.net 的实现
/**
* @ingroup los_task
* Boundary on which the stack size is aligned.
*
*/
-#define OS_TASK_STACK_SIZE_ALIGN 16U
+#define OS_TASK_STACK_SIZE_ALIGN 16U ///< 堆栈大小对齐
/**
* @ingroup los_task
* Boundary on which the stack address is aligned.
*
*/
-#define OS_TASK_STACK_ADDR_ALIGN 8U
+#define OS_TASK_STACK_ADDR_ALIGN 8U ///< 堆栈地址对齐
/**
* @ingroup los_task
- * Number of usable task priorities.
+ * Number of usable task priorities. | 任务优先级数量
*/
#define OS_TSK_PRINUM (OS_TASK_PRIORITY_LOWEST - OS_TASK_PRIORITY_HIGHEST + 1)
@@ -175,9 +175,9 @@ extern SPIN_LOCK_S g_taskSpin;
* @par Dependency:
*
- los_task_pri.h: the header file that contains the API declaration.
* @see
-*/
+*/ ///通过pendList取出TCB,用于挂入链表节点时使用 pendList的情况
#define OS_TCB_FROM_PENDLIST(ptr) LOS_DL_LIST_ENTRY(ptr, LosTaskCB, pendList)
-
+
/**
* @ingroup los_task
* @brief Obtain the pointer to a task control block.
@@ -204,56 +204,60 @@ extern SPIN_LOCK_S g_taskSpin;
#define LOSCFG_STACK_POINT_ALIGN_SIZE (sizeof(UINTPTR) * 2)
#endif
-#define OS_TASK_RESOURCE_STATIC_SIZE 0x1000
-#define OS_TASK_RESOURCE_FREE_PRIORITY 5
-#define OS_RESOURCE_EVENT_MASK 0xFF
-#define OS_RESOURCE_EVENT_OOM 0x02
-#define OS_RESOURCE_EVENT_FREE 0x04
+#define OS_TASK_RESOURCE_STATIC_SIZE 0x1000 ///< 4K
+#define OS_TASK_RESOURCE_FREE_PRIORITY 5 ///< 回收资源任务的优先级
+#define OS_RESOURCE_EVENT_MASK 0xFF ///< 资源事件的掩码
+#define OS_RESOURCE_EVENT_OOM 0x02 ///< 内存溢出事件
+#define OS_RESOURCE_EVENT_FREE 0x04 ///< 资源释放事件
+/// LosTask结构体是给外部使用的
typedef struct {
LosTaskCB *runTask;
LosTaskCB *newTask;
} LosTask;
+/// 进程信号描述符
struct ProcessSignalInfo {
- siginfo_t *sigInfo; /**< Signal to be dispatched */
- LosTaskCB *defaultTcb; /**< Default TCB */
- LosTaskCB *unblockedTcb; /**< The signal unblock on this TCB*/
- LosTaskCB *awakenedTcb; /**< This TCB was awakened */
- LosTaskCB *receivedTcb; /**< This TCB received the signal */
+ siginfo_t *sigInfo; /**< Signal to be dispatched | 要发送的信号*/
+ LosTaskCB *defaultTcb; /**< Default TCB | 默认task,默认接收信号的任务. */
+ LosTaskCB *unblockedTcb; /**< The signal unblock on this TCB | 信号在此TCB上解除阻塞 */
+ LosTaskCB *awakenedTcb; /**< This TCB was awakened | 即 任务在等待这个信号,此信号一来任务被唤醒.*/
+ LosTaskCB *receivedTcb; /**< This TCB received the signal | 如果没有屏蔽信号,任务将接收这个信号. */
};
-typedef int (*ForEachTaskCB)(LosTaskCB *tcb, void *arg);
+typedef int (*ForEachTaskCB)(LosTaskCB *tcb, void *arg);///< 回调任务函数,例如:进程被kill 9 时,通知所有任务善后处理
/**
* @ingroup los_task
* Maximum number of tasks.
*
*/
-extern UINT32 g_taskMaxNum;
+extern UINT32 g_taskMaxNum;///< 任务最大数量 默认128个
+
/**
* @ingroup los_task
* Starting address of a task.
*
*/
-extern LosTaskCB *g_taskCBArray;
+extern LosTaskCB *g_taskCBArray;///< 外部变量 任务池 默认128个
/**
* @ingroup los_task
* Time slice structure.
*/
-typedef struct {
- LosTaskCB *task; /**< Current running task */
- UINT16 time; /**< Expiration time point */
- UINT16 timeout; /**< Expiration duration */
+typedef struct {//时间片结构体,任务轮询
+ LosTaskCB *task; /**< Current running task | 当前运行着的任务*/
+ UINT16 time; /**< Expiration time point | 到期时间点*/
+ UINT16 timeout; /**< Expiration duration | 有效期*/
} OsTaskRobin;
+/// 通过任务ID获取任务实体,task由任务池分配,本质是个数组,彼此都挨在一块
STATIC INLINE LosTaskCB *OsGetTaskCB(UINT32 taskID)
{
return OS_TCB_FROM_TID(taskID);
}
-
+/// 任务是否在使用
STATIC INLINE BOOL OsTaskIsUnused(const LosTaskCB *taskCB)
{
return ((taskCB->taskStatus & OS_TASK_STATUS_UNUSED) != 0);
@@ -261,45 +265,46 @@ STATIC INLINE BOOL OsTaskIsUnused(const LosTaskCB *taskCB)
STATIC INLINE BOOL OsTaskIsKilled(const LosTaskCB *taskCB)
{
- return((taskCB->taskStatus & OS_TASK_FLAG_EXIT_KILL) != 0);
+ return ((taskCB->taskStatus & OS_TASK_FLAG_EXIT_KILL) != 0);
}
STATIC INLINE BOOL OsTaskIsNotDelete(const LosTaskCB *taskCB)
{
return ((taskCB->taskStatus & (OS_TASK_STATUS_UNUSED | OS_TASK_FLAG_SYSTEM_TASK | OS_TASK_FLAG_NO_DELETE)) != 0);
}
-
STATIC INLINE BOOL OsTaskIsUserMode(const LosTaskCB *taskCB)
{
return ((taskCB->taskStatus & OS_TASK_FLAG_USER_MODE) != 0);
}
-#define OS_TID_CHECK_INVALID(taskID) ((UINT32)(taskID) >= g_taskMaxNum)
+#define OS_TID_CHECK_INVALID(taskID) ((UINT32)(taskID) >= g_taskMaxNum)//任务ID是否无效 (>= g_taskMaxNum,默认128)
/* get task info */
#define OS_ALL_TASK_MASK 0xFFFFFFFF
-
-#define OS_TASK_WAIT_ANYPROCESS (1 << 0U)
-#define OS_TASK_WAIT_PROCESS (1 << 1U)
-#define OS_TASK_WAIT_GID (1 << 2U)
-#define OS_TASK_WAIT_SEM (OS_TASK_WAIT_GID + 1)
-#define OS_TASK_WAIT_QUEUE (OS_TASK_WAIT_SEM + 1)
-#define OS_TASK_WAIT_JOIN (OS_TASK_WAIT_QUEUE + 1)
-#define OS_TASK_WAIT_SIGNAL (OS_TASK_WAIT_JOIN + 1)
-#define OS_TASK_WAIT_LITEIPC (OS_TASK_WAIT_SIGNAL + 1)
-#define OS_TASK_WAIT_MUTEX (OS_TASK_WAIT_LITEIPC + 1)
-#define OS_TASK_WAIT_FUTEX (OS_TASK_WAIT_MUTEX + 1)
-#define OS_TASK_WAIT_EVENT (OS_TASK_WAIT_FUTEX + 1)
-#define OS_TASK_WAIT_COMPLETE (OS_TASK_WAIT_EVENT + 1)
-
+/// 任务的等待事件/信号列表
+#define OS_TASK_WAIT_ANYPROCESS (1 << 0U) ///< 等待任意进程出现
+#define OS_TASK_WAIT_PROCESS (1 << 1U) ///< 等待指定进程出现
+#define OS_TASK_WAIT_GID (1 << 2U) ///< 等待组ID
+#define OS_TASK_WAIT_SEM (OS_TASK_WAIT_GID + 1) ///< 等待信号量发生
+#define OS_TASK_WAIT_QUEUE (OS_TASK_WAIT_SEM + 1) ///< 等待队列到来
+#define OS_TASK_WAIT_JOIN (OS_TASK_WAIT_QUEUE + 1) ///< 等待联结到来
+#define OS_TASK_WAIT_SIGNAL (OS_TASK_WAIT_JOIN + 1) ///< 等待普通信号到来
+#define OS_TASK_WAIT_LITEIPC (OS_TASK_WAIT_SIGNAL + 1) ///< 等待liteipc到来
+#define OS_TASK_WAIT_MUTEX (OS_TASK_WAIT_LITEIPC + 1) ///< 等待MUTEX到来
+#define OS_TASK_WAIT_FUTEX (OS_TASK_WAIT_MUTEX + 1) ///< 等待FUTEX到来
+#define OS_TASK_WAIT_EVENT (OS_TASK_WAIT_FUTEX + 1) ///< 等待事件发生
+#define OS_TASK_WAIT_COMPLETE (OS_TASK_WAIT_EVENT + 1) ///< 等待结束信号
+
+/// 设置事件阻塞掩码,即设置任务的等待事件.
STATIC INLINE VOID OsTaskWaitSetPendMask(UINT16 mask, UINTPTR lockID, UINT32 timeout)
{
LosTaskCB *runTask = OsCurrTaskGet();
- runTask->waitID = lockID;
- runTask->waitFlag = mask;
+ runTask->waitID = lockID; //
+ runTask->waitFlag = mask; //当前任务在等待什么东东到来 例如: OS_TASK_WAIT_LITEIPC
(VOID)timeout;
}
+/// 清除事件阻塞掩码,即任务不再等待任何事件.
STATIC INLINE VOID OsTaskWakeClearPendMask(LosTaskCB *resumeTask)
{
resumeTask->waitID = 0;
diff --git a/src/kernel_liteos_a/kernel/base/include/los_user_container_pri.h b/src/kernel_liteos_a/kernel/base/include/los_user_container_pri.h
index a073599b..83d78445 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_user_container_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_user_container_pri.h
@@ -50,13 +50,13 @@ typedef struct UidGidMap {
UidGidExtent extent[UID_GID_MAP_MAX_EXTENTS];
};
} UidGidMap;
-
+//用户容器
typedef struct UserContainer {
Atomic rc;
INT32 level;
UINT32 owner;
UINT32 group;
- struct UserContainer *parent;
+ struct UserContainer *parent;
UidGidMap uidMap;
UidGidMap gidMap;
UINT32 containerID;
diff --git a/src/kernel_liteos_a/kernel/base/include/los_uts_container_pri.h b/src/kernel_liteos_a/kernel/base/include/los_uts_container_pri.h
index 5642286c..cc560d22 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_uts_container_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_uts_container_pri.h
@@ -39,11 +39,15 @@
typedef struct ProcessCB LosProcessCB;
struct Container;
-
+/****************************************
+* https://unix.stackexchange.com/questions/183717/whats-a-uts-namespace
+* uts的全称: UNIX Time Sharing, UNIX分时操作系统
+* setting hostname, domainname will not affect rest of the system (CLONE_NEWUTS flag)
+****************************************/
typedef struct UtsContainer {
- Atomic rc;
- UINT32 containerID;
- struct utsname utsName;
+ Atomic rc; //原子操作 LDREX 和 STREX 指令保证了原子操作的底层实现
+ UINT32 containerID; //容器ID
+ struct utsname utsName; //存放系统信息的缓冲区
} UtsContainer;
UINT32 OsInitRootUtsContainer(UtsContainer **utsContainer);
diff --git a/src/kernel_liteos_a/kernel/base/include/los_vm_boot.h b/src/kernel_liteos_a/kernel/base/include/los_vm_boot.h
index fb6e714a..6ec88028 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_vm_boot.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_vm_boot.h
@@ -47,17 +47,17 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
-#define OS_KHEAP_BLOCK_SIZE (512 * 1024UL)
-
+#define OS_KHEAP_BLOCK_SIZE (512 * 1024UL) ///< 内核空间 堆内存部分大小, 512K
+//记录 MMU 映射关系
typedef struct ArchMmuInitMapping {
- PADDR_T phys;
- VADDR_T virt;
- size_t size;
- unsigned int flags;
- const char *name;
+ PADDR_T phys;///< 物理地址
+ VADDR_T virt;///< 虚拟地址
+ size_t size;///< 大小
+ unsigned int flags;///< 标识 读/写/.. VM_MAP_REGION_FLAG_PERM_*
+ const char *name;///< 名称
} LosArchMmuInitMapping;
-extern LosArchMmuInitMapping g_archMmuInitMapping[];
+extern LosArchMmuInitMapping g_archMmuInitMapping[];//映射关系表
extern UINTPTR g_vmBootMemBase;
extern BOOL g_kHeapInited;
diff --git a/src/kernel_liteos_a/kernel/base/include/los_vm_common.h b/src/kernel_liteos_a/kernel/base/include/los_vm_common.h
index f2dd70bf..17b3f34c 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_vm_common.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_vm_common.h
@@ -1,3 +1,64 @@
+/*!
+ * @file los_vm_common.h
+ * @brief
+ * @link
+ @verbatim
+ @note_pic
+ 鸿蒙虚拟内存-用户空间图 从 USER_ASPACE_BASE 至 USER_ASPACE_TOP_MAX
+ 鸿蒙源码分析系列篇: https://blog.csdn.net/kuangyufei
+ https://my.oschina.net/u/3751245
+
+ | /\ |
+ | || |
+ |---------------------------|内核空间结束位置KERNEL_ASPACE_BASE + KERNEL_ASPACE_SIZE
+ | |
+ | 内核空间 |
+ | |
+ | |
+ |---------------------------|内核空间开始位置 KERNEL_ASPACE_BASE
+ | |
+ | 16M 预留 |
+ |---------------------------|用户空间栈顶 USER_ASPACE_TOP_MAX = USER_ASPACE_BASE + USER_ASPACE_SIZE
+ | |
+ | stack区 自上而下 |
+ | |
+ | || |
+ | || |
+ | || |
+ | \/ |
+ | |
+ |---------------------------|映射区结束位置 USER_MAP_BASE + USER_MAP_SIZE
+ | 映射区 (文件,匿名,I/O映射) |
+ | |
+ | |
+ | 共享库 .so |
+ | |
+ | L1/L2页表 |
+ |---------------------------|映射区开始位置 USER_MAP_BASE = (USER_ASPACE_TOP_MAX >> 1)
+ | |
+ | |
+ | /\ |
+ | || |
+ | || |
+ | || |
+ | |
+ | heap区 自下而上 |
+ | |
+ |---------------------------|用户空间堆区开始位置 USER_HEAP_BASE = USER_ASPACE_TOP_MAX >> 2
+ | |
+ | .bss |
+ | .data |
+ | .text |
+ |---------------------------|用户空间开始位置 USER_ASPACE_BASE = 0x01000000UL
+ | |
+ | 16M预留 |
+ |---------------------------|虚拟内存开始位置 0x00000000
+
+ @endverbatim
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-11-30
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
@@ -43,38 +104,51 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+
/* user address space, defaults to below kernel space with a 16MB guard gap on either side */
-#ifndef USER_ASPACE_BASE
-#define USER_ASPACE_BASE ((vaddr_t)0x01000000UL)
+#ifndef USER_ASPACE_BASE ///< 用户地址空间,默认为低于内核空间,两侧各有16MB的保护间隙
+#define USER_ASPACE_BASE ((vaddr_t)0x01000000UL) ///< 用户空间基地址 从16M位置开始 user_ram : ORIGIN = 0x1000000, LENGTH = 0x100000
#endif
#ifndef USER_ASPACE_SIZE
-#define USER_ASPACE_SIZE ((vaddr_t)KERNEL_ASPACE_BASE - USER_ASPACE_BASE - 0x01000000UL)
+#define USER_ASPACE_SIZE ((vaddr_t)KERNEL_ASPACE_BASE - USER_ASPACE_BASE - 0x01000000UL)///< 用户空间 < 内核空间 2个16M
#endif
-#define USER_ASPACE_TOP_MAX ((vaddr_t)(USER_ASPACE_BASE + USER_ASPACE_SIZE))
-#define USER_HEAP_BASE ((vaddr_t)(USER_ASPACE_TOP_MAX >> 2))
-#define USER_MAP_BASE ((vaddr_t)(USER_ASPACE_TOP_MAX >> 1))
-#define USER_MAP_SIZE ((vaddr_t)(USER_ASPACE_SIZE >> 3))
+#define USER_ASPACE_TOP_MAX ((vaddr_t)(USER_ASPACE_BASE + USER_ASPACE_SIZE))///< 用户空间顶部位置
+#define USER_HEAP_BASE ((vaddr_t)(USER_ASPACE_TOP_MAX >> 2)) ///< 堆的开始地址
+#define USER_MAP_BASE ((vaddr_t)(USER_ASPACE_TOP_MAX >> 1)) ///< 用户映射区开始地址
+#define USER_MAP_SIZE ((vaddr_t)(USER_ASPACE_SIZE >> 3)) ///< 用户空间映射大小 = 1/8 用户空间
#ifndef PAGE_SIZE
-#define PAGE_SIZE (0x1000U)
+#define PAGE_SIZE (0x1000U) ///< 页大小4K
#endif
-#define PAGE_MASK (~(PAGE_SIZE - 1))
-#define PAGE_SHIFT (12)
+#define PAGE_MASK (~(PAGE_SIZE - 1)) ///< 页掩码,用于页内偏移地址的计算
+#define PAGE_SHIFT (12)///< 12位 - 4K 偏移
#define KB (1024UL)
#define MB (1024UL * 1024UL)
#define GB (1024UL * 1024UL * 1024UL)
-#define ROUNDUP(a, b) (((a) + ((b) - 1)) & ~((b) - 1))
-#define ROUNDDOWN(a, b) ((a) & ~((b) - 1))
-#define ROUNDOFFSET(a, b) ((a) & ((b) - 1))
-#define MIN2(a, b) (((a) < (b)) ? (a) : (b))
-
-#define IS_ALIGNED(a, b) (!(((UINTPTR)(a)) & (((UINTPTR)(b)) - 1)))
-#define IS_PAGE_ALIGNED(x) IS_ALIGNED(x, PAGE_SIZE)
-#define IS_SECTION_ALIGNED(x) IS_ALIGNED(x, SECTION_SIZE)
+/**
+ * @brief
+ * @verbatim
+ 圆整通常被理解为为满足某种要求而进行的数据修正。按照修正后的数据在数值上是否比原数据大,
+ 又可分为向上圆整和向下圆整。它们很像对模拟信号进行采样,对一定范围的数据向一个固定的数据靠拢。
+ 举例理解:
+ ROUNDUP(7,4) = 8 ,ROUNDUP(8,4) = 8 ,ROUNDUP(9,4) = 12
+ ROUNDDOWN(7,4) = 4 ,ROUNDDOWN(8,4) = 8 ,ROUNDDOWN(9,4) = 8
+ ROUNDOFFSET(7,4) = 3 ,ROUNDOFFSET(8,4) = 0 ,ROUNDOFFSET(9,4) = 1
+
+ * @endverbatim
+ */
+#define ROUNDUP(a, b) (((a) + ((b) - 1)) & ~((b) - 1)) ///< 向上圆整
+#define ROUNDDOWN(a, b) ((a) & ~((b) - 1)) ///< 向下圆整
+#define ROUNDOFFSET(a, b) ((a) & ((b) - 1)) ///< 圆整偏移
+#define MIN2(a, b) (((a) < (b)) ? (a) : (b)) ///< 找到最小值
+#define IS_ALIGNED(a, b) (!(((UINTPTR)(a)) & (((UINTPTR)(b)) - 1)))///< 是否按指定的参数对齐
+#define IS_PAGE_ALIGNED(x) IS_ALIGNED(x, PAGE_SIZE) ///< 是否按页大小对齐 4K
+#define IS_SECTION_ALIGNED(x) IS_ALIGNED(x, SECTION_SIZE)///< 是否按段大小对齐
+//虚拟内存的异常提示
#define LOS_ERRNO_VM_NO_ERROR (0)
#define LOS_ERRNO_VM_GENERIC (-1)
#define LOS_ERRNO_VM_NOT_FOUND (-2)
diff --git a/src/kernel_liteos_a/kernel/base/include/los_vm_fault.h b/src/kernel_liteos_a/kernel/base/include/los_vm_fault.h
index d3c650a2..bd6d271b 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_vm_fault.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_vm_fault.h
@@ -55,7 +55,7 @@ typedef struct {
#define VM_MAP_PF_FLAG_USER (1U << 1)
#define VM_MAP_PF_FLAG_INSTRUCTION (1U << 2)
#define VM_MAP_PF_FLAG_NOT_PRESENT (1U << 3)
-
+//缺页中断处理函数
STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame);
#ifdef __cplusplus
#if __cplusplus
diff --git a/src/kernel_liteos_a/kernel/base/include/los_vm_filemap.h b/src/kernel_liteos_a/kernel/base/include/los_vm_filemap.h
index c8cceb21..3aa50a0c 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_vm_filemap.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_vm_filemap.h
@@ -1,3 +1,23 @@
+/*!
+ * @file los_vm_filemap.h
+ * @brief
+ * @link
+ @verbatim
+ 磁盘高速缓存是一种软件机制,它允许系统把通常存放在磁盘上的一些数据保留在 RAM 中,以便对那些数据的
+ 进一步访问不用再访问磁盘而能尽快得到满足。
+ 页高速缓存中的信息单位是一个完整的页。
+ 一个页包含的磁盘块在物理上不一定相邻,所以不能用设备号和块号标识,而是通过页的所有者和所有者数据中的索引来识别。
+ 页高速缓存可以缓存以下内容
+ A.普通文件数据
+ B.含有目录的页
+ C.直接从块设备读取的页
+ D.用户进程数据的页
+ E.特殊文件系统的文件页
+ @endverbatim
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-12-9
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
@@ -51,133 +71,158 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+#if 0 //@note_#if0
+//page_mapping描述的是一个文件在内存中被映射了多少页,<文件,文件页的关系>
+/* file mapped in VMM pages */
+struct page_mapping {//记录文件页和文件关系的结构体,叫文件页映射
+ LOS_DL_LIST page_list; /* all pages | 链表上挂的是属于该文件的所有FilePage,这些页的内容都来源同一个文件*/
+ SPIN_LOCK_S list_lock; /* lock protecting it | 操作page_list的自旋锁*/
+ LosMux mux_lock; /* mutex lock | 操作page_mapping的互斥量*/
+ unsigned long nrpages; /* number of total pages |page_list的节点数量 */
+ unsigned long flags; ///< @note_why 全量代码中也没查到源码中对其操作
+ Atomic ref; /* reference counting | 引用次数(自增/自减),对应add_mapping/dec_mapping*/
+ struct Vnode *host; /* owner of this mapping | 属于哪个文件的映射*/
+};
+
+
+/* map: full_path(owner) <-> mapping */ //叫文件映射
+struct file_map { //为在内核层面文件在内存的身份证,每个需映射到内存的文件必须创建一个file_map,都挂到全局g_file_mapping链表上
+ LOS_DL_LIST head; ///< 链表节点,用于挂到g_file_mapping上
+ LosMux lock; /* lock to protect this mapping */
+ struct page_mapping mapping; ///< 每个文件都有唯一的page_mapping标识其在内存的身份
+ char *owner; /* owner: full path of file | 文件全路径来标识唯一性*/
+};
+
+#endif
+
+/// 文件页结构体
typedef struct FilePage {
- LOS_DL_LIST node;
- LOS_DL_LIST lru;
- LOS_DL_LIST i_mmap; /* list of mappings */
- UINT32 n_maps; /* num of mapping */
- struct VmPhysSeg *physSeg; /* physical memory that file page belongs to */
- struct VmPage *vmPage;
- struct page_mapping *mapping;
- VM_OFFSET_T pgoff;
- UINT32 flags;
- UINT16 dirtyOff;
- UINT16 dirtyEnd;
+ LOS_DL_LIST node; ///< 节点,节点挂到page_mapping.page_list上,链表以 pgoff 从小到大方式排序.
+ LOS_DL_LIST lru; ///< lru节点, 结合 LosVmPhysSeg: LOS_DL_LIST lruList[VM_NR_LRU_LISTS] 理解
+ LOS_DL_LIST i_mmap; /* list of mappings | 链表记录文件页被哪些进程映射 MapInfo.node挂上来*/
+ UINT32 n_maps; /* num of mapping | 记录被进程映射的次数*/
+ struct VmPhysSeg *physSeg; /* physical memory that file page belongs to | 物理段:物理页框 = 1:N */
+ struct VmPage *vmPage; ///< 物理页框
+ struct page_mapping *mapping; ///< 此结构由文件系统提供,用于描述装入点 见于 ..\third_party\NuttX\include\nuttx\fs\fs.h
+ VM_OFFSET_T pgoff; ///< 页标,文件被切成一页一页读到内存
+ UINT32 flags; ///< 标签
+ UINT16 dirtyOff; ///< 脏页的页内偏移地址
+ UINT16 dirtyEnd; ///< 脏页的结束位置
} LosFilePage;
-
+/// 虚拟地址和文件页的映射信息,在一个进程使用文件页之前,需要提前做好文件页在此内存空间的映射关系,如此通过虚拟内存就可以对文件页读写操作.
typedef struct MapInfo {
- LOS_DL_LIST node;
- VADDR_T vaddr;
- LosFilePage *page;
- LosArchMmu *archMmu;
+ LOS_DL_LIST node; ///< 节点,挂到page->i_mmap链表上.链表上记录要操作文件页的进程对这个page的映射信息
+ VADDR_T vaddr; ///< 虚拟地址.每个进程访问同一个文件页的虚拟地址都是不一样的
+ LosFilePage *page; ///< 文件页中只记录物理地址,是不会变的.但它是需要被多个进程访问,和映射的.
+ LosArchMmu *archMmu; ///< mmu完成vaddr和page->vmPage->physAddr物理地址的映射
} LosMapInfo;
-
+/// Flags由 bitmap 管理
enum OsPageFlags {
- FILE_PAGE_FREE,
- FILE_PAGE_LOCKED,
- FILE_PAGE_REFERENCED,
- FILE_PAGE_DIRTY,
- FILE_PAGE_LRU,
- FILE_PAGE_ACTIVE,
- FILE_PAGE_SHARED,
+ FILE_PAGE_FREE, ///< 空闲页
+ FILE_PAGE_LOCKED, ///< 被锁页
+ FILE_PAGE_REFERENCED, ///< 被引用页
+ FILE_PAGE_DIRTY, ///< 脏页
+ FILE_PAGE_LRU, ///< LRU置换页
+ FILE_PAGE_ACTIVE, ///< 活动页
+ FILE_PAGE_SHARED, ///< 共享页
};
#define PGOFF_MAX 2000
#define MAX_SHRINK_PAGECACHE_TRY 2
-#define VM_FILEMAP_MAX_SCAN (SYS_MEM_SIZE_DEFAULT >> PAGE_SHIFT)
-#define VM_FILEMAP_MIN_SCAN 32
-
+#define VM_FILEMAP_MAX_SCAN (SYS_MEM_SIZE_DEFAULT >> PAGE_SHIFT) ///< 扫描文件映射页最大数量
+#define VM_FILEMAP_MIN_SCAN 32 ///< 扫描文件映射页最小数量
+/// 给页面贴上被锁的标签
STATIC INLINE VOID OsSetPageLocked(LosVmPage *page)
{
LOS_BitmapSet(&page->flags, FILE_PAGE_LOCKED);
}
-
+/// 给页面撕掉被锁的标签
STATIC INLINE VOID OsCleanPageLocked(LosVmPage *page)
{
LOS_BitmapClr(&page->flags, FILE_PAGE_LOCKED);
}
-
+/// 给页面贴上数据被修改的标签
STATIC INLINE VOID OsSetPageDirty(LosVmPage *page)
{
LOS_BitmapSet(&page->flags, FILE_PAGE_DIRTY);
}
-
+/// 给页面撕掉数据被修改的标签
STATIC INLINE VOID OsCleanPageDirty(LosVmPage *page)
{
LOS_BitmapClr(&page->flags, FILE_PAGE_DIRTY);
}
-
+/// 给页面贴上活动的标签
STATIC INLINE VOID OsSetPageActive(LosVmPage *page)
{
LOS_BitmapSet(&page->flags, FILE_PAGE_ACTIVE);
}
-
+/// 给页面撕掉活动的标签
STATIC INLINE VOID OsCleanPageActive(LosVmPage *page)
{
LOS_BitmapClr(&page->flags, FILE_PAGE_ACTIVE);
}
-
+/// 给页面贴上置换页的标签
STATIC INLINE VOID OsSetPageLRU(LosVmPage *page)
{
LOS_BitmapSet(&page->flags, FILE_PAGE_LRU);
}
-
+/// 给页面贴上被释放的标签
STATIC INLINE VOID OsSetPageFree(LosVmPage *page)
{
LOS_BitmapSet(&page->flags, FILE_PAGE_FREE);
}
-
+/// 给页面撕掉被释放的标签
STATIC INLINE VOID OsCleanPageFree(LosVmPage *page)
{
LOS_BitmapClr(&page->flags, FILE_PAGE_FREE);
}
-
+/// 给页面贴上被引用的标签
STATIC INLINE VOID OsSetPageReferenced(LosVmPage *page)
{
LOS_BitmapSet(&page->flags, FILE_PAGE_REFERENCED);
}
-
+/// 给页面撕掉被引用的标签
STATIC INLINE VOID OsCleanPageReferenced(LosVmPage *page)
{
LOS_BitmapClr(&page->flags, FILE_PAGE_REFERENCED);
}
-
+/// 页面是否活动
STATIC INLINE BOOL OsIsPageActive(LosVmPage *page)
{
return BIT_GET(page->flags, FILE_PAGE_ACTIVE);
}
-
+/// 页面是否被锁
STATIC INLINE BOOL OsIsPageLocked(LosVmPage *page)
{
return BIT_GET(page->flags, FILE_PAGE_LOCKED);
}
-
+/// 页面是否被引用,只被一个进程引用的页叫私有页,多个进程引用就是共享页,此为共享内存的本质所在
STATIC INLINE BOOL OsIsPageReferenced(LosVmPage *page)
{
return BIT_GET(page->flags, FILE_PAGE_REFERENCED);
}
-
+/// 页面是否为脏页,所谓脏页就是页内数据是否被更新过,只有脏页才会有写时拷贝
STATIC INLINE BOOL OsIsPageDirty(LosVmPage *page)
{
return BIT_GET(page->flags, FILE_PAGE_DIRTY);
}
-
+/// 文件页是否映射过了
STATIC INLINE BOOL OsIsPageMapped(LosFilePage *page)
{
- return (page->n_maps != 0);
+ return (page->n_maps != 0);//由映射的次数来判断
}
-/* The follow three functions is used to SHM module */
+/*! The following three functions are used by the SHM module | 给页面贴上共享页标签*/
STATIC INLINE VOID OsSetPageShared(LosVmPage *page)
{
- LOS_BitmapSet(&page->flags, FILE_PAGE_SHARED);
+ LOS_BitmapSet(&page->flags, FILE_PAGE_SHARED);//设为共享页面,共享页位 置1
}
-
+/// 给页面撕掉共享页标签
STATIC INLINE VOID OsCleanPageShared(LosVmPage *page)
{
- LOS_BitmapClr(&page->flags, FILE_PAGE_SHARED);
+ LOS_BitmapClr(&page->flags, FILE_PAGE_SHARED);//共享页位 置0
}
-
+/// 是否为共享页
STATIC INLINE BOOL OsIsPageShared(LosVmPage *page)
{
return BIT_GET(page->flags, FILE_PAGE_SHARED);
diff --git a/src/kernel_liteos_a/kernel/base/include/los_vm_iomap.h b/src/kernel_liteos_a/kernel/base/include/los_vm_iomap.h
index bf5fea03..d041b036 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_vm_iomap.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_vm_iomap.h
@@ -41,9 +41,19 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
-enum DmaMemType {
- DMA_CACHE,
- DMA_NOCACHE
+/**
+ * @brief
+ * @verbatim
+ DMA,全称Direct Memory Access,即直接存储器访问。
+ DMA传输将数据从一个地址空间复制到另一个地址空间,提供在外设和存储器之间或者存储器和存储器之间的高速数据传输。
+ DMA的作用就是实现数据的直接传输,而去掉了传统数据传输需要CPU寄存器参与的环节,主要涉及四种情况的数据传输,
+ 但本质上是一样的,都是从内存的某一区域传输到内存的另一区域(外设的数据寄存器本质上就是内存的一个存储单元)
+ * @endverbatim
+ */
+
+enum DmaMemType {
+ DMA_CACHE, ///< 有缓存的DMA
+ DMA_NOCACHE ///< 无缓存的DMA
};
/* thread safety */
diff --git a/src/kernel_liteos_a/kernel/base/include/los_vm_map.h b/src/kernel_liteos_a/kernel/base/include/los_vm_map.h
index 344e1ad8..6ad8167d 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_vm_map.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_vm_map.h
@@ -46,19 +46,43 @@
#include "los_vm_common.h"
struct Vnode;
-
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+#if 0 // @note_#if0
+file结构体来自 ..\third_party\NuttX\include\nuttx\fs\fs.h
+struct file //打开文件的基本表示形式
+{
+ unsigned int f_magicnum; /* file magic number */
+ int f_oflags; /* Open mode flags */
+ FAR struct inode *f_inode; /* Driver interface */
+ loff_t f_pos; /* File position */
+ unsigned long f_refcount; /* reference count */
+ char *f_path; /* File fullpath */
+ void *f_priv; /* Per file driver private data */
+ const char *f_relpath; /* realpath */
+ struct page_mapping *f_mapping; /* mapping file to memory */
+ void *f_dir; /* DIR struct for iterate the directory if open a directory */
+};
+struct page_mapping {
+ LOS_DL_LIST page_list; /* all pages */
+ SPIN_LOCK_S list_lock; /* lock protecting it */
+ LosMux mux_lock; /* mutex lock */
+ unsigned long nrpages; /* number of total pages */
+ unsigned long flags;
+ Atomic ref; /* reference counting */
+ struct file *host; /* owner of this mapping */
+};
+#endif
/* If the kernel malloc size is less than 16k, use heap, otherwise use physical pages */
#define KMALLOC_LARGE_SIZE (PAGE_SIZE << 2)
-typedef struct VmMapRange {
- VADDR_T base; /**< vm region base addr */
- UINT32 size; /**< vm region size */
+typedef struct VmMapRange {//线性区范围结构体
+ VADDR_T base; /**< vm region base addr | 线性区基地址*/
+ UINT32 size; /**< vm region size | 线性区大小*/
} LosVmMapRange;
struct VmMapRegion;
@@ -67,104 +91,115 @@ struct VmFileOps;
typedef struct VmFileOps LosVmFileOps;
struct VmSpace;
typedef struct VmSpace LosVmSpace;
-
+/// 缺页结构信息体
typedef struct VmFault {
- UINT32 flags; /* FAULT_FLAG_xxx flags */
- unsigned long pgoff; /* Logical page offset based on region */
- VADDR_T vaddr; /* Faulting virtual address */
- VADDR_T *pageKVaddr; /* KVaddr of pagefault's vm page's paddr */
+ UINT32 flags; /*! FAULT_FLAG_xxx flags | 缺页标识*/
+ unsigned long pgoff; /*! Logical page offset based on region | 基于线性区的逻辑页偏移量*/
+ VADDR_T vaddr; /*! Faulting virtual address | 产生缺页的虚拟地址*/
+ VADDR_T *pageKVaddr; /*! KVaddr of pagefault's vm page's paddr | page cache中的虚拟地址*/
} LosVmPgFault;
-
+/// 虚拟内存文件操作函数指针,上层开发可理解为 class 里的方法,注意是对线性区的操作 , 文件操作 见于g_commVmOps
struct VmFileOps {
- void (*open)(struct VmMapRegion *region);
- void (*close)(struct VmMapRegion *region);
- int (*fault)(struct VmMapRegion *region, LosVmPgFault *pageFault);
- void (*remove)(struct VmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T offset);
+ void (*open)(struct VmMapRegion *region); ///< 打开
+ void (*close)(struct VmMapRegion *region); ///< 关闭
+ int (*fault)(struct VmMapRegion *region, LosVmPgFault *pageFault); ///< 缺页,OsVmmFileFault
+ void (*remove)(struct VmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T offset); ///< 删除 OsVmmFileRemove
};
+/*!
+ 线性区描述符,内核通过线性区管理虚拟地址,而线性地址就是虚拟地址
+ 在内核里,用户空间的进程要访问内存或磁盘里的数据要通过映射的方式将内存的物理地址和用户空间的虚拟地址联系起来.
+ 用户通过访问这样的虚拟地址就可以访问到实际的物理地址,也就是实际的物理内存. 映射在实现虚拟地址到物理地址中扮演
+ 重要角色. 内核中映射分为文件映射和匿名映射. 文件映射就是磁盘中的数据通过文件系统映射到内存再通过文件映射映射到
+ 虚拟空间.这样,用户就可以在用户空间通过 open ,read, write 等函数去操作文件内容. 匿名映射就是用户空间需要分配一定
+ 的物理内存来存储数据,这部分内存不属于任何文件,内核就使用匿名映射将内存中的 某段物理地址与用户空间一一映射,
+ 这样用户就可用直接操作虚拟地址来范围这段物理内存. 至于实际的代码,文件映射的操作就是: open,read,write,close,mmap...
+ 操作的虚拟地址都属于文件映射. malloc 分配的虚拟地址属于匿名映射.
+*/
struct VmMapRegion {
- LosRbNode rbNode; /**< region red-black tree node */
- LosVmSpace *space;
- LOS_DL_LIST node; /**< region dl list */
- LosVmMapRange range; /**< region address range */
- VM_OFFSET_T pgOff; /**< region page offset to file */
- UINT32 regionFlags; /**< region flags: cow, user_wired */
- UINT32 shmid; /**< shmid about shared region */
- UINT8 forkFlags; /**< vm space fork flags: COPY, ZERO, */
- UINT8 regionType; /**< vm region type: ANON, FILE, DEV */
- union {
- struct VmRegionFile {
- int f_oflags;
- struct Vnode *vnode;
- const LosVmFileOps *vmFOps;
+ LosRbNode rbNode; /**< region red-black tree node | 红黑树节点,通过它将本线性区挂在VmSpace.regionRbTree*/
+ LosVmSpace *space; ///< 所属虚拟空间,虚拟空间由多个线性区组成
+ LOS_DL_LIST node; /**< region dl list | 链表节点,通过它将本线性区挂在VmSpace.regions上 ,但最新版本没有regions了,可以删除了 */
+ LosVmMapRange range; /**< region address range | 记录线性区的范围*/
+ VM_OFFSET_T pgOff; /**< region page offset to file | 以文件开始处的偏移量, 必须是分页大小的整数倍, 通常为0, 表示从文件头开始映射。*/
+ UINT32 regionFlags; /**< region flags: cow, user_wired | 线性区标签*/
+ UINT32 shmid; /**< shmid about shared region | shmid为共享线性区id,id背后就是共享线性区*/
+ UINT8 forkFlags; /**< vm space fork flags: COPY, ZERO, | 线性区标记方式*/
+ UINT8 regionType; /**< vm region type: ANON, FILE, DEV | 映射类型是匿名,文件,还是设备,所谓匿名可理解为内存映射*/
+ union {
+ struct VmRegionFile {// <磁盘文件 , 物理内存, 用户进程虚拟地址空间 >
+ int f_oflags; ///< 读写标签
+ struct Vnode *vnode;///< 文件索引节点
+ const LosVmFileOps *vmFOps;///< 文件处理各操作接口,open,read,write,close,mmap
} rf;
- struct VmRegionAnon {
- LOS_DL_LIST node; /**< region LosVmPage list */
+ //匿名映射是指那些没有关联到文件页,如进程堆、栈、数据段和任务已修改的共享库等与物理内存的映射
+ struct VmRegionAnon {//
+ LOS_DL_LIST node; /**< region LosVmPage list | 线性区虚拟页链表*/
} ra;
- struct VmRegionDev {
- LOS_DL_LIST node; /**< region LosVmPage list */
- const LosVmFileOps *vmFOps;
+ struct VmRegionDev {//设备映射,也是一种文件
+ LOS_DL_LIST node; /**< region LosVmPage list | 线性区虚拟页链表*/
+ const LosVmFileOps *vmFOps; ///< 操作设备像操作文件一样方便.
} rd;
} unTypeData;
};
-
+/// 虚拟空间,每个进程都有一个属于自己的虚拟内存地址空间
typedef struct VmSpace {
- LOS_DL_LIST node; /**< vm space dl list */
- LosRbTree regionRbTree; /**< region red-black tree root */
- LosMux regionMux; /**< region red-black tree mutex lock */
- VADDR_T base; /**< vm space base addr */
- UINT32 size; /**< vm space size */
- VADDR_T heapBase; /**< vm space heap base address */
- VADDR_T heapNow; /**< vm space heap base now */
- LosVmMapRegion *heap; /**< heap region */
- VADDR_T mapBase; /**< vm space mapping area base */
- UINT32 mapSize; /**< vm space mapping area size */
- LosArchMmu archMmu; /**< vm mapping physical memory */
+ LOS_DL_LIST node; /**< vm space dl list | 节点,通过它挂到全局虚拟空间 g_vmSpaceList 链表上*/
+ LosRbTree regionRbTree; /**< region red-black tree root | 采用红黑树方式管理本空间各个线性区*/
+ LosMux regionMux; /**< region list mutex lock | 虚拟空间操作红黑树互斥锁*/
+ VADDR_T base; /**< vm space base addr | 虚拟空间的基地址,线性区的分配范围,常用于判断地址是否在内核还是用户空间*/
+ UINT32 size; /**< vm space size | 虚拟空间大小*/
+ VADDR_T heapBase; /**< vm space heap base address | 堆区基地址,指向堆区起点*/
+ VADDR_T heapNow; /**< vm space heap base now | 堆顶地址,指向堆区终点,do_brk()直接修改堆的大小返回新的堆区结束地址, heapNow >= heapBase*/
+ LosVmMapRegion *heap; /**< heap region | 传说中的堆区,用于满足进程的动态内存需求,大家熟知的malloc,realloc,free其实就是在操作这个区*/
+ VADDR_T mapBase; /**< vm space mapping area base | 虚拟空间映射区基地址,L1,L2表存放在这个区 */
+ UINT32 mapSize; /**< vm space mapping area size | 虚拟空间映射区大小,映射区是个很大的区。*/
+ LosArchMmu archMmu; /**< vm mapping physical memory | MMU记录<虚拟地址,物理地址>的映射情况 */
#ifdef LOSCFG_DRIVERS_TZDRIVER
- VADDR_T codeStart; /**< user process code area start */
- VADDR_T codeEnd; /**< user process code area end */
+ VADDR_T codeStart; /**< user process code area start | 代码区开始位置 */
+ VADDR_T codeEnd; /**< user process code area end | 代码区结束位置 */
#endif
} LosVmSpace;
-#define VM_MAP_REGION_TYPE_NONE (0x0)
-#define VM_MAP_REGION_TYPE_ANON (0x1)
-#define VM_MAP_REGION_TYPE_FILE (0x2)
-#define VM_MAP_REGION_TYPE_DEV (0x4)
-#define VM_MAP_REGION_TYPE_MASK (0x7)
+#define VM_MAP_REGION_TYPE_NONE (0x0) ///< 初始化使用
+#define VM_MAP_REGION_TYPE_ANON (0x1) ///< 匿名映射线性区
+#define VM_MAP_REGION_TYPE_FILE (0x2) ///< 文件映射线性区
+#define VM_MAP_REGION_TYPE_DEV (0x4) ///< 设备映射线性区
+#define VM_MAP_REGION_TYPE_MASK (0x7) ///< 映射线性区掩码
/* the high 8 bits(24~31) should reserved, shm will use it */
-#define VM_MAP_REGION_FLAG_CACHED (0<<0)
-#define VM_MAP_REGION_FLAG_UNCACHED (1<<0)
+#define VM_MAP_REGION_FLAG_CACHED (0<<0) ///< 缓冲区
+#define VM_MAP_REGION_FLAG_UNCACHED (1<<0) ///< 非缓冲区
#define VM_MAP_REGION_FLAG_UNCACHED_DEVICE (2<<0) /* only exists on some arches, otherwise UNCACHED */
#define VM_MAP_REGION_FLAG_STRONGLY_ORDERED (3<<0) /* only exists on some arches, otherwise UNCACHED */
-#define VM_MAP_REGION_FLAG_CACHE_MASK (3<<0)
-#define VM_MAP_REGION_FLAG_PERM_USER (1<<2)
-#define VM_MAP_REGION_FLAG_PERM_READ (1<<3)
-#define VM_MAP_REGION_FLAG_PERM_WRITE (1<<4)
-#define VM_MAP_REGION_FLAG_PERM_EXECUTE (1<<5)
-#define VM_MAP_REGION_FLAG_PROT_MASK (0xF<<2)
-#define VM_MAP_REGION_FLAG_NS (1<<6) /* NON-SECURE */
-#define VM_MAP_REGION_FLAG_SHARED (1<<7)
-#define VM_MAP_REGION_FLAG_PRIVATE (1<<8)
-#define VM_MAP_REGION_FLAG_FLAG_MASK (3<<7)
-#define VM_MAP_REGION_FLAG_STACK (1<<9)
-#define VM_MAP_REGION_FLAG_HEAP (1<<10)
-#define VM_MAP_REGION_FLAG_DATA (1<<11)
-#define VM_MAP_REGION_FLAG_TEXT (1<<12)
-#define VM_MAP_REGION_FLAG_BSS (1<<13)
-#define VM_MAP_REGION_FLAG_VDSO (1<<14)
-#define VM_MAP_REGION_FLAG_MMAP (1<<15)
-#define VM_MAP_REGION_FLAG_SHM (1<<16)
-#define VM_MAP_REGION_FLAG_FIXED (1<<17)
-#define VM_MAP_REGION_FLAG_FIXED_NOREPLACE (1<<18)
+#define VM_MAP_REGION_FLAG_CACHE_MASK (3<<0) ///< 缓冲区掩码
+#define VM_MAP_REGION_FLAG_PERM_USER (1<<2) ///< 用户空间永久区,PERM表示常驻区,可理解为非栈,非堆区
+#define VM_MAP_REGION_FLAG_PERM_READ (1<<3) ///< 永久可读取区
+#define VM_MAP_REGION_FLAG_PERM_WRITE (1<<4) ///< 永久可写入区
+#define VM_MAP_REGION_FLAG_PERM_EXECUTE (1<<5) ///< 永久可被执行区
+#define VM_MAP_REGION_FLAG_PROT_MASK (0xF<<2) ///< 访问权限掩码
+#define VM_MAP_REGION_FLAG_NS (1<<6) /* NON-SECURE */
+#define VM_MAP_REGION_FLAG_SHARED (1<<7) ///< MAP_SHARED:把对该内存段的修改保存到磁盘文件中 详见 OsCvtProtFlagsToRegionFlags ,要和 VM_MAP_REGION_FLAG_SHM区别理解
+#define VM_MAP_REGION_FLAG_PRIVATE (1<<8) ///< MAP_PRIVATE:内存段私有,对它的修改值仅对本进程有效,详见 OsCvtProtFlagsToRegionFlags。
+#define VM_MAP_REGION_FLAG_FLAG_MASK (3<<7) ///< 掩码
+#define VM_MAP_REGION_FLAG_STACK (1<<9) ///< 线性区的类型:栈区
+#define VM_MAP_REGION_FLAG_HEAP (1<<10) ///< 线性区的类型:堆区
+#define VM_MAP_REGION_FLAG_DATA (1<<11) ///< data数据区 编译在ELF中
+#define VM_MAP_REGION_FLAG_TEXT (1<<12) ///< 代码区
+#define VM_MAP_REGION_FLAG_BSS (1<<13) ///< bbs数据区 由运行时动态分配,bss段(Block Started by Symbol segment)通常是指用来存放程序中未初始化的全局变量的一块内存区域。
+#define VM_MAP_REGION_FLAG_VDSO (1<<14) ///< VDSO(Virtual Dynamic Shared Object,虚拟动态共享库)由内核提供的虚拟.so文件,它不在磁盘上,而在内核里,内核将其映射到一个地址空间中,被所有程序共享,正文段大小为一个页面。
+#define VM_MAP_REGION_FLAG_MMAP (1<<15) ///< 映射区,虚拟空间内有专门用来存储<虚拟地址-物理地址>映射的区域
+#define VM_MAP_REGION_FLAG_SHM (1<<16) ///< 共享内存区, 被多个进程线性区映射
+#define VM_MAP_REGION_FLAG_FIXED (1<<17) ///< MAP_FIXED:将线性区映射到调用者指定的固定虚拟地址
+#define VM_MAP_REGION_FLAG_FIXED_NOREPLACE (1<<18) ///< MAP_FIXED_NOREPLACE:固定地址映射,但若该地址范围已有映射则失败而不覆盖
#define VM_MAP_REGION_FLAG_INVALID (1<<19) /* indicates that flags are not specified */
-
+/// 从外部权限标签转化为线性区权限标签
STATIC INLINE UINT32 OsCvtProtFlagsToRegionFlags(unsigned long prot, unsigned long flags)
{
UINT32 regionFlags = 0;
- regionFlags |= VM_MAP_REGION_FLAG_PERM_USER;
- regionFlags |= (prot & PROT_READ) ? VM_MAP_REGION_FLAG_PERM_READ : 0;
+ regionFlags |= VM_MAP_REGION_FLAG_PERM_USER; //必须是用户空间区
+ regionFlags |= (prot & PROT_READ) ? VM_MAP_REGION_FLAG_PERM_READ : 0; //映射区可被读
regionFlags |= (prot & PROT_WRITE) ? (VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE) : 0;
regionFlags |= (prot & PROT_EXEC) ? (VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_EXECUTE) : 0;
regionFlags |= (flags & MAP_SHARED) ? VM_MAP_REGION_FLAG_SHARED : 0;
@@ -174,86 +209,87 @@ STATIC INLINE UINT32 OsCvtProtFlagsToRegionFlags(unsigned long prot, unsigned lo
return regionFlags;
}
-
+/// 虚拟地址是否在内核空间
STATIC INLINE BOOL LOS_IsKernelAddress(VADDR_T vaddr)
{
return ((vaddr >= (VADDR_T)KERNEL_ASPACE_BASE) &&
(vaddr <= ((VADDR_T)KERNEL_ASPACE_BASE + ((VADDR_T)KERNEL_ASPACE_SIZE - 1))));
}
-
+/// 给定地址范围是否都在内核空间中
STATIC INLINE BOOL LOS_IsKernelAddressRange(VADDR_T vaddr, size_t len)
{
return (vaddr + len > vaddr) && LOS_IsKernelAddress(vaddr) && (LOS_IsKernelAddress(vaddr + len - 1));
}
-
+/// 获取线性区的结束地址
STATIC INLINE VADDR_T LOS_RegionEndAddr(LosVmMapRegion *region)
{
return (region->range.base + region->range.size - 1);
}
-
+/// 获取线性区大小
STATIC INLINE size_t LOS_RegionSize(VADDR_T start, VADDR_T end)
{
return (end - start + 1);
}
-
+/// 是否为文件映射区
STATIC INLINE BOOL LOS_IsRegionTypeFile(LosVmMapRegion* region)
{
return region->regionType == VM_MAP_REGION_TYPE_FILE;
}
-
+/// permanent 用户进程永久/常驻区
STATIC INLINE BOOL LOS_IsRegionPermUserReadOnly(LosVmMapRegion* region)
{
return ((region->regionFlags & VM_MAP_REGION_FLAG_PROT_MASK) ==
(VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ));
}
-
+/// 是否为私有线性区
STATIC INLINE BOOL LOS_IsRegionFlagPrivateOnly(LosVmMapRegion* region)
{
return ((region->regionFlags & VM_MAP_REGION_FLAG_FLAG_MASK) == VM_MAP_REGION_FLAG_PRIVATE);
}
-
+/// 设置线性区为文件映射
STATIC INLINE VOID LOS_SetRegionTypeFile(LosVmMapRegion* region)
{
region->regionType = VM_MAP_REGION_TYPE_FILE;
}
-
+/// 是否为设备映射线性区 /dev/...
STATIC INLINE BOOL LOS_IsRegionTypeDev(LosVmMapRegion* region)
{
return region->regionType == VM_MAP_REGION_TYPE_DEV;
}
-
+/// 设为设备映射线性区
STATIC INLINE VOID LOS_SetRegionTypeDev(LosVmMapRegion* region)
{
region->regionType = VM_MAP_REGION_TYPE_DEV;
}
-
+/// 是否为匿名swap映射线性区
STATIC INLINE BOOL LOS_IsRegionTypeAnon(LosVmMapRegion* region)
{
return region->regionType == VM_MAP_REGION_TYPE_ANON;
}
-
+/// 设为匿名swap映射线性区
STATIC INLINE VOID LOS_SetRegionTypeAnon(LosVmMapRegion* region)
{
region->regionType = VM_MAP_REGION_TYPE_ANON;
}
-
+/// 虚拟地址是否在用户空间
STATIC INLINE BOOL LOS_IsUserAddress(VADDR_T vaddr)
{
return ((vaddr >= USER_ASPACE_BASE) &&
(vaddr <= (USER_ASPACE_BASE + (USER_ASPACE_SIZE - 1))));
}
-
+/// 虚拟地址[vaddr,vaddr + len]是否在用户空间
STATIC INLINE BOOL LOS_IsUserAddressRange(VADDR_T vaddr, size_t len)
{
return (vaddr + len > vaddr) && LOS_IsUserAddress(vaddr) && (LOS_IsUserAddress(vaddr + len - 1));
}
+//是否是一个动态分配的地址(通过vmalloc申请的)
STATIC INLINE BOOL LOS_IsVmallocAddress(VADDR_T vaddr)
{
return ((vaddr >= VMALLOC_START) &&
(vaddr <= (VMALLOC_START + (VMALLOC_SIZE - 1))));
}
-
+/// 是否为一个空线性区
STATIC INLINE BOOL OsIsVmRegionEmpty(LosVmSpace *vmSpace)
{
if (vmSpace->regionRbTree.ulNodes == 0) {
@@ -294,7 +330,6 @@ STATUS_T LOS_VmSpaceClone(UINT32 cloneFlags, LosVmSpace *oldVmSpace, LosVmSpace
LosMux *OsGVmSpaceMuxGet(VOID);
STATUS_T OsUnMMap(LosVmSpace *space, VADDR_T addr, size_t size);
STATUS_T OsVmSpaceRegionFree(LosVmSpace *space);
-
/**
* thread safety
* it is used to malloc continuous virtual memory, no sure for continuous physical memory.
diff --git a/src/kernel_liteos_a/kernel/base/include/los_vm_page.h b/src/kernel_liteos_a/kernel/base/include/los_vm_page.h
index c1aa9413..22dc31f5 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_vm_page.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_vm_page.h
@@ -44,21 +44,28 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
+/*!
+ * @brief 物理页框描述符 虚拟内存体现的是程序对内存资源的需求,而物理内存是对该请求的供应。
+ 伙伴算法的思想是:把内存中连续的空闲页框空间看成是空闲页框块,并按照它们的大小(连续页框的数目)分组
+ * @attention vmPage 中并没有虚拟地址,只有物理地址
+ \n 关于nPages和order的关系说明,当请求分配为5页时,order是等于3的,因为只有2^3才能满足5页的请求
+ */
typedef struct VmPage {
- LOS_DL_LIST node; /**< vm object dl list */
- PADDR_T physAddr; /**< vm page physical addr */
- Atomic refCounts; /**< vm page ref count */
- UINT32 flags; /**< vm page flags */
- UINT8 order; /**< vm page in which order list */
- UINT8 segID; /**< the segment id of vm page */
- UINT16 nPages; /**< the vm page is used for kernel heap */
+ LOS_DL_LIST node; /**< vm object dl list | 物理内框节点,通过它挂/摘到全局g_vmPhysSeg[segID]->freeList[order]物理页框链表 或被使用的链表
+ 上, 例如 共享内存的shmIDSource.node*/
+ PADDR_T physAddr; /**< vm page physical addr | 物理页框起始物理地址,只能用于计算,不会用于操作(读/写数据==)*/
+ Atomic refCounts; /**< vm page ref count | 被引用次数,共享内存会有多次引用*/
+ UINT32 flags; /**< vm page flags | 页标签,同时可以有多个标签(共享/引用/活动/被锁==)*/
+ UINT8 order; /**< vm page in which order list | 被安置在伙伴算法的几号序列( 2^0,2^1,2^2,...,2^order)*/
+ UINT8 segID; /**< the segment id of vm page | 所属物理内存段编号ID*/
+ UINT16 nPages; /**< the vm page is used for kernel heap | 分配页数,标识从本页开始连续的几页将一块被分配*/
#ifdef LOSCFG_PAGE_TABLE_FINE_LOCK
SPIN_LOCK_S lock; /**< lock for page table entry */
#endif
} LosVmPage;
-extern LosVmPage *g_vmPageArray;
-extern size_t g_vmPageArraySize;
+extern LosVmPage *g_vmPageArray; ///< 物理页框(page frame)池,在g_vmPageArray中:不可能存在两个物理地址一样的物理页框,
+extern size_t g_vmPageArraySize; ///< 物理总页框(page frame)数
LosVmPage *LOS_VmPageGet(PADDR_T paddr);
VOID OsVmPageStartup(VOID);
diff --git a/src/kernel_liteos_a/kernel/base/include/los_vm_phys.h b/src/kernel_liteos_a/kernel/base/include/los_vm_phys.h
index 75bcab08..ef362177 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_vm_phys.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_vm_phys.h
@@ -43,52 +43,65 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
-#define VM_LIST_ORDER_MAX 9
-#define VM_PHYS_SEG_MAX 32
+/*!
+ * @brief
+ * @verbatim
+ LRU是Least Recently Used的缩写,即最近最少使用页面置换算法,是为虚拟页式存储管理服务的,
+ 是根据页面调入内存后的使用情况进行决策了。由于无法预测各页面将来的使用情况,只能利用
+ “最近的过去”作为“最近的将来”的近似,因此,LRU算法就是将最近最久未使用的页面予以淘汰。
+ * @endverbatim
+ */
+#define VM_LIST_ORDER_MAX 9 ///< 伙伴算法分组数量,从 2^0,2^1,...,2^8 (256*4K)=1M
+#define VM_PHYS_SEG_MAX 32 ///< 最大支持32个段
#ifndef min
-#define min(x, y) ((x) < (y) ? (x) : (y))
+#define min(x, y) ((x) < (y) ? (x) : (y))
#endif
-#define VM_PAGE_TO_PHYS(page) ((page)->physAddr)
-#define VM_ORDER_TO_PAGES(order) (1 << (order))
-#define VM_ORDER_TO_PHYS(order) (1 << (PAGE_SHIFT + (order)))
-#define VM_PHYS_TO_ORDER(phys) (min(LOS_LowBitGet((phys) >> PAGE_SHIFT), VM_LIST_ORDER_MAX - 1))
+#define VM_PAGE_TO_PHYS(page) ((page)->physAddr) ///< 获取物理页框的物理基地址
+#define VM_ORDER_TO_PAGES(order) (1 << (order)) ///< 伙伴算法由order 定位到该块组的页面单位,例如:order=2时,page[4]
+#define VM_ORDER_TO_PHYS(order) (1 << (PAGE_SHIFT + (order))) ///< 通过order块组跳到物理地址
+#define VM_PHYS_TO_ORDER(phys) (min(LOS_LowBitGet((phys) >> PAGE_SHIFT), VM_LIST_ORDER_MAX - 1)) ///< 通过物理地址定位到order
struct VmFreeList {
- LOS_DL_LIST node;
- UINT32 listCnt;
+ LOS_DL_LIST node; ///< 双循环链表用于挂空闲物理内框节点,通过 VmPage->node 挂上来
+ UINT32 listCnt; ///< 空闲物理页总数
};
-enum OsLruList {
- VM_LRU_INACTIVE_ANON = 0,
- VM_LRU_ACTIVE_ANON,
- VM_LRU_INACTIVE_FILE,
- VM_LRU_ACTIVE_FILE,
- VM_LRU_UNEVICTABLE,
+/*!
+ * @brief Lru全称是Least Recently Used,即最近最久未使用的意思 针对匿名页和文件页各拆成对应链表。
+ */
+enum OsLruList {// 页属性
+ VM_LRU_INACTIVE_ANON = 0, ///< 非活动匿名页(swap)
+ VM_LRU_ACTIVE_ANON, ///< 活动匿名页(swap)
+ VM_LRU_INACTIVE_FILE, ///< 非活动文件页(磁盘)
+ VM_LRU_ACTIVE_FILE, ///< 活动文件页(磁盘)
+ VM_LRU_UNEVICTABLE, ///< 禁止换出的页
VM_NR_LRU_LISTS
};
-
+/*!
+ * @brief 物理段描述符
+ */
typedef struct VmPhysSeg {
- PADDR_T start; /* The start of physical memory area */
- size_t size; /* The size of physical memory area */
- LosVmPage *pageBase; /* The first page address of this area */
-
- SPIN_LOCK_S freeListLock; /* The buddy list spinlock */
- struct VmFreeList freeList[VM_LIST_ORDER_MAX]; /* The free pages in the buddy list */
-
- SPIN_LOCK_S lruLock;
- size_t lruSize[VM_NR_LRU_LISTS];
- LOS_DL_LIST lruList[VM_NR_LRU_LISTS];
+ PADDR_T start; /* The start of physical memory area | 物理内存段的开始地址*/
+ size_t size; /* The size of physical memory area | 物理内存段的大小*/
+ LosVmPage *pageBase; /* The first page address of this area | 本段首个物理页框地址*/
+ SPIN_LOCK_S freeListLock; /* The buddy list spinlock | 伙伴算法自旋锁,用于操作freeList链表*/
+ struct VmFreeList freeList[VM_LIST_ORDER_MAX]; /* The free pages in the buddy list | 伙伴算法的分组,默认分成9组 2^0,2^1,...,2^(VM_LIST_ORDER_MAX-1)*/
+ SPIN_LOCK_S lruLock; ///< 用于置换的自旋锁,用于操作lruList
+ size_t lruSize[VM_NR_LRU_LISTS]; ///< 5个双循环链表大小,如此方便得到size
+ LOS_DL_LIST lruList[VM_NR_LRU_LISTS]; ///< 页面置换算法,5个双循环链表头,它们分别描述五种不同类型的链表
} LosVmPhysSeg;
-
+/*!
+ * @brief 物理区描述,仅用于方案商配置范围使用
+ */
struct VmPhysArea {
- PADDR_T start;
- size_t size;
+ PADDR_T start; ///< 物理内存区基地址
+ size_t size; ///< 物理内存总大小
};
-extern struct VmPhysSeg g_vmPhysSeg[VM_PHYS_SEG_MAX];
-extern INT32 g_vmPhysSegNum;
+extern struct VmPhysSeg g_vmPhysSeg[VM_PHYS_SEG_MAX]; ///< 物理内存采用段页式管理,先切段后伙伴算法页
+extern INT32 g_vmPhysSegNum; ///< 段总数
UINT32 OsVmPagesToOrder(size_t nPages);
struct VmPhysSeg *OsVmPhysSegGet(LosVmPage *page);
diff --git a/src/kernel_liteos_a/kernel/base/include/los_vm_syscall.h b/src/kernel_liteos_a/kernel/base/include/los_vm_syscall.h
index dbcb3d12..2744db83 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_vm_syscall.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_vm_syscall.h
@@ -45,12 +45,12 @@
extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
-
+//是否非匿名映射 文件映射:映射和实际文件相关联,通常是把文件的内容映射到进程地址空间,这样应用程序就可以像操作进程地址空间一样读写文件。
STATIC INLINE BOOL LOS_IsNamedMapping(unsigned long flags)
{
return ((flags & MAP_ANONYMOUS) == 0);
}
-
+///是否匿名映射 匿名映射:没有映射对应的相关文件,这种映射的内存区域的内容会被初始化为0
STATIC INLINE BOOL LOS_IsAnonymousMapping(unsigned long flags)
{
return ((flags & MAP_ANONYMOUS) == MAP_ANONYMOUS);
diff --git a/src/kernel_liteos_a/kernel/base/include/los_vm_zone.h b/src/kernel_liteos_a/kernel/base/include/los_vm_zone.h
index 3e3a9938..b8fb1280 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_vm_zone.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_vm_zone.h
@@ -1,3 +1,91 @@
+/*!
+ * @file los_vm_zone.h
+ * @brief
+ * @link
+ @verbatim
+ 虚拟地址空间全景图 从 0x00000000U 至 0xFFFFFFFFU ,外设和主存采用统一编址方式 @note_pic
+ 鸿蒙源码分析系列篇: http://weharmonyos.com | https://my.oschina.net/weharmony
+
+ +----------------------------+ 0xFFFFFFFFU
+ | IO设备未缓存 |
+ | PERIPH_PMM_SIZE |
+ +----------------------------+ 外围设备未缓存基地址 PERIPH_UNCACHED_BASE
+ | IO设备缓存 |
+ | PERIPH_PMM_SIZE |
+ +----------------------------+ 外围设备缓存基地址 PERIPH_CACHED_BASE
+ | 包括 IO设备 |
+ | PERIPH_PMM_SIZE |
+ +----------------------------+ 外围设备基地址 PERIPH_DEVICE_BASE
+ | Vmalloc 空间 |
+ | 内核栈 内核堆 |内核动态分配空间
+ | 128M |
+ | 映射区 |
+ +----------------------------+ 内核动态分配开始地址 VMALLOC_START
+ | DDR_MEM_SIZE |
+ | Uncached段 |
+ +----------------------------+ 未缓存虚拟空间基地址 UNCACHED_VMM_BASE = KERNEL_VMM_BASE + KERNEL_VMM_SIZE
+ | 内核虚拟空间 |
+ | mmu table 临时页表 |临时页表的作用详见开机阶段汇编代码注释
+ | .bss |Block Started by Symbol : 未初始化的全局变量,内核映射页表所在区 g_firstPageTable,这个表在内核启动后更新
+ | .data 可读写数据区 |
+ | .rodata 只读数据区 |
+ | .text 代码区 |
+ | vectors 中断向量表 |
+ +----------------------------+ 内核空间开始地址 KERNEL_ASPACE_BASE = KERNEL_VMM_BASE
+ | 16M预留区 |
+ +----------------------------+ 用户空间栈顶 USER_ASPACE_TOP_MAX = USER_ASPACE_BASE + USER_ASPACE_SIZE
+ | |
+ | 用户空间 |
+ | USER_ASPACE_SIZE |
+ | 用户栈区(stack) |
+ | 映射区(map) |
+ | 堆区 (heap) |
+ | .bss |
+ | .data .text |
+ +----------------------------+ 用户空间开始地址 USER_ASPACE_BASE
+ | 16M预留区 |
+ +----------------------------+ 0x00000000U
+
+ 以下定义 可见于 ..\vendor\hi3516dv300\config\board\include\board.h
+
+ 在liteos_a中, KERNEL_VADDR_BASE 是一个很常用的地址, 可以叫内核的运行起始地址
+ 内核的运行地址就是内核设计者希望内核运行时在内存中的位置,这个地址在内核源码中有地方可以配置,
+ 并且链接脚本里也会用到这个地址,编译代码时所用到的跟地址相关的值都是以内核运行基址为基础进行计算的。
+ 在liteos_a中,内核运行基址是在各个板子的board.h中配置的
+
+
+ #ifdef LOSCFG_KERNEL_MMU
+ #ifdef LOSCFG_TEE_ENABLE
+ #define KERNEL_VADDR_BASE 0x41000000
+ #else
+ #define KERNEL_VADDR_BASE 0x40000000
+ #endif
+ #else
+ #define KERNEL_VADDR_BASE DDR_MEM_ADDR
+ #endif
+ #define KERNEL_VADDR_SIZE DDR_MEM_SIZE
+
+ #define SYS_MEM_BASE DDR_MEM_ADDR
+ #define SYS_MEM_END (SYS_MEM_BASE + SYS_MEM_SIZE_DEFAULT)
+
+ #define EXC_INTERACT_MEM_SIZE 0x100000
+
+ 内核空间范围: 0x40000000 ~ 0xFFFFFFFF
+ 用户空间范围: 0x00000000 ~ 0x3FFFFFFF
+
+ cached地址和uncached地址的区别是
+ 对cached地址的访问是委托给CPU进行的,也就是说你的操作到底是提交给真正的外设或内存,还是转到CPU缓存,
+ 是由CPU决定的。CPU有一套缓存策略来决定什么时候从缓存中读取数据,什么时候同步缓存。
+ 对uncached地址的访问是告诉CPU忽略缓存,访问操作直接反映到外设或内存上。
+ 对于IO设备一定要用uncached地址访问,是因为你的IO输出操作肯定是希望立即反映到IO设备上,不希望让CPU缓存你的操作;
+ 另一方面,IO设备的状态是独立于CPU的,也就是说IO口状态的改变CPU是不知道,这样就导致缓存和外设的内容不一致,
+ 你从IO设备读取数据时,肯定是希望直接读取IO设备的当前状态,而不是CPU缓存的过期值。
+
+ @endverbatim
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-11-30
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
@@ -40,44 +128,47 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
-#ifdef LOSCFG_KERNEL_MMU
+
+
+#ifdef LOSCFG_KERNEL_MMU //
#ifdef LOSCFG_TEE_ENABLE
-#define KERNEL_VADDR_BASE 0x41000000
+#define KERNEL_VADDR_BASE 0x41000000 //用于链接器层面的宏配置 | 基地址
#else
-#define KERNEL_VADDR_BASE 0x40000000
+#define KERNEL_VADDR_BASE 0x40000000
#endif
-#else
-#define KERNEL_VADDR_BASE DDR_MEM_ADDR
+#else //没有MMU时,内核运行基址 = 内存的基地址,因为没有了MMU,所以也没有了虚拟内存和物理内存的说法,统一就是物理内存.
+#define KERNEL_VADDR_BASE DDR_MEM_ADDR ///< 内核运行基址 等于内存( Double Data Rate SDRAM)基地址
#endif
-#define KERNEL_VADDR_SIZE DDR_MEM_SIZE
+#define KERNEL_VADDR_SIZE DDR_MEM_SIZE ///< 真实主存的大小
+
+#define SYS_MEM_BASE DDR_MEM_ADDR ///< 物理内存基地址
+#define SYS_MEM_END (SYS_MEM_BASE + SYS_MEM_SIZE_DEFAULT) ///< 物理内存结束地址
-#define SYS_MEM_BASE DDR_MEM_ADDR
-#define SYS_MEM_END (SYS_MEM_BASE + SYS_MEM_SIZE_DEFAULT)
#define _U32_C(X) X##U
#define U32_C(X) _U32_C(X)
-#define KERNEL_VMM_BASE U32_C(KERNEL_VADDR_BASE)
-#define KERNEL_VMM_SIZE U32_C(KERNEL_VADDR_SIZE)
+#define KERNEL_VMM_BASE U32_C(KERNEL_VADDR_BASE) ///< 内核内存管理层面的宏配置 | 基地址
+#define KERNEL_VMM_SIZE U32_C(KERNEL_VADDR_SIZE) ///< 内核大小
-#define KERNEL_ASPACE_BASE KERNEL_VMM_BASE
-#define KERNEL_ASPACE_SIZE KERNEL_VMM_SIZE
+#define KERNEL_ASPACE_BASE KERNEL_VMM_BASE ///< 内核运行空间层面的宏配置 | 基地址
+#define KERNEL_ASPACE_SIZE KERNEL_VMM_SIZE ///< 内核空间大小
/* Uncached vmm aspace */
-#define UNCACHED_VMM_BASE (KERNEL_VMM_BASE + KERNEL_VMM_SIZE)
-#define UNCACHED_VMM_SIZE DDR_MEM_SIZE
-
-#define VMALLOC_START (UNCACHED_VMM_BASE + UNCACHED_VMM_SIZE)
-#define VMALLOC_SIZE 0x08000000
-
-#ifdef LOSCFG_KERNEL_MMU
-#define PERIPH_DEVICE_BASE (VMALLOC_START + VMALLOC_SIZE)
+#define UNCACHED_VMM_BASE (KERNEL_VMM_BASE + KERNEL_VMM_SIZE) ///< 未缓存虚拟空间基地址,适用于DMA,LCD framebuf,
+#define UNCACHED_VMM_SIZE DDR_MEM_SIZE ///<未缓存虚拟空间大小
+
+#define VMALLOC_START (UNCACHED_VMM_BASE + UNCACHED_VMM_SIZE) ///< 内核堆空间基地址
+#define VMALLOC_SIZE 0x08000000 ///< 内核堆空间大小, 128M
+//UART,LCD,摄像头,I2C,中断控制器统称为外部设备, 统一编址
+#ifdef LOSCFG_KERNEL_MMU //使用MMU时,只是虚拟地址不一样,但映射的物理设备空间一致.
+#define PERIPH_DEVICE_BASE (VMALLOC_START + VMALLOC_SIZE) ///< 不使用buffer,cache
#define PERIPH_DEVICE_SIZE U32_C(PERIPH_PMM_SIZE)
-#define PERIPH_CACHED_BASE (PERIPH_DEVICE_BASE + PERIPH_DEVICE_SIZE)
+#define PERIPH_CACHED_BASE (PERIPH_DEVICE_BASE + PERIPH_DEVICE_SIZE) ///< 使用cache但不用buffer
#define PERIPH_CACHED_SIZE U32_C(PERIPH_PMM_SIZE)
-#define PERIPH_UNCACHED_BASE (PERIPH_CACHED_BASE + PERIPH_CACHED_SIZE)
+#define PERIPH_UNCACHED_BASE (PERIPH_CACHED_BASE + PERIPH_CACHED_SIZE) ///< 不使用cache但使用buffer
#define PERIPH_UNCACHED_SIZE U32_C(PERIPH_PMM_SIZE)
-#else
+#else //不使用MMU时,外部设备空间地址一致.
#define PERIPH_DEVICE_BASE PERIPH_PMM_BASE
#define PERIPH_DEVICE_SIZE U32_C(PERIPH_PMM_SIZE)
#define PERIPH_CACHED_BASE PERIPH_PMM_BASE
@@ -86,9 +177,10 @@ extern "C" {
#define PERIPH_UNCACHED_SIZE U32_C(PERIPH_PMM_SIZE)
#endif
-#define IO_DEVICE_ADDR(paddr) ((paddr) - PERIPH_PMM_BASE + PERIPH_DEVICE_BASE)
-#define IO_CACHED_ADDR(paddr) ((paddr) - PERIPH_PMM_BASE + PERIPH_CACHED_BASE)
-#define IO_UNCACHED_ADDR(paddr) ((paddr) - PERIPH_PMM_BASE + PERIPH_UNCACHED_BASE)
+#define IO_DEVICE_ADDR(paddr) ((paddr) - PERIPH_PMM_BASE + PERIPH_DEVICE_BASE) ///< 通过物理地址获取IO设备虚拟地址
+#define IO_CACHED_ADDR(paddr) ((paddr) - PERIPH_PMM_BASE + PERIPH_CACHED_BASE) ///< 通过物理地址获取IO设备虚拟缓存地址
+#define IO_UNCACHED_ADDR(paddr) ((paddr) - PERIPH_PMM_BASE + PERIPH_UNCACHED_BASE) ///< 通过物理地址获取IO设备虚拟未缓存地址
+//DDR_MEM_ADDR: DDR内存全称是DDR SDRAM(Double Data Rate SDRAM,双倍速率SDRAM)
#define MEM_CACHED_ADDR(paddr) ((paddr) - DDR_MEM_ADDR + KERNEL_VMM_BASE)
#define MEM_UNCACHED_ADDR(paddr) ((paddr) - DDR_MEM_ADDR + UNCACHED_VMM_BASE)
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_event.c b/src/kernel_liteos_a/kernel/base/ipc/los_event.c
index dc55aa25..81a6dd5a 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_event.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_event.c
@@ -1,6 +1,57 @@
+/*!
+ * @file los_event.c
+ * @brief
+ * @link
+ @verbatim
+ 事件(Event)是一种任务间通信的机制,可用于任务间的同步。
+ 多任务环境下,任务之间往往需要同步操作,一个等待即是一个同步。事件可以提供一对多、多对多的同步操作。
+ 一对多同步模型:一个任务等待多个事件的触发。可以是任意一个事件发生时唤醒任务处理事件,也可以是几个事件都发生后才唤醒任务处理事件。
+ 多对多同步模型:多个任务等待多个事件的触发。
+
+ 事件特点
+ 任务通过创建事件控制块来触发事件或等待事件。
+ 事件间相互独立,内部实现为一个32位无符号整型,每一位标识一种事件类型。第25位不可用,因此最多可支持31种事件类型。
+ 事件仅用于任务间的同步,不提供数据传输功能。
+ 多次向事件控制块写入同一事件类型,在被清零前等效于只写入一次。
+ 多个任务可以对同一事件进行读写操作。
+ 支持事件读写超时机制。
+
+ 事件读取模式
+ 在读事件时,可以选择读取模式。读取模式如下:
+ 所有事件(LOS_WAITMODE_AND):逻辑与,基于接口传入的事件类型掩码eventMask,
+ 只有这些事件都已经发生才能读取成功,否则该任务将阻塞等待或者返回错误码。
+ 任一事件(LOS_WAITMODE_OR):逻辑或,基于接口传入的事件类型掩码eventMask,
+ 只要这些事件中有任一种事件发生就可以读取成功,否则该任务将阻塞等待或者返回错误码。
+ 清除事件(LOS_WAITMODE_CLR):这是一种附加读取模式,需要与所有事件模式或任一事件模式结合
+ 使用(LOS_WAITMODE_AND | LOS_WAITMODE_CLR或 LOS_WAITMODE_OR | LOS_WAITMODE_CLR)。在这种模式下,
+ 当设置的所有事件模式或任一事件模式读取成功后,会自动清除事件控制块中对应的事件类型位。
+
+ 运作机制
+ 任务在调用LOS_EventRead接口读事件时,可以根据入参事件掩码类型eventMask读取事件的单个或者多个事件类型。
+ 事件读取成功后,如果设置LOS_WAITMODE_CLR会清除已读取到的事件类型,反之不会清除已读到的事件类型,需显式清除。
+ 可以通过入参选择读取模式,读取事件掩码类型中所有事件还是读取事件掩码类型中任意事件。
+ 任务在调用LOS_EventWrite接口写事件时,对指定事件控制块写入指定的事件类型,
+ 可以一次同时写多个事件类型。写事件会触发任务调度。
+ 任务在调用LOS_EventClear接口清除事件时,根据入参事件和待清除的事件类型,
+ 对事件对应位进行清0操作。
+
+ 使用场景
+ 事件可应用于多种任务同步场景,在某些同步场景下可替代信号量。
+
+ 注意事项
+ 在系统初始化之前不能调用读写事件接口。如果调用,系统运行会不正常。
+ 在中断中,可以对事件对象进行写操作,但不能进行读操作。
+ 在锁任务调度状态下,禁止任务阻塞于读事件。
+ LOS_EventClear 入参值是:要清除的指定事件类型的反码(~events)。
+ 为了区别LOS_EventRead接口返回的是事件还是错误码,事件掩码的第25位不能使用。
+ @endverbatim
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2022-1-15
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -40,6 +91,7 @@
#include "los_exc.h"
#endif
+/// 初始化一个事件控制块
LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventInit(PEVENT_CB_S eventCB)
{
UINT32 intSave;
@@ -48,14 +100,14 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventInit(PEVENT_CB_S eventCB)
return LOS_ERRNO_EVENT_PTR_NULL;
}
- intSave = LOS_IntLock();
- eventCB->uwEventID = 0;
- LOS_ListInit(&eventCB->stEventList);
- LOS_IntRestore(intSave);
+ intSave = LOS_IntLock();//锁中断
+ eventCB->uwEventID = 0;//事件类型初始化
+ LOS_ListInit(&eventCB->stEventList);//事件链表初始化
+ LOS_IntRestore(intSave);//恢复中断
OsHookCall(LOS_HOOK_TYPE_EVENT_INIT, eventCB);
return LOS_OK;
}
-
+///事件参数检查
LITE_OS_SEC_TEXT STATIC UINT32 OsEventParamCheck(const VOID *ptr, UINT32 eventMask, UINT32 mode)
{
if (ptr == NULL) {
@@ -77,52 +129,53 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventParamCheck(const VOID *ptr, UINT32 eventMa
}
return LOS_OK;
}
-
+///根据用户传入的事件值、事件掩码及校验模式,返回用户传入的事件是否符合预期
LITE_OS_SEC_TEXT UINT32 OsEventPoll(UINT32 *eventID, UINT32 eventMask, UINT32 mode)
{
UINT32 ret = 0;
- LOS_ASSERT(OsIntLocked());
- LOS_ASSERT(LOS_SpinHeld(&g_taskSpin));
+ LOS_ASSERT(OsIntLocked());//断言不允许中断了
+ LOS_ASSERT(LOS_SpinHeld(&g_taskSpin));//任务自旋锁
- if (mode & LOS_WAITMODE_OR) {
+ if (mode & LOS_WAITMODE_OR) {//如果模式是读取掩码中任意事件
if ((*eventID & eventMask) != 0) {
ret = *eventID & eventMask;
}
- } else {
- if ((eventMask != 0) && (eventMask == (*eventID & eventMask))) {
+ } else {//等待全部事件发生
+ if ((eventMask != 0) && (eventMask == (*eventID & eventMask))) {//必须满足全部事件发生
ret = *eventID & eventMask;
}
}
- if (ret && (mode & LOS_WAITMODE_CLR)) {
+ if (ret && (mode & LOS_WAITMODE_CLR)) {//读取完成后清除事件
*eventID = *eventID & ~ret;
}
return ret;
}
-
+///检查读事件
LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadCheck(const PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode)
{
UINT32 ret;
LosTaskCB *runTask = NULL;
- ret = OsEventParamCheck(eventCB, eventMask, mode);
+
+ ret = OsEventParamCheck(eventCB, eventMask, mode);//事件参数检查
if (ret != LOS_OK) {
return ret;
}
- if (OS_INT_ACTIVE) {
- return LOS_ERRNO_EVENT_READ_IN_INTERRUPT;
+ if (OS_INT_ACTIVE) {//中断正在进行
+ return LOS_ERRNO_EVENT_READ_IN_INTERRUPT;//不能在中断发生时读事件
}
- runTask = OsCurrTaskGet();
- if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
+ runTask = OsCurrTaskGet();//获取当前任务
+ if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {//任务属于系统任务
OsBackTrace();
- return LOS_ERRNO_EVENT_READ_IN_SYSTEM_TASK;
+ return LOS_ERRNO_EVENT_READ_IN_SYSTEM_TASK;//不能在系统任务中读取事件
}
return LOS_OK;
}
-
+/// 读取指定事件类型的实现函数,超时时间为相对时间:单位为Tick
LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadImp(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode,
UINT32 timeout, BOOL once)
{
@@ -131,57 +184,57 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadImp(PEVENT_CB_S eventCB, UINT32 eventM
OsHookCall(LOS_HOOK_TYPE_EVENT_READ, eventCB, eventMask, mode, timeout);
if (once == FALSE) {
- ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);
+ ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);//检测事件是否符合预期
}
- if (ret == 0) {
- if (timeout == 0) {
+ if (ret == 0) {//不符合预期时
+ if (timeout == 0) {//不等待的情况
return ret;
}
- if (!OsPreemptableInSched()) {
+ if (!OsPreemptableInSched()) {//不能抢占式调度
return LOS_ERRNO_EVENT_READ_IN_LOCK;
}
- runTask->eventMask = eventMask;
- runTask->eventMode = mode;
- runTask->taskEvent = eventCB;
- OsTaskWaitSetPendMask(OS_TASK_WAIT_EVENT, eventMask, timeout);
+ runTask->eventMask = eventMask; //等待事件
+ runTask->eventMode = mode; //事件模式
+ runTask->taskEvent = eventCB; //事件控制块
+ OsTaskWaitSetPendMask(OS_TASK_WAIT_EVENT, eventMask, timeout);//任务进入等待状态,等待事件的到来并设置时长和掩码
ret = runTask->ops->wait(runTask, &eventCB->stEventList, timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
return LOS_ERRNO_EVENT_READ_TIMEOUT;
}
- ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);
+ ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);//检测事件是否符合预期
}
return ret;
}
-
+///读取指定事件类型,超时时间为相对时间:单位为Tick
LITE_OS_SEC_TEXT STATIC UINT32 OsEventRead(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode, UINT32 timeout,
BOOL once)
{
UINT32 ret;
UINT32 intSave;
- ret = OsEventReadCheck(eventCB, eventMask, mode);
+ ret = OsEventReadCheck(eventCB, eventMask, mode);//读取事件检查
if (ret != LOS_OK) {
return ret;
}
SCHEDULER_LOCK(intSave);
- ret = OsEventReadImp(eventCB, eventMask, mode, timeout, once);
+ ret = OsEventReadImp(eventCB, eventMask, mode, timeout, once);//读事件实现函数
SCHEDULER_UNLOCK(intSave);
return ret;
}
-
+///事件恢复操作
LITE_OS_SEC_TEXT STATIC UINT8 OsEventResume(LosTaskCB *resumedTask, const PEVENT_CB_S eventCB, UINT32 events)
{
- UINT8 exitFlag = 0;
+ UINT8 exitFlag = 0;//是否唤醒
if (((resumedTask->eventMode & LOS_WAITMODE_OR) && ((resumedTask->eventMask & events) != 0)) ||
((resumedTask->eventMode & LOS_WAITMODE_AND) &&
- ((resumedTask->eventMask & eventCB->uwEventID) == resumedTask->eventMask))) {
- exitFlag = 1;
+ ((resumedTask->eventMask & eventCB->uwEventID) == resumedTask->eventMask))) {//逻辑与 和 逻辑或 的处理
+ exitFlag = 1;
resumedTask->taskEvent = NULL;
OsTaskWakeClearPendMask(resumedTask);
@@ -190,33 +243,33 @@ LITE_OS_SEC_TEXT STATIC UINT8 OsEventResume(LosTaskCB *resumedTask, const PEVENT
return exitFlag;
}
-
+///以不安全的方式写事件
LITE_OS_SEC_TEXT VOID OsEventWriteUnsafe(PEVENT_CB_S eventCB, UINT32 events, BOOL once, UINT8 *exitFlag)
{
LosTaskCB *resumedTask = NULL;
LosTaskCB *nextTask = NULL;
BOOL schedFlag = FALSE;
OsHookCall(LOS_HOOK_TYPE_EVENT_WRITE, eventCB, events);
- eventCB->uwEventID |= events;
- if (!LOS_ListEmpty(&eventCB->stEventList)) {
+ eventCB->uwEventID |= events;//对应位贴上标签
+ if (!LOS_ListEmpty(&eventCB->stEventList)) {//等待事件链表判断,处理等待事件的任务
for (resumedTask = LOS_DL_LIST_ENTRY((&eventCB->stEventList)->pstNext, LosTaskCB, pendList);
- &resumedTask->pendList != &eventCB->stEventList;) {
- nextTask = LOS_DL_LIST_ENTRY(resumedTask->pendList.pstNext, LosTaskCB, pendList);
- if (OsEventResume(resumedTask, eventCB, events)) {
- schedFlag = TRUE;
+ &resumedTask->pendList != &eventCB->stEventList;) {//循环获取任务链表
+ nextTask = LOS_DL_LIST_ENTRY(resumedTask->pendList.pstNext, LosTaskCB, pendList);//获取任务实体
+ if (OsEventResume(resumedTask, eventCB, events)) {//是否恢复任务
+ schedFlag = TRUE;//任务已加至就绪队列,申请发生一次调度
}
- if (once == TRUE) {
- break;
+ if (once == TRUE) {//是否只处理一次任务
+ break;//退出循环
}
- resumedTask = nextTask;
+ resumedTask = nextTask;//检查链表中下一个任务
}
}
- if ((exitFlag != NULL) && (schedFlag == TRUE)) {
+ if ((exitFlag != NULL) && (schedFlag == TRUE)) {//是否让外面调度
*exitFlag = 1;
}
}
-
+///写入事件
LITE_OS_SEC_TEXT STATIC UINT32 OsEventWrite(PEVENT_CB_S eventCB, UINT32 events, BOOL once)
{
UINT32 intSave;
@@ -230,54 +283,54 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventWrite(PEVENT_CB_S eventCB, UINT32 events,
return LOS_ERRNO_EVENT_SETBIT_INVALID;
}
- SCHEDULER_LOCK(intSave);
- OsEventWriteUnsafe(eventCB, events, once, &exitFlag);
- SCHEDULER_UNLOCK(intSave);
+ SCHEDULER_LOCK(intSave); //禁止调度
+ OsEventWriteUnsafe(eventCB, events, once, &exitFlag);//写入事件
+ SCHEDULER_UNLOCK(intSave); //允许调度
- if (exitFlag == 1) {
- LOS_MpSchedule(OS_MP_CPU_ALL);
- LOS_Schedule();
+ if (exitFlag == 1) { //需要发生调度
+ LOS_MpSchedule(OS_MP_CPU_ALL);//通知所有CPU调度
+ LOS_Schedule();//执行调度
}
return LOS_OK;
}
-
+///根据用户传入的事件值、事件掩码及校验模式,返回用户传入的事件是否符合预期
LITE_OS_SEC_TEXT UINT32 LOS_EventPoll(UINT32 *eventID, UINT32 eventMask, UINT32 mode)
{
UINT32 ret;
UINT32 intSave;
-
+ //事件参数检查
ret = OsEventParamCheck((VOID *)eventID, eventMask, mode);
if (ret != LOS_OK) {
return ret;
}
- SCHEDULER_LOCK(intSave);
+ SCHEDULER_LOCK(intSave);//申请任务自旋锁
ret = OsEventPoll(eventID, eventMask, mode);
SCHEDULER_UNLOCK(intSave);
return ret;
}
-
+///读取指定事件类型,超时时间为相对时间:单位为Tick
LITE_OS_SEC_TEXT UINT32 LOS_EventRead(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode, UINT32 timeout)
{
return OsEventRead(eventCB, eventMask, mode, timeout, FALSE);
}
-
+///写指定的事件类型
LITE_OS_SEC_TEXT UINT32 LOS_EventWrite(PEVENT_CB_S eventCB, UINT32 events)
{
return OsEventWrite(eventCB, events, FALSE);
}
-
+///只读一次事件
LITE_OS_SEC_TEXT_MINOR UINT32 OsEventReadOnce(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode,
UINT32 timeout)
{
return OsEventRead(eventCB, eventMask, mode, timeout, TRUE);
}
-
+///只写一次事件
LITE_OS_SEC_TEXT_MINOR UINT32 OsEventWriteOnce(PEVENT_CB_S eventCB, UINT32 events)
{
return OsEventWrite(eventCB, events, TRUE);
}
-
+///销毁指定的事件控制块
LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventDestroy(PEVENT_CB_S eventCB)
{
UINT32 intSave;
@@ -298,7 +351,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventDestroy(PEVENT_CB_S eventCB)
OsHookCall(LOS_HOOK_TYPE_EVENT_DESTROY, eventCB);
return LOS_OK;
}
-
+///清除指定的事件类型
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_EventClear(PEVENT_CB_S eventCB, UINT32 eventMask)
{
UINT32 intSave;
@@ -313,7 +366,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_EventClear(PEVENT_CB_S eventCB, UINT32 eventMa
return LOS_OK;
}
-
+///有条件式读事件
#ifdef LOSCFG_COMPAT_POSIX
LITE_OS_SEC_TEXT UINT32 OsEventReadWithCond(const EventCond *cond, PEVENT_CB_S eventCB,
UINT32 eventMask, UINT32 mode, UINT32 timeout)
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_futex.c b/src/kernel_liteos_a/kernel/base/ipc/los_futex.c
index 384bb483..8c968b81 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_futex.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_futex.c
@@ -1,6 +1,60 @@
+/*!
+ * @file los_futex.c
+ * @brief
+ * @link mutex http://weharmonyos.com/openharmony/zh-cn/device-dev/kernel/kernel-small-basic-trans-user-mutex.html @endlink
+ * @link d17a6152740c https://www.jianshu.com/p/d17a6152740c @endlink
+ @verbatim
+ Futex 由一块能够被多个进程共享的内存空间(一个对齐后的整型变量)组成;这个整型变量的值能够通过汇编语言调用CPU提供的原子操作指令来增加或减少,
+ 并且一个进程可以等待直到那个值变成正数。Futex 的操作几乎全部在用户空间完成;只有当操作结果不一致从而需要仲裁时,才需要进入操作系统内核空间执行。
+ 这种机制允许使用 futex 的锁定原语有非常高的执行效率:由于绝大多数的操作并不需要在多个进程之间进行仲裁,所以绝大多数操作都可以在应用程序空间执行,
+ 而不需要使用(相对高代价的)内核系统调用。
+
+ 基本概念
+ Futex(Fast userspace mutex,用户态快速互斥锁)是内核提供的一种系统调用能力,通常作为基础组件与用户态的相关
+ 锁逻辑结合组成用户态锁,是一种用户态与内核态共同作用的锁,例如用户态mutex锁、barrier与cond同步锁、读写锁。
+ 其用户态部分负责锁逻辑,内核态部分负责锁调度。
+
+ 当用户态线程请求锁时,先在用户态进行锁状态的判断维护,若此时不产生锁的竞争,则直接在用户态进行上锁返回;
+ 反之,则需要进行线程的挂起操作,通过Futex系统调用请求内核介入来挂起线程,并维护阻塞队列。
+
+ 当用户态线程释放锁时,先在用户态进行锁状态的判断维护,若此时没有其他线程被该锁阻塞,则直接在用户态进行解锁返回;
+ 反之,则需要进行阻塞线程的唤醒操作,通过Futex系统调用请求内核介入来唤醒阻塞队列中的线程。
+ 历史
+ futex (fast userspace mutex) 是Linux的一个基础组件,可以用来构建各种更高级别的同步机制,比如锁或者信号量等等,
+ POSIX信号量就是基于futex构建的。大多数时候编写应用程序并不需要直接使用futex,一般用基于它所实现的系统库就够了。
+
+ 传统的SystemV IPC(inter process communication)进程间同步机制都是通过内核对象来实现的,以 semaphore 为例,
+ 当进程间要同步的时候,必须通过系统调用semop(2)进入内核进行PV操作。系统调用的缺点是开销很大,需要从user mode
+ 切换到kernel mode、保存寄存器状态、从user stack切换到kernel stack、等等,通常要消耗上百条指令。事实上,
+ 有一部分系统调用是可以避免的,因为现实中很多同步操作进行的时候根本不存在竞争,即某个进程从持有semaphore直至
+ 释放semaphore的这段时间内,常常没有其它进程对同一semaphore有需求,在这种情况下,内核的参与本来是不必要的,
+ 可是在传统机制下,持有semaphore必须先调用semop(2)进入内核去看看有没有人和它竞争,释放semaphore也必须调用semop(2)
+ 进入内核去看看有没有人在等待同一semaphore,这些不必要的系统调用造成了大量的性能损耗。
+ 设计思想
+ futex的解决思路是:在无竞争的情况下操作完全在user space进行,不需要系统调用,仅在发生竞争的时候进入内核去完成
+ 相应的处理(wait 或者 wake up)。所以说,futex是一种user mode和kernel mode混合的同步机制,需要两种模式合作才能完成,
+ futex变量必须位于user space,而不是内核对象,futex的代码也分为user mode和kernel mode两部分,无竞争的情况下在user mode,
+ 发生竞争时则通过sys_futex系统调用进入kernel mode进行处理
+ 运行机制
+ 当用户态产生锁的竞争或释放需要进行相关线程的调度操作时,会触发Futex系统调用进入内核,此时会将用户态锁的地址
+ 传入内核,并在内核的Futex中以锁地址来区分用户态的每一把锁,因为用户态可用虚拟地址空间为1GiB,为了便于查找、
+ 管理,内核Futex采用哈希桶来存放用户态传入的锁。
+
+ 当前哈希桶共有80个,0~63号桶用于存放私有锁(以虚拟地址进行哈希),64~79号桶用于存放共享锁(以物理地址进行哈希),
+ 私有/共享属性通过用户态锁的初始化以及Futex系统调用入参确定。
+
+ 如下图: 每个futex哈希桶中存放被futex_list串联起来的哈希值相同的futex node,每个futex node对应一个被挂起的task,
+ node中key值唯一标识一把用户态锁,具有相同key值的node被queue_list串联起来表示被同一把锁阻塞的task队列。
+ @endverbatim
+ @image html https://gitee.com/weharmonyos/resources/raw/master/81/futex.png
+ * @attention Futex系统调用通常与用户态逻辑共同组成用户态锁,故推荐使用用户态POSIX接口的锁
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-11-23
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -43,28 +97,30 @@
#ifdef LOSCFG_KERNEL_VM
-#define OS_FUTEX_FROM_FUTEXLIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, futexList)
-#define OS_FUTEX_FROM_QUEUELIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, queueList)
-#define OS_FUTEX_KEY_BASE USER_ASPACE_BASE
-#define OS_FUTEX_KEY_MAX (USER_ASPACE_BASE + USER_ASPACE_SIZE)
+#define OS_FUTEX_FROM_FUTEXLIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, futexList) // 通过快锁节点找到结构体
+#define OS_FUTEX_FROM_QUEUELIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, queueList) // 通过队列节点找到结构体
+#define OS_FUTEX_KEY_BASE USER_ASPACE_BASE ///< 进程用户空间基址
+#define OS_FUTEX_KEY_MAX (USER_ASPACE_BASE + USER_ASPACE_SIZE) ///< 进程用户空间尾址
/* private: 0~63 hash index_num
* shared: 64~79 hash index_num */
-#define FUTEX_INDEX_PRIVATE_MAX 64
-#define FUTEX_INDEX_SHARED_MAX 16
-#define FUTEX_INDEX_MAX (FUTEX_INDEX_PRIVATE_MAX + FUTEX_INDEX_SHARED_MAX)
+#define FUTEX_INDEX_PRIVATE_MAX 64 ///< 0~63号桶用于存放私有锁(以虚拟地址进行哈希),同一进程不同线程共享futex变量,表明变量在进程地址空间中的位置
+///< 它告诉内核,这个futex是进程专有的,不可以与其他进程共享。它仅仅用作同一进程的线程间同步。
+#define FUTEX_INDEX_SHARED_MAX 16 ///< 64~79号桶用于存放共享锁(以物理地址进行哈希),不同进程间通过文件共享futex变量,表明该变量在文件中的位置
+#define FUTEX_INDEX_MAX (FUTEX_INDEX_PRIVATE_MAX + FUTEX_INDEX_SHARED_MAX) ///< 80个哈希桶
-#define FUTEX_INDEX_SHARED_POS FUTEX_INDEX_PRIVATE_MAX
+#define FUTEX_INDEX_SHARED_POS FUTEX_INDEX_PRIVATE_MAX ///< 共享锁开始位置
#define FUTEX_HASH_PRIVATE_MASK (FUTEX_INDEX_PRIVATE_MAX - 1)
#define FUTEX_HASH_SHARED_MASK (FUTEX_INDEX_SHARED_MAX - 1)
-
+/// 单独哈希桶,上面挂了一个个 FutexNode
typedef struct {
- LosMux listLock;
- LOS_DL_LIST lockList;
+ LosMux listLock;///< 内核操作lockList的互斥锁
+ LOS_DL_LIST lockList;///< 用于挂载 FutexNode (Fast userspace mutex,用户态快速互斥锁)
} FutexHash;
-FutexHash g_futexHash[FUTEX_INDEX_MAX];
+FutexHash g_futexHash[FUTEX_INDEX_MAX];///< 80个哈希桶
+/// 对互斥锁封装
STATIC INT32 OsFutexLock(LosMux *lock)
{
UINT32 ret = LOS_MuxLock(lock, LOS_WAIT_FOREVER);
@@ -84,15 +140,15 @@ STATIC INT32 OsFutexUnlock(LosMux *lock)
}
return LOS_OK;
}
-
+///< 初始化Futex(Fast userspace mutex,用户态快速互斥锁)模块
UINT32 OsFutexInit(VOID)
{
INT32 count;
UINT32 ret;
-
+ // 初始化 80个哈希桶
for (count = 0; count < FUTEX_INDEX_MAX; count++) {
- LOS_ListInit(&g_futexHash[count].lockList);
- ret = LOS_MuxInit(&(g_futexHash[count].listLock), NULL);
+ LOS_ListInit(&g_futexHash[count].lockList); // 初始化双向链表,上面挂 FutexNode
+ ret = LOS_MuxInit(&(g_futexHash[count].listLock), NULL);//初始化互斥锁
if (ret) {
return ret;
}
@@ -101,7 +157,7 @@ UINT32 OsFutexInit(VOID)
return LOS_OK;
}
-LOS_MODULE_INIT(OsFutexInit, LOS_INIT_LEVEL_KMOD_EXTENDED);
+LOS_MODULE_INIT(OsFutexInit, LOS_INIT_LEVEL_KMOD_EXTENDED);///< 注册Futex模块
#ifdef LOS_FUTEX_DEBUG
STATIC VOID OsFutexShowTaskNodeAttr(const LOS_DL_LIST *futexList)
@@ -152,63 +208,63 @@ VOID OsFutexHashShow(VOID)
}
}
#endif
-
+/// 通过用户空间地址获取哈希key
STATIC INLINE UINTPTR OsFutexFlagsToKey(const UINT32 *userVaddr, const UINT32 flags)
{
UINTPTR futexKey;
if (flags & FUTEX_PRIVATE) {
- futexKey = (UINTPTR)userVaddr;
+ futexKey = (UINTPTR)userVaddr;//私有锁(以虚拟地址进行哈希)
} else {
- futexKey = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);
+ futexKey = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);//共享锁(以物理地址进行哈希)
}
return futexKey;
}
-
+/// 通过哈希key获取索引
STATIC INLINE UINT32 OsFutexKeyToIndex(const UINTPTR futexKey, const UINT32 flags)
{
- UINT32 index = LOS_HashFNV32aBuf(&futexKey, sizeof(UINTPTR), FNV1_32A_INIT);
+ UINT32 index = LOS_HashFNV32aBuf(&futexKey, sizeof(UINTPTR), FNV1_32A_INIT);//获取哈希桶索引
if (flags & FUTEX_PRIVATE) {
- index &= FUTEX_HASH_PRIVATE_MASK;
+ index &= FUTEX_HASH_PRIVATE_MASK;//将index锁定在 0 ~ 63号
} else {
index &= FUTEX_HASH_SHARED_MASK;
- index += FUTEX_INDEX_SHARED_POS;
+ index += FUTEX_INDEX_SHARED_POS;//共享锁索引,将index锁定在 64 ~ 79号
}
return index;
}
-
+/// 设置快锁哈希key
STATIC INLINE VOID OsFutexSetKey(UINTPTR futexKey, UINT32 flags, FutexNode *node)
{
- node->key = futexKey;
- node->index = OsFutexKeyToIndex(futexKey, flags);
- node->pid = (flags & FUTEX_PRIVATE) ? LOS_GetCurrProcessID() : OS_INVALID;
+ node->key = futexKey;//哈希key
+ node->index = OsFutexKeyToIndex(futexKey, flags);//哈希桶索引
+ node->pid = (flags & FUTEX_PRIVATE) ? LOS_GetCurrProcessID() : OS_INVALID;//获取进程ID,共享快锁时 快锁节点没有进程ID
}
-
+//析构参数节点
STATIC INLINE VOID OsFutexDeinitFutexNode(FutexNode *node)
{
node->index = OS_INVALID_VALUE;
node->pid = 0;
LOS_ListDelete(&node->queueList);
}
-
+/// 新旧两个节点交换 futexList 位置
STATIC INLINE VOID OsFutexReplaceQueueListHeadNode(FutexNode *oldHeadNode, FutexNode *newHeadNode)
{
LOS_DL_LIST *futexList = oldHeadNode->futexList.pstPrev;
- LOS_ListDelete(&oldHeadNode->futexList);
- LOS_ListHeadInsert(futexList, &newHeadNode->futexList);
- if ((newHeadNode->queueList.pstNext == NULL) || (newHeadNode->queueList.pstPrev == NULL)) {
- LOS_ListInit(&newHeadNode->queueList);
+ LOS_ListDelete(&oldHeadNode->futexList);//将旧节点从futexList链表上摘除
+ LOS_ListHeadInsert(futexList, &newHeadNode->futexList);//将新节点从头部插入futexList链表
+ if ((newHeadNode->queueList.pstNext == NULL) || (newHeadNode->queueList.pstPrev == NULL)) {//新节点前后没有等待这把锁的任务
+ LOS_ListInit(&newHeadNode->queueList);//初始化等锁任务链表
}
}
-
+/// 将参数节点从futexList上摘除
STATIC INLINE VOID OsFutexDeleteKeyFromFutexList(FutexNode *node)
{
LOS_ListDelete(&node->futexList);
}
-
+/// 从哈希桶中删除快锁节点
STATIC VOID OsFutexDeleteKeyNodeFromHash(FutexNode *node, BOOL isDeleteHead, FutexNode **headNode, BOOL *queueFlags)
{
FutexNode *nextNode = NULL;
@@ -217,8 +273,8 @@ STATIC VOID OsFutexDeleteKeyNodeFromHash(FutexNode *node, BOOL isDeleteHead, Fut
return;
}
- if (LOS_ListEmpty(&node->queueList)) {
- OsFutexDeleteKeyFromFutexList(node);
+ if (LOS_ListEmpty(&node->queueList)) {//如果没有任务在等锁
+ OsFutexDeleteKeyFromFutexList(node);//从快锁链表上摘除
if (queueFlags != NULL) {
*queueFlags = TRUE;
}
@@ -226,10 +282,10 @@ STATIC VOID OsFutexDeleteKeyNodeFromHash(FutexNode *node, BOOL isDeleteHead, Fut
}
/* FutexList is not NULL, but the header node of queueList */
- if (node->futexList.pstNext != NULL) {
- if (isDeleteHead == TRUE) {
- nextNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_FIRST(&node->queueList));
- OsFutexReplaceQueueListHeadNode(node, nextNode);
+ if (node->futexList.pstNext != NULL) {//是头节点
+ if (isDeleteHead == TRUE) {//是否要删除头节点
+ nextNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_FIRST(&node->queueList));//取出第一个快锁节点
+ OsFutexReplaceQueueListHeadNode(node, nextNode);//两个节点交换位置
if (headNode != NULL) {
*headNode = nextNode;
}
@@ -242,22 +298,22 @@ EXIT:
OsFutexDeinitFutexNode(node);
return;
}
-
+/// 从哈希桶上删除快锁
VOID OsFutexNodeDeleteFromFutexHash(FutexNode *node, BOOL isDeleteHead, FutexNode **headNode, BOOL *queueFlags)
{
FutexHash *hashNode = NULL;
-
+ //通过key找到桶号
UINT32 index = OsFutexKeyToIndex(node->key, (node->pid == OS_INVALID) ? 0 : FUTEX_PRIVATE);
if (index >= FUTEX_INDEX_MAX) {
return;
}
- hashNode = &g_futexHash[index];
+ hashNode = &g_futexHash[index];//找到hash桶
if (OsMuxLockUnsafe(&hashNode->listLock, LOS_WAIT_FOREVER)) {
return;
}
- if (node->index != index) {
+ if (node->index != index) {//快锁节点桶号需和哈希桶号一致
goto EXIT;
}
@@ -270,7 +326,7 @@ EXIT:
return;
}
-
+/// 删除已被唤醒的任务节点并返回下一个节点
STATIC FutexNode *OsFutexDeleteAlreadyWakeTaskAndGetNext(const FutexNode *node, FutexNode **headNode, BOOL isDeleteHead)
{
FutexNode *tempNode = (FutexNode *)node;
@@ -292,7 +348,7 @@ STATIC FutexNode *OsFutexDeleteAlreadyWakeTaskAndGetNext(const FutexNode *node,
return tempNode;
}
-
+/// 插入一把新Futex锁到哈希桶中,只有是新的key时才会插入,因为可能存在多个FutexNode对应同一个key
STATIC VOID OsFutexInsertNewFutexKeyToHash(FutexNode *node)
{
FutexNode *headNode = NULL;
@@ -322,16 +378,17 @@ STATIC VOID OsFutexInsertNewFutexKeyToHash(FutexNode *node)
futexList != &(hashNode->lockList);
futexList = futexList->pstNext) {
headNode = OS_FUTEX_FROM_FUTEXLIST(futexList);
- if (node->key <= headNode->key) {
+ if (node->key <= headNode->key) {
LOS_ListTailInsert(&(headNode->futexList), &(node->futexList));
break;
}
+
}
EXIT:
return;
}
-
+///< 从后往前查找位置并插入快锁(注:函数名中的 Form 应为 From,属原有拼写错误)
STATIC INT32 OsFutexInsertFindFormBackToFront(LOS_DL_LIST *queueList, const LosTaskCB *runTask, FutexNode *node)
{
LOS_DL_LIST *listHead = queueList;
@@ -407,55 +464,55 @@ STATIC INT32 OsFutexRecycleAndFindHeadNode(FutexNode *headNode, FutexNode *node,
return LOS_OK;
}
-
+///< 将快锁挂到任务的阻塞链表上
STATIC INT32 OsFutexInsertTasktoPendList(FutexNode **firstNode, FutexNode *node, const LosTaskCB *run)
{
- LosTaskCB *taskHead = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&((*firstNode)->pendList)));
+ LosTaskCB *taskHead = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&((*firstNode)->pendList)));//获取阻塞链表首个任务
LOS_DL_LIST *queueList = &((*firstNode)->queueList);
INT32 ret1 = OsSchedParamCompare(run, taskHead);
if (ret1 < 0) {
/* The one with the highest priority is inserted at the top of the queue */
- LOS_ListTailInsert(queueList, &(node->queueList));
- OsFutexReplaceQueueListHeadNode(*firstNode, node);
+ LOS_ListTailInsert(queueList, &(node->queueList));//插到queueList的尾部
+ OsFutexReplaceQueueListHeadNode(*firstNode, node);//同时交换futexList链表上的位置
*firstNode = node;
return LOS_OK;
}
-
+ //如果等锁链表为空且当前任务不优先于链表首个任务
if (LOS_ListEmpty(queueList) && (ret1 >= 0)) {
/* Insert the next position in the queue with equal priority */
- LOS_ListHeadInsert(queueList, &(node->queueList));
+ LOS_ListHeadInsert(queueList, &(node->queueList));//从头部插入当前任务,当前任务是要被挂起的
return LOS_OK;
}
-
- FutexNode *tailNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_LAST(queueList));
- LosTaskCB *taskTail = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(tailNode->pendList)));
+
+ FutexNode *tailNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_LAST(queueList));//获取尾部节点
+ LosTaskCB *taskTail = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(tailNode->pendList)));//获取阻塞任务的最后一个
INT32 ret2 = OsSchedParamCompare(taskTail, run);
if ((ret2 <= 0) || (ret1 > ret2)) {
- return OsFutexInsertFindFormBackToFront(queueList, run, node);
+ return OsFutexInsertFindFormBackToFront(queueList, run, node);//从后往前插入
}
- return OsFutexInsertFindFromFrontToBack(queueList, run, node);
+ return OsFutexInsertFindFromFrontToBack(queueList, run, node);//否则从前往后插入
}
-
+/// 由指定快锁找到对应哈希桶
STATIC FutexNode *OsFindFutexNode(const FutexNode *node)
{
- FutexHash *hashNode = &g_futexHash[node->index];
+ FutexHash *hashNode = &g_futexHash[node->index];//先找到所在哈希桶
LOS_DL_LIST *futexList = &(hashNode->lockList);
FutexNode *headNode = NULL;
for (futexList = futexList->pstNext;
- futexList != &(hashNode->lockList);
+ futexList != &(hashNode->lockList);//判断循环结束条件,相等时说明跑完一轮了
futexList = futexList->pstNext) {
- headNode = OS_FUTEX_FROM_FUTEXLIST(futexList);
- if ((headNode->key == node->key) && (headNode->pid == node->pid)) {
- return headNode;
- }
+ headNode = OS_FUTEX_FROM_FUTEXLIST(futexList);//拿到快锁节点实体
+ if ((headNode->key == node->key) && (headNode->pid == node->pid)) {//已经存在这个节点,注意这里的比较
+ return headNode;//是key和pid 一起比较,因为只有这样才能确定唯一性
+ }//详细讲解请查看 鸿蒙内核源码分析(内核态锁篇) | 如何实现快锁Futex(下)
}
return NULL;
}
-
+///< 查找快锁并插入哈希桶中
STATIC INT32 OsFindAndInsertToHash(FutexNode *node)
{
FutexNode *headNode = NULL;
@@ -464,7 +521,7 @@ STATIC INT32 OsFindAndInsertToHash(FutexNode *node)
INT32 ret;
headNode = OsFindFutexNode(node);
- if (headNode == NULL) {
+ if (headNode == NULL) {//没有找到,说明这是一把新锁
OsFutexInsertNewFutexKeyToHash(node);
LOS_ListInit(&(node->queueList));
return LOS_OK;
@@ -483,14 +540,14 @@ STATIC INT32 OsFindAndInsertToHash(FutexNode *node)
return ret;
}
-
+/// 共享内存检查
STATIC INT32 OsFutexKeyShmPermCheck(const UINT32 *userVaddr, const UINT32 flags)
{
PADDR_T paddr;
/* Check whether the futexKey is a shared lock */
- if (!(flags & FUTEX_PRIVATE)) {
- paddr = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);
+ if (!(flags & FUTEX_PRIVATE)) {//非私有快锁
+ paddr = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);//能否查询到物理地址
if (paddr == 0) return LOS_NOK;
}
@@ -549,13 +606,13 @@ STATIC INT32 OsFutexDeleteTimeoutTaskNode(FutexHash *hashNode, FutexNode *node)
}
return LOS_ETIMEDOUT;
}
-
+/// 将快锁节点插入任务
STATIC INT32 OsFutexInsertTaskToHash(LosTaskCB **taskCB, FutexNode **node, const UINTPTR futexKey, const UINT32 flags)
{
INT32 ret;
- *taskCB = OsCurrTaskGet();
- *node = &((*taskCB)->futex);
- OsFutexSetKey(futexKey, flags, *node);
+ *taskCB = OsCurrTaskGet(); //获取当前任务
+ *node = &((*taskCB)->futex); //获取当前任务的快锁节点
+ OsFutexSetKey(futexKey, flags, *node);//设置参数 key index pid
ret = OsFindAndInsertToHash(*node);
if (ret) {
@@ -565,33 +622,33 @@ STATIC INT32 OsFutexInsertTaskToHash(LosTaskCB **taskCB, FutexNode **node, const
LOS_ListInit(&((*node)->pendList));
return LOS_OK;
}
-
+/// 将当前任务挂入等待链表中
STATIC INT32 OsFutexWaitTask(const UINT32 *userVaddr, const UINT32 flags, const UINT32 val, const UINT32 timeout)
{
INT32 futexRet;
UINT32 intSave, lockVal;
LosTaskCB *taskCB = NULL;
FutexNode *node = NULL;
- UINTPTR futexKey = OsFutexFlagsToKey(userVaddr, flags);
- UINT32 index = OsFutexKeyToIndex(futexKey, flags);
+ UINTPTR futexKey = OsFutexFlagsToKey(userVaddr, flags);//通过地址和flags 找到 key
+ UINT32 index = OsFutexKeyToIndex(futexKey, flags);//通过key找到哈希桶
FutexHash *hashNode = &g_futexHash[index];
- if (OsFutexLock(&hashNode->listLock)) {
+ if (OsFutexLock(&hashNode->listLock)) {//操作快锁节点链表前先上互斥锁
return LOS_EINVAL;
}
-
- if (LOS_ArchCopyFromUser(&lockVal, userVaddr, sizeof(UINT32))) {
+ //userVaddr必须是用户空间虚拟地址
+ if (LOS_ArchCopyFromUser(&lockVal, userVaddr, sizeof(UINT32))) {//将值拷贝到内核空间
PRINT_ERR("Futex wait param check failed! copy from user failed!\n");
futexRet = LOS_EINVAL;
goto EXIT_ERR;
}
- if (lockVal != val) {
+ if (lockVal != val) {//对参数内部逻辑检查
futexRet = LOS_EBADF;
goto EXIT_ERR;
}
-
- if (OsFutexInsertTaskToHash(&taskCB, &node, futexKey, flags)) {
+ //注意第二个参数 FutexNode *node = NULL
+ if (OsFutexInsertTaskToHash(&taskCB, &node, futexKey, flags)) {// node = taskCB->futex
futexRet = LOS_NOK;
goto EXIT_ERR;
}
@@ -602,7 +659,7 @@ STATIC INT32 OsFutexWaitTask(const UINT32 *userVaddr, const UINT32 flags, const
taskCB->ops->wait(taskCB, &(node->pendList), timeout);
LOS_SpinUnlock(&g_taskSpin);
- futexRet = OsFutexUnlock(&hashNode->listLock);
+ futexRet = OsFutexUnlock(&hashNode->listLock);//解锁哈希桶互斥锁
if (futexRet) {
OsSchedUnlock();
LOS_IntRestore(intSave);
@@ -632,21 +689,21 @@ EXIT_ERR:
EXIT_UNLOCK_ERR:
return futexRet;
}
-
+/// 设置线程等待 | 向Futex表中插入代表被阻塞的线程的node
INT32 OsFutexWait(const UINT32 *userVaddr, UINT32 flags, UINT32 val, UINT32 absTime)
{
INT32 ret;
UINT32 timeout = LOS_WAIT_FOREVER;
- ret = OsFutexWaitParamCheck(userVaddr, flags, absTime);
+ ret = OsFutexWaitParamCheck(userVaddr, flags, absTime);//参数检查
if (ret) {
return ret;
}
- if (absTime != LOS_WAIT_FOREVER) {
- timeout = OsNS2Tick((UINT64)absTime * OS_SYS_NS_PER_US);
+ if (absTime != LOS_WAIT_FOREVER) {//转换时间 , 内核的时间单位是 tick
+ timeout = OsNS2Tick((UINT64)absTime * OS_SYS_NS_PER_US); //转成 tick
}
- return OsFutexWaitTask(userVaddr, flags, val, timeout);
+ return OsFutexWaitTask(userVaddr, flags, val, timeout);//将任务挂起 timeOut 时长
}
STATIC INT32 OsFutexWakeParamCheck(const UINT32 *userVaddr, UINT32 flags)
@@ -657,12 +714,12 @@ STATIC INT32 OsFutexWakeParamCheck(const UINT32 *userVaddr, UINT32 flags)
PRINT_ERR("Futex wake param check failed! error flags: 0x%x\n", flags);
return LOS_EINVAL;
}
-
+ //地址必须在用户空间
if ((vaddr % sizeof(INT32)) || (vaddr < OS_FUTEX_KEY_BASE) || (vaddr >= OS_FUTEX_KEY_MAX)) {
PRINT_ERR("Futex wake param check failed! error userVaddr: 0x%x\n", userVaddr);
return LOS_EINVAL;
}
-
+ //必须得是个共享内存地址
if (flags && (OsFutexKeyShmPermCheck(userVaddr, flags) != LOS_OK)) {
PRINT_ERR("Futex wake param check failed! error shared memory perm userVaddr: 0x%x\n", userVaddr);
return LOS_EINVAL;
@@ -672,7 +729,8 @@ STATIC INT32 OsFutexWakeParamCheck(const UINT32 *userVaddr, UINT32 flags)
}
/* Check to see if the task to be awakened has timed out
- * if time out, to weak next pend task.
+ * if time out, to weak next pend task.
+ * | 查看要唤醒的任务是否超时,如果超时,就唤醒,并查看下一个挂起的任务。
*/
STATIC VOID OsFutexCheckAndWakePendTask(FutexNode *headNode, const INT32 wakeNumber,
FutexHash *hashNode, FutexNode **nextNode, BOOL *wakeAny)
@@ -707,7 +765,19 @@ STATIC VOID OsFutexCheckAndWakePendTask(FutexNode *headNode, const INT32 wakeNum
}
return;
}
-
+
+/*!
+ * @brief OsFutexWakeTask 唤醒任务
+ *
+ * @param flags
+ * @param futexKey
+ * @param newHeadNode
+ * @param wakeAny
+ * @param wakeNumber 唤醒数量
+ * @return
+ *
+ * @see
+ */
STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, FutexNode **newHeadNode, BOOL *wakeAny)
{
UINT32 intSave;
@@ -715,13 +785,13 @@ STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, F
FutexNode *headNode = NULL;
UINT32 index = OsFutexKeyToIndex(futexKey, flags);
FutexHash *hashNode = &g_futexHash[index];
- FutexNode tempNode = {
+ FutexNode tempNode = { //先组成一个临时快锁节点,目的是为了找到哈希桶中是否有这个节点
.key = futexKey,
.index = index,
.pid = (flags & FUTEX_PRIVATE) ? LOS_GetCurrProcessID() : OS_INVALID,
};
- node = OsFindFutexNode(&tempNode);
+ node = OsFindFutexNode(&tempNode);//找快锁节点
if (node == NULL) {
return LOS_EBADF;
}
@@ -729,7 +799,7 @@ STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, F
headNode = node;
SCHEDULER_LOCK(intSave);
- OsFutexCheckAndWakePendTask(headNode, wakeNumber, hashNode, newHeadNode, wakeAny);
+ OsFutexCheckAndWakePendTask(headNode, wakeNumber, hashNode, newHeadNode, wakeAny);//再找到等这把锁的唤醒指向数量的任务
if ((*newHeadNode) != NULL) {
OsFutexReplaceQueueListHeadNode(headNode, *newHeadNode);
OsFutexDeinitFutexNode(headNode);
@@ -741,7 +811,7 @@ STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, F
return LOS_OK;
}
-
+/// 唤醒一个被指定锁阻塞的线程
INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
{
INT32 ret, futexRet;
@@ -750,11 +820,11 @@ INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
FutexHash *hashNode = NULL;
FutexNode *headNode = NULL;
BOOL wakeAny = FALSE;
-
+ //1.检查参数
if (OsFutexWakeParamCheck(userVaddr, flags)) {
return LOS_EINVAL;
}
-
+ //2.找到指定用户空间地址对应的桶
futexKey = OsFutexFlagsToKey(userVaddr, flags);
index = OsFutexKeyToIndex(futexKey, flags);
@@ -762,7 +832,7 @@ INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
if (OsFutexLock(&hashNode->listLock)) {
return LOS_EINVAL;
}
-
+ //3.唤醒等待该锁的任务
ret = OsFutexWakeTask(futexKey, flags, wakeNumber, &headNode, &wakeAny);
if (ret) {
goto EXIT_ERR;
@@ -776,7 +846,7 @@ INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
if (futexRet) {
goto EXIT_UNLOCK_ERR;
}
-
+ //4.根据指定参数决定是否发起调度
if (wakeAny == TRUE) {
LOS_MpSchedule(OS_MP_CPU_ALL);
LOS_Schedule();
@@ -885,7 +955,7 @@ STATIC VOID OsFutexRequeueSplitTwoLists(FutexHash *oldHashNode, FutexNode *oldHe
tailNode->queueList.pstNext = &newHeadNode->queueList;
return;
}
-
+/// 删除旧key并获取头节点
STATIC FutexNode *OsFutexRequeueRemoveOldKeyAndGetHead(UINTPTR oldFutexKey, UINT32 flags, INT32 wakeNumber,
UINTPTR newFutexKey, INT32 requeueCount, BOOL *wakeAny)
{
@@ -921,7 +991,7 @@ STATIC FutexNode *OsFutexRequeueRemoveOldKeyAndGetHead(UINTPTR oldFutexKey, UINT
return oldHeadNode;
}
-
+/// 检查锁在Futex表中的状态
STATIC INT32 OsFutexRequeueParamCheck(const UINT32 *oldUserVaddr, UINT32 flags, const UINT32 *newUserVaddr)
{
VADDR_T oldVaddr = (VADDR_T)(UINTPTR)oldUserVaddr;
@@ -930,12 +1000,12 @@ STATIC INT32 OsFutexRequeueParamCheck(const UINT32 *oldUserVaddr, UINT32 flags,
if (oldVaddr == newVaddr) {
return LOS_EINVAL;
}
-
+ //检查标记
if ((flags & (~FUTEX_PRIVATE)) != FUTEX_REQUEUE) {
PRINT_ERR("Futex requeue param check failed! error flags: 0x%x\n", flags);
return LOS_EINVAL;
}
-
+ //检查地址范围,必须在用户空间
if ((oldVaddr % sizeof(INT32)) || (oldVaddr < OS_FUTEX_KEY_BASE) || (oldVaddr >= OS_FUTEX_KEY_MAX)) {
PRINT_ERR("Futex requeue param check failed! error old userVaddr: 0x%x\n", oldUserVaddr);
return LOS_EINVAL;
@@ -948,7 +1018,7 @@ STATIC INT32 OsFutexRequeueParamCheck(const UINT32 *oldUserVaddr, UINT32 flags,
return LOS_OK;
}
-
+/// 调整指定锁在Futex表中的位置
INT32 OsFutexRequeue(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber, INT32 count, const UINT32 *newUserVaddr)
{
INT32 ret;
@@ -965,12 +1035,12 @@ INT32 OsFutexRequeue(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber, IN
return LOS_EINVAL;
}
- oldFutexKey = OsFutexFlagsToKey(userVaddr, flags);
+ oldFutexKey = OsFutexFlagsToKey(userVaddr, flags);//先拿key
newFutexKey = OsFutexFlagsToKey(newUserVaddr, flags);
- oldIndex = OsFutexKeyToIndex(oldFutexKey, flags);
+ oldIndex = OsFutexKeyToIndex(oldFutexKey, flags);//再拿所在哈希桶位置,共有80个哈希桶
newIndex = OsFutexKeyToIndex(newFutexKey, flags);
- oldHashNode = &g_futexHash[oldIndex];
+ oldHashNode = &g_futexHash[oldIndex];//拿到对应哈希桶实体
if (OsFutexLock(&oldHashNode->listLock)) {
return LOS_EINVAL;
}
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_mux.c b/src/kernel_liteos_a/kernel/base/ipc/los_mux.c
index fa4a67b8..8a87eb54 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_mux.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_mux.c
@@ -1,6 +1,59 @@
+/*!
+ * @file los_mux.c
+ * @brief
+ * @link kernel-mini-basic-ipc-mutex-guide http://weharmonyos.com/openharmony/zh-cn/device-dev/kernel/kernel-mini-basic-ipc-mutex-guide.html @endlink
+ @verbatim
+ 基本概念
+ 互斥锁又称互斥型信号量,是一种特殊的二值性信号量,用于实现对共享资源的独占式处理。
+ 任意时刻互斥锁的状态只有两种,开锁或闭锁。当有任务持有时,互斥锁处于闭锁状态,这个任务获得该互斥锁的所有权。
+ 当该任务释放它时,该互斥锁被开锁,任务失去该互斥锁的所有权。当一个任务持有互斥锁时,其他任务将不能再对该互斥锁进行开锁或持有。
+ 多任务环境下往往存在多个任务竞争同一共享资源的应用场景,互斥锁可被用于对共享资源的保护从而实现独占式访问。
+ 另外互斥锁可以解决信号量存在的优先级翻转问题。
+
+ 运作机制
+ 多任务环境下会存在多个任务访问同一公共资源的场景,而有些公共资源是非共享的临界资源,
+ 只能被独占使用。互斥锁怎样来避免这种冲突呢?
+ 用互斥锁处理临界资源的同步访问时,如果有任务访问该资源,则互斥锁为加锁状态。此时其他任务
+ 如果想访问这个临界资源则会被阻塞,直到互斥锁被持有该锁的任务释放后,其他任务才能重新访问
+ 该公共资源,此时互斥锁再次上锁,如此确保同一时刻只有一个任务正在访问这个临界资源,保证了
+ 临界资源操作的完整性。
+
+ 使用场景
+ 多任务环境下往往存在多个任务竞争同一临界资源的应用场景,互斥锁可以提供任务间的互斥机制,
+ 防止两个任务在同一时刻访问相同的临界资源,从而实现独占式访问。
+
+ 申请互斥锁有三种模式:无阻塞模式、永久阻塞模式、定时阻塞模式。
+ 无阻塞模式:任务需要申请互斥锁,若该互斥锁当前没有任务持有,或者持有该互斥锁的任务和申请
+ 该互斥锁的任务为同一个任务,则申请成功。
+ 永久阻塞模式:任务需要申请互斥锁,若该互斥锁当前没有被占用,则申请成功。否则,该任务进入阻塞态,
+ 系统切换到就绪任务中优先级高者继续执行。任务进入阻塞态后,直到有其他任务释放该互斥锁,阻塞任务才会重新得以执行。
+ 定时阻塞模式:任务需要申请互斥锁,若该互斥锁当前没有被占用,则申请成功。否则该任务进入阻塞态,
+ 系统切换到就绪任务中优先级高者继续执行。任务进入阻塞态后,指定时间超时前有其他任务释放该互斥锁,
+ 或者用户指定时间超时后,阻塞任务才会重新得以执行。
+ 释放互斥锁:
+ 如果有任务阻塞于该互斥锁,则唤醒被阻塞任务中优先级最高的,该任务进入就绪态,并进行任务调度。
+ 如果没有任务阻塞于该互斥锁,则互斥锁释放成功。
+
+ 互斥锁典型场景的开发流程:
+ 通过make menuconfig配置互斥锁模块。
+ 创建互斥锁LOS_MuxCreate。
+ 申请互斥锁LOS_MuxPend。
+ 释放互斥锁LOS_MuxPost。
+ 删除互斥锁LOS_MuxDelete。
+
+ @endverbatim
+ * @image html https://gitee.com/weharmonyos/resources/raw/master/27/mux.png
+ * @attention 两个任务不能对同一把互斥锁加锁。如果某任务对已被持有的互斥锁加锁,则该任务会被挂起,直到持有该锁的任务对互斥锁解锁,才能执行对这把互斥锁的加锁操作。
+ \n 互斥锁不能在中断服务程序中使用。
+ \n LiteOS-A内核作为实时操作系统需要保证任务调度的实时性,尽量避免任务的长时间阻塞,因此在获得互斥锁之后,应该尽快释放互斥锁。
+ \n 持有互斥锁的过程中,不得再调用LOS_TaskPriSet等接口更改持有互斥锁任务的优先级。
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-11-18
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -40,19 +93,19 @@
#ifdef LOSCFG_BASE_IPC_MUX
#define MUTEXATTR_TYPE_MASK 0x0FU
-
+///互斥属性初始化
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrInit(LosMuxAttr *attr)
{
if (attr == NULL) {
return LOS_EINVAL;
}
- attr->protocol = LOS_MUX_PRIO_INHERIT;
- attr->prioceiling = OS_TASK_PRIORITY_LOWEST;
- attr->type = LOS_MUX_DEFAULT;
+ attr->protocol = LOS_MUX_PRIO_INHERIT; //协议默认用继承方式, A(4)task等B(19)释放锁时,B的调度优先级直接升到(4)
+ attr->prioceiling = OS_TASK_PRIORITY_LOWEST;//最低优先级
+ attr->type = LOS_MUX_DEFAULT; //默认 LOS_MUX_RECURSIVE
return LOS_OK;
}
-
+/// 销毁互斥锁属性(当前实现仅校验指针是否为空,并未做实际的销毁操作)
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrDestroy(LosMuxAttr *attr)
{
if (attr == NULL) {
@@ -61,7 +114,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrDestroy(LosMuxAttr *attr)
return LOS_OK;
}
-
+///获取互斥锁的类型属性,由outType接走,不送!
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetType(const LosMuxAttr *attr, INT32 *outType)
{
INT32 type;
@@ -79,7 +132,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetType(const LosMuxAttr *attr, INT32 *outTyp
return LOS_OK;
}
-
+///设置互斥锁的类型属性
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetType(LosMuxAttr *attr, INT32 type)
{
if ((attr == NULL) || (type < LOS_MUX_NORMAL) || (type > LOS_MUX_ERRORCHECK)) {
@@ -89,7 +142,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetType(LosMuxAttr *attr, INT32 type)
attr->type = (UINT8)((attr->type & ~MUTEXATTR_TYPE_MASK) | (UINT32)type);
return LOS_OK;
}
-
+///获取互斥锁属性的协议
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetProtocol(const LosMuxAttr *attr, INT32 *protocol)
{
if ((attr != NULL) && (protocol != NULL)) {
@@ -100,7 +153,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetProtocol(const LosMuxAttr *attr, INT32 *pr
return LOS_OK;
}
-
+///设置互斥锁属性的协议
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetProtocol(LosMuxAttr *attr, INT32 protocol)
{
if (attr == NULL) {
@@ -117,7 +170,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetProtocol(LosMuxAttr *attr, INT32 protocol)
return LOS_EINVAL;
}
}
-
+///获取互斥锁属性优先级
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetPrioceiling(const LosMuxAttr *attr, INT32 *prioceiling)
{
if (attr == NULL) {
@@ -130,7 +183,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetPrioceiling(const LosMuxAttr *attr, INT32
return LOS_OK;
}
-
+///设置互斥锁属性的优先级的上限
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetPrioceiling(LosMuxAttr *attr, INT32 prioceiling)
{
if ((attr == NULL) ||
@@ -143,7 +196,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetPrioceiling(LosMuxAttr *attr, INT32 prioce
return LOS_OK;
}
-
+///设置互斥锁的优先级的上限,老优先级由oldPrioceiling带走
LITE_OS_SEC_TEXT UINT32 LOS_MuxSetPrioceiling(LosMux *mutex, INT32 prioceiling, INT32 *oldPrioceiling)
{
INT32 ret;
@@ -172,7 +225,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxSetPrioceiling(LosMux *mutex, INT32 prioceiling,
return ret;
}
-
+///获取互斥锁的优先级的上限
LITE_OS_SEC_TEXT UINT32 LOS_MuxGetPrioceiling(const LosMux *mutex, INT32 *prioceiling)
{
if ((mutex != NULL) && (prioceiling != NULL) && (mutex->magic == OS_MUX_MAGIC)) {
@@ -182,7 +235,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxGetPrioceiling(const LosMux *mutex, INT32 *prioce
return LOS_EINVAL;
}
-
+///互斥锁是否有效
LITE_OS_SEC_TEXT BOOL LOS_MuxIsValid(const LosMux *mutex)
{
if ((mutex != NULL) && (mutex->magic == OS_MUX_MAGIC)) {
@@ -191,7 +244,7 @@ LITE_OS_SEC_TEXT BOOL LOS_MuxIsValid(const LosMux *mutex)
return FALSE;
}
-
+///检查互斥锁属性是否OK,否则 no ok :|)
STATIC UINT32 OsCheckMutexAttr(const LosMuxAttr *attr)
{
if (((INT8)(attr->type) < LOS_MUX_NORMAL) || (attr->type > LOS_MUX_ERRORCHECK)) {
@@ -205,7 +258,7 @@ STATIC UINT32 OsCheckMutexAttr(const LosMuxAttr *attr)
}
return LOS_OK;
}
-
+/// 初始化互斥锁
LITE_OS_SEC_TEXT UINT32 LOS_MuxInit(LosMux *mutex, const LosMuxAttr *attr)
{
UINT32 intSave;
@@ -215,24 +268,24 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxInit(LosMux *mutex, const LosMuxAttr *attr)
}
if (attr == NULL) {
- (VOID)LOS_MuxAttrInit(&mutex->attr);
+ (VOID)LOS_MuxAttrInit(&mutex->attr);//属性初始化
} else {
- (VOID)memcpy_s(&mutex->attr, sizeof(LosMuxAttr), attr, sizeof(LosMuxAttr));
+ (VOID)memcpy_s(&mutex->attr, sizeof(LosMuxAttr), attr, sizeof(LosMuxAttr));//把attr 拷贝到 mutex->attr
}
- if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {
+ if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {//检查属性
return LOS_EINVAL;
}
- SCHEDULER_LOCK(intSave);
- mutex->muxCount = 0;
- mutex->owner = NULL;
- LOS_ListInit(&mutex->muxList);
- mutex->magic = OS_MUX_MAGIC;
- SCHEDULER_UNLOCK(intSave);
+ SCHEDULER_LOCK(intSave); //拿到调度自旋锁
+ mutex->muxCount = 0; //锁定互斥量的次数
+ mutex->owner = NULL; //谁持有该锁
+ LOS_ListInit(&mutex->muxList); //互斥量双循环链表
+ mutex->magic = OS_MUX_MAGIC; //固定标识,互斥锁的魔法数字
+ SCHEDULER_UNLOCK(intSave); //释放调度自旋锁
return LOS_OK;
}
-
+///销毁互斥锁
LITE_OS_SEC_TEXT UINT32 LOS_MuxDestroy(LosMux *mutex)
{
UINT32 intSave;
@@ -241,22 +294,22 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxDestroy(LosMux *mutex)
return LOS_EINVAL;
}
- SCHEDULER_LOCK(intSave);
+ SCHEDULER_LOCK(intSave); //保存调度自旋锁
if (mutex->magic != OS_MUX_MAGIC) {
- SCHEDULER_UNLOCK(intSave);
+ SCHEDULER_UNLOCK(intSave);//释放调度自旋锁
return LOS_EBADF;
}
if (mutex->muxCount != 0) {
- SCHEDULER_UNLOCK(intSave);
+ SCHEDULER_UNLOCK(intSave);//释放调度自旋锁
return LOS_EBUSY;
}
- (VOID)memset_s(mutex, sizeof(LosMux), 0, sizeof(LosMux));
- SCHEDULER_UNLOCK(intSave);
+ (VOID)memset_s(mutex, sizeof(LosMux), 0, sizeof(LosMux));//很简单,全部清0处理.
+ SCHEDULER_UNLOCK(intSave); //释放调度自旋锁
return LOS_OK;
}
-
+///设置互斥锁位图
STATIC VOID OsMuxBitmapSet(const LosMux *mutex, const LosTaskCB *runTask)
{
if (mutex->attr.protocol != LOS_MUX_PRIO_INHERIT) {
@@ -271,7 +324,7 @@ STATIC VOID OsMuxBitmapSet(const LosMux *mutex, const LosTaskCB *runTask)
owner->ops->priorityInheritance(owner, ¶m);
}
}
-
+///恢复互斥锁位图
VOID OsMuxBitmapRestore(const LosMux *mutex, const LOS_DL_LIST *list, const LosTaskCB *runTask)
{
if (mutex->attr.protocol != LOS_MUX_PRIO_INHERIT) {
@@ -284,20 +337,21 @@ VOID OsMuxBitmapRestore(const LosMux *mutex, const LOS_DL_LIST *list, const LosT
owner->ops->priorityRestore(owner, list, ¶m);
}
+/// 最坏情况就是拿锁失败,让出CPU,变成阻塞任务,等别的任务释放锁后排到自己了接着执行.
STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
{
UINT32 ret;
- if ((mutex->muxList.pstPrev == NULL) || (mutex->muxList.pstNext == NULL)) {
+ if ((mutex->muxList.pstPrev == NULL) || (mutex->muxList.pstNext == NULL)) {//列表为空时的处理
/* This is for mutex macro initialization. */
- mutex->muxCount = 0;
- mutex->owner = NULL;
- LOS_ListInit(&mutex->muxList);
+ mutex->muxCount = 0;//锁计数器清0
+ mutex->owner = NULL;//锁没有归属任务
+ LOS_ListInit(&mutex->muxList);//初始化锁的任务链表,后续申请这把锁任务都会挂上去
}
- if (mutex->muxCount == 0) {
- mutex->muxCount++;
- mutex->owner = (VOID *)runTask;
+ if (mutex->muxCount == 0) {//无task用锁时,肯定能拿到锁了.在里面返回
+ mutex->muxCount++; //互斥锁计数器加1
+ mutex->owner = (VOID *)runTask; //当前任务拿到锁
LOS_ListTailInsert(&runTask->lockList, &mutex->holdList);
if (mutex->attr.protocol == LOS_MUX_PRIO_PROTECT) {
SchedParam param = { 0 };
@@ -307,23 +361,23 @@ STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
}
return LOS_OK;
}
-
- if (((LosTaskCB *)mutex->owner == runTask) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {
- mutex->muxCount++;
- return LOS_OK;
+ //递归锁muxCount>0 如果是递归锁就要处理两种情况 1.runtask持有锁 2.锁被别的任务拿走了
+ if (((LosTaskCB *)mutex->owner == runTask) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {//第一种情况 runtask是锁持有方
+ mutex->muxCount++; //递归锁计数器加1,递归锁的目的是防止死锁,鸿蒙默认用的就是递归锁(LOS_MUX_DEFAULT = LOS_MUX_RECURSIVE)
+ return LOS_OK; //成功退出
}
-
- if (!timeout) {
- return LOS_EINVAL;
+ //到了这里说明锁在别的任务那里,当前任务只能被阻塞了.
+ if (!timeout) {//参数timeout表示等待多久再来拿锁
+ return LOS_EINVAL;//timeout = 0表示不等了,没拿到锁就返回不纠结,返回错误.见于LOS_MuxTrylock
}
-
- if (!OsPreemptableInSched()) {
- return LOS_EDEADLK;
+ //自己要被阻塞,只能申请调度,让出CPU core 让别的任务上
+ if (!OsPreemptableInSched()) {//不能申请调度 (不能调度的原因是因为没有持有调度任务自旋锁)
+ return LOS_EDEADLK;//返回错误,自旋锁被别的CPU core 持有
}
- OsMuxBitmapSet(mutex, runTask);
+ OsMuxBitmapSet(mutex, runTask);//设置锁位图,尽可能的提高锁持有任务的优先级
- runTask->taskMux = (VOID *)mutex;
+ runTask->taskMux = (VOID *)mutex; //记下当前任务在等待这把锁
LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &mutex->muxList);
if (node == NULL) {
ret = LOS_NOK;
@@ -332,10 +386,10 @@ STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
OsTaskWaitSetPendMask(OS_TASK_WAIT_MUTEX, (UINTPTR)mutex, timeout);
ret = runTask->ops->wait(runTask, node, timeout);
- if (ret == LOS_ERRNO_TSK_TIMEOUT) {
+ if (ret == LOS_ERRNO_TSK_TIMEOUT) {//这行代码虽和OsTaskWait挨在一起,但要过很久才会执行到,因为在OsTaskWait中CPU切换了任务上下文
OsMuxBitmapRestore(mutex, NULL, runTask);
- runTask->taskMux = NULL;
- ret = LOS_ETIMEDOUT;
+ runTask->taskMux = NULL;// 所以重新回到这里时可能已经超时了
+ ret = LOS_ETIMEDOUT;//返回超时
}
return ret;
@@ -343,7 +397,7 @@ STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
UINT32 OsMuxLockUnsafe(LosMux *mutex, UINT32 timeout)
{
- LosTaskCB *runTask = OsCurrTaskGet();
+ LosTaskCB *runTask = OsCurrTaskGet();//获取当前任务
if (mutex->magic != OS_MUX_MAGIC) {
return LOS_EBADF;
@@ -352,23 +406,23 @@ UINT32 OsMuxLockUnsafe(LosMux *mutex, UINT32 timeout)
if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {
return LOS_EINVAL;
}
-
+ //LOS_MUX_ERRORCHECK 时 muxCount是要等于0 ,当前任务持有锁就不能再lock了. 鸿蒙默认用的是递归锁LOS_MUX_RECURSIVE
if ((mutex->attr.type == LOS_MUX_ERRORCHECK) && (mutex->owner == (VOID *)runTask)) {
return LOS_EDEADLK;
}
return OsMuxPendOp(runTask, mutex, timeout);
}
-
+/// 尝试加锁,
UINT32 OsMuxTrylockUnsafe(LosMux *mutex, UINT32 timeout)
{
- LosTaskCB *runTask = OsCurrTaskGet();
+ LosTaskCB *runTask = OsCurrTaskGet();//获取当前任务
- if (mutex->magic != OS_MUX_MAGIC) {
+ if (mutex->magic != OS_MUX_MAGIC) {//检查MAGIC有没有被改变
return LOS_EBADF;
}
- if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {
+ if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {//检查互斥锁属性
return LOS_EINVAL;
}
@@ -377,9 +431,9 @@ UINT32 OsMuxTrylockUnsafe(LosMux *mutex, UINT32 timeout)
return LOS_EBUSY;
}
- return OsMuxPendOp(runTask, mutex, timeout);
+ return OsMuxPendOp(runTask, mutex, timeout);//当前任务去拿锁,拿不到就等timeout
}
-
+/// 申请互斥锁,拿不到锁时最长阻塞timeout
LITE_OS_SEC_TEXT UINT32 LOS_MuxLock(LosMux *mutex, UINT32 timeout)
{
LosTaskCB *runTask = NULL;
@@ -394,19 +448,19 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxLock(LosMux *mutex, UINT32 timeout)
return LOS_EINTR;
}
- runTask = (LosTaskCB *)OsCurrTaskGet();
+ runTask = (LosTaskCB *)OsCurrTaskGet();//获取当前任务
/* DO NOT Call blocking API in system tasks */
- if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
+ if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {//不要在内核任务里用mux锁
PRINTK("Warning: DO NOT call %s in system tasks.\n", __FUNCTION__);
- OsBackTrace();
+ OsBackTrace();//打印task信息
}
- SCHEDULER_LOCK(intSave);
- ret = OsMuxLockUnsafe(mutex, timeout);
+ SCHEDULER_LOCK(intSave);//调度自旋锁
+ ret = OsMuxLockUnsafe(mutex, timeout);//如果任务没拿到锁,将进入阻塞队列一直等待,直到timeout或者持锁任务释放锁时唤醒它
SCHEDULER_UNLOCK(intSave);
return ret;
}
-
+///尝试要锁,没拿到也不等,直接返回,不纠结
LITE_OS_SEC_TEXT UINT32 LOS_MuxTrylock(LosMux *mutex)
{
LosTaskCB *runTask = NULL;
@@ -421,39 +475,50 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxTrylock(LosMux *mutex)
return LOS_EINTR;
}
- runTask = (LosTaskCB *)OsCurrTaskGet();
+ runTask = (LosTaskCB *)OsCurrTaskGet();//获取当前执行的任务
/* DO NOT Call blocking API in system tasks */
- if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
+ if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {//系统任务不能调用可能阻塞的接口
PRINTK("Warning: DO NOT call %s in system tasks.\n", __FUNCTION__);
OsBackTrace();
}
SCHEDULER_LOCK(intSave);
- ret = OsMuxTrylockUnsafe(mutex, 0);
+ ret = OsMuxTrylockUnsafe(mutex, 0);//timeout = 0,不等待,没拿到锁就算了
SCHEDULER_UNLOCK(intSave);
return ret;
}
+/*!
+ * @brief OsMuxPostOp
+ * 检查是否有其他任务因等待该互斥锁而处于阻塞态,如果有就唤醒它,注意唤醒一个任务的操作是由别的任务完成的
+ * OsMuxPostOp只由OsMuxUnlockUnsafe调用,参数任务归还锁后,自然会遇到锁交给谁用的问题;因为可能有多个任务在申请该锁,由OsMuxPostOp来回答这个问题
+ * @param mutex
+ * @param needSched
+ * @param taskCB
+ * @return
+ *
+ * @see
+ */
STATIC UINT32 OsMuxPostOp(LosTaskCB *taskCB, LosMux *mutex, BOOL *needSched)
{
- if (LOS_ListEmpty(&mutex->muxList)) {
- LOS_ListDelete(&mutex->holdList);
+ if (LOS_ListEmpty(&mutex->muxList)) {//如果互斥锁列表为空
+ LOS_ListDelete(&mutex->holdList);//把持有互斥锁的节点摘掉
mutex->owner = NULL;
return LOS_OK;
}
- LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(mutex->muxList)));
+ LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(mutex->muxList)));//拿到等待互斥锁链表的第一个任务实体,接下来要唤醒任务
OsMuxBitmapRestore(mutex, &mutex->muxList, resumedTask);
- mutex->muxCount = 1;
- mutex->owner = (VOID *)resumedTask;
- LOS_ListDelete(&mutex->holdList);
- LOS_ListTailInsert(&resumedTask->lockList, &mutex->holdList);
+ mutex->muxCount = 1;//互斥锁数量为1
+ mutex->owner = (VOID *)resumedTask;//互斥锁的持有人换了
+ LOS_ListDelete(&mutex->holdList);//自然要从等锁链表中把自己摘出去
+ LOS_ListTailInsert(&resumedTask->lockList, &mutex->holdList);//把锁挂到恢复任务的锁链表上,lockList是任务持有的所有锁记录
OsTaskWakeClearPendMask(resumedTask);
resumedTask->ops->wake(resumedTask);
resumedTask->taskMux = NULL;
- if (needSched != NULL) {
- *needSched = TRUE;
+ if (needSched != NULL) {//如果不为空
+ *needSched = TRUE;//就走起再次调度流程
}
return LOS_OK;
@@ -476,21 +541,21 @@ UINT32 OsMuxUnlockUnsafe(LosTaskCB *taskCB, LosMux *mutex, BOOL *needSched)
if (mutex->muxCount == 0) {
return LOS_EPERM;
}
-
- if ((--mutex->muxCount != 0) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {
+ //注意 --mutex->muxCount 先执行了-- 操作.
+ if ((--mutex->muxCount != 0) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {//属性类型为LOS_MUX_RECURSIVE时,muxCount是可以不为0的
return LOS_OK;
}
- if (mutex->attr.protocol == LOS_MUX_PRIO_PROTECT) {
+ if (mutex->attr.protocol == LOS_MUX_PRIO_PROTECT) {//属性协议为保护时
SchedParam param = { 0 };
taskCB->ops->schedParamGet(taskCB, ¶m);
taskCB->ops->priorityRestore(taskCB, NULL, ¶m);
}
- /* Whether a task block the mutex lock. */
- return OsMuxPostOp(taskCB, mutex, needSched);
+ /* Whether a task block the mutex lock. *///任务是否阻塞互斥锁
+ return OsMuxPostOp(taskCB, mutex, needSched);//一个任务去唤醒另一个在等锁的任务
}
-
+///释放锁
LITE_OS_SEC_TEXT UINT32 LOS_MuxUnlock(LosMux *mutex)
{
LosTaskCB *runTask = NULL;
@@ -506,9 +571,9 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxUnlock(LosMux *mutex)
return LOS_EINTR;
}
- runTask = (LosTaskCB *)OsCurrTaskGet();
+ runTask = (LosTaskCB *)OsCurrTaskGet();//获取当前任务
/* DO NOT Call blocking API in system tasks */
- if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
+ if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {//不能在系统任务里调用,因为很容易让系统任务发生死锁
PRINTK("Warning: DO NOT call %s in system tasks.\n", __FUNCTION__);
OsBackTrace();
}
@@ -516,12 +581,12 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxUnlock(LosMux *mutex)
SCHEDULER_LOCK(intSave);
ret = OsMuxUnlockUnsafe(runTask, mutex, &needSched);
SCHEDULER_UNLOCK(intSave);
- if (needSched == TRUE) {
- LOS_MpSchedule(OS_MP_CPU_ALL);
- LOS_Schedule();
+ if (needSched == TRUE) {//需要调度的情况
+ LOS_MpSchedule(OS_MP_CPU_ALL);//向所有CPU发送调度指令
+ LOS_Schedule();//发起调度
}
return ret;
}
-#endif /* LOSCFG_BASE_IPC_MUX */
+#endif /* LOSCFG_BASE_IPC_MUX */
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_queue.c b/src/kernel_liteos_a/kernel/base/ipc/los_queue.c
index 8fe06ba2..d3d0de7e 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_queue.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_queue.c
@@ -1,519 +1,612 @@
-/*
- * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without modification,
- * are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice, this list of
- * conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice, this list
- * of conditions and the following disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its contributors may be used
- * to endorse or promote products derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "los_queue_pri.h"
-#include "los_queue_debug_pri.h"
-#include "los_task_pri.h"
-#include "los_sched_pri.h"
-#include "los_spinlock.h"
-#include "los_mp.h"
-#include "los_percpu_pri.h"
-#include "los_hook.h"
-#ifdef LOSCFG_IPC_CONTAINER
-#include "los_ipc_container_pri.h"
-#endif
-
-#ifdef LOSCFG_BASE_IPC_QUEUE
-#if (LOSCFG_BASE_IPC_QUEUE_LIMIT <= 0)
-#error "queue maxnum cannot be zero"
-#endif /* LOSCFG_BASE_IPC_QUEUE_LIMIT <= 0 */
-
-#ifndef LOSCFG_IPC_CONTAINER
-LITE_OS_SEC_BSS LosQueueCB *g_allQueue = NULL;
-LITE_OS_SEC_BSS STATIC LOS_DL_LIST g_freeQueueList;
-#define FREE_QUEUE_LIST g_freeQueueList
-#endif
-
-LITE_OS_SEC_TEXT_INIT LosQueueCB *OsAllQueueCBInit(LOS_DL_LIST *freeQueueList)
-{
- UINT32 index;
-
- if (freeQueueList == NULL) {
- return NULL;
- }
-
- UINT32 size = LOSCFG_BASE_IPC_QUEUE_LIMIT * sizeof(LosQueueCB);
- /* system resident memory, don't free */
- LosQueueCB *allQueue = (LosQueueCB *)LOS_MemAlloc(m_aucSysMem0, size);
- if (allQueue == NULL) {
- return NULL;
- }
- (VOID)memset_s(allQueue, size, 0, size);
- LOS_ListInit(freeQueueList);
- for (index = 0; index < LOSCFG_BASE_IPC_QUEUE_LIMIT; index++) {
- LosQueueCB *queueNode = ((LosQueueCB *)allQueue) + index;
- queueNode->queueID = index;
- LOS_ListTailInsert(freeQueueList, &queueNode->readWriteList[OS_QUEUE_WRITE]);
- }
-
-#ifndef LOSCFG_IPC_CONTAINER
- if (OsQueueDbgInitHook() != LOS_OK) {
- (VOID)LOS_MemFree(m_aucSysMem0, allQueue);
- return NULL;
- }
-#endif
- return allQueue;
-}
-
-/*
- * Description : queue initial
- * Return : LOS_OK on success or error code on failure
- */
-LITE_OS_SEC_TEXT_INIT UINT32 OsQueueInit(VOID)
-{
-#ifndef LOSCFG_IPC_CONTAINER
- g_allQueue = OsAllQueueCBInit(&g_freeQueueList);
- if (g_allQueue == NULL) {
- return LOS_ERRNO_QUEUE_NO_MEMORY;
- }
-#endif
- return LOS_OK;
-}
-
-LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueCreate(CHAR *queueName, UINT16 len, UINT32 *queueID,
- UINT32 flags, UINT16 maxMsgSize)
-{
- LosQueueCB *queueCB = NULL;
- UINT32 intSave;
- LOS_DL_LIST *unusedQueue = NULL;
- UINT8 *queue = NULL;
- UINT16 msgSize;
-
- (VOID)queueName;
- (VOID)flags;
-
- if (queueID == NULL) {
- return LOS_ERRNO_QUEUE_CREAT_PTR_NULL;
- }
-
- if (maxMsgSize > (OS_NULL_SHORT - sizeof(UINT32))) {
- return LOS_ERRNO_QUEUE_SIZE_TOO_BIG;
- }
-
- if ((len == 0) || (maxMsgSize == 0)) {
- return LOS_ERRNO_QUEUE_PARA_ISZERO;
- }
-
- msgSize = maxMsgSize + sizeof(UINT32);
- /*
- * Memory allocation is time-consuming, to shorten the time of disable interrupt,
- * move the memory allocation to here.
- */
- queue = (UINT8 *)LOS_MemAlloc(m_aucSysMem1, (UINT32)len * msgSize);
- if (queue == NULL) {
- return LOS_ERRNO_QUEUE_CREATE_NO_MEMORY;
- }
-
- SCHEDULER_LOCK(intSave);
- if (LOS_ListEmpty(&FREE_QUEUE_LIST)) {
- SCHEDULER_UNLOCK(intSave);
- OsQueueCheckHook();
- (VOID)LOS_MemFree(m_aucSysMem1, queue);
- return LOS_ERRNO_QUEUE_CB_UNAVAILABLE;
- }
-
- unusedQueue = LOS_DL_LIST_FIRST(&FREE_QUEUE_LIST);
- LOS_ListDelete(unusedQueue);
- queueCB = GET_QUEUE_LIST(unusedQueue);
- queueCB->queueLen = len;
- queueCB->queueSize = msgSize;
- queueCB->queueHandle = queue;
- queueCB->queueState = OS_QUEUE_INUSED;
- queueCB->readWriteableCnt[OS_QUEUE_READ] = 0;
- queueCB->readWriteableCnt[OS_QUEUE_WRITE] = len;
- queueCB->queueHead = 0;
- queueCB->queueTail = 0;
- LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_READ]);
- LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_WRITE]);
- LOS_ListInit(&queueCB->memList);
-
- OsQueueDbgUpdateHook(queueCB->queueID, OsCurrTaskGet()->taskEntry);
- SCHEDULER_UNLOCK(intSave);
-
- *queueID = queueCB->queueID;
- OsHookCall(LOS_HOOK_TYPE_QUEUE_CREATE, queueCB);
- return LOS_OK;
-}
-
-STATIC LITE_OS_SEC_TEXT UINT32 OsQueueReadParameterCheck(UINT32 queueID, const VOID *bufferAddr,
- const UINT32 *bufferSize, UINT32 timeout)
-{
- if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
- return LOS_ERRNO_QUEUE_INVALID;
- }
- if ((bufferAddr == NULL) || (bufferSize == NULL)) {
- return LOS_ERRNO_QUEUE_READ_PTR_NULL;
- }
-
- if ((*bufferSize == 0) || (*bufferSize > (OS_NULL_SHORT - sizeof(UINT32)))) {
- return LOS_ERRNO_QUEUE_READSIZE_IS_INVALID;
- }
-
- OsQueueDbgTimeUpdateHook(queueID);
-
- if (timeout != LOS_NO_WAIT) {
- if (OS_INT_ACTIVE) {
- return LOS_ERRNO_QUEUE_READ_IN_INTERRUPT;
- }
- }
- return LOS_OK;
-}
-
-STATIC LITE_OS_SEC_TEXT UINT32 OsQueueWriteParameterCheck(UINT32 queueID, const VOID *bufferAddr,
- const UINT32 *bufferSize, UINT32 timeout)
-{
- if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
- return LOS_ERRNO_QUEUE_INVALID;
- }
-
- if (bufferAddr == NULL) {
- return LOS_ERRNO_QUEUE_WRITE_PTR_NULL;
- }
-
- if (*bufferSize == 0) {
- return LOS_ERRNO_QUEUE_WRITESIZE_ISZERO;
- }
-
- OsQueueDbgTimeUpdateHook(queueID);
-
- if (timeout != LOS_NO_WAIT) {
- if (OS_INT_ACTIVE) {
- return LOS_ERRNO_QUEUE_WRITE_IN_INTERRUPT;
- }
- }
- return LOS_OK;
-}
-
-STATIC VOID OsQueueBufferOperate(LosQueueCB *queueCB, UINT32 operateType, VOID *bufferAddr, UINT32 *bufferSize)
-{
- UINT8 *queueNode = NULL;
- UINT32 msgDataSize;
- UINT16 queuePosition;
-
- /* get the queue position */
- switch (OS_QUEUE_OPERATE_GET(operateType)) {
- case OS_QUEUE_READ_HEAD:
- queuePosition = queueCB->queueHead;
- ((queueCB->queueHead + 1) == queueCB->queueLen) ? (queueCB->queueHead = 0) : (queueCB->queueHead++);
- break;
- case OS_QUEUE_WRITE_HEAD:
- (queueCB->queueHead == 0) ? (queueCB->queueHead = queueCB->queueLen - 1) : (--queueCB->queueHead);
- queuePosition = queueCB->queueHead;
- break;
- case OS_QUEUE_WRITE_TAIL:
- queuePosition = queueCB->queueTail;
- ((queueCB->queueTail + 1) == queueCB->queueLen) ? (queueCB->queueTail = 0) : (queueCB->queueTail++);
- break;
- default: /* read tail, reserved. */
- PRINT_ERR("invalid queue operate type!\n");
- return;
- }
-
- queueNode = &(queueCB->queueHandle[(queuePosition * (queueCB->queueSize))]);
-
- if (OS_QUEUE_IS_READ(operateType)) {
- if (memcpy_s(&msgDataSize, sizeof(UINT32), queueNode + queueCB->queueSize - sizeof(UINT32),
- sizeof(UINT32)) != EOK) {
- PRINT_ERR("get msgdatasize failed\n");
- return;
- }
- msgDataSize = (*bufferSize < msgDataSize) ? *bufferSize : msgDataSize;
- if (memcpy_s(bufferAddr, *bufferSize, queueNode, msgDataSize) != EOK) {
- PRINT_ERR("copy message to buffer failed\n");
- return;
- }
-
- *bufferSize = msgDataSize;
- } else {
- if (memcpy_s(queueNode, queueCB->queueSize, bufferAddr, *bufferSize) != EOK) {
- PRINT_ERR("store message failed\n");
- return;
- }
- if (memcpy_s(queueNode + queueCB->queueSize - sizeof(UINT32), sizeof(UINT32), bufferSize,
- sizeof(UINT32)) != EOK) {
- PRINT_ERR("store message size failed\n");
- return;
- }
- }
-}
-
-STATIC UINT32 OsQueueOperateParamCheck(const LosQueueCB *queueCB, UINT32 queueID,
- UINT32 operateType, const UINT32 *bufferSize)
-{
- if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
- return LOS_ERRNO_QUEUE_NOT_CREATE;
- }
-
- if (OS_QUEUE_IS_WRITE(operateType) && (*bufferSize > (queueCB->queueSize - sizeof(UINT32)))) {
- return LOS_ERRNO_QUEUE_WRITE_SIZE_TOO_BIG;
- }
- return LOS_OK;
-}
-
-UINT32 OsQueueOperate(UINT32 queueID, UINT32 operateType, VOID *bufferAddr, UINT32 *bufferSize, UINT32 timeout)
-{
- UINT32 ret;
- UINT32 readWrite = OS_QUEUE_READ_WRITE_GET(operateType);
- UINT32 intSave;
- OsHookCall(LOS_HOOK_TYPE_QUEUE_READ, (LosQueueCB *)GET_QUEUE_HANDLE(queueID), operateType, *bufferSize, timeout);
-
- SCHEDULER_LOCK(intSave);
- LosQueueCB *queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);
- ret = OsQueueOperateParamCheck(queueCB, queueID, operateType, bufferSize);
- if (ret != LOS_OK) {
- goto QUEUE_END;
- }
-
- if (queueCB->readWriteableCnt[readWrite] == 0) {
- if (timeout == LOS_NO_WAIT) {
- ret = OS_QUEUE_IS_READ(operateType) ? LOS_ERRNO_QUEUE_ISEMPTY : LOS_ERRNO_QUEUE_ISFULL;
- goto QUEUE_END;
- }
-
- if (!OsPreemptableInSched()) {
- ret = LOS_ERRNO_QUEUE_PEND_IN_LOCK;
- goto QUEUE_END;
- }
-
- LosTaskCB *runTask = OsCurrTaskGet();
- OsTaskWaitSetPendMask(OS_TASK_WAIT_QUEUE, queueCB->queueID, timeout);
- ret = runTask->ops->wait(runTask, &queueCB->readWriteList[readWrite], timeout);
- if (ret == LOS_ERRNO_TSK_TIMEOUT) {
- ret = LOS_ERRNO_QUEUE_TIMEOUT;
- goto QUEUE_END;
- }
- } else {
- queueCB->readWriteableCnt[readWrite]--;
- }
-
- OsQueueBufferOperate(queueCB, operateType, bufferAddr, bufferSize);
-
- if (!LOS_ListEmpty(&queueCB->readWriteList[!readWrite])) {
- LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&queueCB->readWriteList[!readWrite]));
- OsTaskWakeClearPendMask(resumedTask);
- resumedTask->ops->wake(resumedTask);
- SCHEDULER_UNLOCK(intSave);
- LOS_MpSchedule(OS_MP_CPU_ALL);
- LOS_Schedule();
- return LOS_OK;
- } else {
- queueCB->readWriteableCnt[!readWrite]++;
- }
-
-QUEUE_END:
- SCHEDULER_UNLOCK(intSave);
- return ret;
-}
-
-LITE_OS_SEC_TEXT UINT32 LOS_QueueReadCopy(UINT32 queueID,
- VOID *bufferAddr,
- UINT32 *bufferSize,
- UINT32 timeout)
-{
- UINT32 ret;
- UINT32 operateType;
-
- ret = OsQueueReadParameterCheck(queueID, bufferAddr, bufferSize, timeout);
- if (ret != LOS_OK) {
- return ret;
- }
-
- operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_READ, OS_QUEUE_HEAD);
- return OsQueueOperate(queueID, operateType, bufferAddr, bufferSize, timeout);
-}
-
-LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHeadCopy(UINT32 queueID,
- VOID *bufferAddr,
- UINT32 bufferSize,
- UINT32 timeout)
-{
- UINT32 ret;
- UINT32 operateType;
-
- ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout);
- if (ret != LOS_OK) {
- return ret;
- }
-
- operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_HEAD);
- return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);
-}
-
-LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteCopy(UINT32 queueID,
- VOID *bufferAddr,
- UINT32 bufferSize,
- UINT32 timeout)
-{
- UINT32 ret;
- UINT32 operateType;
-
- ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout);
- if (ret != LOS_OK) {
- return ret;
- }
-
- operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_TAIL);
- return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);
-}
-
-LITE_OS_SEC_TEXT UINT32 LOS_QueueRead(UINT32 queueID, VOID *bufferAddr, UINT32 bufferSize, UINT32 timeout)
-{
- return LOS_QueueReadCopy(queueID, bufferAddr, &bufferSize, timeout);
-}
-
-LITE_OS_SEC_TEXT UINT32 LOS_QueueWrite(UINT32 queueID, VOID *bufferAddr, UINT32 bufferSize, UINT32 timeout)
-{
- if (bufferAddr == NULL) {
- return LOS_ERRNO_QUEUE_WRITE_PTR_NULL;
- }
- bufferSize = sizeof(CHAR *);
- return LOS_QueueWriteCopy(queueID, &bufferAddr, bufferSize, timeout);
-}
-
-LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHead(UINT32 queueID,
- VOID *bufferAddr,
- UINT32 bufferSize,
- UINT32 timeout)
-{
- if (bufferAddr == NULL) {
- return LOS_ERRNO_QUEUE_WRITE_PTR_NULL;
- }
- bufferSize = sizeof(CHAR *);
- return LOS_QueueWriteHeadCopy(queueID, &bufferAddr, bufferSize, timeout);
-}
-
-LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueDelete(UINT32 queueID)
-{
- LosQueueCB *queueCB = NULL;
- UINT8 *queue = NULL;
- UINT32 intSave;
- UINT32 ret;
-
- if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
- return LOS_ERRNO_QUEUE_NOT_FOUND;
- }
-
- SCHEDULER_LOCK(intSave);
- queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);
- if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
- ret = LOS_ERRNO_QUEUE_NOT_CREATE;
- goto QUEUE_END;
- }
-
- if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_READ])) {
- ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
- goto QUEUE_END;
- }
-
- if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_WRITE])) {
- ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
- goto QUEUE_END;
- }
-
- if (!LOS_ListEmpty(&queueCB->memList)) {
- ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
- goto QUEUE_END;
- }
-
- if ((queueCB->readWriteableCnt[OS_QUEUE_WRITE] + queueCB->readWriteableCnt[OS_QUEUE_READ]) !=
- queueCB->queueLen) {
- ret = LOS_ERRNO_QUEUE_IN_TSKWRITE;
- goto QUEUE_END;
- }
-
- queue = queueCB->queueHandle;
- queueCB->queueHandle = NULL;
- queueCB->queueState = OS_QUEUE_UNUSED;
- queueCB->queueID = SET_QUEUE_ID(GET_QUEUE_COUNT(queueCB->queueID) + 1, GET_QUEUE_INDEX(queueCB->queueID));
- OsQueueDbgUpdateHook(queueCB->queueID, NULL);
-
- LOS_ListTailInsert(&FREE_QUEUE_LIST, &queueCB->readWriteList[OS_QUEUE_WRITE]);
- SCHEDULER_UNLOCK(intSave);
- OsHookCall(LOS_HOOK_TYPE_QUEUE_DELETE, queueCB);
- ret = LOS_MemFree(m_aucSysMem1, (VOID *)queue);
- return ret;
-
-QUEUE_END:
- SCHEDULER_UNLOCK(intSave);
- return ret;
-}
-
-LITE_OS_SEC_TEXT_MINOR UINT32 LOS_QueueInfoGet(UINT32 queueID, QUEUE_INFO_S *queueInfo)
-{
- UINT32 intSave;
- UINT32 ret = LOS_OK;
- LosQueueCB *queueCB = NULL;
- LosTaskCB *tskCB = NULL;
-
- if (queueInfo == NULL) {
- return LOS_ERRNO_QUEUE_PTR_NULL;
- }
-
- if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
- return LOS_ERRNO_QUEUE_INVALID;
- }
-
- (VOID)memset_s((VOID *)queueInfo, sizeof(QUEUE_INFO_S), 0, sizeof(QUEUE_INFO_S));
- SCHEDULER_LOCK(intSave);
-
- queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);
- if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
- ret = LOS_ERRNO_QUEUE_NOT_CREATE;
- goto QUEUE_END;
- }
-
- queueInfo->uwQueueID = queueID;
- queueInfo->usQueueLen = queueCB->queueLen;
- queueInfo->usQueueSize = queueCB->queueSize;
- queueInfo->usQueueHead = queueCB->queueHead;
- queueInfo->usQueueTail = queueCB->queueTail;
- queueInfo->usReadableCnt = queueCB->readWriteableCnt[OS_QUEUE_READ];
- queueInfo->usWritableCnt = queueCB->readWriteableCnt[OS_QUEUE_WRITE];
-
- LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_READ], LosTaskCB, pendList) {
- queueInfo->uwWaitReadTask |= 1ULL << tskCB->taskID;
- }
-
- LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_WRITE], LosTaskCB, pendList) {
- queueInfo->uwWaitWriteTask |= 1ULL << tskCB->taskID;
- }
-
- LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->memList, LosTaskCB, pendList) {
- queueInfo->uwWaitMemTask |= 1ULL << tskCB->taskID;
- }
-
-QUEUE_END:
- SCHEDULER_UNLOCK(intSave);
- return ret;
-}
-
-#endif /* LOSCFG_BASE_IPC_QUEUE */
-
+/**
+ * @file los_queue.c
+ * @brief
+ * @verbatim
+ 基本概念
+ 队列又称消息队列,是一种常用于任务间通信的数据结构。队列接收来自任务或中断的
+ 不固定长度消息,并根据不同的接口确定传递的消息是否存放在队列空间中。
+
+ 任务能够从队列里面读取消息,当队列中的消息为空时,挂起读取任务;当队列中有新消息时,
+ 挂起的读取任务被唤醒并处理新消息。任务也能够往队列里写入消息,当队列已经写满消息时,
+ 挂起写入任务;当队列中有空闲消息节点时,挂起的写入任务被唤醒并写入消息。如果将
+ 读队列和写队列的超时时间设置为0,则不会挂起任务,接口会直接返回,这就是非阻塞模式。
+
+ 消息队列提供了异步处理机制,允许将一个消息放入队列,但不立即处理。同时队列还有缓冲消息的作用。
+
+ 队列特性
+ 消息以先进先出的方式排队,支持异步读写。
+ 读队列和写队列都支持超时机制。
+ 每读取一条消息,就会将该消息节点设置为空闲。
+ 发送消息类型由通信双方约定,可以允许不同长度(不超过队列的消息节点大小)的消息。
+ 一个任务能够从任意一个消息队列接收和发送消息。
+ 多个任务能够从同一个消息队列接收和发送消息。
+ 创建队列时所需的队列空间,默认支持接口内系统自行动态申请内存的方式,同时也支持将用户分配的队列空间作为接口入参传入的方式。
+
+ 队列运作原理
+ 创建队列时,创建队列成功会返回队列ID。
+
+ 在队列控制块中维护着一个消息头节点位置Head和一个消息尾节点位置Tail,用于表示当前
+ 队列中消息的存储情况。Head表示队列中被占用的消息节点的起始位置。Tail表示被占用的
+ 消息节点的结束位置,也是空闲消息节点的起始位置。队列刚创建时,Head和Tail均指向队列起始位置。
+
+ 写队列时,根据readWriteableCnt[1]判断队列是否可以写入,不能对已满(readWriteableCnt[1]为0)
+ 队列进行写操作。写队列支持两种写入方式:向队列尾节点写入,也可以向队列头节点写入。尾节点写入时,
+ 根据Tail找到起始空闲消息节点作为数据写入对象,如果Tail已经指向队列尾部则采用回卷方式。头节点写入时,
+ 将Head的前一个节点作为数据写入对象,如果Head指向队列起始位置则采用回卷方式。
+
+ 读队列时,根据readWriteableCnt[0]判断队列是否有消息需要读取,对全部空闲(readWriteableCnt[0]为0)
+ 队列进行读操作会引起任务挂起。如果队列可以读取消息,则根据Head找到最先写入队列的消息节点进行读取。
+ 如果Head已经指向队列尾部则采用回卷方式。
+
+ 删除队列时,根据队列ID找到对应队列,把队列状态置为未使用,把队列控制块置为初始状态。
+ 如果是通过系统动态申请内存方式创建的队列,还会释放队列所占内存。
+
+ 使用场景
+ 队列用于任务间通信,可以实现消息的异步处理。同时消息的发送方和接收方不需要彼此联系,两者间是解耦的。
+
+ 队列错误码
+ 对存在失败可能性的操作返回对应的错误码,以便快速定位错误原因。
+ * @endverbatim
+ */
+
+
+/*
+ * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "los_queue_pri.h"
+#include "los_queue_debug_pri.h"
+#include "los_task_pri.h"
+#include "los_sched_pri.h"
+#include "los_spinlock.h"
+#include "los_mp.h"
+#include "los_percpu_pri.h"
+#include "los_hook.h"
+#ifdef LOSCFG_IPC_CONTAINER
+#include "los_ipc_container_pri.h"
+#endif
+
+#ifdef LOSCFG_BASE_IPC_QUEUE
+#if (LOSCFG_BASE_IPC_QUEUE_LIMIT <= 0)
+#error "queue maxnum cannot be zero"
+#endif /* LOSCFG_BASE_IPC_QUEUE_LIMIT <= 0 */
+
+#ifndef LOSCFG_IPC_CONTAINER
+LITE_OS_SEC_BSS LosQueueCB *g_allQueue = NULL;///< 消息队列池
+LITE_OS_SEC_BSS STATIC LOS_DL_LIST g_freeQueueList;///< 空闲队列链表,管分配的,需要队列从这里申请
+#define FREE_QUEUE_LIST g_freeQueueList
+#endif
+
+LITE_OS_SEC_TEXT_INIT LosQueueCB *OsAllQueueCBInit(LOS_DL_LIST *freeQueueList)
+{
+ UINT32 index;
+
+ if (freeQueueList == NULL) {
+ return NULL;
+ }
+
+ UINT32 size = LOSCFG_BASE_IPC_QUEUE_LIMIT * sizeof(LosQueueCB);
+ /* system resident memory, don't free */
+ LosQueueCB *allQueue = (LosQueueCB *)LOS_MemAlloc(m_aucSysMem0, size);
+ if (allQueue == NULL) {
+ return NULL;
+ }
+ (VOID)memset_s(allQueue, size, 0, size);
+ LOS_ListInit(freeQueueList);
+ for (index = 0; index < LOSCFG_BASE_IPC_QUEUE_LIMIT; index++) {
+ LosQueueCB *queueNode = ((LosQueueCB *)allQueue) + index;
+ queueNode->queueID = index;
+ LOS_ListTailInsert(freeQueueList, &queueNode->readWriteList[OS_QUEUE_WRITE]);
+ }
+
+#ifndef LOSCFG_IPC_CONTAINER
+ if (OsQueueDbgInitHook() != LOS_OK) {
+ (VOID)LOS_MemFree(m_aucSysMem0, allQueue);
+ return NULL;
+ }
+#endif
+ return allQueue;
+}
+/*
+ * Description : queue initial | 消息队列模块初始化
+ * Return : LOS_OK on success or error code on failure
+ */
+LITE_OS_SEC_TEXT_INIT UINT32 OsQueueInit(VOID)
+{
+#ifndef LOSCFG_IPC_CONTAINER
+ g_allQueue = OsAllQueueCBInit(&g_freeQueueList);
+ if (g_allQueue == NULL) {
+ return LOS_ERRNO_QUEUE_NO_MEMORY;
+ }
+#endif
+ return LOS_OK;
+}
+///创建一个队列,根据用户传入队列长度和消息节点大小来开辟相应的内存空间以供该队列使用,参数queueID带走队列ID
+LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueCreate(CHAR *queueName, UINT16 len, UINT32 *queueID,
+ UINT32 flags, UINT16 maxMsgSize)
+{
+ LosQueueCB *queueCB = NULL;
+ UINT32 intSave;
+ LOS_DL_LIST *unusedQueue = NULL;
+ UINT8 *queue = NULL;
+ UINT16 msgSize;
+
+ (VOID)queueName;
+ (VOID)flags;
+
+ if (queueID == NULL) {
+ return LOS_ERRNO_QUEUE_CREAT_PTR_NULL;
+ }
+
+ if (maxMsgSize > (OS_NULL_SHORT - sizeof(UINT32))) {// maxMsgSize上限 为啥要减去 sizeof(UINT32) ,因为前面存的是队列的大小
+ return LOS_ERRNO_QUEUE_SIZE_TOO_BIG;
+ }
+
+ if ((len == 0) || (maxMsgSize == 0)) {
+ return LOS_ERRNO_QUEUE_PARA_ISZERO;
+ }
+
+ msgSize = maxMsgSize + sizeof(UINT32);//总size = 消息体内容长度 + 消息大小(UINT32)
+ /*
+ * Memory allocation is time-consuming, to shorten the time of disable interrupt,
+ * move the memory allocation to here.
+ *///内存分配非常耗时,为了缩短禁用中断的时间,将内存分配移到此处,用的时候分配队列内存
+ queue = (UINT8 *)LOS_MemAlloc(m_aucSysMem1, (UINT32)len * msgSize);//从系统内存池中分配,由这里提供读写队列的内存
+ if (queue == NULL) {//这里是一次把队列要用到的所有最大内存都申请下来了,能保证不会出现后续使用过程中内存不够的问题出现
+ return LOS_ERRNO_QUEUE_CREATE_NO_MEMORY;//调用处有 OsSwtmrInit sys_mbox_new DoMqueueCreate ==
+ }
+
+ SCHEDULER_LOCK(intSave);
+ if (LOS_ListEmpty(&FREE_QUEUE_LIST)) {//没有空余的队列ID的处理,注意软时钟定时器是由 g_swtmrCBArray统一管理的,里面有正在使用和可分配空闲的队列
+ SCHEDULER_UNLOCK(intSave);//g_freeQueueList是管理可用于分配的队列链表,申请消息队列的ID需要向它要
+ OsQueueCheckHook();
+ (VOID)LOS_MemFree(m_aucSysMem1, queue);//没有就要释放 queue申请的内存
+ return LOS_ERRNO_QUEUE_CB_UNAVAILABLE;
+ }
+
+ unusedQueue = LOS_DL_LIST_FIRST(&FREE_QUEUE_LIST);//找到一个没有被使用的队列
LOS_ListDelete(unusedQueue);//将自己从g_freeQueueList中摘除, unusedQueue只是个 LOS_DL_LIST 结点.
+ queueCB = GET_QUEUE_LIST(unusedQueue);//通过unusedQueue找到整个消息队列(LosQueueCB)
+ queueCB->queueLen = len; //队列中消息的总个数,注意这个一旦创建是不能变的.
+ queueCB->queueSize = msgSize;//消息节点的大小,注意这个一旦创建也是不能变的.
+ queueCB->queueHandle = queue; //队列句柄,队列内容存储区.
+ queueCB->queueState = OS_QUEUE_INUSED; //队列状态使用中
+ queueCB->readWriteableCnt[OS_QUEUE_READ] = 0;//可读资源计数,OS_QUEUE_READ(0):可读.
+    queueCB->readWriteableCnt[OS_QUEUE_WRITE] = len;//可写资源计数 OS_QUEUE_WRITE(1):可写, 默认len可写.
+ queueCB->queueHead = 0;//队列头节点
+ queueCB->queueTail = 0;//队列尾节点
+ LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_READ]);//初始化可读队列任务链表
+ LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_WRITE]);//初始化可写队列任务链表
+ LOS_ListInit(&queueCB->memList);//
+
+ OsQueueDbgUpdateHook(queueCB->queueID, OsCurrTaskGet()->taskEntry);//在创建或删除队列调试信息时更新任务条目
+ SCHEDULER_UNLOCK(intSave);
+
+ *queueID = queueCB->queueID;//带走队列ID
+ OsHookCall(LOS_HOOK_TYPE_QUEUE_CREATE, queueCB);
+ return LOS_OK;
+}
+///读队列参数检查
+STATIC LITE_OS_SEC_TEXT UINT32 OsQueueReadParameterCheck(UINT32 queueID, const VOID *bufferAddr,
+ const UINT32 *bufferSize, UINT32 timeout)
+{
+ if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {//队列ID不能超上限
+ return LOS_ERRNO_QUEUE_INVALID;
+ }
+ if ((bufferAddr == NULL) || (bufferSize == NULL)) {//缓存地址和大小参数判断
+ return LOS_ERRNO_QUEUE_READ_PTR_NULL;
+ }
+
+ if ((*bufferSize == 0) || (*bufferSize > (OS_NULL_SHORT - sizeof(UINT32)))) {//限制了读取数据的上限64K, sizeof(UINT32)代表的是队列的长度
+ return LOS_ERRNO_QUEUE_READSIZE_IS_INVALID; //所以要减去
+ }
+
+ OsQueueDbgTimeUpdateHook(queueID);
+
+ if (timeout != LOS_NO_WAIT) {//等待一定时间再读取
+ if (OS_INT_ACTIVE) {//如果碰上了硬中断
+ return LOS_ERRNO_QUEUE_READ_IN_INTERRUPT;//意思是:硬中断发生时是不能读消息队列的
+ }
+ }
+ return LOS_OK;
+}
+///写队列参数检查
+STATIC LITE_OS_SEC_TEXT UINT32 OsQueueWriteParameterCheck(UINT32 queueID, const VOID *bufferAddr,
+ const UINT32 *bufferSize, UINT32 timeout)
+{
+ if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {//队列ID不能超上限
+ return LOS_ERRNO_QUEUE_INVALID;
+ }
+
+ if (bufferAddr == NULL) {//没有数据源
+ return LOS_ERRNO_QUEUE_WRITE_PTR_NULL;
+ }
+
+ if (*bufferSize == 0) {//这里没有限制写队列的大小,如果写入一个很大buf 会怎样?
+ return LOS_ERRNO_QUEUE_WRITESIZE_ISZERO;
+ }
+
+ OsQueueDbgTimeUpdateHook(queueID);
+
+ if (timeout != LOS_NO_WAIT) {
+ if (OS_INT_ACTIVE) {
+ return LOS_ERRNO_QUEUE_WRITE_IN_INTERRUPT;
+ }
+ }
+ return LOS_OK;
+}
+///队列buf操作,注意队列数据是按顺序来读取的,要不从头,要不从尾部,不会出现从中间读写,所有可由 head 和 tail 来管理队列.
+STATIC VOID OsQueueBufferOperate(LosQueueCB *queueCB, UINT32 operateType, VOID *bufferAddr, UINT32 *bufferSize)
+{
+ UINT8 *queueNode = NULL;
+ UINT32 msgDataSize;
+ UINT16 queuePosition;
+
+ /* get the queue position | 先找到队列的位置*/
+ switch (OS_QUEUE_OPERATE_GET(operateType)) {//获取操作类型
+ case OS_QUEUE_READ_HEAD://从列队头开始读
+ queuePosition = queueCB->queueHead;//拿到头部位置
+ ((queueCB->queueHead + 1) == queueCB->queueLen) ? (queueCB->queueHead = 0) : (queueCB->queueHead++);//调整队列头部位置
+ break;
+ case OS_QUEUE_WRITE_HEAD://从列队头开始写
+ (queueCB->queueHead == 0) ? (queueCB->queueHead = queueCB->queueLen - 1) : (--queueCB->queueHead);//调整队列头部位置
+ queuePosition = queueCB->queueHead;//拿到头部位置
+ break;
+ case OS_QUEUE_WRITE_TAIL://从列队尾部开始写
+ queuePosition = queueCB->queueTail;//设置队列位置为尾部位置
+ ((queueCB->queueTail + 1) == queueCB->queueLen) ? (queueCB->queueTail = 0) : (queueCB->queueTail++);//调整队列尾部位置
+ break;
+ default: /* read tail, reserved. */
+ PRINT_ERR("invalid queue operate type!\n");
+ return;
+ }
+ //queueHandle是create队列时,由外界参数申请的一块内存. 用于copy 使用
+ queueNode = &(queueCB->queueHandle[(queuePosition * (queueCB->queueSize))]);//拿到队列节点
+
+ if (OS_QUEUE_IS_READ(operateType)) {//读操作处理,读队列分两步走
+ if (memcpy_s(&msgDataSize, sizeof(UINT32), queueNode + queueCB->queueSize - sizeof(UINT32),
+ sizeof(UINT32)) != EOK) {//1.先读出队列大小,由队列头四个字节表示
+ PRINT_ERR("get msgdatasize failed\n");
+ return;
+ }
+ msgDataSize = (*bufferSize < msgDataSize) ? *bufferSize : msgDataSize;
+ if (memcpy_s(bufferAddr, *bufferSize, queueNode, msgDataSize) != EOK) {//2.读表示读走已有数据,所以相当于bufferAddr接着了queueNode的数据
+ PRINT_ERR("copy message to buffer failed\n");
+ return;
+ }
+
+ *bufferSize = msgDataSize;//通过入参 带走消息的大小
+ } else {//只有读写两种操作,这里就是写队列了.写也分两步走 , @note_thinking 这里建议鸿蒙加上 OS_QUEUE_IS_WRITE 判断
+ if (memcpy_s(queueNode, queueCB->queueSize, bufferAddr, *bufferSize) != EOK) {//1.写入消息内容
+ PRINT_ERR("store message failed\n");//表示把外面数据写进来,所以相当于queueNode接着了bufferAddr的数据
+ return;
+ }
+ if (memcpy_s(queueNode + queueCB->queueSize - sizeof(UINT32), sizeof(UINT32), bufferSize,
+ sizeof(UINT32)) != EOK) {//2.写入消息数据的长度,sizeof(UINT32)
+ PRINT_ERR("store message size failed\n");
+ return;
+ }
+ }
+}
+///队列操作参数检查
+STATIC UINT32 OsQueueOperateParamCheck(const LosQueueCB *queueCB, UINT32 queueID,
+ UINT32 operateType, const UINT32 *bufferSize)
+{
+ if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {//队列ID和状态判断
+ return LOS_ERRNO_QUEUE_NOT_CREATE;
+ }
+
+ if (OS_QUEUE_IS_WRITE(operateType) && (*bufferSize > (queueCB->queueSize - sizeof(UINT32)))) {//写时判断
+ return LOS_ERRNO_QUEUE_WRITE_SIZE_TOO_BIG;//塞进来的数据太大,大于队列节点能承受的范围
+ }
+ return LOS_OK;
+}
+
+/**
+ * @brief 队列操作.是读是写由operateType定
+ 本函数是消息队列最重要的一个函数,可以分析出读取消息过程中
+ 发生的细节,涉及任务的唤醒和阻塞,阻塞链表任务的相互提醒.
+ * @param queueID
+ * @param operateType
+ * @param bufferAddr
+ * @param bufferSize
+ * @param timeout
+ * @return UINT32
+ */
+UINT32 OsQueueOperate(UINT32 queueID, UINT32 operateType, VOID *bufferAddr, UINT32 *bufferSize, UINT32 timeout)
+{
+ UINT32 ret;
+ UINT32 readWrite = OS_QUEUE_READ_WRITE_GET(operateType);//获取读/写操作标识
+ UINT32 intSave;
+ OsHookCall(LOS_HOOK_TYPE_QUEUE_READ, (LosQueueCB *)GET_QUEUE_HANDLE(queueID), operateType, *bufferSize, timeout);
+
+ SCHEDULER_LOCK(intSave);
+ LosQueueCB *queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);//获取对应的队列控制块
+ ret = OsQueueOperateParamCheck(queueCB, queueID, operateType, bufferSize);//参数检查
+ if (ret != LOS_OK) {
+ goto QUEUE_END;
+ }
+
+ if (queueCB->readWriteableCnt[readWrite] == 0) {//根据readWriteableCnt判断队列是否有消息读/写
+ if (timeout == LOS_NO_WAIT) {//不等待直接退出
+ ret = OS_QUEUE_IS_READ(operateType) ? LOS_ERRNO_QUEUE_ISEMPTY : LOS_ERRNO_QUEUE_ISFULL;
+ goto QUEUE_END;
+ }
+
+ if (!OsPreemptableInSched()) {//不支持抢占式调度
+ ret = LOS_ERRNO_QUEUE_PEND_IN_LOCK;
+ goto QUEUE_END;
+ }
+ //任务等待,这里很重要啊,将自己从就绪列表摘除,让出了CPU并发起了调度,并挂在readWriteList[readWrite]上,挂的都等待读/写消息的task
+ LosTaskCB *runTask = OsCurrTaskGet();
+ OsTaskWaitSetPendMask(OS_TASK_WAIT_QUEUE, queueCB->queueID, timeout);
+ ret = runTask->ops->wait(runTask, &queueCB->readWriteList[readWrite], timeout);
+ if (ret == LOS_ERRNO_TSK_TIMEOUT) {//唤醒后如果超时了,返回读/写消息失败
+ ret = LOS_ERRNO_QUEUE_TIMEOUT;
+ goto QUEUE_END;//
+ }
+ } else {
+ queueCB->readWriteableCnt[readWrite]--;//对应队列中计数器--,说明一条消息只能被读/写一次
+ }
+
+ OsQueueBufferOperate(queueCB, operateType, bufferAddr, bufferSize);//发起读或写队列操作
+
+ if (!LOS_ListEmpty(&queueCB->readWriteList[!readWrite])) {//如果还有任务在排着队等待读/写入消息(当时不能读/写的原因有可能当时队列满了==)
+ LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&queueCB->readWriteList[!readWrite]));//取出要读/写消息的任务
+ OsTaskWakeClearPendMask(resumedTask);
+ resumedTask->ops->wake(resumedTask);
+ SCHEDULER_UNLOCK(intSave);
+ LOS_MpSchedule(OS_MP_CPU_ALL);//让所有CPU发出调度申请,因为很可能那个要读/写消息的队列是由其他CPU执行
+ LOS_Schedule();//申请调度
+ return LOS_OK;
+ } else {
+ queueCB->readWriteableCnt[!readWrite]++;//对应队列读/写中计数器++
+ }
+
+QUEUE_END:
+ SCHEDULER_UNLOCK(intSave);
+ return ret;
+}
+///接口函数定时读取消息队列
+LITE_OS_SEC_TEXT UINT32 LOS_QueueReadCopy(UINT32 queueID,
+ VOID *bufferAddr,
+ UINT32 *bufferSize,
+ UINT32 timeout)
+{
+ UINT32 ret;
+ UINT32 operateType;
+
+ ret = OsQueueReadParameterCheck(queueID, bufferAddr, bufferSize, timeout);//参数检查
+ if (ret != LOS_OK) {
+ return ret;
+ }
+
+ operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_READ, OS_QUEUE_HEAD);//从头开始读
+ return OsQueueOperate(queueID, operateType, bufferAddr, bufferSize, timeout);//定时执行读操作
+}
+///接口函数从队列头开始写
+LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHeadCopy(UINT32 queueID,
+ VOID *bufferAddr,
+ UINT32 bufferSize,
+ UINT32 timeout)
+{
+ UINT32 ret;
+ UINT32 operateType;
+
+ ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout);//参数检查
+ if (ret != LOS_OK) {
+ return ret;
+ }
+
+ operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_HEAD);//从头开始写
+ return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);//执行写操作
+}
+///接口函数 从队列尾部开始写
+LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteCopy(UINT32 queueID,
+ VOID *bufferAddr,
+ UINT32 bufferSize,
+ UINT32 timeout)
+{
+ UINT32 ret;
+ UINT32 operateType;
+
+ ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout);//参数检查
+ if (ret != LOS_OK) {
+ return ret;
+ }
+
+ operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_TAIL);//从尾部开始写
+ return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);//执行写操作
+}
+
+/**
+ * @brief
+ * @verbatim
+ 外部接口 读一个队列数据
+ 读队列时,根据Head找到最先写入队列中的消息节点进行读取。如果Head已经指向队列尾则采用回卷方式。
+ 根据usReadableCnt判断队列是否有消息读取,对全部空闲(usReadableCnt为0)队列进行读队列操作会引起任务挂起。
+ * @endverbatim
+ */
+LITE_OS_SEC_TEXT UINT32 LOS_QueueRead(UINT32 queueID, VOID *bufferAddr, UINT32 bufferSize, UINT32 timeout)
+{
+ return LOS_QueueReadCopy(queueID, bufferAddr, &bufferSize, timeout);
+}
+
+/**
+ * @brief
+ * @verbatim
+ 外部接口 写一个队列数据
+ 根据Tail找到被占用消息节点末尾的空闲节点作为数据写入对象。如果Tail已经指向队列尾则采用回卷方式。
+ 根据usWritableCnt判断队列是否可以写入,不能对已满(usWritableCnt为0)队列进行写队列操作
+ * @endverbatim
+ */
+LITE_OS_SEC_TEXT UINT32 LOS_QueueWrite(UINT32 queueID, VOID *bufferAddr, UINT32 bufferSize, UINT32 timeout)
+{
+ if (bufferAddr == NULL) {
+ return LOS_ERRNO_QUEUE_WRITE_PTR_NULL;
+ }
+ bufferSize = sizeof(CHAR *);
+ return LOS_QueueWriteCopy(queueID, &bufferAddr, bufferSize, timeout);
+}
+
+/**
+ * @brief
+ * @verbatim
+ 外部接口 从头部写入
+ 写队列时,根据Tail找到被占用消息节点末尾的空闲节点作为数据写入对象。如果Tail已经指向队列尾则采用回卷方式。
+ 根据usWritableCnt判断队列是否可以写入,不能对已满(usWritableCnt为0)队列进行写队列操作
+ * @endverbatim
+ */
+LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHead(UINT32 queueID,
+ VOID *bufferAddr,
+ UINT32 bufferSize,
+ UINT32 timeout)
+{
+ if (bufferAddr == NULL) {
+ return LOS_ERRNO_QUEUE_WRITE_PTR_NULL;
+ }
+ bufferSize = sizeof(CHAR *);
+ return LOS_QueueWriteHeadCopy(queueID, &bufferAddr, bufferSize, timeout);
+}
+
+/**
+ * @brief
+ * @verbatim
+ 外部接口 删除队列,还有任务要读/写消息时不能删除
+ 删除队列时,根据传入的队列ID寻找到对应的队列,把队列状态置为未使用,
+ 释放原队列所占的空间,对应的队列控制头置为初始状态。
+ * @endverbatim
+ */
+LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueDelete(UINT32 queueID)
+{
+ LosQueueCB *queueCB = NULL;
+ UINT8 *queue = NULL;
+ UINT32 intSave;
+ UINT32 ret;
+
+ if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
+ return LOS_ERRNO_QUEUE_NOT_FOUND;
+ }
+
+ SCHEDULER_LOCK(intSave);
+ queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);//拿到队列实体
+ if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
+ ret = LOS_ERRNO_QUEUE_NOT_CREATE;
+ goto QUEUE_END;
+ }
+
+ if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_READ])) {//尚有任务要读数据
+ ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
+ goto QUEUE_END;
+ }
+
+ if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_WRITE])) {//尚有任务要写数据
+ ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
+ goto QUEUE_END;
+ }
+
+ if (!LOS_ListEmpty(&queueCB->memList)) {//
+ ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
+ goto QUEUE_END;
+ }
+
+ if ((queueCB->readWriteableCnt[OS_QUEUE_WRITE] + queueCB->readWriteableCnt[OS_QUEUE_READ]) !=
+ queueCB->queueLen) {//读写队列的内容长度不等于总长度
+ ret = LOS_ERRNO_QUEUE_IN_TSKWRITE;
+ goto QUEUE_END;
+ }
+
+ queue = queueCB->queueHandle; //队列buf
+ queueCB->queueHandle = NULL; //
+ queueCB->queueState = OS_QUEUE_UNUSED;//重置队列状态
+ queueCB->queueID = SET_QUEUE_ID(GET_QUEUE_COUNT(queueCB->queueID) + 1, GET_QUEUE_INDEX(queueCB->queueID));//@note_why 这里需要这样做吗?
+ OsQueueDbgUpdateHook(queueCB->queueID, NULL);
+
+ LOS_ListTailInsert(&FREE_QUEUE_LIST, &queueCB->readWriteList[OS_QUEUE_WRITE]);//回收,将节点挂入可分配链表,等待重新被分配再利用
+ SCHEDULER_UNLOCK(intSave);
+ OsHookCall(LOS_HOOK_TYPE_QUEUE_DELETE, queueCB);
+ ret = LOS_MemFree(m_aucSysMem1, (VOID *)queue);//释放队列句柄
+ return ret;
+
+QUEUE_END:
+ SCHEDULER_UNLOCK(intSave);
+ return ret;
+}
+///外部接口, 获取队列信息,用queueInfo 把 LosQueueCB数据接走,QUEUE_INFO_S对内部数据的封装
+LITE_OS_SEC_TEXT_MINOR UINT32 LOS_QueueInfoGet(UINT32 queueID, QUEUE_INFO_S *queueInfo)
+{
+ UINT32 intSave;
+ UINT32 ret = LOS_OK;
+ LosQueueCB *queueCB = NULL;
+ LosTaskCB *tskCB = NULL;
+
+ if (queueInfo == NULL) {
+ return LOS_ERRNO_QUEUE_PTR_NULL;
+ }
+
+ if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {//1024
+ return LOS_ERRNO_QUEUE_INVALID;
+ }
+
+ (VOID)memset_s((VOID *)queueInfo, sizeof(QUEUE_INFO_S), 0, sizeof(QUEUE_INFO_S));//接走数据之前先清0
+ SCHEDULER_LOCK(intSave);
+
+ queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);//通过队列ID获取 QCB
+ if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
+ ret = LOS_ERRNO_QUEUE_NOT_CREATE;
+ goto QUEUE_END;
+ }
+
+ queueInfo->uwQueueID = queueID;
+ queueInfo->usQueueLen = queueCB->queueLen;
+ queueInfo->usQueueSize = queueCB->queueSize;
+ queueInfo->usQueueHead = queueCB->queueHead;
+ queueInfo->usQueueTail = queueCB->queueTail;
+ queueInfo->usReadableCnt = queueCB->readWriteableCnt[OS_QUEUE_READ];//可读数
+ queueInfo->usWritableCnt = queueCB->readWriteableCnt[OS_QUEUE_WRITE];//可写数
+
+ LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_READ], LosTaskCB, pendList) {//找出哪些task需要读消息
+ queueInfo->uwWaitReadTask |= 1ULL << tskCB->taskID;//记录等待读消息的任务号, uwWaitReadTask 每一位代表一个任务编号
+ }//0b..011011011 代表 0,1,3,4,6,7 号任务有数据等待读消息.
+
+ LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_WRITE], LosTaskCB, pendList) {//找出哪些task需要写消息
+ queueInfo->uwWaitWriteTask |= 1ULL << tskCB->taskID;//记录等待写消息的任务号, uwWaitWriteTask 每一位代表一个任务编号
+ }////0b..011011011 代表 0,1,3,4,6,7 号任务有数据等待写消息.
+
+ LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->memList, LosTaskCB, pendList) {//同上
+ queueInfo->uwWaitMemTask |= 1ULL << tskCB->taskID; //MailBox模块使用
+ }
+
+QUEUE_END:
+ SCHEDULER_UNLOCK(intSave);
+ return ret;
+}
+
+#endif /* LOSCFG_BASE_IPC_QUEUE */
+
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_rwlock.c b/src/kernel_liteos_a/kernel/base/ipc/los_rwlock.c
index 546fe831..1cdda60f 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_rwlock.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_rwlock.c
@@ -1,6 +1,32 @@
+/*!
+ * @file los_rwlock.c
+ * @brief
+ * @link rwlock https://weharmony.github.io/openharmony/zh-cn/device-dev/kernel/kernel-small-basic-trans-rwlock.html @endlink
+ @verbatim
+ 基本概念
+ 读写锁与互斥锁类似,可用来同步同一进程中的各个任务,但与互斥锁不同的是,其允许多个读操作并发重入,而写操作互斥。
+ 相对于互斥锁的开锁或闭锁状态,读写锁有三种状态:读模式下的锁,写模式下的锁,无锁。
+ 读写锁的使用规则:
+ 保护区无写模式下的锁,任何任务均可以为其增加读模式下的锁。
+ 保护区处于无锁状态下,才可增加写模式下的锁。
+ 多任务环境下往往存在多个任务访问同一共享资源的应用场景,读模式下的锁以共享状态对保护区访问,
+ 而写模式下的锁可被用于对共享资源的保护从而实现独占式访问。
+ 这种共享-独占的方式非常适合多任务中读数据频率远大于写数据频率的应用中,提高应用多任务并发度。
+ 运行机制
+ 相较于互斥锁,读写锁如何实现读模式下的锁及写模式下的锁来控制多任务的读写访问呢?
+ 若A任务首次获取了写模式下的锁,有其他任务来获取或尝试获取读模式下的锁,均无法再上锁。
+ 若A任务获取了读模式下的锁,当有任务来获取或尝试获取读模式下的锁时,读写锁计数均加一。
+ @endverbatim
+ @image html
+ * @attention
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2022-02-18
+ */
+
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -37,9 +63,10 @@
#include "los_exc.h"
#include "los_sched_pri.h"
+
#ifdef LOSCFG_BASE_IPC_RWLOCK
#define RWLOCK_COUNT_MASK 0x00FFFFFFU
-
+/// 判断读写锁有效性
BOOL LOS_RwlockIsValid(const LosRwlock *rwlock)
{
if ((rwlock != NULL) && ((rwlock->magic & RWLOCK_COUNT_MASK) == OS_RWLOCK_MAGIC)) {
@@ -48,7 +75,7 @@ BOOL LOS_RwlockIsValid(const LosRwlock *rwlock)
return FALSE;
}
-
+/// 创建读写锁,初始化锁信息
UINT32 LOS_RwlockInit(LosRwlock *rwlock)
{
UINT32 intSave;
@@ -71,7 +98,7 @@ UINT32 LOS_RwlockInit(LosRwlock *rwlock)
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
-
+/// 删除指定的读写锁
UINT32 LOS_RwlockDestroy(LosRwlock *rwlock)
{
UINT32 intSave;
@@ -95,18 +122,18 @@ UINT32 LOS_RwlockDestroy(LosRwlock *rwlock)
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
-
+/// 读写锁检查
STATIC UINT32 OsRwlockCheck(const LosRwlock *rwlock)
{
if (rwlock == NULL) {
return LOS_EINVAL;
}
- if (OS_INT_ACTIVE) {
+ if (OS_INT_ACTIVE) { // 读写锁不能在中断服务程序中使用。请想想为什么 ?
return LOS_EINTR;
}
- /* DO NOT Call blocking API in system tasks */
+ /* DO NOT Call blocking API in system tasks | 系统任务不能使用读写锁 */
LosTaskCB *runTask = (LosTaskCB *)OsCurrTaskGet();
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
return LOS_EPERM;
@@ -114,19 +141,23 @@ STATIC UINT32 OsRwlockCheck(const LosRwlock *rwlock)
return LOS_OK;
}
-
+/// 指定任务优先级优先级是否低于 写锁任务最高优先级
STATIC BOOL OsRwlockPriCompare(LosTaskCB *runTask, LOS_DL_LIST *rwList)
{
if (!LOS_ListEmpty(rwList)) {
- LosTaskCB *highestTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(rwList));
- if (OsSchedParamCompare(runTask, highestTask) < 0) {
+ LosTaskCB *highestTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(rwList));//首个写锁任务优先级是最高的
+ if (OsSchedParamCompare(runTask, highestTask) < 0) {//如果当前任务优先级低于等待写锁任务
return TRUE;
}
return FALSE;
}
return TRUE;
}
-
+/* 申请读模式下的锁,分三种情况:
+1. 若无人持有锁,读任务可获得锁。
+2. 若有人持有锁,读任务可获得锁,读取顺序按照任务优先级。
+3. 若有人(非自己)持有写模式下的锁,则当前任务无法获得锁,直到写模式下的锁释放。
+*/
STATIC UINT32 OsRwlockRdPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 timeout)
{
UINT32 ret;
@@ -135,12 +166,12 @@ STATIC UINT32 OsRwlockRdPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 tim
* When the rwlock mode is read mode or free mode and the priority of the current read task
* is higher than the first pended write task. current read task can obtain this rwlock.
*/
- if (rwlock->rwCount >= 0) {
- if (OsRwlockPriCompare(runTask, &(rwlock->writeList))) {
- if (rwlock->rwCount == INT8_MAX) {
+ if (rwlock->rwCount >= 0) {//第一和第二种情况
+ if (OsRwlockPriCompare(runTask, &(rwlock->writeList))) {//读优先级低于写优先级,意思就是必须先写再读
+ if (rwlock->rwCount == INT8_MAX) {//读锁任务达到上限
return LOS_EINVAL;
}
- rwlock->rwCount++;
+ rwlock->rwCount++;//拿读锁成功
return LOS_OK;
}
}
@@ -149,45 +180,51 @@ STATIC UINT32 OsRwlockRdPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 tim
return LOS_EINVAL;
}
- if (!OsPreemptableInSched()) {
+ if (!OsPreemptableInSched()) {//不可抢占时
return LOS_EDEADLK;
}
- /* The current task is not allowed to obtain the write lock when it obtains the read lock. */
- if ((LosTaskCB *)(rwlock->writeOwner) == runTask) {
+ /* The current task is not allowed to obtain the write lock when it obtains the read lock.
+ | 当前任务在获得读锁时不允许获得写锁 */
+ if ((LosTaskCB *)(rwlock->writeOwner) == runTask) { //拥有写锁任务是否为当前任务
return LOS_EINVAL;
}
/*
* When the rwlock mode is write mode or the priority of the current read task
* is lower than the first pended write task, current read task will be pended.
+ | 当 rwlock 模式为写模式或当前读任务的优先级低于第一个挂起的写任务时,当前读任务将被挂起。
+ 反正就是写锁任务优先
*/
- LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->readList));
- ret = runTask->ops->wait(runTask, node, timeout);
+ LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->readList));//找到要挂入的位置
+ //例如现有链表内任务优先级为 0 3 8 9 23 当前为 10 时, 返回的是 9 这个节点
+ ret = runTask->ops->wait(runTask, node, timeout);//从尾部插入读锁链表 由此变成了 0 3 8 9 10 23
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
return LOS_ETIMEDOUT;
}
return ret;
}
-
+/// 申请写模式下的锁
STATIC UINT32 OsRwlockWrPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 timeout)
{
UINT32 ret;
- /* When the rwlock is free mode, current write task can obtain this rwlock. */
+ /* When the rwlock is free mode, current write task can obtain this rwlock.
+ | 若该锁当前没有任务持有,或者持有该读模式下的锁的任务和申请该锁的任务为同一个任务,则申请成功,可立即获得写模式下的锁。*/
if (rwlock->rwCount == 0) {
rwlock->rwCount = -1;
- rwlock->writeOwner = (VOID *)runTask;
+ rwlock->writeOwner = (VOID *)runTask;//直接给当前进程锁
return LOS_OK;
}
- /* Current write task can use one rwlock once again if the rwlock owner is it. */
+ /* Current write task can use one rwlock once again if the rwlock owner is it.
+ | 如果 rwlock 拥有者是当前写入任务,则它可以再次使用该锁。*/
if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) == runTask)) {
if (rwlock->rwCount == INT8_MIN) {
return LOS_EINVAL;
}
- rwlock->rwCount--;
+ rwlock->rwCount--;//注意再次拥有算是两把写锁了.
return LOS_OK;
}
@@ -201,9 +238,9 @@ STATIC UINT32 OsRwlockWrPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 tim
/*
* When the rwlock is read mode or other write task obtains this rwlock, current
- * write task will be pended.
+ * write task will be pended. | 当 rwlock 为读模式或其他写任务获得该 rwlock 时,当前的写任务将被挂起。直到读模式下的锁释放
*/
- LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->writeList));
+ LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->writeList));//找到要挂入的位置
ret = runTask->ops->wait(runTask, node, timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
ret = LOS_ETIMEDOUT;
@@ -265,20 +302,22 @@ UINT32 OsRwlockTryWrUnsafe(LosRwlock *rwlock, UINT32 timeout)
return LOS_EBADF;
}
- /* When the rwlock is read mode, current write task will be pended. */
+ /* When the rwlock is read mode, current write task will be pended.
+ | 当 rwlock 为读模式时,当前的写任务将被挂起。*/
if (rwlock->rwCount > 0) {
return LOS_EBUSY;
}
- /* When other write task obtains this rwlock, current write task will be pended. */
+ /* When other write task obtains this rwlock, current write task will be pended.
+ | 当其他写任务获得这个rwlock时,当前的写任务将被挂起。*/
LosTaskCB *runTask = OsCurrTaskGet();
if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) != runTask)) {
return LOS_EBUSY;
}
- return OsRwlockWrPendOp(runTask, rwlock, timeout);
+ return OsRwlockWrPendOp(runTask, rwlock, timeout);//
}
-
+/// 申请指定的读模式下的锁
UINT32 LOS_RwlockRdLock(LosRwlock *rwlock, UINT32 timeout)
{
UINT32 intSave;
@@ -293,7 +332,7 @@ UINT32 LOS_RwlockRdLock(LosRwlock *rwlock, UINT32 timeout)
SCHEDULER_UNLOCK(intSave);
return ret;
}
-
+/// 尝试申请指定的读模式下的锁
UINT32 LOS_RwlockTryRdLock(LosRwlock *rwlock)
{
UINT32 intSave;
@@ -304,11 +343,11 @@ UINT32 LOS_RwlockTryRdLock(LosRwlock *rwlock)
}
SCHEDULER_LOCK(intSave);
- ret = OsRwlockTryRdUnsafe(rwlock, 0);
+ ret = OsRwlockTryRdUnsafe(rwlock, 0);//所谓尝试就是没锁爷就返回,不等待,不纠结.当前任务也不会被挂起
SCHEDULER_UNLOCK(intSave);
return ret;
}
-
+/// 申请指定的写模式下的锁
UINT32 LOS_RwlockWrLock(LosRwlock *rwlock, UINT32 timeout)
{
UINT32 intSave;
@@ -323,7 +362,7 @@ UINT32 LOS_RwlockWrLock(LosRwlock *rwlock, UINT32 timeout)
SCHEDULER_UNLOCK(intSave);
return ret;
}
-
+/// 尝试申请指定的写模式下的锁
UINT32 LOS_RwlockTryWrLock(LosRwlock *rwlock)
{
UINT32 intSave;
@@ -334,32 +373,32 @@ UINT32 LOS_RwlockTryWrLock(LosRwlock *rwlock)
}
SCHEDULER_LOCK(intSave);
- ret = OsRwlockTryWrUnsafe(rwlock, 0);
+ ret = OsRwlockTryWrUnsafe(rwlock, 0);//所谓尝试就是没锁爷就返回,不等待,不纠结.当前任务也不会被挂起
SCHEDULER_UNLOCK(intSave);
return ret;
}
-
+/// 获取读写锁模式
STATIC UINT32 OsRwlockGetMode(LOS_DL_LIST *readList, LOS_DL_LIST *writeList)
{
BOOL isReadEmpty = LOS_ListEmpty(readList);
BOOL isWriteEmpty = LOS_ListEmpty(writeList);
- if (isReadEmpty && isWriteEmpty) {
- return RWLOCK_NONE_MODE;
+ if (isReadEmpty && isWriteEmpty) { //读写链表都没有内容
+ return RWLOCK_NONE_MODE; //自由模式
}
- if (!isReadEmpty && isWriteEmpty) {
+ if (!isReadEmpty && isWriteEmpty) { //读链表有数据,写链表没有数据
return RWLOCK_READ_MODE;
}
- if (isReadEmpty && !isWriteEmpty) {
+ if (isReadEmpty && !isWriteEmpty) { //写链表有数据,读链表没有数据
return RWLOCK_WRITE_MODE;
}
LosTaskCB *pendedReadTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(readList));
LosTaskCB *pendedWriteTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(writeList));
if (OsSchedParamCompare(pendedWriteTask, pendedReadTask) <= 0) {
- return RWLOCK_WRITEFIRST_MODE;
+ return RWLOCK_WRITEFIRST_MODE; //写的优先级高时,为写优先模式
}
- return RWLOCK_READFIRST_MODE;
+ return RWLOCK_READFIRST_MODE; //读的优先级高时,为读优先模式
}
-
+/// 释放锁
STATIC UINT32 OsRwlockPostOp(LosRwlock *rwlock, BOOL *needSched)
{
UINT32 rwlockMode;
@@ -367,15 +406,15 @@ STATIC UINT32 OsRwlockPostOp(LosRwlock *rwlock, BOOL *needSched)
rwlock->rwCount = 0;
rwlock->writeOwner = NULL;
- rwlockMode = OsRwlockGetMode(&(rwlock->readList), &(rwlock->writeList));
- if (rwlockMode == RWLOCK_NONE_MODE) {
+ rwlockMode = OsRwlockGetMode(&(rwlock->readList), &(rwlock->writeList));//先获取模式
+ if (rwlockMode == RWLOCK_NONE_MODE) {//自由模式则正常返回
return LOS_OK;
}
- /* In this case, rwlock will wake the first pended write task. */
- if ((rwlockMode == RWLOCK_WRITE_MODE) || (rwlockMode == RWLOCK_WRITEFIRST_MODE)) {
- resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->writeList)));
- rwlock->rwCount = -1;
- rwlock->writeOwner = (VOID *)resumedTask;
+ /* In this case, rwlock will wake the first pended write task. | 在这种情况下,rwlock 将唤醒第一个挂起的写任务。 */
+ if ((rwlockMode == RWLOCK_WRITE_MODE) || (rwlockMode == RWLOCK_WRITEFIRST_MODE)) {//如果当前是写模式 (有任务在等写锁涅)
+ resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->writeList)));//获取任务实体
+ rwlock->rwCount = -1;//直接干成-1,注意这里并不是 --
+ rwlock->writeOwner = (VOID *)resumedTask;//有锁了则唤醒等锁的任务(写模式)
resumedTask->ops->wake(resumedTask);
if (needSched != NULL) {
*needSched = TRUE;
@@ -383,29 +422,30 @@ STATIC UINT32 OsRwlockPostOp(LosRwlock *rwlock, BOOL *needSched)
return LOS_OK;
}
- rwlock->rwCount = 1;
+ rwlock->rwCount = 1; //直接干成1,因为是释放操作
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->readList)));
resumedTask->ops->wake(resumedTask);
- while (!LOS_ListEmpty(&(rwlock->readList))) {
+ while (!LOS_ListEmpty(&(rwlock->readList))) {//遍历读链表,目的是要唤醒其他读模式的任务(优先级得要高于pendedWriteTaskPri才行)
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->readList)));
if (rwlockMode == RWLOCK_READFIRST_MODE) {
LosTaskCB *pendedWriteTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->writeList)));
if (OsSchedParamCompare(resumedTask, pendedWriteTask) >= 0) {
- break;
- }
+ break;//跳出循环
+ }
}
if (rwlock->rwCount == INT8_MAX) {
return EINVAL;
}
- rwlock->rwCount++;
- resumedTask->ops->wake(resumedTask);
+ rwlock->rwCount++;//读锁任务数量增加
+ resumedTask->ops->wake(resumedTask);//不断唤醒读锁任务,由此实现了允许多个读操作并发,因为在多核情况下resumedTask很大可能
+ //与当前任务并不在同一个核上运行, 此处非常有意思,点赞! @note_good
}
if (needSched != NULL) {
*needSched = TRUE;
}
return LOS_OK;
}
-
+/// 释放锁,唤醒任务
UINT32 OsRwlockUnlockUnsafe(LosRwlock *rwlock, BOOL *needSched)
{
if ((rwlock->magic & RWLOCK_COUNT_MASK) != OS_RWLOCK_MAGIC) {
@@ -417,27 +457,28 @@ UINT32 OsRwlockUnlockUnsafe(LosRwlock *rwlock, BOOL *needSched)
}
LosTaskCB *runTask = OsCurrTaskGet();
- if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) != runTask)) {
+ if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) != runTask)) {//写模式时,当前任务未持有锁
return LOS_EPERM;
}
/*
* When the rwCount of the rwlock more than 1 or less than -1, the rwlock mode will
* not changed after current unlock operation, so pended tasks can not be waken.
+ | 当 rwlock 的 rwCount 大于 1 或小于 -1 时,当前解锁操作后 rwlock 模式不会改变,因此挂起的任务不能被唤醒。
*/
- if (rwlock->rwCount > 1) {
+ if (rwlock->rwCount > 1) {//读模式
rwlock->rwCount--;
return LOS_OK;
}
- if (rwlock->rwCount < -1) {
+ if (rwlock->rwCount < -1) {//写模式
rwlock->rwCount++;
return LOS_OK;
}
return OsRwlockPostOp(rwlock, needSched);
}
-
+/// 释放指定读写锁
UINT32 LOS_RwlockUnLock(LosRwlock *rwlock)
{
UINT32 intSave;
@@ -451,9 +492,9 @@ UINT32 LOS_RwlockUnLock(LosRwlock *rwlock)
SCHEDULER_LOCK(intSave);
ret = OsRwlockUnlockUnsafe(rwlock, &needSched);
SCHEDULER_UNLOCK(intSave);
- LOS_MpSchedule(OS_MP_CPU_ALL);
- if (needSched == TRUE) {
- LOS_Schedule();
+ LOS_MpSchedule(OS_MP_CPU_ALL);//设置调度CPU的方式,所有CPU参与调度
+ if (needSched == TRUE) {//是否需要调度
+ LOS_Schedule();//产生调度,切换任务执行
}
return ret;
}
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_sem.c b/src/kernel_liteos_a/kernel/base/ipc/los_sem.c
index 31d484a1..9f3b43c4 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_sem.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_sem.c
@@ -1,6 +1,52 @@
+/*!
+ * @file los_sem.c
+ * @brief
+ * @link kernel-mini-basic-ipc-sem-basic http://weharmonyos.com/openharmony/zh-cn/device-dev/kernel/kernel-mini-basic-ipc-sem-basic.html @endlink
+ @verbatim
+ 信号量(Semaphore)是一种实现任务间通信的机制,可以实现任务间同步或共享资源的互斥访问。
+
+ 一个信号量的数据结构中,通常有一个计数值,用于对有效资源数的计数,表示剩下的可被使用的共享资源数,其值的含义分两种情况:
+
+ 0,表示该信号量当前不可获取,因此可能存在正在等待该信号量的任务。
+ 正值,表示该信号量当前可被获取。
+ 以同步为目的的信号量和以互斥为目的的信号量在使用上有如下不同:
+
+ 用作互斥时,初始信号量计数值不为0,表示可用的共享资源个数。在需要使用共享资源前,先获取信号量,
+ 然后使用一个共享资源,使用完毕后释放信号量。这样在共享资源被取完,即信号量计数减至0时,其他需要获取信号量的任务将被阻塞,
+ 从而保证了共享资源的互斥访问。另外,当共享资源数为1时,建议使用二值信号量,一种类似于互斥锁的机制。
+ 用作同步时,初始信号量计数值为0。任务1获取信号量而阻塞,直到任务2或者某中断释放信号量,任务1才得以进入Ready或Running态,从而达到了任务间的同步。
+
+ 信号量运作原理
+ 信号量初始化,为配置的N个信号量申请内存(N值可以由用户自行配置,通过LOSCFG_BASE_IPC_SEM_LIMIT宏实现),并把所有信号量初始化成未使用,
+ 加入到未使用链表中供系统使用。
+
+ 信号量创建,从未使用的信号量链表中获取一个信号量,并设定初值。
+
+ 信号量申请,若其计数器值大于0,则直接减1返回成功。否则任务阻塞,等待其它任务释放该信号量,等待的超时时间可设定。
+ 当任务被一个信号量阻塞时,将该任务挂到信号量等待任务队列的队尾。
+
+ 信号量释放,若没有任务等待该信号量,则直接将计数器加1返回。否则唤醒该信号量等待任务队列上的第一个任务。
+
+ 信号量删除,将正在使用的信号量置为未使用信号量,并挂回到未使用链表。
+
+ 信号量允许多个任务在同一时刻访问共享资源,但会限制同一时刻访问此资源的最大任务数目。当访问资源的任务数达到该资源允许的最大数量时,
+ 会阻塞其他试图获取该资源的任务,直到有任务释放该信号量。
+
+ 开发流程
+ 创建信号量LOS_SemCreate,若要创建二值信号量则调用LOS_BinarySemCreate。
+ 申请信号量LOS_SemPend。
+ 释放信号量LOS_SemPost。
+ 删除信号量LOS_SemDelete。
+ @endverbatim
+ * @image html https://gitee.com/weharmonyos/resources/raw/master/29/sem_run.png
+ * @attention 由于中断不能被阻塞,因此不能在中断中使用阻塞模式申请信号量。
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-11-18
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -40,17 +86,18 @@
#include "los_percpu_pri.h"
#include "los_hook.h"
+
#ifdef LOSCFG_BASE_IPC_SEM
#if (LOSCFG_BASE_IPC_SEM_LIMIT <= 0)
#error "sem maxnum cannot be zero"
#endif /* LOSCFG_BASE_IPC_SEM_LIMIT <= 0 */
-LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_unusedSemList;
-LITE_OS_SEC_BSS LosSemCB *g_allSem = NULL;
+LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_unusedSemList; ///< 可用的信号量列表,干嘛不用freeList? 可以看出这里是另一个人写的代码
+LITE_OS_SEC_BSS LosSemCB *g_allSem = NULL; ///< 信号池,一次分配 LOSCFG_BASE_IPC_SEM_LIMIT 个信号量
/*
- * Description : Initialize the semaphore doubly linked list
+ * Description : Initialize the semaphore doubly linked list | 信号量初始化
* Return : LOS_OK on success, or error code on failure
*/
LITE_OS_SEC_TEXT_INIT UINT32 OsSemInit(VOID)
@@ -58,18 +105,18 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSemInit(VOID)
LosSemCB *semNode = NULL;
UINT32 index;
- LOS_ListInit(&g_unusedSemList);
+ LOS_ListInit(&g_unusedSemList);//初始化链表,链表上挂未使用的信号量,用于分配信号量,鸿蒙信号量的个数是有限的,默认1024个
/* system resident memory, don't free */
- g_allSem = (LosSemCB *)LOS_MemAlloc(m_aucSysMem0, (LOSCFG_BASE_IPC_SEM_LIMIT * sizeof(LosSemCB)));
+ g_allSem = (LosSemCB *)LOS_MemAlloc(m_aucSysMem0, (LOSCFG_BASE_IPC_SEM_LIMIT * sizeof(LosSemCB)));//分配信号池
if (g_allSem == NULL) {
return LOS_ERRNO_SEM_NO_MEMORY;
}
for (index = 0; index < LOSCFG_BASE_IPC_SEM_LIMIT; index++) {
- semNode = ((LosSemCB *)g_allSem) + index;
- semNode->semID = SET_SEM_ID(0, index);
- semNode->semStat = OS_SEM_UNUSED;
- LOS_ListTailInsert(&g_unusedSemList, &semNode->semList);
+ semNode = ((LosSemCB *)g_allSem) + index;//拿信号控制块, 可以直接g_allSem[index]来嘛
+ semNode->semID = SET_SEM_ID(0, index);//保存ID
+ semNode->semStat = OS_SEM_UNUSED;//标记未使用
+ LOS_ListTailInsert(&g_unusedSemList, &semNode->semList);//通过semList把 信号块挂到空闲链表上
}
if (OsSemDbgInitHook() != LOS_OK) {
@@ -97,45 +144,46 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSemCreate(UINT16 count, UINT16 maxCount, UINT32 *
return LOS_ERRNO_SEM_PTR_NULL;
}
- if (count > maxCount) {
+ if (count > maxCount) {//信号量不能大于最大值,两参数都是外面给的
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_OVERFLOW);
}
- SCHEDULER_LOCK(intSave);
+ SCHEDULER_LOCK(intSave);//进入临界区,拿自旋锁
- if (LOS_ListEmpty(&g_unusedSemList)) {
+ if (LOS_ListEmpty(&g_unusedSemList)) {//没有可分配的空闲信号提供
SCHEDULER_UNLOCK(intSave);
OsSemInfoGetFullDataHook();
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_ALL_BUSY);
}
- unusedSem = LOS_DL_LIST_FIRST(&g_unusedSemList);
- LOS_ListDelete(unusedSem);
+ unusedSem = LOS_DL_LIST_FIRST(&g_unusedSemList);//从未使用信号量池中取首个
+ LOS_ListDelete(unusedSem);//从空闲链表上摘除
SCHEDULER_UNLOCK(intSave);
- semCreated = GET_SEM_LIST(unusedSem);
- semCreated->semCount = count;
- semCreated->semStat = OS_SEM_USED;
- semCreated->maxSemCount = maxCount;
- LOS_ListInit(&semCreated->semList);
- *semHandle = semCreated->semID;
+ semCreated = GET_SEM_LIST(unusedSem);//通过semList挂到链表上的,这里也要通过它把LosSemCB头查到. 进程,线程等结构体也都是这么干的.
+ semCreated->semCount = count;//设置数量
+ semCreated->semStat = OS_SEM_USED;//设置可用状态
+ semCreated->maxSemCount = maxCount;//设置最大信号数量
+ LOS_ListInit(&semCreated->semList);//初始化链表,后续阻塞任务通过task->pendList挂到semList链表上,就知道哪些任务在等它了.
+ *semHandle = semCreated->semID;//参数带走 semID
OsHookCall(LOS_HOOK_TYPE_SEM_CREATE, semCreated);
OsSemDbgUpdateHook(semCreated->semID, OsCurrTaskGet()->taskEntry, count);
+
return LOS_OK;
ERR_HANDLER:
OS_RETURN_ERROR_P2(errLine, errNo);
}
-
+///对外接口 创建信号量
LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemCreate(UINT16 count, UINT32 *semHandle)
{
return OsSemCreate(count, OS_SEM_COUNT_MAX, semHandle);
}
-
+///对外接口 创建二值信号量,其计数值最大为1,可以当互斥锁用
LITE_OS_SEC_TEXT_INIT UINT32 LOS_BinarySemCreate(UINT16 count, UINT32 *semHandle)
{
return OsSemCreate(count, OS_SEM_BINARY_COUNT_MAX, semHandle);
}
-
+///对外接口 删除指定的信号量,参数就是 semID
LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)
{
UINT32 intSave;
@@ -147,23 +195,23 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_INVALID);
}
- semDeleted = GET_SEM(semHandle);
+ semDeleted = GET_SEM(semHandle);//通过ID拿到信号量实体
SCHEDULER_LOCK(intSave);
- if ((semDeleted->semStat == OS_SEM_UNUSED) || (semDeleted->semID != semHandle)) {
+ if ((semDeleted->semStat == OS_SEM_UNUSED) || (semDeleted->semID != semHandle)) {//参数判断
SCHEDULER_UNLOCK(intSave);
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_INVALID);
}
- if (!LOS_ListEmpty(&semDeleted->semList)) {
+ if (!LOS_ListEmpty(&semDeleted->semList)) {//当前还有任务挂在这个信号上面,当然不能删除
SCHEDULER_UNLOCK(intSave);
- OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_PENDED);
+ OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_PENDED);//这个宏很有意思,里面goto到ERR_HANDLER
}
- LOS_ListTailInsert(&g_unusedSemList, &semDeleted->semList);
- semDeleted->semStat = OS_SEM_UNUSED;
- semDeleted->semID = SET_SEM_ID(GET_SEM_COUNT(semDeleted->semID) + 1, GET_SEM_INDEX(semDeleted->semID));
+ LOS_ListTailInsert(&g_unusedSemList, &semDeleted->semList);//通过semList从尾部插入空闲链表
+ semDeleted->semStat = OS_SEM_UNUSED;//状态变成了未使用
+ semDeleted->semID = SET_SEM_ID(GET_SEM_COUNT(semDeleted->semID) + 1, GET_SEM_INDEX(semDeleted->semID));//设置ID
OsHookCall(LOS_HOOK_TYPE_SEM_DELETE, semDeleted);
OsSemDbgUpdateHook(semDeleted->semID, NULL, 0);
@@ -174,11 +222,11 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)
ERR_HANDLER:
OS_RETURN_ERROR_P2(errLine, errNo);
}
-
+///对外接口 申请指定的信号量,并设置超时时间
LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
{
UINT32 intSave;
- LosSemCB *semPended = GET_SEM(semHandle);
+ LosSemCB *semPended = GET_SEM(semHandle);//通过ID拿到信号体
UINT32 retErr = LOS_OK;
LosTaskCB *runTask = NULL;
@@ -192,7 +240,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
return LOS_ERRNO_SEM_PEND_INTERR;
}
- runTask = OsCurrTaskGet();
+ runTask = OsCurrTaskGet();//获取当前任务
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
OsBackTrace();
return LOS_ERRNO_SEM_PEND_IN_SYSTEM_TASK;
@@ -204,19 +252,20 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
retErr = LOS_ERRNO_SEM_INVALID;
goto OUT;
}
+
/* Update the operate time, no matter the actual Pend success or not */
OsSemDbgTimeUpdateHook(semHandle);
- if (semPended->semCount > 0) {
- semPended->semCount--;
+ if (semPended->semCount > 0) {//还有资源可用,返回肯定得成功,semCount=0时代表没资源了,task会必须去睡眠了
+ semPended->semCount--;//资源少了一个
OsHookCall(LOS_HOOK_TYPE_SEM_PEND, semPended, runTask, timeout);
- goto OUT;
+ goto OUT;//注意这里 retErr = LOS_OK ,所以返回是OK的
} else if (!timeout) {
retErr = LOS_ERRNO_SEM_UNAVAILABLE;
goto OUT;
}
- if (!OsPreemptableInSched()) {
+ if (!OsPreemptableInSched()) {//不能申请调度 (不能调度的原因是因为没有持有调度任务自旋锁)
PRINT_ERR("!!!LOS_ERRNO_SEM_PEND_IN_LOCK!!!\n");
OsBackTrace();
retErr = LOS_ERRNO_SEM_PEND_IN_LOCK;
@@ -226,7 +275,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
OsHookCall(LOS_HOOK_TYPE_SEM_PEND, semPended, runTask, timeout);
OsTaskWaitSetPendMask(OS_TASK_WAIT_SEM, semPended->semID, timeout);
retErr = runTask->ops->wait(runTask, &semPended->semList, timeout);
- if (retErr == LOS_ERRNO_TSK_TIMEOUT) {
+ if (retErr == LOS_ERRNO_TSK_TIMEOUT) {//注意:这里是涉及到task切换的,把自己挂起,唤醒其他task
retErr = LOS_ERRNO_SEM_TIMEOUT;
}
@@ -234,7 +283,7 @@ OUT:
SCHEDULER_UNLOCK(intSave);
return retErr;
}
-
+///以不安全的方式释放指定的信号量,所谓不安全指的是不用自旋锁
LITE_OS_SEC_TEXT UINT32 OsSemPostUnsafe(UINT32 semHandle, BOOL *needSched)
{
LosTaskCB *resumedTask = NULL;
@@ -246,23 +295,23 @@ LITE_OS_SEC_TEXT UINT32 OsSemPostUnsafe(UINT32 semHandle, BOOL *needSched)
/* Update the operate time, no matter the actual Post success or not */
OsSemDbgTimeUpdateHook(semHandle);
- if (semPosted->semCount == OS_SEM_COUNT_MAX) {
+ if (semPosted->semCount == OS_SEM_COUNT_MAX) {//当前信号资源不能大于最大资源量
return LOS_ERRNO_SEM_OVERFLOW;
}
- if (!LOS_ListEmpty(&semPosted->semList)) {
- resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(semPosted->semList)));
+ if (!LOS_ListEmpty(&semPosted->semList)) {//当前有任务挂在semList上,要去唤醒任务
+ resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(semPosted->semList)));//semList上面挂的都是task->pendlist节点,取第一个task下来唤醒
OsTaskWakeClearPendMask(resumedTask);
resumedTask->ops->wake(resumedTask);
- if (needSched != NULL) {
- *needSched = TRUE;
+ if (needSched != NULL) {//参数不为空,就返回需要调度的标签
+ *needSched = TRUE;//TRUE代表需要调度
}
- } else {
- semPosted->semCount++;
+ } else {//当前没有任务挂在semList上,
+ semPosted->semCount++;//信号资源多一个
}
OsHookCall(LOS_HOOK_TYPE_SEM_POST, semPosted, resumedTask);
return LOS_OK;
}
-
+///对外接口 释放指定的信号量
LITE_OS_SEC_TEXT UINT32 LOS_SemPost(UINT32 semHandle)
{
UINT32 intSave;
@@ -272,16 +321,15 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPost(UINT32 semHandle)
if (GET_SEM_INDEX(semHandle) >= LOSCFG_BASE_IPC_SEM_LIMIT) {
return LOS_ERRNO_SEM_INVALID;
}
-
SCHEDULER_LOCK(intSave);
ret = OsSemPostUnsafe(semHandle, &needSched);
SCHEDULER_UNLOCK(intSave);
- if (needSched) {
- LOS_MpSchedule(OS_MP_CPU_ALL);
- LOS_Schedule();
+ if (needSched) {//需要调度的情况
+ LOS_MpSchedule(OS_MP_CPU_ALL);//向所有CPU发送调度指令
+ LOS_Schedule();//发起调度
}
return ret;
}
-#endif /* LOSCFG_BASE_IPC_SEM */
+#endif /* LOSCFG_BASE_IPC_SEM */
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_sem_debug.c b/src/kernel_liteos_a/kernel/base/ipc/los_sem_debug.c
index 1ed42515..284a800d 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_sem_debug.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_sem_debug.c
@@ -78,11 +78,11 @@ STATIC VOID OsSemPendedTaskNamePrint(LosSemCB *semNode)
#ifdef LOSCFG_DEBUG_SEMAPHORE
typedef struct {
- UINT16 origSemCount; /* Number of original available semaphores */
- UINT64 lastAccessTime; /* The last operation time */
- TSK_ENTRY_FUNC creator; /* The task entry who created this sem */
+ UINT16 origSemCount; /* Number of original available semaphores *///原始可用信号量数
+ UINT64 lastAccessTime; /* The last operation time */ //最后操作时间
+ TSK_ENTRY_FUNC creator; /* The task entry who created this sem */ //由哪个task的入口函数创建了这个任务
} SemDebugCB;
-STATIC SemDebugCB *g_semDebugArray = NULL;
+STATIC SemDebugCB *g_semDebugArray = NULL;//默认1024个SemDebugCB debug信号量池
STATIC BOOL SemCompareValue(const IpcSortParam *sortParam, UINT32 left, UINT32 right)
{
@@ -102,23 +102,23 @@ UINT32 OsSemDbgInit(VOID)
(VOID)memset_s(g_semDebugArray, size, 0, size);
return LOS_OK;
}
-
+///更新最后访问时间
VOID OsSemDbgTimeUpdate(UINT32 semID)
{
SemDebugCB *semDebug = &g_semDebugArray[GET_SEM_INDEX(semID)];
- semDebug->lastAccessTime = LOS_TickCountGet();
+ semDebug->lastAccessTime = LOS_TickCountGet();//获取tick总数
return;
}
-
+///更新信号量
VOID OsSemDbgUpdate(UINT32 semID, TSK_ENTRY_FUNC creator, UINT16 count)
{
SemDebugCB *semDebug = &g_semDebugArray[GET_SEM_INDEX(semID)];
- semDebug->creator = creator;
- semDebug->lastAccessTime = LOS_TickCountGet();
- semDebug->origSemCount = count;
+ semDebug->creator = creator; //改为由参数入口函数创建了这个任务
+ semDebug->lastAccessTime = LOS_TickCountGet();//获取tick总数
+ semDebug->origSemCount = count;//原始信号量改变
return;
}
-
+///按信号量访问时间排序
STATIC VOID OsSemSort(UINT32 *semIndexArray, UINT32 usedCount)
{
UINT32 i, intSave;
@@ -296,6 +296,6 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdSemInfoGet(UINT32 argc, const CHAR **arg
return ret;
}
-SHELLCMD_ENTRY(sem_shellcmd, CMD_TYPE_EX, "sem", 1, (CmdCallBackFunc)OsShellCmdSemInfoGet);
+SHELLCMD_ENTRY(sem_shellcmd, CMD_TYPE_EX, "sem", 1, (CmdCallBackFunc)OsShellCmdSemInfoGet);//采用shell命令静态注册方式
#endif
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_signal.c b/src/kernel_liteos_a/kernel/base/ipc/los_signal.c
index bb7bd2f7..3a130e56 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_signal.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_signal.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -53,15 +53,22 @@ int raise(int sig)
#define GETUNMASKSET(procmask, pendFlag) ((~(procmask)) & (sigset_t)(pendFlag))
#define UINT64_BIT_SIZE 64
+/**
+ * @brief 判定信号signo是否存在信号集中。如果信号集里已有该信号则返回1,否则返回0。如果有错误则返回-1
+ * @param set
+ * @param signo
+ * @return int
+ */
int OsSigIsMember(const sigset_t *set, int signo)
{
int ret = LOS_NOK;
- /* In musl, sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
+ /* In musl, sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
+ //在musl中,sig No bits 00000100表示sig No 3,但是在SIGNO2SET中 1<<3 = 00001000,因此signo需要减1
signo -= 1;
/* Verify the signal */
- if (GOOD_SIGNO(signo)) {
+ if (GOOD_SIGNO(signo)) {//有效信号判断
/* Check if the signal is in the set */
- ret = ((*set & SIGNO2SET((unsigned int)signo)) != 0);
+ ret = ((*set & SIGNO2SET((unsigned int)signo)) != 0);//检查信号是否还在集合中
}
return ret;
@@ -120,7 +127,6 @@ VOID OsClearSigInfoTmpList(sig_cb *sigcb)
(VOID)LOS_MemFree(m_aucSysMem0, tmpInfoNode);
}
}
-
STATIC INLINE VOID OsSigWaitTaskWake(LosTaskCB *taskCB, INT32 signo)
{
sig_cb *sigcb = &taskCB->sig;
@@ -132,14 +138,14 @@ STATIC INLINE VOID OsSigWaitTaskWake(LosTaskCB *taskCB, INT32 signo)
OsSigEmptySet(&sigcb->sigwaitmask);
}
}
-
+///< 唤醒被挂起的处于等待指定信号的任务
STATIC UINT32 OsPendingTaskWake(LosTaskCB *taskCB, INT32 signo)
{
if (!OsTaskIsPending(taskCB) || !OsProcessIsUserMode(OS_PCB_FROM_TCB(taskCB))) {
return 0;
}
- if ((signo != SIGKILL) && (taskCB->waitFlag != OS_TASK_WAIT_SIGNAL)) {
+ if ((signo != SIGKILL) && (taskCB->waitFlag != OS_TASK_WAIT_SIGNAL)) { // @note_thinking 这个判断会不会有问题 ?
return 0;
}
@@ -153,16 +159,16 @@ STATIC UINT32 OsPendingTaskWake(LosTaskCB *taskCB, INT32 signo)
OsTaskWakeClearPendMask(taskCB);
taskCB->ops->wake(taskCB);
break;
- case OS_TASK_WAIT_SIGNAL:
+ case OS_TASK_WAIT_SIGNAL://等待普通信号
OsSigWaitTaskWake(taskCB, signo);
break;
- case OS_TASK_WAIT_LITEIPC:
- OsTaskWakeClearPendMask(taskCB);
+ case OS_TASK_WAIT_LITEIPC://等待liteipc信号
+ OsTaskWakeClearPendMask(taskCB);//重置任务的等待信息
taskCB->ops->wake(taskCB);
break;
- case OS_TASK_WAIT_FUTEX:
- OsFutexNodeDeleteFromFutexHash(&taskCB->futex, TRUE, NULL, NULL);
- OsTaskWakeClearPendMask(taskCB);
+ case OS_TASK_WAIT_FUTEX://等待快锁信号
+ OsFutexNodeDeleteFromFutexHash(&taskCB->futex, TRUE, NULL, NULL);//从哈希桶中删除快锁
+ OsTaskWakeClearPendMask(taskCB);//重置任务的等待信息
taskCB->ops->wake(taskCB);
break;
default:
@@ -171,7 +177,7 @@ STATIC UINT32 OsPendingTaskWake(LosTaskCB *taskCB, INT32 signo)
return 0;
}
-
+///给任务(线程)发送一个信号
int OsTcbDispatch(LosTaskCB *stcb, siginfo_t *info)
{
bool masked = FALSE;
@@ -179,19 +185,19 @@ int OsTcbDispatch(LosTaskCB *stcb, siginfo_t *info)
OS_RETURN_IF_NULL(sigcb);
/* If signo is 0, not send signal, just check process or pthread exist */
- if (info->si_signo == 0) {
+ if (info->si_signo == 0) {//如果信号为0,则不发送信号,只是作为检查进程和线程是否还存在.
return 0;
}
- masked = (bool)OsSigIsMember(&sigcb->sigprocmask, info->si_signo);
- if (masked) {
- /* If signal is in wait list and mask list, need unblock it */
+ masked = (bool)OsSigIsMember(&sigcb->sigprocmask, info->si_signo);//@note_thinking 这里还有 masked= -1的情况要处理!!!
+ if (masked) {//如果信号被屏蔽了,要看等待信号集,sigwaitmask
+ /* If signal is in wait list and mask list, need unblock it */ //如果信号在等待列表和掩码列表中,需要解除阻止
if (LOS_ListEmpty(&sigcb->waitList) ||
- (!LOS_ListEmpty(&sigcb->waitList) && !OsSigIsMember(&sigcb->sigwaitmask, info->si_signo))) {
- OsSigAddSet(&sigcb->sigPendFlag, info->si_signo);
+ (!LOS_ListEmpty(&sigcb->waitList) && !OsSigIsMember(&sigcb->sigwaitmask, info->si_signo))) {
+ OsSigAddSet(&sigcb->sigPendFlag, info->si_signo);//将信号加入挂起/待办集
}
- } else {
+ } else {//信号没有被屏蔽的处理
/* unmasked signal actions */
- OsSigAddSet(&sigcb->sigFlag, info->si_signo);
+ OsSigAddSet(&sigcb->sigFlag, info->si_signo);//不屏蔽的信号集
}
if (OsAddSigInfoToTmpList(sigcb, info) == LOS_NOK) {
@@ -206,14 +212,23 @@ void OsSigMaskSwitch(LosTaskCB * const rtcb, sigset_t set)
sigset_t unmaskset;
rtcb->sig.sigprocmask = set;
- unmaskset = GETUNMASKSET(rtcb->sig.sigprocmask, rtcb->sig.sigPendFlag);
+ unmaskset = GETUNMASKSET(rtcb->sig.sigprocmask, rtcb->sig.sigPendFlag);//过滤出没有被屏蔽的信号集
if (unmaskset != NULL_SIGNAL_SET) {
/* pendlist do */
- rtcb->sig.sigFlag |= unmaskset;
- rtcb->sig.sigPendFlag ^= unmaskset;
+ rtcb->sig.sigFlag |= unmaskset; //加入不屏蔽信号集
+ rtcb->sig.sigPendFlag ^= unmaskset;//从挂起/待办集中去掉unmaskset
}
}
+/**
+ * @brief
+ * @verbatim
+ 向信号集设置信号屏蔽的方法
+ SIG_BLOCK:将set指向信号集中的信号,添加到进程阻塞信号集;
+ SIG_UNBLOCK:将set指向信号集中的信号,从进程阻塞信号集删除;
+ SIG_SETMASK:将set指向信号集中的信号,设置成进程阻塞信号集;
+ * @endverbatim
+ */
int OsSigprocMask(int how, const sigset_t_l *setl, sigset_t_l *oldsetl)
{
LosTaskCB *spcb = NULL;
@@ -223,11 +238,11 @@ int OsSigprocMask(int how, const sigset_t_l *setl, sigset_t_l *oldsetl)
SCHEDULER_LOCK(intSave);
spcb = OsCurrTaskGet();
- /* If requested, copy the old mask to user. */
+ /* If requested, copy the old mask to user. | 如果需要,请将旧掩码复制给用户*/
if (oldsetl != NULL) {
*(sigset_t *)oldsetl = spcb->sig.sigprocmask;
}
- /* If requested, modify the current signal mask. */
+ /* If requested, modify the current signal mask. | 如有要求,修改当前信号屏蔽*/
if (setl != NULL) {
set = *(sigset_t *)setl;
/* Okay, determine what we are supposed to do */
@@ -236,46 +251,46 @@ int OsSigprocMask(int how, const sigset_t_l *setl, sigset_t_l *oldsetl)
* set pointed to by set as the new sigprocmask.
*/
case SIG_BLOCK:
- spcb->sig.sigprocmask |= set;
+ spcb->sig.sigprocmask |= set;//增加信号屏蔽位
break;
/* Set the intersection of the current set and the
* signal set pointed to by set as the new sigprocmask.
*/
case SIG_UNBLOCK:
- spcb->sig.sigprocmask &= ~(set);
+ spcb->sig.sigprocmask &= ~(set);//解除信号屏蔽位
break;
/* Set the signal set pointed to by set as the new sigprocmask. */
case SIG_SETMASK:
- spcb->sig.sigprocmask = set;
+ spcb->sig.sigprocmask = set;//设置一个新的屏蔽掩码
break;
default:
ret = -EINVAL;
break;
}
/* If pending mask not in sigmask, need set sigflag. */
- OsSigMaskSwitch(spcb, spcb->sig.sigprocmask);
+ OsSigMaskSwitch(spcb, spcb->sig.sigprocmask);//更新与屏蔽信号相关的变量
}
SCHEDULER_UNLOCK(intSave);
return ret;
}
-
+///让进程的每一个task执行参数函数
int OsSigProcessForeachChild(LosProcessCB *spcb, ForEachTaskCB handler, void *arg)
{
int ret;
/* Visit the main thread last (if present) */
- LosTaskCB *taskCB = NULL;
- LOS_DL_LIST_FOR_EACH_ENTRY(taskCB, &(spcb->threadSiblingList), LosTaskCB, threadList) {
- ret = handler(taskCB, arg);
- OS_RETURN_IF(ret != 0, ret);
+ LosTaskCB *taskCB = NULL;//遍历进程的 threadList 链表,里面存放的都是task节点
+ LOS_DL_LIST_FOR_EACH_ENTRY(taskCB, &(spcb->threadSiblingList), LosTaskCB, threadList) {//遍历进程的任务列表
+ ret = handler(taskCB, arg);//回调参数函数
+ OS_RETURN_IF(ret != 0, ret);//这个宏的意思就是只有ret = 0时,啥也不处理.其余就返回 ret
}
return LOS_OK;
}
-
+///信号处理函数,这里就是上面的 handler = SigProcessSignalHandler,见于 OsSigProcessSend
static int SigProcessSignalHandler(LosTaskCB *tcb, void *arg)
{
- struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg;
+ struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg;//先把参数解出来
int ret;
int isMember;
@@ -283,128 +298,130 @@ static int SigProcessSignalHandler(LosTaskCB *tcb, void *arg)
return 0;
}
- /* If the default tcb is not set, then set this one as default. */
- if (!info->defaultTcb) {
+ /* If the default tcb is not set, then set this one as default. */
+ if (!info->defaultTcb) {//如果没有默认发送方的任务,即默认参数任务.
info->defaultTcb = tcb;
}
- isMember = OsSigIsMember(&tcb->sig.sigwaitmask, info->sigInfo->si_signo);
- if (isMember && (!info->awakenedTcb)) {
+ isMember = OsSigIsMember(&tcb->sig.sigwaitmask, info->sigInfo->si_signo);//任务是否在等待这个信号
+ if (isMember && (!info->awakenedTcb)) {//是在等待,并尚未向该任务时发送信号时
/* This means the task is waiting for this signal. Stop looking for it and use this tcb.
* The requirement is: if more than one task in this task group is waiting for the signal,
* then only one indeterminate task in the group will receive the signal.
*/
- ret = OsTcbDispatch(tcb, info->sigInfo);
- OS_RETURN_IF(ret < 0, ret);
+ ret = OsTcbDispatch(tcb, info->sigInfo);//发送信号,注意这是给其他任务发送信号,tcb不是当前任务
+ OS_RETURN_IF(ret < 0, ret);//这种写法很有意思
/* set this tcb as awakenedTcb */
info->awakenedTcb = tcb;
OS_RETURN_IF(info->receivedTcb != NULL, SIG_STOP_VISIT); /* Stop search */
}
/* Is this signal unblocked on this thread? */
- isMember = OsSigIsMember(&tcb->sig.sigprocmask, info->sigInfo->si_signo);
- if ((!isMember) && (!info->receivedTcb) && (tcb != info->awakenedTcb)) {
- /* if unblockedTcb of this signal is not set, then set it. */
+ isMember = OsSigIsMember(&tcb->sig.sigprocmask, info->sigInfo->si_signo);//任务是否屏蔽了这个信号
+ if ((!isMember) && (!info->receivedTcb) && (tcb != info->awakenedTcb)) {//没有屏蔽,有唤醒任务没有接收任务.
+ /* if unblockedTcb of this signal is not set, then set it. */
if (!info->unblockedTcb) {
info->unblockedTcb = tcb;
}
- ret = OsTcbDispatch(tcb, info->sigInfo);
+ ret = OsTcbDispatch(tcb, info->sigInfo);//向任务发送信号
OS_RETURN_IF(ret < 0, ret);
/* set this tcb as receivedTcb */
- info->receivedTcb = tcb;
+ info->receivedTcb = tcb;//设置这个任务为接收任务
OS_RETURN_IF(info->awakenedTcb != NULL, SIG_STOP_VISIT); /* Stop search */
}
return 0; /* Keep searching */
}
-
+///进程收到 SIGKILL 信号后,通知任务tcb处理.
static int SigProcessKillSigHandler(LosTaskCB *tcb, void *arg)
{
- struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg;
+ struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg;//转参
return OsPendingTaskWake(tcb, info->sigInfo->si_signo);
}
+//处理信号发送
static void SigProcessLoadTcb(struct ProcessSignalInfo *info, siginfo_t *sigInfo)
{
LosTaskCB *tcb = NULL;
- if (info->awakenedTcb == NULL && info->receivedTcb == NULL) {
- if (info->unblockedTcb) {
- tcb = info->unblockedTcb;
- } else if (info->defaultTcb) {
+ if (info->awakenedTcb == NULL && info->receivedTcb == NULL) {//信号即没有指定接收task 也没有指定被唤醒task
+ if (info->unblockedTcb) {//如果进程信号信息体中有阻塞task
+ tcb = info->unblockedTcb;//
+ } else if (info->defaultTcb) {//如果有默认的发送方task
tcb = info->defaultTcb;
} else {
return;
}
/* Deliver the signal to the selected task */
- (void)OsTcbDispatch(tcb, sigInfo);
+ (void)OsTcbDispatch(tcb, sigInfo);//向所选任务发送信号
}
}
-
+///给参数进程发送参数信号
int OsSigProcessSend(LosProcessCB *spcb, siginfo_t *sigInfo)
{
int ret;
struct ProcessSignalInfo info = {
- .sigInfo = sigInfo,
- .defaultTcb = NULL,
+ .sigInfo = sigInfo, //信号内容
+ .defaultTcb = NULL, //以下四个值将在OsSigProcessForeachChild中根据条件完善
.unblockedTcb = NULL,
.awakenedTcb = NULL,
.receivedTcb = NULL
};
-
- if (info.sigInfo == NULL) {
+ //总之是要从进程中找个至少一个任务来接受这个信号,优先级
+ //awakenedTcb > receivedTcb > unblockedTcb > defaultTcb
+ if (info.sigInfo == NULL) {
return -EFAULT;
}
-
- /* visit all taskcb and dispatch signal */
- if (info.sigInfo->si_signo == SIGKILL) {
- OsSigAddSet(&spcb->sigShare, info.sigInfo->si_signo);
+ /* visit all taskcb and dispatch signal */ //访问所有任务和分发信号
+ if (info.sigInfo->si_signo == SIGKILL) {//需要干掉进程时 SIGKILL = 9, #linux kill 9 14
+ OsSigAddSet(&spcb->sigShare, info.sigInfo->si_signo);//信号集中增加信号
(void)OsSigProcessForeachChild(spcb, SigProcessKillSigHandler, &info);
return 0;
} else {
- ret = OsSigProcessForeachChild(spcb, SigProcessSignalHandler, &info);
+ ret = OsSigProcessForeachChild(spcb, SigProcessSignalHandler, &info);//进程通知所有task处理信号
}
if (ret < 0) {
return ret;
}
- SigProcessLoadTcb(&info, sigInfo);
+ SigProcessLoadTcb(&info, sigInfo);//确保能给一个任务发送信号
return 0;
}
-
+///信号集全部清0
int OsSigEmptySet(sigset_t *set)
{
*set = NULL_SIGNAL_SET;
return 0;
}
-/* Privilege process can't send to kernel and privilege process */
+/* Privilege process can't send to kernel and privilege process */ //内核进程组和用户特权进程组无法发送
static int OsSignalPermissionToCheck(const LosProcessCB *spcb)
{
UINTPTR gid = (UINTPTR)OS_GET_PGROUP_LEADER(spcb->pgroup);
- if (gid == OS_KERNEL_PROCESS_GROUP) {
+
+ if (gid == OS_KERNEL_PROCESS_GROUP) {//内核进程组
return -EPERM;
- } else if (gid == OS_USER_PRIVILEGE_PROCESS_GROUP) {
+ } else if (gid == OS_USER_PRIVILEGE_PROCESS_GROUP) {//用户特权进程组
return -EPERM;
}
return 0;
}
-
+///信号分发,发送信号权限/进程组过滤.
STATIC int SendSigPermissionCheck(LosProcessCB *spcb, int permission)
{
if (spcb == NULL) {
return -ESRCH;
}
- if (OsProcessIsUnused(spcb)) {
+ if (OsProcessIsUnused(spcb)) {//进程是否还在使用,不一定是当前进程但必须是个有效进程
return -ESRCH;
}
-#ifdef LOSCFG_SECURITY_CAPABILITY
- LosProcessCB *current = OsCurrProcessGet();
- /* Kernel process always has kill permission and user process should check permission */
- if (OsProcessIsUserMode(current) && !(current->processStatus & OS_PROCESS_FLAG_EXIT)) {
+#ifdef LOSCFG_SECURITY_CAPABILITY //启用能力安全模式
+ LosProcessCB *current = OsCurrProcessGet();//获取当前进程,检查当前进程是否有发送信号的权限.
+ /* Kernel process always has kill permission and user process should check permission *///内核进程总是有kill权限,用户进程需要检查权限
+ if (OsProcessIsUserMode(current) && !(current->processStatus & OS_PROCESS_FLAG_EXIT)) {//用户进程检查能力范围
if ((current != spcb) && (!IsCapPermit(CAP_KILL)) && (current->user->userID != spcb->user->userID)) {
return -EPERM;
}
@@ -437,7 +454,7 @@ int OsSendSigToProcess(LosProcessCB *spcb, int sig, int permission)
info.si_code = SI_USER;
info.si_value.sival_ptr = NULL;
- return OsSigProcessSend(spcb, &info);
+ return OsSigProcessSend(spcb, &info);//给参数进程发送信号
}
int OsDispatch(pid_t pid, siginfo_t *info, int permission)
@@ -459,7 +476,14 @@ int OsDispatch(pid_t pid, siginfo_t *info, int permission)
return OsSigProcessSend(spcb, info);
}
-
+/**
+ * @brief
+ * @verbatim
+ 用于向进程或进程组发送信号
+ shell命令 kill 14 7(kill -14 7效果相同)
+ 发送信号14(SIGALRM默认行为为进程终止)给7号进程
+ * @endverbatim
+ */
int OsKill(pid_t pid, int sig, int permission)
{
siginfo_t info;
@@ -470,14 +494,14 @@ int OsKill(pid_t pid, int sig, int permission)
return -EINVAL;
}
- /* Create the siginfo structure */
- info.si_signo = sig;
- info.si_code = SI_USER;
+ /* Create the siginfo structure */ //创建信号结构体
+ info.si_signo = sig; //信号编号
+ info.si_code = SI_USER; //来自用户进程信号
info.si_value.sival_ptr = NULL;
if (pid > 0) {
/* Send the signal to the specify process */
- ret = OsDispatch(pid, &info, permission);
+ ret = OsDispatch(pid, &info, permission);//发送信号
} else if (pid == -1) {
/* Send SIG to all processes */
ret = OsSendSignalToAllProcess(&info, permission);
@@ -489,18 +513,17 @@ int OsKill(pid_t pid, int sig, int permission)
}
return ret;
}
-
+///给发送信号过程加锁
int OsKillLock(pid_t pid, int sig)
{
int ret;
unsigned int intSave;
SCHEDULER_LOCK(intSave);
- ret = OsKill(pid, sig, OS_USER_KILL_PERMISSION);
+ ret = OsKill(pid, sig, OS_USER_KILL_PERMISSION);//用户权限向进程发送信号
SCHEDULER_UNLOCK(intSave);
return ret;
}
-
INT32 OsTaskKillUnsafe(UINT32 taskID, INT32 signo)
{
siginfo_t info;
@@ -519,7 +542,7 @@ INT32 OsTaskKillUnsafe(UINT32 taskID, INT32 signo)
* dispatch rules. */
return OsTcbDispatch(taskCB, &info);
}
-
+///发送信号
int OsPthreadKill(UINT32 tid, int signo)
{
int ret;
@@ -537,7 +560,7 @@ int OsPthreadKill(UINT32 tid, int signo)
SCHEDULER_UNLOCK(intSave);
return ret;
}
-
+///向信号集中加入signo信号
int OsSigAddSet(sigset_t *set, int signo)
{
/* Verify the signal */
@@ -545,13 +568,13 @@ int OsSigAddSet(sigset_t *set, int signo)
return -EINVAL;
} else {
/* In musl, sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
- signo -= 1;
+ signo -= 1;// 信号范围是 [1 ~ 64 ],而保存变量位的范围是[0 ~ 63]
/* Add the signal to the set */
- *set |= SIGNO2SET((unsigned int)signo);
+ *set |= SIGNO2SET((unsigned int)signo);//填充信号集
return LOS_OK;
}
}
-
+///获取阻塞当前任务的信号集
int OsSigPending(sigset_t *set)
{
LosTaskCB *tcb = NULL;
@@ -563,7 +586,7 @@ int OsSigPending(sigset_t *set)
SCHEDULER_LOCK(intSave);
tcb = OsCurrTaskGet();
- *set = tcb->sig.sigPendFlag;
+ *set = tcb->sig.sigPendFlag;//被阻塞的信号集
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
@@ -578,7 +601,7 @@ STATIC int FindFirstSetedBit(UINT64 n)
for (count = 0; (count < UINT64_BIT_SIZE) && (n ^ 1ULL); n >>= 1, count++) {}
return (count < UINT64_BIT_SIZE) ? count : (-1);
}
-
+///等待信号时间
int OsSigTimedWaitNoLock(sigset_t *set, siginfo_t *info, unsigned int timeout)
{
LosTaskCB *task = NULL;
@@ -589,19 +612,19 @@ int OsSigTimedWaitNoLock(sigset_t *set, siginfo_t *info, unsigned int timeout)
sigcb = &task->sig;
if (sigcb->waitList.pstNext == NULL) {
- LOS_ListInit(&sigcb->waitList);
+ LOS_ListInit(&sigcb->waitList);//初始化信号等待链表
}
- /* If pendingflag & set > 0, should clear pending flag */
+ /* If pendingflag & set > 0, should clear pending flag */
sigset_t clear = sigcb->sigPendFlag & *set;
if (clear) {
sigcb->sigPendFlag ^= clear;
ret = FindFirstSetedBit((UINT64)clear) + 1;
OsMoveTmpInfoToUnbInfo(sigcb, ret);
} else {
- OsSigAddSet(set, SIGKILL);
- OsSigAddSet(set, SIGSTOP);
+ OsSigAddSet(set, SIGKILL);//kill 9 14 必须要处理
+ OsSigAddSet(set, SIGSTOP);//终止进程的信号也必须处理
- sigcb->sigwaitmask |= *set;
+ sigcb->sigwaitmask |= *set;//按位加到等待集上,也就是说sigwaitmask的信号来了都是要处理的.
OsTaskWaitSetPendMask(OS_TASK_WAIT_SIGNAL, sigcb->sigwaitmask, timeout);
ret = task->ops->wait(task, &sigcb->waitList, timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
@@ -614,7 +637,7 @@ int OsSigTimedWaitNoLock(sigset_t *set, siginfo_t *info, unsigned int timeout)
}
return ret;
}
-
+///让当前任务等待的信号
int OsSigTimedWait(sigset_t *set, siginfo_t *info, unsigned int timeout)
{
int ret;
@@ -622,12 +645,12 @@ int OsSigTimedWait(sigset_t *set, siginfo_t *info, unsigned int timeout)
SCHEDULER_LOCK(intSave);
- ret = OsSigTimedWaitNoLock(set, info, timeout);
+ ret = OsSigTimedWaitNoLock(set, info, timeout);//以不加锁的方式等待
SCHEDULER_UNLOCK(intSave);
return ret;
}
-
+///通过信号挂起当前任务
int OsPause(void)
{
LosTaskCB *spcb = NULL;
@@ -637,7 +660,7 @@ int OsPause(void)
oldSigprocmask = spcb->sig.sigprocmask;
return OsSigSuspend(&oldSigprocmask);
}
-
+///用参数set代替进程的原有掩码,并暂停进程执行,直到收到信号再恢复原有掩码并继续执行进程。
int OsSigSuspend(const sigset_t *set)
{
unsigned int intSave;
@@ -677,6 +700,17 @@ int OsSigSuspend(const sigset_t *set)
return -EINTR;
}
+/**
+ * @brief
+ * @verbatim
+ 信号安装,函数用于改变进程接收到特定信号后的行为。
+ sig:信号的值,可以为除SIGKILL及SIGSTOP外的任何一个特定有效的信号(为这两个信号定义自己的处理函数,将导致信号安装错误)。
+ act:设置对signal信号的新处理方式。
+ oldact:原来对信号的处理方式。
+ 如果把第二、第三个参数都设为NULL,那么该函数可用于检查信号的有效性。
+ 返回值:0 表示成功,-1 表示有错误发生。
+ * @endverbatim
+ */
int OsSigAction(int sig, const sigaction_t *act, sigaction_t *oact)
{
UINTPTR addr;
@@ -685,14 +719,17 @@ int OsSigAction(int sig, const sigaction_t *act, sigaction_t *oact)
if (!GOOD_SIGNO(sig) || sig < 1 || act == NULL) {
return -EINVAL;
}
- if (LOS_ArchCopyFromUser(&action, act, sizeof(sigaction_t)) != LOS_OK) {
- return -EFAULT;
- }
-
- if (sig == SIGSYS) {
- addr = OsGetSigHandler();
- if (addr == 0) {
- OsSetSigHandler((unsigned long)(UINTPTR)action.sa_handler);
+ //将数据从用户空间拷贝到内核空间
+ if (LOS_ArchCopyFromUser(&action, act, sizeof(sigaction_t)) != LOS_OK) {
+ return -EFAULT;
+ }
+
+ if (sig == SIGSYS) {//鸿蒙此处通过错误的系统调用 来安装信号处理函数,有点巧妙.
+ addr = OsGetSigHandler();//是否已存在信号处理函数
+ if (addr == 0) {//进程没有设置信号处理函数时
+ OsSetSigHandler((unsigned long)(UINTPTR)action.sa_handler);//设置进程信号处理函数
+ //void (*sa_handler)(int); //信号处理函数——普通版
+ //void (*sa_sigaction)(int, siginfo_t *, void *);//信号处理函数——高级版
return LOS_OK;
}
return -EINVAL;
@@ -717,11 +754,17 @@ VOID OsSigIntUnlock(VOID)
(VOID)LOS_AtomicSub((Atomic *)&sigcb->sigIntLock, 1);
}
+/**
+ * @brief 保存信号上下文
+ * @verbatim
+ 产生系统调用时,也就是软中断时,保存用户栈寄存器现场信息
+ 改写PC寄存器的值
+ * @endverbatim
+ */
VOID *OsSaveSignalContext(VOID *sp, VOID *newSp)
{
UINTPTR sigHandler;
UINT32 intSave;
-
LosTaskCB *task = OsCurrTaskGet();
LosProcessCB *process = OsCurrProcessGet();
sig_cb *sigcb = &task->sig;
@@ -754,7 +797,7 @@ VOID *OsSaveSignalContext(VOID *sp, VOID *newSp)
OsProcessExitCodeSignalSet(process, signo);
sigcb->sigContext = sp;
- OsInitSignalContext(sp, newSp, sigHandler, signo, sigVal);
+ OsInitSignalContext(sp, newSp, sigHandler, signo, sigVal);//初始化信号上下文
/* sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
sigcb->sigFlag ^= 1ULL << (signo - 1);
@@ -767,6 +810,16 @@ VOID *OsSaveSignalContext(VOID *sp, VOID *newSp)
return sp;
}
+/**
+ * @brief
+ * @verbatim
+ 恢复信号上下文,由系统调用之__NR_sigreturn产生,这是一个内部产生的系统调用.
+ 为什么要恢复呢?
+ 因为系统调用的执行由任务内核态完成,使用的栈也是内核栈,CPU相关寄存器记录的都是内核栈的内容,
+ 而系统调用完成后,需返回任务的用户栈执行,这时需将CPU各寄存器回到用户态现场
+ 所以函数的功能就变成了还原寄存器的值
+ * @endverbatim
+ */
VOID *OsRestorSignalContext(VOID *sp)
{
UINT32 intSave;
@@ -785,8 +838,8 @@ VOID *OsRestorSignalContext(VOID *sp)
VOID *saveContext = sigcb->sigContext;
sigcb->sigContext = NULL;
sigcb->count--;
- process->sigShare = 0;
- OsProcessExitCodeSignalClear(process);
+ process->sigShare = 0; //回到用户态,信号共享清0
+ OsProcessExitCodeSignalClear(process);//清空进程退出码
SCHEDULER_UNLOCK(intSave);
return saveContext;
}
diff --git a/src/kernel_liteos_a/kernel/base/mem/common/los_memstat.c b/src/kernel_liteos_a/kernel/base/mem/common/los_memstat.c
index 91164b4c..03815a3f 100644
--- a/src/kernel_liteos_a/kernel/base/mem/common/los_memstat.c
+++ b/src/kernel_liteos_a/kernel/base/mem/common/los_memstat.c
@@ -32,9 +32,9 @@
#include "los_memstat_pri.h"
#include "los_task_pri.h"
-
+/// 记录每个任务对内存的使用情况
LITE_OS_SEC_BSS_MINOR STATIC TskMemUsedInfo g_tskMemUsedInfo[LOSCFG_BASE_CORE_TSK_LIMIT];
-
+/// 计算指定任务对内存使用增加量
LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemUsedInc(UINT32 usedSize, UINT32 taskID)
{
if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
@@ -43,9 +43,9 @@ LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemUsedInc(UINT32 usedSize, UINT32 taskID)
if (OS_INT_ACTIVE) {
return;
}
- g_tskMemUsedInfo[taskID].memUsed += usedSize;
+ g_tskMemUsedInfo[taskID].memUsed += usedSize; ///< 叠加
}
-
+/// 计算指定任务对内存使用减少量
LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemUsedDec(UINT32 usedSize, UINT32 taskID)
{
if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
@@ -59,9 +59,9 @@ LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemUsedDec(UINT32 usedSize, UINT32 taskID)
OsCurrTaskGet()->taskName, g_tskMemUsedInfo[taskID].memUsed, usedSize);
return;
}
- g_tskMemUsedInfo[taskID].memUsed -= usedSize;
+ g_tskMemUsedInfo[taskID].memUsed -= usedSize; ///< 递减
}
-
+/// 获取指定任务对内存的使用情况
LITE_OS_SEC_TEXT_MINOR UINT32 OsTaskMemUsage(UINT32 taskID)
{
if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
@@ -70,7 +70,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsTaskMemUsage(UINT32 taskID)
return g_tskMemUsedInfo[taskID].memUsed;
}
-
+/// 清空任务内存使用记录
LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemClear(UINT32 taskID)
{
if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
@@ -82,8 +82,8 @@ LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemClear(UINT32 taskID)
}
g_tskMemUsedInfo[taskID].memUsed = 0;
}
-
-#ifdef LOS_MEM_SLAB
+// Slab是一种内存分配器,通过将内存划分不同大小的空间分配给对象使用来进行缓存管理,应用于内核对象的缓存。
+#ifdef LOS_MEM_SLAB //
LITE_OS_SEC_BSS_MINOR STATIC TskSlabUsedInfo g_tskSlabUsedInfo[LOSCFG_BASE_CORE_TSK_LIMIT];
LITE_OS_SEC_TEXT_MINOR VOID OsTaskSlabUsedInc(UINT32 usedSize, UINT32 taskID)
diff --git a/src/kernel_liteos_a/kernel/base/mem/membox/los_membox.c b/src/kernel_liteos_a/kernel/base/mem/membox/los_membox.c
index 7a4b66de..01328435 100644
--- a/src/kernel_liteos_a/kernel/base/mem/membox/los_membox.c
+++ b/src/kernel_liteos_a/kernel/base/mem/membox/los_membox.c
@@ -1,3 +1,34 @@
+/*!
+ * @file los_membox.c
+ * @brief 静态内存池主文件
+ * @link
+ @verbatim
+
+ 使用场景
+ 当用户需要使用固定长度的内存时,可以通过静态内存分配的方式获取内存,一旦使用完毕,
+ 通过静态内存释放函数归还所占用内存,使之可以重复使用。
+
+ 开发流程
+ 通过make menuconfig配置静态内存管理模块。
+ 规划一片内存区域作为静态内存池。
+ 调用LOS_MemboxInit初始化静态内存池。
+ 初始化会将入参指定的内存区域分割为N块(N值取决于静态内存总大小和块大小),将所有内存块挂到空闲链表,在内存起始处放置控制头。
+ 调用LOS_MemboxAlloc接口分配静态内存。
+ 系统将会从空闲链表中获取第一个空闲块,并返回该内存块的起始地址。
+ 调用LOS_MemboxClr接口。将入参地址对应的内存块清零。
+ 调用LOS_MemboxFree接口。将该内存块加入空闲链表。
+
+ 注意事项
+ 静态内存池区域,如果是通过动态内存分配方式获得的,在不需要静态内存池时,
+ 需要释放该段内存,避免发生内存泄露。
+ 静态内存不常用,因为需要使用者去确保不会超出使用范围
+
+ @endverbatim
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2022-04-02
+ */
+
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
@@ -33,26 +64,25 @@
#include "los_hwi.h"
#include "los_spinlock.h"
-
#ifdef LOSCFG_AARCH64
-#define OS_MEMBOX_MAGIC 0xa55a5aa5a55a5aa5
+#define OS_MEMBOX_MAGIC 0xa55a5aa5a55a5aa5 //魔法数字,@note_good 点赞,设计的很精巧,node内容从下一个节点地址变成魔法数字
#else
-#define OS_MEMBOX_MAGIC 0xa55a5aa5
+#define OS_MEMBOX_MAGIC 0xa55a5aa5
#endif
#define OS_MEMBOX_SET_MAGIC(addr) \
- ((LOS_MEMBOX_NODE *)(addr))->pstNext = (LOS_MEMBOX_NODE *)OS_MEMBOX_MAGIC
+ ((LOS_MEMBOX_NODE *)(addr))->pstNext = (LOS_MEMBOX_NODE *)OS_MEMBOX_MAGIC //设置魔法数字
#define OS_MEMBOX_CHECK_MAGIC(addr) \
((((LOS_MEMBOX_NODE *)(addr))->pstNext == (LOS_MEMBOX_NODE *)OS_MEMBOX_MAGIC) ? LOS_OK : LOS_NOK)
#define OS_MEMBOX_USER_ADDR(addr) \
- ((VOID *)((UINT8 *)(addr) + OS_MEMBOX_NODE_HEAD_SIZE))
+ ((VOID *)((UINT8 *)(addr) + OS_MEMBOX_NODE_HEAD_SIZE)) //@note_good 使用之前要去掉节点信息,太赞了! 很艺术化!!
#define OS_MEMBOX_NODE_ADDR(addr) \
- ((LOS_MEMBOX_NODE *)(VOID *)((UINT8 *)(addr) - OS_MEMBOX_NODE_HEAD_SIZE))
+ ((LOS_MEMBOX_NODE *)(VOID *)((UINT8 *)(addr) - OS_MEMBOX_NODE_HEAD_SIZE)) //节块 = (节头 + 节体) addr = 节体
/* spinlock for mem module, only available on SMP mode */
LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_memboxSpin);
-#define MEMBOX_LOCK(state) LOS_SpinLockSave(&g_memboxSpin, &(state))
-#define MEMBOX_UNLOCK(state) LOS_SpinUnlockRestore(&g_memboxSpin, (state))
-
+#define MEMBOX_LOCK(state) LOS_SpinLockSave(&g_memboxSpin, &(state)) ///< 获取静态内存池自旋锁
+#define MEMBOX_UNLOCK(state) LOS_SpinUnlockRestore(&g_memboxSpin, (state))///< 释放静态内存池自旋锁
+/// 检查静态内存块
STATIC INLINE UINT32 OsCheckBoxMem(const LOS_MEMBOX_INFO *boxInfo, const VOID *node)
{
UINT32 offset;
@@ -70,12 +100,12 @@ STATIC INLINE UINT32 OsCheckBoxMem(const LOS_MEMBOX_INFO *boxInfo, const VOID *n
return LOS_NOK;
}
- return OS_MEMBOX_CHECK_MAGIC(node);
+ return OS_MEMBOX_CHECK_MAGIC(node);//检查魔法数字是否被修改过了
}
-
+/// 初始化一个静态内存池,根据入参设定其起始地址、总大小及每个内存块大小
LITE_OS_SEC_TEXT_INIT UINT32 LOS_MemboxInit(VOID *pool, UINT32 poolSize, UINT32 blkSize)
{
- LOS_MEMBOX_INFO *boxInfo = (LOS_MEMBOX_INFO *)pool;
+ LOS_MEMBOX_INFO *boxInfo = (LOS_MEMBOX_INFO *)pool;//在内存起始处安置池头
LOS_MEMBOX_NODE *node = NULL;
UINT32 index;
UINT32 intSave;
@@ -93,30 +123,30 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_MemboxInit(VOID *pool, UINT32 poolSize, UINT32
}
MEMBOX_LOCK(intSave);
- boxInfo->uwBlkSize = LOS_MEMBOX_ALIGNED(blkSize + OS_MEMBOX_NODE_HEAD_SIZE);
- boxInfo->uwBlkNum = (poolSize - sizeof(LOS_MEMBOX_INFO)) / boxInfo->uwBlkSize;
- boxInfo->uwBlkCnt = 0;
- if (boxInfo->uwBlkNum == 0) {
+ boxInfo->uwBlkSize = LOS_MEMBOX_ALIGNED(blkSize + OS_MEMBOX_NODE_HEAD_SIZE); //节块总大小(节头+节体)
+ boxInfo->uwBlkNum = (poolSize - sizeof(LOS_MEMBOX_INFO)) / boxInfo->uwBlkSize;//总节块数量
+ boxInfo->uwBlkCnt = 0; //已分配的数量
+ if (boxInfo->uwBlkNum == 0) {//只有0块的情况
MEMBOX_UNLOCK(intSave);
return LOS_NOK;
}
- node = (LOS_MEMBOX_NODE *)(boxInfo + 1);
+ node = (LOS_MEMBOX_NODE *)(boxInfo + 1);//去除池头,找到第一个节块位置
- boxInfo->stFreeList.pstNext = node;
+ boxInfo->stFreeList.pstNext = node;//池头空闲链表指向第一个节块
- for (index = 0; index < boxInfo->uwBlkNum - 1; ++index) {
- node->pstNext = OS_MEMBOX_NEXT(node, boxInfo->uwBlkSize);
- node = node->pstNext;
+ for (index = 0; index < boxInfo->uwBlkNum - 1; ++index) {//切割节块,挂入空闲链表
+ node->pstNext = OS_MEMBOX_NEXT(node, boxInfo->uwBlkSize);//按块大小切割好,统一由pstNext指向
+ node = node->pstNext;//node存储了下一个节点的地址信息
}
- node->pstNext = NULL;
+ node->pstNext = NULL;//最后一个为null
MEMBOX_UNLOCK(intSave);
return LOS_OK;
}
-
+/// 从指定的静态内存池中申请一块静态内存块,整个内核源码只有 OsSwtmrScan中用到了静态内存.
LITE_OS_SEC_TEXT VOID *LOS_MemboxAlloc(VOID *pool)
{
LOS_MEMBOX_INFO *boxInfo = (LOS_MEMBOX_INFO *)pool;
@@ -129,18 +159,18 @@ LITE_OS_SEC_TEXT VOID *LOS_MemboxAlloc(VOID *pool)
}
MEMBOX_LOCK(intSave);
- node = &(boxInfo->stFreeList);
- if (node->pstNext != NULL) {
- nodeTmp = node->pstNext;
- node->pstNext = nodeTmp->pstNext;
- OS_MEMBOX_SET_MAGIC(nodeTmp);
- boxInfo->uwBlkCnt++;
+ node = &(boxInfo->stFreeList);//拿到空闲单链表
+ if (node->pstNext != NULL) {//不需要遍历链表,因为这是空闲链表
+ nodeTmp = node->pstNext;//先记录要使用的节点
+ node->pstNext = nodeTmp->pstNext;//不再空闲了,把节点摘出去了.
+ OS_MEMBOX_SET_MAGIC(nodeTmp);//为已使用的节块设置魔法数字
+ boxInfo->uwBlkCnt++;//已使用块数增加
}
MEMBOX_UNLOCK(intSave);
- return (nodeTmp == NULL) ? NULL : OS_MEMBOX_USER_ADDR(nodeTmp);
+ return (nodeTmp == NULL) ? NULL : OS_MEMBOX_USER_ADDR(nodeTmp);//返回可用的虚拟地址
}
-
+/// 释放指定的一块静态内存块
LITE_OS_SEC_TEXT UINT32 LOS_MemboxFree(VOID *pool, VOID *box)
{
LOS_MEMBOX_INFO *boxInfo = (LOS_MEMBOX_INFO *)pool;
@@ -153,21 +183,21 @@ LITE_OS_SEC_TEXT UINT32 LOS_MemboxFree(VOID *pool, VOID *box)
MEMBOX_LOCK(intSave);
do {
- LOS_MEMBOX_NODE *node = OS_MEMBOX_NODE_ADDR(box);
+ LOS_MEMBOX_NODE *node = OS_MEMBOX_NODE_ADDR(box);//通过节体获取节块首地址
if (OsCheckBoxMem(boxInfo, node) != LOS_OK) {
break;
}
- node->pstNext = boxInfo->stFreeList.pstNext;
- boxInfo->stFreeList.pstNext = node;
- boxInfo->uwBlkCnt--;
+ node->pstNext = boxInfo->stFreeList.pstNext;//节块指向空闲链表表头
+ boxInfo->stFreeList.pstNext = node;//空闲链表表头反指向它,意味节块排到第一,下次申请将首个分配它
+ boxInfo->uwBlkCnt--;//已经使用的内存块减一
ret = LOS_OK;
- } while (0);
+ } while (0);//将被编译时优化
MEMBOX_UNLOCK(intSave);
return ret;
}
-
+/// 清零指定静态内存块的内容
LITE_OS_SEC_TEXT_MINOR VOID LOS_MemboxClr(VOID *pool, VOID *box)
{
LOS_MEMBOX_INFO *boxInfo = (LOS_MEMBOX_INFO *)pool;
@@ -175,11 +205,12 @@ LITE_OS_SEC_TEXT_MINOR VOID LOS_MemboxClr(VOID *pool, VOID *box)
if ((pool == NULL) || (box == NULL)) {
return;
}
-
+ //将魔法数字一并清除了.
(VOID)memset_s(box, (boxInfo->uwBlkSize - OS_MEMBOX_NODE_HEAD_SIZE), 0,
(boxInfo->uwBlkSize - OS_MEMBOX_NODE_HEAD_SIZE));
}
-
+/// 打印指定静态内存池所有节点信息(打印等级是LOS_INFO_LEVEL),包括内存池起始地址、
+/// 内存块大小、总内存块数量、每个空闲内存块的起始地址、所有内存块的起始地址
LITE_OS_SEC_TEXT_MINOR VOID LOS_ShowBox(VOID *pool)
{
UINT32 index;
@@ -206,7 +237,7 @@ LITE_OS_SEC_TEXT_MINOR VOID LOS_ShowBox(VOID *pool)
}
MEMBOX_UNLOCK(intSave);
}
-
+/// 获取指定静态内存池的信息,包括内存池中总内存块数量、已经分配出去的内存块数量、每个内存块的大小
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_MemboxStatisticsGet(const VOID *boxMem, UINT32 *maxBlk,
UINT32 *blkCnt, UINT32 *blkSize)
{
diff --git a/src/kernel_liteos_a/kernel/base/mem/tlsf/los_memory.c b/src/kernel_liteos_a/kernel/base/mem/tlsf/los_memory.c
index 3441b613..cfd75971 100644
--- a/src/kernel_liteos_a/kernel/base/mem/tlsf/los_memory.c
+++ b/src/kernel_liteos_a/kernel/base/mem/tlsf/los_memory.c
@@ -1,3 +1,51 @@
+/*!
+ * @file los_memory.c
+ * @brief
+ * @link tlsf算法论文 http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf @endlink
+ @verbatim
+ https://www.codenong.com/cs106845116/ TLSF算法(一)分配中的位图计算
+ 基本概念
+ 内存管理模块管理系统的内存资源,它是操作系统的核心模块之一,主要包括内存的初始化、分配以及释放。
+ OpenHarmony LiteOS-A的堆内存管理提供内存初始化、分配、释放等功能。在系统运行过程中,堆内存管理
+ 模块通过对内存的申请/释放来管理用户和OS对内存的使用,使内存的利用率和使用效率达到最优,同时最大限度地解决系统的内存碎片问题。
+
+ 运行机制
+ 堆内存管理,即在内存资源充足的情况下,根据用户需求,从系统配置的一块比较大的连续内存
+ (内存池,也是堆内存)中分配任意大小的内存块。当用户不需要该内存块时,又可以释放回系统供下一次使用。
+ 与静态内存相比,动态内存管理的优点是按需分配,缺点是内存池中容易出现碎片。
+ OpenHarmony LiteOS-A堆内存在TLSF算法的基础上,对区间的划分进行了优化,获得更优的性能,降低了碎片率。
+ 动态内存核心算法框图如下:
+ @endverbatim
+ * @image html https://gitee.com/weharmonyos/resources/raw/master/11/1.png
+ @verbatim
+ 根据空闲内存块的大小,使用多个空闲链表来管理。根据内存空闲块大小分为两个部分:[4, 127]和[2^7, 2^31],如上图size class所示:
+
+ 1. 对[4,127]区间的内存进行等分,如上图下半部分所示,分为31个小区间,每个小区间对应内存块大小为4字节的倍数。
+ 每个小区间对应一个空闲内存链表和用于标记对应空闲内存链表是否为空的一个比特位,值为1时,空闲链表非空。
+ [4,127]区间的31个小区间内存对应31个比特位进行标记链表是否为空。
+ 2. 大于127字节的空闲内存块,按照2的次幂区间大小进行空闲链表管理。总共分为24个小区间,每个小区间又等分为8个二级小区间,
+ 见上图上半部分的Size Class和Size SubClass部分。每个二级小区间对应一个空闲链表和用于标记对应空闲内存链表是否为空的一个比特位。
+ 总共24*8=192个二级小区间,对应192个空闲链表和192个比特位进行标记链表是否为空。
+ 例如,当有40字节的空闲内存需要插入空闲链表时,对应小区间[40,43],第10个空闲链表,位图标记的第10比特位。
+ 把40字节的空闲内存挂载第10个空闲链表上,并判断是否需要更新位图标记。当需要申请40字节的内存时,
+ 根据位图标记获取存在满足申请大小的内存块的空闲链表,从空闲链表上获取空闲内存节点。如果分配的节点大于需要申请的内存大小,
+ 进行分割节点操作,剩余的节点重新挂载到相应的空闲链表上。当有580字节的空闲内存需要插入空闲链表时,对应二级小区间[2^9, 2^9+2^6],
+ 第31+2*8=47个空闲链表,并使用位图的第47个比特位来标记链表是否为空。把580字节的空闲内存挂载第47个空闲链表上,并判断是否需要更新位图标记。
+ 当需要申请580字节的内存时,根据位图标记获取存在满足申请大小的内存块的空闲链表,从空闲链表上获取空闲内存节点。
+ 如果分配的节点大于需要申请的内存大小,进行分割节点操作,剩余的节点重新挂载到相应的空闲链表上。如果对应的空闲链表为空,
+ 则向更大的内存区间去查询是否有满足条件的空闲链表,实际计算时,会一次性查找到满足申请大小的空闲链表。
+
+ 内存信息包括内存池大小、内存使用量、剩余内存大小、最大空闲内存、内存水线、内存节点数统计、碎片率等。
+ 内存水线:即内存池的最大使用量,每次申请和释放时,都会更新水线值,实际业务可根据该值,优化内存池大小;
+ 碎片率:衡量内存池的碎片化程度,碎片率高表现为内存池剩余内存很多,但是最大空闲内存块很小,可以用公式(fragment=100-最大空闲内存块大小/剩余内存大小)来度量;
+
+ 内存管理结构如下图所示:
+ @endverbatim
+ * @image html https://gitee.com/weharmonyos/resources/raw/master/11/2.png
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-11-19
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
@@ -43,7 +91,7 @@
#include "los_lms_pri.h"
#endif
-/* Used to cut non-essential functions. */
+/* Used to cut non-essential functions. | 用于削减非必要功能 */
#define OS_MEM_FREE_BY_TASKID 0
#ifdef LOSCFG_KERNEL_VM
#define OS_MEM_EXPAND_ENABLE 1
@@ -56,32 +104,32 @@
/* column num of the output info of mem node */
#define OS_MEM_COLUMN_NUM 8
-UINT8 *m_aucSysMem0 = NULL;
-UINT8 *m_aucSysMem1 = NULL;
+UINT8 *m_aucSysMem0 = NULL; ///< 异常交互动态内存池地址的起始地址,当不支持异常交互特性时,m_aucSysMem0等于m_aucSysMem1。
+UINT8 *m_aucSysMem1 = NULL; ///< (内核态)系统动态内存池地址的起始地址 @note_thinking 能否不要用 0,1来命名核心变量 ???
#ifdef LOSCFG_MEM_MUL_POOL
-VOID *g_poolHead = NULL;
+VOID *g_poolHead = NULL; ///内存池头,由它牵引多个内存池
#endif
/* The following is the macro definition and interface implementation related to the TLSF. */
/* Supposing a Second Level Index: SLI = 3. */
-#define OS_MEM_SLI 3
+#define OS_MEM_SLI 3 ///< 二级小区间级数,
/* Giving 1 free list for each small bucket: 4, 8, 12, up to 124. */
-#define OS_MEM_SMALL_BUCKET_COUNT 31
-#define OS_MEM_SMALL_BUCKET_MAX_SIZE 128
-/* Giving OS_MEM_FREE_LIST_NUM free lists for each large bucket. */
-#define OS_MEM_LARGE_BUCKET_COUNT 24
-#define OS_MEM_FREE_LIST_NUM (1 << OS_MEM_SLI)
+#define OS_MEM_SMALL_BUCKET_COUNT 31 ///< 小桶的数量,管理内存块大小 4 ~ 124 ,共31级
+#define OS_MEM_SMALL_BUCKET_MAX_SIZE 128 ///< 小桶可管理的内存块大小上限(不含),即小桶区间为 [4, 127]
+/* Giving OS_MEM_FREE_LIST_NUM free lists for each large bucket. */
+#define OS_MEM_LARGE_BUCKET_COUNT 24 /// 大桶的数量 大桶范围: [2^7, 2^31] ,每个大桶又分为 2^3 个小区间
+#define OS_MEM_FREE_LIST_NUM (1 << OS_MEM_SLI) ///< 2^3 = 8 个,即大桶的每个区间又分为8个小区间
/* OS_MEM_SMALL_BUCKET_MAX_SIZE to the power of 2 is 7. */
-#define OS_MEM_LARGE_START_BUCKET 7
+#define OS_MEM_LARGE_START_BUCKET 7 /// 大桶的开始下标
/* The count of free list. */
-#define OS_MEM_FREE_LIST_COUNT (OS_MEM_SMALL_BUCKET_COUNT + (OS_MEM_LARGE_BUCKET_COUNT << OS_MEM_SLI))
+#define OS_MEM_FREE_LIST_COUNT (OS_MEM_SMALL_BUCKET_COUNT + (OS_MEM_LARGE_BUCKET_COUNT << OS_MEM_SLI)) ///< 总链表的数量 31 + 24 * 8 = 223
/* The bitmap is used to indicate whether the free list is empty, 1: not empty, 0: empty. */
-#define OS_MEM_BITMAP_WORDS ((OS_MEM_FREE_LIST_COUNT >> 5) + 1)
-
-#define OS_MEM_BITMAP_MASK 0x1FU
+#define OS_MEM_BITMAP_WORDS ((OS_MEM_FREE_LIST_COUNT >> 5) + 1) ///< (223 >> 5) + 1 = 7 ,为什么要右移 5 因为 2^5 = 32 是一个32位整型的大小
+ ///< 而 32 * 7 = 224 > 223 ,也就是说用 int[7]当位图就能表示完 223个链表 ,此处,一定要理解好,因为这是理解 TLSF 算法的关键.
+#define OS_MEM_BITMAP_MASK 0x1FU ///< 因为一个int型为 32位, 2^5 = 32,所以此处 0x1FU = 5个1 足以.
/* Used to find the first bit of 1 in bitmap. */
STATIC INLINE UINT16 OsMemFFS(UINT32 bitmap)
@@ -101,7 +149,7 @@ STATIC INLINE UINT32 OsMemLog2(UINT32 size)
return OsMemFLS(size);
}
-/* Get the first level: f = log2(size). */
+/* Get the first level: f = log2(size). | 获取第一级*/
STATIC INLINE UINT32 OsMemFlGet(UINT32 size)
{
if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
@@ -110,56 +158,56 @@ STATIC INLINE UINT32 OsMemFlGet(UINT32 size)
return OsMemLog2(size);
}
-/* Get the second level: s = (size - 2^f) * 2^SLI / 2^f. */
+/* Get the second level: s = (size - 2^f) * 2^SLI / 2^f. | 获取第二级 */
STATIC INLINE UINT32 OsMemSlGet(UINT32 size, UINT32 fl)
{
return (((size << OS_MEM_SLI) >> fl) - OS_MEM_FREE_LIST_NUM);
}
/* The following is the memory algorithm related macro definition and interface implementation. */
-
+/// 内存池节点
struct OsMemNodeHead {
- UINT32 magic;
- union {
- struct OsMemNodeHead *prev; /* The prev is used for current node points to the previous node */
- struct OsMemNodeHead *next; /* The next is used for last node points to the expand node */
+ UINT32 magic; ///< 魔法数字 0xABCDDCBA
+ union {//注意这里的前后指向的是连续的地址节点,用于分割和合并
+ struct OsMemNodeHead *prev; /* The prev is used for current node points to the previous node | prev 用于当前节点指向前一个节点*/
+ struct OsMemNodeHead *next; /* The next is used for last node points to the expand node | next 用于最后一个节点指向展开节点*/
} ptr;
-#ifdef LOSCFG_MEM_LEAKCHECK
- UINTPTR linkReg[LOS_RECORD_LR_CNT];
+#ifdef LOSCFG_MEM_LEAKCHECK //内存泄漏检测
+ UINTPTR linkReg[LOS_RECORD_LR_CNT];///< 存放左右节点地址,用于检测
#endif
- UINT32 sizeAndFlag;
+ UINT32 sizeAndFlag; ///< 节点总大小+标签
};
-
+/// 已使用内存池节点
struct OsMemUsedNodeHead {
- struct OsMemNodeHead header;
+ struct OsMemNodeHead header;///< 已被使用节点
#if OS_MEM_FREE_BY_TASKID
- UINT32 taskID;
+ UINT32 taskID; ///< 使用节点的任务ID
#endif
};
-
+/// 内存池空闲节点
struct OsMemFreeNodeHead {
- struct OsMemNodeHead header;
- struct OsMemFreeNodeHead *prev;
- struct OsMemFreeNodeHead *next;
+ struct OsMemNodeHead header; ///< 内存池节点
+ struct OsMemFreeNodeHead *prev; ///< 前一个空闲前驱节点
+ struct OsMemFreeNodeHead *next; ///< 后一个空闲后继节点
};
-
+/// 内存池信息
struct OsMemPoolInfo {
- VOID *pool;
- UINT32 totalSize;
- UINT32 attr;
+ VOID *pool; ///< 指向内存块基地址,仅做记录而已,真正的分配内存跟它没啥关系
+ UINT32 totalSize; ///< 总大小,确定了内存池的边界
+ UINT32 attr; ///< 属性 default attr: lock, not expand.
#ifdef LOSCFG_MEM_WATERLINE
- UINT32 waterLine; /* Maximum usage size in a memory pool */
- UINT32 curUsedSize; /* Current usage size in a memory pool */
+ UINT32 waterLine; /* Maximum usage size in a memory pool | 内存吃水线*/
+ UINT32 curUsedSize; /* Current usage size in a memory pool | 当前已使用大小*/
#endif
};
-
+/// 内存池头信息
struct OsMemPoolHead {
- struct OsMemPoolInfo info;
- UINT32 freeListBitmap[OS_MEM_BITMAP_WORDS];
- struct OsMemFreeNodeHead *freeList[OS_MEM_FREE_LIST_COUNT];
- SPIN_LOCK_S spinlock;
+ struct OsMemPoolInfo info; ///< 记录内存池的信息
+ UINT32 freeListBitmap[OS_MEM_BITMAP_WORDS]; ///< 空闲位图 int[7] = 32 * 7 = 224 > 223
+ struct OsMemFreeNodeHead *freeList[OS_MEM_FREE_LIST_COUNT];///< 空闲节点链表 31 + 24 * 8 = 223
+ SPIN_LOCK_S spinlock; ///< 操作本池的自旋锁,涉及CPU多核竞争,所以必须得是自旋锁
#ifdef LOSCFG_MEM_MUL_POOL
- VOID *nextPool;
+ VOID *nextPool; ///< 指向下一个内存池 OsMemPoolHead 类型
#endif
};
@@ -168,16 +216,16 @@ struct OsMemPoolHead {
#define MEM_UNLOCK(pool, state) LOS_SpinUnlockRestore(&(pool)->spinlock, (state))
/* The memory pool support expand. */
-#define OS_MEM_POOL_EXPAND_ENABLE 0x01
+#define OS_MEM_POOL_EXPAND_ENABLE 0x01 ///< 支持扩展
/* The memory pool support no lock. */
-#define OS_MEM_POOL_LOCK_ENABLE 0x02
-
-#define OS_MEM_NODE_MAGIC 0xABCDDCBA
-#define OS_MEM_MIN_ALLOC_SIZE (sizeof(struct OsMemFreeNodeHead) - sizeof(struct OsMemUsedNodeHead))
-
-#define OS_MEM_NODE_USED_FLAG 0x80000000U
-#define OS_MEM_NODE_ALIGNED_FLAG 0x40000000U
-#define OS_MEM_NODE_LAST_FLAG 0x20000000U /* Sentinel Node */
+#define OS_MEM_POOL_LOCK_ENABLE 0x02 ///< 加锁
+
+#define OS_MEM_NODE_MAGIC 0xABCDDCBA ///< 内存节点的魔法数字
+#define OS_MEM_MIN_ALLOC_SIZE (sizeof(struct OsMemFreeNodeHead) - sizeof(struct OsMemUsedNodeHead)) //最小分配空间
+// 必须给指向空闲块的指针留位置
+#define OS_MEM_NODE_USED_FLAG 0x80000000U ///< 已使用标签
+#define OS_MEM_NODE_ALIGNED_FLAG 0x40000000U ///< 对齐标签
+#define OS_MEM_NODE_LAST_FLAG 0x20000000U /* Sentinel Node | 哨兵节点标签*/
#define OS_MEM_NODE_ALIGNED_AND_USED_FLAG (OS_MEM_NODE_USED_FLAG | OS_MEM_NODE_ALIGNED_FLAG | OS_MEM_NODE_LAST_FLAG)
#define OS_MEM_NODE_GET_ALIGNED_FLAG(sizeAndFlag) \
@@ -226,32 +274,33 @@ STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave)
#if OS_MEM_FREE_BY_TASKID
STATIC INLINE VOID OsMemNodeSetTaskID(struct OsMemUsedNodeHead *node)
{
- node->taskID = LOS_CurTaskIDGet();
+ node->taskID = LOS_CurTaskIDGet();//将当前任务ID绑定到内存池节点上
}
#endif
#ifdef LOSCFG_MEM_WATERLINE
STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
{
- pool->info.curUsedSize += size;
+ pool->info.curUsedSize += size; //延长可使用空间
if (pool->info.curUsedSize > pool->info.waterLine) {
- pool->info.waterLine = pool->info.curUsedSize;
+ pool->info.waterLine = pool->info.curUsedSize; //警戒线加高
}
}
#else
STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
{
- (VOID)pool;
- (VOID)size;
+ (VOID)pool; // @note_thinking 为何要这么写 ,因为格式规范吗 ? 直接啥也不写不行吗 ?
+ (VOID)size; // 编译器会优化掉这种代码
}
#endif
#if OS_MEM_EXPAND_ENABLE
+/// 更新哨兵节点内容
STATIC INLINE struct OsMemNodeHead *OsMemLastSentinelNodeGet(const struct OsMemNodeHead *sentinelNode)
{
struct OsMemNodeHead *node = NULL;
- VOID *ptr = sentinelNode->ptr.next;
- UINT32 size = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag);
+ VOID *ptr = sentinelNode->ptr.next;//返回不连续的内存块
+ UINT32 size = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag); // 获取大小
while ((ptr != NULL) && (size != 0)) {
node = OS_MEM_END_NODE(ptr, size);
@@ -261,7 +310,7 @@ STATIC INLINE struct OsMemNodeHead *OsMemLastSentinelNodeGet(const struct OsMemN
return node;
}
-
+/// 检查哨兵节点
STATIC INLINE BOOL OsMemSentinelNodeCheck(struct OsMemNodeHead *sentinelNode)
{
if (!OS_MEM_NODE_GET_USED_FLAG(sentinelNode->sizeAndFlag)) {
@@ -274,7 +323,7 @@ STATIC INLINE BOOL OsMemSentinelNodeCheck(struct OsMemNodeHead *sentinelNode)
return TRUE;
}
-
+/// 是否为最后一个哨兵节点
STATIC INLINE BOOL OsMemIsLastSentinelNode(struct OsMemNodeHead *sentinelNode)
{
if (OsMemSentinelNodeCheck(sentinelNode) == FALSE) {
@@ -289,11 +338,11 @@ STATIC INLINE BOOL OsMemIsLastSentinelNode(struct OsMemNodeHead *sentinelNode)
return FALSE;
}
-
+/// 设置哨兵节点内容
STATIC INLINE VOID OsMemSentinelNodeSet(struct OsMemNodeHead *sentinelNode, VOID *newNode, UINT32 size)
{
- if (sentinelNode->ptr.next != NULL) {
- sentinelNode = OsMemLastSentinelNodeGet(sentinelNode);
+ if (sentinelNode->ptr.next != NULL) { //哨兵节点有 逻辑地址不连续的衔接内存块
+ sentinelNode = OsMemLastSentinelNodeGet(sentinelNode);//更新哨兵节点内容
}
sentinelNode->sizeAndFlag = size;
@@ -329,14 +378,14 @@ STATIC INLINE struct OsMemNodeHead *PreSentinelNodeGet(const VOID *pool, const s
return NULL;
}
-
+/// 大内存释放
UINT32 OsMemLargeNodeFree(const VOID *ptr)
{
- LosVmPage *page = OsVmVaddrToPage((VOID *)ptr);
+ LosVmPage *page = OsVmVaddrToPage((VOID *)ptr);//获取物理页
if ((page == NULL) || (page->nPages == 0)) {
return LOS_NOK;
}
- LOS_PhysPagesFreeContiguous((VOID *)ptr, page->nPages);
+ LOS_PhysPagesFreeContiguous((VOID *)ptr, page->nPages);//释放连续的几个物理页
return LOS_OK;
}
@@ -375,7 +424,7 @@ STATIC INLINE BOOL TryShrinkPool(const VOID *pool, const struct OsMemNodeHead *n
#endif
return TRUE;
}
-
+/// 内存池扩展实现
STATIC INLINE INT32 OsMemPoolExpandSub(VOID *pool, UINT32 size, UINT32 intSave)
{
UINT32 tryCount = MAX_SHRINK_PAGECACHE_TRY;
@@ -383,11 +432,11 @@ STATIC INLINE INT32 OsMemPoolExpandSub(VOID *pool, UINT32 size, UINT32 intSave)
struct OsMemNodeHead *newNode = NULL;
struct OsMemNodeHead *endNode = NULL;
- size = ROUNDUP(size + OS_MEM_NODE_HEAD_SIZE, PAGE_SIZE);
- endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);
+ size = ROUNDUP(size + OS_MEM_NODE_HEAD_SIZE, PAGE_SIZE);//圆整
+ endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize);//获取哨兵节点
RETRY:
- newNode = (struct OsMemNodeHead *)LOS_PhysPagesAllocContiguous(size >> PAGE_SHIFT);
+ newNode = (struct OsMemNodeHead *)LOS_PhysPagesAllocContiguous(size >> PAGE_SHIFT);//申请新的内存池 | 物理内存
if (newNode == NULL) {
if (tryCount > 0) {
tryCount--;
@@ -411,26 +460,26 @@ RETRY:
size = (resize == 0) ? size : resize;
}
#endif
- newNode->sizeAndFlag = (size - OS_MEM_NODE_HEAD_SIZE);
- newNode->ptr.prev = OS_MEM_END_NODE(newNode, size);
- OsMemSentinelNodeSet(endNode, newNode, size);
- OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);
+ newNode->sizeAndFlag = (size - OS_MEM_NODE_HEAD_SIZE);//设置新节点大小
+ newNode->ptr.prev = OS_MEM_END_NODE(newNode, size);//新节点的前节点指向新节点的哨兵节点
+ OsMemSentinelNodeSet(endNode, newNode, size);//设置老内存池的哨兵节点信息,其实就是指向新内存块
+ OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);//将新节点加入空闲链表
- endNode = OS_MEM_END_NODE(newNode, size);
- (VOID)memset(endNode, 0, sizeof(*endNode));
- endNode->ptr.next = NULL;
- endNode->magic = OS_MEM_NODE_MAGIC;
- OsMemSentinelNodeSet(endNode, NULL, 0);
- OsMemWaterUsedRecord(poolInfo, OS_MEM_NODE_HEAD_SIZE);
+ endNode = OS_MEM_END_NODE(newNode, size);//获取新节点的哨兵节点
+ (VOID)memset(endNode, 0, sizeof(*endNode));//清空内存
+ endNode->ptr.next = NULL;//新哨兵节点没有后续指向,因为它已成为最后
+ endNode->magic = OS_MEM_NODE_MAGIC;//设置新哨兵节的魔法数字
+ OsMemSentinelNodeSet(endNode, NULL, 0); //设置新哨兵节点内容
+ OsMemWaterUsedRecord(poolInfo, OS_MEM_NODE_HEAD_SIZE);//更新内存池警戒线
return 0;
}
-
+/// 扩展内存池
STATIC INLINE INT32 OsMemPoolExpand(VOID *pool, UINT32 allocSize, UINT32 intSave)
{
- UINT32 expandDefault = MEM_EXPAND_SIZE(LOS_MemPoolSizeGet(pool));
+ UINT32 expandDefault = MEM_EXPAND_SIZE(LOS_MemPoolSizeGet(pool));//至少要扩展现有内存池的 1/8 大小
UINT32 expandSize = MAX(expandDefault, allocSize);
- UINT32 tryCount = 1;
+ UINT32 tryCount = 1;//尝试次数
UINT32 ret;
do {
@@ -447,7 +496,7 @@ STATIC INLINE INT32 OsMemPoolExpand(VOID *pool, UINT32 allocSize, UINT32 intSave
return -1;
}
-
+///< 允许指定内存池扩展
VOID LOS_MemExpandEnable(VOID *pool)
{
if (pool == NULL) {
@@ -486,7 +535,7 @@ STATIC INLINE VOID OsLmsAllocAlignMark(VOID *ptr, VOID *alignedPtr, UINT32 size)
g_lms->simpleMark((UINTPTR)ptr + sizeof(UINT32), (UINTPTR)alignedPtr, LMS_SHADOW_REDZONE_U8);
}
- /* mark remaining as redzone */
+ /* mark remaining as redzone */
g_lms->simpleMark(LMS_ADDR_ALIGN((UINTPTR)alignedPtr + size), (UINTPTR)OS_MEM_NEXT_NODE(allocNode),
LMS_SHADOW_REDZONE_U8);
}
@@ -522,8 +571,7 @@ STATIC INLINE VOID OsLmsReallocResizeMark(struct OsMemNodeHead *node, UINT32 res
g_lms->simpleMark((UINTPTR)node + resize, (UINTPTR)OS_MEM_NEXT_NODE(node), LMS_SHADOW_REDZONE_U8);
}
#endif
-
-#ifdef LOSCFG_MEM_LEAKCHECK
+#ifdef LOSCFG_MEM_LEAKCHECK //内存泄漏检查
STATIC INLINE VOID OsMemLinkRegisterRecord(struct OsMemNodeHead *node)
{
LOS_RecordLR(node->linkReg, LOS_RECORD_LR_CNT, LOS_RECORD_LR_CNT, LOS_OMIT_LR_CNT);
@@ -549,7 +597,7 @@ STATIC INLINE VOID OsMemUsedNodePrint(struct OsMemNodeHead *node)
PRINTK("\n");
}
}
-
+/// 打印已使用的节点
VOID OsMemUsedNodeShow(VOID *pool)
{
if (pool == NULL) {
@@ -626,17 +674,17 @@ STATIC VOID OsMemNodeBacktraceInfo(const struct OsMemNodeHead *tmpNode,
STATIC INLINE UINT32 OsMemFreeListIndexGet(UINT32 size)
{
- UINT32 fl = OsMemFlGet(size);
+ UINT32 fl = OsMemFlGet(size);//获取一级位图
if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
return fl;
}
- UINT32 sl = OsMemSlGet(size, fl);
+ UINT32 sl = OsMemSlGet(size, fl);//获取二级位图
return (OS_MEM_SMALL_BUCKET_COUNT + ((fl - OS_MEM_LARGE_START_BUCKET) << OS_MEM_SLI) + sl);
}
STATIC INLINE struct OsMemFreeNodeHead *OsMemFindCurSuitableBlock(struct OsMemPoolHead *poolHead,
- UINT32 index, UINT32 size)
+ UINT32 index, UINT32 size)
{
struct OsMemFreeNodeHead *node = NULL;
@@ -663,7 +711,7 @@ STATIC INLINE UINT32 OsMemNotEmptyIndexGet(struct OsMemPoolHead *poolHead, UINT3
return OS_MEM_FREE_LIST_COUNT;
}
-
+/// 找到下一个合适的块
STATIC INLINE struct OsMemFreeNodeHead *OsMemFindNextSuitableBlock(VOID *pool, UINT32 size, UINT32 *outIndex)
{
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
@@ -730,13 +778,13 @@ STATIC INLINE VOID OsMemListAdd(struct OsMemPoolHead *pool, UINT32 listIndex, st
OsMemSetFreeListBit(pool, listIndex);
node->header.magic = OS_MEM_NODE_MAGIC;
}
-
+/// 从空闲链表中删除
STATIC INLINE VOID OsMemListDelete(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
{
if (node == pool->freeList[listIndex]) {
pool->freeList[listIndex] = node->next;
- if (node->next == NULL) {
- OsMemClearFreeListBit(pool, listIndex);
+ if (node->next == NULL) {//如果链表空了
+ OsMemClearFreeListBit(pool, listIndex);//将位图位 置为 0
} else {
node->next->prev = NULL;
}
@@ -748,27 +796,27 @@ STATIC INLINE VOID OsMemListDelete(struct OsMemPoolHead *pool, UINT32 listIndex,
}
node->header.magic = OS_MEM_NODE_MAGIC;
}
-
+/// 添加一个空闲节点
STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node)
{
- UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
+ UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);//根据大小定位索引位
if (index >= OS_MEM_FREE_LIST_COUNT) {
LOS_Panic("The index of free lists is error, index = %u\n", index);
return;
}
- OsMemListAdd(pool, index, node);
+ OsMemListAdd(pool, index, node);//挂入链表
}
-
+/// 从空闲链表上摘除节点
STATIC INLINE VOID OsMemFreeNodeDelete(VOID *pool, struct OsMemFreeNodeHead *node)
{
- UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
+ UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);//根据大小定位索引位
if (index >= OS_MEM_FREE_LIST_COUNT) {
LOS_Panic("The index of free lists is error, index = %u\n", index);
return;
}
OsMemListDelete(pool, index, node);
}
-
+//获取一个空闲的节点
STATIC INLINE struct OsMemNodeHead *OsMemFreeNodeGet(VOID *pool, UINT32 size)
{
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
@@ -782,56 +830,67 @@ STATIC INLINE struct OsMemNodeHead *OsMemFreeNodeGet(VOID *pool, UINT32 size)
return &firstNode->header;
}
-
+/// 合并节点,和前面的节点合并 node 消失
STATIC INLINE VOID OsMemMergeNode(struct OsMemNodeHead *node)
{
struct OsMemNodeHead *nextNode = NULL;
- node->ptr.prev->sizeAndFlag += node->sizeAndFlag;
- nextNode = (struct OsMemNodeHead *)((UINTPTR)node + node->sizeAndFlag);
- if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
- nextNode->ptr.prev = node->ptr.prev;
+ node->ptr.prev->sizeAndFlag += node->sizeAndFlag; //前节点长度变长
+ nextNode = (struct OsMemNodeHead *)((UINTPTR)node + node->sizeAndFlag); // 下一个节点位置
+ if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {//不是哨兵节点
+ nextNode->ptr.prev = node->ptr.prev;//后一个节点的前节点变成前前节点
}
}
-
+/// 切割节点
STATIC INLINE VOID OsMemSplitNode(VOID *pool, struct OsMemNodeHead *allocNode, UINT32 allocSize)
{
struct OsMemFreeNodeHead *newFreeNode = NULL;
struct OsMemNodeHead *nextNode = NULL;
- newFreeNode = (struct OsMemFreeNodeHead *)(VOID *)((UINT8 *)allocNode + allocSize);
- newFreeNode->header.ptr.prev = allocNode;
- newFreeNode->header.sizeAndFlag = allocNode->sizeAndFlag - allocSize;
- allocNode->sizeAndFlag = allocSize;
- nextNode = OS_MEM_NEXT_NODE(&newFreeNode->header);
- if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
- nextNode->ptr.prev = &newFreeNode->header;
- if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
- OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
- OsMemMergeNode(nextNode);
+ newFreeNode = (struct OsMemFreeNodeHead *)(VOID *)((UINT8 *)allocNode + allocSize);//切割后出现的新空闲节点,在分配节点的右侧
+ newFreeNode->header.ptr.prev = allocNode;//新节点指向前节点,说明是从左到右切割
+ newFreeNode->header.sizeAndFlag = allocNode->sizeAndFlag - allocSize;//新空闲节点大小
+ allocNode->sizeAndFlag = allocSize;//分配节点大小
+ nextNode = OS_MEM_NEXT_NODE(&newFreeNode->header);//获取新节点的下一个节点
+ if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {//如果下一个节点不是哨兵节点(末尾节点)
+ nextNode->ptr.prev = &newFreeNode->header;//下一个节点的前节点为新空闲节点
+ if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {//如果下一个节点也是空闲的
+ OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);//删除下一个节点信息
+ OsMemMergeNode(nextNode);//下一个节点和新空闲节点 合并成一个新节点
}
}
- OsMemFreeNodeAdd(pool, newFreeNode);
+ OsMemFreeNodeAdd(pool, newFreeNode);//挂入空闲链表
}
-
+//
STATIC INLINE VOID *OsMemCreateUsedNode(VOID *addr)
{
- struct OsMemUsedNodeHead *node = (struct OsMemUsedNodeHead *)addr;
-
+ struct OsMemUsedNodeHead *node = (struct OsMemUsedNodeHead *)addr;//直接将地址转成使用节点,说明节点信息也存在内存池中
+ //这种用法是非常巧妙的
#if OS_MEM_FREE_BY_TASKID
- OsMemNodeSetTaskID(node);
+ OsMemNodeSetTaskID(node);//设置使用内存节点的任务
#endif
-#ifdef LOSCFG_KERNEL_LMS
+#ifdef LOSCFG_KERNEL_LMS //检测内存泄漏
struct OsMemNodeHead *newNode = (struct OsMemNodeHead *)node;
if (g_lms != NULL) {
g_lms->mallocMark(newNode, OS_MEM_NEXT_NODE(newNode), OS_MEM_NODE_HEAD_SIZE);
}
#endif
- return node + 1;
+ return node + 1; //@note_good 这个地方挺有意思的,只是将结构体扩展下,留一个 int 位 ,变成了已使用节点,返回的地址正是要分配给应用的地址
}
+/*!
+ * @brief OsMemPoolInit 内存池初始化
+ * 内存池节点部分包含3种类型节点:未使用空闲内存节点(OsMemFreeNodeHead),已使用内存节点(OsMemUsedNodeHead) 和 尾节点(OsMemNodeHead)。
+ * \n 每个内存节点维护一个前序指针,指向内存池中上一个内存节点,还维护内存节点的大小和使用标记。
+ * \n 空闲内存节点和已使用内存节点后面的内存区域是数据域
+ * @param pool
+ * @param size
+ * @return
+ *
+ * @see
+ */
STATIC UINT32 OsMemPoolInit(VOID *pool, UINT32 size)
{
struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
@@ -853,30 +912,32 @@ STATIC UINT32 OsMemPoolInit(VOID *pool, UINT32 size)
#endif
LOS_SpinInit(&poolHead->spinlock);
- poolHead->info.pool = pool;
- poolHead->info.totalSize = size;
- poolHead->info.attr = OS_MEM_POOL_LOCK_ENABLE; /* default attr: lock, not expand. */
+ poolHead->info.pool = pool; //内存池的起始地址,但注意真正的内存并不是从此处分配,它只是用来记录这个内存块的开始位置而已.
+ poolHead->info.totalSize = size;//内存池总大小
+ poolHead->info.attr = OS_MEM_POOL_LOCK_ENABLE; /* default attr: lock, not expand. | 默认是上锁,不支持扩展,需扩展得另外设置*/
- newNode = OS_MEM_FIRST_NODE(pool);
- newNode->sizeAndFlag = (size - sizeof(struct OsMemPoolHead) - OS_MEM_NODE_HEAD_SIZE);
- newNode->ptr.prev = NULL;
- newNode->magic = OS_MEM_NODE_MAGIC;
- OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);
+ newNode = OS_MEM_FIRST_NODE(pool);//跳到第一个节点位置,即跳过结构体本身位置,真正的分配内存是从newNode开始的.
+ newNode->sizeAndFlag = (size - sizeof(struct OsMemPoolHead) - OS_MEM_NODE_HEAD_SIZE);//这才是可供分配给外界使用的总内存块大小,即数据域
+ //OS_MEM_NODE_HEAD_SIZE 叫当前使用节点,即指 newNode占用的空间
+ newNode->ptr.prev = NULL;//开始是空指向
+ newNode->magic = OS_MEM_NODE_MAGIC;//魔法数字 用于标识这是一个 OsMemNodeHead 节点, 魔法数字不能被覆盖,
+ OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);//添加一个空闲节点,由此有了首个可供分配的空闲节点
/* The last mem node */
- endNode = OS_MEM_END_NODE(pool, size);
- endNode->magic = OS_MEM_NODE_MAGIC;
-#if OS_MEM_EXPAND_ENABLE
- endNode->ptr.next = NULL;
- OsMemSentinelNodeSet(endNode, NULL, 0);
+ endNode = OS_MEM_END_NODE(pool, size);//确定尾节点位置,尾节点没有数据域
+ endNode->magic = OS_MEM_NODE_MAGIC; //填入尾节点的魔法数字
+#if OS_MEM_EXPAND_ENABLE //支持扩展
+ endNode->ptr.next = NULL;//尾节点没有后继节点
+ OsMemSentinelNodeSet(endNode, NULL, 0);//将尾节点设置为哨兵节点
#else
- endNode->sizeAndFlag = 0;
- endNode->ptr.prev = newNode;
+ endNode->sizeAndFlag = 0;//0代表没有数据域
+ endNode->ptr.prev = newNode;//前驱指针指向第一个节点
OS_MEM_NODE_SET_USED_FLAG(endNode->sizeAndFlag);
#endif
-#ifdef LOSCFG_MEM_WATERLINE
- poolHead->info.curUsedSize = sizeof(struct OsMemPoolHead) + OS_MEM_NODE_HEAD_SIZE;
- poolHead->info.waterLine = poolHead->info.curUsedSize;
+#ifdef LOSCFG_MEM_WATERLINE //吃水线开关
+ poolHead->info.curUsedSize = sizeof(struct OsMemPoolHead) + OS_MEM_NODE_HEAD_SIZE;//内存池已使用了这么多空间,这些都是存内存池自身数据的空间,
+ //但此处是否还要算是 endNode ? @note_thinking
+ poolHead->info.waterLine = poolHead->info.curUsedSize; //设置吃水线
#endif
#ifdef LOSCFG_KERNEL_LMS
if (resize != 0) {
@@ -896,13 +957,13 @@ STATIC VOID OsMemPoolDeinit(const VOID *pool, UINT32 size)
#endif
(VOID)memset_s(pool, size, 0, sizeof(struct OsMemPoolHead));
}
-
+/// 新增内存池
STATIC UINT32 OsMemPoolAdd(VOID *pool, UINT32 size)
{
VOID *nextPool = g_poolHead;
VOID *curPool = g_poolHead;
UINTPTR poolEnd;
- while (nextPool != NULL) {
+ while (nextPool != NULL) {//单链表遍历方式
poolEnd = (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool);
if (((pool <= nextPool) && (((UINTPTR)pool + size) > (UINTPTR)nextPool)) ||
(((UINTPTR)pool < poolEnd) && (((UINTPTR)pool + size) >= poolEnd))) {
@@ -916,15 +977,15 @@ STATIC UINT32 OsMemPoolAdd(VOID *pool, UINT32 size)
}
if (g_poolHead == NULL) {
- g_poolHead = pool;
+ g_poolHead = pool; //首个内存池
} else {
- ((struct OsMemPoolHead *)curPool)->nextPool = pool;
+ ((struct OsMemPoolHead *)curPool)->nextPool = pool; //两池扯上关系
}
- ((struct OsMemPoolHead *)pool)->nextPool = NULL;
+ ((struct OsMemPoolHead *)pool)->nextPool = NULL; //新池下一个无所指
return LOS_OK;
}
-
+/// 删除内存池
STATIC UINT32 OsMemPoolDelete(const VOID *pool)
{
UINT32 ret = LOS_NOK;
@@ -955,29 +1016,39 @@ STATIC UINT32 OsMemPoolDelete(const VOID *pool)
}
#endif
+/*!
+ * @brief LOS_MemInit 初始化一块指定的动态内存池,大小为size
+ * 初始一个内存池后生成一个内存池控制头、尾节点EndNode,剩余的内存被标记为FreeNode内存节点。
+ * @param pool
+ * @param size
+ * @return
+ * @attention EndNode作为内存池末尾的节点,size为0。
+ * @see
+ */
UINT32 LOS_MemInit(VOID *pool, UINT32 size)
{
if ((pool == NULL) || (size <= OS_MEM_MIN_POOL_SIZE)) {
return OS_ERROR;
}
- size = OS_MEM_ALIGN(size, OS_MEM_ALIGN_SIZE);
+ size = OS_MEM_ALIGN(size, OS_MEM_ALIGN_SIZE);//4个字节对齐
if (OsMemPoolInit(pool, size)) {
return OS_ERROR;
}
-#ifdef LOSCFG_MEM_MUL_POOL
+#ifdef LOSCFG_MEM_MUL_POOL //多内存池开关
if (OsMemPoolAdd(pool, size)) {
(VOID)OsMemPoolDeInit(pool, size);
return OS_ERROR;
}
#endif
- OsHookCall(LOS_HOOK_TYPE_MEM_INIT, pool, size);
+ OsHookCall(LOS_HOOK_TYPE_MEM_INIT, pool, size);//打印日志
return LOS_OK;
}
#ifdef LOSCFG_MEM_MUL_POOL
+/// 删除指定内存池
UINT32 LOS_MemDeInit(VOID *pool)
{
struct OsMemPoolHead *tmpPool = (struct OsMemPoolHead *)pool;
@@ -997,7 +1068,7 @@ UINT32 LOS_MemDeInit(VOID *pool)
OsHookCall(LOS_HOOK_TYPE_MEM_DEINIT, tmpPool);
return LOS_OK;
}
-
+/// 打印系统中已初始化的所有内存池,包括内存池的起始地址、内存池大小、空闲内存总大小、已使用内存总大小、最大的空闲内存块大小、空闲内存块数量、已使用的内存块数量。
UINT32 LOS_MemPoolList(VOID)
{
VOID *nextPool = g_poolHead;
@@ -1011,7 +1082,7 @@ UINT32 LOS_MemPoolList(VOID)
return index;
}
#endif
-
+/// 从指定动态内存池中申请size长度的内存
STATIC INLINE VOID *OsMemAlloc(struct OsMemPoolHead *pool, UINT32 size, UINT32 intSave)
{
struct OsMemNodeHead *allocNode = NULL;
@@ -1024,15 +1095,15 @@ STATIC INLINE VOID *OsMemAlloc(struct OsMemPoolHead *pool, UINT32 size, UINT32 i
UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
#if OS_MEM_EXPAND_ENABLE
-retry:
+retry: //这种写法也挺赞的 @note_good
#endif
- allocNode = OsMemFreeNodeGet(pool, allocSize);
- if (allocNode == NULL) {
+ allocNode = OsMemFreeNodeGet(pool, allocSize);//获取空闲节点
+ if (allocNode == NULL) {//没有内存了,怎搞?
#if OS_MEM_EXPAND_ENABLE
if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) {
- INT32 ret = OsMemPoolExpand(pool, allocSize, intSave);
+ INT32 ret = OsMemPoolExpand(pool, allocSize, intSave);//扩展内存池
if (ret == 0) {
- goto retry;
+ goto retry;//再来一遍
}
}
#endif
@@ -1047,22 +1118,22 @@ retry:
return NULL;
}
- if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= allocNode->sizeAndFlag) {
- OsMemSplitNode(pool, allocNode, allocSize);
+ if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= allocNode->sizeAndFlag) {//所需小于内存池可供分配量
+ OsMemSplitNode(pool, allocNode, allocSize);//劈开内存池
}
- OS_MEM_NODE_SET_USED_FLAG(allocNode->sizeAndFlag);
- OsMemWaterUsedRecord(pool, OS_MEM_NODE_GET_SIZE(allocNode->sizeAndFlag));
+ OS_MEM_NODE_SET_USED_FLAG(allocNode->sizeAndFlag);//给节点贴上已使用的标签
+ OsMemWaterUsedRecord(pool, OS_MEM_NODE_GET_SIZE(allocNode->sizeAndFlag));//更新吃水线
-#ifdef LOSCFG_MEM_LEAKCHECK
+#ifdef LOSCFG_MEM_LEAKCHECK //检测内存泄漏开关
OsMemLinkRegisterRecord(allocNode);
#endif
- return OsMemCreateUsedNode((VOID *)allocNode);
+ return OsMemCreateUsedNode((VOID *)allocNode);//创建已使用节点
}
-
+/// 从指定内存池中申请size长度的内存,注意这可不是从内核堆空间中申请内存
VOID *LOS_MemAlloc(VOID *pool, UINT32 size)
{
- if ((pool == NULL) || (size == 0)) {
+ if ((pool == NULL) || (size == 0)) {//没提供内存池时
return (size > 0) ? OsVmBootMemAlloc(size) : NULL;
}
@@ -1079,14 +1150,14 @@ VOID *LOS_MemAlloc(VOID *pool, UINT32 size)
break;
}
MEM_LOCK(poolHead, intSave);
- ptr = OsMemAlloc(poolHead, size, intSave);
+ ptr = OsMemAlloc(poolHead, size, intSave);//真正的分配内存函数,详细查看 鸿蒙内核源码分析(内存池篇)
MEM_UNLOCK(poolHead, intSave);
} while (0);
- OsHookCall(LOS_HOOK_TYPE_MEM_ALLOC, pool, ptr, size);
+ OsHookCall(LOS_HOOK_TYPE_MEM_ALLOC, pool, ptr, size);//打印日志,到此一游
return ptr;
}
-
+/// 从指定内存池中申请size长度的内存且地址按boundary字节对齐的内存
VOID *LOS_MemAllocAlign(VOID *pool, UINT32 size, UINT32 boundary)
{
UINT32 gapSize;
@@ -1143,10 +1214,10 @@ VOID *LOS_MemAllocAlign(VOID *pool, UINT32 size, UINT32 boundary)
ptr = alignedPtr;
} while (0);
- OsHookCall(LOS_HOOK_TYPE_MEM_ALLOCALIGN, pool, ptr, size, boundary);
+ OsHookCall(LOS_HOOK_TYPE_MEM_ALLOCALIGN, pool, ptr, size, boundary);//打印对齐日志,表示程序曾临幸过此处
return ptr;
}
-
+/// 内存池有效性检查
STATIC INLINE BOOL OsMemAddrValidCheck(const struct OsMemPoolHead *pool, const VOID *addr)
{
UINT32 size;
@@ -1160,7 +1231,7 @@ STATIC INLINE BOOL OsMemAddrValidCheck(const struct OsMemPoolHead *pool, const V
if (OS_MEM_MIDDLE_ADDR_OPEN_END(pool + 1, addr, (UINTPTR)pool + size)) {
return TRUE;
}
-#if OS_MEM_EXPAND_ENABLE
+#if OS_MEM_EXPAND_ENABLE //如果支持可扩展
struct OsMemNodeHead *node = NULL;
struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, size);
while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
@@ -1250,7 +1321,7 @@ STATIC UINT32 OsMemCheckUsedNode(const struct OsMemPoolHead *pool, const struct
return LOS_OK;
}
-
+/// 释放内存
STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node)
{
UINT32 ret = OsMemCheckUsedNode(pool, node);
@@ -1260,10 +1331,10 @@ STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead
}
#ifdef LOSCFG_MEM_WATERLINE
- pool->info.curUsedSize -= OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
+ pool->info.curUsedSize -= OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);//降低水位线
#endif
- node->sizeAndFlag = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);
+ node->sizeAndFlag = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);//获取大小和标记
#ifdef LOSCFG_MEM_LEAKCHECK
OsMemLinkRegisterRecord(node);
#endif
@@ -1274,17 +1345,17 @@ STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead
g_lms->check((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, TRUE);
}
#endif
- struct OsMemNodeHead *preNode = node->ptr.prev; /* merage preNode */
+ struct OsMemNodeHead *preNode = node->ptr.prev; /* merge preNode | 合并前一个节点*/
if ((preNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) {
- OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)preNode);
- OsMemMergeNode(node);
+ OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)preNode);//删除前节点的信息
+ OsMemMergeNode(node);//向前合并
node = preNode;
}
- struct OsMemNodeHead *nextNode = OS_MEM_NEXT_NODE(node); /* merage nextNode */
+ struct OsMemNodeHead *nextNode = OS_MEM_NEXT_NODE(node); /* merge nextNode | 计算后一个节点位置*/
if ((nextNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
- OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
- OsMemMergeNode(nextNode);
+ OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);//删除后节点信息
+ OsMemMergeNode(nextNode);//合并节点
}
#if OS_MEM_EXPAND_ENABLE
@@ -1305,7 +1376,7 @@ STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead
#endif
return ret;
}
-
+/// 释放从指定动态内存中申请的内存
UINT32 LOS_MemFree(VOID *pool, VOID *ptr)
{
UINT32 intSave;
@@ -1321,13 +1392,13 @@ UINT32 LOS_MemFree(VOID *pool, VOID *ptr)
struct OsMemNodeHead *node = NULL;
do {
- UINT32 gapSize = *(UINT32 *)((UINTPTR)ptr - sizeof(UINT32));
+ UINT32 gapSize = *(UINT32 *)((UINTPTR)ptr - sizeof(UINT32));//获取节点大小和标签 即: sizeAndFlag
if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize) && OS_MEM_NODE_GET_USED_FLAG(gapSize)) {
PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize);
break;
}
- node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);
+ node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);//定位到节点开始位置
if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize)) {
gapSize = OS_MEM_NODE_GET_ALIGNED_GAPSIZE(gapSize);
@@ -1415,7 +1486,7 @@ STATIC INLINE VOID *OsGetRealPtr(const VOID *pool, VOID *ptr)
}
STATIC INLINE VOID *OsMemRealloc(struct OsMemPoolHead *pool, const VOID *ptr,
- struct OsMemNodeHead *node, UINT32 size, UINT32 intSave)
+ struct OsMemNodeHead *node, UINT32 size, UINT32 intSave)
{
struct OsMemNodeHead *nextNode = NULL;
UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE);
@@ -1446,7 +1517,7 @@ STATIC INLINE VOID *OsMemRealloc(struct OsMemPoolHead *pool, const VOID *ptr,
}
return tmpPtr;
}
-
+/// 按size大小重新分配内存块,并将原内存块内容拷贝到新内存块。如果新内存块申请成功,则释放原内存块
VOID *LOS_MemRealloc(VOID *pool, VOID *ptr, UINT32 size)
{
if ((pool == NULL) || OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) {
@@ -1539,7 +1610,7 @@ UINT32 LOS_MemFreeByTaskID(VOID *pool, UINT32 taskID)
return LOS_OK;
}
#endif
-
+/// 获取指定动态内存池的总大小
UINT32 LOS_MemPoolSizeGet(const VOID *pool)
{
UINT32 count = 0;
@@ -1548,23 +1619,23 @@ UINT32 LOS_MemPoolSizeGet(const VOID *pool)
return LOS_NOK;
}
- count += ((struct OsMemPoolHead *)pool)->info.totalSize;
+ count += ((struct OsMemPoolHead *)pool)->info.totalSize; // 这里的 += 好像没必要吧?, = 就可以了, @note_thinking
-#if OS_MEM_EXPAND_ENABLE
+#if OS_MEM_EXPAND_ENABLE //支持扩展
UINT32 size;
struct OsMemNodeHead *node = NULL;
- struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, count);
+ struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, count);//获取哨兵节点
- while (OsMemIsLastSentinelNode(sentinel) == FALSE) {
- size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);
- node = OsMemSentinelNodeGet(sentinel);
- sentinel = OS_MEM_END_NODE(node, size);
- count += size;
+ while (OsMemIsLastSentinelNode(sentinel) == FALSE) {//不是最后一个节点
+ size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag);//数据域大小
+ node = OsMemSentinelNodeGet(sentinel);//再获取哨兵节点
+ sentinel = OS_MEM_END_NODE(node, size);//获取尾节点
+ count += size; //内存池大小变大
}
#endif
return count;
}
-
+/// 获取指定动态内存池的总使用量大小
UINT32 LOS_MemTotalUsedGet(VOID *pool)
{
struct OsMemNodeHead *tmpNode = NULL;
@@ -1637,7 +1708,7 @@ STATIC UINT32 OsMemAddrValidCheckPrint(const VOID *pool, struct OsMemFreeNodeHea
}
STATIC UINT32 OsMemIntegrityCheckSub(struct OsMemNodeHead **tmpNode, const VOID *pool,
- const struct OsMemNodeHead *endNode)
+ const struct OsMemNodeHead *endNode)
{
if (!OS_MEM_MAGIC_VALID(*tmpNode)) {
OsMemMagicCheckPrint(tmpNode);
@@ -1653,7 +1724,7 @@ STATIC UINT32 OsMemIntegrityCheckSub(struct OsMemNodeHead **tmpNode, const VOID
}
STATIC UINT32 OsMemFreeListNodeCheck(const struct OsMemPoolHead *pool,
- const struct OsMemFreeNodeHead *node)
+ const struct OsMemFreeNodeHead *node)
{
if (!OsMemAddrValidCheck(pool, node) ||
!OsMemAddrValidCheck(pool, node->prev) ||
@@ -1714,9 +1785,9 @@ OUT:
#endif
}
}
-
+//对指定内存池做完整性检查,
STATIC UINT32 OsMemIntegrityCheck(const struct OsMemPoolHead *pool, struct OsMemNodeHead **tmpNode,
- struct OsMemNodeHead **preNode)
+ struct OsMemNodeHead **preNode)
{
struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);
@@ -1839,7 +1910,7 @@ STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave)
return LOS_OK;
}
#endif
-
+/// 对指定内存池做完整性检查
UINT32 LOS_MemIntegrityCheck(const VOID *pool)
{
if (pool == NULL) {
@@ -1864,7 +1935,7 @@ ERROR_OUT:
}
STATIC INLINE VOID OsMemInfoGet(struct OsMemPoolHead *poolInfo, struct OsMemNodeHead *node,
- LOS_MEM_POOL_STATUS *poolStatus)
+ LOS_MEM_POOL_STATUS *poolStatus)
{
UINT32 totalUsedSize = 0;
UINT32 totalFreeSize = 0;
@@ -1893,8 +1964,17 @@ STATIC INLINE VOID OsMemInfoGet(struct OsMemPoolHead *poolInfo, struct OsMemNode
poolStatus->freeNodeNum += freeNodeNum;
}
+/*!
+ * @brief LOS_MemInfoGet
+ * 获取指定内存池的内存结构信息,包括空闲内存大小、已使用内存大小、空闲内存块数量、已使用的内存块数量、最大的空闲内存块大小
+ * @param pool
+ * @param poolStatus
+ * @return
+ *
+ * @see
+ */
UINT32 LOS_MemInfoGet(VOID *pool, LOS_MEM_POOL_STATUS *poolStatus)
-{
+{//内存碎片率计算:同样调用LOS_MemInfoGet接口,可以获取内存池的剩余内存大小和最大空闲内存块大小,然后根据公式(fragment=100-最大空闲内存块大小/剩余内存大小)得出此时的动态内存池碎片率。
struct OsMemPoolHead *poolInfo = pool;
if (poolStatus == NULL) {
@@ -1975,7 +2055,7 @@ STATIC VOID OsMemInfoPrint(VOID *pool)
status.freeNodeNum);
#endif
}
-
+/// 打印指定内存池的空闲内存块的大小及数量
UINT32 LOS_MemFreeNodeShow(VOID *pool)
{
struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
@@ -2022,7 +2102,7 @@ UINT32 LOS_MemFreeNodeShow(VOID *pool)
return LOS_OK;
}
-
+///内核空间动态内存(堆内存)初始化 , 争取系统动态内存池
STATUS_T OsKHeapInit(size_t size)
{
STATUS_T ret;
@@ -2041,38 +2121,38 @@ STATUS_T OsKHeapInit(size_t size)
return -1;
}
- m_aucSysMem0 = m_aucSysMem1 = ptr;
- ret = LOS_MemInit(m_aucSysMem0, size);
+ m_aucSysMem0 = m_aucSysMem1 = ptr;// 指定内核内存池的位置
+ ret = LOS_MemInit(m_aucSysMem0, size); //初始化内存池,供内核分配动态内存
if (ret != LOS_OK) {
PRINT_ERR("vmm_kheap_init LOS_MemInit failed!\n");
g_vmBootMemBase -= size;
return ret;
}
#if OS_MEM_EXPAND_ENABLE
- LOS_MemExpandEnable(OS_SYS_MEM_ADDR);
+ LOS_MemExpandEnable(OS_SYS_MEM_ADDR);//支持扩展系统动态内存
#endif
return LOS_OK;
}
-
+///< 判断地址是否在堆区
BOOL OsMemIsHeapNode(const VOID *ptr)
{
- struct OsMemPoolHead *pool = (struct OsMemPoolHead *)m_aucSysMem1;
- struct OsMemNodeHead *firstNode = OS_MEM_FIRST_NODE(pool);
- struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);
+ struct OsMemPoolHead *pool = (struct OsMemPoolHead *)m_aucSysMem1;//内核堆区开始地址
+ struct OsMemNodeHead *firstNode = OS_MEM_FIRST_NODE(pool);//获取内存池首个节点
+ struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);//获取内存池的尾节点
- if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) {
+ if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) {//如果在首尾范围内
return TRUE;
}
-#if OS_MEM_EXPAND_ENABLE
- UINT32 intSave;
- UINT32 size;
- MEM_LOCK(pool, intSave);
- while (OsMemIsLastSentinelNode(endNode) == FALSE) {
- size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
- firstNode = OsMemSentinelNodeGet(endNode);
- endNode = OS_MEM_END_NODE(firstNode, size);
- if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) {
+#if OS_MEM_EXPAND_ENABLE//内存池经过扩展后,新旧块的虚拟地址是不连续的,所以需要跳块判断
+ UINT32 intSave;
+ UINT32 size;//详细查看百篇博客系列篇之 鸿蒙内核源码分析(内存池篇)
+ MEM_LOCK(pool, intSave); //获取自旋锁
+ while (OsMemIsLastSentinelNode(endNode) == FALSE) { //哨兵节点是内存池结束的标记
+ size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);//获取节点大小
+ firstNode = OsMemSentinelNodeGet(endNode);//获取下一块的开始地址
+ endNode = OS_MEM_END_NODE(firstNode, size);//获取下一块的尾节点
+ if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) {//判断地址是否在该块中
MEM_UNLOCK(pool, intSave);
return TRUE;
}
@@ -2081,3 +2161,5 @@ BOOL OsMemIsHeapNode(const VOID *ptr)
#endif
return FALSE;
}
+
+
diff --git a/src/kernel_liteos_a/kernel/base/misc/kill_shellcmd.c b/src/kernel_liteos_a/kernel/base/misc/kill_shellcmd.c
index e21dca7d..ee4ac314 100644
--- a/src/kernel_liteos_a/kernel/base/misc/kill_shellcmd.c
+++ b/src/kernel_liteos_a/kernel/base/misc/kill_shellcmd.c
@@ -52,7 +52,23 @@ LITE_OS_SEC_TEXT_MINOR VOID OsPrintKillUsage(VOID)
{
PRINTK("\nkill: usage: kill [sigspec] [pid]\n");
}
+/*********************************************
+命令功能
+命令用于发送特定信号给指定进程。
+命令格式
+kill [signo | -signo] [pid]
+
+参数 参数说明 取值范围
+signo 信号ID [1,30]
+pid 进程ID [1,MAX_INT]
+
+须知: signo有效范围为[0,64],建议取值范围为[1,30],其余为保留内容。
+
+使用指南
+必须指定发送的信号编号及进程号。
+进程编号取值范围根据系统配置变化,例如系统最大支持pid为256,则取值范围缩小为[1-256]。
+*********************************************/
LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdKill(INT32 argc, const CHAR **argv)
{
#define ARG_NUM 2
diff --git a/src/kernel_liteos_a/kernel/base/misc/los_stackinfo.c b/src/kernel_liteos_a/kernel/base/misc/los_stackinfo.c
index 6f96a8a5..ae3950dc 100644
--- a/src/kernel_liteos_a/kernel/base/misc/los_stackinfo.c
+++ b/src/kernel_liteos_a/kernel/base/misc/los_stackinfo.c
@@ -38,27 +38,53 @@
#include "shell.h"
#endif
-const StackInfo *g_stackInfo = NULL;
-UINT32 g_stackNum;
+/**
+ * @file los_stackinfo.c
+ * @brief 栈内容
+ * @verbatim
+ @note_pic OsExcStackInfo 各个CPU栈布局图,其他栈也是一样,CPU各核硬件栈都是紧挨着
+ __undef_stack(SMP)
+ +-------------------+ <--- cpu1 top
+ | |
+ | CPU core1 |
+ | |
+ +--------------------<--- cpu2 top
+ | |
+ | cpu core 2 |
+ | |
+ +--------------------<--- cpu3 top
+ | |
+ | cpu core 3 |
+ | |
+ +--------------------<--- cpu4 top
+ | |
+ | cpu core 4 |
+ | |
+ +-------------------+
+ * @endverbatim
+ */
+const StackInfo *g_stackInfo = NULL; ///< CPU所有工作模式的栈信息
+UINT32 g_stackNum; ///< CPU所有工作模式的栈数量
+///获取栈的吃水线
UINT32 OsStackWaterLineGet(const UINTPTR *stackBottom, const UINTPTR *stackTop, UINT32 *peakUsed)
{
UINT32 size;
const UINTPTR *tmp = NULL;
- if (*stackTop == OS_STACK_MAGIC_WORD) {
+ if (*stackTop == OS_STACK_MAGIC_WORD) {//栈顶值是否等于 magic 0xCCCCCCCC
tmp = stackTop + 1;
- while ((tmp < stackBottom) && (*tmp == OS_STACK_INIT)) {
+ while ((tmp < stackBottom) && (*tmp == OS_STACK_INIT)) {//记录从栈顶到栈底有多少个连续的 0xCACACACA
tmp++;
}
- size = (UINT32)((UINTPTR)stackBottom - (UINTPTR)tmp);
- *peakUsed = (size == 0) ? size : (size + sizeof(CHAR *));
+ size = (UINT32)((UINTPTR)stackBottom - (UINTPTR)tmp);//剩余多少非0xCACACACA的栈空间
+ *peakUsed = (size == 0) ? size : (size + sizeof(CHAR *));//得出高峰用值,还剩多少可用
return LOS_OK;
} else {
- *peakUsed = OS_INVALID_WATERLINE;
+ *peakUsed = OS_INVALID_WATERLINE;//栈溢出了
return LOS_NOK;
}
}
-
+///异常情况下的栈检查,主要就是检查栈顶值有没有被改写
VOID OsExcStackCheck(VOID)
{
UINT32 index;
@@ -71,7 +97,7 @@ VOID OsExcStackCheck(VOID)
for (index = 0; index < g_stackNum; index++) {
for (cpuid = 0; cpuid < LOSCFG_KERNEL_CORE_NUM; cpuid++) {
stackTop = (UINTPTR *)((UINTPTR)g_stackInfo[index].stackTop + cpuid * g_stackInfo[index].stackSize);
- if (*stackTop != OS_STACK_MAGIC_WORD) {
+ if (*stackTop != OS_STACK_MAGIC_WORD) {// 只要栈顶内容不是 0xCCCCCCCC 就是溢出了.
PRINT_ERR("cpu:%u %s overflow , magic word changed to 0x%x\n",
LOSCFG_KERNEL_CORE_NUM - 1 - cpuid, g_stackInfo[index].stackName, *stackTop);
}
@@ -79,6 +105,7 @@ VOID OsExcStackCheck(VOID)
}
}
+///打印栈的信息 把每个CPU的栈信息打印出来
VOID OsExcStackInfo(VOID)
{
UINT32 index;
@@ -93,36 +120,37 @@ VOID OsExcStackInfo(VOID)
PrintExcInfo("\n stack name cpu id stack addr total size used size\n"
" ---------- ------ --------- -------- --------\n");
-
for (index = 0; index < g_stackNum; index++) {
- for (cpuid = 0; cpuid < LOSCFG_KERNEL_CORE_NUM; cpuid++) {
+ for (cpuid = 0; cpuid < LOSCFG_KERNEL_CORE_NUM; cpuid++) {//可以看出 各个CPU的栈是紧挨的的
stackTop = (UINTPTR *)((UINTPTR)g_stackInfo[index].stackTop + cpuid * g_stackInfo[index].stackSize);
stack = (UINTPTR *)((UINTPTR)stackTop + g_stackInfo[index].stackSize);
- (VOID)OsStackWaterLineGet(stack, stackTop, &size);
+ (VOID)OsStackWaterLineGet(stack, stackTop, &size);//获取吃水线, 鸿蒙用WaterLine 这个词用的很妙
PrintExcInfo("%11s %-5d %-10p 0x%-8x 0x%-4x\n", g_stackInfo[index].stackName,
LOSCFG_KERNEL_CORE_NUM - 1 - cpuid, stackTop, g_stackInfo[index].stackSize, size);
}
}
- OsExcStackCheck();
+ OsExcStackCheck();//发生异常时栈检查
}
+///注册栈信息
VOID OsExcStackInfoReg(const StackInfo *stackInfo, UINT32 stackNum)
{
- g_stackInfo = stackInfo;
+ g_stackInfo = stackInfo; //全局变量指向g_excStack
g_stackNum = stackNum;
}
+///task栈的初始化,设置固定的值. 0xcccccccc 和 0xcacacaca
VOID OsStackInit(VOID *stacktop, UINT32 stacksize)
{
/* initialize the task stack, write magic num to stack top */
- errno_t ret = memset_s(stacktop, stacksize, (INT32)OS_STACK_INIT, stacksize);
+ errno_t ret = memset_s(stacktop, stacksize, (INT32)OS_STACK_INIT, stacksize);//清一色填 0xCACACACA
if (ret == EOK) {
- *((UINTPTR *)stacktop) = OS_STACK_MAGIC_WORD;
+ *((UINTPTR *)stacktop) = OS_STACK_MAGIC_WORD;//0xCCCCCCCC 中文就是"烫烫烫烫" 这几个字懂点计算机的人都不会陌生了.
}
}
#ifdef LOSCFG_SHELL_CMD_DEBUG
-SHELLCMD_ENTRY(stack_shellcmd, CMD_TYPE_EX, "stack", 1, (CmdCallBackFunc)OsExcStackInfo);
+SHELLCMD_ENTRY(stack_shellcmd, CMD_TYPE_EX, "stack", 1, (CmdCallBackFunc)OsExcStackInfo);//采用shell命令静态注册方式
#endif
diff --git a/src/kernel_liteos_a/kernel/base/misc/mempt_shellcmd.c b/src/kernel_liteos_a/kernel/base/misc/mempt_shellcmd.c
index 0e8ec47b..7177ace6 100644
--- a/src/kernel_liteos_a/kernel/base/misc/mempt_shellcmd.c
+++ b/src/kernel_liteos_a/kernel/base/misc/mempt_shellcmd.c
@@ -193,7 +193,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdUname(INT32 argc, const CHAR *argv[])
if (argc == 1) {
if (strcmp(argv[0], "-a") == 0) {
- PRINTK("%s %d.%d.%d.%d %s %s\n", KERNEL_NAME, KERNEL_MAJOR, KERNEL_MINOR, KERNEL_PATCH, KERNEL_ITRE, \
+ PRINTK("%s %d.%d.%d.%d %s %s\n", KERNEL_NAME, KERNEL_MAJOR, KERNEL_MINOR, KERNEL_PATCH, KERNEL_ITRE,\
__DATE__, __TIME__);
return 0;
} else if (strcmp(argv[0], "-s") == 0) {
diff --git a/src/kernel_liteos_a/kernel/base/misc/swtmr_shellcmd.c b/src/kernel_liteos_a/kernel/base/misc/swtmr_shellcmd.c
index 81573ef4..49c8349c 100644
--- a/src/kernel_liteos_a/kernel/base/misc/swtmr_shellcmd.c
+++ b/src/kernel_liteos_a/kernel/base/misc/swtmr_shellcmd.c
@@ -58,20 +58,21 @@ STATIC VOID OsPrintSwtmrMsg(const SWTMR_CTRL_S *swtmr)
(VOID)LOS_SwtmrTimeGet(swtmr->usTimerID, &ticks);
PRINTK("%7u%10s%8s%12u%7u%#12x%#12x\n",
- swtmr->usTimerID % LOSCFG_BASE_CORE_SWTMR_LIMIT,
- g_shellSwtmrStatus[swtmr->ucState],
- g_shellSwtmrMode[swtmr->ucMode],
- swtmr->uwInterval,
+ swtmr->usTimerID % LOSCFG_BASE_CORE_SWTMR_LIMIT, //软件定时器ID。
+ g_shellSwtmrStatus[swtmr->ucState], //软件定时器状态,状态可能为:"UnUsed", "Created", "Ticking"。
+ g_shellSwtmrMode[swtmr->ucMode], //软件定时器模式。模式可能为:"Once", "Period", "NSD(单次定时器,定时结束后不会自动删除)"
+ swtmr->uwInterval, //软件定时器使用的Tick数。
ticks,
- swtmr->uwArg,
- swtmr->pfnHandler);
+ swtmr->uwArg, //传入的参数。
+ swtmr->pfnHandler); //回调函数的地址。
}
STATIC INLINE VOID OsPrintSwtmrMsgHead(VOID)
{
PRINTK("\r\nSwTmrID State Mode Interval Count Arg handlerAddr\n");
}
-
+///shell命令之swtmr 命令用于查询系统软件定时器相关信息。
+//参数缺省时,默认显示所有软件定时器的相关信息。
STATIC UINT32 SwtmrBaseInfoGet(UINT32 timerID)
{
SWTMR_CTRL_S *swtmr = g_swtmrCBArray;
@@ -173,7 +174,6 @@ SWTMR_HELP:
PRINTK(" swtmr ID --- Specifies information about a software timer.\n");
return LOS_OK;
}
-
-SHELLCMD_ENTRY(swtmr_shellcmd, CMD_TYPE_EX, "swtmr", 1, (CmdCallBackFunc)OsShellCmdSwtmrInfoGet);
+SHELLCMD_ENTRY(swtmr_shellcmd, CMD_TYPE_EX, "swtmr", 1, (CmdCallBackFunc)OsShellCmdSwtmrInfoGet);//采用shell命令静态注册方式
#endif /* LOSCFG_SHELL */
diff --git a/src/kernel_liteos_a/kernel/base/misc/sysinfo_shellcmd.c b/src/kernel_liteos_a/kernel/base/misc/sysinfo_shellcmd.c
index b7c56f84..1d3849b2 100644
--- a/src/kernel_liteos_a/kernel/base/misc/sysinfo_shellcmd.c
+++ b/src/kernel_liteos_a/kernel/base/misc/sysinfo_shellcmd.c
@@ -118,7 +118,7 @@ UINT32 OsShellCmdSwtmrCntGet(VOID)
LOS_IntRestore(intSave);
return swtmrCnt;
}
-
+///查看系统资源使用情况
LITE_OS_SEC_TEXT_MINOR VOID OsShellCmdSystemInfoGet(VOID)
{
UINT8 isTaskEnable = TRUE;
@@ -137,27 +137,27 @@ LITE_OS_SEC_TEXT_MINOR VOID OsShellCmdSystemInfoGet(VOID)
#else
UINT8 isSwtmrEnable = FALSE;
#endif
-
+//模块名称 当前使用量 最大可用量 模块是否开启
PRINTK("\n Module Used Total Enabled\n");
PRINTK("--------------------------------------------\n");
PRINTK(" Task %-10u%-10d%s\n",
- OsShellCmdTaskCntGet(),
- LOSCFG_BASE_CORE_TSK_LIMIT,
- SYSINFO_ENABLED(isTaskEnable));
+ OsShellCmdTaskCntGet(), //有效任务数
+ LOSCFG_BASE_CORE_TSK_LIMIT, //任务最大数 128
+ SYSINFO_ENABLED(isTaskEnable));//任务是否失效 YES or NO
PRINTK(" Sem %-10u%-10d%s\n",
- OsShellCmdSemCntGet(),
- LOSCFG_BASE_IPC_SEM_LIMIT,
- SYSINFO_ENABLED(isSemEnable));
+ OsShellCmdSemCntGet(), //信号量的数量
+ LOSCFG_BASE_IPC_SEM_LIMIT, //信号量最大数 1024
+ SYSINFO_ENABLED(isSemEnable));//信号量是否失效 YES or NO
PRINTK(" Queue %-10u%-10d%s\n",
- OsShellCmdQueueCntGet(),
- LOSCFG_BASE_IPC_QUEUE_LIMIT,
- SYSINFO_ENABLED(isQueueEnable));
+ OsShellCmdQueueCntGet(), //队列的数量
+ LOSCFG_BASE_IPC_QUEUE_LIMIT, //队列的最大数 1024
+ SYSINFO_ENABLED(isQueueEnable));//队列是否失效 YES or NO
PRINTK(" SwTmr %-10u%-10d%s\n",
- OsShellCmdSwtmrCntGet(),
- LOSCFG_BASE_CORE_SWTMR_LIMIT,
- SYSINFO_ENABLED(isSwtmrEnable));
+ OsShellCmdSwtmrCntGet(), //定时器的数量
+ LOSCFG_BASE_CORE_SWTMR_LIMIT, //定时器的总数 1024
+ SYSINFO_ENABLED(isSwtmrEnable)); //定时器是否失效 YES or NO
}
-
+///systeminfo命令用于显示当前操作系统内资源使用情况,包括任务、信号量、互斥量、队列、定时器等。
INT32 OsShellCmdSystemInfo(INT32 argc, const CHAR **argv)
{
if (argc == 0) {
diff --git a/src/kernel_liteos_a/kernel/base/misc/vm_shellcmd.c b/src/kernel_liteos_a/kernel/base/misc/vm_shellcmd.c
index 445014b3..b8455e8a 100644
--- a/src/kernel_liteos_a/kernel/base/misc/vm_shellcmd.c
+++ b/src/kernel_liteos_a/kernel/base/misc/vm_shellcmd.c
@@ -54,7 +54,7 @@
#define VMM_CMD "vmm"
#define OOM_CMD "oom"
#define VMM_PMM_CMD "v2p"
-
+//dump内核空间
LITE_OS_SEC_TEXT_MINOR VOID OsDumpKernelAspace(VOID)
{
LosVmSpace *kAspace = LOS_GetKVmSpace();
@@ -104,26 +104,26 @@ LITE_OS_SEC_TEXT_MINOR VOID OsDoDumpVm(pid_t pid)
PRINTK("\tThe process [%d] not active\n", pid);
}
}
-
+///查看进程的虚拟内存使用情况。vmm [-a / -h / --help], vmm [pid]
LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdDumpVm(INT32 argc, const CHAR *argv[])
{
- if (argc == 0) {
+ if (argc == 0) { //没有参数 使用 # vmm 查看所有进程使用虚拟内存的情况
OsDumpAllAspace();
} else if (argc == 1) {
pid_t pid = OsPid(argv[0]);
- if (strcmp(argv[0], "-a") == 0) {
+ if (strcmp(argv[0], "-a") == 0) { //# vmm -a 查看所有进程使用虚拟内存的情况
OsDumpAllAspace();
- } else if (strcmp(argv[0], "-k") == 0) {
+ } else if (strcmp(argv[0], "-k") == 0) {//# vmm -k 查看内核进程使用虚拟内存的情况
OsDumpKernelAspace();
- } else if (pid >= 0) {
+ } else if (pid >= 0) { //# vmm 3 查看3号进程使用虚拟内存的情况
OsDoDumpVm(pid);
- } else if (strcmp(argv[0], "-h") == 0 || strcmp(argv[0], "--help") == 0) {
+ } else if (strcmp(argv[0], "-h") == 0 || strcmp(argv[0], "--help") == 0) { //# vmm -h 或者 vmm --help
OsPrintUsage();
} else {
- PRINTK("%s: invalid option: %s\n", VMM_CMD, argv[0]);
+ PRINTK("%s: invalid option: %s\n", VMM_CMD, argv[0]); //格式错误,输出规范格式
OsPrintUsage();
}
- } else {
+ } else { //多于一个参数 例如 # vmm 3 9
OsPrintUsage();
}
@@ -135,7 +135,7 @@ LITE_OS_SEC_TEXT_MINOR VOID V2PPrintUsage(VOID)
PRINTK("pid vaddr(0x1000000~0x3e000000), print physical address of virtual address\n"
"-h | --help, print v2p command usage\n");
}
-
+///v2p 虚拟内存对应的物理内存
LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdV2P(INT32 argc, const CHAR *argv[])
{
UINT32 vaddr;
@@ -180,7 +180,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdV2P(INT32 argc, const CHAR *argv[])
return LOS_OK;
}
-
+///查看系统内存物理页及pagecache物理页使用情况 , Debug版本才具备的命令 # pmm
LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdDumpPmm(VOID)
{
OsVmPhysDump();
@@ -192,12 +192,13 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdDumpPmm(VOID)
LITE_OS_SEC_TEXT_MINOR VOID OomPrintUsage(VOID)
{
- PRINTK("\t-i [interval], set oom check interval (ms)\n"
- "\t-m [mem byte], set oom low memory threshold (Byte)\n"
- "\t-r [mem byte], set page cache reclaim memory threshold (Byte)\n"
- "\t-h | --help, print vmm command usage\n");
+ PRINTK("\t-i [interval], set oom check interval (ms)\n" //设置oom线程任务检查的时间间隔。
+ "\t-m [mem byte], set oom low memory threshold (Byte)\n" //设置低内存阈值。
+ "\t-r [mem byte], set page cache reclaim memory threshold (Byte)\n" //设置pagecache内存回收阈值。
+ "\t-h | --help, print vmm command usage\n"); //使用帮助。
}
-
+///查看和设置低内存阈值以及pagecache内存回收阈值。参数缺省时,显示oom功能当前配置信息。
+//当系统内存不足时,会打印出内存不足的提示信息。
LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[])
{
UINT32 lowMemThreshold;
@@ -219,7 +220,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[])
PRINTK("[oom] low mem threshold %s(byte) invalid.\n", argv[1]);
return OS_ERROR;
} else {
- OomSetLowMemThreashold(lowMemThreshold);
+ OomSetLowMemThreashold(lowMemThreshold);//设置低内存阈值
}
} else if (strcmp(argv[0], "-i") == 0) {
checkInterval = strtoul((CHAR *)argv[1], &endPtr, 0);
@@ -227,7 +228,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[])
PRINTK("[oom] check interval %s(us) invalid.\n", argv[1]);
return OS_ERROR;
} else {
- OomSetCheckInterval(checkInterval);
+ OomSetCheckInterval(checkInterval);//设置oom线程任务检查的时间间隔
}
} else if (strcmp(argv[0], "-r") == 0) {
reclaimMemThreshold = strtoul((CHAR *)argv[1], &endPtr, 0);
@@ -235,7 +236,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[])
PRINTK("[oom] reclaim mem threshold %s(byte) invalid.\n", argv[1]);
return OS_ERROR;
} else {
- OomSetReclaimMemThreashold(reclaimMemThreshold);
+ OomSetReclaimMemThreashold(reclaimMemThreshold);//设置pagecache内存回收阈值
}
} else {
PRINTK("%s: invalid option: %s %s\n", OOM_CMD, argv[0], argv[1]);
@@ -250,13 +251,13 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[])
}
#ifdef LOSCFG_SHELL_CMD_DEBUG
-SHELLCMD_ENTRY(oom_shellcmd, CMD_TYPE_SHOW, OOM_CMD, 2, (CmdCallBackFunc)OsShellCmdOom);
-SHELLCMD_ENTRY(vm_shellcmd, CMD_TYPE_SHOW, VMM_CMD, 1, (CmdCallBackFunc)OsShellCmdDumpVm);
-SHELLCMD_ENTRY(v2p_shellcmd, CMD_TYPE_SHOW, VMM_PMM_CMD, 1, (CmdCallBackFunc)OsShellCmdV2P);
+SHELLCMD_ENTRY(oom_shellcmd, CMD_TYPE_SHOW, OOM_CMD, 2, (CmdCallBackFunc)OsShellCmdOom);//采用shell命令静态注册方式
+SHELLCMD_ENTRY(vm_shellcmd, CMD_TYPE_SHOW, VMM_CMD, 1, (CmdCallBackFunc)OsShellCmdDumpVm);//采用shell命令静态注册方式 vmm
+SHELLCMD_ENTRY(v2p_shellcmd, CMD_TYPE_SHOW, VMM_PMM_CMD, 1, (CmdCallBackFunc)OsShellCmdV2P);//采用shell命令静态注册方式 v2p
#endif
#ifdef LOSCFG_SHELL
-SHELLCMD_ENTRY(pmm_shellcmd, CMD_TYPE_SHOW, "pmm", 0, (CmdCallBackFunc)OsShellCmdDumpPmm);
+SHELLCMD_ENTRY(pmm_shellcmd, CMD_TYPE_SHOW, "pmm", 0, (CmdCallBackFunc)OsShellCmdDumpPmm);//采用shell命令静态注册方式
#endif
#endif
diff --git a/src/kernel_liteos_a/kernel/base/mp/los_mp.c b/src/kernel_liteos_a/kernel/base/mp/los_mp.c
index 67bc7012..41825f20 100644
--- a/src/kernel_liteos_a/kernel/base/mp/los_mp.c
+++ b/src/kernel_liteos_a/kernel/base/mp/los_mp.c
@@ -1,3 +1,32 @@
+/*!
+ * @file los_mp.c
+ * @brief
+ * @link
+ * @verbatim
+ 多CPU核的操作系统3种处理模式(SMP+AMP+BMP) 鸿蒙实现的是 SMP 的方式
+ 非对称多处理(Asymmetric multiprocessing,AMP)每个CPU内核
+ 运行一个独立的操作系统或同一操作系统的独立实例(instantiation)。
+
+ 对称多处理(Symmetric multiprocessing,SMP)一个操作系统的实例
+ 可以同时管理所有CPU内核,且应用并不绑定某一个内核。
+
+ 混合多处理(Bound multiprocessing,BMP)一个操作系统的实例可以
+ 同时管理所有CPU内核,但每个应用被锁定于某个指定的核心。
+
+ 多核多线程处理器的中断
+ 由 PIC(Programmable Interrupt Controller)统一控制。PIC 允许一个
+ 硬件线程中断其他的硬件线程,这种方式被称为核间中断(Inter-Processor Interrupts,IPI)
+
+ SGI:软件触发中断(Software Generated Interrupt)。在arm处理器中,
+ SGI共有16个,硬件中断号分别为ID0~ID15。它通常用于多核间通讯。
+ * @endverbatim
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-11-18
+ *
+ * @history
+ *
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
@@ -36,43 +65,43 @@
#include "los_swtmr.h"
#include "los_task_pri.h"
-#ifdef LOSCFG_KERNEL_SMP
+#ifdef LOSCFG_KERNEL_SMP
+//给参数CPU发送调度信号
#ifdef LOSCFG_KERNEL_SMP_CALL
LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_mpCallSpin);
#define MP_CALL_LOCK(state) LOS_SpinLockSave(&g_mpCallSpin, &(state))
#define MP_CALL_UNLOCK(state) LOS_SpinUnlockRestore(&g_mpCallSpin, (state))
#endif
-
-VOID LOS_MpSchedule(UINT32 target)
+VOID LOS_MpSchedule(UINT32 target)//target每位对应CPU core
{
UINT32 cpuid = ArchCurrCpuid();
- target &= ~(1U << cpuid);
- HalIrqSendIpi(target, LOS_MP_IPI_SCHEDULE);
+ target &= ~(1U << cpuid);//获取除了自身之外的其他CPU
+ HalIrqSendIpi(target, LOS_MP_IPI_SCHEDULE);//向目标CPU发送调度信号,核间中断(Inter-Processor Interrupts),IPI
}
-
+///硬中断唤醒处理函数
VOID OsMpWakeHandler(VOID)
{
/* generic wakeup ipi, do nothing */
}
-
+///硬中断调度处理函数
VOID OsMpScheduleHandler(VOID)
-{
+{//将调度标志设置为与唤醒功能不同,这样就可以在硬中断结束时触发调度程序。
/*
* set schedule flag to differ from wake function,
* so that the scheduler can be triggered at the end of irq.
*/
OsSchedRunqueuePendingSet();
}
-
+///硬中断暂停处理函数
VOID OsMpHaltHandler(VOID)
{
(VOID)LOS_IntLock();
- OsPercpuGet()->excFlag = CPU_HALT;
+ OsPercpuGet()->excFlag = CPU_HALT;//让当前Cpu停止工作
- while (1) {}
+ while (1) {}//陷入空循环,也就是空闲状态
}
-
+///MP定时器处理函数, 递归检查所有可用任务
VOID OsMpCollectTasks(VOID)
{
LosTaskCB *taskCB = NULL;
@@ -80,19 +109,19 @@ VOID OsMpCollectTasks(VOID)
UINT32 ret;
/* recursive checking all the available task */
- for (; taskID <= g_taskMaxNum; taskID++) {
+ for (; taskID <= g_taskMaxNum; taskID++) { //递归检查所有可用任务
taskCB = &g_taskCBArray[taskID];
if (OsTaskIsUnused(taskCB) || OsTaskIsRunning(taskCB)) {
continue;
}
- /*
+ /* 虽然任务状态不是原子的,但此检查可能成功,但无法完成删除,此删除将在下次运行之前处理
* though task status is not atomic, this check may success but not accomplish
* the deletion; this deletion will be handled until the next run.
*/
- if (taskCB->signal & SIGNAL_KILL) {
- ret = LOS_TaskDelete(taskID);
+ if (taskCB->signal & SIGNAL_KILL) {//任务收到被干掉信号
+ ret = LOS_TaskDelete(taskID);//干掉任务,回归任务池
if (ret != LOS_OK) {
PRINT_WARN("GC collect task failed err:0x%x\n", ret);
}
@@ -101,6 +130,17 @@ VOID OsMpCollectTasks(VOID)
}
#ifdef LOSCFG_KERNEL_SMP_CALL
+/*!
+ * @brief OsMpFuncCall
+ * 向指定CPU的funcLink上注册回调函数, 该怎么理解这个函数呢 ? 具体有什么用呢 ?
+ * \n 可由CPU a核向b核发起一个请求,让b核去执行某个函数, 这是否是分布式调度的底层实现基础 ?
+ * @param args
+ * @param func
+ * @param target
+ * @return
+ *
+ * @see
+ */
VOID OsMpFuncCall(UINT32 target, SMP_FUNC_CALL func, VOID *args)
{
UINT32 index;
@@ -110,13 +150,13 @@ VOID OsMpFuncCall(UINT32 target, SMP_FUNC_CALL func, VOID *args)
return;
}
- if (!(target & OS_MP_CPU_ALL)) {
+ if (!(target & OS_MP_CPU_ALL)) {//检查目标CPU是否正确
return;
}
- for (index = 0; index < LOSCFG_KERNEL_CORE_NUM; index++) {
+ for (index = 0; index < LOSCFG_KERNEL_CORE_NUM; index++) {//遍历所有核
if (CPUID_TO_AFFI_MASK(index) & target) {
- MpCallFunc *mpCallFunc = (MpCallFunc *)LOS_MemAlloc(m_aucSysMem0, sizeof(MpCallFunc));
+ MpCallFunc *mpCallFunc = (MpCallFunc *)LOS_MemAlloc(m_aucSysMem0, sizeof(MpCallFunc));//从内核空间 分配回调结构体
if (mpCallFunc == NULL) {
PRINT_ERR("smp func call malloc failed\n");
return;
@@ -125,59 +165,66 @@ VOID OsMpFuncCall(UINT32 target, SMP_FUNC_CALL func, VOID *args)
mpCallFunc->args = args;
MP_CALL_LOCK(intSave);
- LOS_ListAdd(&g_percpu[index].funcLink, &(mpCallFunc->node));
+ LOS_ListAdd(&g_percpu[index].funcLink, &(mpCallFunc->node));//将回调结构体挂入链表尾部
MP_CALL_UNLOCK(intSave);
}
}
- HalIrqSendIpi(target, LOS_MP_IPI_FUNC_CALL);
+ HalIrqSendIpi(target, LOS_MP_IPI_FUNC_CALL);//向目标CPU发起核间中断
}
+/*!
+ * @brief OsMpFuncCallHandler
+ * 回调向当前CPU注册过的函数
+ * @return
+ *
+ * @see
+ */
VOID OsMpFuncCallHandler(VOID)
{
UINT32 intSave;
- UINT32 cpuid = ArchCurrCpuid();
+ UINT32 cpuid = ArchCurrCpuid();//获取当前CPU
LOS_DL_LIST *list = NULL;
MpCallFunc *mpCallFunc = NULL;
MP_CALL_LOCK(intSave);
- while (!LOS_ListEmpty(&g_percpu[cpuid].funcLink)) {
- list = LOS_DL_LIST_FIRST(&g_percpu[cpuid].funcLink);
- LOS_ListDelete(list);
+ while (!LOS_ListEmpty(&g_percpu[cpuid].funcLink)) {//遍历回调函数链表,直到为空
+ list = LOS_DL_LIST_FIRST(&g_percpu[cpuid].funcLink);//获取链表第一个数据
+ LOS_ListDelete(list);//将自己从链表上摘除
MP_CALL_UNLOCK(intSave);
- mpCallFunc = LOS_DL_LIST_ENTRY(list, MpCallFunc, node);
- mpCallFunc->func(mpCallFunc->args);
- (VOID)LOS_MemFree(m_aucSysMem0, mpCallFunc);
+ mpCallFunc = LOS_DL_LIST_ENTRY(list, MpCallFunc, node);//获取回调函数
+ mpCallFunc->func(mpCallFunc->args);//获取参数并回调该函数
+ (VOID)LOS_MemFree(m_aucSysMem0, mpCallFunc);//释放回调函数内存
MP_CALL_LOCK(intSave);
}
MP_CALL_UNLOCK(intSave);
}
-
+/// CPU层级的回调模块初始化
VOID OsMpFuncCallInit(VOID)
{
UINT32 index;
- /* init funclink for each core */
+ /* init funclink for each core | 为每个CPU核整一个回调函数链表*/
for (index = 0; index < LOSCFG_KERNEL_CORE_NUM; index++) {
- LOS_ListInit(&g_percpu[index].funcLink);
+ LOS_ListInit(&g_percpu[index].funcLink);//链表初始化
}
}
#endif /* LOSCFG_KERNEL_SMP_CALL */
-
+//MP(multiprocessing) 多核处理器初始化
UINT32 OsMpInit(VOID)
{
UINT16 swtmrId;
- (VOID)LOS_SwtmrCreate(OS_MP_GC_PERIOD, LOS_SWTMR_MODE_PERIOD,
- (SWTMR_PROC_FUNC)OsMpCollectTasks, &swtmrId, 0);
- (VOID)LOS_SwtmrStart(swtmrId);
+ (VOID)LOS_SwtmrCreate(OS_MP_GC_PERIOD, LOS_SWTMR_MODE_PERIOD, //创建一个周期性,持续时间为 100个tick的定时器
+ (SWTMR_PROC_FUNC)OsMpCollectTasks, &swtmrId, 0);//OsMpCollectTasks为超时回调函数
+ (VOID)LOS_SwtmrStart(swtmrId);//开始定时任务
#ifdef LOSCFG_KERNEL_SMP_CALL
OsMpFuncCallInit();
#endif
return LOS_OK;
}
-LOS_MODULE_INIT(OsMpInit, LOS_INIT_LEVEL_KMOD_TASK);
+LOS_MODULE_INIT(OsMpInit, LOS_INIT_LEVEL_KMOD_TASK);//多处理器模块初始化
#endif
diff --git a/src/kernel_liteos_a/kernel/base/mp/los_percpu.c b/src/kernel_liteos_a/kernel/base/mp/los_percpu.c
index 4d883916..527c028f 100644
--- a/src/kernel_liteos_a/kernel/base/mp/los_percpu.c
+++ b/src/kernel_liteos_a/kernel/base/mp/los_percpu.c
@@ -33,7 +33,7 @@
#include "los_printf.h"
#ifdef LOSCFG_KERNEL_SMP
-Percpu g_percpu[LOSCFG_KERNEL_CORE_NUM];
+Percpu g_percpu[LOSCFG_KERNEL_CORE_NUM]; ///< CPU池,池大小由CPU核数决定
VOID OsAllCpuStatusOutput(VOID)
{
diff --git a/src/kernel_liteos_a/kernel/base/om/los_err.c b/src/kernel_liteos_a/kernel/base/om/los_err.c
index 6267a9d1..c8973dc5 100644
--- a/src/kernel_liteos_a/kernel/base/om/los_err.c
+++ b/src/kernel_liteos_a/kernel/base/om/los_err.c
@@ -31,9 +31,33 @@
#include "los_err.h"
+/**
+ * @file los_err.c
+ * @brief
+ * @verbatim
+基本概念
+ 错误处理指程序运行错误时,调用错误处理模块的接口函数,上报错误信息,并调用注册的钩子函数
+ 进行特定处理,保存现场以便定位问题。通过错误处理,可以控制和提示程序中的非法输入,防止程序崩溃。
-LITE_OS_SEC_BSS STATIC LOS_ERRORHANDLE_FUNC g_errHandleHook = NULL;
+运作机制
+ 错误处理是一种机制,用于处理异常状况。当程序出现错误时,会显示相应的错误码。
+ 此外,如果注册了相应的错误处理函数,则会执行这个函数。
+系统中只有一个错误处理的钩子函数。当多次注册钩子函数时,最后一次注册的钩子函数会覆盖前一次注册的函数
+ * @endverbatim
+ */
+
+LITE_OS_SEC_BSS STATIC LOS_ERRORHANDLE_FUNC g_errHandleHook = NULL;///< 错误接管钩子函数
+
+/**
+ * @brief 调用钩子函数,处理错误
+ * @param fileName 存放错误日志的文件名,系统内部调用时,入参为"os_unspecific_file"
+ * @param lineNo 发生错误的代码行号。系统内部调用时,若值为0xa1b2c3f8,表示未传递行号
+ * @param errorNo 错误码
+ * @param paraLen 入参para的长度。系统内部调用时,入参为0
+ * @param para 错误标签。系统内部调用时,入参为NULL
+ * @return LITE_OS_SEC_TEXT_INIT
+ */
LITE_OS_SEC_TEXT_INIT UINT32 LOS_ErrHandle(CHAR *fileName, UINT32 lineNo, UINT32 errorNo,
UINT32 paraLen, VOID *para)
{
@@ -43,7 +67,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_ErrHandle(CHAR *fileName, UINT32 lineNo, UINT32
return LOS_OK;
}
-
+///设置钩子函数,处理错误
LITE_OS_SEC_TEXT_INIT VOID LOS_SetErrHandleHook(LOS_ERRORHANDLE_FUNC fun)
{
g_errHandleHook = fun;
diff --git a/src/kernel_liteos_a/kernel/base/sched/los_idle.c b/src/kernel_liteos_a/kernel/base/sched/los_idle.c
index d2538595..0ba434dc 100644
--- a/src/kernel_liteos_a/kernel/base/sched/los_idle.c
+++ b/src/kernel_liteos_a/kernel/base/sched/los_idle.c
@@ -44,7 +44,7 @@ STATIC VOID IdleTimeSliceUpdate(SchedRunqueue *rq, LosTaskCB *taskCB, UINT64 cur
STATIC INT32 IdleParamCompare(const SchedPolicy *sp1, const SchedPolicy *sp2);
STATIC VOID IdlePriorityInheritance(LosTaskCB *owner, const SchedParam *param);
STATIC VOID IdlePriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param);
-
+//空闲调度
const STATIC SchedOps g_idleOps = {
.dequeue = IdleDequeue,
.enqueue = IdleEnqueue,
diff --git a/src/kernel_liteos_a/kernel/base/sched/los_priority.c b/src/kernel_liteos_a/kernel/base/sched/los_priority.c
index 610ba119..5f1c858d 100644
--- a/src/kernel_liteos_a/kernel/base/sched/los_priority.c
+++ b/src/kernel_liteos_a/kernel/base/sched/los_priority.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022-2023 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2022-2022 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -43,6 +43,8 @@
#define OS_SCHED_READY_MAX 30
#define OS_TIME_SLICE_MIN (INT32)((50 * OS_SYS_NS_PER_US) / OS_NS_PER_CYCLE) /* 50us */
+//基于优先数调度算法 Highest-Priority-First (HPF)
+
STATIC HPFRunqueue g_schedHPF;
STATIC VOID HPFDequeue(SchedRunqueue *rq, LosTaskCB *taskCB);
@@ -63,7 +65,7 @@ STATIC VOID HPFTimeSliceUpdate(SchedRunqueue *rq, LosTaskCB *taskCB, UINT64 curr
STATIC INT32 HPFParamCompare(const SchedPolicy *sp1, const SchedPolicy *sp2);
STATIC VOID HPFPriorityInheritance(LosTaskCB *owner, const SchedParam *param);
STATIC VOID HPFPriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param);
-
+//优先级调度算法操作
const STATIC SchedOps g_priorityOps = {
.dequeue = HPFDequeue,
.enqueue = HPFEnqueue,
@@ -243,7 +245,7 @@ STATIC INLINE VOID PriQueInsert(HPFRunqueue *rq, LosTaskCB *taskCB)
taskCB->taskStatus &= ~OS_TASK_STATUS_BLOCKED;
taskCB->taskStatus |= OS_TASK_STATUS_READY;
}
-
+//入就绪队列
STATIC VOID HPFEnqueue(SchedRunqueue *rq, LosTaskCB *taskCB)
{
#ifdef LOSCFG_SCHED_HPF_DEBUG
@@ -253,14 +255,14 @@ STATIC VOID HPFEnqueue(SchedRunqueue *rq, LosTaskCB *taskCB)
#endif
PriQueInsert(rq->hpfRunqueue, taskCB);
}
-
+//出就绪队列
STATIC VOID HPFDequeue(SchedRunqueue *rq, LosTaskCB *taskCB)
{
SchedHPF *sched = (SchedHPF *)&taskCB->sp;
- if (taskCB->taskStatus & OS_TASK_STATUS_READY) {
+ if (taskCB->taskStatus & OS_TASK_STATUS_READY) {//是否有就绪状态
PriQueDelete(rq->hpfRunqueue, sched->basePrio, &taskCB->pendList, sched->priority);
- taskCB->taskStatus &= ~OS_TASK_STATUS_READY;
+ taskCB->taskStatus &= ~OS_TASK_STATUS_READY;//更新成非就绪状态
}
}
@@ -475,7 +477,7 @@ STATIC VOID HPFPriorityInheritance(LosTaskCB *owner, const SchedParam *param)
LOS_BitmapSet(&sp->priBitmap, sp->priority);
sp->priority = param->priority;
}
-
+/// 恢复任务优先级
STATIC VOID HPFPriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param)
{
UINT16 priority;
@@ -498,8 +500,8 @@ STATIC VOID HPFPriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const
}
if ((list != NULL) && !LOS_ListEmpty((LOS_DL_LIST *)list)) {
- priority = LOS_HighBitGet(sp->priBitmap);
- LOS_DL_LIST_FOR_EACH_ENTRY(pendedTask, list, LosTaskCB, pendList) {
+ priority = LOS_HighBitGet(sp->priBitmap);//获取在历史调度中最高优先级
+ LOS_DL_LIST_FOR_EACH_ENTRY(pendedTask, list, LosTaskCB, pendList) {//遍历链表
SchedHPF *pendSp = (SchedHPF *)&pendedTask->sp;
if ((pendedTask->ops == owner->ops) && (priority != pendSp->priority)) {
LOS_BitmapClr(&sp->priBitmap, pendSp->priority);
@@ -537,7 +539,7 @@ VOID HPFProcessDefaultSchedParamGet(SchedParam *param)
{
param->basePrio = OS_USER_PROCESS_PRIORITY_HIGHEST;
}
-
+//HPF 调度策略初始化
VOID HPFSchedPolicyInit(SchedRunqueue *rq)
{
if (ArchCurrCpuid() > 0) {
diff --git a/src/kernel_liteos_a/kernel/base/sched/los_sortlink.c b/src/kernel_liteos_a/kernel/base/sched/los_sortlink.c
index 4f68436f..7dcfce04 100644
--- a/src/kernel_liteos_a/kernel/base/sched/los_sortlink.c
+++ b/src/kernel_liteos_a/kernel/base/sched/los_sortlink.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -30,7 +30,7 @@
*/
#include "los_sortlink_pri.h"
-
+/// 排序链表初始化
VOID OsSortLinkInit(SortLinkAttribute *sortLinkHeader)
{
LOS_ListInit(&sortLinkHeader->sortLink);
@@ -38,38 +38,47 @@ VOID OsSortLinkInit(SortLinkAttribute *sortLinkHeader)
sortLinkHeader->nodeNum = 0;
}
+/*!
+ * @brief OsAddNode2SortLink 向链表中插入结点,并按时间顺序排列
+ *
+ * @param sortLinkHeader 被插入的链表
+ * @param sortList 要插入的结点
+ * @return
+ *
+ * @see
+ */
STATIC INLINE VOID AddNode2SortLink(SortLinkAttribute *sortLinkHeader, SortLinkList *sortList)
{
- LOS_DL_LIST *head = (LOS_DL_LIST *)&sortLinkHeader->sortLink;
+ LOS_DL_LIST *head = (LOS_DL_LIST *)&sortLinkHeader->sortLink; //获取双向链表
- if (LOS_ListEmpty(head)) {
- LOS_ListHeadInsert(head, &sortList->sortLinkNode);
- sortLinkHeader->nodeNum++;
+ if (LOS_ListEmpty(head)) { //空链表,直接插入
+ LOS_ListHeadInsert(head, &sortList->sortLinkNode);//插入结点
+ sortLinkHeader->nodeNum++;//CPU的工作量增加了
return;
}
-
+ //链表不为空时,插入分三种情况, responseTime 大于,等于,小于的处理
SortLinkList *listSorted = LOS_DL_LIST_ENTRY(head->pstNext, SortLinkList, sortLinkNode);
- if (listSorted->responseTime > sortList->responseTime) {
- LOS_ListAdd(head, &sortList->sortLinkNode);
- sortLinkHeader->nodeNum++;
- return;
- } else if (listSorted->responseTime == sortList->responseTime) {
- LOS_ListAdd(head->pstNext, &sortList->sortLinkNode);
+ if (listSorted->responseTime > sortList->responseTime) {//如果要插入的节点 responseTime 最小
+ LOS_ListAdd(head, &sortList->sortLinkNode);//能跑进来说明是最小的,直接插入到第一的位置
+ sortLinkHeader->nodeNum++;//CPU的工作量增加了
+ return;//直接返回了
+ } else if (listSorted->responseTime == sortList->responseTime) {//相等的情况
+ LOS_ListAdd(head->pstNext, &sortList->sortLinkNode);//插到第二的位置
sortLinkHeader->nodeNum++;
return;
}
-
- LOS_DL_LIST *prevNode = head->pstPrev;
- do {
- listSorted = LOS_DL_LIST_ENTRY(prevNode, SortLinkList, sortLinkNode);
- if (listSorted->responseTime <= sortList->responseTime) {
+ //处理大于链表中第一个responseTime的情况,需要遍历链表
+ LOS_DL_LIST *prevNode = head->pstPrev;//注意这里用的前一个结点,也就是说前一个结点中的responseTime 是最大的
+ do { // @note_good 这里写的有点妙,也是双向链表的魅力所在
+ listSorted = LOS_DL_LIST_ENTRY(prevNode, SortLinkList, sortLinkNode);//一个个遍历,先比大的再比小的
+ if (listSorted->responseTime <= sortList->responseTime) {//如果时间比你小,就插到后面
LOS_ListAdd(prevNode, &sortList->sortLinkNode);
sortLinkHeader->nodeNum++;
break;
}
- prevNode = prevNode->pstPrev;
- } while (1);
+ prevNode = prevNode->pstPrev;//再拿上一个更小的responseTime进行比较
+ } while (1);//死循环
}
VOID OsAdd2SortLink(SortLinkAttribute *head, SortLinkList *node, UINT64 responseTime, UINT16 idleCpu)
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_boot.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_boot.c
index 81287a26..b4b04eb1 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_boot.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_boot.c
@@ -38,10 +38,15 @@
#include "los_vm_page.h"
#include "los_arch_mmu.h"
+/**
+ * @brief 虚拟内存区间检查, 需理解 los_vm_zone.h 中画出的鸿蒙虚拟内存全景图
+ */
+
+UINTPTR g_vmBootMemBase = (UINTPTR)&__bss_end; ///< 内核空间可用于分配的区域,紧挨着.bss区
+BOOL g_kHeapInited = FALSE; ///< 内核堆区初始化变量
-UINTPTR g_vmBootMemBase = (UINTPTR)&__bss_end;
-BOOL g_kHeapInited = FALSE;
+/// 开机引导分配器分配内存,只有开机引导阶段采用的分配方式
VOID *OsVmBootMemAlloc(size_t len)
{
UINTPTR ptr;
@@ -51,33 +56,33 @@ VOID *OsVmBootMemAlloc(size_t len)
return NULL;
}
- ptr = LOS_Align(g_vmBootMemBase, sizeof(UINTPTR));
- g_vmBootMemBase = ptr + LOS_Align(len, sizeof(UINTPTR));
-
+ ptr = LOS_Align(g_vmBootMemBase, sizeof(UINTPTR));//对齐
+ g_vmBootMemBase = ptr + LOS_Align(len, sizeof(UINTPTR));//通过改变 g_vmBootMemBase来获取内存
+ //这样也行,g_vmBootMemBase 真是野蛮粗暴
return (VOID *)ptr;
}
-
+///整个系统内存初始化
UINT32 OsSysMemInit(VOID)
{
STATUS_T ret;
#ifdef LOSCFG_KERNEL_VM
- OsKSpaceInit();
+ OsKSpaceInit();//内核空间初始化
#endif
- ret = OsKHeapInit(OS_KHEAP_BLOCK_SIZE);
+ ret = OsKHeapInit(OS_KHEAP_BLOCK_SIZE);// 内核堆空间初始化 512K
if (ret != LOS_OK) {
VM_ERR("OsKHeapInit fail\n");
return LOS_NOK;
}
#ifdef LOSCFG_KERNEL_VM
- OsVmPageStartup();
- g_kHeapInited = TRUE;
- OsInitMappingStartUp();
+ OsVmPageStartup();// 物理内存初始化
+ g_kHeapInited = TRUE; //内核堆区初始化完成
+ OsInitMappingStartUp();//映射初始化
#else
- g_kHeapInited = TRUE;
+ g_kHeapInited = TRUE;//内核堆区完成初始化
#endif
-
return LOS_OK;
}
+
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_dump.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_dump.c
index f326ea12..c1b190b8 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_dump.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_dump.c
@@ -53,7 +53,7 @@
#define FLAG_SIZE 4
#define FLAG_START 2
-
+//获取线性区的名称或文件路径
const CHAR *OsGetRegionNameOrFilePath(LosVmMapRegion *region)
{
struct Vnode *vnode = NULL;
@@ -64,24 +64,24 @@ const CHAR *OsGetRegionNameOrFilePath(LosVmMapRegion *region)
vnode = region->unTypeData.rf.vnode;
return vnode->filePath;
#endif
- } else if (region->regionFlags & VM_MAP_REGION_FLAG_HEAP) {
+ } else if (region->regionFlags & VM_MAP_REGION_FLAG_HEAP) {//堆区
return "HEAP";
- } else if (region->regionFlags & VM_MAP_REGION_FLAG_STACK) {
+ } else if (region->regionFlags & VM_MAP_REGION_FLAG_STACK) {//栈区
return "STACK";
- } else if (region->regionFlags & VM_MAP_REGION_FLAG_TEXT) {
+ } else if (region->regionFlags & VM_MAP_REGION_FLAG_TEXT) {//文本区
return "Text";
- } else if (region->regionFlags & VM_MAP_REGION_FLAG_VDSO) {
+ } else if (region->regionFlags & VM_MAP_REGION_FLAG_VDSO) {//虚拟动态链接对象区(Virtual Dynamically Shared Object、VDSO)
return "VDSO";
- } else if (region->regionFlags & VM_MAP_REGION_FLAG_MMAP) {
+ } else if (region->regionFlags & VM_MAP_REGION_FLAG_MMAP) {//映射区
return "MMAP";
- } else if (region->regionFlags & VM_MAP_REGION_FLAG_SHM) {
+ } else if (region->regionFlags & VM_MAP_REGION_FLAG_SHM) {//共享区
return "SHM";
} else {
return "";
}
return "";
}
-
+///
INT32 OsRegionOverlapCheckUnlock(LosVmSpace *space, LosVmMapRegion *region)
{
LosVmMapRegion *regionTemp = NULL;
@@ -107,7 +107,7 @@ INT32 OsRegionOverlapCheckUnlock(LosVmSpace *space, LosVmMapRegion *region)
return 0;
}
-
+///shell task 进程虚拟内存的使用情况
UINT32 OsShellCmdProcessVmUsage(LosVmSpace *space)
{
LosVmMapRegion *region = NULL;
@@ -119,7 +119,7 @@ UINT32 OsShellCmdProcessVmUsage(LosVmSpace *space)
return 0;
}
- if (space == LOS_GetKVmSpace()) {
+ if (space == LOS_GetKVmSpace()) {//内核空间
OsShellCmdProcessPmUsage(space, NULL, &used);
return used;
}
@@ -127,15 +127,14 @@ UINT32 OsShellCmdProcessVmUsage(LosVmSpace *space)
if (ret != 0) {
return 0;
}
-
- RB_SCAN_SAFE(&space->regionRbTree, pstRbNode, pstRbNodeNext)
- region = (LosVmMapRegion *)pstRbNode;
- used += region->range.size;
- RB_SCAN_SAFE_END(&space->regionRbTree, pstRbNode, pstRbNodeNext)
+ RB_SCAN_SAFE(&space->regionRbTree, pstRbNode, pstRbNodeNext)//开始扫描红黑树
+ region = (LosVmMapRegion *)pstRbNode;//拿到线性区,注意LosVmMapRegion结构体的第一个变量就是pstRbNode,所以可直接(LosVmMapRegion *)转
+ used += region->range.size;//size叠加,算出总使用
+ RB_SCAN_SAFE_END(&space->regionRbTree, pstRbNode, pstRbNodeNext)//结束扫描红黑树
(VOID)LOS_MuxRelease(&space->regionMux);
return used;
}
-
+///内核空间物理内存使用情况统计
UINT32 OsKProcessPmUsage(LosVmSpace *kSpace, UINT32 *actualPm)
{
UINT32 memUsed;
@@ -159,26 +158,26 @@ UINT32 OsKProcessPmUsage(LosVmSpace *kSpace, UINT32 *actualPm)
/* Kernel resident memory, include default heap memory */
memUsed = SYS_MEM_SIZE_DEFAULT - (totalCount << PAGE_SHIFT);
- spaceList = LOS_GetVmSpaceList();
+ spaceList = LOS_GetVmSpaceList();//获取虚拟空间链表,上面挂了所有虚拟空间
LosMux *vmSpaceListMux = OsGVmSpaceMuxGet();
(VOID)LOS_MuxAcquire(vmSpaceListMux);
- LOS_DL_LIST_FOR_EACH_ENTRY(space, spaceList, LosVmSpace, node) {
- if (space == LOS_GetKVmSpace()) {
+ LOS_DL_LIST_FOR_EACH_ENTRY(space, spaceList, LosVmSpace, node) {//遍历链表
+ if (space == LOS_GetKVmSpace()) {//内核空间不统计
continue;
}
UProcessUsed += OsUProcessPmUsage(space, NULL, NULL);
}
(VOID)LOS_MuxRelease(vmSpaceListMux);
- /* Kernel dynamic memory, include extended heap memory */
+ /* Kernel dynamic memory, include extended heap memory */ //内核动态内存,包括扩展堆内存
memUsed += ((usedCount << PAGE_SHIFT) - UProcessUsed);
- /* Remaining heap memory */
+ /* Remaining heap memory */ //剩余堆内存
memUsed -= freeMem;
*actualPm = memUsed;
return memUsed;
}
-
+///shell task 物理内存的使用情况
UINT32 OsShellCmdProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actualPm)
{
if (space == NULL) {
@@ -194,7 +193,7 @@ UINT32 OsShellCmdProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actu
}
return OsUProcessPmUsage(space, sharePm, actualPm);
}
-
+///虚拟空间物理内存的使用情况,参数同时带走共享物理内存 sharePm和actualPm 单位是字节
UINT32 OsUProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actualPm)
{
LosVmMapRegion *region = NULL;
@@ -230,10 +229,10 @@ UINT32 OsUProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actualPm)
continue;
}
- shareRef = LOS_AtomicRead(&page->refCounts);
+ shareRef = LOS_AtomicRead(&page->refCounts);//ref 大于1 说明page被其他空间也引用了,这就是共享内存核心定义!
if (shareRef > 1) {
if (sharePm != NULL) {
- *sharePm += PAGE_SIZE;
+ *sharePm += PAGE_SIZE;//一页 4K 字节
}
pmSize += PAGE_SIZE / shareRef;
} else {
@@ -250,7 +249,7 @@ UINT32 OsUProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actualPm)
return pmSize;
}
-
+///通过虚拟空间获取进程实体
LosProcessCB *OsGetPIDByAspace(const LosVmSpace *space)
{
UINT32 pid;
@@ -258,13 +257,13 @@ LosProcessCB *OsGetPIDByAspace(const LosVmSpace *space)
LosProcessCB *processCB = NULL;
SCHEDULER_LOCK(intSave);
- for (pid = 0; pid < g_processMaxNum; ++pid) {
+ for (pid = 0; pid < g_processMaxNum; ++pid) {//循环进程池,进程池本质是个数组
processCB = g_processCBArray + pid;
- if (OsProcessIsUnused(processCB)) {
- continue;
+ if (OsProcessIsUnused(processCB)) {//进程还没被分配使用
+ continue;//继续找呗
}
- if (processCB->vmSpace == space) {
+ if (processCB->vmSpace == space) {//找到了
SCHEDULER_UNLOCK(intSave);
return processCB;
}
@@ -272,7 +271,7 @@ LosProcessCB *OsGetPIDByAspace(const LosVmSpace *space)
SCHEDULER_UNLOCK(intSave);
return NULL;
}
-
+///统计虚拟空间中某个线性区的页数
UINT32 OsCountRegionPages(LosVmSpace *space, LosVmMapRegion *region, UINT32 *pssPages)
{
UINT32 regionPages = 0;
@@ -301,12 +300,12 @@ UINT32 OsCountRegionPages(LosVmSpace *space, LosVmMapRegion *region, UINT32 *pss
}
if (pssPages != NULL) {
- *pssPages = (UINT32)(pss + 0.5); /* 0.5, for page alignment */
+ *pssPages = (UINT32)(pss + 0.5); /* 0.5, for page alignment */
}
return regionPages;
}
-
+///统计虚拟空间的总页数
UINT32 OsCountAspacePages(LosVmSpace *space)
{
UINT32 spacePages = 0;
@@ -390,27 +389,30 @@ VOID OsDumpRegion2(LosVmSpace *space, LosVmMapRegion *region)
region->range.size, flagsStr, regionPages, pssPages);
(VOID)LOS_MemFree(m_aucSysMem0, flagsStr);
}
-
+///dump 指定虚拟空间的信息
VOID OsDumpAspace(LosVmSpace *space)
{
LosVmMapRegion *region = NULL;
LosRbNode *pstRbNode = NULL;
LosRbNode *pstRbNodeNext = NULL;
UINT32 spacePages;
- LosProcessCB *pcb = OsGetPIDByAspace(space);
+ LosProcessCB *pcb = OsGetPIDByAspace(space);//通过虚拟空间找到进程实体
if (pcb == NULL) {
return;
}
-
- spacePages = OsCountAspacePages(space);
+ //进程ID | 进程虚拟内存控制块地址信息 | 虚拟内存起始地址 | 虚拟内存大小 | 已使用的物理页数量
+ spacePages = OsCountAspacePages(space);//获取空间的页数
PRINTK("\r\n PID aspace name base size pages \n");
PRINTK(" ---- ------ ---- ---- ----- ----\n");
PRINTK(" %-4d %#010x %-10.10s %#010x %#010x %d\n", pcb->processID, space, pcb->processName,
space->base, space->size, spacePages);
- PRINTK("\r\n\t region name base size mmu_flags pages pg/ref\n");
+
+ //虚拟区间控制块地址信息 | 虚拟区间类型 | 虚拟区间起始地址 | 虚拟区间大小 | 虚拟区间mmu映射属性 | 已使用的物理页数量(包括共享内存部分 | 已使用的物理页数量
+
+ PRINTK("\r\n\t region name base size mmu_flags pages pg/ref\n");
PRINTK("\t ------ ---- ---- ---- --------- ----- -----\n");
- RB_SCAN_SAFE(&space->regionRbTree, pstRbNode, pstRbNodeNext)
+ RB_SCAN_SAFE(&space->regionRbTree, pstRbNode, pstRbNodeNext)//按region 轮询统计
region = (LosVmMapRegion *)pstRbNode;
if (region != NULL) {
OsDumpRegion2(space, region);
@@ -421,14 +423,14 @@ VOID OsDumpAspace(LosVmSpace *space)
RB_SCAN_SAFE_END(&space->regionRbTree, pstRbNode, pstRbNodeNext)
return;
}
-
+///查看所有进程使用虚拟内存的情况
VOID OsDumpAllAspace(VOID)
{
LosVmSpace *space = NULL;
- LOS_DL_LIST *aspaceList = LOS_GetVmSpaceList();
- LOS_DL_LIST_FOR_EACH_ENTRY(space, aspaceList, LosVmSpace, node) {
+ LOS_DL_LIST *aspaceList = LOS_GetVmSpaceList();//获取所有空间链表
+ LOS_DL_LIST_FOR_EACH_ENTRY(space, aspaceList, LosVmSpace, node) {//循环取出进程虚拟空间
(VOID)LOS_MuxAcquire(&space->regionMux);
- OsDumpAspace(space);
+ OsDumpAspace(space);//dump 空间
(VOID)LOS_MuxRelease(&space->regionMux);
}
return;
@@ -447,11 +449,11 @@ STATUS_T OsRegionOverlapCheck(LosVmSpace *space, LosVmMapRegion *region)
(VOID)LOS_MuxRelease(&space->regionMux);
return ret;
}
-
+///dump 页表项
VOID OsDumpPte(VADDR_T vaddr)
{
UINT32 l1Index = vaddr >> MMU_DESCRIPTOR_L1_SMALL_SHIFT;
- LosVmSpace *space = LOS_SpaceGet(vaddr);
+ LosVmSpace *space = LOS_SpaceGet(vaddr);//通过虚拟地址获取空间,内核分三个空间 内核进程空间,内核堆空间,用户进程空间
UINT32 ttEntry;
LosVmPage *page = NULL;
PTE_T *l2Table = NULL;
@@ -461,27 +463,27 @@ VOID OsDumpPte(VADDR_T vaddr)
return;
}
- ttEntry = space->archMmu.virtTtb[l1Index];
+ ttEntry = space->archMmu.virtTtb[l1Index];//找到 L1 页面项
if (ttEntry) {
- l2Table = LOS_PaddrToKVaddr(MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(ttEntry));
- l2Index = (vaddr % MMU_DESCRIPTOR_L1_SMALL_SIZE) >> PAGE_SHIFT;
+ l2Table = LOS_PaddrToKVaddr(MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(ttEntry));//找到L1页面项对应的 L2表
+ l2Index = (vaddr % MMU_DESCRIPTOR_L1_SMALL_SIZE) >> PAGE_SHIFT;//找到L2页面项
if (l2Table == NULL) {
goto ERR;
}
- page = LOS_VmPageGet(l2Table[l2Index] & ~(PAGE_SIZE - 1));
+ page = LOS_VmPageGet(l2Table[l2Index] & ~(PAGE_SIZE - 1));//获取物理页框
if (page == NULL) {
goto ERR;
}
PRINTK("vaddr %p, l1Index %d, ttEntry %p, l2Table %p, l2Index %d, pfn %p count %d\n",
- vaddr, l1Index, ttEntry, l2Table, l2Index, l2Table[l2Index], LOS_AtomicRead(&page->refCounts));
- } else {
+ vaddr, l1Index, ttEntry, l2Table, l2Index, l2Table[l2Index], LOS_AtomicRead(&page->refCounts));//打印L1 L2 页表项
+ } else {//不在L1表
PRINTK("vaddr %p, l1Index %d, ttEntry %p\n", vaddr, l1Index, ttEntry);
}
return;
ERR:
PRINTK("%s, error vaddr: %#x, l2Table: %#x, l2Index: %#x\n", __FUNCTION__, vaddr, l2Table, l2Index);
}
-
+///获取段剩余页框数
UINT32 OsVmPhySegPagesGet(LosVmPhysSeg *seg)
{
UINT32 intSave;
@@ -489,14 +491,25 @@ UINT32 OsVmPhySegPagesGet(LosVmPhysSeg *seg)
UINT32 segFreePages = 0;
LOS_SpinLockSave(&seg->freeListLock, &intSave);
- for (flindex = 0; flindex < VM_LIST_ORDER_MAX; flindex++) {
- segFreePages += ((1 << flindex) * seg->freeList[flindex].listCnt);
+ for (flindex = 0; flindex < VM_LIST_ORDER_MAX; flindex++) {//遍历块组
+ segFreePages += ((1 << flindex) * seg->freeList[flindex].listCnt);//1 << flindex等于页数, * 节点数 得到组块的总页数.
}
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
- return segFreePages;
+ return segFreePages;//返回剩余未分配的总物理页框
}
-
+///dump 物理内存
+/***********************************************************
+* phys_seg:物理页控制块地址信息
+* base:第一个物理页地址,即物理页内存起始地址
+* size:物理页内存大小
+* free_pages:空闲物理页数量
+* active anon: pagecache中,活跃的匿名页数量
+* inactive anon: pagecache中,不活跃的匿名页数量
+* active file: pagecache中,活跃的文件页数量
+* inactive file: pagecache中,不活跃的文件页数量
+* pmm pages total:总的物理页数,used:已使用的物理页数,free:空闲的物理页数
+************************************************************/
VOID OsVmPhysDump(VOID)
{
LosVmPhysSeg *seg = NULL;
@@ -508,7 +521,7 @@ VOID OsVmPhysDump(VOID)
UINT32 flindex;
UINT32 listCount[VM_LIST_ORDER_MAX] = {0};
- for (segIndex = 0; segIndex < g_vmPhysSegNum; segIndex++) {
+ for (segIndex = 0; segIndex < g_vmPhysSegNum; segIndex++) {//循环取段
seg = &g_vmPhysSeg[segIndex];
if (seg->size > 0) {
segFreePages = OsVmPhySegPagesGet(seg);
@@ -538,7 +551,7 @@ VOID OsVmPhysDump(VOID)
PRINTK("\n\rpmm pages: total = %u, used = %u, free = %u\n",
totalPages, (totalPages - totalFreePages), totalFreePages);
}
-
+///获取物理内存的使用信息,两个参数接走数据
VOID OsVmPhysUsedInfoGet(UINT32 *usedCount, UINT32 *totalCount)
{
UINT32 index;
@@ -551,12 +564,12 @@ VOID OsVmPhysUsedInfoGet(UINT32 *usedCount, UINT32 *totalCount)
*usedCount = 0;
*totalCount = 0;
- for (index = 0; index < g_vmPhysSegNum; index++) {
+ for (index = 0; index < g_vmPhysSegNum; index++) {//循环取段
physSeg = &g_vmPhysSeg[index];
if (physSeg->size > 0) {
- *totalCount += physSeg->size >> PAGE_SHIFT;
- segFreePages = OsVmPhySegPagesGet(physSeg);
- *usedCount += (*totalCount - segFreePages);
+ *totalCount += physSeg->size >> PAGE_SHIFT;//叠加段的总页数
+ segFreePages = OsVmPhySegPagesGet(physSeg);//获取段的剩余页数
+ *usedCount += (*totalCount - segFreePages);//叠加段的使用页数
}
}
}
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_fault.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_fault.c
index 86b08199..5e584ef9 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_fault.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_fault.c
@@ -49,12 +49,11 @@
#include "vnode.h"
#endif
-
#ifdef LOSCFG_KERNEL_VM
extern char __exc_table_start[];
extern char __exc_table_end[];
-
+//线性区正确性检查
STATIC STATUS_T OsVmRegionPermissionCheck(LosVmMapRegion *region, UINT32 flags)
{
if ((region->regionFlags & VM_MAP_REGION_FLAG_PERM_READ) != VM_MAP_REGION_FLAG_PERM_READ) {
@@ -62,14 +61,14 @@ STATIC STATUS_T OsVmRegionPermissionCheck(LosVmMapRegion *region, UINT32 flags)
return LOS_NOK;
}
- if ((flags & VM_MAP_PF_FLAG_WRITE) == VM_MAP_PF_FLAG_WRITE) {
+ if ((flags & VM_MAP_PF_FLAG_WRITE) == VM_MAP_PF_FLAG_WRITE) {//写入许可
if ((region->regionFlags & VM_MAP_REGION_FLAG_PERM_WRITE) != VM_MAP_REGION_FLAG_PERM_WRITE) {
VM_ERR("write permission check failed operation flags %x, region flags %x", flags, region->regionFlags);
return LOS_NOK;
}
}
- if ((flags & VM_MAP_PF_FLAG_INSTRUCTION) == VM_MAP_PF_FLAG_INSTRUCTION) {
+ if ((flags & VM_MAP_PF_FLAG_INSTRUCTION) == VM_MAP_PF_FLAG_INSTRUCTION) {//指令
if ((region->regionFlags & VM_MAP_REGION_FLAG_PERM_EXECUTE) != VM_MAP_REGION_FLAG_PERM_EXECUTE) {
VM_ERR("exec permission check failed operation flags %x, region flags %x", flags, region->regionFlags);
return LOS_NOK;
@@ -96,8 +95,9 @@ STATIC VOID OsFaultTryFixup(ExcContext *frame, VADDR_T excVaddr, STATUS_T *statu
}
}
-#ifdef LOSCFG_FS_VFS
-STATIC STATUS_T OsDoReadFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
+#ifdef LOSCFG_FS_VFS
+//读页时发生缺页的处理
+STATIC STATUS_T OsDoReadFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)//读缺页
{
status_t ret;
PADDR_T paddr;
@@ -105,26 +105,26 @@ STATIC STATUS_T OsDoReadFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
VADDR_T vaddr = (VADDR_T)vmPgFault->vaddr;
LosVmSpace *space = region->space;
- ret = LOS_ArchMmuQuery(&space->archMmu, vaddr, NULL, NULL);
- if (ret == LOS_OK) {
- return LOS_OK;
+ ret = LOS_ArchMmuQuery(&space->archMmu, vaddr, NULL, NULL);//查询是否缺页
+ if (ret == LOS_OK) {//注意这里时LOS_OK却返回,都OK了说明查到了物理地址,有页了。
+ return LOS_OK;//查到了就说明不缺页的,缺页就是因为虚拟地址没有映射到物理地址嘛
}
- if (region->unTypeData.rf.vmFOps == NULL || region->unTypeData.rf.vmFOps->fault == NULL) {
+ if (region->unTypeData.rf.vmFOps == NULL || region->unTypeData.rf.vmFOps->fault == NULL) {//线性区必须有实现了缺页接口
VM_ERR("region args invalid, file path: %s", region->unTypeData.rf.vnode->filePath);
return LOS_ERRNO_VM_INVALID_ARGS;
}
(VOID)LOS_MuxAcquire(®ion->unTypeData.rf.vnode->mapping.mux_lock);
- ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault);
+ ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault);// 函数指针,执行的是g_commVmOps.OsVmmFileFault
if (ret == LOS_OK) {
- paddr = LOS_PaddrQuery(vmPgFault->pageKVaddr);
- page = LOS_VmPageGet(paddr);
+ paddr = LOS_PaddrQuery(vmPgFault->pageKVaddr);//查询物理地址
+ page = LOS_VmPageGet(paddr);//获取page
if (page != NULL) { /* just incase of page null */
- LOS_AtomicInc(&page->refCounts);
+ LOS_AtomicInc(&page->refCounts);//ref 自增
OsCleanPageLocked(page);
}
ret = LOS_ArchMmuMap(&space->archMmu, vaddr, paddr, 1,
- region->regionFlags & (~VM_MAP_REGION_FLAG_PERM_WRITE));
+ region->regionFlags & (~VM_MAP_REGION_FLAG_PERM_WRITE));//重新映射为非可写
if (ret < 0) {
VM_ERR("LOS_ArchMmuMap failed");
OsDelMapInfo(region, vmPgFault, false);
@@ -140,7 +140,7 @@ STATIC STATUS_T OsDoReadFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
return LOS_ERRNO_VM_NO_MEMORY;
}
-/* unmap a page when cow happened only */
+/* unmap a page when cow happened only *///仅当写时拷贝发生时取消页面映射
STATIC LosVmPage *OsCowUnmapOrg(LosArchMmu *archMmu, LosVmMapRegion *region, LosVmPgFault *vmf)
{
UINT32 intSave;
@@ -168,7 +168,7 @@ STATIC LosVmPage *OsCowUnmapOrg(LosArchMmu *archMmu, LosVmMapRegion *region, Los
return oldPage;
}
#endif
-
+//在私有线性区写入文件时发生缺页的处理
status_t OsDoCowFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
{
STATUS_T ret;
@@ -186,23 +186,23 @@ status_t OsDoCowFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
}
space = region->space;
- ret = LOS_ArchMmuQuery(&space->archMmu, (VADDR_T)vmPgFault->vaddr, &oldPaddr, NULL);
+ ret = LOS_ArchMmuQuery(&space->archMmu, (VADDR_T)vmPgFault->vaddr, &oldPaddr, NULL);//查询出老物理地址
if (ret == LOS_OK) {
- oldPage = OsCowUnmapOrg(&space->archMmu, region, vmPgFault);
+ oldPage = OsCowUnmapOrg(&space->archMmu, region, vmPgFault);//取消页面映射
}
- newPage = LOS_PhysPageAlloc();
+ newPage = LOS_PhysPageAlloc();//分配一个新页面
if (newPage == NULL) {
VM_ERR("LOS_PhysPageAlloc failed");
ret = LOS_ERRNO_VM_NO_MEMORY;
goto ERR_OUT;
}
- newPaddr = VM_PAGE_TO_PHYS(newPage);
- kvaddr = OsVmPageToVaddr(newPage);
+ newPaddr = VM_PAGE_TO_PHYS(newPage);//拿到新的物理地址
+ kvaddr = OsVmPageToVaddr(newPage);//拿到新的虚拟地址
(VOID)LOS_MuxAcquire(®ion->unTypeData.rf.vnode->mapping.mux_lock);
- ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault);
+ ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault);// 函数指针 g_commVmOps.OsVmmFileFault
if (ret != LOS_OK) {
VM_ERR("call region->vm_ops->fault fail");
(VOID)LOS_MuxRelease(®ion->unTypeData.rf.vnode->mapping.mux_lock);
@@ -214,20 +214,20 @@ status_t OsDoCowFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
* we can take it as a normal file cow map. 2.this page has done file cow map,
* we can take it as a anonymous cow map.
*/
- if ((oldPaddr == 0) || (LOS_PaddrToKVaddr(oldPaddr) == vmPgFault->pageKVaddr)) {
- (VOID)memcpy_s(kvaddr, PAGE_SIZE, vmPgFault->pageKVaddr, PAGE_SIZE);
- LOS_AtomicInc(&newPage->refCounts);
- OsCleanPageLocked(LOS_VmPageGet(LOS_PaddrQuery(vmPgFault->pageKVaddr)));
+ if ((oldPaddr == 0) || (LOS_PaddrToKVaddr(oldPaddr) == vmPgFault->pageKVaddr)) {//没有映射或者 已在pagecache有映射
+ (VOID)memcpy_s(kvaddr, PAGE_SIZE, vmPgFault->pageKVaddr, PAGE_SIZE);//直接copy到新页
+ LOS_AtomicInc(&newPage->refCounts);//引用ref++
+ OsCleanPageLocked(LOS_VmPageGet(LOS_PaddrQuery(vmPgFault->pageKVaddr)));//解锁
} else {
- OsPhysSharePageCopy(oldPaddr, &newPaddr, newPage);
+ OsPhysSharePageCopy(oldPaddr, &newPaddr, newPage);//调用之前 oldPaddr肯定不等于newPaddr
/* use old page free the new one */
- if (newPaddr == oldPaddr) {
- LOS_PhysPageFree(newPage);
+ if (newPaddr == oldPaddr) {//注意这里newPaddr可能已经被改变了,参数传入的是 &newPaddr
+ LOS_PhysPageFree(newPage);//释放新页,别浪费的内存,内核使用内存是一分钱当十块用.
newPage = NULL;
}
}
- ret = LOS_ArchMmuMap(&space->archMmu, (VADDR_T)vmPgFault->vaddr, newPaddr, 1, region->regionFlags);
+ ret = LOS_ArchMmuMap(&space->archMmu, (VADDR_T)vmPgFault->vaddr, newPaddr, 1, region->regionFlags);//把新物理地址映射给缺页的虚拟地址,这样就不会缺页啦
if (ret < 0) {
VM_ERR("LOS_ArchMmuMap failed");
ret = LOS_ERRNO_VM_NO_MEMORY;
@@ -252,7 +252,7 @@ ERR_OUT:
return ret;
}
-
+///在共享线性区写文件操作发生缺页的情况处理,因为线性区是共享的
status_t OsDoSharedFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
{
STATUS_T ret;
@@ -268,10 +268,10 @@ status_t OsDoSharedFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
return LOS_ERRNO_VM_INVALID_ARGS;
}
- ret = LOS_ArchMmuQuery(&space->archMmu, vmPgFault->vaddr, &paddr, NULL);
+ ret = LOS_ArchMmuQuery(&space->archMmu, vmPgFault->vaddr, &paddr, NULL);//查询物理地址
if (ret == LOS_OK) {
- LOS_ArchMmuUnmap(&space->archMmu, vmPgFault->vaddr, 1);
- ret = LOS_ArchMmuMap(&space->archMmu, vaddr, paddr, 1, region->regionFlags);
+ LOS_ArchMmuUnmap(&space->archMmu, vmPgFault->vaddr, 1);//先取消映射
+ ret = LOS_ArchMmuMap(&space->archMmu, vaddr, paddr, 1, region->regionFlags);//再重新映射,为啥这么干,是因为regionFlags变了,
if (ret < 0) {
VM_ERR("LOS_ArchMmuMap failed. ret=%d", ret);
return LOS_ERRNO_VM_NO_MEMORY;
@@ -279,16 +279,16 @@ status_t OsDoSharedFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
LOS_SpinLockSave(®ion->unTypeData.rf.vnode->mapping.list_lock, &intSave);
fpage = OsFindGetEntry(®ion->unTypeData.rf.vnode->mapping, vmPgFault->pgoff);
- if (fpage) {
- OsMarkPageDirty(fpage, region, 0, 0);
+ if (fpage) {//在页高速缓存(page cache)中找到了
+ OsMarkPageDirty(fpage, region, 0, 0);//标记为脏页
}
LOS_SpinUnlockRestore(®ion->unTypeData.rf.vnode->mapping.list_lock, intSave);
return LOS_OK;
}
-
+ //以下是没有映射到物理地址的处理
(VOID)LOS_MuxAcquire(®ion->unTypeData.rf.vnode->mapping.mux_lock);
- ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault);
+ ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault);//函数指针,执行的是g_commVmOps.OsVmmFileFault
if (ret == LOS_OK) {
paddr = LOS_PaddrQuery(vmPgFault->pageKVaddr);
page = LOS_VmPageGet(paddr);
@@ -319,26 +319,39 @@ status_t OsDoSharedFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
* For COW fault, pagecache is copied to private anonyous pages and the changes on this page
* won't write through to the underlying file. For SHARED fault, pagecache is mapping with
* region->arch_mmu_flags and the changes on this page will write through to the underlying file
- */
+ */ //操作文件时产生缺页中断
STATIC STATUS_T OsDoFileFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault, UINT32 flags)
{
STATUS_T ret;
- if (flags & VM_MAP_PF_FLAG_WRITE) {
- if (region->regionFlags & VM_MAP_REGION_FLAG_SHARED) {
- ret = OsDoSharedFault(region, vmPgFault);
- } else {
- ret = OsDoCowFault(region, vmPgFault);
+ if (flags & VM_MAP_PF_FLAG_WRITE) {//写页的时候产生缺页
+ if (region->regionFlags & VM_MAP_REGION_FLAG_SHARED) {//共享线性区
+ ret = OsDoSharedFault(region, vmPgFault);//写操作时的共享缺页,最复杂,此页上的更改将写入磁盘文件
+ } else {//非共享线性区
+ ret = OsDoCowFault(region, vmPgFault);//(写时拷贝技术)写操作时的私有缺页,pagecache被复制到私有的任意一个页面上,并在此页面上进行更改,不会直接写入磁盘文件
}
- } else {
- ret = OsDoReadFault(region, vmPgFault);
+ } else {//读页的时候产生缺页
+ ret = OsDoReadFault(region, vmPgFault);//页面读取操作很简单,只需共享页面缓存(节省内存)并进行读权限映射(region->arch_mmu_flags&(~arch_mmu_FLAG_PERM_WRITE))
}
return ret;
}
+/***************************************************************
+缺页中断处理程序
+通常有两种情况导致
+第一种:由编程错误引起的异常
+第二种:属于进程的地址空间范围但还尚未分配物理页框引起的异常
+***************************************************************/
+/**
+ * @brief
+ * @param vaddr
+ * @param flags
+ * @param frame
+ * @return STATUS_T
+ */
STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
{
- LosVmSpace *space = LOS_SpaceGet(vaddr);
+ LosVmSpace *space = LOS_SpaceGet(vaddr);//获取虚拟地址所属空间
LosVmMapRegion *region = NULL;
STATUS_T status;
PADDR_T oldPaddr;
@@ -354,9 +367,9 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
return status;
}
- if (((flags & VM_MAP_PF_FLAG_USER) != 0) && (!LOS_IsUserAddress(vaddr))) {
+ if (((flags & VM_MAP_PF_FLAG_USER) != 0) && (!LOS_IsUserAddress(vaddr))) {//地址保护,用户空间不允许跨界访问
VM_ERR("user space not allowed to access invalid address: %#x", vaddr);
- return LOS_ERRNO_VM_ACCESS_DENIED;
+ return LOS_ERRNO_VM_ACCESS_DENIED;//拒绝访问
}
#ifdef LOSCFG_KERNEL_PLIMITS
@@ -366,7 +379,7 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
#endif
(VOID)LOS_MuxAcquire(&space->regionMux);
- region = LOS_RegionFind(space, vaddr);
+ region = LOS_RegionFind(space, vaddr);//通过虚拟地址找到所在线性区
if (region == NULL) {
VM_ERR("region not exists, vaddr: %#x", vaddr);
status = LOS_ERRNO_VM_NOT_FOUND;
@@ -375,11 +388,11 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
status = OsVmRegionPermissionCheck(region, flags);
if (status != LOS_OK) {
- status = LOS_ERRNO_VM_ACCESS_DENIED;
+ status = LOS_ERRNO_VM_ACCESS_DENIED;//拒绝访问
goto CHECK_FAILED;
}
- if (OomCheckProcess()) {
+ if (OomCheckProcess()) {//低内存检查
/*
* under low memory, when user process request memory allocation
* it will fail, and result is LOS_NOK and current user process
@@ -389,18 +402,18 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
goto CHECK_FAILED;
}
- vaddr = ROUNDDOWN(vaddr, PAGE_SIZE);
-#ifdef LOSCFG_FS_VFS
- if (LOS_IsRegionFileValid(region)) {
+ vaddr = ROUNDDOWN(vaddr, PAGE_SIZE);//为啥要向下圆整,因为这一页要重新使用,需找到页面基地址
+#ifdef LOSCFG_FS_VFS
+ if (LOS_IsRegionFileValid(region)) {//是否为文件线性区
if (region->unTypeData.rf.vnode == NULL) {
goto CHECK_FAILED;
}
- vmPgFault.vaddr = vaddr;
- vmPgFault.pgoff = ((vaddr - region->range.base) >> PAGE_SHIFT) + region->pgOff;
+ vmPgFault.vaddr = vaddr;//虚拟地址
+ vmPgFault.pgoff = ((vaddr - region->range.base) >> PAGE_SHIFT) + region->pgOff;//计算出文件读取位置
vmPgFault.flags = flags;
- vmPgFault.pageKVaddr = NULL;
+ vmPgFault.pageKVaddr = NULL;//缺失页初始化没有物理地址
- status = OsDoFileFault(region, &vmPgFault, flags);
+ status = OsDoFileFault(region, &vmPgFault, flags);//缺页处理
if (status) {
VM_ERR("vm fault error, status=%d", status);
goto CHECK_FAILED;
@@ -408,27 +421,27 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
goto DONE;
}
#endif
-
- newPage = LOS_PhysPageAlloc();
+ //请求调页:推迟到不能再推迟为止
+ newPage = LOS_PhysPageAlloc();//分配一个新的物理页
if (newPage == NULL) {
status = LOS_ERRNO_VM_NO_MEMORY;
goto CHECK_FAILED;
}
- newPaddr = VM_PAGE_TO_PHYS(newPage);
- (VOID)memset_s(OsVmPageToVaddr(newPage), PAGE_SIZE, 0, PAGE_SIZE);
- status = LOS_ArchMmuQuery(&space->archMmu, vaddr, &oldPaddr, NULL);
- if (status >= 0) {
- LOS_ArchMmuUnmap(&space->archMmu, vaddr, 1);
- OsPhysSharePageCopy(oldPaddr, &newPaddr, newPage);
+ newPaddr = VM_PAGE_TO_PHYS(newPage);//获取物理地址
+ (VOID)memset_s(OsVmPageToVaddr(newPage), PAGE_SIZE, 0, PAGE_SIZE);//获取虚拟地址 清0
+ status = LOS_ArchMmuQuery(&space->archMmu, vaddr, &oldPaddr, NULL);//通过虚拟地址查询老物理地址
+ if (status >= 0) {//已经映射过了,@note_thinking 不是缺页吗,怎么会有页的情况?
+ LOS_ArchMmuUnmap(&space->archMmu, vaddr, 1);//解除映射关系
+ OsPhysSharePageCopy(oldPaddr, &newPaddr, newPage);//将oldPaddr的数据拷贝到newPage
/* use old page free the new one */
- if (newPaddr == oldPaddr) {
- LOS_PhysPageFree(newPage);
+ if (newPaddr == oldPaddr) {//新老物理地址一致
+ LOS_PhysPageFree(newPage);//继续使用旧页释放新页
newPage = NULL;
}
/* map all of the pages */
- status = LOS_ArchMmuMap(&space->archMmu, vaddr, newPaddr, 1, region->regionFlags);
+ status = LOS_ArchMmuMap(&space->archMmu, vaddr, newPaddr, 1, region->regionFlags);//重新映射新物理地址
if (status < 0) {
VM_ERR("failed to map replacement page, status:%d", status);
status = LOS_ERRNO_VM_MAP_FAILED;
@@ -437,10 +450,10 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame)
status = LOS_OK;
goto DONE;
- } else {
+ } else {//
/* map all of the pages */
- LOS_AtomicInc(&newPage->refCounts);
- status = LOS_ArchMmuMap(&space->archMmu, vaddr, newPaddr, 1, region->regionFlags);
+ LOS_AtomicInc(&newPage->refCounts);//引用数自增
+ status = LOS_ArchMmuMap(&space->archMmu, vaddr, newPaddr, 1, region->regionFlags);//映射新物理地址,如此下次就不会缺页了
if (status < 0) {
VM_ERR("failed to map page, status:%d", status);
status = LOS_ERRNO_VM_MAP_FAILED;
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_filemap.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_filemap.c
index e95cd826..1e396f43 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_filemap.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_filemap.c
@@ -66,98 +66,114 @@ VOID ResetPageCacheHitInfo(int *try, int *hit)
#define TRACE_TRY_CACHE()
#define TRACE_HIT_CACHE()
#endif
-
#ifdef LOSCFG_KERNEL_VM
+/**
+ * @brief
+ @verbatim
+ 增加文件页到页高速缓存(page cache)
+ LosFilePage将一个文件切成了一页一页,因为读文件过程随机seek,所以文件页也不会是连续的,
+ pgoff记录文件的位置,并确保在cache的文件数据是按顺序排列的.
+ @endverbatim
+ * @param page
+ * @param mapping
+ * @param pgoff
+ * @return STATIC
+ */
STATIC VOID OsPageCacheAdd(LosFilePage *page, struct page_mapping *mapping, VM_OFFSET_T pgoff)
{
LosFilePage *fpage = NULL;
- LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {
- if (fpage->pgoff > pgoff) {
- LOS_ListTailInsert(&fpage->node, &page->node);
+ LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {//遍历page_list链表
+ if (fpage->pgoff > pgoff) {//插入的条件,这样插入保证了按pgoff 从小到大排序
+ LOS_ListTailInsert(&fpage->node, &page->node);//等于挂到fpage节点的前面了
goto done_add;
}
}
- LOS_ListTailInsert(&mapping->page_list, &page->node);
+ LOS_ListTailInsert(&mapping->page_list, &page->node);//将页挂到文件映射的链表上,相当于挂到了最后
done_add:
- mapping->nrpages++;
+ mapping->nrpages++; //文件在缓存中多了一个 文件页
}
-
+///将页面加到活动文件页LRU链表上
VOID OsAddToPageacheLru(LosFilePage *page, struct page_mapping *mapping, VM_OFFSET_T pgoff)
{
OsPageCacheAdd(page, mapping, pgoff);
OsLruCacheAdd(page, VM_LRU_ACTIVE_FILE);
}
-
+///从页高速缓存上删除页
VOID OsPageCacheDel(LosFilePage *fpage)
{
/* delete from file cache list */
- LOS_ListDelete(&fpage->node);
- fpage->mapping->nrpages--;
+ LOS_ListDelete(&fpage->node);//将自己从链表上摘除
+ fpage->mapping->nrpages--;//文件映射的页总数减少
/* unmap and remove map info */
- if (OsIsPageMapped(fpage)) {
+ if (OsIsPageMapped(fpage)) {//是否映射过
OsUnmapAllLocked(fpage);
}
- LOS_PhysPageFree(fpage->vmPage);
+ LOS_PhysPageFree(fpage->vmPage);//释放物理内存
- LOS_MemFree(m_aucSysMem0, fpage);
+ LOS_MemFree(m_aucSysMem0, fpage);//释放文件页结构体内存
}
-
+/**************************************************************************************************
+每个进程都有自己的地址空间, 多个进程可以访问同一个LosFilePage,每个进程使用的虚拟地址都需要单独映射
+所以同一个LosFilePage会映射到多个进程空间.本函数记录页面被哪些进程映射过
+在两个地方会被被空间映射
+1.缺页中断 2.克隆地址空间
+**************************************************************************************************/
VOID OsAddMapInfo(LosFilePage *page, LosArchMmu *archMmu, VADDR_T vaddr)
{
LosMapInfo *info = NULL;
- info = (LosMapInfo *)LOS_MemAlloc(m_aucSysMem0, sizeof(LosMapInfo));
+ info = (LosMapInfo *)LOS_MemAlloc(m_aucSysMem0, sizeof(LosMapInfo));//分配一个映射信息
if (info == NULL) {
VM_ERR("OsAddMapInfo alloc memory failed!");
return;
}
- info->page = page;
- info->archMmu = archMmu;
- info->vaddr = vaddr;
+ info->page = page; //文件页
+ info->archMmu = archMmu;//进程MMU,完成虚实地址转换
+ info->vaddr = vaddr; //虚拟地址
- LOS_ListAdd(&page->i_mmap, &info->node);
- page->n_maps++;
+ LOS_ListAdd(&page->i_mmap, &info->node);//将 LosMapInfo 节点挂入链表
+ page->n_maps++;//映射总数++
}
-
+///通过虚拟地址获取文件页映射信息,archMmu每个进程都有属于自己的mmu
LosMapInfo *OsGetMapInfo(const LosFilePage *page, const LosArchMmu *archMmu, VADDR_T vaddr)
{
LosMapInfo *info = NULL;
- const LOS_DL_LIST *immap = &page->i_mmap;
+ const LOS_DL_LIST *immap = &page->i_mmap;//一个文件页被多个进程映射
- LOS_DL_LIST_FOR_EACH_ENTRY(info, immap, LosMapInfo, node) {
- if ((info->archMmu == archMmu) && (info->vaddr == vaddr) && (info->page == page)) {
+ LOS_DL_LIST_FOR_EACH_ENTRY(info, immap, LosMapInfo, node) {//遍历每个节点
+ if ((info->archMmu == archMmu) && (info->vaddr == vaddr) && (info->page == page)) {//全等时返回
return info;
}
}
-
return NULL;
}
-
+///删除页高速缓存和LRU,对应 OsAddToPageacheLru
VOID OsDeletePageCacheLru(LosFilePage *page)
{
- /* delete from lru list */
- OsLruCacheDel(page);
- /* delete from cache list and free pmm if needed */
- OsPageCacheDel(page);
+ /* delete from lru list */
+ OsLruCacheDel(page); //将页面从lru列表中删除
+ /* delete from cache list and free pmm if needed */
+ OsPageCacheDel(page); //从page缓存中删除
}
+//解除文件页和进程的映射关系
STATIC VOID OsPageCacheUnmap(LosFilePage *fpage, LosArchMmu *archMmu, VADDR_T vaddr)
{
UINT32 intSave;
LosMapInfo *info = NULL;
LOS_SpinLockSave(&fpage->physSeg->lruLock, &intSave);
- info = OsGetMapInfo(fpage, archMmu, vaddr);
+ info = OsGetMapInfo(fpage, archMmu, vaddr);//获取文件页在进程的映射信息
if (info == NULL) {
VM_ERR("OsPageCacheUnmap get map info failed!");
} else {
- OsUnmapPageLocked(fpage, info);
+ OsUnmapPageLocked(fpage, info);//解除进程和文件页映射关系
}
if (!(OsIsPageMapped(fpage) && ((fpage->flags & VM_MAP_REGION_FLAG_PERM_EXECUTE) ||
OsIsPageDirty(fpage->vmPage)))) {
@@ -166,7 +182,7 @@ STATIC VOID OsPageCacheUnmap(LosFilePage *fpage, LosArchMmu *archMmu, VADDR_T va
LOS_SpinUnlockRestore(&fpage->physSeg->lruLock, intSave);
}
-
+///删除文件
VOID OsVmmFileRemove(LosVmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T pgoff)
{
UINT32 intSave;
@@ -179,31 +195,31 @@ VOID OsVmmFileRemove(LosVmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T pg
LosVmPage *mapPage = NULL;
if (!LOS_IsRegionFileValid(region) || (region->unTypeData.rf.vnode == NULL)) {
- return;
+ return;//判断是否为文件映射,是否已map
}
vnode = region->unTypeData.rf.vnode;
mapping = &vnode->mapping;
- vaddr = region->range.base + ((UINT32)(pgoff - region->pgOff) << PAGE_SHIFT);
+ vaddr = region->range.base + ((UINT32)(pgoff - region->pgOff) << PAGE_SHIFT);//得到虚拟地址
- status_t status = LOS_ArchMmuQuery(archMmu, vaddr, &paddr, NULL);
+ status_t status = LOS_ArchMmuQuery(archMmu, vaddr, &paddr, NULL);//获取物理地址
if (status != LOS_OK) {
return;
}
- mapPage = LOS_VmPageGet(paddr);
+ mapPage = LOS_VmPageGet(paddr);//获取物理页框
/* is page is in cache list */
LOS_SpinLockSave(&mapping->list_lock, &intSave);
- fpage = OsFindGetEntry(mapping, pgoff);
+ fpage = OsFindGetEntry(mapping, pgoff);//获取fpage
/* no cache or have cache but not map(cow), free it direct */
- if ((fpage == NULL) || (fpage->vmPage != mapPage)) {
- LOS_PhysPageFree(mapPage);
- LOS_ArchMmuUnmap(archMmu, vaddr, 1);
+ if ((fpage == NULL) || (fpage->vmPage != mapPage)) {//没有缓存或有缓存但没有映射(cow),直接释放它
+ LOS_PhysPageFree(mapPage);//释放物理页框
+ LOS_ArchMmuUnmap(archMmu, vaddr, 1);//取消虚拟地址的映射
/* this is a page cache map! */
} else {
- OsPageCacheUnmap(fpage, archMmu, vaddr);
- if (OsIsPageDirty(fpage->vmPage)) {
- tmpPage = OsDumpDirtyPage(fpage);
+ OsPageCacheUnmap(fpage, archMmu, vaddr);//取消缓存中的映射
+ if (OsIsPageDirty(fpage->vmPage)) {//脏页处理
+ tmpPage = OsDumpDirtyPage(fpage);//dump 脏页
}
}
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
@@ -213,15 +229,15 @@ VOID OsVmmFileRemove(LosVmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T pg
}
return;
}
-
+///标记page为脏页 进程修改了高速缓存里的数据时,该页就被内核标记为脏页
VOID OsMarkPageDirty(LosFilePage *fpage, const LosVmMapRegion *region, INT32 off, INT32 len)
{
if (region != NULL) {
- OsSetPageDirty(fpage->vmPage);
- fpage->dirtyOff = off;
- fpage->dirtyEnd = len;
+ OsSetPageDirty(fpage->vmPage);//设置为脏页
+ fpage->dirtyOff = off;//脏页偏移位置
+ fpage->dirtyEnd = len;//脏页结束位置
} else {
- OsSetPageDirty(fpage->vmPage);
+ OsSetPageDirty(fpage->vmPage);//设置为脏页
if ((off + len) > fpage->dirtyEnd) {
fpage->dirtyEnd = off + len;
}
@@ -258,22 +274,22 @@ STATIC UINT32 GetDirtySize(LosFilePage *fpage, struct Vnode *vnode)
return PAGE_SIZE;
}
-
+///冲洗脏页,回写磁盘
STATIC INT32 OsFlushDirtyPage(LosFilePage *fpage)
{
UINT32 ret;
size_t len;
char *buff = NULL;
- struct Vnode *vnode = fpage->mapping->host;
+ struct Vnode *vnode = fpage->mapping->host;/* owner of this mapping */ //此映射属于哪个文件,注意是1:1的关系.
if (vnode == NULL) {
VM_ERR("page cache vnode error");
return LOS_NOK;
}
- len = fpage->dirtyEnd - fpage->dirtyOff;
+ len = fpage->dirtyEnd - fpage->dirtyOff;//计算出脏数据长度
len = (len == 0) ? GetDirtySize(fpage, vnode) : len;
- if (len == 0) {
- OsCleanPageDirty(fpage->vmPage);
+ if (len == 0) {//没有脏数据
+ OsCleanPageDirty(fpage->vmPage);//页面取消脏标签
return LOS_OK;
}
@@ -290,7 +306,7 @@ STATIC INT32 OsFlushDirtyPage(LosFilePage *fpage)
return ret;
}
-
+///备份脏页,老脏页撕掉脏页标签
LosFilePage *OsDumpDirtyPage(LosFilePage *oldFPage)
{
LosFilePage *newFPage = NULL;
@@ -302,11 +318,11 @@ LosFilePage *OsDumpDirtyPage(LosFilePage *oldFPage)
}
OsCleanPageDirty(oldFPage->vmPage);
- (VOID)memcpy_s(newFPage, sizeof(LosFilePage), oldFPage, sizeof(LosFilePage));
+ (VOID)memcpy_s(newFPage, sizeof(LosFilePage), oldFPage, sizeof(LosFilePage));//直接内存拷贝
return newFPage;
}
-
+///冲洗脏页数据,将脏页数据回写磁盘
VOID OsDoFlushDirtyPage(LosFilePage *fpage)
{
if (fpage == NULL) {
@@ -328,7 +344,7 @@ STATIC VOID OsReleaseFpage(struct page_mapping *mapping, LosFilePage *fpage)
LOS_SpinUnlockRestore(lruLock, lruSave);
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
}
-
+///删除映射信息
VOID OsDelMapInfo(LosVmMapRegion *region, LosVmPgFault *vmf, BOOL cleanDirty)
{
UINT32 intSave;
@@ -349,9 +365,9 @@ VOID OsDelMapInfo(LosVmMapRegion *region, LosVmPgFault *vmf, BOOL cleanDirty)
}
if (cleanDirty) {
- OsCleanPageDirty(fpage->vmPage);
+ OsCleanPageDirty(fpage->vmPage);//恢复干净页
}
- info = OsGetMapInfo(fpage, ®ion->space->archMmu, (vaddr_t)vmf->vaddr);
+ info = OsGetMapInfo(fpage, ®ion->space->archMmu, (vaddr_t)vmf->vaddr);//通过虚拟地址获取映射信息
if (info != NULL) {
fpage->n_maps--;
LOS_ListDelete(&info->node);
@@ -362,7 +378,10 @@ VOID OsDelMapInfo(LosVmMapRegion *region, LosVmPgFault *vmf, BOOL cleanDirty)
}
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
}
-
+/*!
+文件缺页时的处理,先读入磁盘数据,再重新读页数据
+被 OsDoReadFault(...),OsDoCowFault(...),OsDoSharedFault(...) 等调用
+*/
INT32 OsVmmFileFault(LosVmMapRegion *region, LosVmPgFault *vmf)
{
INT32 ret;
@@ -374,7 +393,7 @@ INT32 OsVmmFileFault(LosVmMapRegion *region, LosVmPgFault *vmf)
struct page_mapping *mapping = NULL;
LosFilePage *fpage = NULL;
- if (!LOS_IsRegionFileValid(region) || (region->unTypeData.rf.vnode == NULL) || (vmf == NULL)) {
+ if (!LOS_IsRegionFileValid(region) || (region->unTypeData.rf.vnode == NULL) || (vmf == NULL)) {//文件是否映射到了内存
VM_ERR("Input param is NULL");
return LOS_NOK;
}
@@ -383,26 +402,26 @@ INT32 OsVmmFileFault(LosVmMapRegion *region, LosVmPgFault *vmf)
/* get or create a new cache node */
LOS_SpinLockSave(&mapping->list_lock, &intSave);
- fpage = OsFindGetEntry(mapping, vmf->pgoff);
+ fpage = OsFindGetEntry(mapping, vmf->pgoff);//获取文件页
TRACE_TRY_CACHE();
- if (fpage != NULL) {
+ if (fpage != NULL) {//找到了,说明该页已经在页高速缓存中
TRACE_HIT_CACHE();
OsPageRefIncLocked(fpage);
- } else {
- fpage = OsPageCacheAlloc(mapping, vmf->pgoff);
+ } else {//真的缺页了,页高速缓存中没找到
+ fpage = OsPageCacheAlloc(mapping, vmf->pgoff);//分配一个文件页,将数据初始化好,包括vmpage(物理页框)
if (fpage == NULL) {
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
VM_ERR("Failed to alloc a page frame");
return LOS_NOK;
}
- newCache = true;
+ newCache = true;//分配了新文件页
}
- OsSetPageLocked(fpage->vmPage);
+ OsSetPageLocked(fpage->vmPage);//对vmpage上锁
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
- kvaddr = OsVmPageToVaddr(fpage->vmPage);
+ kvaddr = OsVmPageToVaddr(fpage->vmPage);//获取该页框在内核空间的虚拟地址,因为 page cache本身就是在内核空间,
/* read file to new page cache */
- if (newCache) {
+ if (newCache) {//新cache
ret = vnode->vop->ReadPage(vnode, kvaddr, fpage->pgoff << PAGE_SHIFT);
if (ret == 0) {
VM_ERR("Failed to read from file!");
@@ -410,32 +429,32 @@ INT32 OsVmmFileFault(LosVmMapRegion *region, LosVmPgFault *vmf)
return LOS_NOK;
}
LOS_SpinLockSave(&mapping->list_lock, &intSave);
- OsAddToPageacheLru(fpage, mapping, vmf->pgoff);
+ OsAddToPageacheLru(fpage, mapping, vmf->pgoff);//将fpage挂入pageCache 和 LruCache
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
}
LOS_SpinLockSave(&mapping->list_lock, &intSave);
/* cow fault case no need to save mapinfo */
if (!((vmf->flags & VM_MAP_PF_FLAG_WRITE) && !(region->regionFlags & VM_MAP_REGION_FLAG_SHARED))) {
- OsAddMapInfo(fpage, ®ion->space->archMmu, (vaddr_t)vmf->vaddr);
+ OsAddMapInfo(fpage, ®ion->space->archMmu, (vaddr_t)vmf->vaddr);//添加<虚拟地址,文件页>的映射关系,如此进程以后就能通过虚拟地址操作文件页了.
fpage->flags = region->regionFlags;
}
/* share page fault, mark the page dirty */
- if ((vmf->flags & VM_MAP_PF_FLAG_WRITE) && (region->regionFlags & VM_MAP_REGION_FLAG_SHARED)) {
- OsMarkPageDirty(fpage, region, 0, 0);
+ if ((vmf->flags & VM_MAP_PF_FLAG_WRITE) && (region->regionFlags & VM_MAP_REGION_FLAG_SHARED)) {//有过写操作或者为共享线性区
+ OsMarkPageDirty(fpage, region, 0, 0);//标记为脏页,要回写磁盘,内核会在适当的时候回写磁盘
}
- vmf->pageKVaddr = kvaddr;
+ vmf->pageKVaddr = kvaddr;//在缺页信息中记录文件页的内核虚拟地址
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
return LOS_OK;
}
-
+///文件缓存冲洗,把所有fpage冲洗一边,把脏页洗到dirtyList中,配合OsFileCacheRemove理解
VOID OsFileCacheFlush(struct page_mapping *mapping)
{
UINT32 intSave;
UINT32 lruLock;
- LOS_DL_LIST_HEAD(dirtyList);
+ LOS_DL_LIST_HEAD(dirtyList);//LOS_DL_LIST list = { &(list), &(list) };
LosFilePage *ftemp = NULL;
LosFilePage *fpage = NULL;
@@ -443,70 +462,77 @@ VOID OsFileCacheFlush(struct page_mapping *mapping)
return;
}
LOS_SpinLockSave(&mapping->list_lock, &intSave);
- LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {
+ LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {//循环从page_list中取node给fpage
LOS_SpinLockSave(&fpage->physSeg->lruLock, &lruLock);
- if (OsIsPageDirty(fpage->vmPage)) {
- ftemp = OsDumpDirtyPage(fpage);
+ if (OsIsPageDirty(fpage->vmPage)) {//是否为脏页
+ ftemp = OsDumpDirtyPage(fpage);//这里挺妙的,copy出一份新页,老页变成了非脏页继续用
if (ftemp != NULL) {
- LOS_ListTailInsert(&dirtyList, &ftemp->node);
+ LOS_ListTailInsert(&dirtyList, &ftemp->node);//将新页插入脏页List,等待回写磁盘
}
}
LOS_SpinUnlockRestore(&fpage->physSeg->lruLock, lruLock);
}
LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
- LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, ftemp, &dirtyList, LosFilePage, node) {
- OsDoFlushDirtyPage(fpage);
+ LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, ftemp, &dirtyList, LosFilePage, node) {//仔细看这个宏,关键在 &(item)->member != (list);
+ OsDoFlushDirtyPage(fpage);//立马洗掉,所以dirtyList可以不是全局变量
}
}
+/******************************************************************************
+ 删除文件缓存,清空文件在page cache的痕迹
+ 参数 mapping 可理解为文件在内存的身份证
+******************************************************************************/
VOID OsFileCacheRemove(struct page_mapping *mapping)
{
UINT32 intSave;
UINT32 lruSave;
SPIN_LOCK_S *lruLock = NULL;
- LOS_DL_LIST_HEAD(dirtyList);
+ LOS_DL_LIST_HEAD(dirtyList);//定义一个叫dirtyList的双循环链表并初始化,用于挂脏页
LosFilePage *ftemp = NULL;
LosFilePage *fpage = NULL;
LosFilePage *fnext = NULL;
- LOS_SpinLockSave(&mapping->list_lock, &intSave);
- LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &mapping->page_list, LosFilePage, node) {
+ LOS_SpinLockSave(&mapping->list_lock, &intSave);//多进程操作,必须上锁.
+ LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &mapping->page_list, LosFilePage, node) {//遍历文件在内存中产生的所有文件页(例如1,4,8页)不一定连续,取决于用户的读取顺序
lruLock = &fpage->physSeg->lruLock;
- LOS_SpinLockSave(lruLock, &lruSave);
- if (OsIsPageDirty(fpage->vmPage)) {
- ftemp = OsDumpDirtyPage(fpage);
- if (ftemp != NULL) {
- LOS_ListTailInsert(&dirtyList, &ftemp->node);
+ LOS_SpinLockSave(lruLock, &lruSave);//@note_why 自旋锁有必要从这里开始上锁吗?
+ if (OsIsPageDirty(fpage->vmPage)) {//数据是脏页吗,脏页就是被修改过数据的页
+ ftemp = OsDumpDirtyPage(fpage);//做这个拷贝动作是为了fpage的统一下线,因为数据回写磁盘的速度是很慢的,如果直接在这里处理脏数据
+ if (ftemp != NULL) {//会导致函数持有mapping->list_lock自旋锁的时间太长了,影响其他CPU的处理效率
+ LOS_ListTailInsert(&dirtyList, &ftemp->node);//将临时脏页挂到记录脏页链表上
}
}
- OsDeletePageCacheLru(fpage);
+ OsDeletePageCacheLru(fpage);//删除高速缓存和从置换链表中下线
LOS_SpinUnlockRestore(lruLock, lruSave);
}
- LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
+ LOS_SpinUnlockRestore(&mapping->list_lock, intSave);//恢复自旋锁,不能让别的CPU等太久
- LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &dirtyList, LosFilePage, node) {
- OsDoFlushDirtyPage(fpage);
+ LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &dirtyList, LosFilePage, node) {//到这里,再来慢慢的统一处理脏页数据
+ OsDoFlushDirtyPage(fpage);//遍历脏页链表,一页一页处理
}
}
-
-LosVmFileOps g_commVmOps = {
+///虚拟内存文件操作实现类
+LosVmFileOps g_commVmOps = {//
.open = NULL,
.close = NULL,
- .fault = OsVmmFileFault,
- .remove = OsVmmFileRemove,
+ .fault = OsVmmFileFault, //缺页中断处理
+ .remove = OsVmmFileRemove,//删除页
};
-
+//文件映射
INT32 OsVfsFileMmap(struct file *filep, LosVmMapRegion *region)
{
- region->unTypeData.rf.vmFOps = &g_commVmOps;
+ region->unTypeData.rf.vmFOps = &g_commVmOps;//文件操作
region->unTypeData.rf.vnode = filep->f_vnode;
region->unTypeData.rf.f_oflags = filep->f_oflags;
return ENOERR;
}
-
+/*!
+ 有名映射,可理解为文件映射,跟匿名映射相对应
+ 参数filep是广义的文件,在鸿蒙内核,目录/普通文件/字符设备/块设备/网络套接字/管道/链接 都是文件
+*/
STATUS_T OsNamedMMap(struct file *filep, LosVmMapRegion *region)
{
struct Vnode *vnode = NULL;
@@ -519,10 +545,10 @@ STATUS_T OsNamedMMap(struct file *filep, LosVmMapRegion *region)
vnode->useCount++;
VnodeDrop();
if (filep->ops != NULL && filep->ops->mmap != NULL) {
- if (vnode->type == VNODE_TYPE_CHR || vnode->type == VNODE_TYPE_BLK) {
- LOS_SetRegionTypeDev(region);
+ if (vnode->type == VNODE_TYPE_CHR || vnode->type == VNODE_TYPE_BLK) {//块设备或者字符设备 /dev/..
+ LOS_SetRegionTypeDev(region);//设置为设备类型
} else {
- LOS_SetRegionTypeFile(region);
+ LOS_SetRegionTypeFile(region);//设置为文件类型
}
int ret = filep->ops->mmap(filep, region);
if (ret != LOS_OK) {
@@ -538,17 +564,21 @@ STATUS_T OsNamedMMap(struct file *filep, LosVmMapRegion *region)
return LOS_OK;
}
+/**************************************************************************************************
+ 通过位置从文件映射页中找到的指定的文件页
+ 举例:mapping->page_list上节点的数据可能只是文件 1,3,4,6 页的数据,此时来找文件第5页的数据就会找不到
+**************************************************************************************************/
LosFilePage *OsFindGetEntry(struct page_mapping *mapping, VM_OFFSET_T pgoff)
{
LosFilePage *fpage = NULL;
- LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {
- if (fpage->pgoff == pgoff) {
+ LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {//遍历文件页
+ if (fpage->pgoff == pgoff) {//找到指定的页,
return fpage;
}
- if (fpage->pgoff > pgoff) {
- break;
+ if (fpage->pgoff > pgoff) {//大于之前还没有找到,说明不在链表中,往后的也不用找了,
+ break;//因为 mapping->page_list节点上的数据都是按 fpage->pgoff 从小到大的顺序排列的.
}
}
@@ -556,6 +586,11 @@ LosFilePage *OsFindGetEntry(struct page_mapping *mapping, VM_OFFSET_T pgoff)
}
/* need mutex & change memory to dma zone. */
+/*!
+以页高速缓存方式分配一个文件页 LosFilePage
+ Direct Memory Access(存储器直接访问)指一种高速的数据传输操作,允许在外部设备和存储器之间直接读写数据。
+ 整个数据传输操作在一个称为"DMA控制器"的控制下进行的。CPU只需在数据传输开始和结束时做一点处理(开始和结束时候要做中断处理)
+*/
LosFilePage *OsPageCacheAlloc(struct page_mapping *mapping, VM_OFFSET_T pgoff)
{
VOID *kvaddr = NULL;
@@ -563,39 +598,39 @@ LosFilePage *OsPageCacheAlloc(struct page_mapping *mapping, VM_OFFSET_T pgoff)
LosVmPage *vmPage = NULL;
LosFilePage *fpage = NULL;
- vmPage = LOS_PhysPageAlloc();
+ vmPage = LOS_PhysPageAlloc(); //先分配一个物理页
if (vmPage == NULL) {
VM_ERR("alloc vm page failed");
return NULL;
}
- physSeg = OsVmPhysSegGet(vmPage);
- kvaddr = OsVmPageToVaddr(vmPage);
+ physSeg = OsVmPhysSegGet(vmPage);//通过页获取所在seg
+ kvaddr = OsVmPageToVaddr(vmPage);//获取内核空间的虚拟地址,具体点进去看函数说明,这里一定要理解透彻!
if ((physSeg == NULL) || (kvaddr == NULL)) {
- LOS_PhysPageFree(vmPage);
+ LOS_PhysPageFree(vmPage); //异常情况要释放vmPage
VM_ERR("alloc vm page failed!");
return NULL;
}
- fpage = (LosFilePage *)LOS_MemAlloc(m_aucSysMem0, sizeof(LosFilePage));
+ fpage = (LosFilePage *)LOS_MemAlloc(m_aucSysMem0, sizeof(LosFilePage));//从内存池中分配一个filePage
if (fpage == NULL) {
- LOS_PhysPageFree(vmPage);
+ LOS_PhysPageFree(vmPage); //异常情况要释放vmPage
VM_ERR("Failed to allocate for page!");
return NULL;
}
- (VOID)memset_s((VOID *)fpage, sizeof(LosFilePage), 0, sizeof(LosFilePage));
+ (VOID)memset_s((VOID *)fpage, sizeof(LosFilePage), 0, sizeof(LosFilePage));//调标准库函数 置0
- LOS_ListInit(&fpage->i_mmap);
- LOS_ListInit(&fpage->node);
- LOS_ListInit(&fpage->lru);
- fpage->n_maps = 0;
- fpage->dirtyOff = PAGE_SIZE;
- fpage->dirtyEnd = 0;
- fpage->physSeg = physSeg;
- fpage->vmPage = vmPage;
- fpage->mapping = mapping;
- fpage->pgoff = pgoff;
- (VOID)memset_s(kvaddr, PAGE_SIZE, 0, PAGE_SIZE);
+ LOS_ListInit(&fpage->i_mmap); //初始化映射,链表上挂 MapInfo
+ LOS_ListInit(&fpage->node); //节点初始化
+ LOS_ListInit(&fpage->lru); //LRU初始化
+ fpage->n_maps = 0; //映射次数
+ fpage->dirtyOff = PAGE_SIZE; //默认页尾部,相当于没有脏数据
+ fpage->dirtyEnd = 0; //脏页结束位置
+ fpage->physSeg = physSeg; //页框所属段.其中包含了 LRU LIST ==
+ fpage->vmPage = vmPage; //物理页框
+ fpage->mapping = mapping; //记录所有文件页映射
+ fpage->pgoff = pgoff; //将文件切成一页页,页标
+ (VOID)memset_s(kvaddr, PAGE_SIZE, 0, PAGE_SIZE);//页内数据清0
return fpage;
}
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_iomap.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_iomap.c
index a313da14..17898e5b 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_iomap.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_iomap.c
@@ -1,3 +1,49 @@
+/*!
+ * @file los_vm_iomap.c
+ * @brief DMA(直接内存访问)内存的分配、释放与地址转换实现
+ * @link
+ @verbatim
+ 直接内存访问
+ 直接内存访问(Direct Memory Access,DMA)是计算机科学中的一种内存访问技术。它允许某些电脑内部的
+ 硬件子系统(电脑外设),可以独立地直接读写系统内存,而不需中央处理器(CPU)介入处理
+ 在同等程度的处理器负担下,DMA是一种快速的数据传送方式。很多硬件的系统会使用DMA,包含硬盘控制器、
+ 绘图显卡、网卡和声卡。
+
+ DMA是所有现代电脑的重要特色,它允许不同速度的硬件设备来沟通,而不需要依于中央处理器的大量中断负载。
+ 否则,中央处理器需要从来源把每一片段的资料复制到寄存器,然后把它们再次写回到新的地方。在这个时间中,
+ 中央处理器对于其他的工作来说就无法使用。
+
+ DMA传输常使用在将一个内存区从一个设备复制到另外一个。当中央处理器初始化这个传输动作,传输动作本身是
+ 由DMA控制器来实行和完成。典型的例子就是移动一个外部内存的区块到芯片内部更快的内存去。像是这样的操作
+ 并没有让处理器工作拖延,使其可以被重新调度去处理其他的工作。DMA传输对于高性能嵌入式系统算法和网络是
+ 很重要的。 举个例子,个人电脑的ISA DMA控制器拥有8个DMA通道,其中的7个通道是可以让计算机的中央处理器所利用。
+ 每一个DMA通道有一个16位地址寄存器和一个16位计数寄存器。要初始化资料传输时,设备驱动程序一起设置DMA通道的
+ 地址和计数寄存器,以及资料传输的方向,读取或写入。然后指示DMA硬件开始这个传输动作。当传输结束的时候,
+ 设备就会以中断的方式通知中央处理器。
+
+ "分散-收集"(Scatter-gather)DMA允许在一次单一的DMA处理中传输资料到多个内存区域。相当于把多个简单的DMA要求
+ 串在一起。同样,这样做的目的是要减轻中央处理器的多次输出输入中断和资料复制任务。
+ DRQ意为DMA要求;DACK意为DMA确认。这些符号一般在有DMA功能的电脑系统硬件概要上可以看到。
+ 它们表示了介于中央处理器和DMA控制器之间的电子信号传输线路。
+
+ 缓存一致性问题
+ DMA会导致缓存一致性问题。想像中央处理器带有缓存与外部内存的情况,DMA的运作则是去访问外部内存,
+ 当中央处理器访问外部内存某个地址的时候,暂时先将新的值写入缓存中,但并未将外部内存的资料更新,
+ 若在缓存中的资料尚未更新到外部内存前发生了DMA,则DMA过程将会读取到未更新的资料。
+ 相同的,如果外部设备写入新的值到外部内存内,则中央处理器若访问缓存时则会访问到尚未更新的资料。
+ 这些问题可以用两种方法来解决:
+
+ 缓存同调系统(Cache-coherent system):以硬件方法来完成,当外部设备写入内存时以一个信号来通知
+ 缓存控制器某内存地址的值已经过期或是应该更新资料。
+ 非同调系统(Non-coherent system):以软件方法来完成,操作系统必须确认缓存读取时,DMA程序已经
+ 开始或是禁止DMA发生。
+ 第二种的方法会造成DMA的系统负担。
+ @endverbatim
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2022-04-02
+ */
+
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
@@ -36,7 +82,7 @@
#include "los_vm_map.h"
#include "los_memory.h"
-
+/// 分配DMA空间
VOID *LOS_DmaMemAlloc(DMA_ADDR_T *dmaAddr, size_t size, size_t align, enum DmaMemType type)
{
VOID *kVaddr = NULL;
@@ -51,9 +97,9 @@ VOID *LOS_DmaMemAlloc(DMA_ADDR_T *dmaAddr, size_t size, size_t align, enum DmaMe
}
#ifdef LOSCFG_KERNEL_VM
- kVaddr = LOS_KernelMallocAlign(size, align);
+ kVaddr = LOS_KernelMallocAlign(size, align);//不走内存池方式, 直接申请物理页
#else
- kVaddr = LOS_MemAllocAlign(OS_SYS_MEM_ADDR, size, align);
+ kVaddr = LOS_MemAllocAlign(OS_SYS_MEM_ADDR, size, align);//从内存池中申请
#endif
if (kVaddr == NULL) {
VM_ERR("failed, size = %u, align = %u", size, align);
@@ -61,16 +107,16 @@ VOID *LOS_DmaMemAlloc(DMA_ADDR_T *dmaAddr, size_t size, size_t align, enum DmaMe
}
if (dmaAddr != NULL) {
- *dmaAddr = (DMA_ADDR_T)LOS_PaddrQuery(kVaddr);
+ *dmaAddr = (DMA_ADDR_T)LOS_PaddrQuery(kVaddr);//查询物理地址, DMA直接将数据灌到物理地址
}
- if (type == DMA_NOCACHE) {
+ if (type == DMA_NOCACHE) {//无缓存模式 , 计算新的虚拟地址
kVaddr = (VOID *)VMM_TO_UNCACHED_ADDR((UINTPTR)kVaddr);
}
return kVaddr;
}
-
+/// 释放 DMA指针
VOID LOS_DmaMemFree(VOID *vaddr)
{
UINTPTR addr;
@@ -79,13 +125,13 @@ VOID LOS_DmaMemFree(VOID *vaddr)
return;
}
addr = (UINTPTR)vaddr;
-
+ // 未缓存区
if ((addr >= UNCACHED_VMM_BASE) && (addr < UNCACHED_VMM_BASE + UNCACHED_VMM_SIZE)) {
- addr = UNCACHED_TO_VMM_ADDR(addr);
+ addr = UNCACHED_TO_VMM_ADDR(addr); //将未缓存区地址转换回内核虚拟(缓存)地址
#ifdef LOSCFG_KERNEL_VM
- LOS_KernelFree((VOID *)addr);
+ LOS_KernelFree((VOID *)addr);//
#else
- LOS_MemFree(OS_SYS_MEM_ADDR, (VOID *)addr);
+ LOS_MemFree(OS_SYS_MEM_ADDR, (VOID *)addr);//内存池方式释放
#endif
} else if ((addr >= KERNEL_VMM_BASE) && (addr < KERNEL_VMM_BASE + KERNEL_VMM_SIZE)) {
#ifdef LOSCFG_KERNEL_VM
@@ -98,7 +144,7 @@ VOID LOS_DmaMemFree(VOID *vaddr)
}
return;
}
-
+/// 将DMA虚拟地址转成物理地址
DMA_ADDR_T LOS_DmaVaddrToPaddr(VOID *vaddr)
{
return (DMA_ADDR_T)LOS_PaddrQuery(vaddr);
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_map.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_map.c
index 5d4f62b3..c18c751b 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_map.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_map.c
@@ -1,6 +1,54 @@
+/*!
+ * @file los_vm_map.c
+ * @brief 虚拟内存管理
+ * @link vm http://weharmonyos.com/openharmony/zh-cn/device-dev/kernel/kernel-small-basic-memory-virtual.html @endlink
+ @verbatim
+基本概念
+ 虚拟内存管理是计算机系统管理内存的一种技术。每个进程都有连续的虚拟地址空间,虚拟地址空间的大小由CPU的位数决定,
+ 32位的硬件平台可以提供的最大的寻址空间为0-4GiB。整个4GiB空间分成两部分,LiteOS-A内核占据3GiB的高地址空间,
+ 1GiB的低地址空间留给进程使用。各个进程空间的虚拟地址空间是独立的,代码、数据互不影响。
+
+ 系统将虚拟内存分割为称为虚拟页的内存块,大小一般为4KiB或64KiB,LiteOS-A内核默认的页的大小是4KiB,
+ 根据需要可以对MMU(Memory Management Units)进行配置。虚拟内存管理操作的最小单位就是一个页,
+ LiteOS-A内核中一个虚拟地址区间region包含地址连续的多个虚拟页,也可只有一个页。同样,物理内存也会按照页大小进行分割,
+ 分割后的每个内存块称为页帧。虚拟地址空间划分:内核态占高地址3GiB(0x40000000 ~ 0xFFFFFFFF),
+ 用户态占低地址1GiB(0x01000000 ~ 0x3F000000),具体见下表,详细可以查看或配置los_vm_zone.h。
+
+内核态地址规划:
+ Zone名称 描述 属性
+ ----------------------------------------------------------------------------
+ DMA zone 供IO设备的DMA使用。 Uncache
+
+ Normal zone 加载内核代码段、数据段、堆和栈的地址区间。 Cache
+
+ high mem zone可以分配连续的虚拟内存,但其所映射的物理内存不一定连续。Cache
+
+用户态地址规划:
+ Zone名称 描述 属性
+ ----------------------------------------------------------------------------
+ 代码段 用户态代码段地址区间。 Cache
+ 堆 用户态堆地址区间。 Cache
+ 栈 用户态栈地址区间。 Cache
+ 共享库 用于加载用户态共享库的地址区间,包括mmap所映射的区间。 Cache
+
+运行机制
+ 虚拟内存管理中,虚拟地址空间是连续的,但是其映射的物理内存并不一定是连续的,如下图所示。
+ 可执行程序加载运行,CPU访问虚拟地址空间的代码或数据时存在两种情况:
+
+ 1. CPU访问的虚拟地址所在的页,如V0,已经与具体的物理页P0做映射,CPU通过找到进程对应的页表条目(详见虚实映射),
+ 根据页表条目中的物理地址信息访问物理内存中的内容并返回。
+ 2. CPU访问的虚拟地址所在的页,如V2,没有与具体的物理页做映射,系统会触发缺页异常,系统申请一个物理页,
+ 并把相应的信息拷贝到物理页中,并且把物理页的起始地址更新到页表条目中。此时CPU重新执行访问虚拟内存的指令
+ 便能够访问到具体的代码或数据。
+
+ @endverbatim
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-11-25
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
- * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
+ * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -51,57 +99,65 @@
#ifdef LOSCFG_KERNEL_VM
-#define VM_MAP_WASTE_MEM_LEVEL (PAGE_SIZE >> 2)
-LosMux g_vmSpaceListMux;
-LOS_DL_LIST_HEAD(g_vmSpaceList);
-LosVmSpace g_kVmSpace;
-LosVmSpace g_vMallocSpace;
+#define VM_MAP_WASTE_MEM_LEVEL (PAGE_SIZE >> 2) ///< 浪费内存大小(1K)
+LosMux g_vmSpaceListMux; ///< 用于锁g_vmSpaceList的互斥量
+LOS_DL_LIST_HEAD(g_vmSpaceList); ///< 初始化全局虚拟空间节点,所有虚拟空间都挂到此节点上.
+LosVmSpace g_kVmSpace; ///< 内核非分配空间,用于内核运行栈,代码区,数据区
+LosVmSpace g_vMallocSpace; ///< 内核分配空间,用于内核分配内存
+/************************************************************
+* 获取进程空间系列接口
+************************************************************/
+/// 获取当前进程空间结构体指针
LosVmSpace *LOS_CurrSpaceGet(VOID)
{
return OsCurrProcessGet()->vmSpace;
}
-
+/// 获取虚拟地址对应的进程空间结构体指针
LosVmSpace *LOS_SpaceGet(VADDR_T vaddr)
{
- if (LOS_IsKernelAddress(vaddr)) {
- return LOS_GetKVmSpace();
- } else if (LOS_IsUserAddress(vaddr)) {
+ if (LOS_IsKernelAddress(vaddr)) { //是否为内核空间
+ return LOS_GetKVmSpace(); //获取内核空间
+ } else if (LOS_IsUserAddress(vaddr)) {//是否为用户空间
return LOS_CurrSpaceGet();
- } else if (LOS_IsVmallocAddress(vaddr)) {
- return LOS_GetVmallocSpace();
+ } else if (LOS_IsVmallocAddress(vaddr)) {//是否为内核分配空间
+ return LOS_GetVmallocSpace();//获取内核分配空间
} else {
return NULL;
}
}
-
+///内核空间只有g_kVmSpace一个,所有的内核进程都共用一个内核空间
LosVmSpace *LOS_GetKVmSpace(VOID)
{
return &g_kVmSpace;
}
-
+///获取进程空间链表指针 g_vmSpaceList中挂的是进程空间 g_kVmSpace, g_vMallocSpace,所有用户进程的空间(独有一个进程空间)
LOS_DL_LIST *LOS_GetVmSpaceList(VOID)
{
return &g_vmSpaceList;
}
-
+///获取内核堆空间的全局变量
LosVmSpace *LOS_GetVmallocSpace(VOID)
{
return &g_vMallocSpace;
}
+/************************************************************
+* 虚拟地址区间region相关的操作
+************************************************************/
+///释放挂在红黑树上节点,等于释放了线性区
ULONG_T OsRegionRbFreeFn(LosRbNode *pstNode)
{
LOS_MemFree(m_aucSysMem0, pstNode);
return LOS_OK;
}
-
+///通过红黑树节点找到对应的线性区
VOID *OsRegionRbGetKeyFn(LosRbNode *pstNode)
{
LosVmMapRegion *region = (LosVmMapRegion *)LOS_DL_LIST_ENTRY(pstNode, LosVmMapRegion, rbNode);
return (VOID *)®ion->range;
}
-
+///比较两个红黑树节点
ULONG_T OsRegionRbCmpKeyFn(const VOID *pNodeKeyA, const VOID *pNodeKeyB)
{
LosVmMapRange rangeA = *(LosVmMapRange *)pNodeKeyA;
@@ -111,129 +167,146 @@ ULONG_T OsRegionRbCmpKeyFn(const VOID *pNodeKeyA, const VOID *pNodeKeyB)
UINT32 startB = rangeB.base;
UINT32 endB = rangeB.base + rangeB.size - 1;
- if (startA > endB) {
- return RB_BIGGER;
+ if (startA > endB) {// A基地址大于B的结束地址
+ return RB_BIGGER; //说明线性区A更大,在右边
} else if (startA >= startB) {
if (endA <= endB) {
- return RB_EQUAL;
+ return RB_EQUAL; //相等,说明 A在B中
} else {
- return RB_BIGGER;
+ return RB_BIGGER; //说明 A的结束地址更大
}
- } else if (startA <= startB) {
+ } else if (startA <= startB) { //A基地址小于等于B的基地址
if (endA >= endB) {
- return RB_EQUAL;
+ return RB_EQUAL; //相等 说明 B在A中
} else {
- return RB_SMALLER;
+ return RB_SMALLER;//说明A的结束地址更小
}
- } else if (endA < startB) {
- return RB_SMALLER;
+ } else if (endA < startB) {//A结束地址小于B的开始地址
+ return RB_SMALLER;//说明A整体在B之前,A更小,在左边
}
return RB_EQUAL;
}
+/*!
+ * @brief OsVmSpaceInitCommon 初始化进程虚拟空间,必须提供L1表的虚拟内存地址
+ *
+ * @param virtTtb L1表的地址,TTB表地址
+ * @param vmSpace
+ * @return
+ *
+ * @see
+ */
STATIC BOOL OsVmSpaceInitCommon(LosVmSpace *vmSpace, VADDR_T *virtTtb)
{
- LOS_RbInitTree(&vmSpace->regionRbTree, OsRegionRbCmpKeyFn, OsRegionRbFreeFn, OsRegionRbGetKeyFn);
+ LOS_RbInitTree(&vmSpace->regionRbTree, OsRegionRbCmpKeyFn, OsRegionRbFreeFn, OsRegionRbGetKeyFn);//初始化虚拟存储空间-以红黑树组织方式
- status_t retval = LOS_MuxInit(&vmSpace->regionMux, NULL);
+ status_t retval = LOS_MuxInit(&vmSpace->regionMux, NULL);//初始化互斥量
if (retval != LOS_OK) {
VM_ERR("Create mutex for vm space failed, status: %d", retval);
return FALSE;
}
(VOID)LOS_MuxAcquire(&g_vmSpaceListMux);
- LOS_ListAdd(&g_vmSpaceList, &vmSpace->node);
+ LOS_ListAdd(&g_vmSpaceList, &vmSpace->node);//将虚拟空间挂入全局虚拟空间双循环链表上
(VOID)LOS_MuxRelease(&g_vmSpaceListMux);
- return OsArchMmuInit(&vmSpace->archMmu, virtTtb);
+ return OsArchMmuInit(&vmSpace->archMmu, virtTtb);//对mmu初始化
}
-
+///@note_thinking 这个函数名称和内容不太搭
VOID OsVmMapInit(VOID)
{
- status_t retval = LOS_MuxInit(&g_vmSpaceListMux, NULL);
+ status_t retval = LOS_MuxInit(&g_vmSpaceListMux, NULL);//初始化虚拟空间的互斥量
if (retval != LOS_OK) {
VM_ERR("Create mutex for g_vmSpaceList failed, status: %d", retval);
}
}
-
-BOOL OsKernVmSpaceInit(LosVmSpace *vmSpace, VADDR_T *virtTtb)
+///初始化内核虚拟空间
+BOOL OsKernVmSpaceInit(LosVmSpace *vmSpace, VADDR_T *virtTtb)//内核空间页表是编译时放在bbs段指定的,共用 L1表
{
- vmSpace->base = KERNEL_ASPACE_BASE;
- vmSpace->size = KERNEL_ASPACE_SIZE;
- vmSpace->mapBase = KERNEL_VMM_BASE;
- vmSpace->mapSize = KERNEL_VMM_SIZE;
+ vmSpace->base = KERNEL_ASPACE_BASE;//内核空间基地址, 线性区将分配在此范围
+ vmSpace->size = KERNEL_ASPACE_SIZE;//内核空间大小
+ vmSpace->mapBase = KERNEL_VMM_BASE;//内核空间映射区基地址
+ vmSpace->mapSize = KERNEL_VMM_SIZE;//内核空间映射区大小
#ifdef LOSCFG_DRIVERS_TZDRIVER
- vmSpace->codeStart = 0;
- vmSpace->codeEnd = 0;
+ vmSpace->codeStart = 0; //代码区开始地址
+ vmSpace->codeEnd = 0; //代码区结束地址
#endif
- return OsVmSpaceInitCommon(vmSpace, virtTtb);
+ return OsVmSpaceInitCommon(vmSpace, virtTtb);//virtTtb 用于初始化 mmu
}
-
-BOOL OsVMallocSpaceInit(LosVmSpace *vmSpace, VADDR_T *virtTtb)
+///初始化内核堆空间
+BOOL OsVMallocSpaceInit(LosVmSpace *vmSpace, VADDR_T *virtTtb)//内核动态空间的页表是动态申请得来,共用 L1表
{
- vmSpace->base = VMALLOC_START;
- vmSpace->size = VMALLOC_SIZE;
- vmSpace->mapBase = VMALLOC_START;
- vmSpace->mapSize = VMALLOC_SIZE;
+ vmSpace->base = VMALLOC_START; //内核堆空间基地址
+ vmSpace->size = VMALLOC_SIZE; //内核堆空间大小
+ vmSpace->mapBase = VMALLOC_START; //内核堆空间映射基地址
+ vmSpace->mapSize = VMALLOC_SIZE; //内核堆空间映射区大小
#ifdef LOSCFG_DRIVERS_TZDRIVER
vmSpace->codeStart = 0;
vmSpace->codeEnd = 0;
#endif
- return OsVmSpaceInitCommon(vmSpace, virtTtb);
+ return OsVmSpaceInitCommon(vmSpace, virtTtb);//创建MMU,为后续的虚实映射做好初始化的工作
}
-
+///内核虚拟空间初始化
VOID OsKSpaceInit(VOID)
{
- OsVmMapInit();
- OsKernVmSpaceInit(&g_kVmSpace, OsGFirstTableGet());
- OsVMallocSpaceInit(&g_vMallocSpace, OsGFirstTableGet());
+ OsVmMapInit();//初始化后续操作 g_vmSpaceList 的互斥锁
+ OsKernVmSpaceInit(&g_kVmSpace, OsGFirstTableGet()); //初始化内核进程虚拟空间
+ OsVMallocSpaceInit(&g_vMallocSpace, OsGFirstTableGet());//初始化内核动态分配空间
}
-
+/*!
+ * @brief OsUserVmSpaceInit 用户空间的TTB表是动态申请得来,每个进程有属于自己的L1,L2表
+ * 初始化用户进程虚拟空间,主要划分数据区,堆区,映射区和创建mmu
+ * @param virtTtb
+ * @param vmSpace
+ * @return
+ *
+ * @see
+ */
BOOL OsUserVmSpaceInit(LosVmSpace *vmSpace, VADDR_T *virtTtb)
{
- vmSpace->base = USER_ASPACE_BASE;
- vmSpace->size = USER_ASPACE_SIZE;
- vmSpace->mapBase = USER_MAP_BASE;
- vmSpace->mapSize = USER_MAP_SIZE;
- vmSpace->heapBase = USER_HEAP_BASE;
- vmSpace->heapNow = USER_HEAP_BASE;
- vmSpace->heap = NULL;
+ vmSpace->base = USER_ASPACE_BASE;//用户空间基地址
+ vmSpace->size = USER_ASPACE_SIZE;//用户空间大小
+ vmSpace->mapBase = USER_MAP_BASE;//用户空间映射基地址
+ vmSpace->mapSize = USER_MAP_SIZE;//用户空间映射大小
+ vmSpace->heapBase = USER_HEAP_BASE;//用户堆区开始地址,只有用户进程需要设置这里,动态内存的开始地址
+ vmSpace->heapNow = USER_HEAP_BASE;//堆区最新指向地址,用户堆空间大小可通过系统调用 do_brk()扩展
+ vmSpace->heap = NULL; //最近分配的一个堆线性区
#ifdef LOSCFG_DRIVERS_TZDRIVER
vmSpace->codeStart = 0;
vmSpace->codeEnd = 0;
#endif
- return OsVmSpaceInitCommon(vmSpace, virtTtb);
+ return OsVmSpaceInitCommon(vmSpace, virtTtb);//创建MMU,为后续的虚实映射做好初始化的工作
}
-
+/// 创建用户进程空间
LosVmSpace *OsCreateUserVmSpace(VOID)
{
BOOL retVal = FALSE;
- LosVmSpace *space = LOS_MemAlloc(m_aucSysMem0, sizeof(LosVmSpace));
+ LosVmSpace *space = LOS_MemAlloc(m_aucSysMem0, sizeof(LosVmSpace));//在内核空间申请用户进程空间
if (space == NULL) {
return NULL;
}
-
- VADDR_T *ttb = LOS_PhysPagesAllocContiguous(1);
- if (ttb == NULL) {
+ //此处为何直接申请物理页帧存放用户进程的页表,大概是因为所有页表都被存放在内核空间(g_kVmSpace)而非内核分配空间(g_vMallocSpace)
+ VADDR_T *ttb = LOS_PhysPagesAllocContiguous(1);//分配一个物理页用于存放虚实映射关系表, 即:L1表
+ if (ttb == NULL) {//若连页表都没有,剩下的也别玩了.
(VOID)LOS_MemFree(m_aucSysMem0, space);
return NULL;
}
-
- (VOID)memset_s(ttb, PAGE_SIZE, 0, PAGE_SIZE);
- retVal = OsUserVmSpaceInit(space, ttb);
- LosVmPage *vmPage = OsVmVaddrToPage(ttb);
+
+ (VOID)memset_s(ttb, PAGE_SIZE, 0, PAGE_SIZE);//4K空间置0
+ retVal = OsUserVmSpaceInit(space, ttb);//初始化用户空间,mmu
+ LosVmPage *vmPage = OsVmVaddrToPage(ttb);//找到所在物理页框
if ((retVal == FALSE) || (vmPage == NULL)) {
(VOID)LOS_MemFree(m_aucSysMem0, space);
LOS_PhysPagesFreeContiguous(ttb, 1);
return NULL;
}
- LOS_ListAdd(&space->archMmu.ptList, &(vmPage->node));
+ LOS_ListAdd(&space->archMmu.ptList, &(vmPage->node));//页表链表,先挂上L1,后续还会挂上 N个L2表
return space;
}
-STATIC BOOL OsVmSpaceParamCheck(const LosVmSpace *vmSpace)
+STATIC BOOL OsVmSpaceParamCheck(const LosVmSpace *vmSpace)//这么简单也要写个函数?
{
if (vmSpace == NULL) {
return FALSE;
@@ -241,6 +314,7 @@ STATIC BOOL OsVmSpaceParamCheck(const LosVmSpace *vmSpace)
return TRUE;
}
+//虚拟内存空间克隆,被用于fork进程
STATUS_T LOS_VmSpaceClone(UINT32 cloneFlags, LosVmSpace *oldVmSpace, LosVmSpace *newVmSpace)
{
LosRbNode *pstRbNode = NULL;
@@ -255,23 +329,23 @@ STATUS_T LOS_VmSpaceClone(UINT32 cloneFlags, LosVmSpace *oldVmSpace, LosVmSpace
return LOS_ERRNO_VM_INVALID_ARGS;
}
- if ((OsIsVmRegionEmpty(oldVmSpace) == TRUE) || (oldVmSpace == &g_kVmSpace)) {
+ if ((OsIsVmRegionEmpty(oldVmSpace) == TRUE) || (oldVmSpace == &g_kVmSpace)) {//不允许clone内核空间,内核空间是独一无二的.
return LOS_ERRNO_VM_INVALID_ARGS;
}
-
+ //空间克隆的主体实现是:线性区重新一个个分配物理内存,重新映射.
/* search the region list */
- newVmSpace->mapBase = oldVmSpace->mapBase;
- newVmSpace->heapBase = oldVmSpace->heapBase;
- newVmSpace->heapNow = oldVmSpace->heapNow;
+ newVmSpace->mapBase = oldVmSpace->mapBase; //复制映射区基址
+ newVmSpace->heapBase = oldVmSpace->heapBase; //复制堆区基址
+ newVmSpace->heapNow = oldVmSpace->heapNow; //复制堆区当前使用到哪了
(VOID)LOS_MuxAcquire(&oldVmSpace->regionMux);
- RB_SCAN_SAFE(&oldVmSpace->regionRbTree, pstRbNode, pstRbNodeNext)
+ RB_SCAN_SAFE(&oldVmSpace->regionRbTree, pstRbNode, pstRbNodeNext)//红黑树循环开始
LosVmMapRegion *oldRegion = (LosVmMapRegion *)pstRbNode;
#if defined(LOSCFG_KERNEL_SHM) && defined(LOSCFG_IPC_CONTAINER)
if ((oldRegion->regionFlags & VM_MAP_REGION_FLAG_SHM) && (cloneFlags & CLONE_NEWIPC)) {
continue;
}
#endif
- LosVmMapRegion *newRegion = OsVmRegionDup(newVmSpace, oldRegion, oldRegion->range.base, oldRegion->range.size);
+ LosVmMapRegion *newRegion = OsVmRegionDup(newVmSpace, oldRegion, oldRegion->range.base, oldRegion->range.size);//复制线性区
if (newRegion == NULL) {
VM_ERR("dup new region failed");
ret = LOS_ERRNO_VM_NO_MEMORY;
@@ -279,50 +353,50 @@ STATUS_T LOS_VmSpaceClone(UINT32 cloneFlags, LosVmSpace *oldVmSpace, LosVmSpace
}
#ifdef LOSCFG_KERNEL_SHM
- if (oldRegion->regionFlags & VM_MAP_REGION_FLAG_SHM) {
- OsShmFork(newVmSpace, oldRegion, newRegion);
- continue;
+ if (oldRegion->regionFlags & VM_MAP_REGION_FLAG_SHM) {//如果老线性区是共享内存
+ OsShmFork(newVmSpace, oldRegion, newRegion);//fork共享线性区,如此新虚拟空间也能用那个线性区
+ continue;//不往下走了,因为共享内存不需要重新映射,下面无非就是需要MMU映射虚拟地址<-->物理地址
}
#endif
- if (oldRegion == oldVmSpace->heap) {
- newVmSpace->heap = newRegion;
+ if (oldRegion == oldVmSpace->heap) {//如果这个线性区是堆区
+ newVmSpace->heap = newRegion;//那么新的线性区也是新虚拟空间的堆区
}
- numPages = newRegion->range.size >> PAGE_SHIFT;
- for (i = 0; i < numPages; i++) {
+ numPages = newRegion->range.size >> PAGE_SHIFT;//计算线性区页数
+ for (i = 0; i < numPages; i++) {//一页一页进行重新映射
vaddr = newRegion->range.base + (i << PAGE_SHIFT);
- if (LOS_ArchMmuQuery(&oldVmSpace->archMmu, vaddr, &paddr, &flags) != LOS_OK) {
+ if (LOS_ArchMmuQuery(&oldVmSpace->archMmu, vaddr, &paddr, &flags) != LOS_OK) {//先查物理地址
continue;
}
- page = LOS_VmPageGet(paddr);
+ page = LOS_VmPageGet(paddr);//通过物理页获取物理内存的页框
if (page != NULL) {
- LOS_AtomicInc(&page->refCounts);
+ LOS_AtomicInc(&page->refCounts);//refCounts 自增
}
- if (flags & VM_MAP_REGION_FLAG_PERM_WRITE) {
- LOS_ArchMmuUnmap(&oldVmSpace->archMmu, vaddr, 1);
- LOS_ArchMmuMap(&oldVmSpace->archMmu, vaddr, paddr, 1, flags & ~VM_MAP_REGION_FLAG_PERM_WRITE);
+ if (flags & VM_MAP_REGION_FLAG_PERM_WRITE) {//可写入区标签
+ LOS_ArchMmuUnmap(&oldVmSpace->archMmu, vaddr, 1);//先删除老空间映射
+ LOS_ArchMmuMap(&oldVmSpace->archMmu, vaddr, paddr, 1, flags & ~VM_MAP_REGION_FLAG_PERM_WRITE);//老空间重新映射
}
- LOS_ArchMmuMap(&newVmSpace->archMmu, vaddr, paddr, 1, flags & ~VM_MAP_REGION_FLAG_PERM_WRITE);
+ LOS_ArchMmuMap(&newVmSpace->archMmu, vaddr, paddr, 1, flags & ~VM_MAP_REGION_FLAG_PERM_WRITE);//映射新空间
-#ifdef LOSCFG_FS_VFS
- if (LOS_IsRegionFileValid(oldRegion)) {
+#ifdef LOSCFG_FS_VFS //文件系统开关
+ if (LOS_IsRegionFileValid(oldRegion)) {//是否是一个文件映射线性区
LosFilePage *fpage = NULL;
LOS_SpinLockSave(&oldRegion->unTypeData.rf.vnode->mapping.list_lock, &intSave);
fpage = OsFindGetEntry(&oldRegion->unTypeData.rf.vnode->mapping, newRegion->pgOff + i);
if ((fpage != NULL) && (fpage->vmPage == page)) { /* cow page no need map */
- OsAddMapInfo(fpage, &newVmSpace->archMmu, vaddr);
+ OsAddMapInfo(fpage, &newVmSpace->archMmu, vaddr);//添加文件页映射,记录页面被进程映射过
}
LOS_SpinUnlockRestore(&oldRegion->unTypeData.rf.vnode->mapping.list_lock, intSave);
}
#endif
}
- RB_SCAN_SAFE_END(&oldVmSpace->regionRbTree, pstRbNode, pstRbNodeNext)
+ RB_SCAN_SAFE_END(&oldVmSpace->regionRbTree, pstRbNode, pstRbNodeNext)//红黑树循环结束
(VOID)LOS_MuxRelease(&oldVmSpace->regionMux);
return ret;
}
-
+///通过虚拟(线性)地址查找所属线性区,红黑树
LosVmMapRegion *OsFindRegion(LosRbTree *regionRbTree, VADDR_T vaddr, size_t len)
{
LosVmMapRegion *regionRst = NULL;
@@ -336,18 +410,18 @@ LosVmMapRegion *OsFindRegion(LosRbTree *regionRbTree, VADDR_T vaddr, size_t len)
}
return regionRst;
}
-
+/// 查找线性区 根据起始地址在进程空间内查找是否存在
LosVmMapRegion *LOS_RegionFind(LosVmSpace *vmSpace, VADDR_T addr)
{
LosVmMapRegion *region = NULL;
- (VOID)LOS_MuxAcquire(&vmSpace->regionMux);
+ (VOID)LOS_MuxAcquire(&vmSpace->regionMux);//因进程空间是隔离的,所以此处只会涉及到任务(线程)之间的竞争,故使用互斥锁,而自旋锁则用于CPU核间的竞争
region = OsFindRegion(&vmSpace->regionRbTree, addr, 1);
(VOID)LOS_MuxRelease(&vmSpace->regionMux);
return region;
}
-
+/// 查找线性区 根据地址区间在进程空间内查找是否存在
LosVmMapRegion *LOS_RegionRangeFind(LosVmSpace *vmSpace, VADDR_T addr, size_t len)
{
LosVmMapRegion *region = NULL;
@@ -358,14 +432,14 @@ LosVmMapRegion *LOS_RegionRangeFind(LosVmSpace *vmSpace, VADDR_T addr, size_t le
return region;
}
-
+/// 分配指定长度的线性区
VADDR_T OsAllocRange(LosVmSpace *vmSpace, size_t len)
{
LosVmMapRegion *curRegion = NULL;
LosRbNode *pstRbNode = NULL;
LosRbNode *pstRbNodeTmp = NULL;
LosRbTree *regionRbTree = &vmSpace->regionRbTree;
- VADDR_T curEnd = vmSpace->mapBase;
+ VADDR_T curEnd = vmSpace->mapBase;//获取映射区基地址
VADDR_T nextStart;
curRegion = LOS_RegionFind(vmSpace, vmSpace->mapBase);
@@ -384,7 +458,7 @@ VADDR_T OsAllocRange(LosVmSpace *vmSpace, size_t len)
curEnd = curRegion->range.base + curRegion->range.size;
}
RB_MID_SCAN_END(regionRbTree, pstRbNode)
- } else {
+ } else {//红黑树扫描排序,从小到大
/* rbtree scan is sorted, from small to big */
RB_SCAN_SAFE(regionRbTree, pstRbNode, pstRbNodeTmp)
curRegion = (LosVmMapRegion *)pstRbNode;
@@ -407,34 +481,34 @@ VADDR_T OsAllocRange(LosVmSpace *vmSpace, size_t len)
return 0;
}
-
+/// 分配指定开始地址和长度的线性区
VADDR_T OsAllocSpecificRange(LosVmSpace *vmSpace, VADDR_T vaddr, size_t len, UINT32 regionFlags)
{
STATUS_T status;
- if (LOS_IsRangeInSpace(vmSpace, vaddr, len) == FALSE) {
+ if (LOS_IsRangeInSpace(vmSpace, vaddr, len) == FALSE) {//虚拟地址是否在进程空间范围内
return 0;
}
if ((LOS_RegionFind(vmSpace, vaddr) != NULL) ||
(LOS_RegionFind(vmSpace, vaddr + len - 1) != NULL) ||
- (LOS_RegionRangeFind(vmSpace, vaddr, len - 1) != NULL)) {
+ (LOS_RegionRangeFind(vmSpace, vaddr, len - 1) != NULL)) {//指定区间已被占用(存在重叠线性区)的情况
if ((regionFlags & VM_MAP_REGION_FLAG_FIXED_NOREPLACE) != 0) {
return 0;
- } else if ((regionFlags & VM_MAP_REGION_FLAG_FIXED) != 0) {
- status = LOS_UnMMap(vaddr, len);
+ } else if ((regionFlags & VM_MAP_REGION_FLAG_FIXED) != 0) {//线性区未填满,则解除这部分空间的映射
+ status = LOS_UnMMap(vaddr, len);//解除映射
if (status != LOS_OK) {
VM_ERR("unmap specific range va: %#x, len: %#x failed, status: %d", vaddr, len, status);
return 0;
}
} else {
- return OsAllocRange(vmSpace, len);
+ return OsAllocRange(vmSpace, len);//默认分配一个
}
}
return vaddr;
}
-
+///映射类型为文件的线性区是否有效
BOOL LOS_IsRegionFileValid(LosVmMapRegion *region)
{
if ((region != NULL) && (LOS_IsRegionTypeFile(region)) &&
@@ -443,7 +517,7 @@ BOOL LOS_IsRegionFileValid(LosVmMapRegion *region)
}
return FALSE;
}
-
+///向红黑树中插入线性区
BOOL OsInsertRegion(LosRbTree *regionRbTree, LosVmMapRegion *region)
{
if (LOS_RbAddNode(regionRbTree, (LosRbNode *)region) == FALSE) {
@@ -453,46 +527,46 @@ BOOL OsInsertRegion(LosRbTree *regionRbTree, LosVmMapRegion *region)
}
return TRUE;
}
-
+///创建一个线性区
LosVmMapRegion *OsCreateRegion(VADDR_T vaddr, size_t len, UINT32 regionFlags, unsigned long offset)
{
- LosVmMapRegion *region = LOS_MemAlloc(m_aucSysMem0, sizeof(LosVmMapRegion));
+ LosVmMapRegion *region = LOS_MemAlloc(m_aucSysMem0, sizeof(LosVmMapRegion));//只是分配一个线性区结构体
if (region == NULL) {
VM_ERR("memory allocate for LosVmMapRegion failed");
return region;
}
-
+ //创建线性区的本质就是在画饼,见如下操作:
(void)memset_s(region, sizeof(LosVmMapRegion), 0, sizeof(LosVmMapRegion));
- region->range.base = vaddr;
- region->range.size = len;
- region->pgOff = offset;
- region->regionFlags = regionFlags;
- region->regionType = VM_MAP_REGION_TYPE_NONE;
- region->forkFlags = 0;
- region->shmid = -1;
+ region->range.base = vaddr; //虚拟地址作为线性区的基地址
+ region->range.size = len; //线性区大小,这是线性区构思最巧妙的地方,只要不过分,蓝图随便画。
+ region->pgOff = offset; //页标
+ region->regionFlags = regionFlags;//标识,可读/可写/可执行
+ region->regionType = VM_MAP_REGION_TYPE_NONE;//未映射
+ region->forkFlags = 0; //
+ region->shmid = -1; //默认线性区为不共享,无共享资源ID
return region;
}
-
+///通过虚拟地址查询映射的物理地址
PADDR_T LOS_PaddrQuery(VOID *vaddr)
{
PADDR_T paddr = 0;
STATUS_T status;
LosVmSpace *space = NULL;
LosArchMmu *archMmu = NULL;
-
- if (LOS_IsKernelAddress((VADDR_T)(UINTPTR)vaddr)) {
+ //先取出对应空间的mmu
+ if (LOS_IsKernelAddress((VADDR_T)(UINTPTR)vaddr)) {//是否内核空间地址
archMmu = &g_kVmSpace.archMmu;
- } else if (LOS_IsUserAddress((VADDR_T)(UINTPTR)vaddr)) {
+ } else if (LOS_IsUserAddress((VADDR_T)(UINTPTR)vaddr)) {//是否为用户空间地址
space = OsCurrProcessGet()->vmSpace;
archMmu = &space->archMmu;
- } else if (LOS_IsVmallocAddress((VADDR_T)(UINTPTR)vaddr)) {
+ } else if (LOS_IsVmallocAddress((VADDR_T)(UINTPTR)vaddr)) {//是否为分配空间地址,堆区地址
archMmu = &g_vMallocSpace.archMmu;
} else {
VM_ERR("vaddr is beyond range");
return 0;
}
- status = LOS_ArchMmuQuery(archMmu, (VADDR_T)(UINTPTR)vaddr, &paddr, 0);
+ status = LOS_ArchMmuQuery(archMmu, (VADDR_T)(UINTPTR)vaddr, &paddr, 0);//查询物理地址
if (status == LOS_OK) {
return paddr;
} else {
@@ -500,6 +574,10 @@ PADDR_T LOS_PaddrQuery(VOID *vaddr)
}
}
+/*!
+ * 这里不是真的分配物理内存,而是逻辑上画一个连续的区域,标记这个区域可以拿用,表示内存已经归你了。
+ 但真正的物理内存的占用会延迟到使用的时候才由缺页中断调入内存
+*/
LosVmMapRegion *LOS_RegionAlloc(LosVmSpace *vmSpace, VADDR_T vaddr, size_t len, UINT32 regionFlags, VM_OFFSET_T pgoff)
{
VADDR_T rstVaddr;
@@ -510,37 +588,40 @@ LosVmMapRegion *LOS_RegionAlloc(LosVmSpace *vmSpace, VADDR_T vaddr, size_t len,
* this is the most portable method of creating a new mapping. If addr is not NULL,
* then the kernel takes it as where to place the mapping;
*/
- (VOID)LOS_MuxAcquire(&vmSpace->regionMux);
- if (vaddr == 0) {
+ (VOID)LOS_MuxAcquire(&vmSpace->regionMux);//获得互斥锁
+ if (vaddr == 0) {//如果地址是0,根据线性区管理的实际情况,自动创建虚拟地址, 这是创建新映射的最便捷的方法。
rstVaddr = OsAllocRange(vmSpace, len);
} else {
- /* if it is already mmapped here, we unmmap it */
- rstVaddr = OsAllocSpecificRange(vmSpace, vaddr, len, regionFlags);
+ /* if it is already mmapped here, we unmmap it | 如果已经被映射了, 则解除映射关系*/
+ rstVaddr = OsAllocSpecificRange(vmSpace, vaddr, len, regionFlags);//创建包含指定虚拟地址的线性区, rstVaddr != vaddr || rstVaddr == vaddr
if (rstVaddr == 0) {
VM_ERR("alloc specific range va: %#x, len: %#x failed", vaddr, len);
goto OUT;
}
}
- if (rstVaddr == 0) {
+ if (rstVaddr == 0) {//没有可供映射的虚拟地址
goto OUT;
}
- newRegion = OsCreateRegion(rstVaddr, len, regionFlags, pgoff);
+ newRegion = OsCreateRegion(rstVaddr, len, regionFlags, pgoff);//创建一个线性区,指定线性区的开始地址rstVaddr ...
if (newRegion == NULL) {
goto OUT;
}
newRegion->space = vmSpace;
- isInsertSucceed = OsInsertRegion(&vmSpace->regionRbTree, newRegion);
- if (isInsertSucceed == FALSE) {
- (VOID)LOS_MemFree(m_aucSysMem0, newRegion);
+ isInsertSucceed = OsInsertRegion(&vmSpace->regionRbTree, newRegion);//插入红黑树和双循环链表中管理
+ if (isInsertSucceed == FALSE) {//插入失败
+ (VOID)LOS_MemFree(m_aucSysMem0, newRegion);//从内存池中释放
newRegion = NULL;
}
OUT:
- (VOID)LOS_MuxRelease(&vmSpace->regionMux);
+ (VOID)LOS_MuxRelease(&vmSpace->regionMux);//释放互斥锁
return newRegion;
}
-
+/*!
+ * 删除匿名页,匿名页就是内存映射页
+ * 1.解除映射关系 2.释放物理内存
+*/
STATIC VOID OsAnonPagesRemove(LosArchMmu *archMmu, VADDR_T vaddr, UINT32 count)
{
status_t status;
@@ -552,20 +633,20 @@ STATIC VOID OsAnonPagesRemove(LosArchMmu *archMmu, VADDR_T vaddr, UINT32 count)
return;
}
- while (count > 0) {
+ while (count > 0) {//一页页操作
count--;
- status = LOS_ArchMmuQuery(archMmu, vaddr, &paddr, NULL);
- if (status != LOS_OK) {
+ status = LOS_ArchMmuQuery(archMmu, vaddr, &paddr, NULL);//通过虚拟地址拿到物理地址
+ if (status != LOS_OK) {//失败,拿下一页的物理地址
vaddr += PAGE_SIZE;
continue;
}
- LOS_ArchMmuUnmap(archMmu, vaddr, 1);
+ LOS_ArchMmuUnmap(archMmu, vaddr, 1);//解除一页的映射
- page = LOS_VmPageGet(paddr);
- if (page != NULL) {
- if (!OsIsPageShared(page)) {
- LOS_PhysPageFree(page);
+ page = LOS_VmPageGet(paddr);//通过物理地址获取所在物理页框的起始地址
+ if (page != NULL) {//获取成功
+ if (!OsIsPageShared(page)) {//不是共享页,共享页会有专门的共享标签,共享本质是有无多个进程对该页的引用
+ LOS_PhysPageFree(page);//释放物理页框
}
}
vaddr += PAGE_SIZE;
@@ -609,7 +690,7 @@ STATIC VOID OsFilePagesRemove(LosVmSpace *space, LosVmMapRegion *region)
}
}
#endif
-
+/// 释放进程空间指定线性区
STATUS_T LOS_RegionFree(LosVmSpace *space, LosVmMapRegion *region)
{
if ((space == NULL) || (region == NULL)) {
@@ -619,35 +700,34 @@ STATUS_T LOS_RegionFree(LosVmSpace *space, LosVmMapRegion *region)
(VOID)LOS_MuxAcquire(&space->regionMux);
-#ifdef LOSCFG_FS_VFS
- if (LOS_IsRegionFileValid(region)) {
- OsFilePagesRemove(space, region);
+#ifdef LOSCFG_FS_VFS //文件开关
+ if (LOS_IsRegionFileValid(region)) {//是否为文件线性区
+ OsFilePagesRemove(space, region);//删除文件页
VnodeHold();
region->unTypeData.rf.vnode->useCount--;
VnodeDrop();
} else
#endif
-
-#ifdef LOSCFG_KERNEL_SHM
- if (OsIsShmRegion(region)) {
- OsShmRegionFree(space, region);
+#ifdef LOSCFG_KERNEL_SHM //共享内存开关
+ if (OsIsShmRegion(region)) { //是否为共享内存线性区
+ OsShmRegionFree(space, region);//释放共享线性区
} else if (LOS_IsRegionTypeDev(region)) {
#else
- if (LOS_IsRegionTypeDev(region)) {
+ if (LOS_IsRegionTypeDev(region)) {//如果是设备线性区
#endif
- OsDevPagesRemove(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);
+ OsDevPagesRemove(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);//删除映射设备
} else {
- OsAnonPagesRemove(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);
+ OsAnonPagesRemove(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);//删除匿名映射
}
/* remove it from space */
- LOS_RbDelNode(&space->regionRbTree, &region->rbNode);
+ LOS_RbDelNode(&space->regionRbTree, &region->rbNode);//从红黑树中删除线性区
/* free it */
- LOS_MemFree(m_aucSysMem0, region);
+ LOS_MemFree(m_aucSysMem0, region);//释放线性区结构体占用的内存
(VOID)LOS_MuxRelease(&space->regionMux);
return LOS_OK;
}
-
+/// 复制线性区
LosVmMapRegion *OsVmRegionDup(LosVmSpace *space, LosVmMapRegion *oldRegion, VADDR_T vaddr, size_t size)
{
LosVmMapRegion *newRegion = NULL;
@@ -655,31 +735,31 @@ LosVmMapRegion *OsVmRegionDup(LosVmSpace *space, LosVmMapRegion *oldRegion, VADD
(VOID)LOS_MuxAcquire(&space->regionMux);
regionFlags = oldRegion->regionFlags;
- if (vaddr == 0) {
- regionFlags &= ~(VM_MAP_REGION_FLAG_FIXED | VM_MAP_REGION_FLAG_FIXED_NOREPLACE);
+ if (vaddr == 0) {//不指定地址
+ regionFlags &= ~(VM_MAP_REGION_FLAG_FIXED | VM_MAP_REGION_FLAG_FIXED_NOREPLACE); //撕掉两个标签
} else {
- regionFlags |= VM_MAP_REGION_FLAG_FIXED;
+ regionFlags |= VM_MAP_REGION_FLAG_FIXED; //贴上填满线性区标签
}
- newRegion = LOS_RegionAlloc(space, vaddr, size, regionFlags, oldRegion->pgOff);
+ newRegion = LOS_RegionAlloc(space, vaddr, size, regionFlags, oldRegion->pgOff); //分配一个线性区
if (newRegion == NULL) {
VM_ERR("LOS_RegionAlloc failed");
goto REGIONDUPOUT;
}
- newRegion->regionType = oldRegion->regionType;
+ newRegion->regionType = oldRegion->regionType;//复制线性区类型(文件,设备,匿名)
#ifdef LOSCFG_KERNEL_SHM
- if (OsIsShmRegion(oldRegion)) {
- newRegion->shmid = oldRegion->shmid;
+ if (OsIsShmRegion(oldRegion)) {//如果是共享内存
+ newRegion->shmid = oldRegion->shmid;//复制共享ID
}
#endif
-#ifdef LOSCFG_FS_VFS
- if (LOS_IsRegionTypeFile(oldRegion)) {
- newRegion->unTypeData.rf.vmFOps = oldRegion->unTypeData.rf.vmFOps;
- newRegion->unTypeData.rf.vnode = oldRegion->unTypeData.rf.vnode;
- newRegion->unTypeData.rf.f_oflags = oldRegion->unTypeData.rf.f_oflags;
+#ifdef LOSCFG_FS_VFS //文件开关
+ if (LOS_IsRegionTypeFile(oldRegion)) {//如果是文件线性区
+ newRegion->unTypeData.rf.vmFOps = oldRegion->unTypeData.rf.vmFOps; //文件操作接口
+ newRegion->unTypeData.rf.vnode = oldRegion->unTypeData.rf.vnode; //文件索引节点
+ newRegion->unTypeData.rf.f_oflags = oldRegion->unTypeData.rf.f_oflags;//读写标签
VnodeHold();
- newRegion->unTypeData.rf.vnode->useCount++;
+ newRegion->unTypeData.rf.vnode->useCount++;//索引节点使用数增加
VnodeDrop();
}
#endif
@@ -688,14 +768,14 @@ REGIONDUPOUT:
(VOID)LOS_MuxRelease(&space->regionMux);
return newRegion;
}
-
+/// 劈开线性区
STATIC LosVmMapRegion *OsVmRegionSplit(LosVmMapRegion *oldRegion, VADDR_T newRegionStart)
{
LosVmMapRegion *newRegion = NULL;
LosVmSpace *space = oldRegion->space;
- size_t size = LOS_RegionSize(newRegionStart, LOS_RegionEndAddr(oldRegion));
+ size_t size = LOS_RegionSize(newRegionStart, LOS_RegionEndAddr(oldRegion));//获取线性区大小
- oldRegion->range.size = LOS_RegionSize(oldRegion->range.base, newRegionStart - 1);
+ oldRegion->range.size = LOS_RegionSize(oldRegion->range.base, newRegionStart - 1);//获取旧线性区大小
if (oldRegion->range.size == 0) {
LOS_RbDelNode(&space->regionRbTree, &oldRegion->rbNode);
}
@@ -710,14 +790,14 @@ STATIC LosVmMapRegion *OsVmRegionSplit(LosVmMapRegion *oldRegion, VADDR_T newReg
#endif
return newRegion;
}
-
+///对线性区进行调整
STATUS_T OsVmRegionAdjust(LosVmSpace *space, VADDR_T newRegionStart, size_t size)
{
LosVmMapRegion *region = NULL;
VADDR_T nextRegionBase = newRegionStart + size;
LosVmMapRegion *newRegion = NULL;
- region = LOS_RegionFind(space, newRegionStart);
+ region = LOS_RegionFind(space, newRegionStart);//先找到线性区
if ((region != NULL) && (newRegionStart > region->range.base)) {
newRegion = OsVmRegionSplit(region, newRegionStart);
if (newRegion == NULL) {
@@ -737,7 +817,7 @@ STATUS_T OsVmRegionAdjust(LosVmSpace *space, VADDR_T newRegionStart, size_t size
return LOS_OK;
}
-
+///删除线性区
STATUS_T OsRegionsRemove(LosVmSpace *space, VADDR_T regionBase, size_t size)
{
STATUS_T status;
@@ -748,12 +828,12 @@ STATUS_T OsRegionsRemove(LosVmSpace *space, VADDR_T regionBase, size_t size)
(VOID)LOS_MuxAcquire(&space->regionMux);
- status = OsVmRegionAdjust(space, regionBase, size);
+ status = OsVmRegionAdjust(space, regionBase, size);//线性区调整
if (status != LOS_OK) {
goto ERR_REGION_SPLIT;
}
- RB_SCAN_SAFE(&space->regionRbTree, pstRbNodeTemp, pstRbNodeNext)
+ RB_SCAN_SAFE(&space->regionRbTree, pstRbNodeTemp, pstRbNodeNext)//扫描虚拟空间内的线性区
regionTemp = (LosVmMapRegion *)pstRbNodeTemp;
if (regionTemp->range.base > regionEnd) {
break;
@@ -772,7 +852,7 @@ ERR_REGION_SPLIT:
(VOID)LOS_MuxRelease(&space->regionMux);
return status;
}
-
+///根据指定参数范围[addr,addr+len] 释放用户空间中堆区所占用的物理内存
INT32 OsUserHeapFree(LosVmSpace *vmSpace, VADDR_T addr, size_t len)
{
LosVmMapRegion *vmRegion = NULL;
@@ -781,25 +861,25 @@ INT32 OsUserHeapFree(LosVmSpace *vmSpace, VADDR_T addr, size_t len)
VADDR_T vaddr;
STATUS_T ret;
- if (vmSpace == LOS_GetKVmSpace() || vmSpace->heap == NULL) {
+ if (vmSpace == LOS_GetKVmSpace() || vmSpace->heap == NULL) {//虚拟空间堆区必须在非内核空间
return -1;
}
- vmRegion = LOS_RegionFind(vmSpace, addr);
+ vmRegion = LOS_RegionFind(vmSpace, addr);//通过参数虚拟地址红黑树找到线性区,线性区范围内包含了参数虚拟地址
if (vmRegion == NULL) {
return -1;
}
- if (vmRegion == vmSpace->heap) {
+ if (vmRegion == vmSpace->heap) {//地址所在的线性区为堆区
vaddr = addr;
- while (len > 0) {
- if (LOS_ArchMmuQuery(&vmSpace->archMmu, vaddr, &paddr, 0) == LOS_OK) {
- ret = LOS_ArchMmuUnmap(&vmSpace->archMmu, vaddr, 1);
+ while (len > 0) {//参数0 代表不获取 flags 信息
+ if (LOS_ArchMmuQuery(&vmSpace->archMmu, vaddr, &paddr, 0) == LOS_OK) {//通过虚拟地址查到物理地址
+ ret = LOS_ArchMmuUnmap(&vmSpace->archMmu, vaddr, 1);//解除映射关系以页为单位,这里解除1页
if (ret <= 0) {
VM_ERR("unmap failed, ret = %d", ret);
}
- vmPage = LOS_VmPageGet(paddr);
- LOS_PhysPageFree(vmPage);
+ vmPage = LOS_VmPageGet(paddr);//获取物理页面信息
+ LOS_PhysPageFree(vmPage);//释放页
}
vaddr += PAGE_SIZE;
len -= PAGE_SIZE;
@@ -809,7 +889,7 @@ INT32 OsUserHeapFree(LosVmSpace *vmSpace, VADDR_T addr, size_t len)
return -1;
}
-
+///线性区是否支持扩展
STATUS_T OsIsRegionCanExpand(LosVmSpace *space, LosVmMapRegion *region, size_t size)
{
LosVmMapRegion *nextRegion = NULL;
@@ -826,13 +906,13 @@ STATUS_T OsIsRegionCanExpand(LosVmSpace *space, LosVmMapRegion *region, size_t s
return LOS_NOK;
}
-
+///解除一定范围的虚拟地址的映射关系
STATUS_T OsUnMMap(LosVmSpace *space, VADDR_T addr, size_t size)
{
size = LOS_Align(size, PAGE_SIZE);
addr = LOS_Align(addr, PAGE_SIZE);
(VOID)LOS_MuxAcquire(&space->regionMux);
- STATUS_T status = OsRegionsRemove(space, addr, size);
+ STATUS_T status = OsRegionsRemove(space, addr, size);//删除线性区
if (status != LOS_OK) {
status = -EINVAL;
VM_ERR("region_split failed");
@@ -843,28 +923,28 @@ ERR_REGION_SPLIT:
(VOID)LOS_MuxRelease(&space->regionMux);
return status;
}
-
+/// 释放所有线性区
STATIC VOID OsVmSpaceAllRegionFree(LosVmSpace *space)
{
LosRbNode *pstRbNode = NULL;
LosRbNode *pstRbNodeNext = NULL;
/* free all of the regions */
- RB_SCAN_SAFE(&space->regionRbTree, pstRbNode, pstRbNodeNext)
- LosVmMapRegion *region = (LosVmMapRegion *)pstRbNode;
+ RB_SCAN_SAFE(&space->regionRbTree, pstRbNode, pstRbNodeNext) //遍历红黑树
+ LosVmMapRegion *region = (LosVmMapRegion *)pstRbNode;//拿到线性区
if (region->range.size == 0) {
VM_ERR("space free, region: %#x flags: %#x, base:%#x, size: %#x",
region, region->regionFlags, region->range.base, region->range.size);
}
- STATUS_T ret = LOS_RegionFree(space, region);
+ STATUS_T ret = LOS_RegionFree(space, region);//释放线性区
if (ret != LOS_OK) {
VM_ERR("free region error, space %p, region %p", space, region);
}
- RB_SCAN_SAFE_END(&space->regionRbTree, pstRbNode, pstRbNodeNext)
+ RB_SCAN_SAFE_END(&space->regionRbTree, pstRbNode, pstRbNodeNext)//要好好研究下这几个宏,有点意思
return;
}
-
+/// 释放虚拟空间
STATUS_T OsVmSpaceRegionFree(LosVmSpace *space)
{
if (space == NULL) {
@@ -882,22 +962,21 @@ STATUS_T OsVmSpaceRegionFree(LosVmSpace *space)
return LOS_OK;
}
-
+///释放虚拟空间,注意内核空间不能被释放掉,永驻内存
STATUS_T LOS_VmSpaceFree(LosVmSpace *space)
{
if (space == NULL) {
return LOS_ERRNO_VM_INVALID_ARGS;
}
- if (space == &g_kVmSpace) {
+ if (space == &g_kVmSpace) {//不能释放内核虚拟空间,内核空间常驻内存
VM_ERR("try to free kernel aspace, not allowed");
return LOS_OK;
}
/* pop it out of the global aspace list */
(VOID)LOS_MuxAcquire(&space->regionMux);
-
- LOS_ListDelete(&space->node);
+ LOS_ListDelete(&space->node);//从g_vmSpaceList链表里删除,g_vmSpaceList记录了所有空间节点。
OsVmSpaceAllRegionFree(space);
@@ -920,7 +999,7 @@ STATUS_T LOS_VmSpaceFree(LosVmSpace *space)
LOS_MemFree(m_aucSysMem0, space);
return LOS_OK;
}
-
+///虚拟地址和size是否在空间
BOOL LOS_IsRangeInSpace(const LosVmSpace *space, VADDR_T vaddr, size_t size)
{
/* is the starting address within the address space */
@@ -940,7 +1019,7 @@ BOOL LOS_IsRangeInSpace(const LosVmSpace *space, VADDR_T vaddr, size_t size)
}
return TRUE;
}
-
+/// 在进程空间中预留一块内存空间
STATUS_T LOS_VmSpaceReserve(LosVmSpace *space, size_t size, VADDR_T vaddr)
{
UINT32 regionFlags = 0;
@@ -961,7 +1040,7 @@ STATUS_T LOS_VmSpaceReserve(LosVmSpace *space, size_t size, VADDR_T vaddr)
return region ? LOS_OK : LOS_ERRNO_VM_NO_MEMORY;
}
-
+///实现从虚拟地址到物理地址的映射,将指定长度的物理地址区间与虚拟地址区间做映射,需提前申请物理地址区间
STATUS_T LOS_VaddrToPaddrMmap(LosVmSpace *space, VADDR_T vaddr, PADDR_T paddr, size_t len, UINT32 flags)
{
STATUS_T ret;
@@ -976,19 +1055,19 @@ STATUS_T LOS_VaddrToPaddrMmap(LosVmSpace *space, VADDR_T vaddr, PADDR_T paddr, s
}
if (space == NULL) {
- space = OsCurrProcessGet()->vmSpace;
+ space = OsCurrProcessGet()->vmSpace;//获取当前进程的空间
}
- region = LOS_RegionFind(space, vaddr);
- if (region != NULL) {
+ region = LOS_RegionFind(space, vaddr);//通过虚拟地址查找线性区
+ if (region != NULL) {//已经被映射过了,失败返回
VM_ERR("vaddr : 0x%x already used!", vaddr);
return LOS_ERRNO_VM_BUSY;
}
- region = LOS_RegionAlloc(space, vaddr, len, flags, 0);
+ region = LOS_RegionAlloc(space, vaddr, len, flags, 0);//通过虚拟地址 创建一个region
if (region == NULL) {
VM_ERR("failed");
- return LOS_ERRNO_VM_NO_MEMORY;
+ return LOS_ERRNO_VM_NO_MEMORY;//内存不够
}
while (len > 0) {
@@ -998,9 +1077,9 @@ STATUS_T LOS_VaddrToPaddrMmap(LosVmSpace *space, VADDR_T vaddr, PADDR_T paddr, s
VM_ERR("Page is NULL");
return LOS_ERRNO_VM_NOT_VALID;
}
- LOS_AtomicInc(&vmPage->refCounts);
+ LOS_AtomicInc(&vmPage->refCounts);//ref自增
- ret = LOS_ArchMmuMap(&space->archMmu, vaddr, paddr, 1, region->regionFlags);
+ ret = LOS_ArchMmuMap(&space->archMmu, vaddr, paddr, 1, region->regionFlags);//mmu map
if (ret <= 0) {
VM_ERR("LOS_ArchMmuMap failed: %d", ret);
LOS_RegionFree(space, region);
@@ -1014,9 +1093,10 @@ STATUS_T LOS_VaddrToPaddrMmap(LosVmSpace *space, VADDR_T vaddr, PADDR_T paddr, s
return LOS_OK;
}
+//对外接口|申请内核堆空间内存
VOID *LOS_VMalloc(size_t size)
{
- LosVmSpace *space = &g_vMallocSpace;
+ LosVmSpace *space = &g_vMallocSpace;//从内核动态空间申请
LosVmMapRegion *region = NULL;
size_t sizeCount;
size_t count;
@@ -1025,48 +1105,48 @@ VOID *LOS_VMalloc(size_t size)
PADDR_T pa;
STATUS_T ret;
- size = LOS_Align(size, PAGE_SIZE);
+ size = LOS_Align(size, PAGE_SIZE);//
if ((size == 0) || (size > space->size)) {
return NULL;
}
- sizeCount = size >> PAGE_SHIFT;
+ sizeCount = size >> PAGE_SHIFT;//按页申请所以需右移12位
LOS_DL_LIST_HEAD(pageList);
- (VOID)LOS_MuxAcquire(&space->regionMux);
+ (VOID)LOS_MuxAcquire(&space->regionMux);//获得互斥锁
- count = LOS_PhysPagesAlloc(sizeCount, &pageList);
+ count = LOS_PhysPagesAlloc(sizeCount, &pageList);//一页一页申请,并从pageList尾部插入
if (count < sizeCount) {
VM_ERR("failed to allocate enough pages (ask %zu, got %zu)", sizeCount, count);
goto ERROR;
}
- /* allocate a region and put it in the aspace list */
- region = LOS_RegionAlloc(space, 0, size, VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE, 0);
+ /* allocate a region and put it in the aspace list *///分配一个可读写的线性区,并挂在space
+ region = LOS_RegionAlloc(space, 0, size, VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE, 0);//注意第二个参数是 vaddr = 0 !!!
if (region == NULL) {
VM_ERR("alloc region failed, size = %x", size);
goto ERROR;
}
- va = region->range.base;
- while ((vmPage = LOS_ListRemoveHeadType(&pageList, LosVmPage, node))) {
- pa = vmPage->physAddr;
- LOS_AtomicInc(&vmPage->refCounts);
- ret = LOS_ArchMmuMap(&space->archMmu, va, pa, 1, region->regionFlags);
+ va = region->range.base;//va 该区范围基地址为虚拟地址的开始位置,理解va怎么来的是理解线性地址的关键!
+ while ((vmPage = LOS_ListRemoveHeadType(&pageList, LosVmPage, node))) {//从pageList循环拿page
+ pa = vmPage->physAddr;//获取page物理地址,因上面是通过LOS_PhysPagesAlloc分配
+ LOS_AtomicInc(&vmPage->refCounts);//refCounts 自增
+ ret = LOS_ArchMmuMap(&space->archMmu, va, pa, 1, region->regionFlags);//一页一页的map
if (ret != 1) {
VM_ERR("LOS_ArchMmuMap failed!, err;%d", ret);
}
- va += PAGE_SIZE;
- }
+ va += PAGE_SIZE;//一页映射完成,进入下一页
+ }//va 注意 region的虚拟地址页是连续的,但物理页可以不连续! 很重要!!!
- (VOID)LOS_MuxRelease(&space->regionMux);
- return (VOID *)(UINTPTR)region->range.base;
+ (VOID)LOS_MuxRelease(&space->regionMux);//释放互斥锁
+ return (VOID *)(UINTPTR)region->range.base;//返回虚拟基地址供应用使用
ERROR:
- (VOID)LOS_PhysPagesFree(&pageList);
- (VOID)LOS_MuxRelease(&space->regionMux);
+ (VOID)LOS_PhysPagesFree(&pageList);//释放物理内存页
+ (VOID)LOS_MuxRelease(&space->regionMux);//释放互斥锁
return NULL;
}
-
+///对外接口|释放内核堆空间内存
VOID LOS_VFree(const VOID *addr)
{
LosVmSpace *space = &g_vMallocSpace;
@@ -1080,13 +1160,13 @@ VOID LOS_VFree(const VOID *addr)
(VOID)LOS_MuxAcquire(&space->regionMux);
- region = LOS_RegionFind(space, (VADDR_T)(UINTPTR)addr);
+ region = LOS_RegionFind(space, (VADDR_T)(UINTPTR)addr);//先找到线性区
if (region == NULL) {
VM_ERR("find region failed");
goto DONE;
}
- ret = LOS_RegionFree(space, region);
+ ret = LOS_RegionFree(space, region);//释放线性区
if (ret) {
VM_ERR("free region failed, ret = %d", ret);
}
@@ -1097,10 +1177,9 @@ DONE:
LosMux *OsGVmSpaceMuxGet(VOID)
{
- return &g_vmSpaceListMux;
+ return &g_vmSpaceListMux;
}
-
-STATIC INLINE BOOL OsMemLargeAlloc(UINT32 size)
+STATIC INLINE BOOL OsMemLargeAlloc(UINT32 size)//是不是分配浪费大于1K的内存
{
if (g_kHeapInited == FALSE) {
return FALSE;
@@ -1122,28 +1201,28 @@ PADDR_T LOS_PaddrQuery(VOID *vaddr)
return (PADDR_T)VMM_TO_DMA_ADDR((VADDR_T)vaddr);
}
#endif
-
+///内核空间内存分配,申请小于16KiB的内存则通过堆内存池获取,否则申请多个连续物理页
VOID *LOS_KernelMalloc(UINT32 size)
{
VOID *ptr = NULL;
-
+ //从本函数可知,内核空间的分配有两种方式
#ifdef LOSCFG_KERNEL_VM
- if (OsMemLargeAlloc(size)) {
- ptr = LOS_PhysPagesAllocContiguous(ROUNDUP(size, PAGE_SIZE) >> PAGE_SHIFT);
+ if (OsMemLargeAlloc(size)) {//是不是分配浪费小于1K的内存
+ ptr = LOS_PhysPagesAllocContiguous(ROUNDUP(size, PAGE_SIZE) >> PAGE_SHIFT);//分配连续的物理内存页
} else
#endif
{
- ptr = LOS_MemAlloc(OS_SYS_MEM_ADDR, size);
+ ptr = LOS_MemAlloc(OS_SYS_MEM_ADDR, size);//从内存池分配
}
return ptr;
}
-
+/// 申请具有对齐属性的内存,申请规则:申请小于16KiB的内存则通过堆内存池获取,否则申请多个连续物理页
VOID *LOS_KernelMallocAlign(UINT32 size, UINT32 boundary)
{
VOID *ptr = NULL;
-#ifdef LOSCFG_KERNEL_VM
+#ifdef LOSCFG_KERNEL_VM
if (OsMemLargeAlloc(size) && IS_ALIGNED(PAGE_SIZE, boundary)) {
ptr = LOS_PhysPagesAllocContiguous(ROUNDUP(size, PAGE_SIZE) >> PAGE_SHIFT);
} else
@@ -1154,7 +1233,7 @@ VOID *LOS_KernelMallocAlign(UINT32 size, UINT32 boundary)
return ptr;
}
-
+/// 重新分配内核内存空间
VOID *LOS_KernelRealloc(VOID *ptr, UINT32 size)
{
VOID *tmpPtr = NULL;
@@ -1162,6 +1241,7 @@ VOID *LOS_KernelRealloc(VOID *ptr, UINT32 size)
#ifdef LOSCFG_KERNEL_VM
LosVmPage *page = NULL;
errno_t ret;
+
if (ptr == NULL) {
tmpPtr = LOS_KernelMalloc(size);
} else {
@@ -1198,7 +1278,7 @@ VOID LOS_KernelFree(VOID *ptr)
{
#ifdef LOSCFG_KERNEL_VM
UINT32 ret;
- if (OsMemIsHeapNode(ptr) == FALSE) {
+ if (OsMemIsHeapNode(ptr) == FALSE) {//判断地址是否在堆区
ret = OsMemLargeNodeFree(ptr);
if (ret != LOS_OK) {
VM_ERR("KernelFree %p failed", ptr);
@@ -1207,6 +1287,8 @@ VOID LOS_KernelFree(VOID *ptr)
} else
#endif
{
- (VOID)LOS_MemFree(OS_SYS_MEM_ADDR, ptr);
+ (VOID)LOS_MemFree(OS_SYS_MEM_ADDR, ptr);//从内存池中释放
}
}
+
+
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_page.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_page.c
index 650fd0b0..bb134206 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_page.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_page.c
@@ -40,34 +40,37 @@
#ifdef LOSCFG_KERNEL_VM
-LosVmPage *g_vmPageArray = NULL;
-size_t g_vmPageArraySize;
-
+LosVmPage *g_vmPageArray = NULL;//物理页框数组
+size_t g_vmPageArraySize;//物理页框大小
+//页框初始化
STATIC VOID OsVmPageInit(LosVmPage *page, paddr_t pa, UINT8 segID)
{
- LOS_ListInit(&page->node);
- page->flags = FILE_PAGE_FREE;
- LOS_AtomicSet(&page->refCounts, 0);
- page->physAddr = pa;
- page->segID = segID;
- page->order = VM_LIST_ORDER_MAX;
+ LOS_ListInit(&page->node); //页节点初始化
+ page->flags = FILE_PAGE_FREE; //页标签,初始为空闲页
+ LOS_AtomicSet(&page->refCounts, 0); //引用次数0
+ page->physAddr = pa; //物理地址
+ page->segID = segID; //物理地址使用段管理,段ID
+ page->order = VM_LIST_ORDER_MAX; //初始化值,不属于任何块组
page->nPages = 0;
#ifdef LOSCFG_PAGE_TABLE_FINE_LOCK
LOS_SpinInit(&page->lock);
#endif
}
-
+///伙伴算法初始化
STATIC INLINE VOID OsVmPageOrderListInit(LosVmPage *page, size_t nPages)
-{
- OsVmPhysPagesFreeContiguous(page, nPages);
+{//@note_why 此时所有页面 page->order = VM_LIST_ORDER_MAX,能挂入伙伴算法的链表吗?
+ OsVmPhysPagesFreeContiguous(page, nPages);//释放连续的物理页框
}
-
#define VMPAGEINIT(page, pa, segID) do { \
OsVmPageInit(page, pa, segID); \
(page)++; \
(pa) += PAGE_SIZE; \
} while (0)
+/*!
+ 完成对物理内存整体初始化,本函数一定运行在实模式下
+ 1.申请大块内存g_vmPageArray存放LosVmPage,按4K一页划分物理内存存放在数组中.
+*/
VOID OsVmPageStartup(VOID)
{
struct VmPhysSeg *seg = NULL;
@@ -76,7 +79,7 @@ VOID OsVmPageStartup(VOID)
UINT32 nPage;
INT32 segID;
- OsVmPhysAreaSizeAdjust(ROUNDUP((g_vmBootMemBase - KERNEL_ASPACE_BASE), PAGE_SIZE));
+ OsVmPhysAreaSizeAdjust(ROUNDUP((g_vmBootMemBase - KERNEL_ASPACE_BASE), PAGE_SIZE));//校正 g_physArea size
/*
* Pages getting from OsVmPhysPageNumGet() interface here contain the memory
@@ -85,20 +88,20 @@ VOID OsVmPageStartup(VOID)
*/
UINT32 pageNum = OsVmPhysPageNumGet();
nPage = pageNum * PAGE_SIZE / (sizeof(LosVmPage) + PAGE_SIZE);
- g_vmPageArraySize = nPage * sizeof(LosVmPage);
- g_vmPageArray = (LosVmPage *)OsVmBootMemAlloc(g_vmPageArraySize);
+ g_vmPageArraySize = nPage * sizeof(LosVmPage);//页表总大小
+ g_vmPageArray = (LosVmPage *)OsVmBootMemAlloc(g_vmPageArraySize);//实模式下申请内存,此时还没有初始化MMU
- OsVmPhysAreaSizeAdjust(ROUNDUP(g_vmPageArraySize, PAGE_SIZE));
+ OsVmPhysAreaSizeAdjust(ROUNDUP(g_vmPageArraySize, PAGE_SIZE));//
- OsVmPhysSegAdd();
- OsVmPhysInit();
+ OsVmPhysSegAdd();// 完成对段的初始化
+ OsVmPhysInit();// 加入空闲链表和设置置换算法,LRU(最近最久未使用)算法
#ifdef LOSCFG_KERNEL_PLIMITS
OsMemLimitSetLimit(pageNum * PAGE_SIZE);
#endif
- for (segID = 0; segID < g_vmPhysSegNum; segID++) {
+ for (segID = 0; segID < g_vmPhysSegNum; segID++) {//遍历物理段,将段切成一页一页
seg = &g_vmPhysSeg[segID];
- nPage = seg->size >> PAGE_SHIFT;
+ nPage = seg->size >> PAGE_SHIFT;//本段总页数
UINT32 count = nPage >> 3; /* 3: 2 ^ 3, nPage / 8, cycle count */
UINT32 left = nPage & 0x7; /* 0x7: nPage % 8, left page */
@@ -116,17 +119,17 @@ VOID OsVmPageStartup(VOID)
for (; left > 0; left--) {
VMPAGEINIT(page, pa, segID);
}
- OsVmPageOrderListInit(seg->pageBase, nPage);
+ OsVmPageOrderListInit(seg->pageBase, nPage);//伙伴算法初始化,将所有页加入空闲链表供分配
}
}
-
+///通过物理地址获取页框
LosVmPage *LOS_VmPageGet(PADDR_T paddr)
{
INT32 segID;
LosVmPage *page = NULL;
- for (segID = 0; segID < g_vmPhysSegNum; segID++) {
- page = OsVmPhysToPage(paddr, segID);
+ for (segID = 0; segID < g_vmPhysSegNum; segID++) {//物理内存采用段页管理
+ page = OsVmPhysToPage(paddr, segID);//通过物理地址和段ID找出物理页框
if (page != NULL) {
break;
}
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_phys.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_phys.c
index 2c581456..837b41b0 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_phys.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_phys.c
@@ -1,3 +1,38 @@
+/*!
+ * @file los_vm_phys.c
+ * @brief 物理内存管理 - 段页式管理
+ * @link physical http://weharmonyos.com/openharmony/zh-cn/device-dev/kernel/kernel-small-basic-memory-physical.html @endlink
+ @verbatim
+基本概念
+ 物理内存是计算机上最重要的资源之一,指的是实际的内存设备提供的、可以通过CPU总线直接进行寻址的内存空间,
+ 其主要作用是为操作系统及程序提供临时存储空间。LiteOS-A内核管理物理内存是通过分页实现的,除了内核堆占用的一部分内存外,
+ 其余可用内存均以4KiB为单位划分成页帧,内存分配和内存回收便是以页帧为单位进行操作。内核采用伙伴算法管理空闲页面,
+ 可以降低一定的内存碎片率,提高内存分配和释放的效率,但是一个很小的块往往也会阻塞一个大块的合并,导致不能分配较大的内存块。
+运行机制
+ LiteOS-A内核的物理内存使用分布视图,主要由内核镜像、内核堆及物理页组成。内核堆部分见堆内存管理一节。
+ -----------------------------------------------------
+
+ kernel.bin | heap | page frames
+ (内核镜像) | (内核堆) | (物理页框)
+ -----------------------------------------------------
+ 伙伴算法把所有空闲页帧分成9个内存块组,每组中内存块包含2的幂次方个页帧,例如:第0组的内存块包含2的0次方个页帧,
+ 即1个页帧;第8组的内存块包含2的8次方个页帧,即256个页帧。相同大小的内存块挂在同一个链表上进行管理。
+
+申请内存
+ 系统申请20KiB内存,按一页帧4K算,即5个页帧时,9个内存块组中索引为2的链表挂着一块大小为8个页帧的内存块满足要求,分配出20KiB内存后还剩余12KiB内存,
+ 即3个页帧,将3个页帧分成2的幂次方之和,即0跟1,尝试查找伙伴进行合并。2个页帧的内存块没有伙伴则直接插到索引为1的链表上,
+ 继续查找1个页帧的内存块是否有伙伴,索引为0的链表上此时有1个,如果两个内存块地址连续则进行合并,并将内存块挂到索引为0的链表上,否则不做处理。
+释放内存
+ 系统释放12KiB内存,即3个页帧,将3个页帧分成2的幂次方之和,即2跟1,尝试查找伙伴进行合并,索引为1的链表上有1个内存块,
+ 若地址连续则合并,并将合并后的内存块挂到索引为2的链表上,索引为0的链表上此时也有1个,如果地址连续则进行合并,
+ 并将合并后的内存块挂到索引为1的链表上,此时继续判断是否有伙伴,重复上述操作。
+ @endverbatim
+ * @image html https://gitee.com/weharmonyos/resources/raw/master/17/malloc_phy.png
+ * @image html https://gitee.com/weharmonyos/resources/raw/master/17/free_phy.png
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-11-25
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
@@ -41,36 +76,36 @@
#define ONE_PAGE 1
-/* Physical memory area array */
-STATIC struct VmPhysArea g_physArea[] = {
+/* Physical memory area array | 物理内存区数组 */
+STATIC struct VmPhysArea g_physArea[] = {///< 这里只有一个区域,即只生成一个段
{
- .start = SYS_MEM_BASE,
- .size = SYS_MEM_SIZE_DEFAULT,
+ .start = SYS_MEM_BASE, //整个物理内存基地址,#define SYS_MEM_BASE DDR_MEM_ADDR , 0x80000000
+ .size = SYS_MEM_SIZE_DEFAULT,//整个物理内存总大小 0x07f00000
},
};
-struct VmPhysSeg g_vmPhysSeg[VM_PHYS_SEG_MAX];
-INT32 g_vmPhysSegNum = 0;
-
+struct VmPhysSeg g_vmPhysSeg[VM_PHYS_SEG_MAX]; ///< 最大32段
+INT32 g_vmPhysSegNum = 0; ///< 段数
+/// 获取段数组,全局变量,变量放在 .bbs 区
LosVmPhysSeg *OsGVmPhysSegGet(void)
{
return g_vmPhysSeg;
}
-
+/// 初始化Lru置换链表
STATIC VOID OsVmPhysLruInit(struct VmPhysSeg *seg)
{
INT32 i;
UINT32 intSave;
- LOS_SpinInit(&seg->lruLock);
+ LOS_SpinInit(&seg->lruLock);//初始化自旋锁,自旋锁用于CPU多核同步
LOS_SpinLockSave(&seg->lruLock, &intSave);
- for (i = 0; i < VM_NR_LRU_LISTS; i++) {
- seg->lruSize[i] = 0;
- LOS_ListInit(&seg->lruList[i]);
+ for (i = 0; i < VM_NR_LRU_LISTS; i++) { //五个双循环链表
+ seg->lruSize[i] = 0; //记录链表节点数
+ LOS_ListInit(&seg->lruList[i]); //初始化LRU链表
}
LOS_SpinUnlockRestore(&seg->lruLock, intSave);
}
-
+/// 创建物理段,由区划分转成段管理
STATIC INT32 OsVmPhysSegCreate(paddr_t start, size_t size)
{
struct VmPhysSeg *seg = NULL;
@@ -79,8 +114,8 @@ STATIC INT32 OsVmPhysSegCreate(paddr_t start, size_t size)
return -1;
}
- seg = &g_vmPhysSeg[g_vmPhysSegNum++];
- for (; (seg > g_vmPhysSeg) && ((seg - 1)->start > (start + size)); seg--) {
+ seg = &g_vmPhysSeg[g_vmPhysSegNum++];//拿到一段数据
+ for (; (seg > g_vmPhysSeg) && ((seg - 1)->start > (start + size)); seg--) {//定位到合适的段
*seg = *(seg - 1);
}
seg->start = start;
@@ -88,21 +123,21 @@ STATIC INT32 OsVmPhysSegCreate(paddr_t start, size_t size)
return 0;
}
-
+/// 添加物理段
VOID OsVmPhysSegAdd(VOID)
{
INT32 i, ret;
LOS_ASSERT(g_vmPhysSegNum < VM_PHYS_SEG_MAX);
-
- for (i = 0; i < (sizeof(g_physArea) / sizeof(g_physArea[0])); i++) {
- ret = OsVmPhysSegCreate(g_physArea[i].start, g_physArea[i].size);
+
+ for (i = 0; i < (sizeof(g_physArea) / sizeof(g_physArea[0])); i++) {//遍历g_physArea数组
+ ret = OsVmPhysSegCreate(g_physArea[i].start, g_physArea[i].size);//由区划分转成段管理
if (ret != 0) {
VM_ERR("create phys seg failed");
}
}
}
-
+/// 段区域大小调整
VOID OsVmPhysAreaSizeAdjust(size_t size)
{
/*
@@ -113,35 +148,36 @@ VOID OsVmPhysAreaSizeAdjust(size_t size)
g_physArea[0].size -= size;
}
+/// 获得物理内存的总页数
UINT32 OsVmPhysPageNumGet(VOID)
{
UINT32 nPages = 0;
INT32 i;
for (i = 0; i < (sizeof(g_physArea) / sizeof(g_physArea[0])); i++) {
- nPages += g_physArea[i].size >> PAGE_SHIFT;
+ nPages += g_physArea[i].size >> PAGE_SHIFT;//右移12位,相当于除以4K, 计算出总页数
}
- return nPages;
+ return nPages;//返回所有物理内存总页数
}
-
+/// 初始化空闲链表,分配物理页框使用伙伴算法
STATIC INLINE VOID OsVmPhysFreeListInit(struct VmPhysSeg *seg)
{
int i;
UINT32 intSave;
struct VmFreeList *list = NULL;
- LOS_SpinInit(&seg->freeListLock);
+ LOS_SpinInit(&seg->freeListLock);//初始化用于分配的自旋锁
LOS_SpinLockSave(&seg->freeListLock, &intSave);
- for (i = 0; i < VM_LIST_ORDER_MAX; i++) {
- list = &seg->freeList[i];
- LOS_ListInit(&list->node);
- list->listCnt = 0;
+ for (i = 0; i < VM_LIST_ORDER_MAX; i++) {//遍历伙伴算法空闲块组链表
+ list = &seg->freeList[i]; //一个个来
+ LOS_ListInit(&list->node); //LosVmPage.node将挂到list->node上
+ list->listCnt = 0; //链表上的数量默认0
}
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
}
-
+/// 物理段初始化
VOID OsVmPhysInit(VOID)
{
struct VmPhysSeg *seg = NULL;
@@ -150,13 +186,13 @@ VOID OsVmPhysInit(VOID)
for (i = 0; i < g_vmPhysSegNum; i++) {
seg = &g_vmPhysSeg[i];
- seg->pageBase = &g_vmPageArray[nPages];
- nPages += seg->size >> PAGE_SHIFT;
- OsVmPhysFreeListInit(seg);
- OsVmPhysLruInit(seg);
+ seg->pageBase = &g_vmPageArray[nPages];//记录本段首页物理页框地址
+ nPages += seg->size >> PAGE_SHIFT;//偏移12位,按4K一页,算出本段总页数
+ OsVmPhysFreeListInit(seg); //初始化空闲链表,分配页框使用伙伴算法
+ OsVmPhysLruInit(seg); //初始化LRU置换链表
}
}
-
+/// 将页框挂入空闲链表,分配物理页框从空闲链表里拿
STATIC VOID OsVmPhysFreeListAddUnsafe(LosVmPage *page, UINT8 order)
{
struct VmPhysSeg *seg = NULL;
@@ -173,36 +209,44 @@ STATIC VOID OsVmPhysFreeListAddUnsafe(LosVmPage *page, UINT8 order)
LOS_ListTailInsert(&list->node, &page->node);
list->listCnt++;
}
-
+///将物理页框从空闲链表上摘除,见于物理页框被分配的情况
STATIC VOID OsVmPhysFreeListDelUnsafe(LosVmPage *page)
{
struct VmPhysSeg *seg = NULL;
struct VmFreeList *list = NULL;
- if ((page->segID >= VM_PHYS_SEG_MAX) || (page->order >= VM_LIST_ORDER_MAX)) {
+ if ((page->segID >= VM_PHYS_SEG_MAX) || (page->order >= VM_LIST_ORDER_MAX)) {//等于VM_LIST_ORDER_MAX也不行,说明伙伴算法最大支持 2^8的分配
LOS_Panic("The page segment id(%u) or order(%u) is invalid\n", page->segID, page->order);
}
- seg = &g_vmPhysSeg[page->segID];
- list = &seg->freeList[page->order];
- list->listCnt--;
- LOS_ListDelete(&page->node);
- page->order = VM_LIST_ORDER_MAX;
+ seg = &g_vmPhysSeg[page->segID]; //找到物理页框对应的段
+ list = &seg->freeList[page->order]; //根据伙伴算法组序号找到空闲链表
+ list->listCnt--; //链表节点总数减一
+ LOS_ListDelete(&page->node); //将自己从链表上摘除
+ page->order = VM_LIST_ORDER_MAX; //告诉系统物理页框已不在空闲链表上, 用于OsVmPhysPagesSpiltUnsafe的断言
}
+
+/**
+ * @brief 本函数很像卖猪肉的,拿一大块肉剁,先把多余的放回到小块肉堆里去.
+ * @param page
+ * @param oldOrder 原本要买 2^2肉
+ * @param newOrder 却找到个 2^8肉块
+ * @return STATIC
+ */
STATIC VOID OsVmPhysPagesSpiltUnsafe(LosVmPage *page, UINT8 oldOrder, UINT8 newOrder)
{
UINT32 order;
LosVmPage *buddyPage = NULL;
- for (order = newOrder; order > oldOrder;) {
- order--;
- buddyPage = &page[VM_ORDER_TO_PAGES(order)];
- LOS_ASSERT(buddyPage->order == VM_LIST_ORDER_MAX);
- OsVmPhysFreeListAddUnsafe(buddyPage, order);
+ for (order = newOrder; order > oldOrder;) {//把肉剁碎的过程,把多余的肉块切成2^7,2^6...标准块,
+ order--;//越切越小,逐一挂到对应的空闲链表上
+ buddyPage = &page[VM_ORDER_TO_PAGES(order)];//@note_good 先把多余的肉割出来,这句代码很赞!因为LosVmPage本身是在一个大数组上,page[nPages]可直接定位
+ LOS_ASSERT(buddyPage->order == VM_LIST_ORDER_MAX);//没挂到伙伴算法对应组块空闲链表上的物理页框的order必须是VM_LIST_ORDER_MAX
+ OsVmPhysFreeListAddUnsafe(buddyPage, order);//将劈开的节点挂到对应序号的链表上,buddyPage->order = order
}
}
-
+///通过物理地址获取所属参数段的物理页框
LosVmPage *OsVmPhysToPage(paddr_t pa, UINT8 segID)
{
struct VmPhysSeg *seg = NULL;
@@ -216,8 +260,8 @@ LosVmPage *OsVmPhysToPage(paddr_t pa, UINT8 segID)
return NULL;
}
- offset = pa - seg->start;
- return (seg->pageBase + (offset >> PAGE_SHIFT));
+ offset = pa - seg->start;//得到物理地址的偏移量
+ return (seg->pageBase + (offset >> PAGE_SHIFT));//得到对应的物理页框
}
LosVmPage *OsVmPaddrToPage(paddr_t paddr)
@@ -233,31 +277,37 @@ LosVmPage *OsVmPaddrToPage(paddr_t paddr)
}
return NULL;
}
-
-VOID *OsVmPageToVaddr(LosVmPage *page)
+/*!
+ * @brief 通过page获取内核空间的虚拟地址 参考OsArchMmuInit
+ \n #define SYS_MEM_BASE DDR_MEM_ADDR /* physical memory base 物理地址的起始地址 * /
+ \n 本函数非常重要,通过一个物理地址找到内核虚拟地址
+ \n 内核静态映射:提升虚实转化效率,段映射减少页表项
+ * @param page
+ * @return VOID*
+ */
+VOID *OsVmPageToVaddr(LosVmPage *page)//
{
VADDR_T vaddr;
- vaddr = KERNEL_ASPACE_BASE + page->physAddr - SYS_MEM_BASE;
-
- return (VOID *)(UINTPTR)vaddr;
+ vaddr = KERNEL_ASPACE_BASE + page->physAddr - SYS_MEM_BASE;//表示申请的物理地址在物理空间的偏移量等于映射的虚拟地址在内核空间的偏移量
+ return (VOID *)(UINTPTR)vaddr;//不需要存储映射关系,这简直就是神来之笔,拍案叫绝。@note_good 详见 鸿蒙内核源码分析(页表管理篇)
}
-
+///通过虚拟地址找映射的物理页框
LosVmPage *OsVmVaddrToPage(VOID *ptr)
{
struct VmPhysSeg *seg = NULL;
- PADDR_T pa = LOS_PaddrQuery(ptr);
+ PADDR_T pa = LOS_PaddrQuery(ptr);//通过空间的虚拟地址查询物理地址
UINT32 segID;
- for (segID = 0; segID < g_vmPhysSegNum; segID++) {
+ for (segID = 0; segID < g_vmPhysSegNum; segID++) {//遍历所有段
seg = &g_vmPhysSeg[segID];
- if ((pa >= seg->start) && (pa < (seg->start + seg->size))) {
- return seg->pageBase + ((pa - seg->start) >> PAGE_SHIFT);
+ if ((pa >= seg->start) && (pa < (seg->start + seg->size))) {//找到物理地址所在的段
+ return seg->pageBase + ((pa - seg->start) >> PAGE_SHIFT);//段基地址+页偏移索引 得到虚拟地址经映射所在物理页框
}
}
return NULL;
}
-
+/// 回收一定范围内的页框
STATIC INLINE VOID OsVmRecycleExtraPages(LosVmPage *page, size_t startPage, size_t endPage)
{
if (startPage >= endPage) {
@@ -266,7 +316,7 @@ STATIC INLINE VOID OsVmRecycleExtraPages(LosVmPage *page, size_t startPage, size
OsVmPhysPagesFreeContiguous(page, endPage - startPage);
}
-
+/// 大块的物理内存分配
STATIC LosVmPage *OsVmPhysLargeAlloc(struct VmPhysSeg *seg, size_t nPages)
{
struct VmFreeList *list = NULL;
@@ -276,11 +326,11 @@ STATIC LosVmPage *OsVmPhysLargeAlloc(struct VmPhysSeg *seg, size_t nPages)
PADDR_T paEnd;
size_t size = nPages << PAGE_SHIFT;
- list = &seg->freeList[VM_LIST_ORDER_MAX - 1];
- LOS_DL_LIST_FOR_EACH_ENTRY(page, &list->node, LosVmPage, node) {
+ list = &seg->freeList[VM_LIST_ORDER_MAX - 1];//先找伙伴算法中内存块最大的开撸
+ LOS_DL_LIST_FOR_EACH_ENTRY(page, &list->node, LosVmPage, node) {//遍历链表
paStart = page->physAddr;
paEnd = paStart + size;
- if (paEnd > (seg->start + seg->size)) {
+ if (paEnd > (seg->start + seg->size)) {//匹配物理地址范围
continue;
}
@@ -302,7 +352,7 @@ STATIC LosVmPage *OsVmPhysLargeAlloc(struct VmPhysSeg *seg, size_t nPages)
return NULL;
}
-
+/// 申请物理页并挂在对应的链表上
STATIC LosVmPage *OsVmPhysPagesAlloc(struct VmPhysSeg *seg, size_t nPages)
{
struct VmFreeList *list = NULL;
@@ -312,13 +362,13 @@ STATIC LosVmPage *OsVmPhysPagesAlloc(struct VmPhysSeg *seg, size_t nPages)
UINT32 newOrder;
order = OsVmPagesToOrder(nPages);
- if (order < VM_LIST_ORDER_MAX) {
- for (newOrder = order; newOrder < VM_LIST_ORDER_MAX; newOrder++) {
+ if (order < VM_LIST_ORDER_MAX) {//按正常的伙伴算法分配
+ for (newOrder = order; newOrder < VM_LIST_ORDER_MAX; newOrder++) {//从小往大了撸
list = &seg->freeList[newOrder];
- if (LOS_ListEmpty(&list->node)) {
- continue;
+ if (LOS_ListEmpty(&list->node)) {//这条链路上没有可分配的物理页框
+ continue;//继续往大的找
}
- page = LOS_DL_LIST_ENTRY(LOS_DL_LIST_FIRST(&list->node), LosVmPage, node);
+ page = LOS_DL_LIST_ENTRY(LOS_DL_LIST_FIRST(&list->node), LosVmPage, node);//找到了直接返回第一个节点
goto DONE;
}
} else {
@@ -339,7 +389,7 @@ DONE:
return page;
}
-
+/// 释放物理页框,所谓释放物理页就是把页挂到空闲链表中
VOID OsVmPhysPagesFree(LosVmPage *page, UINT8 order)
{
paddr_t pa;
@@ -349,51 +399,59 @@ VOID OsVmPhysPagesFree(LosVmPage *page, UINT8 order)
return;
}
- if (order < VM_LIST_ORDER_MAX - 1) {
- pa = VM_PAGE_TO_PHYS(page);
- do {
- pa ^= VM_ORDER_TO_PHYS(order);
- buddyPage = OsVmPhysToPage(pa, page->segID);
- if ((buddyPage == NULL) || (buddyPage->order != order)) {
+ if (order < VM_LIST_ORDER_MAX - 1) {//order[0,7]
+ pa = VM_PAGE_TO_PHYS(page);//获取物理地址
+ do {//按位异或
+ pa ^= VM_ORDER_TO_PHYS(order);//@note_good 注意这里是高位和低位的 ^= ,也就是说跳到order块组物理地址处,此处处理甚妙!
+ buddyPage = OsVmPhysToPage(pa, page->segID);//通过物理地址拿到页框
+ if ((buddyPage == NULL) || (buddyPage->order != order)) {//页框所在组块必须要对应
break;
}
- OsVmPhysFreeListDelUnsafe(buddyPage);
+ OsVmPhysFreeListDelUnsafe(buddyPage);//注意buddypage是连续的物理页框 例如order=2时,2^2=4页就是一个块组 |_|_|_|_|
order++;
pa &= ~(VM_ORDER_TO_PHYS(order) - 1);
page = OsVmPhysToPage(pa, page->segID);
} while (order < VM_LIST_ORDER_MAX - 1);
}
- OsVmPhysFreeListAddUnsafe(page, order);
+ OsVmPhysFreeListAddUnsafe(page, order);//伙伴算法 空闲节点增加
}
-
+///连续的释放物理页框, 如果8页连在一块是一起释放的
VOID OsVmPhysPagesFreeContiguous(LosVmPage *page, size_t nPages)
{
paddr_t pa;
UINT32 order;
size_t n;
- while (TRUE) {
- pa = VM_PAGE_TO_PHYS(page);
- order = VM_PHYS_TO_ORDER(pa);
- n = VM_ORDER_TO_PAGES(order);
- if (n > nPages) {
+ while (TRUE) {//死循环
+ pa = VM_PAGE_TO_PHYS(page);//获取页面物理地址
+ order = VM_PHYS_TO_ORDER(pa);//通过物理地址找到伙伴算法的级别
+ n = VM_ORDER_TO_PAGES(order);//通过级别找到物理页块数,即 (1 << order)
+ if (n > nPages) {//nPages只剩下小于2^order时,退出循环
break;
}
- OsVmPhysPagesFree(page, order);
- nPages -= n;
- page += n;
+ OsVmPhysPagesFree(page, order);//释放伙伴算法对应块组
+ nPages -= n;//总页数减少
+ page += n;//释放的页数增多
}
-
+ //举例剩下 7个页框时,依次用 2^2 2^1 2^0 方式释放
while (nPages > 0) {
- order = LOS_HighBitGet(nPages);
- n = VM_ORDER_TO_PAGES(order);
- OsVmPhysPagesFree(page, order);
+ order = LOS_HighBitGet(nPages);//从高到低块组释放
+ n = VM_ORDER_TO_PAGES(order);//2^order次方
+ OsVmPhysPagesFree(page, order);//释放块组
nPages -= n;
- page += n;
+ page += n;//相当于page[n]
}
}
+/*!
+ * @brief OsVmPhysPagesGet 获取一定数量的页框 LosVmPage实体是放在全局大数组中的,
+ * LosVmPage->nPages 标记了分配页数
+ * @param nPages
+ * @return
+ *
+ * @see
+ */
STATIC LosVmPage *OsVmPhysPagesGet(size_t nPages)
{
UINT32 intSave;
@@ -404,11 +462,11 @@ STATIC LosVmPage *OsVmPhysPagesGet(size_t nPages)
for (segID = 0; segID < g_vmPhysSegNum; segID++) {
seg = &g_vmPhysSeg[segID];
LOS_SpinLockSave(&seg->freeListLock, &intSave);
- page = OsVmPhysPagesAlloc(seg, nPages);
- if (page != NULL) {
- /* the first page of continuous physical addresses holds refCounts */
- LOS_AtomicSet(&page->refCounts, 0);
- page->nPages = nPages;
+ page = OsVmPhysPagesAlloc(seg, nPages);//分配指定页数的物理页,nPages需小于伙伴算法一次能分配的最大页数
+ if (page != NULL) {//分配成功
+ /* */
+ LOS_AtomicSet(&page->refCounts, 0);//设置引用次数为0
+ page->nPages = nPages;//页数
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
return page;
}
@@ -416,7 +474,7 @@ STATIC LosVmPage *OsVmPhysPagesGet(size_t nPages)
}
return NULL;
}
-
+///分配连续的物理页
VOID *LOS_PhysPagesAllocContiguous(size_t nPages)
{
LosVmPage *page = NULL;
@@ -424,15 +482,15 @@ VOID *LOS_PhysPagesAllocContiguous(size_t nPages)
if (nPages == 0) {
return NULL;
}
-
- page = OsVmPhysPagesGet(nPages);
+ //鸿蒙 nPages 不能大于 2^8 次方,即256个页,1M内存,仅限于内核态,用户态不限制分配大小.
+ page = OsVmPhysPagesGet(nPages);//通过伙伴算法获取物理上连续的页
if (page == NULL) {
return NULL;
}
- return OsVmPageToVaddr(page);
+ return OsVmPageToVaddr(page);//通过物理页找虚拟地址
}
-
+/// 释放指定页数地址连续的物理内存
VOID LOS_PhysPagesFreeContiguous(VOID *ptr, size_t nPages)
{
UINT32 intSave;
@@ -443,17 +501,17 @@ VOID LOS_PhysPagesFreeContiguous(VOID *ptr, size_t nPages)
return;
}
- page = OsVmVaddrToPage(ptr);
+ page = OsVmVaddrToPage(ptr);//通过虚拟地址找到页框
if (page == NULL) {
VM_ERR("vm page of ptr(%#x) is null", ptr);
return;
}
- page->nPages = 0;
+ page->nPages = 0;//被分配的页数置为0,表示不被分配
seg = &g_vmPhysSeg[page->segID];
LOS_SpinLockSave(&seg->freeListLock, &intSave);
- OsVmPhysPagesFreeContiguous(page, nPages);
+ OsVmPhysPagesFreeContiguous(page, nPages);//具体释放实现
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
#ifdef LOSCFG_KERNEL_PLIMITS
@@ -468,7 +526,7 @@ PADDR_T OsKVaddrToPaddr(VADDR_T kvaddr)
}
return (kvaddr - KERNEL_ASPACE_BASE + SYS_MEM_BASE);
}
-
+/// 通过物理地址获取内核虚拟地址
VADDR_T *LOS_PaddrToKVaddr(PADDR_T paddr)
{
struct VmPhysSeg *seg = NULL;
@@ -484,10 +542,10 @@ VADDR_T *LOS_PaddrToKVaddr(PADDR_T paddr)
return (VADDR_T *)(UINTPTR)(paddr - SYS_MEM_BASE + KERNEL_ASPACE_BASE);
}
}
-
- return (VADDR_T *)(UINTPTR)(paddr - SYS_MEM_BASE + KERNEL_ASPACE_BASE);
+ //内核
+ return (VADDR_T *)(UINTPTR)(paddr - SYS_MEM_BASE + KERNEL_ASPACE_BASE);//
}
-
+///释放一个物理页框
VOID LOS_PhysPageFree(LosVmPage *page)
{
UINT32 intSave;
@@ -497,12 +555,12 @@ VOID LOS_PhysPageFree(LosVmPage *page)
return;
}
- if (LOS_AtomicDecRet(&page->refCounts) <= 0) {
+ if (LOS_AtomicDecRet(&page->refCounts) <= 0) {//引用数减1后小于等于0,说明已无引用,可释放该物理页
seg = &g_vmPhysSeg[page->segID];
LOS_SpinLockSave(&seg->freeListLock, &intSave);
- OsVmPhysPagesFreeContiguous(page, ONE_PAGE);
- LOS_AtomicSet(&page->refCounts, 0);
+ OsVmPhysPagesFreeContiguous(page, ONE_PAGE);//释放一页
+ LOS_AtomicSet(&page->refCounts, 0);//只要物理内存被释放了,引用数就必须得重置为 0
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
}
@@ -510,12 +568,22 @@ VOID LOS_PhysPageFree(LosVmPage *page)
OsMemLimitMemFree(PAGE_SIZE);
#endif
}
-
+/// 申请一个物理页
LosVmPage *LOS_PhysPageAlloc(VOID)
{
- return OsVmPhysPagesGet(ONE_PAGE);
+ return OsVmPhysPagesGet(ONE_PAGE);//分配一页物理页
}
+/*!
+ * @brief LOS_PhysPagesAlloc 分配nPages页个物理页框,并将页框挂入list
+ \n 返回已分配的页面大小,不负责一定能分配到nPages的页框
+ *
+ * @param list
+ * @param nPages
+ * @return
+ *
+ * @see
+ */
size_t LOS_PhysPagesAlloc(size_t nPages, LOS_DL_LIST *list)
{
LosVmPage *page = NULL;
@@ -526,17 +594,17 @@ size_t LOS_PhysPagesAlloc(size_t nPages, LOS_DL_LIST *list)
}
while (nPages--) {
- page = OsVmPhysPagesGet(ONE_PAGE);
+ page = OsVmPhysPagesGet(ONE_PAGE);//一页一页分配,由伙伴算法分配
if (page == NULL) {
break;
}
- LOS_ListTailInsert(list, &page->node);
+ LOS_ListTailInsert(list, &page->node);//从参数链表list尾部挂入新页面结点
count++;
}
return count;
}
-
+///拷贝共享页面
VOID OsPhysSharePageCopy(PADDR_T oldPaddr, PADDR_T *newPaddr, LosVmPage *newPage)
{
UINT32 intSave;
@@ -550,43 +618,43 @@ VOID OsPhysSharePageCopy(PADDR_T oldPaddr, PADDR_T *newPaddr, LosVmPage *newPage
return;
}
- oldPage = LOS_VmPageGet(oldPaddr);
+ oldPage = LOS_VmPageGet(oldPaddr);//由物理地址得到页框
if (oldPage == NULL) {
VM_ERR("invalid oldPaddr %p", oldPaddr);
return;
}
- seg = &g_vmPhysSeg[oldPage->segID];
+ seg = &g_vmPhysSeg[oldPage->segID];//拿到物理段
LOS_SpinLockSave(&seg->freeListLock, &intSave);
- if (LOS_AtomicRead(&oldPage->refCounts) == 1) {
- *newPaddr = oldPaddr;
- } else {
- newMem = LOS_PaddrToKVaddr(*newPaddr);
- oldMem = LOS_PaddrToKVaddr(oldPaddr);
+ if (LOS_AtomicRead(&oldPage->refCounts) == 1) {//页面引用次数仅一次,说明只有一个进程在操作
+ *newPaddr = oldPaddr;//新老指向同一块物理地址
+ } else {//是个共享内存
+ newMem = LOS_PaddrToKVaddr(*newPaddr); //新页虚拟地址
+ oldMem = LOS_PaddrToKVaddr(oldPaddr); //老页虚拟地址
if ((newMem == NULL) || (oldMem == NULL)) {
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
return;
- }
- if (memcpy_s(newMem, PAGE_SIZE, oldMem, PAGE_SIZE) != EOK) {
+ }//请记住,在保护模式下,物理地址只能用于计算,操作(包括拷贝)需要虚拟地址!
+ if (memcpy_s(newMem, PAGE_SIZE, oldMem, PAGE_SIZE) != EOK) {//老页内容复制给新页,需操作虚拟地址,拷贝一页数据
VM_ERR("memcpy_s failed");
}
- LOS_AtomicInc(&newPage->refCounts);
- LOS_AtomicDec(&oldPage->refCounts);
+ LOS_AtomicInc(&newPage->refCounts);//新页引用次数以原子方式增量
+ LOS_AtomicDec(&oldPage->refCounts);//老页引用次数以原子方式自动减量
}
LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
return;
}
-
+///获取物理页框所在段
struct VmPhysSeg *OsVmPhysSegGet(LosVmPage *page)
{
if ((page == NULL) || (page->segID >= VM_PHYS_SEG_MAX)) {
return NULL;
}
- return (OsGVmPhysSegGet() + page->segID);
+ return (OsGVmPhysSegGet() + page->segID);//等用于OsGVmPhysSegGet()[page->segID]
}
-
+///获取参数nPages对应的块组,例如 7 -> 2^3 返回 3
UINT32 OsVmPagesToOrder(size_t nPages)
{
UINT32 order;
@@ -595,7 +663,7 @@ UINT32 OsVmPagesToOrder(size_t nPages)
return order;
}
-
+///释放双链表中的所有节点内存,本质是回归到伙伴orderlist中
size_t LOS_PhysPagesFree(LOS_DL_LIST *list)
{
UINT32 intSave;
@@ -608,16 +676,16 @@ size_t LOS_PhysPagesFree(LOS_DL_LIST *list)
return 0;
}
- LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(page, nPage, list, LosVmPage, node) {
- LOS_ListDelete(&page->node);
- if (LOS_AtomicDecRet(&page->refCounts) <= 0) {
- seg = &g_vmPhysSeg[page->segID];
- LOS_SpinLockSave(&seg->freeListLock, &intSave);
- OsVmPhysPagesFreeContiguous(page, ONE_PAGE);
- LOS_AtomicSet(&page->refCounts, 0);
- LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
+ LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(page, nPage, list, LosVmPage, node) {//宏循环
+ LOS_ListDelete(&page->node);//先把自己摘出去
+ if (LOS_AtomicDecRet(&page->refCounts) <= 0) {//无引用
+ seg = &g_vmPhysSeg[page->segID];//获取物理段
+ LOS_SpinLockSave(&seg->freeListLock, &intSave);//锁住freeList
+ OsVmPhysPagesFreeContiguous(page, ONE_PAGE);//连续释放,注意这里的ONE_PAGE其实有误导,让人以为是释放4K,其实是指连续的物理页框,如果3页连在一块是一起释放的.
+ LOS_AtomicSet(&page->refCounts, 0);//引用重置为0
+ LOS_SpinUnlockRestore(&seg->freeListLock, intSave);//恢复锁
}
- count++;
+ count++;//继续取下一个node
}
return count;
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_scan.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_scan.c
index ab467818..0482932f 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_scan.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_scan.c
@@ -37,6 +37,10 @@
#ifdef LOSCFG_KERNEL_VM
/* unmap a lru page by map record info caller need lru lock */
+/**************************************************************************************************
+ 解除文件页和进程(mmu)的映射关系
+ 参数info记录了进程的MMU
+**************************************************************************************************/
VOID OsUnmapPageLocked(LosFilePage *page, LosMapInfo *info)
{
if (page == NULL || info == NULL) {
@@ -47,88 +51,88 @@ VOID OsUnmapPageLocked(LosFilePage *page, LosMapInfo *info)
LOS_ListDelete(&info->node);
LOS_AtomicDec(&page->vmPage->refCounts);
LOS_ArchMmuUnmap(info->archMmu, info->vaddr, 1);
- LOS_MemFree(m_aucSysMem0, info);
+ LOS_MemFree(m_aucSysMem0, info);//释放虚拟
}
-
+///解除文件页在所有进程的映射
VOID OsUnmapAllLocked(LosFilePage *page)
{
LosMapInfo *info = NULL;
LosMapInfo *next = NULL;
LOS_DL_LIST *immap = &page->i_mmap;
-
- LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(info, next, immap, LosMapInfo, node) {
+
+ LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(info, next, immap, LosMapInfo, node) {//遍历 immap->info 链表
OsUnmapPageLocked(page, info);
}
}
/* add a new lru node to lru list, lruType can be file or anon */
-VOID OsLruCacheAdd(LosFilePage *fpage, enum OsLruList lruType)
+VOID OsLruCacheAdd(LosFilePage *fpage, enum OsLruList lruType)//在lru列表中添加一个新的lru节点,lruType可以是文件或匿名
{
UINT32 intSave;
- LosVmPhysSeg *physSeg = fpage->physSeg;
- LosVmPage *page = fpage->vmPage;
+ LosVmPhysSeg *physSeg = fpage->physSeg; //得到页面对应段
+ LosVmPage *page = fpage->vmPage; //得到物理页面
- LOS_SpinLockSave(&physSeg->lruLock, &intSave);
- OsSetPageActive(page);
- OsCleanPageReferenced(page);
- physSeg->lruSize[lruType]++;
- LOS_ListTailInsert(&physSeg->lruList[lruType], &fpage->lru);
+ LOS_SpinLockSave(&physSeg->lruLock, &intSave);//自旋锁:最多只能被一个内核持有,CPU内核 互斥锁
+ OsSetPageActive(page); //设置页面为活动页
+ OsCleanPageReferenced(page);//清除页面被引用位
+ physSeg->lruSize[lruType]++; //lruType页总size++
+ LOS_ListTailInsert(&physSeg->lruList[lruType], &fpage->lru);//加入lruType页双循环链表中
- LOS_SpinUnlockRestore(&physSeg->lruLock, intSave);
+ LOS_SpinUnlockRestore(&physSeg->lruLock, intSave);//解锁
}
-/* delete a lru node, caller need hold lru_lock */
-VOID OsLruCacheDel(LosFilePage *fpage)
+/* delete a lru node, caller need hold lru_lock */
+VOID OsLruCacheDel(LosFilePage *fpage)//删除lru节点,调用者需要拿到lru锁
{
- LosVmPhysSeg *physSeg = fpage->physSeg;
- int type = OsIsPageActive(fpage->vmPage) ? VM_LRU_ACTIVE_FILE : VM_LRU_INACTIVE_FILE;
+ LosVmPhysSeg *physSeg = fpage->physSeg; //得到页面对应段
+ int type = OsIsPageActive(fpage->vmPage) ? VM_LRU_ACTIVE_FILE : VM_LRU_INACTIVE_FILE;//得到页面LRU类型
- physSeg->lruSize[type]--;
- LOS_ListDelete(&fpage->lru);
+ physSeg->lruSize[type]--; //type页总size--
+ LOS_ListDelete(&fpage->lru);//将自己从lru链表中摘出来
}
-
+///非活动文件页低于活动文件页吗
BOOL OsInactiveListIsLow(LosVmPhysSeg *physSeg)
{
return (physSeg->lruSize[VM_LRU_ACTIVE_FILE] >
- physSeg->lruSize[VM_LRU_INACTIVE_FILE]) ? TRUE : FALSE;
+ physSeg->lruSize[VM_LRU_INACTIVE_FILE]) ? TRUE : FALSE;//直接对比size,效率杠杠的
}
/* move a page from inactive list to active list head */
-STATIC INLINE VOID OsMoveToActiveList(LosFilePage *fpage)
+STATIC INLINE VOID OsMoveToActiveList(LosFilePage *fpage)//将页面从非活动列表移动到活动列表
{
- LosVmPhysSeg *physSeg = fpage->physSeg;
+ LosVmPhysSeg *physSeg = fpage->physSeg; //得到页面对应段
- physSeg->lruSize[VM_LRU_ACTIVE_FILE]++;
- physSeg->lruSize[VM_LRU_INACTIVE_FILE]--;
- LOS_ListDelete(&fpage->lru);
- LOS_ListTailInsert(&physSeg->lruList[VM_LRU_ACTIVE_FILE], &fpage->lru);
+ physSeg->lruSize[VM_LRU_ACTIVE_FILE]++; //活动页总size++
+ physSeg->lruSize[VM_LRU_INACTIVE_FILE]--; //不活动页总size--
+ LOS_ListDelete(&fpage->lru); //将自己从lru链表中摘出来
+ LOS_ListTailInsert(&physSeg->lruList[VM_LRU_ACTIVE_FILE], &fpage->lru);//加入活动页双循环链表中
}
/* move a page from active list to inactive list head */
-STATIC INLINE VOID OsMoveToInactiveList(LosFilePage *fpage)
+STATIC INLINE VOID OsMoveToInactiveList(LosFilePage *fpage)//将页面从活动列表移动到非活动列表
{
- LosVmPhysSeg *physSeg = fpage->physSeg;
+ LosVmPhysSeg *physSeg = fpage->physSeg; //得到页面对应段
- physSeg->lruSize[VM_LRU_ACTIVE_FILE]--;
- physSeg->lruSize[VM_LRU_INACTIVE_FILE]++;
- LOS_ListDelete(&fpage->lru);
- LOS_ListTailInsert(&physSeg->lruList[VM_LRU_INACTIVE_FILE], &fpage->lru);
+ physSeg->lruSize[VM_LRU_ACTIVE_FILE]--; //活动页总size--
+ physSeg->lruSize[VM_LRU_INACTIVE_FILE]++; //不活动页总size++
+ LOS_ListDelete(&fpage->lru); //将自己从lru链表中摘出来
+ LOS_ListTailInsert(&physSeg->lruList[VM_LRU_INACTIVE_FILE], &fpage->lru);//加入不活动页双循环链表中
}
-/* move a page to the most active pos in lru list(active head) */
+/* move a page to the most active pos in lru list(active head) *///将页面移至lru列表中最活跃的位置
STATIC INLINE VOID OsMoveToActiveHead(LosFilePage *fpage)
{
- LosVmPhysSeg *physSeg = fpage->physSeg;
- LOS_ListDelete(&fpage->lru);
- LOS_ListTailInsert(&physSeg->lruList[VM_LRU_ACTIVE_FILE], &fpage->lru);
+ LosVmPhysSeg *physSeg = fpage->physSeg; //得到页面对应段
+ LOS_ListDelete(&fpage->lru); //将自己从lru链表中摘出来
+ LOS_ListTailInsert(&physSeg->lruList[VM_LRU_ACTIVE_FILE], &fpage->lru);//加入活动页双循环链表中
}
/* move a page to the most active pos in lru list(inactive head) */
-STATIC INLINE VOID OsMoveToInactiveHead(LosFilePage *fpage)
+STATIC INLINE VOID OsMoveToInactiveHead(LosFilePage *fpage)//鸿蒙会从inactive链表的尾部开始进行回收,跟linux一样
{
- LosVmPhysSeg *physSeg = fpage->physSeg;
- LOS_ListDelete(&fpage->lru);
- LOS_ListTailInsert(&physSeg->lruList[VM_LRU_INACTIVE_FILE], &fpage->lru);
+ LosVmPhysSeg *physSeg = fpage->physSeg; //得到页面对应段
+ LOS_ListDelete(&fpage->lru); //将自己从lru链表中摘出来
+ LOS_ListTailInsert(&physSeg->lruList[VM_LRU_INACTIVE_FILE], &fpage->lru);//加入不活动页双循环链表中
}
/* page referced add: (call by page cache get)
@@ -138,7 +142,7 @@ ref:0, act:0 --> ref:1, act:0
ref:1, act:0 --> ref:0, act:1
ref:0, act:1 --> ref:1, act:1
*/
-VOID OsPageRefIncLocked(LosFilePage *fpage)
+VOID OsPageRefIncLocked(LosFilePage *fpage)// ref ,act 标签转换功能
{
BOOL isOrgActive;
UINT32 intSave;
@@ -148,16 +152,16 @@ VOID OsPageRefIncLocked(LosFilePage *fpage)
return;
}
- LOS_SpinLockSave(&fpage->physSeg->lruLock, &intSave);
+ LOS_SpinLockSave(&fpage->physSeg->lruLock, &intSave);//要处理lruList,先拿锁
- page = fpage->vmPage;
- isOrgActive = OsIsPageActive(page);
+ page = fpage->vmPage;//拿到物理页框
+ isOrgActive = OsIsPageActive(page);//页面是否在活动
- if (OsIsPageReferenced(page) && !OsIsPageActive(page)) {
- OsCleanPageReferenced(page);
- OsSetPageActive(page);
+ if (OsIsPageReferenced(page) && !OsIsPageActive(page)) {//身兼 不活动和引用标签
+ OsCleanPageReferenced(page);//撕掉引用标签 ref:1, act:0 --> ref:0, act:1
+ OsSetPageActive(page); //贴上活动标签
} else if (!OsIsPageReferenced(page)) {
- OsSetPageReferenced(page);
+ OsSetPageReferenced(page);//ref:0, act:0 --> ref:1, act:0
}
if (!isOrgActive && OsIsPageActive(page)) {
@@ -175,14 +179,14 @@ VOID OsPageRefIncLocked(LosFilePage *fpage)
LOS_SpinUnlockRestore(&fpage->physSeg->lruLock, intSave);
}
-/* page referced dec: (call by shrinker)
+/* page referenced dec: (call by shrinker)
----------inactive----------|----------active------------
[ref:0,act:0], [ref:1,act:0]|[ref:0,act:1], [ref:1,act:1]
ref:1, act:1 --> ref:0, act:1
ref:0, act:1 --> ref:1, act:0
ref:1, act:0 --> ref:0, act:0
*/
-VOID OsPageRefDecNoLock(LosFilePage *fpage)
+VOID OsPageRefDecNoLock(LosFilePage *fpage) // ref ,act 标签转换功能
{
BOOL isOrgActive;
LosVmPage *page = NULL;
@@ -194,7 +198,7 @@ VOID OsPageRefDecNoLock(LosFilePage *fpage)
page = fpage->vmPage;
isOrgActive = OsIsPageActive(page);
- if (!OsIsPageReferenced(page) && OsIsPageActive(page)) {
+ if (!OsIsPageReferenced(page) && OsIsPageActive(page)) {//[ref:0,act:1]的情况
OsCleanPageActive(page);
OsSetPageReferenced(page);
} else if (OsIsPageReferenced(page)) {
@@ -205,39 +209,39 @@ VOID OsPageRefDecNoLock(LosFilePage *fpage)
OsMoveToInactiveList(fpage);
}
}
-
+///缩小活动页链表
VOID OsShrinkActiveList(LosVmPhysSeg *physSeg, int nScan)
{
LosFilePage *fpage = NULL;
LosFilePage *fnext = NULL;
LOS_DL_LIST *activeFile = &physSeg->lruList[VM_LRU_ACTIVE_FILE];
- LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, activeFile, LosFilePage, lru) {
- if (LOS_SpinTrylock(&fpage->mapping->list_lock) != LOS_OK) {
- continue;
+ LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, activeFile, LosFilePage, lru) {//一页一页处理
+ if (LOS_SpinTrylock(&fpage->mapping->list_lock) != LOS_OK) {//尝试获取文件页所在的page_mapping锁
+ continue;//接着处理下一文件页
}
- /* happened when caller hold cache lock and try reclaim this page */
- if (OsIsPageLocked(fpage->vmPage)) {
- LOS_SpinUnlock(&fpage->mapping->list_lock);
- continue;
+ /* happened when caller hold cache lock and try reclaim this page *///调用方持有缓存锁并尝试回收此页时发生
+ if (OsIsPageLocked(fpage->vmPage)) {//页面是否被锁
+ LOS_SpinUnlock(&fpage->mapping->list_lock);//失败时,一定要释放page_mapping锁.
+ continue;//接着处理下一文件页
}
- if (OsIsPageMapped(fpage) && (fpage->flags & VM_MAP_REGION_FLAG_PERM_EXECUTE)) {
- LOS_SpinUnlock(&fpage->mapping->list_lock);
- continue;
+ if (OsIsPageMapped(fpage) && (fpage->flags & VM_MAP_REGION_FLAG_PERM_EXECUTE)) {//文件页是否被映射而且是个可执行文件 ?
+ LOS_SpinUnlock(&fpage->mapping->list_lock);//是时,一定要释放page_mapping锁.
+ continue;//接着处理下一文件页
}
+ //找了可以收缩的文件页
+ OsPageRefDecNoLock(fpage); //将页面移到未活动文件链表
- OsPageRefDecNoLock(fpage);
-
- LOS_SpinUnlock(&fpage->mapping->list_lock);
+ LOS_SpinUnlock(&fpage->mapping->list_lock); //释放page_mapping锁.
if (--nScan <= 0) {
break;
}
}
}
-
+///缩小未活动页链表
int OsShrinkInactiveList(LosVmPhysSeg *physSeg, int nScan, LOS_DL_LIST *list)
{
UINT32 nrReclaimed = 0;
@@ -248,36 +252,36 @@ int OsShrinkInactiveList(LosVmPhysSeg *physSeg, int nScan, LOS_DL_LIST *list)
LosFilePage *ftemp = NULL;
LOS_DL_LIST *inactive_file = &physSeg->lruList[VM_LRU_INACTIVE_FILE];
- LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, inactive_file, LosFilePage, lru) {
+ LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, inactive_file, LosFilePage, lru) {//遍历链表一页一页处理
flock = &fpage->mapping->list_lock;
- if (LOS_SpinTrylock(flock) != LOS_OK) {
- continue;
+ if (LOS_SpinTrylock(flock) != LOS_OK) {//尝试获取文件页所在的page_mapping锁
+ continue;//接着处理下一文件页
}
- page = fpage->vmPage;
- if (OsIsPageLocked(page)) {
+ page = fpage->vmPage;//获取物理页框
+ if (OsIsPageLocked(page)) {//页面是否被锁
LOS_SpinUnlock(flock);
- continue;
+ continue;//接着处理下一文件页
}
if (OsIsPageMapped(fpage) && (OsIsPageDirty(page) || (fpage->flags & VM_MAP_REGION_FLAG_PERM_EXECUTE))) {
- LOS_SpinUnlock(flock);
- continue;
+ LOS_SpinUnlock(flock);//文件页是否被映射而且是个脏页获取是个可执行文件 ?
+ continue;//接着处理下一文件页
}
- if (OsIsPageDirty(page)) {
- ftemp = OsDumpDirtyPage(fpage);
- if (ftemp != NULL) {
- LOS_ListTailInsert(list, &ftemp->node);
+ if (OsIsPageDirty(page)) {//是脏页
+ ftemp = OsDumpDirtyPage(fpage);//备份脏页
+ if (ftemp != NULL) {//备份成功了
+ LOS_ListTailInsert(list, &ftemp->node);//将脏页挂到参数链表上带走
}
}
- OsDeletePageCacheLru(fpage);
+ OsDeletePageCacheLru(fpage);//将文件页从LRU和pagecache上摘除
LOS_SpinUnlock(flock);
- nrReclaimed++;
+ nrReclaimed++;//成功回收了一页
- if (--nScan <= 0) {
+ if (--nScan <= 0) {//继续回收
break;
}
}
@@ -286,48 +290,48 @@ int OsShrinkInactiveList(LosVmPhysSeg *physSeg, int nScan, LOS_DL_LIST *list)
}
#ifdef LOSCFG_FS_VFS
-int OsTryShrinkMemory(size_t nPage)
+int OsTryShrinkMemory(size_t nPage)//尝试收缩文件页
{
UINT32 intSave;
size_t totalPages;
size_t nReclaimed = 0;
LosVmPhysSeg *physSeg = NULL;
UINT32 index;
- LOS_DL_LIST_HEAD(dirtyList);
+ LOS_DL_LIST_HEAD(dirtyList);//初始化脏页链表,上面将挂所有脏页用于同步到磁盘后回收
LosFilePage *fpage = NULL;
LosFilePage *fnext = NULL;
if (nPage == 0) {
- nPage = VM_FILEMAP_MIN_SCAN;
+ nPage = VM_FILEMAP_MIN_SCAN;//
}
if (nPage > VM_FILEMAP_MAX_SCAN) {
nPage = VM_FILEMAP_MAX_SCAN;
}
- for (index = 0; index < g_vmPhysSegNum; index++) {
- physSeg = &g_vmPhysSeg[index];
+ for (index = 0; index < g_vmPhysSegNum; index++) {//遍历整个物理段组
+ physSeg = &g_vmPhysSeg[index];//一段段来
LOS_SpinLockSave(&physSeg->lruLock, &intSave);
- totalPages = physSeg->lruSize[VM_LRU_ACTIVE_FILE] + physSeg->lruSize[VM_LRU_INACTIVE_FILE];
- if (totalPages < VM_FILEMAP_MIN_SCAN) {
+ totalPages = physSeg->lruSize[VM_LRU_ACTIVE_FILE] + physSeg->lruSize[VM_LRU_INACTIVE_FILE];//统计所有文件页
+ if (totalPages < VM_FILEMAP_MIN_SCAN) {//文件页占用内存不多的情况下,怎么处理?
LOS_SpinUnlockRestore(&physSeg->lruLock, intSave);
- continue;
+ continue;//放过这一段,找下一段
}
if (OsInactiveListIsLow(physSeg)) {
- OsShrinkActiveList(physSeg, (nPage < VM_FILEMAP_MIN_SCAN) ? VM_FILEMAP_MIN_SCAN : nPage);
+ OsShrinkActiveList(physSeg, (nPage < VM_FILEMAP_MIN_SCAN) ? VM_FILEMAP_MIN_SCAN : nPage);//缩小活动页
}
- nReclaimed += OsShrinkInactiveList(physSeg, nPage, &dirtyList);
+ nReclaimed += OsShrinkInactiveList(physSeg, nPage, &dirtyList);//缩小未活动页,带出脏页链表
LOS_SpinUnlockRestore(&physSeg->lruLock, intSave);
- if (nReclaimed >= nPage) {
- break;
+ if (nReclaimed >= nPage) {//够了,够了,达到目的了.
+ break;//退出收缩
}
}
- LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &dirtyList, LosFilePage, node) {
- OsDoFlushDirtyPage(fpage);
+ LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &dirtyList, LosFilePage, node) {//遍历处理脏页数据
+ OsDoFlushDirtyPage(fpage);//冲洗脏页数据,将脏页数据回写磁盘
}
return nReclaimed;
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_syscall.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_syscall.c
index 6aa23993..483b48cf 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_syscall.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_syscall.c
@@ -65,7 +65,7 @@ STATUS_T OsCheckMMapParams(VADDR_T *vaddr, unsigned long flags, size_t len, unsi
return -EINVAL;
}
- if ((flags & MAP_SUPPORT_MASK) == 0) {
+ if ((flags & MAP_SUPPORT_MASK) == 0) {//映射权限限制
return -EINVAL;
}
if (((flags & MAP_SHARED_PRIVATE) == 0) || ((flags & MAP_SHARED_PRIVATE) == MAP_SHARED_PRIVATE)) {
@@ -95,20 +95,64 @@ STATUS_T OsNamedMmapingPermCheck(struct file *filep, unsigned long flags, unsign
return LOS_OK;
}
-
+///匿名映射
STATUS_T OsAnonMMap(LosVmMapRegion *region)
{
LOS_SetRegionTypeAnon(region);
return LOS_OK;
}
+/**
+ * @brief
+ @verbatim
+ mmap基础概念:
+ 一种内存映射文件的方法,即将一个文件或者其它对象映射到进程的地址空间,实现文件磁盘地址和进程虚拟地址空间中一段虚拟地址的一一对映关系.
+ 实现这样的映射关系后,进程就可以采用指针的方式读写操作这一段内存,而系统会自动回写脏页面到对应的文件磁盘上,
+ 即完成了对文件的操作而不必再调用read,write等系统调用函数。相反,内核空间对这段区域的修改也直接反映用户空间,
+ 从而可以实现不同进程间的文件共享。
+
+ https://www.cnblogs.com/huxiao-tee/p/4660352.html
+ http://abcdxyzk.github.io/blog/2015/09/11/kernel-mm-mmap/
+
+ 参数 描述
+ addr 指向欲映射的内存起始地址,通常设为 NULL,代表让系统自动选定地址,映射成功后返回该地址。
+ length 代表将文件中多大的部分映射到内存。
+ prot 用于设置内存段的访问权限,有如下权限:
+ PROT_EXEC 映射区域可被执行
+ PROT_READ 映射区域可被读取
+ PROT_WRITE 映射区域可被写入
+ PROT_NONE 映射区域不能存取
+
+ flags 控制程序对内存段的改变所造成的影响,有如下属性:
+ MAP_FIXED 如果参数start所指的地址无法成功建立映射时,则放弃映射,不对地址做修正。通常不鼓励用此旗标。
+ MAP_SHARED 对映射区域的写入数据会复制回文件内,而且允许其他映射该文件的进程共享。
+ MAP_PRIVATE 对映射区域的写入操作会产生一个映射文件的复制,即私人的“写入时复制”(copy on write)对此区域作的任何修改都不会写回原来的文件内容。
+ MAP_ANONYMOUS建立匿名映射。此时会忽略参数fd,不涉及文件,而且映射区域无法和其他进程共享。
+ MAP_DENYWRITE只允许对映射区域的写入操作,其他对文件直接写入的操作将会被拒绝。
+ MAP_LOCKED 将映射区域锁定住,这表示该区域不会被置换(swap)。
+
+ fd: 要映射到内存中的文件描述符。如果使用匿名内存映射时,即flags中设置了MAP_ANONYMOUS,fd设为-1。
+ 有些系统不支持匿名内存映射,则可以使用fopen打开/dev/zero文件,然后对该文件进行映射,可以同样达到匿名内存映射的效果。
+
+ offset 文件映射的偏移量,通常设置为0,代表从文件最前方开始对应,offset必须是PAGE_SIZE的整数倍。
+ 成功返回:虚拟内存地址,这地址是页对齐。
+ 失败返回:(void *)-1。
+ @endverbatim
+ * @param vaddr
+ * @param len
+ * @param prot
+ * @param flags
+ * @param fd
+ * @param pgoff
+ * @return VADDR_T
+ */
VADDR_T LOS_MMap(VADDR_T vaddr, size_t len, unsigned prot, unsigned long flags, int fd, unsigned long pgoff)
{
STATUS_T status;
VADDR_T resultVaddr;
UINT32 regionFlags;
- LosVmMapRegion *newRegion = NULL;
- struct file *filep = NULL;
+ LosVmMapRegion *newRegion = NULL;//应用的内存分配对应到内核就是分配一个线性区
+ struct file *filep = NULL;// inode : file = 1:N ,一对多关系,一个inode可以被多个进程打开,返回不同的file但都指向同一个inode
LosVmSpace *vmSpace = OsCurrProcessGet()->vmSpace;
len = ROUNDUP(len, PAGE_SIZE);
@@ -116,9 +160,9 @@ VADDR_T LOS_MMap(VADDR_T vaddr, size_t len, unsigned prot, unsigned long flags,
if (checkRst != LOS_OK) {
return checkRst;
}
-
- if (LOS_IsNamedMapping(flags)) {
- status = fs_getfilep(fd, &filep);
+
+ if (LOS_IsNamedMapping(flags)) {//是否文件映射
+ status = fs_getfilep(fd, &filep);//获取文件描述符和状态
if (status < 0) {
return -EBADF;
}
@@ -131,30 +175,30 @@ VADDR_T LOS_MMap(VADDR_T vaddr, size_t len, unsigned prot, unsigned long flags,
(VOID)LOS_MuxAcquire(&vmSpace->regionMux);
/* user mode calls mmap to release heap physical memory without releasing heap virtual space */
- status = OsUserHeapFree(vmSpace, vaddr, len);
- if (status == LOS_OK) {
+ status = OsUserHeapFree(vmSpace, vaddr, len);//用户模式释放堆物理内存而不释放堆虚拟空间
+ if (status == LOS_OK) {//OsUserHeapFree 干两件事 1.解除映射关系 2.释放物理页
resultVaddr = vaddr;
goto MMAP_DONE;
}
-
- regionFlags = OsCvtProtFlagsToRegionFlags(prot, flags);
- newRegion = LOS_RegionAlloc(vmSpace, vaddr, len, regionFlags, pgoff);
+ //地址不在堆区
+ regionFlags = OsCvtProtFlagsToRegionFlags(prot, flags);//将参数flag转换Region的flag
+ newRegion = LOS_RegionAlloc(vmSpace, vaddr, len, regionFlags, pgoff);//分配一个线性区
if (newRegion == NULL) {
- resultVaddr = (VADDR_T)-ENOMEM;
+ resultVaddr = (VADDR_T)-ENOMEM;//ENOMEM:内存溢出
goto MMAP_DONE;
}
newRegion->regionFlags |= VM_MAP_REGION_FLAG_MMAP;
- resultVaddr = newRegion->range.base;
+ resultVaddr = newRegion->range.base;//线性区基地址为分配的地址
if (LOS_IsNamedMapping(flags)) {
- status = OsNamedMMap(filep, newRegion);
+ status = OsNamedMMap(filep, newRegion);//文件映射
} else {
- status = OsAnonMMap(newRegion);
+ status = OsAnonMMap(newRegion);//匿名映射
}
if (status != LOS_OK) {
- LOS_RbDelNode(&vmSpace->regionRbTree, &newRegion->rbNode);
- LOS_RegionFree(vmSpace, newRegion);
+ LOS_RbDelNode(&vmSpace->regionRbTree, &newRegion->rbNode);//从红黑树和双循环链表中删除
+ LOS_RegionFree(vmSpace, newRegion);//释放
resultVaddr = (VADDR_T)-ENOMEM;
goto MMAP_DONE;
}
@@ -163,7 +207,7 @@ MMAP_DONE:
(VOID)LOS_MuxRelease(&vmSpace->regionMux);
return resultVaddr;
}
-
+///解除映射关系
STATUS_T LOS_UnMMap(VADDR_T addr, size_t size)
{
if ((addr <= 0) || (size == 0)) {
@@ -172,7 +216,6 @@ STATUS_T LOS_UnMMap(VADDR_T addr, size_t size)
return OsUnMMap(OsCurrProcessGet()->vmSpace, addr, size);
}
-
STATIC INLINE BOOL OsProtMprotectPermCheck(unsigned long prot, LosVmMapRegion *region)
{
UINT32 protFlags = 0;
@@ -186,20 +229,33 @@ STATIC INLINE BOOL OsProtMprotectPermCheck(unsigned long prot, LosVmMapRegion *r
return ((protFlags & permFlags) == protFlags);
}
-
+/// 收缩堆区
VOID *OsShrinkHeap(VOID *addr, LosVmSpace *space)
{
VADDR_T newBrk, oldBrk;
- newBrk = LOS_Align((VADDR_T)(UINTPTR)addr, PAGE_SIZE);
- oldBrk = LOS_Align(space->heapNow, PAGE_SIZE);
- if (LOS_UnMMap(newBrk, (oldBrk - newBrk)) < 0) {
- return (void *)(UINTPTR)space->heapNow;
+ newBrk = LOS_Align((VADDR_T)(UINTPTR)addr, PAGE_SIZE);//新堆顶
+ oldBrk = LOS_Align(space->heapNow, PAGE_SIZE);//旧堆顶
+ if (LOS_UnMMap(newBrk, (oldBrk - newBrk)) < 0) {//解除相差区的映射
+ return (void *)(UINTPTR)space->heapNow;//解除映射失败则维持现有堆顶不变
}
- space->heapNow = (VADDR_T)(UINTPTR)addr;
+ space->heapNow = (VADDR_T)(UINTPTR)addr;//更新堆顶为新地址
return addr;
}
+/**
+ * @brief
+ @verbatim
+ 用户进程向内核申请空间,进一步说用于扩展用户堆空间,或者回收用户堆空间
+ 扩展当前进程的堆空间
+ 一个进程所有的线性区都在进程指定的线性地址范围内,
+ 线性区之间是不会有地址的重叠的,开始都是连续的,随着进程的运行出现了释放再分配的情况
+ 由此出现了断断续续的线性区,内核回收线性区时会检测是否和周边的线性区可合并成一个更大
+ 的线性区用于分配。
+ @endverbatim
+ * @param addr
+ * @return VOID*
+ */
VOID *LOS_DoBrk(VOID *addr)
{
LosVmSpace *space = OsCurrProcessGet()->vmSpace;
@@ -209,60 +265,60 @@ VOID *LOS_DoBrk(VOID *addr)
VOID *alignAddr = NULL;
VOID *shrinkAddr = NULL;
- if (addr == NULL) {
- return (void *)(UINTPTR)space->heapNow;
+ if (addr == NULL) {//参数地址未传情况
+ return (void *)(UINTPTR)space->heapNow;//以现有指向地址为基础进行扩展
}
- if ((UINTPTR)addr < (UINTPTR)space->heapBase) {
+ if ((UINTPTR)addr < (UINTPTR)space->heapBase) {//heapBase是堆区的开始地址,所以参数地址不能低于它
return (VOID *)-ENOMEM;
}
- size = (UINTPTR)addr - (UINTPTR)space->heapBase;
- size = ROUNDUP(size, PAGE_SIZE);
- alignAddr = (CHAR *)(UINTPTR)(space->heapBase) + size;
+ size = (UINTPTR)addr - (UINTPTR)space->heapBase;//算出大小
+ size = ROUNDUP(size, PAGE_SIZE); //圆整size
+ alignAddr = (CHAR *)(UINTPTR)(space->heapBase) + size;//得到新的线性区的结束地址
PRINT_INFO("brk addr %p , size 0x%x, alignAddr %p, align %d\n", addr, size, alignAddr, PAGE_SIZE);
(VOID)LOS_MuxAcquire(&space->regionMux);
- if (addr < (VOID *)(UINTPTR)space->heapNow) {
- shrinkAddr = OsShrinkHeap(addr, space);
+ if (addr < (VOID *)(UINTPTR)space->heapNow) {//如果地址小于堆区现地址
+ shrinkAddr = OsShrinkHeap(addr, space);//收缩堆区
(VOID)LOS_MuxRelease(&space->regionMux);
return shrinkAddr;
}
- if ((UINTPTR)alignAddr >= space->mapBase) {
- VM_ERR("Process heap memory space is insufficient");
+ if ((UINTPTR)alignAddr >= space->mapBase) {//参数地址 大于映射区地址
+ VM_ERR("Process heap memory space is insufficient");//进程堆空间不足
ret = (VOID *)-ENOMEM;
goto REGION_ALLOC_FAILED;
}
- if (space->heapBase == space->heapNow) {
- region = LOS_RegionAlloc(space, space->heapBase, size,
- VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE |
+ if (space->heapBase == space->heapNow) {//往往是第一次调用本函数才会出现,因为初始化时 heapBase = heapNow
+ region = LOS_RegionAlloc(space, space->heapBase, size,//分配一个可读/可写/可使用的线性区,只需分配一次
+ VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE |//线性区的大小由range.size决定
VM_MAP_REGION_FLAG_FIXED | VM_MAP_REGION_FLAG_PERM_USER, 0);
if (region == NULL) {
ret = (VOID *)-ENOMEM;
VM_ERR("LOS_RegionAlloc failed");
goto REGION_ALLOC_FAILED;
}
- region->regionFlags |= VM_MAP_REGION_FLAG_HEAP;
- space->heap = region;
+ region->regionFlags |= VM_MAP_REGION_FLAG_HEAP;//贴上线性区类型为堆区的标签,注意一个线性区可以有多种标签
+ space->heap = region;//指定线性区为堆区
}
- space->heapNow = (VADDR_T)(UINTPTR)alignAddr;
- space->heap->range.size = size;
- ret = (VOID *)(UINTPTR)space->heapNow;
+ space->heapNow = (VADDR_T)(UINTPTR)alignAddr;//更新堆区顶部位置
+ space->heap->range.size = size; //更新堆区大小,经此操作线性区变大或缩小了
+ ret = (VOID *)(UINTPTR)space->heapNow;//返回堆顶
REGION_ALLOC_FAILED:
(VOID)LOS_MuxRelease(&space->regionMux);
return ret;
}
-
+/// 继承老线性区的标签
STATIC UINT32 OsInheritOldRegionName(UINT32 oldRegionFlags)
{
UINT32 vmFlags = 0;
- if (oldRegionFlags & VM_MAP_REGION_FLAG_HEAP) {
- vmFlags |= VM_MAP_REGION_FLAG_HEAP;
+ if (oldRegionFlags & VM_MAP_REGION_FLAG_HEAP) { //如果是从大堆区中申请的
+ vmFlags |= VM_MAP_REGION_FLAG_HEAP; //线性区则贴上堆区标签
} else if (oldRegionFlags & VM_MAP_REGION_FLAG_STACK) {
vmFlags |= VM_MAP_REGION_FLAG_STACK;
} else if (oldRegionFlags & VM_MAP_REGION_FLAG_TEXT) {
@@ -277,7 +333,7 @@ STATIC UINT32 OsInheritOldRegionName(UINT32 oldRegionFlags)
return vmFlags;
}
-
+///修改内存段的访问权限
INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
{
LosVmSpace *space = OsCurrProcessGet()->vmSpace;
@@ -287,7 +343,7 @@ INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
int ret;
(VOID)LOS_MuxAcquire(&space->regionMux);
- region = LOS_RegionFind(space, vaddr);
+ region = LOS_RegionFind(space, vaddr);//通过虚拟地址找到线性区
if (!IS_ALIGNED(vaddr, PAGE_SIZE) || (region == NULL) || (vaddr > vaddr + len)) {
ret = -EINVAL;
goto OUT_MPROTECT;
@@ -297,19 +353,18 @@ INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
ret = -EINVAL;
goto OUT_MPROTECT;
}
-
+ //如果是堆区或VDSO区,说明区内容是不能修改的
if ((region->regionFlags & VM_MAP_REGION_FLAG_VDSO) || (region->regionFlags & VM_MAP_REGION_FLAG_HEAP)) {
ret = -EPERM;
goto OUT_MPROTECT;
}
-
+ //如果是共享文件,说明内容也不能修改
if (LOS_IsRegionTypeFile(region) && (region->regionFlags & VM_MAP_REGION_FLAG_SHARED)) {
if (!OsProtMprotectPermCheck(prot, region)) {
ret = -EACCES;
goto OUT_MPROTECT;
}
}
-
len = LOS_Align(len, PAGE_SIZE);
/* can't operation cross region */
if ((region->range.base + region->range.size) < (vaddr + len)) {
@@ -318,11 +373,11 @@ INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
}
/* if only move some part of region, we need to split first */
- if (region->range.size > len) {
- OsVmRegionAdjust(space, vaddr, len);
+ if (region->range.size > len) {//如果只修改部分区域,我们需要先拆分区
+ OsVmRegionAdjust(space, vaddr, len);//调整下线性区范围
}
- vmFlags = OsCvtProtFlagsToRegionFlags(prot, 0);
+ vmFlags = OsCvtProtFlagsToRegionFlags(prot, 0);//转换FLAGS
vmFlags |= (region->regionFlags & VM_MAP_REGION_FLAG_SHARED) ? VM_MAP_REGION_FLAG_SHARED : 0;
vmFlags |= OsInheritOldRegionName(region->regionFlags);
region = LOS_RegionFind(space, vaddr);
@@ -332,7 +387,7 @@ INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
}
region->regionFlags = vmFlags;
count = len >> PAGE_SHIFT;
- ret = LOS_ArchMmuChangeProt(&space->archMmu, vaddr, count, region->regionFlags);
+ ret = LOS_ArchMmuChangeProt(&space->archMmu, vaddr, count, region->regionFlags);//修改访问权限实体函数
if (ret) {
ret = -ENOMEM;
goto OUT_MPROTECT;
@@ -387,7 +442,7 @@ STATUS_T OsMremapCheck(VADDR_T addr, size_t oldLen, VADDR_T newAddr, size_t newL
}
}
- /* avoid new region overlapping with the old one */
+ /* avoid new region overlapping with the old one */
if (flags & MREMAP_FIXED) {
if (((region->range.base + region->range.size) > newAddr) &&
(region->range.base < (newAddr + newLen))) {
@@ -401,7 +456,7 @@ STATUS_T OsMremapCheck(VADDR_T addr, size_t oldLen, VADDR_T newAddr, size_t newL
return LOS_OK;
}
-
+///重新映射虚拟内存地址。
VADDR_T LOS_DoMremap(VADDR_T oldAddress, size_t oldSize, size_t newSize, int flags, VADDR_T newAddr)
{
LosVmMapRegion *regionOld = NULL;
@@ -496,7 +551,7 @@ OUT_MREMAP:
(VOID)LOS_MuxRelease(&space->regionMux);
return ret;
}
-
+///输出内存线性区
VOID LOS_DumpMemRegion(VADDR_T vaddr)
{
LosVmSpace *space = NULL;
@@ -506,12 +561,12 @@ VOID LOS_DumpMemRegion(VADDR_T vaddr)
return;
}
- if (LOS_IsRangeInSpace(space, ROUNDDOWN(vaddr, MB), MB) == FALSE) {
+ if (LOS_IsRangeInSpace(space, ROUNDDOWN(vaddr, MB), MB) == FALSE) {//是否在空间范围内
return;
}
- OsDumpPte(vaddr);
- OsDumpAspace(space);
+ OsDumpPte(vaddr);//dump L1 L2
+ OsDumpAspace(space);//dump 空间
}
#endif
diff --git a/src/kernel_liteos_a/kernel/base/vm/oom.c b/src/kernel_liteos_a/kernel/base/vm/oom.c
index 72996781..c8091250 100644
--- a/src/kernel_liteos_a/kernel/base/vm/oom.c
+++ b/src/kernel_liteos_a/kernel/base/vm/oom.c
@@ -47,8 +47,8 @@
#ifdef LOSCFG_KERNEL_VM
-LITE_OS_SEC_BSS OomCB *g_oomCB = NULL;
-static SPIN_LOCK_INIT(g_oomSpinLock);
+LITE_OS_SEC_BSS OomCB *g_oomCB = NULL; //全局内存溢出控制块
+static SPIN_LOCK_INIT(g_oomSpinLock);//内存溢出自旋锁
LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomScoreProcess(LosProcessCB *candidateProcess)
{
@@ -57,20 +57,20 @@ LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomScoreProcess(LosProcessCB *candidateProc
#ifndef LOSCFG_KERNEL_SMP
(VOID)LOS_MuxAcquire(&candidateProcess->vmSpace->regionMux);
#endif
- /* we only consider actual physical memory here. */
+ /* we only consider actual physical memory here. */ //只考虑实际的物理内存
OsUProcessPmUsage(candidateProcess->vmSpace, NULL, &actualPm);
#ifndef LOSCFG_KERNEL_SMP
(VOID)LOS_MuxRelease(&candidateProcess->vmSpace->regionMux);
#endif
return actualPm;
}
-
+///用于设置 g_oomCB->processVictimCB 回调函数
LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomKillProcess(UINTPTR param)
{
/* we will not kill process, and do nothing here */
return LOS_OK;
}
-
+///强制收缩内存
LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomForceShrinkMemory(VOID)
{
UINT32 i;
@@ -80,13 +80,14 @@ LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomForceShrinkMemory(VOID)
* TryShrinkMemory maybe reclaim 0 pages in the first time from active list
* to inactive list, and in the second time reclaim memory from inactive list.
*/
+ //TryShrinkMemory可能会在第一时间从活动列表中回收0页到非活动列表,并在第二次从非活动列表中回收内存。
for (i = 0; i < MAX_SHRINK_PAGECACHE_TRY; i++) {
reclaimMemPages += OsTryShrinkMemory(0);
}
return reclaimMemPages;
}
-
+///内存不足时回收页高速缓存
LITE_OS_SEC_TEXT_MINOR STATIC BOOL OomReclaimPageCache(VOID)
{
UINT32 totalPm = 0;
@@ -96,43 +97,44 @@ LITE_OS_SEC_TEXT_MINOR STATIC BOOL OomReclaimPageCache(VOID)
UINT32 i;
for (i = 0; i < MAX_SHRINK_PAGECACHE_TRY; i++) {
- OsVmPhysUsedInfoGet(&usedPm, &totalPm);
- isReclaimMemory = ((totalPm - usedPm) << PAGE_SHIFT) < g_oomCB->reclaimMemThreshold;
- if (isReclaimMemory) {
+ OsVmPhysUsedInfoGet(&usedPm, &totalPm);//获取总的和已经使用的物理内存数量
+ isReclaimMemory = ((totalPm - usedPm) << PAGE_SHIFT) < g_oomCB->reclaimMemThreshold;//检查是否过了回收门槛
+ if (isReclaimMemory) {//要回收了
/*
* we do force memory reclaim from page cache here.
* if we get memory, we will reclaim pagecache memory again.
* if there is no memory to reclaim, we will return.
*/
- reclaimMemPages = OomForceShrinkMemory();
- if (reclaimMemPages > 0) {
+ //在这里强制从页缓存中回收内存,
+ reclaimMemPages = OomForceShrinkMemory();//强制回收内存
+ if (reclaimMemPages > 0) {//如果得到内存,将再次回收pagecache内存
continue;
}
}
- break;
+ break;//实在没有内存可回收
}
- return isReclaimMemory;
+ return isReclaimMemory;//返回是否进行了内存回收(BOOL)
}
/*
- * check is low memory or not, if low memory, try to kill process.
- * return is kill process or not.
+ * check is low memory or not, if low memory, try to kill process.
+ * return is kill process or not.
*/
-LITE_OS_SEC_TEXT_MINOR BOOL OomCheckProcess(VOID)
+LITE_OS_SEC_TEXT_MINOR BOOL OomCheckProcess(VOID)//检查内存是否不足,如果内存不足,请尝试终止进程,返回是否kill进程
{
UINT32 totalPm;
UINT32 usedPm;
BOOL isLowMemory = FALSE;
/*
- * spinlock the current core schedule, make sure oom process atomic
- * spinlock other place entering OomCheckProcess, make sure oom process mutex
+ * spinlock the current core schedule, make sure oom process atomic //旋转锁定当前核心计划,确保oom进程原子化
+ * spinlock other place entering OomCheckProcess, make sure oom process mutex //旋转锁定其他进入OomCheckProcess的地方,确保oom进程互斥
*/
LOS_SpinLock(&g_oomSpinLock);
/* first we will check if we need to reclaim pagecache memory */
- if (OomReclaimPageCache() == FALSE) {
+ if (OomReclaimPageCache() == FALSE) {//未达到回收门槛,无需回收页缓存
LOS_SpinUnlock(&g_oomSpinLock);
goto NO_VICTIM_PROCESS;
}
@@ -140,9 +142,7 @@ LITE_OS_SEC_TEXT_MINOR BOOL OomCheckProcess(VOID)
/* get free bytes */
OsVmPhysUsedInfoGet(&usedPm, &totalPm);
isLowMemory = ((totalPm - usedPm) << PAGE_SHIFT) < g_oomCB->lowMemThreshold;
-
LOS_SpinUnlock(&g_oomSpinLock);
-
if (isLowMemory) {
PRINTK("[oom] OS is in low memory state\n"
"total physical memory: %#x(byte), used: %#x(byte),"
@@ -155,14 +155,14 @@ NO_VICTIM_PROCESS:
return isLowMemory;
}
-#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK
-STATIC VOID OomWriteEvent(VOID)
+#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK //内存溢出监测任务开关
+STATIC VOID OomWriteEvent(VOID) // OomTaskInit中创建的定时器回调
{
- OsWriteResourceEvent(OS_RESOURCE_EVENT_OOM);
+ OsWriteResourceEvent(OS_RESOURCE_EVENT_OOM);//广播内存溢出事件
}
#endif
-
-LITE_OS_SEC_TEXT_MINOR VOID OomInfodump(VOID)
+//打印内存不足时的信息
+LITE_OS_SEC_TEXT_MINOR VOID OomInfodump(VOID) //打印内存溢出信息
{
PRINTK("[oom] oom loop task status: %s\n"
" oom low memory threshold: %#x(byte)\n"
@@ -172,7 +172,7 @@ LITE_OS_SEC_TEXT_MINOR VOID OomInfodump(VOID)
g_oomCB->lowMemThreshold, g_oomCB->reclaimMemThreshold,
g_oomCB->checkInterval);
}
-
+///设置低内存门槛
LITE_OS_SEC_TEXT_MINOR VOID OomSetLowMemThreashold(UINT32 lowMemThreshold)
{
if ((lowMemThreshold > OOM_DEFAULT_LOW_MEM_THRESHOLD_MAX)) {
@@ -186,7 +186,7 @@ LITE_OS_SEC_TEXT_MINOR VOID OomSetLowMemThreashold(UINT32 lowMemThreshold)
g_oomCB->lowMemThreshold);
}
}
-
+///设置回收内存的门槛
LITE_OS_SEC_TEXT_MINOR VOID OomSetReclaimMemThreashold(UINT32 reclaimMemThreshold)
{
UINT32 totalPm = 0;
@@ -204,7 +204,7 @@ LITE_OS_SEC_TEXT_MINOR VOID OomSetReclaimMemThreashold(UINT32 reclaimMemThreshol
g_oomCB->reclaimMemThreshold);
}
}
-
+///设置监控间隔
LITE_OS_SEC_TEXT_MINOR VOID OomSetCheckInterval(UINT32 checkInterval)
{
if ((checkInterval >= OOM_CHECK_MIN) && (checkInterval <= OOM_CHECK_MAX)) {
@@ -216,7 +216,7 @@ LITE_OS_SEC_TEXT_MINOR VOID OomSetCheckInterval(UINT32 checkInterval)
g_oomCB->checkInterval, OOM_CHECK_MIN, OOM_CHECK_MAX);
}
}
-
+///内存不足监控任务初始化, OOM 通过开一个软件定时器来检查内存的使用情况
LITE_OS_SEC_TEXT_MINOR UINT32 OomTaskInit(VOID)
{
g_oomCB = (OomCB *)LOS_MemAlloc(m_aucSysMem0, sizeof(OomCB));
@@ -225,28 +225,28 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OomTaskInit(VOID)
return LOS_NOK;
}
- g_oomCB->lowMemThreshold = OOM_DEFAULT_LOW_MEM_THRESHOLD;
- g_oomCB->reclaimMemThreshold = OOM_DEFAULT_RECLAIM_MEM_THRESHOLD;
- g_oomCB->checkInterval = OOM_DEFAULT_CHECK_INTERVAL;
- g_oomCB->processVictimCB = (OomFn)OomKillProcess;
- g_oomCB->scoreCB = (OomFn)OomScoreProcess;
- g_oomCB->enabled = FALSE;
+ g_oomCB->lowMemThreshold = OOM_DEFAULT_LOW_MEM_THRESHOLD; //运行任务的门槛
+ g_oomCB->reclaimMemThreshold = OOM_DEFAULT_RECLAIM_MEM_THRESHOLD; //回收内存的门槛
+ g_oomCB->checkInterval = OOM_DEFAULT_CHECK_INTERVAL; //检测时间间隔 1S
+ g_oomCB->processVictimCB = (OomFn)OomKillProcess; //出问题时对进程的处理函数
+ g_oomCB->scoreCB = (OomFn)OomScoreProcess; //统计进程占用的物理内存
+ g_oomCB->enabled = FALSE; //是否启用监控
-#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK
+#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK //内存溢出检测开关
g_oomCB->enabled = TRUE;
UINT32 ret = LOS_SwtmrCreate(g_oomCB->checkInterval, LOS_SWTMR_MODE_PERIOD, (SWTMR_PROC_FUNC)OomWriteEvent,
- &g_oomCB->swtmrID, (UINTPTR)g_oomCB);
+ &g_oomCB->swtmrID, (UINTPTR)g_oomCB);//创建检测定时器
if (ret != LOS_OK) {
return ret;
}
- return LOS_SwtmrStart(g_oomCB->swtmrID);
+ return LOS_SwtmrStart(g_oomCB->swtmrID);//启动定时器
#else
return LOS_OK;
#endif
}
-LOS_MODULE_INIT(OomTaskInit, LOS_INIT_LEVEL_KMOD_TASK);
+LOS_MODULE_INIT(OomTaskInit, LOS_INIT_LEVEL_KMOD_TASK);//初始化内存监控模块
#endif
diff --git a/src/kernel_liteos_a/kernel/base/vm/shm.c b/src/kernel_liteos_a/kernel/base/vm/shm.c
index 637c6167..9c4ad450 100644
--- a/src/kernel_liteos_a/kernel/base/vm/shm.c
+++ b/src/kernel_liteos_a/kernel/base/vm/shm.c
@@ -1,3 +1,24 @@
+/*!
+ * @file shm.c
+ * @brief
+ * @link
+ @verbatim
+ 什么是共享内存
+ 顾名思义,共享内存就是允许两个不相关的进程访问同一个物理内存。共享内存是在两个正在运行的进程之间
+ 共享和传递数据的一种非常有效的方式。不同进程之间共享的内存通常安排为同一段物理内存。进程可以将同
+ 一段共享内存连接到它们自己的地址空间中,所有进程都可以访问共享内存中的地址,就好像它们是由用C语言
+ 函数malloc()分配的内存一样。而如果某个进程向共享内存写入数据,所做的改动将立即影响到可以访问同一段
+ 共享内存的任何其他进程。
+
+ 特别提醒:共享内存并未提供同步机制,也就是说,在第一个进程结束对共享内存的写操作之前,并无自动机制
+ 可以阻止第二个进程开始对它进行读取。所以我们通常需要用其他的机制来同步对共享内存的访问
+
+ 共享线性区可以由任意的进程创建,每个使用共享线性区都必须经过映射.
+ @endverbatim
+ * @version
+ * @author weharmonyos.com | 鸿蒙研究站 | 每天死磕一点点
+ * @date 2021-12-24
+ */
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
@@ -55,9 +76,9 @@
#ifdef LOSCFG_KERNEL_SHM
-#define SHM_SEG_FREE 0x2000
-#define SHM_SEG_USED 0x4000
-#define SHM_SEG_REMOVE 0x8000
+#define SHM_SEG_FREE 0x2000 //空闲未使用
+#define SHM_SEG_USED 0x4000 //已使用
+#define SHM_SEG_REMOVE 0x8000 //删除
#ifndef SHM_M
#define SHM_M 010000
@@ -66,21 +87,17 @@
#ifndef SHM_X
#define SHM_X 0100
#endif
-
#ifndef ACCESSPERMS
-#define ACCESSPERMS (S_IRWXU | S_IRWXG | S_IRWXO)
-#endif
+#define ACCESSPERMS (S_IRWXU | S_IRWXG | S_IRWXO)//文件权限值意思就是 用户,用户组,其他可读可写.
+#endif //代表含义U:user G:group O:other
#define SHM_S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
#define SHM_S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)
#define SHM_S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)
-
#define SHM_GROUPE_TO_USER 3
#define SHM_OTHER_TO_USER 6
-
#ifndef LOSCFG_IPC_CONTAINER
STATIC LosMux g_sysvShmMux;
-
/* private data */
STATIC struct shminfo g_shmInfo;
STATIC struct shmIDSource *g_shmSegs = NULL;
@@ -96,6 +113,60 @@ STATIC UINT32 g_shmUsedPageCount;
#define SYSV_SHM_LOCK() (VOID)LOS_MuxLock(&IPC_SHM_SYS_VSHM_MUTEX, LOS_WAIT_FOREVER)
#define SYSV_SHM_UNLOCK() (VOID)LOS_MuxUnlock(&IPC_SHM_SYS_VSHM_MUTEX)
+#if 0 // @note_#if0
+
+//内核为每一个IPC对象保存一个ipc_perm结构体,该结构说明了IPC对象的权限和所有者
+struct ipc_perm {
+ key_t __ipc_perm_key; //调用shmget()时给出的关键字
+ uid_t uid; //共享内存所有者的有效用户ID
+ gid_t gid; //共享内存所有者所属组的有效组ID
+ uid_t cuid; //共享内存创建 者的有效用户ID
+ gid_t cgid; //共享内存创建者所属组的有效组ID
+ mode_t mode; //权限 + SHM_DEST / SHM_LOCKED /SHM_HUGETLB 标志位
+ int __ipc_perm_seq; //序列号
+ long __pad1; //保留扩展用
+ long __pad2;
+};
+//每个共享内存段在内核中维护着一个内部结构shmid_ds
+struct shmid_ds {
+ struct ipc_perm shm_perm;///< 操作许可,里面包含共享内存的用户ID、组ID等信息
+ size_t shm_segsz; ///< 共享内存段的大小,单位为字节
+ time_t shm_atime; ///< 最后一个进程访问共享内存的时间
+ time_t shm_dtime; ///< 最后一个进程离开共享内存的时间
+ time_t shm_ctime; ///< 创建时间
+ pid_t shm_cpid; ///< 创建共享内存的进程ID
+ pid_t shm_lpid; ///< 最后操作共享内存的进程ID
+ unsigned long shm_nattch; ///< 当前使用该共享内存段的进程数量
+ unsigned long __pad1; //保留扩展用
+ unsigned long __pad2;
+};
+// 共享内存模块设置信息
+struct shminfo {
+ unsigned long shmmax, shmmin, shmmni, shmseg, shmall, __unused[4];
+};
+struct shmIDSource {//共享内存描述符
+ struct shmid_ds ds; //是内核为每一个共享内存段维护的数据结构
+ UINT32 status; //状态 SHM_SEG_FREE ...
+ LOS_DL_LIST node; //节点,挂VmPage
+#ifdef LOSCFG_SHELL
+ CHAR ownerName[OS_PCB_NAME_LEN];
+#endif
+};
+
+/* private data */
+STATIC struct shminfo g_shmInfo = { //描述共享内存范围的全局变量
+ .shmmax = SHM_MAX,//共享内存单个上限 4096页 即 16M
+ .shmmin = SHM_MIN,//共享内存单个下限 1页 即:4K
+ .shmmni = SHM_MNI,//共享内存总数 默认192
+ .shmseg = SHM_SEG,//每个用户进程可以使用的最多的共享内存段的数目 128
+ .shmall = SHM_ALL,//系统范围内共享内存的总页数, 4096页
+};
+
+STATIC struct shmIDSource *g_shmSegs = NULL;
+STATIC UINT32 g_shmUsedPageCount;
+#endif
+
+//共享内存初始化
struct shmIDSource *OsShmCBInit(LosMux *sysvShmMux, struct shminfo *shmInfo, UINT32 *shmUsedPageCount)
{
UINT32 ret;
@@ -104,7 +175,6 @@ struct shmIDSource *OsShmCBInit(LosMux *sysvShmMux, struct shminfo *shmInfo, UIN
if ((sysvShmMux == NULL) || (shmInfo == NULL) || (shmUsedPageCount == NULL)) {
return NULL;
}
-
ret = LOS_MuxInit(sysvShmMux, NULL);
if (ret != LOS_OK) {
goto ERROR;
@@ -115,7 +185,6 @@ struct shmIDSource *OsShmCBInit(LosMux *sysvShmMux, struct shminfo *shmInfo, UIN
shmInfo->shmmni = SHM_MNI;
shmInfo->shmseg = SHM_SEG;
shmInfo->shmall = SHM_ALL;
-
struct shmIDSource *shmSegs = LOS_MemAlloc((VOID *)OS_SYS_MEM_ADDR, sizeof(struct shmIDSource) * shmInfo->shmmni);
if (shmSegs == NULL) {
(VOID)LOS_MuxDestroy(sysvShmMux);
@@ -125,9 +194,9 @@ struct shmIDSource *OsShmCBInit(LosMux *sysvShmMux, struct shminfo *shmInfo, UIN
0, (sizeof(struct shmIDSource) * shmInfo->shmmni));
for (i = 0; i < shmInfo->shmmni; i++) {
- shmSegs[i].status = SHM_SEG_FREE;
- shmSegs[i].ds.shm_perm.seq = i + 1;
- LOS_ListInit(&shmSegs[i].node);
+ shmSegs[i].status = SHM_SEG_FREE;//节点初始状态为空闲
+ shmSegs[i].ds.shm_perm.seq = i + 1;//struct ipc_perm shm_perm;系统为每一个IPC对象保存一个ipc_perm结构体,结构说明了IPC对象的权限和所有者
+ LOS_ListInit(&shmSegs[i].node);//初始化节点
}
*shmUsedPageCount = 0;
@@ -137,7 +206,6 @@ ERROR:
VM_ERR("ShmInit fail\n");
return NULL;
}
-
UINT32 ShmInit(VOID)
{
#ifndef LOSCFG_IPC_CONTAINER
@@ -149,8 +217,9 @@ UINT32 ShmInit(VOID)
return LOS_OK;
}
-LOS_MODULE_INIT(ShmInit, LOS_INIT_LEVEL_VM_COMPLETE);
+LOS_MODULE_INIT(ShmInit, LOS_INIT_LEVEL_VM_COMPLETE);//共享内存模块初始化
+//共享内存反初始化
UINT32 ShmDeinit(VOID)
{
UINT32 ret;
@@ -165,7 +234,7 @@ UINT32 ShmDeinit(VOID)
return 0;
}
-
+///给共享段中所有物理页框贴上共享标签
STATIC inline VOID ShmSetSharedFlag(struct shmIDSource *seg)
{
LosVmPage *page = NULL;
@@ -174,7 +243,7 @@ STATIC inline VOID ShmSetSharedFlag(struct shmIDSource *seg)
OsSetPageShared(page);
}
}
-
+///给共享段中所有物理页框撕掉共享标签
STATIC inline VOID ShmClearSharedFlag(struct shmIDSource *seg)
{
LosVmPage *page = NULL;
@@ -183,7 +252,7 @@ STATIC inline VOID ShmClearSharedFlag(struct shmIDSource *seg)
OsCleanPageShared(page);
}
}
-
+///seg下所有共享页引用减少
STATIC VOID ShmPagesRefDec(struct shmIDSource *seg)
{
LosVmPage *page = NULL;
@@ -193,6 +262,15 @@ STATIC VOID ShmPagesRefDec(struct shmIDSource *seg)
}
}
+/**
+ * @brief 为共享段分配物理内存
+ 例如:参数size = 4097, LOS_Align(size, PAGE_SIZE) = 8192
+ 分配页数 size >> PAGE_SHIFT = 2页
+ * @param key
+ * @param size
+ * @param shmflg
+ * @return STATIC
+ */
STATIC INT32 ShmAllocSegCheck(key_t key, size_t *size, INT32 *segNum)
{
INT32 i;
@@ -201,7 +279,7 @@ STATIC INT32 ShmAllocSegCheck(key_t key, size_t *size, INT32 *segNum)
return -EINVAL;
}
- *size = LOS_Align(*size, PAGE_SIZE);
+ *size = LOS_Align(*size, PAGE_SIZE);//必须对齐
if ((IPC_SHM_USED_PAGE_COUNT + (*size >> PAGE_SHIFT)) > IPC_SHM_INFO.shmall) {
return -ENOMEM;
}
@@ -211,11 +289,10 @@ STATIC INT32 ShmAllocSegCheck(key_t key, size_t *size, INT32 *segNum)
return -ENOMEM;
}
#endif
-
- for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {
- if (IPC_SHM_SEGS[i].status & SHM_SEG_FREE) {
- IPC_SHM_SEGS[i].status &= ~SHM_SEG_FREE;
- *segNum = i;
+ for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {//试图找到一个空闲段与参数key绑定
+ if (IPC_SHM_SEGS[i].status & SHM_SEG_FREE) {//找到空闲段
+ IPC_SHM_SEGS[i].status &= ~SHM_SEG_FREE;//变成非空闲状态
+ *segNum = i;//标号
break;
}
}
@@ -236,49 +313,47 @@ STATIC INT32 ShmAllocSeg(key_t key, size_t size, INT32 shmflg)
if (ret < 0) {
return ret;
}
-
seg = &IPC_SHM_SEGS[segNum];
- count = LOS_PhysPagesAlloc(size >> PAGE_SHIFT, &seg->node);
- if (count != (size >> PAGE_SHIFT)) {
- (VOID)LOS_PhysPagesFree(&seg->node);
- seg->status = SHM_SEG_FREE;
+ count = LOS_PhysPagesAlloc(size >> PAGE_SHIFT, &seg->node);//分配共享页面,函数内部把node都挂好了.
+ if (count != (size >> PAGE_SHIFT)) {//当未分配到足够的内存时,处理方式是:不稀罕给那么点,舍弃!
+ (VOID)LOS_PhysPagesFree(&seg->node);//释放节点上的物理页框
+ seg->status = SHM_SEG_FREE;//共享段变回空闲状态
#ifdef LOSCFG_KERNEL_IPC_PLIMIT
OsIPCLimitShmFree(size);
#endif
return -ENOMEM;
}
-
- ShmSetSharedFlag(seg);
+ ShmSetSharedFlag(seg);//将node的每个页面设置为共享页
IPC_SHM_USED_PAGE_COUNT += size >> PAGE_SHIFT;
- seg->status |= SHM_SEG_USED;
+ seg->status |= SHM_SEG_USED; //共享段贴上已在使用的标签
seg->ds.shm_perm.mode = (UINT32)shmflg & ACCESSPERMS;
- seg->ds.shm_perm.key = key;
- seg->ds.shm_segsz = size;
- seg->ds.shm_perm.cuid = LOS_GetUserID();
- seg->ds.shm_perm.uid = LOS_GetUserID();
- seg->ds.shm_perm.cgid = LOS_GetGroupID();
- seg->ds.shm_perm.gid = LOS_GetGroupID();
- seg->ds.shm_lpid = 0;
- seg->ds.shm_nattch = 0;
- seg->ds.shm_cpid = LOS_GetCurrProcessID();
- seg->ds.shm_atime = 0;
- seg->ds.shm_dtime = 0;
- seg->ds.shm_ctime = time(NULL);
+ seg->ds.shm_perm.key = key;//保存参数key,如此 key 和 共享ID绑定在一块
+ seg->ds.shm_segsz = size; //共享段的大小
+ seg->ds.shm_perm.cuid = LOS_GetUserID(); //设置用户ID
+ seg->ds.shm_perm.uid = LOS_GetUserID(); //设置用户ID
+ seg->ds.shm_perm.cgid = LOS_GetGroupID(); //设置组ID
+ seg->ds.shm_perm.gid = LOS_GetGroupID(); //设置组ID
+ seg->ds.shm_lpid = 0; //最后一个操作的进程
+ seg->ds.shm_nattch = 0; //绑定进程的数量
+ seg->ds.shm_cpid = LOS_GetCurrProcessID(); //获取进程ID
+ seg->ds.shm_atime = 0; //访问时间
+ seg->ds.shm_dtime = 0; //detach 分离时间 共享内存使用完之后,需要将它从进程地址空间中分离出来;将共享内存分离并不是删除它,只是使该共享内存对当前的进程不再可用
+ seg->ds.shm_ctime = time(NULL);//创建时间
#ifdef LOSCFG_SHELL
(VOID)memcpy_s(seg->ownerName, OS_PCB_NAME_LEN, OsCurrProcessGet()->processName, OS_PCB_NAME_LEN);
#endif
return segNum;
}
-
+///释放seg->node 所占物理页框,seg本身重置
STATIC INLINE VOID ShmFreeSeg(struct shmIDSource *seg, UINT32 *shmUsedPageCount)
{
UINT32 count;
- ShmClearSharedFlag(seg);
- count = LOS_PhysPagesFree(&seg->node);
- if (count != (seg->ds.shm_segsz >> PAGE_SHIFT)) {
+ ShmClearSharedFlag(seg);//先撕掉 seg->node 中vmpage的共享标签
+ count = LOS_PhysPagesFree(&seg->node);//再挨个删除物理页框
+ if (count != (seg->ds.shm_segsz >> PAGE_SHIFT)) {//异常,必须要一样
VM_ERR("free physical pages failed, count = %d, size = %d", count, seg->ds.shm_segsz >> PAGE_SHIFT);
return;
}
@@ -288,31 +363,31 @@ STATIC INLINE VOID ShmFreeSeg(struct shmIDSource *seg, UINT32 *shmUsedPageCount)
if (shmUsedPageCount != NULL) {
(*shmUsedPageCount) -= seg->ds.shm_segsz >> PAGE_SHIFT;
}
- seg->status = SHM_SEG_FREE;
- LOS_ListInit(&seg->node);
+ seg->status = SHM_SEG_FREE;//seg恢复自由之身
+ LOS_ListInit(&seg->node);//重置node
}
-
+///通过key查找 shmId
STATIC INT32 ShmFindSegByKey(key_t key)
{
INT32 i;
struct shmIDSource *seg = NULL;
- for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {
+ for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {//遍历共享段池,找到与key绑定的共享ID
seg = &IPC_SHM_SEGS[i];
if ((seg->status & SHM_SEG_USED) &&
- (seg->ds.shm_perm.key == key)) {
+ (seg->ds.shm_perm.key == key)) {//满足两个条件,找到后返回
return i;
}
}
return -1;
}
-
+///共享内存段有效性检查
STATIC INT32 ShmSegValidCheck(INT32 segNum, size_t size, INT32 shmFlg)
{
- struct shmIDSource *seg = &IPC_SHM_SEGS[segNum];
+ struct shmIDSource *seg = &IPC_SHM_SEGS[segNum];//通过段号拿到共享段描述符
- if (size > seg->ds.shm_segsz) {
+ if (size > seg->ds.shm_segsz) {//段长
return -EINVAL;
}
@@ -323,7 +398,7 @@ STATIC INT32 ShmSegValidCheck(INT32 segNum, size_t size, INT32 shmFlg)
return segNum;
}
-
+///通过ID找到共享内存资源
STATIC struct shmIDSource *ShmFindSeg(int shmid)
{
struct shmIDSource *seg = NULL;
@@ -341,7 +416,7 @@ STATIC struct shmIDSource *ShmFindSeg(int shmid)
return seg;
}
-
+///共享内存映射
STATIC VOID ShmVmmMapping(LosVmSpace *space, LOS_DL_LIST *pageList, VADDR_T vaddr, UINT32 regionFlags)
{
LosVmPage *vmPage = NULL;
@@ -349,64 +424,64 @@ STATIC VOID ShmVmmMapping(LosVmSpace *space, LOS_DL_LIST *pageList, VADDR_T vadd
PADDR_T pa;
STATUS_T ret;
- LOS_DL_LIST_FOR_EACH_ENTRY(vmPage, pageList, LosVmPage, node) {
- pa = VM_PAGE_TO_PHYS(vmPage);
- LOS_AtomicInc(&vmPage->refCounts);
- ret = LOS_ArchMmuMap(&space->archMmu, va, pa, 1, regionFlags);
+ LOS_DL_LIST_FOR_EACH_ENTRY(vmPage, pageList, LosVmPage, node) {//遍历一页一页映射
+ pa = VM_PAGE_TO_PHYS(vmPage);//拿到物理地址
+ LOS_AtomicInc(&vmPage->refCounts);//自增
+ ret = LOS_ArchMmuMap(&space->archMmu, va, pa, 1, regionFlags);//虚实映射
if (ret != 1) {
VM_ERR("LOS_ArchMmuMap failed, ret = %d", ret);
}
va += PAGE_SIZE;
}
}
-
+///fork 一个共享线性区
VOID OsShmFork(LosVmSpace *space, LosVmMapRegion *oldRegion, LosVmMapRegion *newRegion)
{
struct shmIDSource *seg = NULL;
SYSV_SHM_LOCK();
- seg = ShmFindSeg(oldRegion->shmid);
+ seg = ShmFindSeg(oldRegion->shmid);//通过老区ID获取对应的共享资源ID结构体
if (seg == NULL) {
SYSV_SHM_UNLOCK();
VM_ERR("shm fork failed!");
return;
}
- newRegion->shmid = oldRegion->shmid;
- newRegion->forkFlags = oldRegion->forkFlags;
- ShmVmmMapping(space, &seg->node, newRegion->range.base, newRegion->regionFlags);
- seg->ds.shm_nattch++;
+ newRegion->shmid = oldRegion->shmid;//一样的共享区ID
+ newRegion->forkFlags = oldRegion->forkFlags;//forkFlags也一样了
+ ShmVmmMapping(space, &seg->node, newRegion->range.base, newRegion->regionFlags);//新线性区与共享内存进行映射
+ seg->ds.shm_nattch++;//附在共享线性区上的进程数++
SYSV_SHM_UNLOCK();
}
-
+///释放共享线性区
VOID OsShmRegionFree(LosVmSpace *space, LosVmMapRegion *region)
{
struct shmIDSource *seg = NULL;
SYSV_SHM_LOCK();
- seg = ShmFindSeg(region->shmid);
+ seg = ShmFindSeg(region->shmid);//通过线性区ID获取对应的共享资源ID结构体
if (seg == NULL) {
SYSV_SHM_UNLOCK();
return;
}
- LOS_ArchMmuUnmap(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);
- ShmPagesRefDec(seg);
- seg->ds.shm_nattch--;
+ LOS_ArchMmuUnmap(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);//解除线性区的映射
+ ShmPagesRefDec(seg);//ref --
+ seg->ds.shm_nattch--;//附在共享线性区上的进程数--
if (seg->ds.shm_nattch <= 0 && (seg->status & SHM_SEG_REMOVE)) {
- ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);
+ ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);//就释放掉物理内存!注意是:物理内存
} else {
seg->ds.shm_dtime = time(NULL);
seg->ds.shm_lpid = LOS_GetCurrProcessID(); /* may not be the space's PID. */
}
SYSV_SHM_UNLOCK();
}
-
+///是否为共享线性区,是否有标签?
BOOL OsIsShmRegion(LosVmMapRegion *region)
{
return (region->regionFlags & VM_MAP_REGION_FLAG_SHM) ? TRUE : FALSE;
}
-
+///获取共享内存池中已被使用的段数量
STATIC INT32 ShmSegUsedCount(VOID)
{
INT32 i;
@@ -415,16 +490,16 @@ STATIC INT32 ShmSegUsedCount(VOID)
for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {
seg = &IPC_SHM_SEGS[i];
- if (seg->status & SHM_SEG_USED) {
+ if (seg->status & SHM_SEG_USED) {//找到一个
count++;
}
}
return count;
}
-
+///对共享内存段权限检查
STATIC INT32 ShmPermCheck(struct shmIDSource *seg, mode_t mode)
{
- INT32 uid = LOS_GetUserID();
+ INT32 uid = LOS_GetUserID();//当前进程的用户ID
UINT32 tmpMode = 0;
mode_t privMode = seg->ds.shm_perm.mode;
mode_t accMode;
@@ -466,6 +541,22 @@ STATIC INT32 ShmPermCheck(struct shmIDSource *seg, mode_t mode)
}
}
+/*!
+ * @brief ShmGet
+ * 得到一个共享内存标识符或创建一个共享内存对象
+ * @param key 建立新共享内存对象 标识符是IPC对象的内部名。为使多个合作进程能够在同一IPC对象上汇聚,需要提供一个外部命名方案。
+ 为此,每个IPC对象都与一个键(key)相关联,这个键作为该对象的外部名,无论何时创建IPC结构(通过msgget、semget、shmget创建),
+ 都应给IPC指定一个键, key_t由ftok创建,ftok当然在本工程里找不到,所以要写这么多.
+ * @param shmflg IPC_CREAT IPC_EXCL
+ IPC_CREAT: 在创建新的IPC时,如果key参数是IPC_PRIVATE或者和当前某种类型的IPC结构无关,则需要指明flag参数的IPC_CREAT标志位,
+ 则用来创建一个新的IPC结构。(如果IPC结构已存在,并且指定了IPC_CREAT,则IPC_CREAT什么都不做,函数也不出错)
+ IPC_EXCL: 此参数一般与IPC_CREAT配合使用来创建一个新的IPC结构。如果创建的IPC结构已存在函数就出错返回,
+ 返回EEXIST(这与open函数指定O_CREAT和O_EXCL标志原理相同)
+ * @param size 新建的共享内存大小,以字节为单位
+ * @return
+ *
+ * @see
+ */
INT32 ShmGet(key_t key, size_t size, INT32 shmflg)
{
INT32 ret;
@@ -476,13 +567,13 @@ INT32 ShmGet(key_t key, size_t size, INT32 shmflg)
if (key == IPC_PRIVATE) {
ret = ShmAllocSeg(key, size, shmflg);
} else {
- ret = ShmFindSegByKey(key);
+ ret = ShmFindSegByKey(key);//通过key查找资源ID
if (ret < 0) {
- if (((UINT32)shmflg & IPC_CREAT) == 0) {
+ if (((UINT32)shmflg & IPC_CREAT) == 0) {//
ret = -ENOENT;
goto ERROR;
} else {
- ret = ShmAllocSeg(key, size, shmflg);
+ ret = ShmAllocSeg(key, size, shmflg);//分配一个共享内存
}
} else {
shmid = ret;
@@ -491,7 +582,7 @@ INT32 ShmGet(key_t key, size_t size, INT32 shmflg)
ret = -EEXIST;
goto ERROR;
}
- ret = ShmPermCheck(ShmFindSeg(shmid), (UINT32)shmflg & ACCESSPERMS);
+ ret = ShmPermCheck(ShmFindSeg(shmid), (UINT32)shmflg & ACCESSPERMS);//对共享内存权限检查
if (ret != 0) {
ret = -ret;
goto ERROR;
@@ -526,13 +617,13 @@ INT32 ShmatParamCheck(const VOID *shmaddr, INT32 shmflg)
return 0;
}
-
+///分配一个共享线性区并映射好
LosVmMapRegion *ShmatVmmAlloc(struct shmIDSource *seg, const VOID *shmaddr,
INT32 shmflg, UINT32 prot)
{
LosVmSpace *space = OsCurrProcessGet()->vmSpace;
LosVmMapRegion *region = NULL;
- UINT32 flags = MAP_ANONYMOUS | MAP_SHARED;
+ UINT32 flags = MAP_ANONYMOUS | MAP_SHARED;//本线性区为共享+匿名标签
UINT32 mapFlags = flags | MAP_FIXED;
VADDR_T vaddr;
UINT32 regionFlags;
@@ -543,29 +634,29 @@ LosVmMapRegion *ShmatVmmAlloc(struct shmIDSource *seg, const VOID *shmaddr,
}
regionFlags = OsCvtProtFlagsToRegionFlags(prot, flags);
(VOID)LOS_MuxAcquire(&space->regionMux);
- if (shmaddr == NULL) {
- region = LOS_RegionAlloc(space, 0, seg->ds.shm_segsz, regionFlags, 0);
- } else {
+ if (shmaddr == NULL) {//未指定了共享内存连接到当前进程中的地址位置
+ region = LOS_RegionAlloc(space, 0, seg->ds.shm_segsz, regionFlags, 0);//分配线性区
+ } else {//指定时,就需要先找地址所在的线性区
if ((UINT32)shmflg & SHM_RND) {
vaddr = ROUNDDOWN((VADDR_T)(UINTPTR)shmaddr, SHMLBA);
} else {
vaddr = (VADDR_T)(UINTPTR)shmaddr;
- }
+ }//找到线性区并重新映射,当指定地址时需贴上重新映射的标签
if (!((UINT32)shmflg & SHM_REMAP) && (LOS_RegionFind(space, vaddr) ||
LOS_RegionFind(space, vaddr + seg->ds.shm_segsz - 1) ||
LOS_RegionRangeFind(space, vaddr, seg->ds.shm_segsz - 1))) {
ret = EINVAL;
goto ERROR;
}
- vaddr = (VADDR_T)LOS_MMap(vaddr, seg->ds.shm_segsz, prot, mapFlags, -1, 0);
- region = LOS_RegionFind(space, vaddr);
+ vaddr = (VADDR_T)LOS_MMap(vaddr, seg->ds.shm_segsz, prot, mapFlags, -1, 0);//做好映射
+ region = LOS_RegionFind(space, vaddr);//重新查找线性区,用于返回.
}
if (region == NULL) {
ret = ENOMEM;
goto ERROR;
}
- ShmVmmMapping(space, &seg->node, region->range.base, regionFlags);
+ ShmVmmMapping(space, &seg->node, region->range.base, regionFlags);//共享内存映射
(VOID)LOS_MuxRelease(&space->regionMux);
return region;
ERROR:
@@ -574,6 +665,17 @@ ERROR:
return NULL;
}
+/*!
+ * @brief ShmAt
+ * 用来启动对该共享内存的访问,并把共享内存连接到当前进程的地址空间。
+ * @param shmflg 是一组标志位,通常为0。
+ * @param shmaddr 指定共享内存连接到当前进程中的地址位置,通常为空,表示让系统来选择共享内存的地址。
+ * @param shmid 是shmget()函数返回的共享内存标识符
+ * @return
+ * 如果shmat成功执行,那么内核将使与该共享存储相关的shmid_ds结构中的shm_nattch计数器值加1
+ shmid 就是个索引,就跟进程和线程的ID一样,对应 g_shmSegs[shmid],取值小于共享段最大个数(默认192)
+ * @see
+ */
VOID *ShmAt(INT32 shmid, const VOID *shmaddr, INT32 shmflg)
{
INT32 ret;
@@ -582,13 +684,13 @@ VOID *ShmAt(INT32 shmid, const VOID *shmaddr, INT32 shmflg)
struct shmIDSource *seg = NULL;
LosVmMapRegion *r = NULL;
- ret = ShmatParamCheck(shmaddr, shmflg);
+ ret = ShmatParamCheck(shmaddr, shmflg);//参数检查
if (ret != 0) {
set_errno(ret);
return (VOID *)-1;
}
- if ((UINT32)shmflg & SHM_EXEC) {
+ if ((UINT32)shmflg & SHM_EXEC) {//flag 转换
prot |= PROT_EXEC;
acc_mode |= SHM_S_IXUGO;
} else if (((UINT32)shmflg & SHM_RDONLY) == 0) {
@@ -597,7 +699,7 @@ VOID *ShmAt(INT32 shmid, const VOID *shmaddr, INT32 shmflg)
}
SYSV_SHM_LOCK();
- seg = ShmFindSeg(shmid);
+ seg = ShmFindSeg(shmid);//找到段
if (seg == NULL) {
SYSV_SHM_UNLOCK();
return (VOID *)-1;
@@ -608,18 +710,18 @@ VOID *ShmAt(INT32 shmid, const VOID *shmaddr, INT32 shmflg)
goto ERROR;
}
- seg->ds.shm_nattch++;
- r = ShmatVmmAlloc(seg, shmaddr, shmflg, prot);
+ seg->ds.shm_nattch++;//ds上记录有一个进程绑定上来
+ r = ShmatVmmAlloc(seg, shmaddr, shmflg, prot);//在当前进程空间分配一个线性区并映射到共享内存
if (r == NULL) {
seg->ds.shm_nattch--;
SYSV_SHM_UNLOCK();
return (VOID *)-1;
}
- r->shmid = shmid;
- r->regionFlags |= VM_MAP_REGION_FLAG_SHM;
- seg->ds.shm_atime = time(NULL);
- seg->ds.shm_lpid = LOS_GetCurrProcessID();
+ r->shmid = shmid;//把ID给线性区的shmid
+ r->regionFlags |= VM_MAP_REGION_FLAG_SHM;//这是一个共享线性区
+ seg->ds.shm_atime = time(NULL);//访问时间
+ seg->ds.shm_lpid = LOS_GetCurrProcessID();//记录最后一次操作该段的进程ID
SYSV_SHM_UNLOCK();
return (VOID *)(UINTPTR)r->range.base;
@@ -630,6 +732,19 @@ ERROR:
return (VOID *)-1;
}
+/*!
+ * @brief ShmCtl
+ * 此函数可以对shmid指定的共享存储进行多种操作(删除、取信息、加锁、解锁等)
+ * @param buf 是一个结构指针,它指向共享内存模式和访问权限的结构。
+ * @param cmd 是要采取的操作,主要可以取下面的值 :
+ IPC_STAT:把shmid_ds结构中的数据设置为共享内存的当前关联值,即用共享内存的当前关联值覆盖shmid_ds的值。
+ IPC_SET:如果进程有足够的权限,就把共享内存的当前关联值设置为shmid_ds结构中给出的值
+ IPC_RMID:删除共享内存段
+ * @param shmid 是shmget()函数返回的共享内存标识符
+ * @return
+ *
+ * @see
+ */
INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
{
struct shmIDSource *seg = NULL;
@@ -642,7 +757,7 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
SYSV_SHM_LOCK();
if ((cmd != IPC_INFO) && (cmd != SHM_INFO)) {
- seg = ShmFindSeg(shmid);
+ seg = ShmFindSeg(shmid);//通过索引ID找到seg
if (seg == NULL) {
SYSV_SHM_UNLOCK();
return -1;
@@ -656,13 +771,13 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
switch (cmd) {
case IPC_STAT:
- case SHM_STAT:
+ case SHM_STAT://取段结构
ret = ShmPermCheck(seg, SHM_S_IRUGO);
if (ret != 0) {
goto ERROR;
}
- ret = LOS_ArchCopyToUser(buf, &seg->ds, sizeof(struct shmid_ds));
+ ret = LOS_ArchCopyToUser(buf, &seg->ds, sizeof(struct shmid_ds));//把内核空间的段描述结构(shmid_ds)拷贝到用户空间
if (ret != 0) {
ret = EFAULT;
goto ERROR;
@@ -671,13 +786,13 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
ret = (unsigned int)((unsigned int)seg->ds.shm_perm.seq << 16) | (unsigned int)((unsigned int)shmid & 0xffff); /* 16: use the seq as the upper 16 bits */
}
break;
- case IPC_SET:
+ case IPC_SET://重置共享段
ret = ShmPermCheck(seg, SHM_M);
if (ret != 0) {
ret = EPERM;
goto ERROR;
}
-
+ //从用户空间拷贝数据到内核空间
ret = LOS_ArchCopyFromUser(&shm_perm, &buf->shm_perm, sizeof(struct ipc_perm));
if (ret != 0) {
ret = EFAULT;
@@ -686,14 +801,14 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
seg->ds.shm_perm.uid = shm_perm.uid;
seg->ds.shm_perm.gid = shm_perm.gid;
seg->ds.shm_perm.mode = (seg->ds.shm_perm.mode & ~ACCESSPERMS) |
- (shm_perm.mode & ACCESSPERMS);
+ (shm_perm.mode & ACCESSPERMS);//可访问
seg->ds.shm_ctime = time(NULL);
#ifdef LOSCFG_SHELL
(VOID)memcpy_s(seg->ownerName, OS_PCB_NAME_LEN, OS_PCB_FROM_PID(shm_perm.uid)->processName,
OS_PCB_NAME_LEN);
#endif
break;
- case IPC_RMID:
+ case IPC_RMID://删除共享段
ret = ShmPermCheck(seg, SHM_M);
if (ret != 0) {
ret = EPERM;
@@ -701,11 +816,11 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
}
seg->status |= SHM_SEG_REMOVE;
- if (seg->ds.shm_nattch <= 0) {
+ if (seg->ds.shm_nattch <= 0) {//没有任何进程在使用了
ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);
}
break;
- case IPC_INFO:
+ case IPC_INFO://向用户空间返回共享内存的系统限制信息(shminfo)
ret = LOS_ArchCopyToUser(buf, &IPC_SHM_INFO, sizeof(struct shminfo));
if (ret != 0) {
ret = EFAULT;
@@ -719,8 +834,8 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
shmInfo.shm_tot = 0;
shmInfo.swap_attempts = 0;
shmInfo.swap_successes = 0;
- shmInfo.used_ids = ShmSegUsedCount();
- ret = LOS_ArchCopyToUser(buf, &shmInfo, sizeof(struct shm_info));
+ shmInfo.used_ids = ShmSegUsedCount();//在使用的seg数
+ ret = LOS_ArchCopyToUser(buf, &shmInfo, sizeof(struct shm_info));//把共享内存使用统计信息(shm_info)拷贝到用户空间
if (ret != 0) {
ret = EFAULT;
goto ERROR;
@@ -743,55 +858,63 @@ ERROR:
return -1;
}
+/**
+ * @brief 当对共享存储的操作已经结束时,则调用shmdt与该存储段分离
+ 如果shmat成功执行,那么内核将使与该共享存储相关的shmid_ds结构中的shm_nattch计数器值减1
+ * @attention 注意:这并不从系统中删除共享存储的标识符以及其相关的数据结构。共享存储的仍然存在,
+ 直至某个进程带IPC_RMID命令的调用shmctl特地删除共享存储为止
+ * @param shmaddr 要分离的共享内存在当前进程地址空间中的起始地址(即 ShmAt 的返回值)
+ * @return INT32
+ */
INT32 ShmDt(const VOID *shmaddr)
{
- LosVmSpace *space = OsCurrProcessGet()->vmSpace;
+ LosVmSpace *space = OsCurrProcessGet()->vmSpace;//获取进程空间
struct shmIDSource *seg = NULL;
LosVmMapRegion *region = NULL;
INT32 shmid;
INT32 ret;
- if (IS_PAGE_ALIGNED(shmaddr) == 0) {
+ if (IS_PAGE_ALIGNED(shmaddr) == 0) {//地址是否对齐
ret = EINVAL;
goto ERROR;
}
(VOID)LOS_MuxAcquire(&space->regionMux);
- region = LOS_RegionFind(space, (VADDR_T)(UINTPTR)shmaddr);
+ region = LOS_RegionFind(space, (VADDR_T)(UINTPTR)shmaddr);//找到线性区
if (region == NULL) {
ret = EINVAL;
goto ERROR_WITH_LOCK;
}
- shmid = region->shmid;
+ shmid = region->shmid;//线性区共享ID
- if (region->range.base != (VADDR_T)(UINTPTR)shmaddr) {
- ret = EINVAL;
+ if (region->range.base != (VADDR_T)(UINTPTR)shmaddr) {//这是用户空间和内核空间的一次解绑
+ ret = EINVAL; //shmaddr 必须要等于region->range.base
goto ERROR_WITH_LOCK;
}
/* remove it from aspace */
- LOS_RbDelNode(&space->regionRbTree, ®ion->rbNode);
- LOS_ArchMmuUnmap(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);
+ LOS_RbDelNode(&space->regionRbTree, ®ion->rbNode);//从红黑树和链表中摘除节点
+ LOS_ArchMmuUnmap(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);//解除线性区的映射
(VOID)LOS_MuxRelease(&space->regionMux);
/* free it */
- free(region);
+ free(region);//释放线性区所占内存池中的内存
SYSV_SHM_LOCK();
- seg = ShmFindSeg(shmid);
+ seg = ShmFindSeg(shmid);//找到seg,线性区和共享段的关系是 1:N 的关系,其他空间的线性区也会绑在共享段上
if (seg == NULL) {
ret = EINVAL;
SYSV_SHM_UNLOCK();
goto ERROR;
}
- ShmPagesRefDec(seg);
- seg->ds.shm_nattch--;
- if ((seg->ds.shm_nattch <= 0) &&
- (seg->status & SHM_SEG_REMOVE)) {
- ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);
+ ShmPagesRefDec(seg);//页面引用数 --
+ seg->ds.shm_nattch--;//使用共享内存的进程数少了一个
+ if ((seg->ds.shm_nattch <= 0) && //无任何进程使用共享内存
+ (seg->status & SHM_SEG_REMOVE)) {//状态为删除时需要释放物理页内存了,否则其他进程还要继续使用共享内存
+ ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);//释放seg 页框链表中的页框内存,再重置seg状态
} else {
- seg->ds.shm_dtime = time(NULL);
- seg->ds.shm_lpid = LOS_GetCurrProcessID();
+ seg->ds.shm_dtime = time(NULL);//记录分离的时间
+ seg->ds.shm_lpid = LOS_GetCurrProcessID();//记录操作进程ID
}
SYSV_SHM_UNLOCK();
@@ -847,7 +970,6 @@ STATIC VOID OsShmInfoCmd(VOID)
}
SYSV_SHM_UNLOCK();
}
-
STATIC VOID OsShmDeleteCmd(INT32 shmid)
{
struct shmIDSource *seg = NULL;
@@ -876,7 +998,7 @@ STATIC VOID OsShmCmdUsage(VOID)
"\t-r [shmid], Recycle the specified shared memory about shmid\n"
"\t-h | --help, print shm command usage\n");
}
-
+///共享内存
UINT32 OsShellCmdShm(INT32 argc, const CHAR *argv[])
{
INT32 shmid;