diff --git a/README.md b/README.md index 305ba24d..d767052c 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,8 @@ +<<<<<<< HEAD +# Iot_Cs_best + +======= # Brief Intro -LMFAO, we do this shit just for fun :) \ No newline at end of file +LMFAO, we do this shit just for fun :) +>>>>>>> remotes/origin/main diff --git a/doc/test.txt b/doc/test.txt new file mode 100644 index 00000000..e69de29b diff --git a/doc/泛读报告.docx b/doc/泛读报告.docx index 42d226e8..b8bbd9ce 100644 Binary files a/doc/泛读报告.docx and b/doc/泛读报告.docx differ diff --git a/src/kernel_liteos_a/apps/init/src/init.c b/src/kernel_liteos_a/apps/init/src/init.c index 784bf83d..20953ed2 100644 --- a/src/kernel_liteos_a/apps/init/src/init.c +++ b/src/kernel_liteos_a/apps/init/src/init.c @@ -54,13 +54,21 @@ int main(int argc, char * const *argv) const char *shellPath = "/bin/mksh"; #ifdef LOSCFG_QUICK_START +<<<<<<< HEAD + const char *samplePath = "/dev/shm/sample_quickstart"; +======= const char *samplePath = "/dev/shm/sample_quickstart"; +>>>>>>> remotes/origin/main ret = fork(); if (ret < 0) { printf("Failed to fork for sample_quickstart\n"); } else if (ret == 0) { +<<<<<<< HEAD + (void)execve(samplePath, NULL, NULL); +======= (void)execve(samplePath, NULL, NULL); // 执行可执行文件 +>>>>>>> remotes/origin/main exit(0); } @@ -72,24 +80,43 @@ int main(int argc, char * const *argv) close(fd); } #endif +<<<<<<< HEAD + ret = fork(); + if (ret < 0) { + printf("Failed to fork for shell\n"); + } else if (ret == 0) { + gid = getpgrp(); +======= ret = fork(); // 创建第一个程序来跑shell if (ret < 0) { printf("Failed to fork for shell\n"); } else if (ret == 0) { gid = getpgrp(); // 返回进程组ID +>>>>>>> remotes/origin/main if (gid < 0) { printf("get group id failed, pgrpid %d, errno %d\n", gid, errno); exit(0); } +<<<<<<< HEAD + ret = tcsetpgrp(STDIN_FILENO, gid); +======= ret = tcsetpgrp(STDIN_FILENO, gid); +>>>>>>> remotes/origin/main if (ret != 0) { printf("tcsetpgrp failed, errno %d\n", errno); exit(0); } +<<<<<<< HEAD + (void)execve(shellPath, NULL, NULL); + exit(0); + } + +======= (void)execve(shellPath, NULL, NULL); // 正常执行命令行程序可执行文件 exit(0); } // ret > 0 +>>>>>>> remotes/origin/main while (1) { ret = waitpid(-1, 0, WNOHANG); if (ret == 0) { diff --git a/src/kernel_liteos_a/fs/jffs2/jffs2.patch b/src/kernel_liteos_a/fs/jffs2/jffs2.patch index eba19047..7b8ab9a4 100644 --- a/src/kernel_liteos_a/fs/jffs2/jffs2.patch +++ b/src/kernel_liteos_a/fs/jffs2/jffs2.patch @@ -2311,7 +2311,11 @@ diff -Nupr old/fs/jffs2/erase.c new/fs/jffs2/erase.c diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c --- old/fs/jffs2/file.c 2022-05-09 17:22:53.000000000 +0800 +++ new/fs/jffs2/file.c 2022-05-10 09:43:14.250000000 +0800 +<<<<<<< HEAD +@@ -9,335 +9,30 @@ +======= @@ -9,334 +9,31 @@ +>>>>>>> remotes/origin/main * For licensing information, see the file 'LICENCE' in this directory. 
* */ @@ -2348,7 +2352,11 @@ diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c - struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); + /* FIXME: This works only with one file system mounted at a time */ int ret; +<<<<<<< HEAD +- +======= +>>>>>>> remotes/origin/main - ret = file_write_and_wait_range(filp, start, end); + ret = jffs2_read_inode_range(c, f, gc_buffer, + offset & ~(PAGE_SIZE-1), PAGE_SIZE); @@ -2361,10 +2369,15 @@ diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c - inode_unlock(inode); - - return 0; +<<<<<<< HEAD +-} +- +======= + return ERR_PTR(ret); + return gc_buffer; } +>>>>>>> remotes/origin/main -const struct file_operations jffs2_file_operations = -{ - .llseek = generic_file_llseek, @@ -2389,10 +2402,14 @@ diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c -}; - -const struct address_space_operations jffs2_file_address_operations = +<<<<<<< HEAD +-{ +======= +void jffs2_gc_release_page(struct jffs2_sb_info *c, + unsigned char *ptr, + unsigned long *priv) { +>>>>>>> remotes/origin/main - .readpage = jffs2_readpage, - .write_begin = jffs2_write_begin, - .write_end = jffs2_write_end, @@ -2448,29 +2465,57 @@ diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c - ret = jffs2_do_readpage_unlock(pg->mapping->host, pg); - mutex_unlock(&f->sem); - return ret; +<<<<<<< HEAD ++ return ERR_PTR(ret); ++ return gc_buffer; + } + +-static int jffs2_write_begin(struct file *filp, struct address_space *mapping, +- loff_t pos, unsigned len, unsigned flags, +- struct page **pagep, void **fsdata) ++void jffs2_gc_release_page(struct jffs2_sb_info *c, ++ unsigned char *ptr, ++ unsigned long *priv) + { +======= -} - -static int jffs2_write_begin(struct file *filp, struct address_space *mapping, - loff_t pos, unsigned len, unsigned flags, - struct page **pagep, void **fsdata) -{ +>>>>>>> remotes/origin/main - struct page *pg; - struct inode *inode = mapping->host; - struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); - struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); - pgoff_t index = pos >> PAGE_SHIFT; +<<<<<<< HEAD +- uint32_t pageofs = index << PAGE_SHIFT; +======= +>>>>>>> remotes/origin/main - int ret = 0; - - jffs2_dbg(1, "%s()\n", __func__); - +<<<<<<< HEAD +- if (pageofs > inode->i_size) { +- /* Make new hole frag from old EOF to new page */ +======= - if (pos > inode->i_size) { - /* Make new hole frag from old EOF to new position */ +>>>>>>> remotes/origin/main - struct jffs2_raw_inode ri; - struct jffs2_full_dnode *fn; - uint32_t alloc_len; - +<<<<<<< HEAD +- jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", +- (unsigned int)inode->i_size, pageofs); +======= - jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new position\n", - (unsigned int)inode->i_size, (uint32_t)pos); +>>>>>>> remotes/origin/main - - ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, - ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); @@ -2490,10 +2535,17 @@ diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c - ri.mode = cpu_to_jemode(inode->i_mode); - ri.uid = cpu_to_je16(i_uid_read(inode)); - ri.gid = cpu_to_je16(i_gid_read(inode)); +<<<<<<< HEAD +- ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs)); +- ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW()); +- ri.offset = cpu_to_je32(inode->i_size); +- ri.dsize = cpu_to_je32(pageofs - inode->i_size); +======= - ri.isize = cpu_to_je32((uint32_t)pos); - ri.atime = ri.ctime = ri.mtime = cpu_to_je32(JFFS2_NOW()); - ri.offset = cpu_to_je32(inode->i_size); - ri.dsize = cpu_to_je32((uint32_t)pos - 
inode->i_size); +>>>>>>> remotes/origin/main - ri.csize = cpu_to_je32(0); - ri.compr = JFFS2_COMPR_ZERO; - ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8)); @@ -2523,7 +2575,11 @@ diff -Nupr old/fs/jffs2/file.c new/fs/jffs2/file.c - goto out_err; - } - jffs2_complete_reservation(c); +<<<<<<< HEAD +- inode->i_size = pageofs; +======= - inode->i_size = pos; +>>>>>>> remotes/origin/main - mutex_unlock(&f->sem); - } - diff --git a/src/kernel_liteos_a/kernel/base/core/los_bitmap.c b/src/kernel_liteos_a/kernel/base/core/los_bitmap.c index 253af3a5..63c7bbd3 100644 --- a/src/kernel_liteos_a/kernel/base/core/los_bitmap.c +++ b/src/kernel_liteos_a/kernel/base/core/los_bitmap.c @@ -33,6 +33,18 @@ #include "los_printf.h" #include "los_toolchain.h" +<<<<<<< HEAD + +#define OS_BITMAP_MASK 0x1FU +#define OS_BITMAP_WORD_MASK ~0UL + +/* find first zero bit starting from LSB */ +STATIC INLINE UINT16 Ffz(UINTPTR x) +{ + return __builtin_ffsl(~x) - 1; +} + +======= /* function: 这部分的代码提供了位操作 @@ -59,12 +71,18 @@ STATIC INLINE UINT16 Ffz(UINTPTR x) interpretation: */ +>>>>>>> remotes/origin/main VOID LOS_BitmapSet(UINT32 *bitmap, UINT16 pos) { if (bitmap == NULL) { return; } +<<<<<<< HEAD + *bitmap |= 1U << (pos & OS_BITMAP_MASK); +} + +======= *bitmap |= 1U << (pos & OS_BITMAP_MASK); //00011111 } @@ -73,6 +91,7 @@ VOID LOS_BitmapSet(UINT32 *bitmap, UINT16 pos) interpretation: */ +>>>>>>> remotes/origin/main VOID LOS_BitmapClr(UINT32 *bitmap, UINT16 pos) { if (bitmap == NULL) { @@ -82,9 +101,12 @@ VOID LOS_BitmapClr(UINT32 *bitmap, UINT16 pos) *bitmap &= ~(1U << (pos & OS_BITMAP_MASK)); } +<<<<<<< HEAD +======= /* function: 用于返回参数位图当中最高的索引位 */ +>>>>>>> remotes/origin/main UINT16 LOS_HighBitGet(UINT32 bitmap) { if (bitmap == 0) { @@ -94,9 +116,12 @@ UINT16 LOS_HighBitGet(UINT32 bitmap) return (OS_BITMAP_MASK - CLZ(bitmap)); } +<<<<<<< HEAD +======= /* function: 获取参数位图中最低位为1的索引位 */ +>>>>>>> remotes/origin/main UINT16 LOS_LowBitGet(UINT32 bitmap) { if (bitmap == 0) { @@ -106,9 +131,12 @@ UINT16 LOS_LowBitGet(UINT32 bitmap) return CTZ(bitmap); } +<<<<<<< HEAD +======= /* function: 从start位置开始设置numsSet个bit位 置1 */ +>>>>>>> remotes/origin/main VOID LOS_BitmapSetNBits(UINTPTR *bitmap, UINT32 start, UINT32 numsSet) { UINTPTR *p = bitmap + BITMAP_WORD(start); @@ -129,9 +157,12 @@ VOID LOS_BitmapSetNBits(UINTPTR *bitmap, UINT32 start, UINT32 numsSet) } } +<<<<<<< HEAD +======= /* fuction: 从start位置开始 清除numsSet个bit位置0 ,对状态字的连续标志位进行清0操作 */ +>>>>>>> remotes/origin/main VOID LOS_BitmapClrNBits(UINTPTR *bitmap, UINT32 start, UINT32 numsClear) { UINTPTR *p = bitmap + BITMAP_WORD(start); @@ -151,9 +182,13 @@ VOID LOS_BitmapClrNBits(UINTPTR *bitmap, UINT32 start, UINT32 numsClear) *p &= ~maskToClear; } } +<<<<<<< HEAD + +======= /* fuction: 从numBits位置开始找到第一个0位 */ +>>>>>>> remotes/origin/main INT32 LOS_BitmapFfz(UINTPTR *bitmap, UINT32 numBits) { INT32 bit, i; diff --git a/src/kernel_liteos_a/kernel/base/core/los_info.c b/src/kernel_liteos_a/kernel/base/core/los_info.c index 0999503d..cd8f70f6 100644 --- a/src/kernel_liteos_a/kernel/base/core/los_info.c +++ b/src/kernel_liteos_a/kernel/base/core/los_info.c @@ -32,10 +32,15 @@ #include "los_task_pri.h" #include "los_vm_dump.h" +<<<<<<< HEAD +STATIC UINT32 GetCurrParentPid(UINT32 pid, const LosProcessCB *processCB) +{ +======= // 得到父进程的pid STATIC UINT32 GetCurrParentPid(UINT32 pid, const LosProcessCB *processCB) { // 如果没有父进程 +>>>>>>> remotes/origin/main if (processCB->parentProcess == NULL) { return 0; } @@ -52,7 +57,10 @@ STATIC UINT32 GetCurrParentPid(UINT32 pid, const 
LosProcessCB *processCB) return processCB->parentProcess->processID; } +<<<<<<< HEAD +======= // 得到当前任务ID +>>>>>>> remotes/origin/main STATIC INLINE UINT32 GetCurrTid(const LosTaskCB *taskCB) { #ifdef LOSCFG_PID_CONTAINER @@ -63,7 +71,10 @@ STATIC INLINE UINT32 GetCurrTid(const LosTaskCB *taskCB) return taskCB->taskID; } +<<<<<<< HEAD +======= // 得到进程的状态 +>>>>>>> remotes/origin/main STATIC UINT16 GetProcessStatus(LosProcessCB *processCB) { UINT16 status; @@ -80,7 +91,10 @@ STATIC UINT16 GetProcessStatus(LosProcessCB *processCB) return status; } +<<<<<<< HEAD +======= // 得到进程的信息 +>>>>>>> remotes/origin/main STATIC VOID GetProcessInfo(ProcessInfo *pcbInfo, const LosProcessCB *processCB) { SchedParam param = {0}; @@ -138,7 +152,10 @@ STATIC VOID GetProcessMemInfo(ProcessInfo *pcbInfo, const LosProcessCB *processC } #endif +<<<<<<< HEAD +======= // 得到线程的信息 +>>>>>>> remotes/origin/main STATIC VOID GetThreadInfo(ProcessThreadInfo *threadInfo, LosProcessCB *processCB) { SchedParam param = {0}; diff --git a/src/kernel_liteos_a/kernel/base/core/los_process.c b/src/kernel_liteos_a/kernel/base/core/los_process.c index 58dd593f..332b466c 100644 --- a/src/kernel_liteos_a/kernel/base/core/los_process.c +++ b/src/kernel_liteos_a/kernel/base/core/los_process.c @@ -1,4 +1,6 @@ /* +<<<<<<< HEAD +======= 进程模块主文件 并发(Concurrent):多个线程在单个核心运行,同一时间只能一个线程运行,内核不停切换线程, 看起来像同时运行,实际上是线程不停切换 @@ -8,6 +10,7 @@ LITE_OS_SEC_BSS 和 LITE_OS_SEC_DATA_INIT 是告诉编译器这些全局变量放在哪个数据段 */ /* +>>>>>>> remotes/origin/main * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved. * @@ -68,12 +71,21 @@ #include "los_vm_phys.h" #include "los_vm_syscall.h" +<<<<<<< HEAD +LITE_OS_SEC_BSS LosProcessCB *g_processCBArray = NULL; +LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_freeProcess; +LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_processRecycleList; +LITE_OS_SEC_BSS UINT32 g_processMaxNum; +#ifndef LOSCFG_PID_CONTAINER +LITE_OS_SEC_BSS ProcessGroup *g_processGroup = NULL; +======= LITE_OS_SEC_BSS LosProcessCB *g_processCBArray = NULL;//进程池数组 LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_freeProcess;//空闲状态下的进程链表, LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_processRecycleList;//需要收回的进程列表 LITE_OS_SEC_BSS UINT32 g_processMaxNum;//进程最大数量,默认64个 #ifndef LOSCFG_PID_CONTAINER LITE_OS_SEC_BSS ProcessGroup *g_processGroup = NULL;//全局进程组,负责管理所有的进程组 +>>>>>>> remotes/origin/main #define OS_ROOT_PGRP(processCB) (g_processGroup) #endif @@ -82,12 +94,21 @@ STATIC INLINE VOID OsInsertPCBToFreeList(LosProcessCB *processCB) #ifdef LOSCFG_PID_CONTAINER OsPidContainerDestroy(processCB->container, processCB); #endif +<<<<<<< HEAD + UINT32 pid = processCB->processID; + (VOID)memset_s(processCB, sizeof(LosProcessCB), 0, sizeof(LosProcessCB)); + processCB->processID = pid; + processCB->processStatus = OS_PROCESS_FLAG_UNUSED; + processCB->timerID = (timer_t)(UINTPTR)MAX_INVALID_TIMER_VID; + LOS_ListTailInsert(&g_freeProcess, &processCB->pendList); +======= UINT32 pid = processCB->processID;//获取进程ID (VOID)memset_s(processCB, sizeof(LosProcessCB), 0, sizeof(LosProcessCB));//进程描述符数据清0 processCB->processID = pid;//进程ID processCB->processStatus = OS_PROCESS_FLAG_UNUSED;//设置为进程未使用 processCB->timerID = (timer_t)(UINTPTR)MAX_INVALID_TIMER_VID;//timeID初始化值 LOS_ListTailInsert(&g_freeProcess, &processCB->pendList);//进程节点挂入g_freeProcess以分配给后续进程使用 +>>>>>>> remotes/origin/main } VOID OsDeleteTaskFromProcess(LosTaskCB *taskCB) @@ -141,6 +162,12 @@ UINT32 OsProcessAddNewTask(UINTPTR processID, LosTaskCB 
*taskCB, SchedParam *par SCHEDULER_UNLOCK(intSave); return LOS_OK; } +<<<<<<< HEAD + +ProcessGroup *OsCreateProcessGroup(LosProcessCB *processCB) +{ + ProcessGroup *pgroup = LOS_MemAlloc(m_aucSysMem1, sizeof(ProcessGroup)); +======= /** * @brief 创建进程组 * @details @@ -152,10 +179,20 @@ UINT32 OsProcessAddNewTask(UINTPTR processID, LosTaskCB *taskCB, SchedParam *par ProcessGroup *OsCreateProcessGroup(LosProcessCB *processCB) { ProcessGroup *pgroup = LOS_MemAlloc(m_aucSysMem1, sizeof(ProcessGroup));//分配一个进程组 +>>>>>>> remotes/origin/main if (pgroup == NULL) { return NULL; } +<<<<<<< HEAD + pgroup->pgroupLeader = (UINTPTR)processCB; + LOS_ListInit(&pgroup->processList); + LOS_ListInit(&pgroup->exitProcessList); + + LOS_ListTailInsert(&pgroup->processList, &processCB->subordinateGroupList); + processCB->pgroup = pgroup; + processCB->processStatus |= OS_PROCESS_FLAG_GROUP_LEADER; +======= pgroup->pgroupLeader = (UINTPTR)processCB;//指定进程负责人 LOS_ListInit(&pgroup->processList);//初始化组员链表 LOS_ListInit(&pgroup->exitProcessList);//初始化僵死进程链表 @@ -163,6 +200,7 @@ ProcessGroup *OsCreateProcessGroup(LosProcessCB *processCB) LOS_ListTailInsert(&pgroup->processList, &processCB->subordinateGroupList); processCB->pgroup = pgroup; processCB->processStatus |= OS_PROCESS_FLAG_GROUP_LEADER;//进程状态贴上当老大的标签 +>>>>>>> remotes/origin/main ProcessGroup *rootPGroup = OS_ROOT_PGRP(processCB); if (rootPGroup == NULL) { @@ -173,30 +211,52 @@ ProcessGroup *OsCreateProcessGroup(LosProcessCB *processCB) } return pgroup; } +<<<<<<< HEAD + +STATIC VOID ExitProcessGroup(LosProcessCB *processCB, ProcessGroup **pgroup) +{ + LosProcessCB *pgroupCB = OS_GET_PGROUP_LEADER(processCB->pgroup); + LOS_ListDelete(&processCB->subordinateGroupList); +======= //退出进程组,参数是进程地址和进程组地址的地址 STATIC VOID ExitProcessGroup(LosProcessCB *processCB, ProcessGroup **pgroup) { LosProcessCB *pgroupCB = OS_GET_PGROUP_LEADER(processCB->pgroup); LOS_ListDelete(&processCB->subordinateGroupList);//从进程组进程链表上摘出去 +>>>>>>> remotes/origin/main if (LOS_ListEmpty(&processCB->pgroup->processList) && LOS_ListEmpty(&processCB->pgroup->exitProcessList)) { #ifdef LOSCFG_PID_CONTAINER if (processCB->pgroup != OS_ROOT_PGRP(processCB)) { #endif +<<<<<<< HEAD + LOS_ListDelete(&processCB->pgroup->groupList); +======= LOS_ListDelete(&processCB->pgroup->groupList);//从全局进程组链表上把自己摘出去 记住它是 LOS_ListTailInsert(&g_processGroup->groupList, &group->groupList) 挂上去的 +>>>>>>> remotes/origin/main *pgroup = processCB->pgroup; #ifdef LOSCFG_PID_CONTAINER } #endif pgroupCB->processStatus &= ~OS_PROCESS_FLAG_GROUP_LEADER; +<<<<<<< HEAD + if (OsProcessIsUnused(pgroupCB) && !(pgroupCB->processStatus & OS_PROCESS_FLAG_EXIT)) { + LOS_ListDelete(&pgroupCB->pendList); + OsInsertPCBToFreeList(pgroupCB); +======= if (OsProcessIsUnused(pgroupCB) && !(pgroupCB->processStatus & OS_PROCESS_FLAG_EXIT)) {//组长进程时退出的标签 LOS_ListDelete(&pgroupCB->pendList);//进程从全局进程链表上摘除 OsInsertPCBToFreeList(pgroupCB);//释放进程的资源,回到freelist再利用 +>>>>>>> remotes/origin/main } } processCB->pgroup = NULL; } +<<<<<<< HEAD + +======= /*! 通过指定组ID找到进程组 */ +>>>>>>> remotes/origin/main STATIC ProcessGroup *OsFindProcessGroup(UINT32 gid) { ProcessGroup *pgroup = NULL; @@ -216,7 +276,11 @@ STATIC ProcessGroup *OsFindProcessGroup(UINT32 gid) PRINT_INFO("%s failed! pgroup id = %u\n", __FUNCTION__, gid); return NULL; } +<<<<<<< HEAD + +======= /*! 
给指定进程组发送信号 */ +>>>>>>> remotes/origin/main STATIC INT32 OsSendSignalToSpecifyProcessGroup(ProcessGroup *pgroup, siginfo_t *info, INT32 permission) { INT32 ret, success, err; @@ -309,16 +373,27 @@ STATIC LosProcessCB *OsFindExitChildProcess(const LosProcessCB *processCB, const return NULL; } +<<<<<<< HEAD + +======= /*! 唤醒等待wakePID结束的任务 */ +>>>>>>> remotes/origin/main VOID OsWaitWakeTask(LosTaskCB *taskCB, UINTPTR wakePID) { taskCB->waitID = wakePID; taskCB->ops->wake(taskCB); #ifdef LOSCFG_KERNEL_SMP +<<<<<<< HEAD + LOS_MpSchedule(OS_MP_CPU_ALL); +#endif +} + +======= LOS_MpSchedule(OS_MP_CPU_ALL);//向所有cpu发送调度指令 #endif } /*! 唤醒等待参数进程结束的任务 */ +>>>>>>> remotes/origin/main STATIC BOOL OsWaitWakeSpecifiedProcess(LOS_DL_LIST *head, const LosProcessCB *processCB, LOS_DL_LIST **anyList) { LOS_DL_LIST *list = head; @@ -326,17 +401,30 @@ STATIC BOOL OsWaitWakeSpecifiedProcess(LOS_DL_LIST *head, const LosProcessCB *pr UINTPTR processID = 0; BOOL find = FALSE; +<<<<<<< HEAD + while (list->pstNext != head) { + taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(list)); + if ((taskCB->waitFlag == OS_PROCESS_WAIT_PRO) && (taskCB->waitID == (UINTPTR)processCB)) { + if (processID == 0) { + processID = taskCB->waitID; + find = TRUE; +======= while (list->pstNext != head) {//遍历等待链表 processCB->waitList taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(list));//一个一个来 if ((taskCB->waitFlag == OS_PROCESS_WAIT_PRO) && (taskCB->waitID == (UINTPTR)processCB)) { if (processID == 0) { processID = taskCB->waitID; find = TRUE;//找到了 +>>>>>>> remotes/origin/main } else { processID = OS_INVALID_VALUE; } +<<<<<<< HEAD + OsWaitWakeTask(taskCB, processID); +======= OsWaitWakeTask(taskCB, processID);//唤醒这个任务,此时会切到 LOS_Wait runTask->waitFlag = 0;处运行 +>>>>>>> remotes/origin/main continue; } @@ -349,7 +437,11 @@ STATIC BOOL OsWaitWakeSpecifiedProcess(LOS_DL_LIST *head, const LosProcessCB *pr return find; } +<<<<<<< HEAD + +======= /*! 检查父进程的等待任务并唤醒父进程去处理等待任务 */ +>>>>>>> remotes/origin/main STATIC VOID OsWaitCheckAndWakeParentProcess(LosProcessCB *parentCB, const LosProcessCB *processCB) { LOS_DL_LIST *head = &parentCB->waitList; @@ -357,6 +449,21 @@ STATIC VOID OsWaitCheckAndWakeParentProcess(LosProcessCB *parentCB, const LosPro LosTaskCB *taskCB = NULL; BOOL findSpecified = FALSE; +<<<<<<< HEAD + if (LOS_ListEmpty(&parentCB->waitList)) { + return; + } + + findSpecified = OsWaitWakeSpecifiedProcess(head, processCB, &list); + if (findSpecified == TRUE) { + /* No thread is waiting for any child process to finish */ + if (LOS_ListEmpty(&parentCB->waitList)) { + return; + } else if (!LOS_ListEmpty(&parentCB->childrenList)) { + /* Other child processes exist, and other threads that are waiting + * for the child to finish continue to wait + */ +======= if (LOS_ListEmpty(&parentCB->waitList)) {//父进程中是否有在等待子进程退出的任务? 
return;//没有就退出 } @@ -370,17 +477,26 @@ STATIC VOID OsWaitCheckAndWakeParentProcess(LosProcessCB *parentCB, const LosPro /* Other child processes exist, and other threads that are waiting * for the child to finish continue to wait *///存在其他子进程,正在等待它们的子进程结束而将继续等待 +>>>>>>> remotes/origin/main return; } } /* Waiting threads are waiting for a specified child process to finish */ +<<<<<<< HEAD + if (list == NULL) { +======= if (list == NULL) {//等待线程正在等待指定的子进程结束 +>>>>>>> remotes/origin/main return; } /* No child processes exist and all waiting threads are awakened */ +<<<<<<< HEAD + if (findSpecified == TRUE) { +======= if (findSpecified == TRUE) {//所有等待的任务都被一一唤醒 +>>>>>>> remotes/origin/main while (list->pstNext != head) { taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(list)); OsWaitWakeTask(taskCB, OS_INVALID_VALUE); @@ -388,7 +504,11 @@ STATIC VOID OsWaitCheckAndWakeParentProcess(LosProcessCB *parentCB, const LosPro return; } +<<<<<<< HEAD + while (list->pstNext != head) { +======= while (list->pstNext != head) {//处理 OS_PROCESS_WAIT_GID 标签 +>>>>>>> remotes/origin/main taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(list)); if (taskCB->waitFlag == OS_PROCESS_WAIT_GID) { if (taskCB->waitID != (UINTPTR)OS_GET_PGROUP_LEADER(processCB->pgroup)) { @@ -411,7 +531,11 @@ STATIC VOID OsWaitCheckAndWakeParentProcess(LosProcessCB *parentCB, const LosPro return; } +<<<<<<< HEAD + +======= /*! 回收指定进程的资源 */ +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT VOID OsProcessResourcesToFree(LosProcessCB *processCB) { #ifdef LOSCFG_KERNEL_VM @@ -420,10 +544,17 @@ LITE_OS_SEC_TEXT VOID OsProcessResourcesToFree(LosProcessCB *processCB) } #endif +<<<<<<< HEAD +#ifdef LOSCFG_SECURITY_CAPABILITY + if (processCB->user != NULL) { + (VOID)LOS_MemFree(m_aucSysMem1, processCB->user); + processCB->user = NULL; +======= #ifdef LOSCFG_SECURITY_CAPABILITY//安全开关 if (processCB->user != NULL) { (VOID)LOS_MemFree(m_aucSysMem1, processCB->user);//删除用户 processCB->user = NULL;//重置指针为空 +>>>>>>> remotes/origin/main } #endif @@ -480,6 +611,29 @@ LITE_OS_SEC_TEXT VOID OsProcessResourcesToFree(LosProcessCB *processCB) processCB->resourceLimit = NULL; } } +<<<<<<< HEAD + +STATIC VOID OsRecycleZombiesProcess(LosProcessCB *childCB, ProcessGroup **pgroup) +{ + ExitProcessGroup(childCB, pgroup); + LOS_ListDelete(&childCB->siblingList); + if (OsProcessIsDead(childCB)) { + OsDeleteTaskFromProcess(childCB->threadGroup); + childCB->processStatus &= ~OS_PROCESS_STATUS_ZOMBIES; + childCB->processStatus |= OS_PROCESS_FLAG_UNUSED; + } + + LOS_ListDelete(&childCB->pendList); + if (childCB->processStatus & OS_PROCESS_FLAG_EXIT) { + LOS_ListHeadInsert(&g_processRecycleList, &childCB->pendList); + } else if (OsProcessIsPGroupLeader(childCB)) { + LOS_ListTailInsert(&g_processRecycleList, &childCB->pendList); + } else { + OsInsertPCBToFreeList(childCB); + } +} + +======= /*! 回收僵死状态进程的资源 */ STATIC VOID OsRecycleZombiesProcess(LosProcessCB *childCB, ProcessGroup **pgroup) { @@ -501,6 +655,7 @@ STATIC VOID OsRecycleZombiesProcess(LosProcessCB *childCB, ProcessGroup **pgroup } } /*! 
当一个进程自然退出的时候,它的孩子进程由两位老祖宗收养 */ +>>>>>>> remotes/origin/main STATIC VOID OsDealAliveChildProcess(LosProcessCB *processCB) { LosProcessCB *childCB = NULL; @@ -514,35 +669,70 @@ STATIC VOID OsDealAliveChildProcess(LosProcessCB *processCB) } #endif +<<<<<<< HEAD + if (!LOS_ListEmpty(&processCB->childrenList)) { + childHead = processCB->childrenList.pstNext; + LOS_ListDelete(&(processCB->childrenList)); + if (OsProcessIsUserMode(processCB)) { +======= if (!LOS_ListEmpty(&processCB->childrenList)) {//如果存在孩子进程 childHead = processCB->childrenList.pstNext;//获取孩子链表 LOS_ListDelete(&(processCB->childrenList));//清空自己的孩子链表 if (OsProcessIsUserMode(processCB)) {//是用户态进程 +>>>>>>> remotes/origin/main parentCB = OS_PCB_FROM_PID(OS_USER_ROOT_PROCESS_ID); } else { parentCB = OsGetKernelInitProcess(); } +<<<<<<< HEAD + for (nextList = childHead; ;) { + childCB = OS_PCB_FROM_SIBLIST(nextList); + childCB->parentProcess = parentCB; + nextList = nextList->pstNext; + if (nextList == childHead) { +======= for (nextList = childHead; ;) {//遍历孩子链表 childCB = OS_PCB_FROM_SIBLIST(nextList);//找到孩子的真身 childCB->parentProcess = parentCB; nextList = nextList->pstNext;//找下一个孩子进程 if (nextList == childHead) {//一圈下来,孩子们都磕完头了 +>>>>>>> remotes/origin/main break; } } +<<<<<<< HEAD + LOS_ListTailInsertList(&parentCB->childrenList, childHead); +======= LOS_ListTailInsertList(&parentCB->childrenList, childHead);//挂到老祖宗的孩子链表上 +>>>>>>> remotes/origin/main } return; } +<<<<<<< HEAD + +======= /*! 回收指定进程的已经退出(死亡)的孩子进程所占资源 */ +>>>>>>> remotes/origin/main STATIC VOID OsChildProcessResourcesFree(const LosProcessCB *processCB) { LosProcessCB *childCB = NULL; ProcessGroup *pgroup = NULL; +<<<<<<< HEAD + while (!LOS_ListEmpty(&((LosProcessCB *)processCB)->exitChildList)) { + childCB = LOS_DL_LIST_ENTRY(processCB->exitChildList.pstNext, LosProcessCB, siblingList); + OsRecycleZombiesProcess(childCB, &pgroup); + (VOID)LOS_MemFree(m_aucSysMem1, pgroup); + } +} + +VOID OsProcessNaturalExit(LosProcessCB *processCB, UINT32 status) +{ + OsChildProcessResourcesFree(processCB); +======= while (!LOS_ListEmpty(&((LosProcessCB *)processCB)->exitChildList)) {//遍历直到没有了退出(死亡)的孩子进程 childCB = LOS_DL_LIST_ENTRY(processCB->exitChildList.pstNext, LosProcessCB, siblingList);//获取孩子进程, OsRecycleZombiesProcess(childCB, &pgroup);//其中会将childCB从exitChildList链表上摘出去 @@ -553,10 +743,30 @@ STATIC VOID OsChildProcessResourcesFree(const LosProcessCB *processCB) VOID OsProcessNaturalExit(LosProcessCB *processCB, UINT32 status) { OsChildProcessResourcesFree(processCB);//释放孩子进程的资源 +>>>>>>> remotes/origin/main /* is a child process */ if (processCB->parentProcess != NULL) { LosProcessCB *parentCB = processCB->parentProcess; +<<<<<<< HEAD + LOS_ListDelete(&processCB->siblingList); + if (!OsProcessExitCodeSignalIsSet(processCB)) { + OsProcessExitCodeSet(processCB, status); + } + LOS_ListTailInsert(&parentCB->exitChildList, &processCB->siblingList); + LOS_ListDelete(&processCB->subordinateGroupList); + LOS_ListTailInsert(&processCB->pgroup->exitProcessList, &processCB->subordinateGroupList); + + OsWaitCheckAndWakeParentProcess(parentCB, processCB); + + OsDealAliveChildProcess(processCB); + + processCB->processStatus |= OS_PROCESS_STATUS_ZOMBIES; +#ifdef LOSCFG_KERNEL_VM + (VOID)OsSendSigToProcess(parentCB, SIGCHLD, OS_KERNEL_KILL_PERMISSION); +#endif + LOS_ListHeadInsert(&g_processRecycleList, &processCB->pendList); +======= LOS_ListDelete(&processCB->siblingList);//将自己从兄弟链表中摘除,家人们,永别了! if (!OsProcessExitCodeSignalIsSet(processCB)) {//是否设置了退出码? 
OsProcessExitCodeSet(processCB, status);//将进程状态设为退出码 @@ -575,6 +785,7 @@ VOID OsProcessNaturalExit(LosProcessCB *processCB, UINT32 status) (VOID)OsSendSigToProcess(parentCB, SIGCHLD, OS_KERNEL_KILL_PERMISSION); #endif LOS_ListHeadInsert(&g_processRecycleList, &processCB->pendList);//将进程通过其阻塞节点挂入全局进程回收链表 +>>>>>>> remotes/origin/main return; } @@ -588,17 +799,43 @@ STATIC VOID SystemProcessEarlyInit(LosProcessCB *processCB) #ifdef LOSCFG_KERNEL_CONTAINER OsContainerInitSystemProcess(processCB); #endif +<<<<<<< HEAD + if (processCB == OsGetKernelInitProcess()) { + OsSetMainTaskProcess((UINTPTR)processCB); + } +} + +======= if (processCB == OsGetKernelInitProcess()) {//2号进程 OsSetMainTaskProcess((UINTPTR)processCB);//将内核根进程设为主任务所属进程 } } /*! 进程模块初始化,被编译放在代码段 .init 中*/ +>>>>>>> remotes/origin/main UINT32 OsProcessInit(VOID) { UINT32 index; UINT32 size; UINT32 ret; +<<<<<<< HEAD + g_processMaxNum = LOSCFG_BASE_CORE_PROCESS_LIMIT; + size = (g_processMaxNum + 1) * sizeof(LosProcessCB); + + g_processCBArray = (LosProcessCB *)LOS_MemAlloc(m_aucSysMem1, size); + if (g_processCBArray == NULL) { + return LOS_NOK; + } + (VOID)memset_s(g_processCBArray, size, 0, size); + + LOS_ListInit(&g_freeProcess); + LOS_ListInit(&g_processRecycleList); + + for (index = 0; index < g_processMaxNum; index++) { + g_processCBArray[index].processID = index; + g_processCBArray[index].processStatus = OS_PROCESS_FLAG_UNUSED; + LOS_ListTailInsert(&g_freeProcess, &g_processCBArray[index].pendList); +======= g_processMaxNum = LOSCFG_BASE_CORE_PROCESS_LIMIT;//默认支持64个进程 size = (g_processMaxNum + 1) * sizeof(LosProcessCB); @@ -615,6 +852,7 @@ UINT32 OsProcessInit(VOID) g_processCBArray[index].processID = index;//进程ID[0-g_processMaxNum-1]赋值 g_processCBArray[index].processStatus = OS_PROCESS_FLAG_UNUSED;// 默认都是白纸一张,贴上未使用标签 LOS_ListTailInsert(&g_freeProcess, &g_processCBArray[index].pendList);//注意g_freeProcess挂的是pendList节点,所以使用要通过OS_PCB_FROM_PENDLIST找到进程实体. +>>>>>>> remotes/origin/main } /* Default process to prevent thread PCB from being empty */ @@ -633,22 +871,36 @@ UINT32 OsProcessInit(VOID) #ifdef LOSCFG_KERNEL_PLIMITS OsProcLimiterSetInit(); #endif +<<<<<<< HEAD + SystemProcessEarlyInit(OsGetIdleProcess()); +======= SystemProcessEarlyInit(OsGetIdleProcess());//初始化 0,1,2号进程 +>>>>>>> remotes/origin/main SystemProcessEarlyInit(OsGetUserInitProcess()); SystemProcessEarlyInit(OsGetKernelInitProcess()); return LOS_OK; } +<<<<<<< HEAD + +======= /*! 
进程回收再利用过程*/ +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT VOID OsProcessCBRecycleToFree(VOID) { UINT32 intSave; LosProcessCB *processCB = NULL; SCHEDULER_LOCK(intSave); +<<<<<<< HEAD + while (!LOS_ListEmpty(&g_processRecycleList)) { + processCB = OS_PCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_processRecycleList)); + if (!(processCB->processStatus & OS_PROCESS_FLAG_EXIT)) { +======= while (!LOS_ListEmpty(&g_processRecycleList)) {//循环任务回收链表,直到为空 processCB = OS_PCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_processRecycleList));//找到回收链表中第一个进程实体 //OS_PCB_FROM_PENDLIST 代表的是通过pendlist节点找到 PCB实体,因为g_processRecyleList上面挂的是pendlist节点位置 if (!(processCB->processStatus & OS_PROCESS_FLAG_EXIT)) {//进程没有退出标签 +>>>>>>> remotes/origin/main break; } SCHEDULER_UNLOCK(intSave); @@ -656,17 +908,37 @@ LITE_OS_SEC_TEXT VOID OsProcessCBRecycleToFree(VOID) OsTaskCBRecycleToFree(); SCHEDULER_LOCK(intSave); +<<<<<<< HEAD + processCB->processStatus &= ~OS_PROCESS_FLAG_EXIT; +#ifdef LOSCFG_KERNEL_VM + LosVmSpace *space = NULL; + if (OsProcessIsUserMode(processCB)) { + space = processCB->vmSpace; +======= processCB->processStatus &= ~OS_PROCESS_FLAG_EXIT;//给进程撕掉退出标签,(可能进程并没有这个标签) #ifdef LOSCFG_KERNEL_VM LosVmSpace *space = NULL; if (OsProcessIsUserMode(processCB)) {//进程是否是用户态进程 space = processCB->vmSpace;//只有用户态的进程才需要释放虚拟内存空间 +>>>>>>> remotes/origin/main } processCB->vmSpace = NULL; #endif /* OS_PROCESS_FLAG_GROUP_LEADER: The lead process group cannot be recycled without destroying the PCB. * !OS_PROCESS_FLAG_UNUSED: Parent process does not reclaim child process resources. */ +<<<<<<< HEAD + LOS_ListDelete(&processCB->pendList); + if (OsProcessIsPGroupLeader(processCB) || OsProcessIsDead(processCB)) { + LOS_ListTailInsert(&g_processRecycleList, &processCB->pendList); + } else { + /* Clear the bottom 4 bits of process status */ + OsInsertPCBToFreeList(processCB); + } +#ifdef LOSCFG_KERNEL_VM + SCHEDULER_UNLOCK(intSave); + (VOID)LOS_VmSpaceFree(space); +======= LOS_ListDelete(&processCB->pendList);//将进程从进程链表上摘除 if (OsProcessIsPGroupLeader(processCB) || OsProcessIsDead(processCB)) { LOS_ListTailInsert(&g_processRecycleList, &processCB->pendList);//将进程挂到进程回收链表上,因为组长不能走啊 @@ -677,13 +949,18 @@ LITE_OS_SEC_TEXT VOID OsProcessCBRecycleToFree(VOID) #ifdef LOSCFG_KERNEL_VM SCHEDULER_UNLOCK(intSave); (VOID)LOS_VmSpaceFree(space);//释放用户态进程的虚拟内存空间,因为内核只有一个虚拟空间,因此不需要释放虚拟空间. +>>>>>>> remotes/origin/main SCHEDULER_LOCK(intSave); #endif } SCHEDULER_UNLOCK(intSave); } +<<<<<<< HEAD + +======= /*! 
删除PCB块 其实是 PCB块回归进程池,先进入回收链表*/ +>>>>>>> remotes/origin/main STATIC VOID OsDeInitPCB(LosProcessCB *processCB) { UINT32 intSave; @@ -699,11 +976,19 @@ STATIC VOID OsDeInitPCB(LosProcessCB *processCB) } #endif +<<<<<<< HEAD + OsProcessResourcesToFree(processCB); + + SCHEDULER_LOCK(intSave); + if (processCB->parentProcess != NULL) { + LOS_ListDelete(&processCB->siblingList); +======= OsProcessResourcesToFree(processCB);//释放进程所占用的资源 SCHEDULER_LOCK(intSave); if (processCB->parentProcess != NULL) { LOS_ListDelete(&processCB->siblingList);//将进程从兄弟链表中摘除 +>>>>>>> remotes/origin/main processCB->parentProcess = NULL; } @@ -711,8 +996,13 @@ STATIC VOID OsDeInitPCB(LosProcessCB *processCB) ExitProcessGroup(processCB, &pgroup); } +<<<<<<< HEAD + processCB->processStatus &= ~OS_PROCESS_STATUS_INIT; + processCB->processStatus |= OS_PROCESS_FLAG_EXIT; +======= processCB->processStatus &= ~OS_PROCESS_STATUS_INIT;//设置进程状态为非初始化 processCB->processStatus |= OS_PROCESS_FLAG_EXIT;//设置进程状态为退出 +>>>>>>> remotes/origin/main LOS_ListHeadInsert(&g_processRecycleList, &processCB->pendList); SCHEDULER_UNLOCK(intSave); @@ -721,7 +1011,10 @@ STATIC VOID OsDeInitPCB(LosProcessCB *processCB) return; } +<<<<<<< HEAD +======= /*! 设置进程的名字*/ +>>>>>>> remotes/origin/main UINT32 OsSetProcessName(LosProcessCB *processCB, const CHAR *name) { errno_t errRet; @@ -754,6 +1047,25 @@ UINT32 OsSetProcessName(LosProcessCB *processCB, const CHAR *name) return LOS_OK; } +<<<<<<< HEAD +STATIC UINT32 OsInitPCB(LosProcessCB *processCB, UINT32 mode, const CHAR *name) +{ + processCB->processMode = mode; + processCB->processStatus = OS_PROCESS_STATUS_INIT; + processCB->parentProcess = NULL; + processCB->threadGroup = NULL; + processCB->umask = OS_PROCESS_DEFAULT_UMASK; + processCB->timerID = (timer_t)(UINTPTR)MAX_INVALID_TIMER_VID; + + LOS_ListInit(&processCB->threadSiblingList); + LOS_ListInit(&processCB->childrenList); + LOS_ListInit(&processCB->exitChildList); + LOS_ListInit(&(processCB->waitList)); + +#ifdef LOSCFG_KERNEL_VM + if (OsProcessIsUserMode(processCB)) { + processCB->vmSpace = OsCreateUserVmSpace(); +======= /*! 
初始化PCB(进程控制块)*/ STATIC UINT32 OsInitPCB(LosProcessCB *processCB, UINT32 mode, const CHAR *name) { @@ -772,13 +1084,19 @@ STATIC UINT32 OsInitPCB(LosProcessCB *processCB, UINT32 mode, const CHAR *name) #ifdef LOSCFG_KERNEL_VM if (OsProcessIsUserMode(processCB)) {//如果是用户态进程 processCB->vmSpace = OsCreateUserVmSpace();//创建用户空间 +>>>>>>> remotes/origin/main if (processCB->vmSpace == NULL) { processCB->processStatus = OS_PROCESS_FLAG_UNUSED; return LOS_ENOMEM; } } else { +<<<<<<< HEAD + processCB->vmSpace = LOS_GetKVmSpace(); + } +======= processCB->vmSpace = LOS_GetKVmSpace();//从这里也可以看出,所有内核态进程是共享一个进程空间的 }//在鸿蒙内核态进程只有kprocess 和 kidle 两个 +>>>>>>> remotes/origin/main #endif #ifdef LOSCFG_KERNEL_CPUP @@ -797,7 +1115,11 @@ STATIC UINT32 OsInitPCB(LosProcessCB *processCB, UINT32 mode, const CHAR *name) #endif #ifdef LOSCFG_SECURITY_CAPABILITY +<<<<<<< HEAD + OsInitCapability(processCB); +======= OsInitCapability(processCB);//初始化进程安全相关功能 +>>>>>>> remotes/origin/main #endif if (OsSetProcessName(processCB, name) != LOS_OK) { @@ -806,10 +1128,17 @@ STATIC UINT32 OsInitPCB(LosProcessCB *processCB, UINT32 mode, const CHAR *name) return LOS_OK; } +<<<<<<< HEAD + +#ifdef LOSCFG_SECURITY_CAPABILITY +STATIC User *OsCreateUser(UINT32 userID, UINT32 gid, UINT32 size) +{ +======= //创建用户 #ifdef LOSCFG_SECURITY_CAPABILITY STATIC User *OsCreateUser(UINT32 userID, UINT32 gid, UINT32 size)//参数size 表示组数量 {//(size - 1) * sizeof(UINT32) 用于 user->groups[..],这种设计节约了内存,不造成不需要的浪费 +>>>>>>> remotes/origin/main User *user = LOS_MemAlloc(m_aucSysMem1, sizeof(User) + (size - 1) * sizeof(UINT32)); if (user == NULL) { return NULL; @@ -819,12 +1148,20 @@ STATIC User *OsCreateUser(UINT32 userID, UINT32 gid, UINT32 size)//参数size user->effUserID = userID; user->gid = gid; user->effGid = gid; +<<<<<<< HEAD + user->groupNumber = size; + user->groups[0] = gid; + return user; +} + +======= user->groupNumber = size;//用户组数量 user->groups[0] = gid;//用户组列表,一个用户可以属于多个用户组 return user; } /*! 检查参数群组ID是否在当前用户所属群组中*/ +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT BOOL LOS_CheckInGroups(UINT32 gid) { UINT32 intSave; @@ -832,8 +1169,13 @@ LITE_OS_SEC_TEXT BOOL LOS_CheckInGroups(UINT32 gid) User *user = NULL; SCHEDULER_LOCK(intSave); +<<<<<<< HEAD + user = OsCurrUserGet(); + for (count = 0; count < user->groupNumber; count++) { +======= user = OsCurrUserGet();//当前进程所属用户 for (count = 0; count < user->groupNumber; count++) {//循环对比 +>>>>>>> remotes/origin/main if (user->groups[count] == gid) { SCHEDULER_UNLOCK(intSave); return TRUE; @@ -845,7 +1187,10 @@ LITE_OS_SEC_TEXT BOOL LOS_CheckInGroups(UINT32 gid) } #endif +<<<<<<< HEAD +======= /*! 获取当前进程的用户ID*/ +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT INT32 LOS_GetUserID(VOID) { #ifdef LOSCFG_SECURITY_CAPABILITY @@ -865,7 +1210,10 @@ LITE_OS_SEC_TEXT INT32 LOS_GetUserID(VOID) #endif } +<<<<<<< HEAD +======= /*! 获取当前进程的用户组ID*/ +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT INT32 LOS_GetGroupID(VOID) { #ifdef LOSCFG_SECURITY_CAPABILITY @@ -886,7 +1234,10 @@ LITE_OS_SEC_TEXT INT32 LOS_GetGroupID(VOID) #endif } +<<<<<<< HEAD +======= /*! 
进程创建初始化*/ +>>>>>>> remotes/origin/main STATIC UINT32 OsSystemProcessInit(LosProcessCB *processCB, UINT32 flags, const CHAR *name) { UINT32 ret = OsInitPCB(processCB, flags, name); @@ -895,7 +1246,11 @@ STATIC UINT32 OsSystemProcessInit(LosProcessCB *processCB, UINT32 flags, const C } #ifdef LOSCFG_FS_VFS +<<<<<<< HEAD + processCB->files = alloc_files(); +======= processCB->files = alloc_files();//分配进程的文件的管理器 +>>>>>>> remotes/origin/main if (processCB->files == NULL) { ret = LOS_ENOMEM; goto EXIT; @@ -908,8 +1263,13 @@ STATIC UINT32 OsSystemProcessInit(LosProcessCB *processCB, UINT32 flags, const C goto EXIT; } +<<<<<<< HEAD +#ifdef LOSCFG_SECURITY_CAPABILITY + processCB->user = OsCreateUser(0, 0, 1); +======= #ifdef LOSCFG_SECURITY_CAPABILITY //用户安全宏 processCB->user = OsCreateUser(0, 0, 1); //创建用户 +>>>>>>> remotes/origin/main if (processCB->user == NULL) { ret = LOS_ENOMEM; goto EXIT; @@ -917,7 +1277,11 @@ STATIC UINT32 OsSystemProcessInit(LosProcessCB *processCB, UINT32 flags, const C #endif #ifdef LOSCFG_KERNEL_PLIMITS +<<<<<<< HEAD + ret = OsPLimitsAddProcess(NULL, processCB); +======= ret = OsPLimitsAddProcess(NULL, processCB);//删除进程控制块,归还内存 +>>>>>>> remotes/origin/main if (ret != LOS_OK) { ret = LOS_ENOMEM; goto EXIT; @@ -929,7 +1293,11 @@ EXIT: OsDeInitPCB(processCB); return ret; } +<<<<<<< HEAD + +======= /*! 创建2,0号进程,即内核态进程的老祖宗*/ +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_INIT UINT32 OsSystemProcessCreate(VOID) { LosProcessCB *kerInitProcess = OsGetKernelInitProcess(); @@ -937,14 +1305,31 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSystemProcessCreate(VOID) if (ret != LOS_OK) { return ret; } +<<<<<<< HEAD + kerInitProcess->processStatus &= ~OS_PROCESS_STATUS_INIT; + + LosProcessCB *idleProcess = OsGetIdleProcess(); + ret = OsInitPCB(idleProcess, OS_KERNEL_MODE, "KIdle"); +======= kerInitProcess->processStatus &= ~OS_PROCESS_STATUS_INIT;//去掉初始化标签 LosProcessCB *idleProcess = OsGetIdleProcess(); ret = OsInitPCB(idleProcess, OS_KERNEL_MODE, "KIdle");//创建内核态0号进程 +>>>>>>> remotes/origin/main if (ret != LOS_OK) { return ret; } idleProcess->parentProcess = kerInitProcess; +<<<<<<< HEAD + LOS_ListTailInsert(&kerInitProcess->childrenList, &idleProcess->siblingList); + idleProcess->pgroup = kerInitProcess->pgroup; + LOS_ListTailInsert(&kerInitProcess->pgroup->processList, &idleProcess->subordinateGroupList); +#ifdef LOSCFG_SECURITY_CAPABILITY + idleProcess->user = kerInitProcess->user; +#endif +#ifdef LOSCFG_FS_VFS + idleProcess->files = kerInitProcess->files; +======= LOS_ListTailInsert(&kerInitProcess->childrenList, &idleProcess->siblingList);//挂到内核态祖宗进程的子孙链接上 idleProcess->pgroup = kerInitProcess->pgroup; LOS_ListTailInsert(&kerInitProcess->pgroup->processList, &idleProcess->subordinateGroupList); @@ -953,6 +1338,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSystemProcessCreate(VOID) #endif #ifdef LOSCFG_FS_VFS idleProcess->files = kerInitProcess->files;//共享文件 +>>>>>>> remotes/origin/main #endif idleProcess->processStatus &= ~OS_PROCESS_STATUS_INIT; @@ -962,7 +1348,11 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSystemProcessCreate(VOID) } return LOS_OK; } +<<<<<<< HEAD + +======= // 进程调度参数检查 +>>>>>>> remotes/origin/main INT32 OsSchedulerParamCheck(UINT16 policy, BOOL isThread, const LosSchedParam *param) { if (param == NULL) { @@ -995,17 +1385,34 @@ INT32 OsSchedulerParamCheck(UINT16 policy, BOOL isThread, const LosSchedParam *p STATIC INLINE INT32 ProcessSchedulerParamCheck(INT32 which, INT32 pid, UINT16 policy, const LosSchedParam *param) { +<<<<<<< HEAD + if (OS_PID_CHECK_INVALID(pid)) { + return LOS_EINVAL; + } + 
+ if (which != LOS_PRIO_PROCESS) { +======= if (OS_PID_CHECK_INVALID(pid)) {//进程ID是否有效,默认 g_processMaxNum = 64 return LOS_EINVAL; } if (which != LOS_PRIO_PROCESS) {//进程标识 +>>>>>>> remotes/origin/main return LOS_EINVAL; } return OsSchedulerParamCheck(policy, FALSE, param); } +<<<<<<< HEAD +#ifdef LOSCFG_SECURITY_CAPABILITY +STATIC BOOL OsProcessCapPermitCheck(const LosProcessCB *processCB, const SchedParam *param, UINT16 policy, UINT16 prio) +{ + LosProcessCB *runProcess = OsCurrProcessGet(); + + /* always trust kernel process */ + if (!OsProcessIsUserMode(runProcess)) { +======= #ifdef LOSCFG_SECURITY_CAPABILITY//检查进程的安全许可证 STATIC BOOL OsProcessCapPermitCheck(const LosProcessCB *processCB, const SchedParam *param, UINT16 policy, UINT16 prio) { @@ -1013,6 +1420,7 @@ STATIC BOOL OsProcessCapPermitCheck(const LosProcessCB *processCB, const SchedPa /* always trust kernel process */ if (!OsProcessIsUserMode(runProcess)) {//进程必须在内核模式下,也就是说在内核模式下是安全的. +>>>>>>> remotes/origin/main return TRUE; } @@ -1028,7 +1436,11 @@ STATIC BOOL OsProcessCapPermitCheck(const LosProcessCB *processCB, const SchedPa return FALSE; } #endif +<<<<<<< HEAD + +======= // 设置进程调度计划 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT INT32 OsSetProcessScheduler(INT32 which, INT32 pid, UINT16 policy, const LosSchedParam *schedParam) { SchedParam param = { 0 }; @@ -1041,8 +1453,13 @@ LITE_OS_SEC_TEXT INT32 OsSetProcessScheduler(INT32 which, INT32 pid, UINT16 poli } LosProcessCB *processCB = OS_PCB_FROM_PID(pid); +<<<<<<< HEAD + SCHEDULER_LOCK(intSave); + if (OsProcessIsInactive(processCB)) { +======= SCHEDULER_LOCK(intSave);//持有调度自旋锁,多CPU情况下调度期间需要原子处理 if (OsProcessIsInactive(processCB)) {//进程未活动的处理 +>>>>>>> remotes/origin/main SCHEDULER_UNLOCK(intSave); return -LOS_ESRCH; } @@ -1082,6 +1499,17 @@ LITE_OS_SEC_TEXT INT32 OsSetProcessScheduler(INT32 which, INT32 pid, UINT16 poli needSched = taskCB->ops->schedParamModify(taskCB, ¶m); TO_SCHED: +<<<<<<< HEAD + SCHEDULER_UNLOCK(intSave); + + LOS_MpSchedule(OS_MP_CPU_ALL); + if (needSched && OS_SCHEDULER_ACTIVE) { + LOS_Schedule(); + } + return LOS_OK; +} + +======= SCHEDULER_UNLOCK(intSave);//还锁 LOS_MpSchedule(OS_MP_CPU_ALL);//核间中断 @@ -1091,11 +1519,16 @@ TO_SCHED: return LOS_OK; } // 设置指定进程的调度参数,包括优先级和调度策略 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT INT32 LOS_SetProcessScheduler(INT32 pid, UINT16 policy, const LosSchedParam *schedParam) { return OsSetProcessScheduler(LOS_PRIO_PROCESS, pid, policy, schedParam); } +<<<<<<< HEAD + +======= // 获得指定进程的调度策略 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT INT32 LOS_GetProcessScheduler(INT32 pid, INT32 *policy, LosSchedParam *schedParam) { UINT32 intSave; @@ -1139,7 +1572,11 @@ LITE_OS_SEC_TEXT INT32 LOS_GetProcessScheduler(INT32 pid, INT32 *policy, LosSche } return LOS_OK; } +<<<<<<< HEAD + +======= // 接口封装 - 设置进程优先级 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT INT32 LOS_SetProcessPriority(INT32 pid, INT32 prio) { INT32 ret; @@ -1159,7 +1596,11 @@ LITE_OS_SEC_TEXT INT32 LOS_SetProcessPriority(INT32 pid, INT32 prio) return OsSetProcessScheduler(LOS_PRIO_PROCESS, pid, (UINT16)policy, ¶m); } +<<<<<<< HEAD + +======= // 接口封装 - 获取进程优先级 which:标识进程,进程组,用户 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT INT32 OsGetProcessPriority(INT32 which, INT32 pid) { UINT32 intSave; @@ -1192,15 +1633,23 @@ LITE_OS_SEC_TEXT INT32 OsGetProcessPriority(INT32 which, INT32 pid) SCHEDULER_UNLOCK(intSave); return param.basePrio; } +<<<<<<< HEAD + +======= // 接口封装 - 获取指定进程优先级 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT INT32 LOS_GetProcessPriority(INT32 pid) { return 
OsGetProcessPriority(LOS_PRIO_PROCESS, pid); } +<<<<<<< HEAD + +======= /*! * 将任务挂入进程的waitList链表,表示这个任务在等待某个进程的退出 * 当被等待进程退出时候会将自己挂到父进程的退出子进程链表和进程组的退出进程链表. */ +>>>>>>> remotes/origin/main STATIC VOID OsWaitInsertWaitListInOrder(LosTaskCB *runTask, LosProcessCB *processCB) { LOS_DL_LIST *head = &processCB->waitList; @@ -1232,7 +1681,11 @@ STATIC VOID OsWaitInsertWaitListInOrder(LosTaskCB *runTask, LosProcessCB *proces (VOID)runTask->ops->wait(runTask, list->pstNext, LOS_WAIT_FOREVER); return; } +<<<<<<< HEAD + +======= // 设置等待子进程退出方式方法 +>>>>>>> remotes/origin/main STATIC UINT32 WaitFindSpecifiedProcess(UINT32 pid, LosTaskCB *runTask, const LosProcessCB *processCB, LosProcessCB **childCB) { @@ -1253,7 +1706,11 @@ STATIC UINT32 WaitFindSpecifiedProcess(UINT32 pid, LosTaskCB *runTask, #endif /* Wait for the child process whose process number is pid. */ *childCB = OsFindExitChildProcess(processCB, waitProcess); +<<<<<<< HEAD + if (*childCB != NULL) { +======= if (*childCB != NULL) {//找到了,确实有一个已经退出的PID,注意一个进程退出时会挂到父进程的exitChildList上 +>>>>>>> remotes/origin/main return LOS_OK; } @@ -1261,7 +1718,11 @@ STATIC UINT32 WaitFindSpecifiedProcess(UINT32 pid, LosTaskCB *runTask, return LOS_ECHILD; } +<<<<<<< HEAD + runTask->waitFlag = OS_PROCESS_WAIT_PRO; +======= runTask->waitFlag = OS_PROCESS_WAIT_PRO;//设置当前任务的等待类型 +>>>>>>> remotes/origin/main runTask->waitID = (UINTPTR)waitProcess; return LOS_OK; } @@ -1280,6 +1741,20 @@ STATIC UINT32 OsWaitSetFlag(const LosProcessCB *processCB, INT32 pid, LosProcess if (childCB != NULL) { goto WAIT_BACK; } +<<<<<<< HEAD + } else if (pid == 0) { + /* Wait for any child process in the same process group */ + childCB = OsFindGroupExitProcess(processCB->pgroup, OS_INVALID_VALUE); + if (childCB != NULL) { + goto WAIT_BACK; + } + runTask->waitID = (UINTPTR)OS_GET_PGROUP_LEADER(processCB->pgroup); + runTask->waitFlag = OS_PROCESS_WAIT_GID; + } else if (pid == -1) { + /* Wait for any child process */ + childCB = OsFindExitChildProcess(processCB, NULL); + if (childCB != NULL) { +======= } else if (pid == 0) {//等待同一进程组中的任何子进程 /* Wait for any child process in the same process group */ childCB = OsFindGroupExitProcess(processCB->pgroup, OS_INVALID_VALUE); @@ -1292,6 +1767,7 @@ STATIC UINT32 OsWaitSetFlag(const LosProcessCB *processCB, INT32 pid, LosProcess /* Wait for any child process */ childCB = OsFindExitChildProcess(processCB, NULL); if (childCB != NULL) {//找到了,确实有一个已经退出的PID +>>>>>>> remotes/origin/main goto WAIT_BACK; } runTask->waitID = pid; @@ -1309,14 +1785,22 @@ STATIC UINT32 OsWaitSetFlag(const LosProcessCB *processCB, INT32 pid, LosProcess } runTask->waitID = (UINTPTR)OS_GET_PGROUP_LEADER(pgroup); +<<<<<<< HEAD + runTask->waitFlag = OS_PROCESS_WAIT_GID; +======= runTask->waitFlag = OS_PROCESS_WAIT_GID;//设置当前任务的等待类型 +>>>>>>> remotes/origin/main } WAIT_BACK: *child = childCB; return LOS_OK; } +<<<<<<< HEAD + +======= // 等待回收孩子进程 +>>>>>>> remotes/origin/main STATIC UINT32 OsWaitRecycleChildProcess(const LosProcessCB *childCB, UINT32 intSave, INT32 *status, siginfo_t *info) { ProcessGroup *pgroup = NULL; @@ -1335,7 +1819,11 @@ STATIC UINT32 OsWaitRecycleChildProcess(const LosProcessCB *childCB, UINT32 intS SCHEDULER_UNLOCK(intSave); if (status != NULL) { +<<<<<<< HEAD + if (mode == OS_USER_MODE) { +======= if (mode == OS_USER_MODE) {//孩子为用户态进程 +>>>>>>> remotes/origin/main (VOID)LOS_ArchCopyToUser((VOID *)status, (const VOID *)(&(exitCode)), sizeof(INT32)); } else { *status = exitCode; @@ -1371,14 +1859,24 @@ STATIC UINT32 OsWaitRecycleChildProcess(const LosProcessCB 
*childCB, UINT32 intS (VOID)LOS_MemFree(m_aucSysMem1, pgroup); return pid; } +<<<<<<< HEAD + +STATIC UINT32 OsWaitChildProcessCheck(LosProcessCB *processCB, INT32 pid, LosProcessCB **childCB) +{ +======= // 检查要等待的孩子进程 STATIC UINT32 OsWaitChildProcessCheck(LosProcessCB *processCB, INT32 pid, LosProcessCB **childCB) {//当进程没有孩子且没有退出的孩子进程 +>>>>>>> remotes/origin/main if (LOS_ListEmpty(&(processCB->childrenList)) && LOS_ListEmpty(&(processCB->exitChildList))) { return LOS_ECHILD; } +<<<<<<< HEAD + return OsWaitSetFlag(processCB, pid, childCB); +======= return OsWaitSetFlag(processCB, pid, childCB);//设置等待子进程退出方式方法 +>>>>>>> remotes/origin/main } STATIC UINT32 OsWaitOptionsCheck(UINT32 options) @@ -1386,6 +1884,18 @@ STATIC UINT32 OsWaitOptionsCheck(UINT32 options) UINT32 flag = LOS_WAIT_WNOHANG | LOS_WAIT_WUNTRACED | LOS_WAIT_WCONTINUED; flag = ~flag & options; +<<<<<<< HEAD + if (flag != 0) { + return LOS_EINVAL; + } + + if ((options & (LOS_WAIT_WCONTINUED | LOS_WAIT_WUNTRACED)) != 0) { + return LOS_EOPNOTSUPP; + } + + if (OS_INT_ACTIVE) { + return LOS_EINTR; +======= if (flag != 0) {//三种方式中一种都没有 return LOS_EINVAL;//无效参数 } @@ -1396,11 +1906,16 @@ STATIC UINT32 OsWaitOptionsCheck(UINT32 options) if (OS_INT_ACTIVE) {//中断发生期间 return LOS_EINTR;//中断提示 +>>>>>>> remotes/origin/main } return LOS_OK; } +<<<<<<< HEAD + +======= //等待子进程结束并回收子进程,返回已经终止的子进程的进程ID号,并清除僵死进程。 +>>>>>>> remotes/origin/main STATIC INT32 OsWait(INT32 pid, USER INT32 *status, USER siginfo_t *info, UINT32 options, VOID *rusage) { (VOID)rusage; @@ -1411,13 +1926,21 @@ STATIC INT32 OsWait(INT32 pid, USER INT32 *status, USER siginfo_t *info, UINT32 LosProcessCB *processCB = OsCurrProcessGet(); LosTaskCB *runTask = OsCurrTaskGet(); SCHEDULER_LOCK(intSave); +<<<<<<< HEAD + ret = OsWaitChildProcessCheck(processCB, pid, &childCB); +======= ret = OsWaitChildProcessCheck(processCB, pid, &childCB);//先检查下看能不能找到参数要求的退出子进程 +>>>>>>> remotes/origin/main if (ret != LOS_OK) { pid = -ret; goto ERROR; } +<<<<<<< HEAD + if (childCB != NULL) { +======= if (childCB != NULL) {//找到了进程 +>>>>>>> remotes/origin/main #ifdef LOSCFG_PID_CONTAINER if (childCB == processCB) { SCHEDULER_UNLOCK(intSave); @@ -1429,6 +1952,23 @@ STATIC INT32 OsWait(INT32 pid, USER INT32 *status, USER siginfo_t *info, UINT32 #endif return (INT32)OsWaitRecycleChildProcess(childCB, intSave, status, info); } +<<<<<<< HEAD + + if ((options & LOS_WAIT_WNOHANG) != 0) { + runTask->waitFlag = 0; + pid = 0; + goto ERROR; + } + + OsWaitInsertWaitListInOrder(runTask, processCB); + + runTask->waitFlag = 0; + if (runTask->waitID == OS_INVALID_VALUE) { + pid = -LOS_ECHILD; + goto ERROR; + } + +======= //没有找到,看是否要返回还是去做个登记 if ((options & LOS_WAIT_WNOHANG) != 0) {//有LOS_WAIT_WNOHANG标签 runTask->waitFlag = 0;//等待标识置0 @@ -1445,6 +1985,7 @@ STATIC INT32 OsWait(INT32 pid, USER INT32 *status, USER siginfo_t *info, UINT32 goto ERROR; } //回收僵死进程 +>>>>>>> remotes/origin/main childCB = (LosProcessCB *)runTask->waitID; if (!OsProcessIsDead(childCB)) { pid = -LOS_ESRCH; @@ -1531,12 +2072,21 @@ UINT32 OsGetProcessGroupCB(UINT32 pid, UINTPTR *ppgroupLeader) STATIC UINT32 OsSetProcessGroupCheck(const LosProcessCB *processCB, LosProcessCB *pgroupCB) { +<<<<<<< HEAD + LosProcessCB *runProcessCB = OsCurrProcessGet(); + + if (OsProcessIsInactive(processCB)) { + return LOS_ESRCH; + } + +======= LosProcessCB *runProcessCB = OsCurrProcessGet();//拿到当前运行进程 if (OsProcessIsInactive(processCB)) {//进程是否活动 return LOS_ESRCH; } //参数进程不在用户态或者组长不在用户态 +>>>>>>> remotes/origin/main #ifdef LOSCFG_PID_CONTAINER if ((processCB->processID == 
OS_USER_ROOT_PROCESS_ID) || OS_PROCESS_CONTAINER_CHECK(processCB, runProcessCB)) { return LOS_EPERM; @@ -1653,12 +2203,20 @@ EXIT: SCHEDULER_UNLOCK(intSave); return gid; } +<<<<<<< HEAD + +======= // 获取当前进程的组ID +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT INT32 LOS_GetCurrProcessGroupID(VOID) { return LOS_GetProcessGroupID(OsCurrProcessGet()->processID); } +<<<<<<< HEAD + +======= // 为用户态任务分配栈空间 +>>>>>>> remotes/origin/main #ifdef LOSCFG_KERNEL_VM STATIC LosProcessCB *OsGetFreePCB(VOID) { @@ -1682,19 +2240,34 @@ STATIC LosProcessCB *OsGetFreePCB(VOID) STATIC VOID *OsUserInitStackAlloc(LosProcessCB *processCB, UINT32 *size) { LosVmMapRegion *region = NULL; +<<<<<<< HEAD + UINT32 stackSize = ALIGN(OS_USER_TASK_STACK_SIZE, PAGE_SIZE); + + region = LOS_RegionAlloc(processCB->vmSpace, 0, stackSize, + VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ | + VM_MAP_REGION_FLAG_PERM_WRITE, 0); +======= UINT32 stackSize = ALIGN(OS_USER_TASK_STACK_SIZE, PAGE_SIZE);//1M栈空间 按页对齐 //线性区分配虚拟内存 region = LOS_RegionAlloc(processCB->vmSpace, 0, stackSize, VM_MAP_REGION_FLAG_PERM_USER | VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE, 0);//可使用可读可写区 +>>>>>>> remotes/origin/main if (region == NULL) { return NULL; } +<<<<<<< HEAD + LOS_SetRegionTypeAnon(region); + region->regionFlags |= VM_MAP_REGION_FLAG_STACK; + + *size = stackSize; +======= LOS_SetRegionTypeAnon(region);//匿名映射 region->regionFlags |= VM_MAP_REGION_FLAG_STACK;//标记该线性区为栈区 *size = stackSize;//记录栈大小 +>>>>>>> remotes/origin/main return (VOID *)(UINTPTR)region->range.base; } @@ -1729,6 +2302,8 @@ LITE_OS_SEC_TEXT LosVmSpace *OsExecProcessVmSpaceReplace(LosVmSpace *newSpace, U return oldSpace; } +<<<<<<< HEAD +======= /** * @brief 进程的回收再利用,被LOS_DoExecveFile调用 * @param processCB @@ -1737,6 +2312,7 @@ LITE_OS_SEC_TEXT LosVmSpace *OsExecProcessVmSpaceReplace(LosVmSpace *newSpace, U * @param oldFiles * @return LITE_OS_SEC_TEXT */ +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT UINT32 OsExecRecycleAndInit(LosProcessCB *processCB, const CHAR *name, LosVmSpace *oldSpace, UINTPTR oldFiles) { @@ -1786,7 +2362,11 @@ LITE_OS_SEC_TEXT UINT32 OsExecRecycleAndInit(LosProcessCB *processCB, const CHAR return LOS_OK; } +<<<<<<< HEAD + +======= // 执行用户态任务, entry为入口函数 ,其中 创建好task,task上下文 等待调度真正执行, sp:栈指针 mapBase:栈底 mapSize:栈大小 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT UINT32 OsExecStart(const TSK_ENTRY_FUNC entry, UINTPTR sp, UINTPTR mapBase, UINT32 mapSize) { UINT32 intSave; @@ -1795,6 +2375,31 @@ LITE_OS_SEC_TEXT UINT32 OsExecStart(const TSK_ENTRY_FUNC entry, UINTPTR sp, UINT return LOS_NOK; } +<<<<<<< HEAD + if ((sp == 0) || (LOS_Align(sp, LOSCFG_STACK_POINT_ALIGN_SIZE) != sp)) { + return LOS_NOK; + } + + if ((mapBase == 0) || (mapSize == 0) || (sp <= mapBase) || (sp > (mapBase + mapSize))) { + return LOS_NOK; + } + + LosTaskCB *taskCB = OsCurrTaskGet(); + + SCHEDULER_LOCK(intSave); + taskCB->userMapBase = mapBase; + taskCB->userMapSize = mapSize; + taskCB->taskEntry = (TSK_ENTRY_FUNC)entry; + + TaskContext *taskContext = (TaskContext *)OsTaskStackInit(taskCB->taskID, taskCB->stackSize, + (VOID *)taskCB->topOfStack, FALSE); + OsUserTaskStackInit(taskContext, (UINTPTR)taskCB->taskEntry, sp); + SCHEDULER_UNLOCK(intSave); + return LOS_OK; +} +#endif + +======= if ((sp == 0) || (LOS_Align(sp, LOSCFG_STACK_POINT_ALIGN_SIZE) != sp)) {//对齐 return LOS_NOK; } @@ -1819,6 +2424,7 @@ LITE_OS_SEC_TEXT UINT32 OsExecStart(const TSK_ENTRY_FUNC entry, UINTPTR sp, UINT } #endif // 用户进程开始初始化 +>>>>>>> remotes/origin/main STATIC UINT32 
OsUserInitProcessStart(LosProcessCB *processCB, TSK_INIT_PARAM_S *param) { UINT32 intSave; @@ -1839,7 +2445,11 @@ STATIC UINT32 OsUserInitProcessStart(LosProcessCB *processCB, TSK_INIT_PARAM_S * processCB->processStatus &= ~OS_PROCESS_STATUS_INIT; SCHEDULER_UNLOCK(intSave); +<<<<<<< HEAD + ret = LOS_SetTaskScheduler(taskID, LOS_SCHED_RR, OS_TASK_PRIORITY_LOWEST); +======= ret = LOS_SetTaskScheduler(taskID, LOS_SCHED_RR, OS_TASK_PRIORITY_LOWEST);//调度器:设置为抢占式调度和最低任务优先级(31级) +>>>>>>> remotes/origin/main if (ret != LOS_OK) { PRINT_ERR("User init process set scheduler failed! ERROR:%d \n", ret); goto EXIT; @@ -1958,7 +2568,11 @@ ERROR: OsDeInitPCB(processCB); return ret; } +<<<<<<< HEAD + +======= // 拷贝用户信息 直接用memcpy_s +>>>>>>> remotes/origin/main STATIC UINT32 OsCopyUser(LosProcessCB *childCB, LosProcessCB *parentCB) { #ifdef LOSCFG_SECURITY_CAPABILITY @@ -1973,7 +2587,10 @@ STATIC UINT32 OsCopyUser(LosProcessCB *childCB, LosProcessCB *parentCB) return LOS_OK; } +<<<<<<< HEAD +======= //拷贝一个Task过程 +>>>>>>> remotes/origin/main STATIC VOID GetCopyTaskParam(LosProcessCB *childProcessCB, UINTPTR entry, UINT32 size, TSK_INIT_PARAM_S *taskParam, SchedParam *param) { @@ -1981,6 +2598,17 @@ STATIC VOID GetCopyTaskParam(LosProcessCB *childProcessCB, UINTPTR entry, UINT32 LosTaskCB *runTask = OsCurrTaskGet(); SCHEDULER_LOCK(intSave); +<<<<<<< HEAD + if (OsProcessIsUserMode(childProcessCB)) { + taskParam->pfnTaskEntry = runTask->taskEntry; + taskParam->uwStackSize = runTask->stackSize; + taskParam->userParam.userArea = runTask->userArea; + taskParam->userParam.userMapBase = runTask->userMapBase; + taskParam->userParam.userMapSize = runTask->userMapSize; + } else { + taskParam->pfnTaskEntry = (TSK_ENTRY_FUNC)entry; + taskParam->uwStackSize = size; +======= if (OsProcessIsUserMode(childProcessCB)) {//用户态进程 taskParam->pfnTaskEntry = runTask->taskEntry;//拷贝当前任务入口地址 taskParam->uwStackSize = runTask->stackSize;//栈空间大小 @@ -1990,6 +2618,7 @@ STATIC VOID GetCopyTaskParam(LosProcessCB *childProcessCB, UINTPTR entry, UINT32 } else {//注意内核态进程创建任务的入口由外界指定,例如 OsCreateIdleProcess 指定了OsIdleTask taskParam->pfnTaskEntry = (TSK_ENTRY_FUNC)entry;//参数(sp)为内核态入口地址 taskParam->uwStackSize = size;//参数(size)为内核态栈大小 +>>>>>>> remotes/origin/main } if (runTask->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) { taskParam->uwResved = LOS_TASK_ATTR_JOINABLE; @@ -2025,6 +2654,20 @@ STATIC UINT32 OsCopyTask(UINT32 flags, LosProcessCB *childProcessCB, const CHAR } LosTaskCB *childTaskCB = childProcessCB->threadGroup; +<<<<<<< HEAD + childTaskCB->taskStatus = runTask->taskStatus; + childTaskCB->ops->schedParamModify(childTaskCB, ¶m); + if (childTaskCB->taskStatus & OS_TASK_STATUS_RUNNING) { + childTaskCB->taskStatus &= ~OS_TASK_STATUS_RUNNING; + } else { + if (OS_SCHEDULER_ACTIVE) { + LOS_Panic("Clone thread status not running error status: 0x%x\n", childTaskCB->taskStatus); + } + childTaskCB->taskStatus &= ~OS_TASK_STATUS_UNUSED; + } + + if (OsProcessIsUserMode(childProcessCB)) { +======= childTaskCB->taskStatus = runTask->taskStatus;//任务状态先同步,注意这里是赋值操作. 
...01101001 childTaskCB->ops->schedParamModify(childTaskCB, ¶m); if (childTaskCB->taskStatus & OS_TASK_STATUS_RUNNING) {//因只能有一个运行的task,所以如果一样要改4号位 @@ -2037,13 +2680,18 @@ STATIC UINT32 OsCopyTask(UINT32 flags, LosProcessCB *childProcessCB, const CHAR } if (OsProcessIsUserMode(childProcessCB)) {//是否是用户进程 +>>>>>>> remotes/origin/main SCHEDULER_LOCK(intSave); OsUserCloneParentStack(childTaskCB->stackPointer, entry, runTask->topOfStack, runTask->stackSize); SCHEDULER_UNLOCK(intSave); } return LOS_OK; } +<<<<<<< HEAD + +======= //拷贝父亲大人的遗传基因信息 +>>>>>>> remotes/origin/main STATIC UINT32 OsCopyParent(UINT32 flags, LosProcessCB *childProcessCB, LosProcessCB *runProcessCB) { UINT32 intSave; @@ -2051,6 +2699,15 @@ STATIC UINT32 OsCopyParent(UINT32 flags, LosProcessCB *childProcessCB, LosProces SCHEDULER_LOCK(intSave); if (childProcessCB->parentProcess == NULL) { +<<<<<<< HEAD + if (flags & CLONE_PARENT) { + parentProcessCB = runProcessCB->parentProcess; + } else { + parentProcessCB = runProcessCB; + } + childProcessCB->parentProcess = parentProcessCB; + LOS_ListTailInsert(&parentProcessCB->childrenList, &childProcessCB->siblingList); +======= if (flags & CLONE_PARENT) { //这里指明 childProcessCB 和 runProcessCB 有同一个父亲,是兄弟关系 parentProcessCB = runProcessCB->parentProcess; } else { @@ -2059,6 +2716,7 @@ STATIC UINT32 OsCopyParent(UINT32 flags, LosProcessCB *childProcessCB, LosProces childProcessCB->parentProcess = parentProcessCB; LOS_ListTailInsert(&parentProcessCB->childrenList, &childProcessCB->siblingList);//通过我的兄弟姐妹节点,挂到父亲的孩子链表上,于我而言,父亲的这个链表上挂的都是我的兄弟姐妹 //不会被排序,老大,老二,老三 老天爷指定了。 +>>>>>>> remotes/origin/main } if (childProcessCB->pgroup == NULL) { @@ -2068,12 +2726,26 @@ STATIC UINT32 OsCopyParent(UINT32 flags, LosProcessCB *childProcessCB, LosProces SCHEDULER_UNLOCK(intSave); return LOS_OK; } +<<<<<<< HEAD + +======= //拷贝虚拟空间 +>>>>>>> remotes/origin/main STATIC UINT32 OsCopyMM(UINT32 flags, LosProcessCB *childProcessCB, LosProcessCB *runProcessCB) { status_t status; UINT32 intSave; +<<<<<<< HEAD + if (!OsProcessIsUserMode(childProcessCB)) { + return LOS_OK; + } + + if (flags & CLONE_VM) { + SCHEDULER_LOCK(intSave); + childProcessCB->vmSpace->archMmu.virtTtb = runProcessCB->vmSpace->archMmu.virtTtb; + childProcessCB->vmSpace->archMmu.physTtb = runProcessCB->vmSpace->archMmu.physTtb; +======= if (!OsProcessIsUserMode(childProcessCB)) {//不是用户模式,直接返回,什么意思?内核虚拟空间只有一个,无需COPY !!! return LOS_OK; } @@ -2082,6 +2754,7 @@ STATIC UINT32 OsCopyMM(UINT32 flags, LosProcessCB *childProcessCB, LosProcessCB SCHEDULER_LOCK(intSave); childProcessCB->vmSpace->archMmu.virtTtb = runProcessCB->vmSpace->archMmu.virtTtb;//TTB虚拟地址基地址,即L1表存放位置,virtTtb是个指针,进程的虚拟空间是指定的范围的 childProcessCB->vmSpace->archMmu.physTtb = runProcessCB->vmSpace->archMmu.physTtb;//TTB物理地址基地址,physTtb是个值,取决于运行时映射到物理内存的具体哪个位置. 
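    /* Editor's note: what sharing virtTtb/physTtb above means in practice (a
     * sketch of the CLONE_VM case, not a definitive reading). The child aliases
     * the parent's first-level translation table instead of receiving a copy,
     * so both processes observe a single user address space:
     *
     *     child->vmSpace->archMmu.virtTtb = parent->vmSpace->archMmu.virtTtb; // same L1 table (kernel VA)
     *     child->vmSpace->archMmu.physTtb = parent->vmSpace->archMmu.physTtb; // same L1 table (PA for the MMU)
     *
     * Without CLONE_VM, the (elided) else path duplicates the user mappings
     * instead, giving the child an independent forked address space.
     */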
+>>>>>>> remotes/origin/main SCHEDULER_UNLOCK(intSave); return LOS_OK; } @@ -2092,7 +2765,11 @@ STATIC UINT32 OsCopyMM(UINT32 flags, LosProcessCB *childProcessCB, LosProcessCB } return LOS_OK; } +<<<<<<< HEAD + +======= // 拷贝进程文件描述符(proc_fd)信息 +>>>>>>> remotes/origin/main STATIC UINT32 OsCopyFile(UINT32 flags, LosProcessCB *childProcessCB, LosProcessCB *runProcessCB) { #ifdef LOSCFG_FS_VFS @@ -2122,7 +2799,11 @@ STATIC UINT32 OsCopyFile(UINT32 flags, LosProcessCB *childProcessCB, LosProcessC #endif #endif +<<<<<<< HEAD + childProcessCB->consoleID = runProcessCB->consoleID; +======= childProcessCB->consoleID = runProcessCB->consoleID;//控制台也是文件 +>>>>>>> remotes/origin/main childProcessCB->umask = runProcessCB->umask; return LOS_OK; } @@ -2130,16 +2811,28 @@ STATIC UINT32 OsCopyFile(UINT32 flags, LosProcessCB *childProcessCB, LosProcessC STATIC UINT32 OsForkInitPCB(UINT32 flags, LosProcessCB *child, const CHAR *name, UINTPTR sp, UINT32 size) { UINT32 ret; +<<<<<<< HEAD + LosProcessCB *run = OsCurrProcessGet(); + + ret = OsCopyParent(flags, child, run); +======= LosProcessCB *run = OsCurrProcessGet();//获取当前进程 ret = OsCopyParent(flags, child, run);//拷贝父亲大人的基因信息 +>>>>>>> remotes/origin/main if (ret != LOS_OK) { return ret; } +<<<<<<< HEAD + return OsCopyTask(flags, child, name, sp, size); +} + +======= return OsCopyTask(flags, child, name, sp, size);//拷贝任务,设置任务入口函数,栈大小 } //设置进程组和加入进程调度就绪队列 +>>>>>>> remotes/origin/main STATIC UINT32 OsChildSetProcessGroupAndSched(LosProcessCB *child, LosProcessCB *run) { UINT32 intSave; @@ -2163,7 +2856,11 @@ STATIC UINT32 OsChildSetProcessGroupAndSched(LosProcessCB *child, LosProcessCB * (VOID)LOS_MemFree(m_aucSysMem1, pgroup); return LOS_OK; } +<<<<<<< HEAD + +======= // 拷贝进程资源 +>>>>>>> remotes/origin/main STATIC UINT32 OsCopyProcessResources(UINT32 flags, LosProcessCB *child, LosProcessCB *run) { UINT32 ret; @@ -2173,26 +2870,54 @@ STATIC UINT32 OsCopyProcessResources(UINT32 flags, LosProcessCB *child, LosProce return ret; } +<<<<<<< HEAD + ret = OsCopyMM(flags, child, run); +======= ret = OsCopyMM(flags, child, run);//拷贝虚拟空间 +>>>>>>> remotes/origin/main if (ret != LOS_OK) { return ret; } +<<<<<<< HEAD + ret = OsCopyFile(flags, child, run); +======= ret = OsCopyFile(flags, child, run);//拷贝文件信息 +>>>>>>> remotes/origin/main if (ret != LOS_OK) { return ret; } #ifdef LOSCFG_KERNEL_LITEIPC +<<<<<<< HEAD + if (run->ipcInfo != NULL) { + child->ipcInfo = LiteIpcPoolReInit((const ProcIpcInfo *)(run->ipcInfo)); + if (child->ipcInfo == NULL) { + return LOS_ENOMEM; +======= if (run->ipcInfo != NULL) {//重新初始化IPC池 child->ipcInfo = LiteIpcPoolReInit((const ProcIpcInfo *)(run->ipcInfo));//@note_good 将沿用用户态空间地址(即线性区地址) if (child->ipcInfo == NULL) {//因为整个进程虚拟空间都是拷贝的,ipc的用户态虚拟地址当然可以拷贝,但因进程不同了,所以需要重新申请ipc池和重新 return LOS_ENOMEM;//映射池中两个地址. 
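    /* Editor's note: a hypothetical kernel-side caller of the fork path being
     * assembled here (sketch only; "SketchWorker" is an invented name).
     * LOS_Fork(), defined further below, forces CLONE_FILES and funnels into
     * OsCopyProcess(), which runs OsForkInitPCB -> OsCopyProcessResources ->
     * OsChildSetProcessGroupAndSched in that order:
     *
     *     STATIC VOID SketchWorker(VOID) { for (;;) { (VOID)LOS_TaskDelay(100); } }
     *     INT32 pid = LOS_Fork(0, "sketch_worker", (TSK_ENTRY_FUNC)SketchWorker, 0x4000);
     *     if (pid < 0) { PRINT_ERR("fork failed: %d\n", pid); } // negative error code on failure
     */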
+>>>>>>> remotes/origin/main } } #endif #ifdef LOSCFG_SECURITY_CAPABILITY +<<<<<<< HEAD + OsCopyCapability(run, child); +#endif + return LOS_OK; +} + +STATIC INT32 OsCopyProcess(UINT32 flags, const CHAR *name, UINTPTR sp, UINT32 size) +{ + UINT32 ret, processID; + LosProcessCB *run = OsCurrProcessGet(); + + LosProcessCB *child = OsGetFreePCB(); +======= OsCopyCapability(run, child);//拷贝安全能力 #endif return LOS_OK; @@ -2204,6 +2929,7 @@ STATIC INT32 OsCopyProcess(UINT32 flags, const CHAR *name, UINTPTR sp, UINT32 si LosProcessCB *run = OsCurrProcessGet();//获取当前进程 LosProcessCB *child = OsGetFreePCB();//从进程池中申请一个进程控制块,鸿蒙进程池默认64 +>>>>>>> remotes/origin/main if (child == NULL) { return -LOS_EAGAIN; } @@ -2228,24 +2954,42 @@ STATIC INT32 OsCopyProcess(UINT32 flags, const CHAR *name, UINTPTR sp, UINT32 si #endif #endif +<<<<<<< HEAD + ret = OsForkInitPCB(flags, child, name, sp, size); +======= ret = OsForkInitPCB(flags, child, name, sp, size);//初始化进程控制块 +>>>>>>> remotes/origin/main if (ret != LOS_OK) { goto ERROR_INIT; } +<<<<<<< HEAD + ret = OsCopyProcessResources(flags, child, run); +======= ret = OsCopyProcessResources(flags, child, run);//拷贝进程的资源,包括虚拟空间,文件,安全,IPC == +>>>>>>> remotes/origin/main if (ret != LOS_OK) { goto ERROR_TASK; } +<<<<<<< HEAD + ret = OsChildSetProcessGroupAndSched(child, run); +======= ret = OsChildSetProcessGroupAndSched(child, run);//设置进程组和加入进程调度就绪队列 +>>>>>>> remotes/origin/main if (ret != LOS_OK) { goto ERROR_TASK; } +<<<<<<< HEAD + LOS_MpSchedule(OS_MP_CPU_ALL); + if (OS_SCHEDULER_ACTIVE) { + LOS_Schedule(); +======= LOS_MpSchedule(OS_MP_CPU_ALL);//给各CPU发送准备接受调度信号 if (OS_SCHEDULER_ACTIVE) {//当前CPU core处于活动状态 LOS_Schedule();// 申请调度 +>>>>>>> remotes/origin/main } return processID; @@ -2257,6 +3001,8 @@ ERROR_INIT: return -ret; } +<<<<<<< HEAD +======= /*! * @brief OsClone 进程克隆 * @@ -2267,6 +3013,7 @@ ERROR_INIT: * * @see */ +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT INT32 OsClone(UINT32 flags, UINTPTR sp, UINT32 size) { UINT32 cloneFlag = CLONE_PARENT | CLONE_THREAD | SIGCHLD; @@ -2315,6 +3062,10 @@ LITE_OS_SEC_TEXT INT32 OsClone(UINT32 flags, UINTPTR sp, UINT32 size) return OsCopyProcess(cloneFlag & flags, NULL, sp, size); } +<<<<<<< HEAD + +======= +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT INT32 LOS_Fork(UINT32 flags, const CHAR *name, const TSK_ENTRY_FUNC entry, UINT32 stackSize) { UINT32 cloneFlag = CLONE_PARENT | CLONE_THREAD | CLONE_VFORK | CLONE_FILES; @@ -2324,7 +3075,11 @@ LITE_OS_SEC_TEXT INT32 LOS_Fork(UINT32 flags, const CHAR *name, const TSK_ENTRY_ } flags |= CLONE_FILES; +<<<<<<< HEAD + return OsCopyProcess(cloneFlag & flags, name, (UINTPTR)entry, stackSize); +======= return OsCopyProcess(cloneFlag & flags, name, (UINTPTR)entry, stackSize);//拷贝一个进程 +>>>>>>> remotes/origin/main } #else LITE_OS_SEC_TEXT_INIT UINT32 OsUserInitProcess(VOID) @@ -2333,6 +3088,8 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsUserInitProcess(VOID) } #endif +<<<<<<< HEAD +======= /*! 
* @brief LOS_Exit * 进程退出 @@ -2341,6 +3098,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsUserInitProcess(VOID) * * @see */ +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT VOID LOS_Exit(INT32 status) { UINT32 intSave; @@ -2350,7 +3108,11 @@ LITE_OS_SEC_TEXT VOID LOS_Exit(INT32 status) /* The exit of a kernel - state process must be kernel - state and all threads must actively exit */ LosProcessCB *processCB = OsCurrProcessGet(); SCHEDULER_LOCK(intSave); +<<<<<<< HEAD + if (!OsProcessIsUserMode(processCB) && (processCB->threadNumber != 1)) { +======= if (!OsProcessIsUserMode(processCB) && (processCB->threadNumber != 1)) {//内核态下进程的退出方式,必须是所有的任务都退出了 +>>>>>>> remotes/origin/main SCHEDULER_UNLOCK(intSave); PRINT_ERR("Kernel-state processes with multiple threads are not allowed to exit directly\n"); return; @@ -2361,6 +3123,8 @@ LITE_OS_SEC_TEXT VOID LOS_Exit(INT32 status) OsRunningTaskToExit(OsCurrTaskGet(), OS_PRO_EXIT_OK); } +<<<<<<< HEAD +======= /*! * @brief LOS_GetUsedPIDList * 获取使用中的进程列表 @@ -2370,6 +3134,7 @@ LITE_OS_SEC_TEXT VOID LOS_Exit(INT32 status) * * @see */ +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT INT32 LOS_GetUsedPIDList(UINT32 *pidList, INT32 pidMaxNum) { LosProcessCB *pcb = NULL; @@ -2381,6 +3146,15 @@ LITE_OS_SEC_TEXT INT32 LOS_GetUsedPIDList(UINT32 *pidList, INT32 pidMaxNum) return 0; } SCHEDULER_LOCK(intSave); +<<<<<<< HEAD + while (OsProcessIDUserCheckInvalid(pid) == false) { + pcb = OS_PCB_FROM_PID(pid); + pid++; + if (OsProcessIsUnused(pcb)) { + continue; + } + pidList[num] = pcb->processID; +======= while (OsProcessIDUserCheckInvalid(pid) == false) {//遍历进程池 pcb = OS_PCB_FROM_PID(pid); pid++; @@ -2388,6 +3162,7 @@ LITE_OS_SEC_TEXT INT32 LOS_GetUsedPIDList(UINT32 *pidList, INT32 pidMaxNum) continue; } pidList[num] = pcb->processID;//由参数带走 +>>>>>>> remotes/origin/main num++; if (num >= pidMaxNum) { break; @@ -2413,12 +3188,20 @@ LITE_OS_SEC_TEXT struct fd_table_s *LOS_GetFdTable(UINT32 pid) return files->fdt; } #endif +<<<<<<< HEAD + +======= // 获取当前进程的进程ID +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT UINT32 LOS_GetCurrProcessID(VOID) { return OsCurrProcessGet()->processID; } +<<<<<<< HEAD + +======= // 按指定状态退出指定进程 +>>>>>>> remotes/origin/main #ifdef LOSCFG_KERNEL_VM STATIC VOID ThreadGroupActiveTaskKilled(LosTaskCB *taskCB) { @@ -2491,12 +3274,20 @@ LITE_OS_SEC_TEXT VOID OsProcessThreadGroupDestroy(VOID) #endif return; } +<<<<<<< HEAD + +======= // 获取系统支持的最大进程数目 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT UINT32 LOS_GetSystemProcessMaximum(VOID) { return g_processMaxNum; } +<<<<<<< HEAD + +======= // 获取用户态进程的根进程,所有用户进程都是g_processCBArray[g_userInitProcess] fork来的 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT LosProcessCB *OsGetUserInitProcess(VOID) { return &g_processCBArray[OS_USER_ROOT_PROCESS_ID]; @@ -2506,7 +3297,11 @@ LITE_OS_SEC_TEXT LosProcessCB *OsGetKernelInitProcess(VOID) { return &g_processCBArray[OS_KERNEL_ROOT_PROCESS_ID]; } +<<<<<<< HEAD + +======= // 获取空闲进程,0号进程为空闲进程,该进程不干活,专给CPU休息的。 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT LosProcessCB *OsGetIdleProcess(VOID) { return &g_processCBArray[OS_KERNEL_IDLE_PROCESS_ID]; diff --git a/src/kernel_liteos_a/kernel/base/core/los_smp.c b/src/kernel_liteos_a/kernel/base/core/los_smp.c index 560e54fa..8c26e82c 100644 --- a/src/kernel_liteos_a/kernel/base/core/los_smp.c +++ b/src/kernel_liteos_a/kernel/base/core/los_smp.c @@ -40,7 +40,11 @@ #ifdef LOSCFG_KERNEL_SMP STATIC struct SmpOps *g_smpOps = NULL; +<<<<<<< HEAD + +======= /// 多核中次级CPU核初始化,每个核都会调用一次 +>>>>>>> remotes/origin/main STATIC VOID OsSmpSecondaryInit(VOID *arg) { 
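    /* Editor's note on LOS_GetUsedPIDList() above (hypothetical usage, kept in
     * a comment so the surrounding function is untouched): the caller supplies
     * the array, the kernel stores at most pidMaxNum live PIDs into it and
     * reports how many were stored.
     *
     *     UINT32 pids[64];
     *     INT32 n = LOS_GetUsedPIDList(pids, 64);   // n = number of PIDs filled in
     *     for (INT32 i = 0; i < n; i++) {
     *         PRINTK("pid in use: %u\n", pids[i]);
     *     }
     */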
UNUSED(arg); @@ -56,7 +60,11 @@ STATIC VOID OsSmpSecondaryInit(VOID *arg) OsSchedStart(); } +<<<<<<< HEAD + +======= /// 设置多核操作接口, 通过外部注册 +>>>>>>> remotes/origin/main VOID LOS_SmpOpsSet(struct SmpOps *ops) { g_smpOps = ops; diff --git a/src/kernel_liteos_a/kernel/base/core/los_swtmr.c b/src/kernel_liteos_a/kernel/base/core/los_swtmr.c index b648553c..ebd40614 100644 --- a/src/kernel_liteos_a/kernel/base/core/los_swtmr.c +++ b/src/kernel_liteos_a/kernel/base/core/los_swtmr.c @@ -28,6 +28,9 @@ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +<<<<<<< HEAD + +======= /*基本概念 软件定时器,是基于系统Tick时钟中断且由软件来模拟的定时器。当经过设定的Tick数后,会触发用户自定义的回调函数。 硬件定时器受硬件的限制,数量上不足以满足用户的实际需求。因此为了满足用户需求,提供更多的定时器, @@ -84,6 +87,7 @@ 例如:系统多占用一个软件定时器,那么用户能使用的软件定时器资源就会减少一个。 创建单次不自删除属性的定时器,用户需要自行调用定时器删除接口删除定时器,回收定时器资源,避免资源泄露。 软件定时器的定时精度与系统Tick时钟的周期有关。*/ +>>>>>>> remotes/origin/main #include "los_swtmr_pri.h" #include "los_init.h" #include "los_process_pri.h" @@ -106,6 +110,16 @@ LITE_OS_SEC_BSS UINT8 *g_swtmrHandlerPool = NULL; /* Pool of Swtmr Han LITE_OS_SEC_BSS LOS_DL_LIST g_swtmrFreeList; /* Free list of Software Timer */ /* spinlock for swtmr module, only available on SMP mode */ +<<<<<<< HEAD +LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_swtmrSpin); +#define SWTMR_LOCK(state) LOS_SpinLockSave(&g_swtmrSpin, &(state)) +#define SWTMR_UNLOCK(state) LOS_SpinUnlockRestore(&g_swtmrSpin, (state)) + +typedef struct { + SortLinkAttribute swtmrSortLink; + LosTaskCB *swtmrTask; /* software timer task id */ + LOS_DL_LIST swtmrHandlerQueue; /* software timer timeout queue id */ +======= LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_swtmrSpin);//初始化软件钟自旋锁,只有SMP情况才需要,只要是自旋锁都是由于CPU多核的同步 #define SWTMR_LOCK(state) LOS_SpinLockSave(&g_swtmrSpin, &(state))//持有软时钟自旋锁 #define SWTMR_UNLOCK(state) LOS_SpinUnlockRestore(&g_swtmrSpin, (state))//释放软时钟自旋锁 @@ -114,6 +128,7 @@ typedef struct { SortLinkAttribute swtmrSortLink; LosTaskCB *swtmrTask; /* software timer task id *///定时器任务ID LOS_DL_LIST swtmrHandlerQueue; /* software timer timeout queue id *///定时器超时队列 +>>>>>>> remotes/origin/main } SwtmrRunqueue; STATIC SwtmrRunqueue g_swtmrRunqueue[LOSCFG_KERNEL_CORE_NUM]; @@ -309,7 +324,11 @@ STATIC INLINE VOID ScanSwtmrTimeList(SwtmrRunqueue *srq) LOS_SpinUnlockRestore(&swtmrSortLink->spinLock, intSave); return; } +<<<<<<< HEAD + +======= //软时钟的入口函数,拥有任务的最高优先级0级 +>>>>>>> remotes/origin/main STATIC VOID SwtmrTask(VOID) { SwtmrHandlerItem swtmrHandle; @@ -318,7 +337,11 @@ STATIC VOID SwtmrTask(VOID) SwtmrRunqueue *srq = &g_swtmrRunqueue[ArchCurrCpuid()]; LOS_DL_LIST *head = &srq->swtmrHandlerQueue; +<<<<<<< HEAD + for (;;) { +======= for (;;) {//死循环获取队列item,一直读干净为止 +>>>>>>> remotes/origin/main waitTime = OsSortLinkGetNextExpireTime(OsGetCurrSchedTimeCycle(), &srq->swtmrSortLink); if (waitTime != 0) { SCHEDULER_LOCK(intSave); @@ -334,18 +357,39 @@ STATIC VOID SwtmrTask(VOID) LOS_ListDelete(&swtmrHandlePtr->node); (VOID)memcpy_s(&swtmrHandle, sizeof(SwtmrHandlerItem), swtmrHandlePtr, sizeof(SwtmrHandlerItem)); +<<<<<<< HEAD + (VOID)LOS_MemboxFree(g_swtmrHandlerPool, swtmrHandlePtr); +======= (VOID)LOS_MemboxFree(g_swtmrHandlerPool, swtmrHandlePtr);//静态释放内存,注意在鸿蒙内核只有软时钟注册用到了静态内存 +>>>>>>> remotes/origin/main SwtmrHandler(&swtmrHandle); } } } +<<<<<<< HEAD +======= //创建软时钟任务,每个cpu core都可以拥有自己的软时钟任务 +>>>>>>> remotes/origin/main STATIC UINT32 SwtmrTaskCreate(UINT16 cpuid, UINT32 *swtmrTaskID) { UINT32 ret; TSK_INIT_PARAM_S swtmrTask; +<<<<<<< HEAD + (VOID)memset_s(&swtmrTask, sizeof(TSK_INIT_PARAM_S), 0, 
sizeof(TSK_INIT_PARAM_S)); + swtmrTask.pfnTaskEntry = (TSK_ENTRY_FUNC)SwtmrTask; + swtmrTask.uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE; + swtmrTask.pcName = "Swt_Task"; + swtmrTask.usTaskPrio = 0; + swtmrTask.uwResved = LOS_TASK_STATUS_DETACHED; +#ifdef LOSCFG_KERNEL_SMP + swtmrTask.usCpuAffiMask = CPUID_TO_AFFI_MASK(cpuid); +#endif + ret = LOS_TaskCreate(swtmrTaskID, &swtmrTask); + if (ret == LOS_OK) { + OS_TCB_FROM_TID(*swtmrTaskID)->taskStatus |= OS_TASK_FLAG_SYSTEM_TASK; +======= (VOID)memset_s(&swtmrTask, sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));//清0 swtmrTask.pfnTaskEntry = (TSK_ENTRY_FUNC)SwtmrTask;//入口函数 swtmrTask.uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE;//16k默认内核任务栈 @@ -358,6 +402,7 @@ STATIC UINT32 SwtmrTaskCreate(UINT16 cpuid, UINT32 *swtmrTaskID) ret = LOS_TaskCreate(swtmrTaskID, &swtmrTask);//创建任务并申请调度 if (ret == LOS_OK) { OS_TCB_FROM_TID(*swtmrTaskID)->taskStatus |= OS_TASK_FLAG_SYSTEM_TASK;//告知这是个任务系统 +>>>>>>> remotes/origin/main } return ret; @@ -375,6 +420,18 @@ BOOL OsIsSwtmrTask(const LosTaskCB *taskCB) } return FALSE; } +<<<<<<< HEAD + +LITE_OS_SEC_TEXT_INIT VOID OsSwtmrRecycle(UINTPTR ownerID) +{ + for (UINT16 index = 0; index < LOSCFG_BASE_CORE_SWTMR_LIMIT; index++) { + if (g_swtmrCBArray[index].uwOwnerPid == ownerID) { + LOS_SwtmrDelete(index); + } + } +} + +======= //回收指定进程的软时钟 LITE_OS_SEC_TEXT_INIT VOID OsSwtmrRecycle(UINTPTR ownerID) { @@ -385,6 +442,7 @@ LITE_OS_SEC_TEXT_INIT VOID OsSwtmrRecycle(UINTPTR ownerID) } } //软时钟初始化,注意函数在多CPU情况下会执行多次 +>>>>>>> remotes/origin/main STATIC UINT32 SwtmrBaseInit(VOID) { UINT32 ret; @@ -394,6 +452,17 @@ STATIC UINT32 SwtmrBaseInit(VOID) return LOS_ERRNO_SWTMR_NO_MEMORY; } +<<<<<<< HEAD + (VOID)memset_s(swtmr, size, 0, size); + g_swtmrCBArray = swtmr; + LOS_ListInit(&g_swtmrFreeList); + for (UINT16 index = 0; index < LOSCFG_BASE_CORE_SWTMR_LIMIT; index++, swtmr++) { + swtmr->usTimerID = index; + LOS_ListTailInsert(&g_swtmrFreeList, &swtmr->stSortList.sortLinkNode); + } + + size = LOS_MEMBOX_SIZE(sizeof(SwtmrHandlerItem), OS_SWTMR_HANDLE_QUEUE_SIZE); +======= (VOID)memset_s(swtmr, size, 0, size);//清0 g_swtmrCBArray = swtmr;//软时钟 LOS_ListInit(&g_swtmrFreeList);//初始化空间链表 @@ -403,6 +472,7 @@ STATIC UINT32 SwtmrBaseInit(VOID) } //想要用静态内存池管理,就必须要使用LOS_MEMBOX_SIZE来计算申请列表的内存大小,因为需要点前缀内存承载头部信息 size = LOS_MEMBOX_SIZE(sizeof(SwtmrHandlerItem), OS_SWTMR_HANDLE_QUEUE_SIZE);//规划一片内存区域作为软时钟处理函数的静态内存池 +>>>>>>> remotes/origin/main g_swtmrHandlerPool = (UINT8 *)LOS_MemAlloc(m_aucSysMem1, size); /* system resident resource */ if (g_swtmrHandlerPool == NULL) { return LOS_ERRNO_SWTMR_NO_MEMORY; @@ -530,6 +600,16 @@ STATIC UINT64 SwtmrToStart(SWTMR_CTRL_S *swtmr, UINT16 cpuid) if ((swtmr->uwOverrun == 0) && ((swtmr->ucMode == LOS_SWTMR_MODE_ONCE) || (swtmr->ucMode == LOS_SWTMR_MODE_OPP) || +<<<<<<< HEAD + (swtmr->ucMode == LOS_SWTMR_MODE_NO_SELFDELETE))) { + ticks = swtmr->uwExpiry; + } else { + ticks = swtmr->uwInterval; + } + swtmr->ucState = OS_SWTMR_STATUS_TICKING; + + UINT64 period = (UINT64)ticks * OS_CYCLE_PER_TICK; +======= (swtmr->ucMode == LOS_SWTMR_MODE_NO_SELFDELETE))) {//如果是一次性的定时器 ticks = swtmr->uwExpiry;//获取定时间隔 } else { @@ -538,6 +618,7 @@ STATIC UINT64 SwtmrToStart(SWTMR_CTRL_S *swtmr, UINT16 cpuid) swtmr->ucState = OS_SWTMR_STATUS_TICKING;//获取周期性定时器时间间隔 UINT64 period = (UINT64)ticks * OS_CYCLE_PER_TICK;//计数状态 +>>>>>>> remotes/origin/main UINT64 responseTime = swtmr->startTime + period; UINT64 currTime = OsGetCurrSchedTimeCycle(); if (responseTime < currTime) { @@ -578,7 +659,11 @@ STATIC INLINE 
VOID SwtmrStart(SWTMR_CTRL_S *swtmr) STATIC INLINE VOID SwtmrDelete(SWTMR_CTRL_S *swtmr) { /* insert to free list */ +<<<<<<< HEAD + LOS_ListTailInsert(&g_swtmrFreeList, &swtmr->stSortList.sortLinkNode); +======= LOS_ListTailInsert(&g_swtmrFreeList, &swtmr->stSortList.sortLinkNode);//直接插入空闲链表中,回收再利用 +>>>>>>> remotes/origin/main swtmr->ucState = OS_SWTMR_STATUS_UNUSED; swtmr->uwOwnerPid = OS_INVALID_VALUE; @@ -697,7 +782,11 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsSwtmrTimeGet(const SWTMR_CTRL_S *swtmr) } return (UINT32)time; } +<<<<<<< HEAD + +======= //创建定时器,设置定时器的定时时长、定时模式、回调函数、并返回定时器ID +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_INIT UINT32 LOS_SwtmrCreate(UINT32 interval, UINT8 mode, SWTMR_PROC_FUNC handler, @@ -726,7 +815,11 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SwtmrCreate(UINT32 interval, } SWTMR_LOCK(intSave); +<<<<<<< HEAD + if (LOS_ListEmpty(&g_swtmrFreeList)) { +======= if (LOS_ListEmpty(&g_swtmrFreeList)) {//空闲列表不能为空 +>>>>>>> remotes/origin/main SWTMR_UNLOCK(intSave); return LOS_ERRNO_SWTMR_MAXSIZE; } @@ -737,6 +830,15 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SwtmrCreate(UINT32 interval, SWTMR_UNLOCK(intSave); swtmr->uwOwnerPid = (UINTPTR)OsCurrProcessGet(); +<<<<<<< HEAD + swtmr->pfnHandler = handler; + swtmr->ucMode = mode; + swtmr->uwOverrun = 0; + swtmr->uwInterval = interval; + swtmr->uwExpiry = interval; + swtmr->uwArg = arg; + swtmr->ucState = OS_SWTMR_STATUS_CREATED; +======= swtmr->pfnHandler = handler;//时间到了的回调函数 swtmr->ucMode = mode;//定时模式 swtmr->uwOverrun = 0; @@ -744,12 +846,17 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SwtmrCreate(UINT32 interval, swtmr->uwExpiry = interval;//一次性超时间隔 swtmr->uwArg = arg;//回调函数的参数 swtmr->ucState = OS_SWTMR_STATUS_CREATED;//已创建状态 +>>>>>>> remotes/origin/main SET_SORTLIST_VALUE(&swtmr->stSortList, OS_SORT_LINK_INVALID_TIME); *swtmrID = swtmr->usTimerID; OsHookCall(LOS_HOOK_TYPE_SWTMR_CREATE, swtmr); return LOS_OK; } +<<<<<<< HEAD + +======= //接口函数 启动定时器 参数定时任务ID +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStart(UINT16 swtmrID) { SWTMR_CTRL_S *swtmr = NULL; @@ -761,15 +868,37 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStart(UINT16 swtmrID) return LOS_ERRNO_SWTMR_ID_INVALID; } +<<<<<<< HEAD + swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT; + swtmr = g_swtmrCBArray + swtmrCBID; + + SWTMR_LOCK(intSave); + if (swtmr->usTimerID != swtmrID) { +======= swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;//取模 swtmr = g_swtmrCBArray + swtmrCBID;//获取定时器控制结构体 SWTMR_LOCK(intSave); if (swtmr->usTimerID != swtmrID) {//ID必须一样 +>>>>>>> remotes/origin/main SWTMR_UNLOCK(intSave); return LOS_ERRNO_SWTMR_ID_INVALID; } +<<<<<<< HEAD + switch (swtmr->ucState) { + case OS_SWTMR_STATUS_UNUSED: + ret = LOS_ERRNO_SWTMR_NOT_CREATED; + break; + /* + * If the status of swtmr is timing, it should stop the swtmr first, + * then start the swtmr again. 
+ */ + case OS_SWTMR_STATUS_TICKING: + SwtmrStop(swtmr); + /* fall-through */ + case OS_SWTMR_STATUS_CREATED: +======= switch (swtmr->ucState) {//判断定时器状态 case OS_SWTMR_STATUS_UNUSED: ret = LOS_ERRNO_SWTMR_NOT_CREATED; @@ -782,6 +911,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStart(UINT16 swtmrID) SwtmrStop(swtmr);//先停止定时器,注意这里没有break;在OsSwtmrStop中状态将会回到了OS_SWTMR_STATUS_CRWEATED接下来就是执行启动了 /* fall-through */ case OS_SWTMR_STATUS_CREATED://已经创建好了 +>>>>>>> remotes/origin/main SwtmrStart(swtmr); break; default: @@ -793,7 +923,11 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStart(UINT16 swtmrID) OsHookCall(LOS_HOOK_TYPE_SWTMR_START, swtmr); return ret; } +<<<<<<< HEAD + +======= //接口函数 停止计时器 参加定时任务ID +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStop(UINT16 swtmrID) { SWTMR_CTRL_S *swtmr = NULL; @@ -805,15 +939,34 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStop(UINT16 swtmrID) return LOS_ERRNO_SWTMR_ID_INVALID; } +<<<<<<< HEAD + swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT; + swtmr = g_swtmrCBArray + swtmrCBID; + SWTMR_LOCK(intSave); + + if (swtmr->usTimerID != swtmrID) { +======= swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;//取模 swtmr = g_swtmrCBArray + swtmrCBID;//获取定时器控制结构体 SWTMR_LOCK(intSave); if (swtmr->usTimerID != swtmrID) {//ID必须一样 +>>>>>>> remotes/origin/main SWTMR_UNLOCK(intSave); return LOS_ERRNO_SWTMR_ID_INVALID; } +<<<<<<< HEAD + switch (swtmr->ucState) { + case OS_SWTMR_STATUS_UNUSED: + ret = LOS_ERRNO_SWTMR_NOT_CREATED; + break; + case OS_SWTMR_STATUS_CREATED: + ret = LOS_ERRNO_SWTMR_NOT_STARTED; + break; + case OS_SWTMR_STATUS_TICKING: + SwtmrStop(swtmr); +======= switch (swtmr->ucState) {//判断定时器状态 case OS_SWTMR_STATUS_UNUSED: ret = LOS_ERRNO_SWTMR_NOT_CREATED;//返回没有创建 @@ -823,6 +976,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStop(UINT16 swtmrID) break; case OS_SWTMR_STATUS_TICKING://正在计数 SwtmrStop(swtmr);//执行正在停止计时器操作 +>>>>>>> remotes/origin/main break; default: ret = LOS_ERRNO_SWTMR_STATUS_INVALID; @@ -833,7 +987,11 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrStop(UINT16 swtmrID) OsHookCall(LOS_HOOK_TYPE_SWTMR_STOP, swtmr); return ret; } +<<<<<<< HEAD + +======= //接口函数 获得软件定时器剩余Tick数 通过 *tick 带走 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT UINT32 LOS_SwtmrTimeGet(UINT16 swtmrID, UINT32 *tick) { SWTMR_CTRL_S *swtmr = NULL; @@ -849,11 +1007,19 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrTimeGet(UINT16 swtmrID, UINT32 *tick) return LOS_ERRNO_SWTMR_TICK_PTR_NULL; } +<<<<<<< HEAD + swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT; + swtmr = g_swtmrCBArray + swtmrCBID; + SWTMR_LOCK(intSave); + + if (swtmr->usTimerID != swtmrID) { +======= swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;//取模 swtmr = g_swtmrCBArray + swtmrCBID;//获取定时器控制结构体 SWTMR_LOCK(intSave); if (swtmr->usTimerID != swtmrID) {//ID必须一样 +>>>>>>> remotes/origin/main SWTMR_UNLOCK(intSave); return LOS_ERRNO_SWTMR_ID_INVALID; } @@ -864,8 +1030,13 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrTimeGet(UINT16 swtmrID, UINT32 *tick) case OS_SWTMR_STATUS_CREATED: ret = LOS_ERRNO_SWTMR_NOT_STARTED; break; +<<<<<<< HEAD + case OS_SWTMR_STATUS_TICKING: + *tick = OsSwtmrTimeGet(swtmr); +======= case OS_SWTMR_STATUS_TICKING://正在计数的定时器 *tick = OsSwtmrTimeGet(swtmr);//获取 +>>>>>>> remotes/origin/main break; default: ret = LOS_ERRNO_SWTMR_STATUS_INVALID; @@ -874,7 +1045,11 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrTimeGet(UINT16 swtmrID, UINT32 *tick) SWTMR_UNLOCK(intSave); return ret; } +<<<<<<< HEAD + +======= //接口函数 删除定时器 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT UINT32 LOS_SwtmrDelete(UINT16 swtmrID) { SWTMR_CTRL_S *swtmr = NULL; @@ -886,11 +1061,19 @@ 
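/* Editor's note: a hypothetical end-to-end use of the swtmr interfaces above
 * (create/start/time-get/stop) and LOS_SwtmrDelete() below. Illustrative
 * sketch only -- "SketchTimerCb"/"SketchSwtmrDemo" are invented names and the
 * public los_swtmr.h prototypes are assumed -- but it walks the state machine
 * that the switch statements encode: CREATED -> TICKING -> CREATED -> free list. */
STATIC VOID SketchTimerCb(UINTPTR arg)
{
    PRINTK("swtmr fired, arg=%u\n", (UINT32)arg); /* runs in the per-core swtmr task */
}

STATIC VOID SketchSwtmrDemo(VOID)
{
    UINT16 id;
    UINT32 ticksLeft;
    /* 100-tick periodic timer; the timer ID comes back through the out-parameter */
    if (LOS_SwtmrCreate(100, LOS_SWTMR_MODE_PERIOD, SketchTimerCb, &id, 0) != LOS_OK) {
        return;
    }
    (VOID)LOS_SwtmrStart(id);               /* CREATED -> TICKING */
    (VOID)LOS_SwtmrTimeGet(id, &ticksLeft); /* only meaningful while TICKING */
    (VOID)LOS_SwtmrStop(id);                /* TICKING -> CREATED */
    (VOID)LOS_SwtmrDelete(id);              /* control block returns to g_swtmrFreeList */
}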
LITE_OS_SEC_TEXT UINT32 LOS_SwtmrDelete(UINT16 swtmrID) return LOS_ERRNO_SWTMR_ID_INVALID; } +<<<<<<< HEAD + swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT; + swtmr = g_swtmrCBArray + swtmrCBID; + SWTMR_LOCK(intSave); + + if (swtmr->usTimerID != swtmrID) { +======= swtmrCBID = swtmrID % LOSCFG_BASE_CORE_SWTMR_LIMIT;//取模 swtmr = g_swtmrCBArray + swtmrCBID;//获取定时器控制结构体 SWTMR_LOCK(intSave); if (swtmr->usTimerID != swtmrID) {//ID必须一样 +>>>>>>> remotes/origin/main SWTMR_UNLOCK(intSave); return LOS_ERRNO_SWTMR_ID_INVALID; } @@ -899,10 +1082,17 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrDelete(UINT16 swtmrID) case OS_SWTMR_STATUS_UNUSED: ret = LOS_ERRNO_SWTMR_NOT_CREATED; break; +<<<<<<< HEAD + case OS_SWTMR_STATUS_TICKING: + SwtmrStop(swtmr); + /* fall-through */ + case OS_SWTMR_STATUS_CREATED: +======= case OS_SWTMR_STATUS_TICKING://正在计数就先停止在删除,这里没有break SwtmrStop(swtmr); /* fall-through */ case OS_SWTMR_STATUS_CREATED://再删除定时器 +>>>>>>> remotes/origin/main SwtmrDelete(swtmr); break; default: @@ -915,4 +1105,8 @@ LITE_OS_SEC_TEXT UINT32 LOS_SwtmrDelete(UINT16 swtmrID) return ret; } -#endif /* LOSCFG_BASE_CORE_SWTMR_ENABLE */ \ No newline at end of file +<<<<<<< HEAD +#endif /* LOSCFG_BASE_CORE_SWTMR_ENABLE */ +======= +#endif /* LOSCFG_BASE_CORE_SWTMR_ENABLE */ +>>>>>>> remotes/origin/main diff --git a/src/kernel_liteos_a/kernel/base/core/los_task.c b/src/kernel_liteos_a/kernel/base/core/los_task.c index d0091651..7d78a1d7 100644 --- a/src/kernel_liteos_a/kernel/base/core/los_task.c +++ b/src/kernel_liteos_a/kernel/base/core/los_task.c @@ -68,6 +68,15 @@ #if (LOSCFG_BASE_CORE_TSK_LIMIT <= 0) #error "task maxnum cannot be zero" #endif /* LOSCFG_BASE_CORE_TSK_LIMIT <= 0 */ +<<<<<<< HEAD + +LITE_OS_SEC_BSS LosTaskCB *g_taskCBArray; +LITE_OS_SEC_BSS LOS_DL_LIST g_losFreeTask; +LITE_OS_SEC_BSS LOS_DL_LIST g_taskRecycleList; +LITE_OS_SEC_BSS UINT32 g_taskMaxNum; +LITE_OS_SEC_BSS UINT32 g_taskScheduled; /* one bit for each cores */ +LITE_OS_SEC_BSS EVENT_CB_S g_resourceEvent; +======= /* 基本概念 从系统角度看,任务是竞争系统资源的最小运行单元。任务可以使用或等待CPU、 @@ -145,6 +154,7 @@ LITE_OS_SEC_BSS LOS_DL_LIST g_taskRecycleList;//回收任务链表 LITE_OS_SEC_BSS UINT32 g_taskMaxNum;//任务最大个数 LITE_OS_SEC_BSS UINT32 g_taskScheduled; /* one bit for each cores */ LITE_OS_SEC_BSS EVENT_CB_S g_resourceEvent;//资源的事件 +>>>>>>> remotes/origin/main /* spinlock for task module, only available on SMP mode */ LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_taskSpin); @@ -152,7 +162,11 @@ STATIC VOID OsConsoleIDSetHook(UINT32 param1, UINT32 param2) __attribute__((weakref("OsSetConsoleID"))); /* temp task blocks for booting procedure */ +<<<<<<< HEAD +LITE_OS_SEC_BSS STATIC LosTaskCB g_mainTask[LOSCFG_KERNEL_CORE_NUM]; +======= LITE_OS_SEC_BSS STATIC LosTaskCB g_mainTask[LOSCFG_KERNEL_CORE_NUM];//启动引导过程中使用的临时任务 +>>>>>>> remotes/origin/main LosTaskCB *OsGetMainTask(VOID) { @@ -162,23 +176,38 @@ LosTaskCB *OsGetMainTask(VOID) VOID OsSetMainTask(VOID) { UINT32 i; +<<<<<<< HEAD + CHAR *name = "osMain"; +======= CHAR *name = "osMain";//任务名称 +>>>>>>> remotes/origin/main SchedParam schedParam = { 0 }; schedParam.policy = LOS_SCHED_RR; schedParam.basePrio = OS_PROCESS_PRIORITY_HIGHEST; schedParam.priority = OS_TASK_PRIORITY_LOWEST; +<<<<<<< HEAD + + for (i = 0; i < LOSCFG_KERNEL_CORE_NUM; i++) { + g_mainTask[i].taskStatus = OS_TASK_STATUS_UNUSED; + g_mainTask[i].taskID = LOSCFG_BASE_CORE_TSK_LIMIT; +======= //为每个CPU core 设置mainTask for (i = 0; i < LOSCFG_KERNEL_CORE_NUM; i++) { g_mainTask[i].taskStatus = OS_TASK_STATUS_UNUSED; g_mainTask[i].taskID = LOSCFG_BASE_CORE_TSK_LIMIT;//128 
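    /* Editor's note (illustrative): the globals declared above make the task
     * pool a flat array, so a task ID is simply an index into g_taskCBArray and
     * OS_TCB_FROM_TID(tid) amounts to a bounds-trusted version of:
     *
     *     LosTaskCB *tcb = (tid < g_taskMaxNum) ? &g_taskCBArray[tid] : NULL;
     *
     * The pool holds g_taskMaxNum + 1 entries; the extra entry at index
     * g_taskMaxNum is reserved as the default TCB handed out by
     * OsGetDefaultTaskCB() further below.
     */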
+>>>>>>> remotes/origin/main g_mainTask[i].processCB = OS_KERNEL_PROCESS_GROUP; #ifdef LOSCFG_KERNEL_SMP_LOCKDEP g_mainTask[i].lockDep.lockDepth = 0; g_mainTask[i].lockDep.waitLock = NULL; #endif (VOID)strncpy_s(g_mainTask[i].taskName, OS_TCB_NAME_LEN, name, OS_TCB_NAME_LEN - 1); +<<<<<<< HEAD + LOS_ListInit(&g_mainTask[i].lockList); +======= LOS_ListInit(&g_mainTask[i].lockList);//初始化任务锁链表,上面挂的是任务已申请到的互斥锁 +>>>>>>> remotes/origin/main (VOID)OsSchedParamInit(&g_mainTask[i], schedParam.policy, &schedParam, NULL); } } @@ -192,16 +221,41 @@ VOID OsSetMainTaskProcess(UINTPTR processCB) #endif } } +<<<<<<< HEAD + +LITE_OS_SEC_TEXT WEAK VOID OsIdleTask(VOID) +{ + while (1) { + WFI; +======= //空闲任务,每个CPU都有自己的空闲任务 LITE_OS_SEC_TEXT WEAK VOID OsIdleTask(VOID) { while (1) {//只有一个死循环 WFI;//WFI指令:arm core立即进入low-power standly state,进入休眠模式,等待中断 +>>>>>>> remotes/origin/main } } VOID OsTaskInsertToRecycleList(LosTaskCB *taskCB) { +<<<<<<< HEAD + LOS_ListTailInsert(&g_taskRecycleList, &taskCB->pendList); +} + +LITE_OS_SEC_TEXT_INIT VOID OsTaskJoinPostUnsafe(LosTaskCB *taskCB) +{ + if (taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) { + if (!LOS_ListEmpty(&taskCB->joinList)) { + LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(taskCB->joinList))); + OsTaskWakeClearPendMask(resumedTask); + resumedTask->ops->wake(resumedTask); + } + } + taskCB->taskStatus |= OS_TASK_STATUS_EXIT; +} + +======= LOS_ListTailInsert(&g_taskRecycleList, &taskCB->pendList);//将任务挂入回收链表,等待回收 } /* @@ -220,6 +274,7 @@ LITE_OS_SEC_TEXT_INIT VOID OsTaskJoinPostUnsafe(LosTaskCB *taskCB) taskCB->taskStatus |= OS_TASK_STATUS_EXIT;//贴上任务退出标签 } //挂起任务,任务进入等待链表,Join代表是支持通过的第一个任务去唤醒其他的任务 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT UINT32 OsTaskJoinPendUnsafe(LosTaskCB *taskCB) { if (taskCB->taskStatus & OS_TASK_STATUS_INIT) { @@ -238,6 +293,15 @@ LITE_OS_SEC_TEXT UINT32 OsTaskJoinPendUnsafe(LosTaskCB *taskCB) return LOS_EINVAL; } +<<<<<<< HEAD + +LITE_OS_SEC_TEXT UINT32 OsTaskSetDetachUnsafe(LosTaskCB *taskCB) +{ + if (taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) { + if (LOS_ListEmpty(&(taskCB->joinList))) { + LOS_ListDelete(&(taskCB->joinList)); + taskCB->taskStatus &= ~OS_TASK_FLAG_PTHREAD_JOIN; +======= //任务设置分离模式Deatch和JOIN是一对有你没我的状态 LITE_OS_SEC_TEXT UINT32 OsTaskSetDetachUnsafe(LosTaskCB *taskCB) { @@ -245,6 +309,7 @@ LITE_OS_SEC_TEXT UINT32 OsTaskSetDetachUnsafe(LosTaskCB *taskCB) if (LOS_ListEmpty(&(taskCB->joinList))) {//joinlist中没有数据了 LOS_ListDelete(&(taskCB->joinList));//所谓删除就是自己指向自己 taskCB->taskStatus &= ~OS_TASK_FLAG_PTHREAD_JOIN;//去掉JOIN标签 +>>>>>>> remotes/origin/main return LOS_OK; } /* This error code has a special purpose and is not allowed to appear again on the interface */ @@ -254,26 +319,48 @@ LITE_OS_SEC_TEXT UINT32 OsTaskSetDetachUnsafe(LosTaskCB *taskCB) return LOS_EINVAL; } +<<<<<<< HEAD +======= //初始化任务模块 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_INIT UINT32 OsTaskInit(UINTPTR processCB) { UINT32 index; UINT32 size; UINT32 ret; +<<<<<<< HEAD + g_taskMaxNum = LOSCFG_BASE_CORE_TSK_LIMIT; + size = (g_taskMaxNum + 1) * sizeof(LosTaskCB); +======= g_taskMaxNum = LOSCFG_BASE_CORE_TSK_LIMIT;//任务池中最多默认128个,可谓铁打的任务池流水的线程 size = (g_taskMaxNum + 1) * sizeof(LosTaskCB);//计算需分配内存总大小 +>>>>>>> remotes/origin/main /* * This memory is resident memory and is used to save the system resources * of task control block and will not be freed. 
*/ +<<<<<<< HEAD + g_taskCBArray = (LosTaskCB *)LOS_MemAlloc(m_aucSysMem0, size); +======= g_taskCBArray = (LosTaskCB *)LOS_MemAlloc(m_aucSysMem0, size);//任务池常驻内存,不被释放 +>>>>>>> remotes/origin/main if (g_taskCBArray == NULL) { ret = LOS_ERRNO_TSK_NO_MEMORY; goto EXIT; } (VOID)memset_s(g_taskCBArray, size, 0, size); +<<<<<<< HEAD + LOS_ListInit(&g_losFreeTask); + LOS_ListInit(&g_taskRecycleList); + for (index = 0; index < g_taskMaxNum; index++) { + g_taskCBArray[index].taskStatus = OS_TASK_STATUS_UNUSED; + g_taskCBArray[index].taskID = index; + g_taskCBArray[index].processCB = processCB; + LOS_ListTailInsert(&g_losFreeTask, &g_taskCBArray[index].pendList); + } +======= LOS_ListInit(&g_losFreeTask);//初始化空闲任务链表 LOS_ListInit(&g_taskRecycleList);//初始化回收任务链表 for (index = 0; index < g_taskMaxNum; index++) {//任务挨个初始化 @@ -282,12 +369,17 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsTaskInit(UINTPTR processCB) g_taskCBArray[index].processCB = processCB; LOS_ListTailInsert(&g_losFreeTask, &g_taskCBArray[index].pendList);//通过pendlist节点插入空闲任务列表 }//注意:这里挂的是pendList节点,可以取TCB也要通过OS_TCB_FROM-PENDLIST取 +>>>>>>> remotes/origin/main g_taskCBArray[index].taskStatus = OS_TASK_STATUS_UNUSED; g_taskCBArray[index].taskID = index; g_taskCBArray[index].processCB = processCB; +<<<<<<< HEAD + ret = OsSchedInit(); +======= ret = OsSchedInit();//调度器初始化 +>>>>>>> remotes/origin/main EXIT: if (ret != LOS_OK) { @@ -295,18 +387,37 @@ EXIT: } return ret; } +<<<<<<< HEAD + +======= //获取IdletaskId,每个CPU核都对Task进行了内部管理,做到真正的并行处理 +>>>>>>> remotes/origin/main UINT32 OsGetIdleTaskId(VOID) { return OsSchedRunqueueIdleGet()->taskID; } +<<<<<<< HEAD + +======= //创建一个空闲任务 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_INIT UINT32 OsIdleTaskCreate(UINTPTR processID) { UINT32 ret; TSK_INIT_PARAM_S taskInitParam; UINT32 idleTaskID; +<<<<<<< HEAD + (VOID)memset_s((VOID *)(&taskInitParam), sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S)); + taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)OsIdleTask; + taskInitParam.uwStackSize = LOSCFG_BASE_CORE_TSK_IDLE_STACK_SIZE; + taskInitParam.pcName = "Idle"; + taskInitParam.policy = LOS_SCHED_IDLE; + taskInitParam.usTaskPrio = OS_TASK_PRIORITY_LOWEST; + taskInitParam.processID = processID; +#ifdef LOSCFG_KERNEL_SMP + taskInitParam.usCpuAffiMask = CPUID_TO_AFFI_MASK(ArchCurrCpuid()); +======= (VOID)memset_s((VOID *)(&taskInitParam), sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));//任务初始参数清零 taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)OsIdleTask;//入口函数 taskInitParam.uwStackSize = LOSCFG_BASE_CORE_TSK_IDLE_STACK_SIZE;//任务栈大小 2K @@ -316,20 +427,29 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsIdleTaskCreate(UINTPTR processID) taskInitParam.processID = processID; #ifdef LOSCFG_KERNEL_SMP taskInitParam.usCpuAffiMask = CPUID_TO_AFFI_MASK(ArchCurrCpuid());//每个idle任务只在单独的CPU上运行 +>>>>>>> remotes/origin/main #endif ret = LOS_TaskCreateOnly(&idleTaskID, &taskInitParam); if (ret != LOS_OK) { return ret; } LosTaskCB *idleTask = OS_TCB_FROM_TID(idleTaskID); +<<<<<<< HEAD + idleTask->taskStatus |= OS_TASK_FLAG_SYSTEM_TASK; +======= idleTask->taskStatus |= OS_TASK_FLAG_SYSTEM_TASK;//标记为系统任务,idle任务是给CPU休息用的,当然是个系统任务 +>>>>>>> remotes/origin/main OsSchedRunqueueIdleInit(idleTask); return LOS_TaskResume(idleTaskID); } /* +<<<<<<< HEAD + * Description : get id of current running task. +======= * Description : get id of current running task. 
|获取当前CPU正在执行的任务ID +>>>>>>> remotes/origin/main * Return : task id */ LITE_OS_SEC_TEXT UINT32 LOS_CurTaskIDGet(VOID) @@ -341,7 +461,11 @@ LITE_OS_SEC_TEXT UINT32 LOS_CurTaskIDGet(VOID) } return runTask->taskID; } +<<<<<<< HEAD + +======= //创建指定任务同步信号量 +>>>>>>> remotes/origin/main STATIC INLINE UINT32 TaskSyncCreate(LosTaskCB *taskCB) { #ifdef LOSCFG_KERNEL_SMP_TASK_SYNC @@ -354,7 +478,11 @@ STATIC INLINE UINT32 TaskSyncCreate(LosTaskCB *taskCB) #endif return LOS_OK; } +<<<<<<< HEAD + +======= //销毁指定任务同步信号量 +>>>>>>> remotes/origin/main STATIC INLINE VOID OsTaskSyncDestroy(UINT32 syncSignal) { #ifdef LOSCFG_KERNEL_SMP_TASK_SYNC @@ -365,7 +493,10 @@ STATIC INLINE VOID OsTaskSyncDestroy(UINT32 syncSignal) } #ifdef LOSCFG_KERNEL_SMP +<<<<<<< HEAD +======= //任务同步等待,通过信号量保持同步 +>>>>>>> remotes/origin/main STATIC INLINE UINT32 OsTaskSyncWait(const LosTaskCB *taskCB) { #ifdef LOSCFG_KERNEL_SMP_TASK_SYNC @@ -391,7 +522,11 @@ STATIC INLINE UINT32 OsTaskSyncWait(const LosTaskCB *taskCB) #endif } #endif +<<<<<<< HEAD + +======= //同步唤醒 +>>>>>>> remotes/origin/main STATIC INLINE VOID OsTaskSyncWake(const LosTaskCB *taskCB) { #ifdef LOSCFG_KERNEL_SMP_TASK_SYNC @@ -413,6 +548,16 @@ STATIC INLINE VOID OsInsertTCBToFreeList(LosTaskCB *taskCB) taskCB->taskStatus = OS_TASK_STATUS_UNUSED; LOS_ListAdd(&g_losFreeTask, &taskCB->pendList); } +<<<<<<< HEAD + +STATIC VOID OsTaskKernelResourcesToFree(UINT32 syncSignal, UINTPTR topOfStack) +{ + OsTaskSyncDestroy(syncSignal); + + (VOID)LOS_MemFree((VOID *)m_aucSysMem1, (VOID *)topOfStack); +} + +======= //释放任务在内核状态下占用的资源 STATIC VOID OsTaskKernelResourcesToFree(UINT32 syncSignal, UINTPTR topOfStack) { @@ -421,6 +566,7 @@ STATIC VOID OsTaskKernelResourcesToFree(UINT32 syncSignal, UINTPTR topOfStack) (VOID)LOS_MemFree((VOID *)m_aucSysMem1, (VOID *)topOfStack);//释放内核态空间 } //释放任务资源 +>>>>>>> remotes/origin/main STATIC VOID OsTaskResourcesToFree(LosTaskCB *taskCB) { UINT32 syncSignal = LOSCFG_BASE_IPC_SEM_LIMIT; @@ -428,7 +574,11 @@ STATIC VOID OsTaskResourcesToFree(LosTaskCB *taskCB) UINTPTR topOfStack; #ifdef LOSCFG_KERNEL_VM +<<<<<<< HEAD + if ((taskCB->taskStatus & OS_TASK_FLAG_USER_MODE) && (taskCB->userMapBase != 0)) { +======= if ((taskCB->taskStatus & OS_TASK_FLAG_USER_MODE) && (taskCB->userMapBase != 0)) {//释放用户态栈 +>>>>>>> remotes/origin/main SCHEDULER_LOCK(intSave); UINT32 mapBase = (UINTPTR)taskCB->userMapBase; UINT32 mapSize = taskCB->userMapSize; @@ -438,7 +588,11 @@ STATIC VOID OsTaskResourcesToFree(LosTaskCB *taskCB) LosProcessCB *processCB = OS_PCB_FROM_TCB(taskCB); LOS_ASSERT(!(OsProcessVmSpaceGet(processCB) == NULL)); +<<<<<<< HEAD + UINT32 ret = OsUnMMap(OsProcessVmSpaceGet(processCB), (UINTPTR)mapBase, mapSize); +======= UINT32 ret = OsUnMMap(OsProcessVmSpaceGet(processCB), (UINTPTR)mapBase, mapSize);//解除映射 +>>>>>>> remotes/origin/main if ((ret != LOS_OK) && (mapBase != 0) && !OsProcessIsInit(processCB)) { PRINT_ERR("process(%u) unmmap user task(%u) stack failed! 
mapbase: 0x%x size :0x%x, error: %d\n", processCB->processID, taskCB->taskID, mapBase, mapSize, ret); @@ -450,36 +604,61 @@ STATIC VOID OsTaskResourcesToFree(LosTaskCB *taskCB) } #endif +<<<<<<< HEAD + if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) { +======= if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {//任务还未使用情况 +>>>>>>> remotes/origin/main topOfStack = taskCB->topOfStack; taskCB->topOfStack = 0; #ifdef LOSCFG_KERNEL_SMP_TASK_SYNC syncSignal = taskCB->syncSignal; taskCB->syncSignal = LOSCFG_BASE_IPC_SEM_LIMIT; #endif +<<<<<<< HEAD + OsTaskKernelResourcesToFree(syncSignal, topOfStack); + + SCHEDULER_LOCK(intSave); +#ifdef LOSCFG_KERNEL_VM + OsClearSigInfoTmpList(&(taskCB->sig)); +======= OsTaskKernelResourcesToFree(syncSignal, topOfStack);//释放内核所占内存,即内核栈的栈空间 SCHEDULER_LOCK(intSave); #ifdef LOSCFG_KERNEL_VM OsClearSigInfoTmpList(&(taskCB->sig));//归还信号控制块的内存 +>>>>>>> remotes/origin/main #endif OsInsertTCBToFreeList(taskCB); SCHEDULER_UNLOCK(intSave); } return; } +<<<<<<< HEAD + +======= //批量回收任务 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT VOID OsTaskCBRecycleToFree(void) { UINT32 intSave; SCHEDULER_LOCK(intSave); +<<<<<<< HEAD + while (!LOS_ListEmpty(&g_taskRecycleList)) { + LosTaskCB *taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_taskRecycleList)); + LOS_ListDelete(&taskCB->pendList); + SCHEDULER_UNLOCK(intSave); + + OsTaskResourcesToFree(taskCB); +======= while (!LOS_ListEmpty(&g_taskRecycleList)) {//遍历回收链表 LosTaskCB *taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_taskRecycleList));//取出任务 LOS_ListDelete(&taskCB->pendList);//重置节点 SCHEDULER_UNLOCK(intSave); OsTaskResourcesToFree(taskCB);//释放任务所占资源 +>>>>>>> remotes/origin/main SCHEDULER_LOCK(intSave); } @@ -489,7 +668,11 @@ LITE_OS_SEC_TEXT VOID OsTaskCBRecycleToFree(void) /* * Description : All task entry * Input : taskID --- The ID of the task to be run +<<<<<<< HEAD + */ +======= *///所有任务的入口函数,OsTaskEntry是new task OsTaskStackInit时指定的 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_INIT VOID OsTaskEntry(UINT32 taskID) { LOS_ASSERT(!OS_TID_CHECK_INVALID(taskID)); @@ -499,6 +682,16 @@ LITE_OS_SEC_TEXT_INIT VOID OsTaskEntry(UINT32 taskID) * from interrupt and other cores. release task spinlock and enable * interrupt in sequence at the task entry. 
*/ +<<<<<<< HEAD + LOS_SpinUnlock(&g_taskSpin); + (VOID)LOS_IntUnLock(); + + LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID); + taskCB->joinRetval = taskCB->taskEntry(taskCB->args[0], taskCB->args[1], + taskCB->args[2], taskCB->args[3]); /* 2 & 3: just for args array index */ + if (!(taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN)) { + taskCB->joinRetval = 0; +======= LOS_SpinUnlock(&g_taskSpin);//释放任务自旋锁 (VOID)LOS_IntUnLock();//恢复中断 @@ -507,11 +700,16 @@ LITE_OS_SEC_TEXT_INIT VOID OsTaskEntry(UINT32 taskID) taskCB->args[2], taskCB->args[3]); /* 2 & 3: just for args array index */ if (!(taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN)) { taskCB->joinRetval = 0;//结合数为0 +>>>>>>> remotes/origin/main } OsRunningTaskToExit(taskCB, 0); } +<<<<<<< HEAD + +======= //任务创建参数检查 +>>>>>>> remotes/origin/main STATIC UINT32 TaskCreateParamCheck(const UINT32 *taskID, TSK_INIT_PARAM_S *initParam) { UINT32 poolSize = OS_SYS_MEM_SIZE; @@ -530,6 +728,21 @@ STATIC UINT32 TaskCreateParamCheck(const UINT32 *taskID, TSK_INIT_PARAM_S *initP } } +<<<<<<< HEAD + if (initParam->pfnTaskEntry == NULL) { + return LOS_ERRNO_TSK_ENTRY_NULL; + } + + if (initParam->usTaskPrio > OS_TASK_PRIORITY_LOWEST) { + return LOS_ERRNO_TSK_PRIOR_ERROR; + } + + if (initParam->uwStackSize > poolSize) { + return LOS_ERRNO_TSK_STKSZ_TOO_LARGE; + } + + if (initParam->uwStackSize == 0) { +======= if (initParam->pfnTaskEntry == NULL) {//入口函数不能为空 return LOS_ERRNO_TSK_ENTRY_NULL; } @@ -543,17 +756,26 @@ STATIC UINT32 TaskCreateParamCheck(const UINT32 *taskID, TSK_INIT_PARAM_S *initP } if (initParam->uwStackSize == 0) {//任何任务都必须由内核态栈,所以uwStackSize不能为0 +>>>>>>> remotes/origin/main initParam->uwStackSize = LOSCFG_BASE_CORE_TSK_DEFAULT_STACK_SIZE; } initParam->uwStackSize = (UINT32)ALIGN(initParam->uwStackSize, LOSCFG_STACK_POINT_ALIGN_SIZE); +<<<<<<< HEAD + if (initParam->uwStackSize < LOS_TASK_MIN_STACK_SIZE) { +======= if (initParam->uwStackSize < LOS_TASK_MIN_STACK_SIZE) {//运行栈空间不能低于最低值 +>>>>>>> remotes/origin/main return LOS_ERRNO_TSK_STKSZ_TOO_SMALL; } return LOS_OK; } +<<<<<<< HEAD + +======= //任务栈(内核态)内存分配,由内核态进程空间提供,即KProcess进程空间 +>>>>>>> remotes/origin/main STATIC VOID TaskCBDeInit(LosTaskCB *taskCB) { UINT32 intSave; @@ -606,13 +828,21 @@ STATIC VOID TaskCBBaseInit(LosTaskCB *taskCB, const TSK_INIT_PARAM_S *initParam) LOS_ListInit(&taskCB->joinList); } +<<<<<<< HEAD + LOS_ListInit(&taskCB->lockList); +======= LOS_ListInit(&taskCB->lockList);//初始化互斥锁链表 +>>>>>>> remotes/origin/main SET_SORTLIST_VALUE(&taskCB->sortList, OS_SORT_LINK_INVALID_TIME); #ifdef LOSCFG_KERNEL_VM taskCB->futex.index = OS_INVALID_VALUE; #endif } +<<<<<<< HEAD + +======= //任务初始化 +>>>>>>> remotes/origin/main STATIC UINT32 TaskCBInit(LosTaskCB *taskCB, const TSK_INIT_PARAM_S *initParam) { UINT32 ret; @@ -621,7 +851,11 @@ STATIC UINT32 TaskCBInit(LosTaskCB *taskCB, const TSK_INIT_PARAM_S *initParam) LosSchedParam initSchedParam = {0}; UINT16 policy = (initParam->policy == LOS_SCHED_NORMAL) ? 
LOS_SCHED_RR : initParam->policy; +<<<<<<< HEAD + TaskCBBaseInit(taskCB, initParam); +======= TaskCBBaseInit(taskCB, initParam);//初始化任务的基本信息,task->stackPointer指向内核态栈sp位置,该位置存着任务初始上下文 +>>>>>>> remotes/origin/main schedParam.policy = policy; ret = OsProcessAddNewTask(initParam->processID, taskCB, &schedParam, &numCount); @@ -673,27 +907,43 @@ STATIC UINT32 TaskStackInit(LosTaskCB *taskCB, const TSK_INIT_PARAM_S *initParam #endif return LOS_OK; } +<<<<<<< HEAD + +======= //获取一个空闲TCB +>>>>>>> remotes/origin/main STATIC LosTaskCB *GetFreeTaskCB(VOID) { UINT32 intSave; SCHEDULER_LOCK(intSave); +<<<<<<< HEAD + if (LOS_ListEmpty(&g_losFreeTask)) { +======= if (LOS_ListEmpty(&g_losFreeTask)) {//全局空闲task为空 +>>>>>>> remotes/origin/main SCHEDULER_UNLOCK(intSave); PRINT_ERR("No idle TCB in the system!\n"); return NULL; } LosTaskCB *taskCB = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&g_losFreeTask)); +<<<<<<< HEAD + LOS_ListDelete(LOS_DL_LIST_FIRST(&g_losFreeTask)); +======= LOS_ListDelete(LOS_DL_LIST_FIRST(&g_losFreeTask));//从g_losFreeTask链表中摘除自己 +>>>>>>> remotes/origin/main SCHEDULER_UNLOCK(intSave); return taskCB; } +<<<<<<< HEAD + +======= /* 创建任务,并使该任务进入suspend状态,不对该状态进行调度。如果需要调度,可以调用LOS_TaskResume使该任务进入ready状态 */ +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskCreateOnly(UINT32 *taskID, TSK_INIT_PARAM_S *initParam) { UINT32 errRet = TaskCreateParamCheck(taskID, initParam); @@ -733,7 +983,11 @@ DEINIT_TCB: TaskCBDeInit(taskCB); return errRet; } +<<<<<<< HEAD + +======= //创建任务,并使该任务进入ready状态,如果就绪队列中没有更高优先级的任务,则运行该任务 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskCreate(UINT32 *taskID, TSK_INIT_PARAM_S *initParam) { UINT32 ret; @@ -747,7 +1001,11 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskCreate(UINT32 *taskID, TSK_INIT_PARAM_S *in return LOS_ERRNO_TSK_YIELD_IN_INT; } +<<<<<<< HEAD + if (OsProcessIsUserMode(OsCurrProcessGet())) { +======= if (OsProcessIsUserMode(OsCurrProcessGet())) {//当前进程为用户进程 +>>>>>>> remotes/origin/main initParam->processID = (UINTPTR)OsGetKernelInitProcess(); } else { initParam->processID = (UINTPTR)OsCurrProcessGet(); @@ -773,7 +1031,11 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskCreate(UINT32 *taskID, TSK_INIT_PARAM_S *in return LOS_OK; } +<<<<<<< HEAD + +======= //恢复挂起的任务,是该任务进入ready状态 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskResume(UINT32 taskID) { UINT32 intSave; @@ -788,7 +1050,11 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_TaskResume(UINT32 taskID) SCHEDULER_LOCK(intSave); /* clear pending signal */ +<<<<<<< HEAD + taskCB->signal &= ~SIGNAL_SUSPEND; +======= taskCB->signal &= ~SIGNAL_SUSPEND;//清楚挂起信号 +>>>>>>> remotes/origin/main if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) { errRet = LOS_ERRNO_TSK_NOT_CREATED; @@ -814,6 +1080,15 @@ LOS_ERREND: } /* +<<<<<<< HEAD + * Check if needs to do the suspend operation on the running task. + * Return TRUE, if needs to do the suspension. + * Return FALSE, if meets following circumstances: + * 1. Do the suspension across cores, if SMP is enabled + * 2. Do the suspension when preemption is disabled + * 3. Do the suspension in hard-irq + * then LOS_TaskSuspend will directly return with 'ret' value. +======= * Check if needs to do the suspend operation on the running task.//检查是否需要对正在运行的任务执行挂起操作。 * Return TRUE, if needs to do the suspension. //如果需要暂停,返回TRUE。 * Return FALSE, if meets following circumstances: //如果满足一下情况,则返回FALSE: @@ -821,6 +1096,7 @@ LOS_ERREND: * 2. Do the suspension when preemption is disabled //2.当禁用抢占时则挂起 * 3. 
Do the suspension in hard-irq  // 3. the suspension is requested from hard-irq context
 * then LOS_TaskSuspend will directly return with 'ret' value.  // then LOS_TaskSuspend returns the 'ret' value directly
+>>>>>>> remotes/origin/main
 */
LITE_OS_SEC_TEXT_INIT STATIC BOOL OsTaskSuspendCheckOnRun(LosTaskCB *taskCB, UINT32 *ret)
{
@@ -829,20 +1105,34 @@ LITE_OS_SEC_TEXT_INIT STATIC BOOL OsTaskSuspendCheckOnRun(LosTaskCB *taskCB, UIN
#ifdef LOSCFG_KERNEL_SMP
    /* ASYNCHRONIZED. No need to do task lock checking */
+<<<<<<< HEAD
+    if (taskCB->currCpu != ArchCurrCpuid()) {
+        taskCB->signal = SIGNAL_SUSPEND;
+        LOS_MpSchedule(taskCB->currCpu);
+=======
    if (taskCB->currCpu != ArchCurrCpuid()) { // the task is running on another core
        taskCB->signal = SIGNAL_SUSPEND;
        LOS_MpSchedule(taskCB->currCpu); // let the owning CPU carry out the suspension
+>>>>>>> remotes/origin/main
        return FALSE;
    }
#endif
+<<<<<<< HEAD
+    if (!OsPreemptableInSched()) {
+=======
    if (!OsPreemptableInSched()) { // preemption is currently disabled
+>>>>>>> remotes/origin/main
        /* Suspending the current core's running task */
        *ret = LOS_ERRNO_TSK_SUSPEND_LOCKED;
        return FALSE;
    }
+<<<<<<< HEAD
+    if (OS_INT_ACTIVE) {
+=======
    if (OS_INT_ACTIVE) { // called from a hard interrupt
+>>>>>>> remotes/origin/main
        /* suspend running task in interrupt */
        taskCB->signal = SIGNAL_SUSPEND;
        return FALSE;
@@ -850,7 +1140,11 @@ LITE_OS_SEC_TEXT_INIT STATIC BOOL OsTaskSuspendCheckOnRun(LosTaskCB *taskCB, UIN
    return TRUE;
}
+<<<<<<< HEAD
+
+=======
//当前任务释放CPU,并将其移到具有相同优先级的就绪任务队列的末尾。 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskYield(VOID) { UINT32 intSave; @@ -1176,7 +1503,11 @@ LITE_OS_SEC_TEXT_MINOR VOID LOS_TaskUnlock(VOID) LOS_Schedule(); } } +<<<<<<< HEAD + +======= //获取任务信息,给shell使用的 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskInfoGet(UINT32 taskID, TSK_INFO_S *taskInfo) { UINT32 intSave; @@ -1206,8 +1537,13 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskInfoGet(UINT32 taskID, TSK_INFO_S *taskInf taskCB->ops->schedParamGet(taskCB, ¶m); taskInfo->usTaskStatus = taskCB->taskStatus; taskInfo->usTaskPrio = param.priority; +<<<<<<< HEAD + taskInfo->uwStackSize = taskCB->stackSize; + taskInfo->uwTopOfStack = taskCB->topOfStack; +======= taskInfo->uwStackSize = taskCB->stackSize;//内核态栈大小 taskInfo->uwTopOfStack = taskCB->topOfStack;//内核态栈顶位置 +>>>>>>> remotes/origin/main taskInfo->uwEventMask = taskCB->eventMask; taskInfo->taskEvent = taskCB->taskEvent; taskInfo->pTaskMux = taskCB->taskMux; @@ -1218,16 +1554,28 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskInfoGet(UINT32 taskID, TSK_INFO_S *taskInf } taskInfo->acName[LOS_TASK_NAMELEN - 1] = '\0'; +<<<<<<< HEAD + taskInfo->uwBottomOfStack = TRUNCATE(((UINTPTR)taskCB->topOfStack + taskCB->stackSize), + OS_TASK_STACK_ADDR_ALIGN); + taskInfo->uwCurrUsed = (UINT32)(taskInfo->uwBottomOfStack - taskInfo->uwSP); + + taskInfo->bOvf = OsStackWaterLineGet((const UINTPTR *)taskInfo->uwBottomOfStack, +======= taskInfo->uwBottomOfStack = TRUNCATE(((UINTPTR)taskCB->topOfStack + taskCB->stackSize),//这里可以看出栈顶地址是高于栈顶 OS_TASK_STACK_ADDR_ALIGN); taskInfo->uwCurrUsed = (UINT32)(taskInfo->uwBottomOfStack - taskInfo->uwSP);//当前任务栈已使用了多少 taskInfo->bOvf = OsStackWaterLineGet((const UINTPTR *)taskInfo->uwBottomOfStack,//获取栈的使用情况 +>>>>>>> remotes/origin/main (const UINTPTR *)taskInfo->uwTopOfStack, &taskInfo->uwPeakUsed); SCHEDULER_UNLOCK(intSave); return LOS_OK; } +<<<<<<< HEAD + +======= //CPU亲和性(affinity)将任务绑定在指定CPU上,用于多核CPU情况,(该函数仅在SMP模式下支持) +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT BOOL OsTaskCpuAffiSetUnsafe(UINT32 taskID, UINT16 newCpuAffiMask, UINT16 *oldCpuAffiMask) { #ifdef LOSCFG_KERNEL_SMP @@ -1253,17 +1601,29 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskCpuAffiSet(UINT32 taskID, UINT16 cpuAffiMa UINT32 intSave; UINT16 currCpuMask; +<<<<<<< HEAD + if (OS_TID_CHECK_INVALID(taskID)) { + return LOS_ERRNO_TSK_ID_INVALID; + } + + if (!(cpuAffiMask & LOSCFG_KERNEL_CPU_MASK)) { +======= if (OS_TID_CHECK_INVALID(taskID)) {//检测taskid是否有效,task由task池分配,鸿蒙默认128个任务 ID范围[0:127] return LOS_ERRNO_TSK_ID_INVALID; } if (!(cpuAffiMask & LOSCFG_KERNEL_CPU_MASK)) {//检测cpu亲和力 +>>>>>>> remotes/origin/main return LOS_ERRNO_TSK_CPU_AFFINITY_MASK_ERR; } LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID); SCHEDULER_LOCK(intSave); +<<<<<<< HEAD + if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) { +======= if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {//贴有未使用标签的处理 +>>>>>>> remotes/origin/main SCHEDULER_UNLOCK(intSave); return LOS_ERRNO_TSK_NOT_CREATED; } @@ -1271,13 +1631,22 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_TaskCpuAffiSet(UINT32 taskID, UINT16 cpuAffiMa SCHEDULER_UNLOCK(intSave); if (needSched && OS_SCHEDULER_ACTIVE) { +<<<<<<< HEAD + LOS_MpSchedule(currCpuMask); + LOS_Schedule(); +======= LOS_MpSchedule(currCpuMask);//发送信号调度信号给目标cpu LOS_Schedule();//申请调度 +>>>>>>> remotes/origin/main } return LOS_OK; } +<<<<<<< HEAD + +======= //查询任务被绑在哪个cpu上 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskCpuAffiGet(UINT32 taskID) { #ifdef LOSCFG_KERNEL_SMP @@ -1291,18 +1660,30 @@ 
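/* Editor's note: hypothetical SMP usage of the affinity pair -- LOS_TaskCpuAffiSet()
 * above and LOS_TaskCpuAffiGet() just below (sketch only; assumes LOSCFG_KERNEL_SMP
 * and an invented "SketchPinSelfToCpu1" name). Bit n of the mask selects CPU n, so
 * CPUID_TO_AFFI_MASK(1) pins a task to CPU1; single-core builds always report 0x01. */
STATIC VOID SketchPinSelfToCpu1(VOID)
{
    UINT32 tid = LOS_CurTaskIDGet();
    if (LOS_TaskCpuAffiSet(tid, CPUID_TO_AFFI_MASK(1)) == LOS_OK) {
        PRINTK("affinity mask now 0x%x\n", LOS_TaskCpuAffiGet(tid));
    }
}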
LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskCpuAffiGet(UINT32 taskID)
    LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID);
    SCHEDULER_LOCK(intSave);
+<<<<<<< HEAD
+    if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
+=======
    if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) { // the task must be in use
+>>>>>>> remotes/origin/main
        SCHEDULER_UNLOCK(intSave);
        return INVALID_CPU_AFFI_MASK;
    }
+<<<<<<< HEAD
+    cpuAffiMask = taskCB->cpuAffiMask;
+=======
    cpuAffiMask = taskCB->cpuAffiMask; // fetch the affinity mask
+>>>>>>> remotes/origin/main
    SCHEDULER_UNLOCK(intSave);
    return cpuAffiMask;
#else
    (VOID)taskID;
+<<<<<<< HEAD
+    return 1;
+=======
    return 1; // single-core case: always 1, since CPU0 corresponds to mask 0x01
+>>>>>>> remotes/origin/main
#endif
}

@@ -1312,14 +1693,22 @@ LITE_OS_SEC_TEXT_MINOR UINT16 LOS_TaskCpuAffiGet(UINT32 taskID)
LITE_OS_SEC_TEXT_MINOR VOID OsTaskProcSignal(VOID)
{
    UINT32 ret;
+<<<<<<< HEAD
+
+=======
    // private and non-interruptible, so no extra protection is needed: other cores
    // always see this task as running, so it keeps receiving signals while the
    // code below executes
+>>>>>>> remotes/origin/main
    /*
     * private and uninterruptable, no protection needed.
     * while this task is always running when others cores see it,
     * so it keeps receiving signals while follow code executing.
     */
    LosTaskCB *runTask = OsCurrTaskGet();
+<<<<<<< HEAD
+    if (runTask->signal == SIGNAL_NONE) {
+=======
    if (runTask->signal == SIGNAL_NONE) { // no other core has posted a request to this task
+>>>>>>> remotes/origin/main
        return;
    }

@@ -1327,23 +1716,41 @@ LITE_OS_SEC_TEXT_MINOR VOID OsTaskProcSignal(VOID)
    if (runTask->signal & SIGNAL_KILL) {
        /*
         * clear the signal, and do the task deletion. if the signaled task has been
         * scheduled out, then this deletion will wait until next run.
+<<<<<<< HEAD
+         */
+        runTask->signal = SIGNAL_NONE;
+=======
         */ // if the signalled task has already left the ready queue, the deletion waits until its next run
        runTask->signal = SIGNAL_NONE; // clear the signal
+>>>>>>> remotes/origin/main
        ret = LOS_TaskDelete(runTask->taskID);
        if (ret != LOS_OK) {
            PRINT_ERR("Task proc signal delete task(%u) failed err:0x%x\n", runTask->taskID, ret);
        }
+<<<<<<< HEAD
+    } else if (runTask->signal & SIGNAL_SUSPEND) {
+        runTask->signal &= ~SIGNAL_SUSPEND;
+=======
    } else if (runTask->signal & SIGNAL_SUSPEND) { // another core has asked to suspend this task
        runTask->signal &= ~SIGNAL_SUSPEND; // acknowledge the remote suspend request
+>>>>>>> remotes/origin/main
        /* suspend killed task may fail, ignore the result */
        (VOID)LOS_TaskSuspend(runTask->taskID);
#ifdef LOSCFG_KERNEL_SMP
+<<<<<<< HEAD
+    } else if (runTask->signal & SIGNAL_AFFI) {
+        runTask->signal &= ~SIGNAL_AFFI;
+
+        /* priority queue has updated, notify the target cpu */
+        LOS_MpSchedule((UINT32)runTask->cpuAffiMask);
+=======
    } else if (runTask->signal & SIGNAL_AFFI) { // the affinity mask changed; re-bind at the next schedule
        runTask->signal &= ~SIGNAL_AFFI; // clear the affinity-change tag

        /* priority queue has updated, notify the target cpu */
        LOS_MpSchedule((UINT32)runTask->cpuAffiMask); // reschedule so the task migrates to a CPU allowed by the new mask
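    /* Editor's note (summary sketch, not kernel source): the cross-core requests
     * handled above all follow one "post a bit, kick the core" protocol. Sender
     * side, condensed from OsTaskSuspendCheckOnRun() earlier in this file:
     *
     *     taskCB->signal = SIGNAL_SUSPEND;   // post the request on the victim's TCB
     *     LOS_MpSchedule(taskCB->currCpu);   // interrupt the core that owns it
     *
     * The owning core then consumes the bit here at its next scheduling point,
     * so a task is only ever deleted, suspended, or migrated by its own core.
     */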
@@ -1416,7 +1823,11 @@ INT32 OsUserProcessOperatePermissionsCheck(const LosTaskCB *taskCB, UINTPTR proc
     return LOS_OK;
 }
+<<<<<<< HEAD
+
+=======
 // Before creating the task, check the user-mode stack parameters: the addresses must lie in user space.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_INIT STATIC UINT32 OsCreateUserTaskParamCheck(UINT32 processID, TSK_INIT_PARAM_S *param)
 {
     UserTaskParam *userParam = NULL;
@@ -1426,6 +1837,20 @@ LITE_OS_SEC_TEXT_INIT STATIC UINT32 OsCreateUserTaskParamCheck(UINT32 processID,
     }

     userParam = &param->userParam;
+<<<<<<< HEAD
+    if ((processID == OS_INVALID_VALUE) && !LOS_IsUserAddress(userParam->userArea)) {
+        return OS_INVALID_VALUE;
+    }
+
+    if (!LOS_IsUserAddress((UINTPTR)param->pfnTaskEntry)) {
+        return OS_INVALID_VALUE;
+    }
+
+    if (userParam->userMapBase && !LOS_IsUserAddressRange(userParam->userMapBase, userParam->userMapSize)) {
+        return OS_INVALID_VALUE;
+    }
+
+=======
     if ((processID == OS_INVALID_VALUE) && !LOS_IsUserAddress(userParam->userArea)) { // the user area must be a user-space address
         return OS_INVALID_VALUE;
     }
@@ -1438,13 +1863,18 @@ LITE_OS_SEC_TEXT_INIT STATIC UINT32 OsCreateUserTaskParamCheck(UINT32 processID,
         return OS_INVALID_VALUE;
     }
     // check the stack range
+>>>>>>> remotes/origin/main
     if (!LOS_IsUserAddress(userParam->userSP)) {
         return OS_INVALID_VALUE;
     }

     return LOS_OK;
 }
+<<<<<<< HEAD
+
+=======
 // Create a user-mode task.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_INIT UINT32 OsCreateUserTask(UINTPTR processID, TSK_INIT_PARAM_S *initParam)
 {
     UINT32 taskID;
@@ -1453,6 +1883,20 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsCreateUserTask(UINTPTR processID, TSK_INIT_PARAM_
     INT32 policy;
     SchedParam param;

+<<<<<<< HEAD
+    ret = OsCreateUserTaskParamCheck(processID, initParam);
+    if (ret != LOS_OK) {
+        return ret;
+    }
+
+    initParam->uwStackSize = OS_USER_TASK_SYSCALL_STACK_SIZE;
+    initParam->usTaskPrio = OS_TASK_PRIORITY_LOWEST;
+    if (processID == OS_INVALID_VALUE) {
+        SCHEDULER_LOCK(intSave);
+        LosProcessCB *processCB = OsCurrProcessGet();
+        initParam->processID = (UINTPTR)processCB;
+        initParam->consoleID = processCB->consoleID;
+=======
     ret = OsCreateUserTaskParamCheck(processID, initParam); // validate parameters: stack and entry addresses must be in user space
     if (ret != LOS_OK) {
         return ret;
     }
@@ -1465,6 +1909,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsCreateUserTask(UINTPTR processID, TSK_INIT_PARAM_
         LosProcessCB *processCB = OsCurrProcessGet();
         initParam->processID = (UINTPTR)processCB;
         initParam->consoleID = processCB->consoleID; // the console this task belongs to
+>>>>>>> remotes/origin/main
         SCHEDULER_UNLOCK(intSave);
         ret = LOS_GetProcessScheduler(processCB->processID, &policy, NULL);
         if (ret != LOS_OK) {
@@ -1477,6 +1922,15 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsCreateUserTask(UINTPTR processID, TSK_INIT_PARAM_
             initParam->deadlineUs = param.deadlineUs;
             initParam->periodUs = param.periodUs;
         }
+<<<<<<< HEAD
+    } else {
+        initParam->policy = LOS_SCHED_RR;
+        initParam->processID = processID;
+        initParam->consoleID = 0;
+    }
+
+    ret = LOS_TaskCreateOnly(&taskID, initParam);
+=======
     } else { // the process already exists
         initParam->policy = LOS_SCHED_RR; // preemptive round-robin; note that HarmonyOS supports more than this one scheduling policy
         initParam->processID = processID; // record the process ID
@@ -1484,13 +1938,18 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsCreateUserTask(UINTPTR processID, TSK_INIT_PARAM_
     }

     ret = LOS_TaskCreateOnly(&taskID, initParam); // only create the task entity; do not request scheduling
+>>>>>>> remotes/origin/main
     if (ret != LOS_OK) {
         return OS_INVALID_VALUE;
     }

     return taskID;
 }
+<<<<<<< HEAD
+
+=======
 // Get the task's scheduling policy.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT INT32 LOS_GetTaskScheduler(INT32 taskID)
 {
     UINT32 intSave;
@@ -1503,7 +1962,11 @@ LITE_OS_SEC_TEXT INT32 LOS_GetTaskScheduler(INT32 taskID)
     LosTaskCB *taskCB = OS_TCB_FROM_TID(taskID);
     SCHEDULER_LOCK(intSave);
+<<<<<<< HEAD
+    if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) {
+=======
     if (taskCB->taskStatus & OS_TASK_STATUS_UNUSED) { // the task must be in use
+>>>>>>> remotes/origin/main
         policy = -LOS_EINVAL;
         OS_GOTO_ERREND();
     }
@@ -1515,7 +1978,11 @@ LOS_ERREND:
     SCHEDULER_UNLOCK(intSave);
     return policy;
 }
+<<<<<<< HEAD
+
+=======
 // Set the task's scheduling parameters.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT INT32 LOS_SetTaskScheduler(INT32 taskID, UINT16 policy, UINT16 priority)
 {
     SchedParam param = { 0 };
@@ -1659,12 +2126,20 @@ UINT32 LOS_TaskDetach(UINT32 taskID)
     SCHEDULER_UNLOCK(intSave);
     return errRet;
 }
+<<<<<<< HEAD
+
+=======
 // Get the maximum number of tasks.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT UINT32 LOS_GetSystemTaskMaximum(VOID)
 {
     return g_taskMaxNum;
 }
+<<<<<<< HEAD
+
+=======
 // The last (reserved) control block in the task pool.
+>>>>>>> remotes/origin/main
 LosTaskCB *OsGetDefaultTaskCB(VOID)
 {
     return &g_taskCBArray[g_taskMaxNum];
 }
@@ -1679,11 +2154,29 @@ LITE_OS_SEC_TEXT VOID OsWriteResourceEventUnsafe(UINT32 events)
 {
     (VOID)OsEventWriteUnsafe(&g_resourceEvent, events, FALSE, NULL);
 }
+<<<<<<< HEAD
+
+=======
 // The resource-recovery task.
+>>>>>>> remotes/origin/main
 STATIC VOID OsResourceRecoveryTask(VOID)
 {
     UINT32 ret;

+<<<<<<< HEAD
+    while (1) {
+        ret = LOS_EventRead(&g_resourceEvent, OS_RESOURCE_EVENT_MASK,
+                            LOS_WAITMODE_OR | LOS_WAITMODE_CLR, LOS_WAIT_FOREVER);
+        if (ret & (OS_RESOURCE_EVENT_FREE | OS_RESOURCE_EVENT_OOM)) {
+            OsTaskCBRecycleToFree();
+
+            OsProcessCBRecycleToFree();
+        }
+
+#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK
+        if (ret & OS_RESOURCE_EVENT_OOM) {
+            (VOID)OomCheckProcess();
+=======
     while (1) { // endless loop: recovery never exits; resources must be reclaimed for as long as the system runs
         ret = LOS_EventRead(&g_resourceEvent, OS_RESOURCE_EVENT_MASK,
                             LOS_WAITMODE_OR | LOS_WAITMODE_CLR, LOS_WAIT_FOREVER); // wait for a resource event
@@ -1696,27 +2189,43 @@ STATIC VOID OsResourceRecoveryTask(VOID)
 #ifdef LOSCFG_ENABLE_OOM_LOOP_TASK // switch for the out-of-memory check task
         if (ret & OS_RESOURCE_EVENT_OOM) { // the OOM event fired
             (VOID)OomCheckProcess(); // check the processes for out-of-memory conditions
+>>>>>>> remotes/origin/main
         }
 #endif
     }
 }
+<<<<<<< HEAD
+
+=======
 // Create the resource-recovery task.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT UINT32 OsResourceFreeTaskCreate(VOID)
 {
     UINT32 ret;
     UINT32 taskID;
     TSK_INIT_PARAM_S taskInitParam;

+<<<<<<< HEAD
+    ret = LOS_EventInit((PEVENT_CB_S)&g_resourceEvent);
+=======
     ret = LOS_EventInit((PEVENT_CB_S)&g_resourceEvent); // initialize the resource event
+>>>>>>> remotes/origin/main
     if (ret != LOS_OK) {
         return LOS_NOK;
     }

     (VOID)memset_s((VOID *)(&taskInitParam), sizeof(TSK_INIT_PARAM_S), 0, sizeof(TSK_INIT_PARAM_S));
+<<<<<<< HEAD
+    taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)OsResourceRecoveryTask;
+    taskInitParam.uwStackSize = OS_TASK_RESOURCE_STATIC_SIZE;
+    taskInitParam.pcName = "ResourcesTask";
+    taskInitParam.usTaskPrio = OS_TASK_RESOURCE_FREE_PRIORITY;
+=======
     taskInitParam.pfnTaskEntry = (TSK_ENTRY_FUNC)OsResourceRecoveryTask; // entry function
     taskInitParam.uwStackSize = OS_TASK_RESOURCE_STATIC_SIZE;
     taskInitParam.pcName = "ResourcesTask";
     taskInitParam.usTaskPrio = OS_TASK_RESOURCE_FREE_PRIORITY; // 5: a fairly high priority
+>>>>>>> remotes/origin/main
     ret = LOS_TaskCreate(&taskID, &taskInitParam);
     if (ret == LOS_OK) {
         OS_TCB_FROM_TID(taskID)->taskStatus |= OS_TASK_FLAG_NO_DELETE;
@@ -1724,5 +2233,9 @@ LITE_OS_SEC_TEXT UINT32 OsResourceFreeTaskCreate(VOID)
     return ret;
 }

+<<<<<<< HEAD
+LOS_MODULE_INIT(OsResourceFreeTaskCreate, LOS_INIT_LEVEL_KMOD_TASK);
+=======
 LOS_MODULE_INIT(OsResourceFreeTaskCreate, LOS_INIT_LEVEL_KMOD_TASK); // register the resource-recovery task for module init
+>>>>>>> remotes/origin/main
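OsResourceRecoveryTask above is a standard event pump: producers raise OS_RESOURCE_EVENT_FREE or OS_RESOURCE_EVENT_OOM through OsWriteResourceEventUnsafe, and the task wakes, reclaims, and blocks again. A minimal sketch of the same pattern with a private event (the event name and worker are hypothetical; only the LOS_Event* calls are the kernel's own):

    STATIC EVENT_CB_S g_myEvent;
    #define MY_EVENT_WORK (1U << 0)

    STATIC VOID MyWorkerTask(VOID)
    {
        (VOID)LOS_EventInit(&g_myEvent);
        while (1) {
            /* Sleep until any masked bit is written; CLR consumes the bits on wakeup. */
            UINT32 ev = LOS_EventRead(&g_myEvent, MY_EVENT_WORK,
                                      LOS_WAITMODE_OR | LOS_WAITMODE_CLR, LOS_WAIT_FOREVER);
            if (ev & MY_EVENT_WORK) {
                /* drain the queued work here */
            }
        }
    }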
diff --git a/src/kernel_liteos_a/kernel/base/core/los_tick.c b/src/kernel_liteos_a/kernel/base/core/los_tick.c
index 4e9c4575..45f030b6 100644
--- a/src/kernel_liteos_a/kernel/base/core/los_tick.c
+++ b/src/kernel_liteos_a/kernel/base/core/los_tick.c
@@ -37,30 +37,51 @@
 #endif

+<<<<<<< HEAD
+LITE_OS_SEC_DATA_INIT UINT32 g_sysClock;
+LITE_OS_SEC_DATA_INIT UINT32 g_tickPerSecond;
+LITE_OS_SEC_BSS DOUBLE g_cycle2NsScale;
+
+/* spinlock for task module */
+LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_tickSpin);
+=======
 LITE_OS_SEC_DATA_INIT UINT32 g_sysClock;      // system clock: the clock source for most components and for all peripherals
 LITE_OS_SEC_DATA_INIT UINT32 g_tickPerSecond; // ticks per second; HarmonyOS defaults to 100, i.e. a 10 ms tick
 LITE_OS_SEC_BSS DOUBLE g_cycle2NsScale;       // scale factor converting clock cycles to nanoseconds

 /* spinlock for task module */
 LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_tickSpin);   // spinlock of the tick module
+>>>>>>> remotes/origin/main

 /*
  * Description : Tick interruption handler
  */
+<<<<<<< HEAD
+LITE_OS_SEC_TEXT VOID OsTickHandler(VOID)
+=======
 LITE_OS_SEC_TEXT VOID OsTickHandler(VOID) // tick interrupt handler, fired once per tick (10 ms by default)
+>>>>>>> remotes/origin/main
 {
 #ifdef LOSCFG_SCHED_TICK_DEBUG
     OsSchedDebugRecordData();
 #endif

 #ifdef LOSCFG_KERNEL_VDSO
+<<<<<<< HEAD
+    OsVdsoTimevalUpdate();
+=======
     OsVdsoTimevalUpdate(); // refresh the VDSO data page; the VDSO lets user processes read system time without a syscall
+>>>>>>> remotes/origin/main
 #endif

 #ifdef LOSCFG_BASE_CORE_TICK_HW_TIME
     HalClockIrqClear(); /* diff from every platform */
 #endif

+<<<<<<< HEAD
+    OsSchedTick();
+=======
     OsSchedTick(); // scheduling driven by the clock tick
+>>>>>>> remotes/origin/main
 }
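g_cycle2NsScale above exists so that a cycle count converts to nanoseconds with a single multiply. A sketch of how such a scale is derived and applied (the initialization shown is an illustration consistent with the declarations, not this file's code):

    /* ns per cycle = 1e9 / Hz: a 50 MHz system clock gives 20 ns per cycle. */
    g_cycle2NsScale = (DOUBLE)1000000000 / (DOUBLE)g_sysClock;
    UINT64 ns = (UINT64)(cycles * g_cycle2NsScale);   /* e.g. 1000 cycles -> 20000 ns */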
diff --git a/src/kernel_liteos_a/kernel/base/include/los_container_pri.h b/src/kernel_liteos_a/kernel/base/include/los_container_pri.h
index db10f529..c1c235c6 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_container_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_container_pri.h
@@ -57,10 +57,17 @@
 typedef enum {
     CONTAINER = 0,
+<<<<<<< HEAD
+    PID_CONTAINER,
+    PID_CHILD_CONTAINER,
+    UTS_CONTAINER,
+    MNT_CONTAINER,
+=======
     PID_CONTAINER,       // process (PID) container
     PID_CHILD_CONTAINER, // child-process PID container
     UTS_CONTAINER,       // UTS container
     MNT_CONTAINER,       // mount container
+>>>>>>> remotes/origin/main
     IPC_CONTAINER,
     USER_CONTAINER,
     TIME_CONTAINER,
@@ -70,6 +77,31 @@ typedef enum {
 } ContainerType;

 typedef struct Container {
+<<<<<<< HEAD
+    Atomic rc;
+#ifdef LOSCFG_PID_CONTAINER
+    struct PidContainer *pidContainer;
+    struct PidContainer *pidForChildContainer;
+#endif
+#ifdef LOSCFG_UTS_CONTAINER
+    struct UtsContainer *utsContainer;
+#endif
+#ifdef LOSCFG_MNT_CONTAINER
+    struct MntContainer *mntContainer;
+#endif
+#ifdef LOSCFG_IPC_CONTAINER
+    struct IpcContainer *ipcContainer;
+#endif
+#ifdef LOSCFG_TIME_CONTAINER
+    struct TimeContainer *timeContainer;
+    struct TimeContainer *timeForChildContainer;
+#endif
+#ifdef LOSCFG_NET_CONTAINER
+    struct NetContainer *netContainer;
+#endif
+} Container;
+
+=======
     Atomic rc; // reference count, manipulated atomically
 #ifdef LOSCFG_PID_CONTAINER
     struct PidContainer *pidContainer; // PID container
@@ -93,6 +125,7 @@ typedef struct Container {
 #endif
 } Container;
 // Upper limits on the number of containers.
+>>>>>>> remotes/origin/main
 typedef struct TagContainerLimit {
 #ifdef LOSCFG_PID_CONTAINER
     UINT32 pidLimit;
diff --git a/src/kernel_liteos_a/kernel/base/include/los_err_pri.h b/src/kernel_liteos_a/kernel/base/include/los_err_pri.h
index e7c7fc9c..c47405e8 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_err_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_err_pri.h
@@ -40,21 +40,92 @@
 extern "C" {
 #endif /* __cplusplus */
 #endif /* __cplusplus */

+<<<<<<< HEAD
+/**
+ * @ingroup los_err
+ * Define the error magic word.
+ */
+#define OS_ERR_MAGIC_WORD 0xa1b2c3f8
+
+/**
+ * @ingroup los_err
+ * @brief Error handling macro capable of returning error codes.
+ *
+ * @par Description:
+ * This API is used to call the error handling function by using an error code and return the same error code.
+ * @attention
+ *
+ * @param errNo [IN] Error code.
+ *
+ * @retval errNo
+ * @par Dependency:
+ *
+ * @see None.
+ */
+=======
 #define OS_ERR_MAGIC_WORD 0xa1b2c3f8
+>>>>>>> remotes/origin/main
 #define OS_RETURN_ERROR(errNo) do {                                                        \
     (VOID)LOS_ErrHandle("os_unspecific_file", OS_ERR_MAGIC_WORD, errNo, 0, NULL);          \
     return errNo;                                                                          \
 } while (0)

+<<<<<<< HEAD
+/**
+ * @ingroup los_err
+ * @brief Error handling macro capable of returning error codes.
+ *
+ * @par Description:
+ * This API is used to call the error handling function by using an error code and the line number of
+ * the erroneous line, and return the same error code.
+ * @attention
+ *
+ * @param errLine [IN] Line number of the erroneous line.
+ * @param errNo   [IN] Error code.
+ *
+ * @retval errNo
+ * @par Dependency:
+ *
+ * @see None.
+ */
+=======
+>>>>>>> remotes/origin/main
 #define OS_RETURN_ERROR_P2(errLine, errNo) do {                                            \
     (VOID)LOS_ErrHandle("os_unspecific_file", errLine, errNo, 0, NULL);                    \
     return errNo;                                                                          \
 } while (0)

+<<<<<<< HEAD
+/**
+ * @ingroup los_err
+ * @brief Macro for jumping to error handler.
+ *
+ * @par Description:
+ * This API is used to call the error handling function by using an error code.
+ * @attention
+ *
+ * @param errorNo [IN] Error code.
+ *
+ * @retval None.
+ * @par Dependency:
+ *
+ * @see None.
+ */
+=======
+>>>>>>> remotes/origin/main
 #define OS_GOTO_ERR_HANDLER(errorNo) do {                                                  \
     errNo = errorNo;                                                                       \
     errLine = OS_ERR_MAGIC_WORD;                                                           \
diff --git a/src/kernel_liteos_a/kernel/base/include/los_futex_pri.h b/src/kernel_liteos_a/kernel/base/include/los_futex_pri.h
index 758a2679..bd150db2 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_futex_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_futex_pri.h
@@ -33,16 +33,35 @@
 #define _LOS_FUTEX_PRI_H
 #include "los_list.h"

+<<<<<<< HEAD
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+#define FUTEX_REQUEUE 3
+#define FUTEX_WAKE_OP 5
+=======
 #define FUTEX_WAIT 0    ///< atomically check whether the counter at uaddr still holds val; if so, sleep the task until FUTEX_WAKE or a time-out,
                         ///< i.e. queue the task on the wait queue associated with uaddr.
 #define FUTEX_WAKE 1    ///< wake at most val tasks waiting on uaddr.
 #define FUTEX_REQUEUE 3 ///< requeue: move the waiters of the given lock to another position in the futex table.
 #define FUTEX_WAKE_OP 5
+>>>>>>> remotes/origin/main
 #define FUTEX_LOCK_PI 6
 #define FUTEX_UNLOCK_PI 7
 #define FUTEX_TRYLOCK_PI 8
 #define FUTEX_WAIT_BITSET 9

+<<<<<<< HEAD
+#define FUTEX_PRIVATE 128
+#define FUTEX_MASK 0x3U
+
+typedef struct {
+    UINTPTR key;           /* private:uvaddr shared:paddr */
+    UINT32 index;          /* hash bucket index */
+    UINT32 pid;            /* private:process id shared:OS_INVALID(-1) */
+    LOS_DL_LIST pendList;  /* point to pendList in TCB struct */
+    LOS_DL_LIST queueList; /* thread list blocked by this lock */
+    LOS_DL_LIST futexList; /* point to the next FutexNode */
+=======
 #define FUTEX_PRIVATE 128 // private fast lock (hashed by virtual address)
 #define FUTEX_MASK 0x3U
 /// Each futex node represents one pended task; the key uniquely identifies a user-mode lock, and nodes sharing a key are chained by queueList to form the queue of tasks blocked on that lock.
@@ -54,6 +73,7 @@ typedef struct {
     LOS_DL_LIST queueList; /* thread list blocked by this lock | tasks waiting for this lock hang off FutexNode.queueList; queueList leads to pendList, and pendList leads to the actual task */
     LOS_DL_LIST futexList; /* point to the next FutexNode | the next futex lock */
+>>>>>>> remotes/origin/main
 } FutexNode;

 extern UINT32 OsFutexInit(VOID);
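The wait/wake pair above follows the Linux futex contract: the kernel only parks and wakes tasks; the locking protocol itself lives in user space. A hypothetical user-side acquire path (UserCas and SysFutexWait are illustrative wrappers, not real APIs, and 1 = free / 0 = held is purely this sketch's convention):

    volatile UINT32 futexWord = 1;               /* 1 = free, 0 = held (sketch convention) */

    VOID LockAcquire(VOID)
    {
        while (!UserCas(&futexWord, 1, 0)) {     /* try to flip free -> held */
            /* Park only while the word still reads 0 (held): the kernel re-reads the
             * word under the bucket lock, so a racing release cannot be missed. */
            (VOID)SysFutexWait(&futexWord, FUTEX_WAIT | FUTEX_PRIVATE, 0, 0 /* wait forever in this sketch */);
        }
    }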
diff --git a/src/kernel_liteos_a/kernel/base/include/los_ipc_container_pri.h b/src/kernel_liteos_a/kernel/base/include/los_ipc_container_pri.h
index d46bd84b..d36d15f3 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_ipc_container_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_ipc_container_pri.h
@@ -42,6 +42,15 @@
 typedef struct TagQueueCB LosQueueCB;
 typedef struct OsMux LosMux;
 typedef LosMux pthread_mutex_t;
 typedef struct ProcessCB LosProcessCB;
+<<<<<<< HEAD
+
+typedef struct IpcContainer {
+    Atomic rc;
+    LosQueueCB *allQueue;
+    LOS_DL_LIST freeQueueList;
+    fd_set queueFdSet;
+    struct mqarray queueTable[LOSCFG_BASE_IPC_QUEUE_LIMIT];
+=======
 // IPC container.
 typedef struct IpcContainer {
     Atomic rc;
@@ -49,6 +58,7 @@ typedef struct IpcContainer {
     LOS_DL_LIST freeQueueList; // list of free queues
     fd_set queueFdSet;
     struct mqarray queueTable[LOSCFG_BASE_IPC_QUEUE_LIMIT]; // queue pool
+>>>>>>> remotes/origin/main
     pthread_mutex_t mqueueMutex;
     struct mqpersonal *mqPrivBuf[MAX_MQ_FD];
     struct shminfo shmInfo;
diff --git a/src/kernel_liteos_a/kernel/base/include/los_memstat_pri.h b/src/kernel_liteos_a/kernel/base/include/los_memstat_pri.h
index 7f3aee4b..4e056499 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_memstat_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_memstat_pri.h
@@ -42,7 +42,11 @@
 #endif /* __cplusplus */

 typedef struct {
+<<<<<<< HEAD
+    UINT32 memUsed;
+=======
     UINT32 memUsed; ///< memory currently used by the task
+>>>>>>> remotes/origin/main
 } TskMemUsedInfo;

 extern VOID OsTaskMemUsedInc(UINT32 usedSize, UINT32 taskID);
@@ -53,7 +57,11 @@ extern VOID OsTaskMemClear(UINT32 taskID);
 #ifdef LOS_MEM_SLAB
 typedef struct {
+<<<<<<< HEAD
+    UINT32 slabUsed;
+=======
     UINT32 slabUsed; ///< memory the task holds via the slab allocator
+>>>>>>> remotes/origin/main
 } TskSlabUsedInfo;

 extern VOID OsTaskSlabUsedInc(UINT32 usedSize, UINT32 taskID);
diff --git a/src/kernel_liteos_a/kernel/base/include/los_mux_pri.h b/src/kernel_liteos_a/kernel/base/include/los_mux_pri.h
index ebab2648..90b4afb7 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_mux_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_mux_pri.h
@@ -1,6 +1,10 @@
 /*
  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+<<<<<<< HEAD
+ * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
+=======
  * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+>>>>>>> remotes/origin/main
  *
  * Redistribution and use in source and binary forms, with or without modification,
  * are permitted provided that the following conditions are met:
diff --git a/src/kernel_liteos_a/kernel/base/include/los_percpu_pri.h b/src/kernel_liteos_a/kernel/base/include/los_percpu_pri.h
index e561c03d..aeacd772 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_percpu_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_percpu_pri.h
@@ -43,9 +43,15 @@
 extern "C" {

 #ifdef LOSCFG_KERNEL_SMP
 typedef enum {
+<<<<<<< HEAD
+    CPU_RUNNING = 0, /* cpu is running */
+    CPU_HALT,        /* cpu in the halt */
+    CPU_EXC          /* cpu in the exc */
+=======
     CPU_RUNNING = 0, ///< cpu is running
     CPU_HALT,        ///< cpu is halted
     CPU_EXC          ///< cpu is in an exception state
+>>>>>>> remotes/origin/main
 } ExcFlag;

 typedef struct {
@@ -55,6 +61,16 @@ typedef struct {
 #endif
 } Percpu;

+<<<<<<< HEAD
+/* the kernel per-cpu structure */
+extern Percpu g_percpu[LOSCFG_KERNEL_CORE_NUM];
+
+STATIC INLINE Percpu *OsPercpuGet(VOID)
+{
+    return &g_percpu[ArchCurrCpuid()];
+}
+
+=======
 /*! the kernel per-cpu structure */
 extern Percpu g_percpu[LOSCFG_KERNEL_CORE_NUM];
 /*! Get the descriptor of the currently running CPU. */
@@ -63,6 +79,7 @@ STATIC INLINE Percpu *OsPercpuGet(VOID)
     return &g_percpu[ArchCurrCpuid()];
 }
 /*! Get the Percpu descriptor of the CPU passed as a parameter. */
+>>>>>>> remotes/origin/main
 STATIC INLINE Percpu *OsPercpuGetByID(UINT32 cpuid)
 {
     return &g_percpu[cpuid];
 }
diff --git a/src/kernel_liteos_a/kernel/base/include/los_pid_container_pri.h b/src/kernel_liteos_a/kernel/base/include/los_pid_container_pri.h
index eed94630..44ef9ab9 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_pid_container_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_pid_container_pri.h
@@ -38,6 +38,31 @@
 typedef struct TagTaskCB LosTaskCB;
 typedef struct ProcessCB LosProcessCB;
 struct ProcessGroup;
 struct Container;
+<<<<<<< HEAD
+
+typedef struct {
+    UINT32 vid;  /* Virtual ID */
+    UINT32 vpid; /* Virtual parent ID */
+    UINTPTR cb;  /* Control block */
+    LosProcessCB *realParent; /* process real parent */
+    LOS_DL_LIST node;
+} ProcessVid;
+
+#define PID_CONTAINER_LEVEL_LIMIT 3
+
+typedef struct PidContainer {
+    Atomic rc;
+    Atomic level;
+    Atomic lock;
+    BOOL referenced;
+    UINT32 containerID;
+    struct PidContainer *parent;
+    struct ProcessGroup *rootPGroup;
+    LOS_DL_LIST tidFreeList;
+    ProcessVid tidArray[LOSCFG_BASE_CORE_TSK_LIMIT];
+    LOS_DL_LIST pidFreeList;
+    ProcessVid pidArray[LOSCFG_BASE_CORE_PROCESS_LIMIT];
+=======
 // Virtual process/task information.
 typedef struct {
     UINT32 vid;  /* Virtual ID */
@@ -61,6 +86,7 @@ typedef struct PidContainer {
     ProcessVid tidArray[LOSCFG_BASE_CORE_TSK_LIMIT];     // pool of virtual tasks
     LOS_DL_LIST pidFreeList;                             // free list of process slots
     ProcessVid pidArray[LOSCFG_BASE_CORE_PROCESS_LIMIT]; // pool of virtual processes
+>>>>>>> remotes/origin/main
 } PidContainer;

 #define OS_PID_CONTAINER_FROM_PCB(processCB) ((processCB)->container->pidContainer)
diff --git a/src/kernel_liteos_a/kernel/base/include/los_process_pri.h b/src/kernel_liteos_a/kernel/base/include/los_process_pri.h
index a0d9d114..f22a76a5 100644
--- a/src/kernel_liteos_a/kernel/base/include/los_process_pri.h
+++ b/src/kernel_liteos_a/kernel/base/include/los_process_pri.h
@@ -1,6 +1,10 @@
 /*
  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+<<<<<<< HEAD
+ * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
+=======
  * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+>>>>>>> remotes/origin/main
  *
  * Redistribution and use in source and binary forms, with or without modification,
  * are permitted provided that the following conditions are met:
@@ -65,6 +69,84 @@ extern "C" {

 #ifdef LOSCFG_SECURITY_CAPABILITY
 #define OS_GROUPS_NUMBER_MAX 256
+<<<<<<< HEAD
+typedef struct {
+    UINT32 userID;
+    UINT32 effUserID;
+    UINT32 gid;
+    UINT32 effGid;
+    UINT32 groupNumber;
+    UINT32 groups[1];
+} User;
+#endif
+
+typedef struct ProcessGroup {
+    UINTPTR pgroupLeader;        /**< Process group leader is the process that created the group */
+    LOS_DL_LIST processList;     /**< List of processes under this process group */
+    LOS_DL_LIST exitProcessList; /**< List of closed processes (zombie processes) under this group */
+    LOS_DL_LIST groupList;       /**< Process group list */
+} ProcessGroup;
+
+typedef struct ProcessCB {
+    CHAR processName[OS_PCB_NAME_LEN]; /**< Process name */
+    UINT32 processID;                  /**< Process ID */
+    UINT16 processStatus;              /**< [15:4] Process Status; [3:0] The number of threads currently
+                                            running in the process */
+    UINT16 consoleID;                  /**< The console id of task belongs */
+    UINT16 processMode;                /**< Kernel Mode:0; User Mode:1; */
+    struct ProcessCB *parentProcess;   /**< Parent process */
+    UINT32 exitCode;                   /**< Process exit status */
+    LOS_DL_LIST pendList;              /**< Block list to which the process belongs */
+    LOS_DL_LIST childrenList;          /**< Children process list */
+    LOS_DL_LIST exitChildList;         /**< Exit children process list */
+    LOS_DL_LIST siblingList;           /**< Linkage in parent's children list */
+    ProcessGroup *pgroup;              /**< Process group to which a process belongs */
+    LOS_DL_LIST subordinateGroupList;  /**< Linkage in group list */
+    LosTaskCB *threadGroup;
+    LOS_DL_LIST threadSiblingList;     /**< List of threads under this process */
+    volatile UINT32 threadNumber;      /**< Number of threads alive under this process */
+    UINT32 threadCount;                /**< Total number of threads created under this process */
+    LOS_DL_LIST waitList;              /**< The process holds the waitList to support wait/waitpid */
+#ifdef LOSCFG_KERNEL_SMP
+    UINT32 timerCpu;                   /**< CPU core on which this task is delayed or pended */
+#endif
+    UINTPTR sigHandler;                /**< Signal handler */
+    sigset_t sigShare;                 /**< Signal share bit */
+#ifdef LOSCFG_KERNEL_LITEIPC
+    ProcIpcInfo *ipcInfo;              /**< Memory pool for lite ipc */
+#endif
+#ifdef LOSCFG_KERNEL_VM
+    LosVmSpace *vmSpace;               /**< VMM space for processes */
+#endif
+#ifdef LOSCFG_FS_VFS
+    struct files_struct *files;        /**< Files held by the process */
+#endif
+    timer_t timerID;                   /**< ITimer */
+
+#ifdef LOSCFG_SECURITY_CAPABILITY
+    User *user;
+    UINT32 capability;
+#endif
+#ifdef LOSCFG_SECURITY_VID
+    TimerIdMap timerIdMap;
+#endif
+#ifdef LOSCFG_DRIVERS_TZDRIVER
+    struct Vnode *execVnode;           /**< Exec bin of the process */
+#endif
+    mode_t umask;
+#ifdef LOSCFG_KERNEL_CPUP
+    OsCpupBase *processCpup;           /**< Process cpu usage */
+#endif
+    struct rlimit *resourceLimit;
+#ifdef LOSCFG_KERNEL_CONTAINER
+    Container *container;
+#ifdef LOSCFG_USER_CONTAINER
+    struct Credentials *credentials;
+#endif
+#endif
+#ifdef LOSCFG_PROC_PROCESS_DIR
+    struct ProcDirEntry *procDir;
+=======
 /*! User descriptor. */
 typedef struct {
     UINT32 userID; ///< user ID in [0, 60000]; 0 is the root user
@@ -145,6 +227,7 @@ typedef struct ProcessCB {
 #endif
 #ifdef LOSCFG_PROC_PROCESS_DIR
     struct ProcDirEntry *procDir; ///< the process's /proc directory entry
+>>>>>>> remotes/origin/main
 #endif
 #ifdef LOSCFG_KERNEL_PLIMITS
     ProcLimiterSet *plimits;
@@ -165,8 +248,13 @@ extern UINT32 g_processMaxNum;
 #define OS_PCB_FROM_TCB(taskCB) ((LosProcessCB *)((taskCB)->processCB))
 #define OS_PCB_FROM_TID(taskID) ((LosProcessCB *)(OS_TCB_FROM_TID(taskID)->processCB))
 #define OS_GET_PGROUP_LEADER(pgroup) ((LosProcessCB *)((pgroup)->pgroupLeader))
+<<<<<<< HEAD
+#define OS_PCB_FROM_SIBLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, siblingList)
+#define OS_PCB_FROM_PENDLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, pendList)
+=======
 #define OS_PCB_FROM_SIBLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, siblingList) ///< recover the LosProcessCB from its siblingList node
 #define OS_PCB_FROM_PENDLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, pendList)   ///< recover the LosProcessCB from its pendList node
+>>>>>>> remotes/origin/main

 /**
  * @ingroup los_process
@@ -206,7 +294,11 @@
  *
  * The process is run out but the resources occupied by the process are not recovered.
  */
+<<<<<<< HEAD
+#define OS_PROCESS_STATUS_ZOMBIES 0x0100U
+=======
 #define OS_PROCESS_STATUS_ZOMBIES 0x0100U ///< process state: zombie
+>>>>>>> remotes/origin/main

 /**
  * @ingroup los_process
  *
  * The process status equal this is process control block unused,
  * coexisting with OS_PROCESS_STATUS_ZOMBIES means that the control block is not recovered.
  */
+<<<<<<< HEAD
+#define OS_PROCESS_FLAG_UNUSED 0x0200U
+=======
 #define OS_PROCESS_FLAG_UNUSED 0x0200U ///< the PCB is unused: its initial state, and what every entry on the free list carries
+>>>>>>> remotes/origin/main

 /**
  * @ingroup los_process
  *
  * The process has been call exit, it only works with multiple cores.
  */
+<<<<<<< HEAD
+#define OS_PROCESS_FLAG_EXIT 0x0400U
+=======
 #define OS_PROCESS_FLAG_EXIT 0x0400U ///< the process has exited and sits on the recycle list waiting for its resources to be reclaimed
+>>>>>>> remotes/origin/main

 /**
  * @ingroup los_process
  *
  * The process is the leader of the process group.
  */
+<<<<<<< HEAD
+#define OS_PROCESS_FLAG_GROUP_LEADER 0x0800U
+=======
 #define OS_PROCESS_FLAG_GROUP_LEADER 0x0800U ///< the process is the leader of its process group
+>>>>>>> remotes/origin/main

 /**
  * @ingroup los_process
  *
  * The process has performed the exec operation.
  */
+<<<<<<< HEAD
+#define OS_PROCESS_FLAG_ALREADY_EXEC 0x1000U
+=======
 #define OS_PROCESS_FLAG_ALREADY_EXEC 0x1000U ///< the process has performed exec; set when loading the ELF
+>>>>>>> remotes/origin/main

 /**
  * @ingroup los_process
  * Flag that indicates the process or process control block status.
  *
  * The process is dying or already dying.
+<<<<<<< HEAD
+ */
+#define OS_PROCESS_STATUS_INACTIVE (OS_PROCESS_FLAG_EXIT | OS_PROCESS_STATUS_ZOMBIES)
+=======
  */
 /// Inactive state: the process carries the EXIT flag or the ZOMBIES state.
 #define OS_PROCESS_STATUS_INACTIVE (OS_PROCESS_FLAG_EXIT | OS_PROCESS_STATUS_ZOMBIES)
+>>>>>>> remotes/origin/main
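The status word mixes these flags with a live-thread count ([15:4] status, [3:0] running threads, per the ProcessCB comment). A small worked example of the encodings above:

    /* An exited, not-yet-recycled process: */
    UINT16 st = OS_PROCESS_FLAG_EXIT | OS_PROCESS_STATUS_ZOMBIES; /* 0x0400 | 0x0100 = 0x0500 */
    /* OS_PROCESS_STATUS_INACTIVE is exactly this pair of bits, so: */
    BOOL inactive = ((st & OS_PROCESS_STATUS_INACTIVE) != 0);     /* TRUE: either bit suffices */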
 /**
  * @ingroup los_process
  * Used to check if the process control block is unused.
  */
+<<<<<<< HEAD
+STATIC INLINE BOOL OsProcessIsUnused(const LosProcessCB *processCB)
+=======
 STATIC INLINE BOOL OsProcessIsUnused(const LosProcessCB *processCB) // is this PCB unused?
+>>>>>>> remotes/origin/main
 {
     return ((processCB->processStatus & OS_PROCESS_FLAG_UNUSED) != 0);
 }
@@ -261,8 +378,13 @@ STATIC INLINE BOOL OsProcessIsUnused(const LosProcessCB *processCB)

 /**
  * @ingroup los_process
  * Used to check if the process is inactive.
+<<<<<<< HEAD
+ */
+STATIC INLINE BOOL OsProcessIsInactive(const LosProcessCB *processCB)
+=======
  */
 /// Inactive test: the PCB is unused, or carries the EXIT/ZOMBIES bits.
 STATIC INLINE BOOL OsProcessIsInactive(const LosProcessCB *processCB) // is this process inactive?
+>>>>>>> remotes/origin/main
 {
     return ((processCB->processStatus & (OS_PROCESS_FLAG_UNUSED | OS_PROCESS_STATUS_INACTIVE)) != 0);
 }
@@ -270,8 +392,13 @@ STATIC INLINE BOOL OsProcessIsInactive(const LosProcessCB *processCB)

 /**
  * @ingroup los_process
  * Used to check if the process is dead.
+<<<<<<< HEAD
+ */
+STATIC INLINE BOOL OsProcessIsDead(const LosProcessCB *processCB)
+=======
  */
 /// Dead test: the process is in the zombie state.
 STATIC INLINE BOOL OsProcessIsDead(const LosProcessCB *processCB) // is this process dead?
+>>>>>>> remotes/origin/main
 {
     return ((processCB->processStatus & OS_PROCESS_STATUS_ZOMBIES) != 0);
 }
@@ -286,6 +413,62 @@ STATIC INLINE BOOL OsProcessIsPGroupLeader(const LosProcessCB *processCB)
     return ((processCB->processStatus & OS_PROCESS_FLAG_GROUP_LEADER) != 0);
 }

+<<<<<<< HEAD
+/**
+ * @ingroup los_process
+ * The highest priority of a kernel mode process.
+ */
+#define OS_PROCESS_PRIORITY_HIGHEST 0
+
+/**
+ * @ingroup los_process
+ * The lowest priority of a kernel mode process
+ */
+#define OS_PROCESS_PRIORITY_LOWEST 31
+
+/**
+ * @ingroup los_process
+ * The highest priority of a user mode process.
+ */
+#define OS_USER_PROCESS_PRIORITY_HIGHEST 10
+
+/**
+ * @ingroup los_process
+ * The lowest priority of a user mode process
+ */
+#define OS_USER_PROCESS_PRIORITY_LOWEST OS_PROCESS_PRIORITY_LOWEST
+
+/**
+ * @ingroup los_process
+ * User state root process default priority
+ */
+#define OS_PROCESS_USERINIT_PRIORITY 28
+
+/**
+ * @ingroup los_process
+ * ID of the kernel idle process
+ */
+#define OS_KERNEL_IDLE_PROCESS_ID 0U
+
+/**
+ * @ingroup los_process
+ * ID of the user root process
+ */
+#define OS_USER_ROOT_PROCESS_ID 1U
+
+/**
+ * @ingroup los_process
+ * ID of the kernel root process
+ */
+#define OS_KERNEL_ROOT_PROCESS_ID 2U
+
+#define OS_TASK_DEFAULT_STACK_SIZE 0x2000
+#define OS_USER_TASK_SYSCALL_STACK_SIZE 0x3000
+#define OS_USER_TASK_STACK_SIZE 0x100000
+
+#define OS_KERNEL_MODE 0x0U
+#define OS_USER_MODE 0x1U
+=======
 #define OS_PROCESS_PRIORITY_HIGHEST 0 ///< highest process priority
@@ -316,14 +499,21 @@ STATIC INLINE BOOL OsProcessIsPGroupLeader(const LosProcessCB *processCB)
 #define OS_KERNEL_MODE 0x0U ///< kernel mode
 #define OS_USER_MODE 0x1U ///< user mode
 /*! Checks for a user-mode process. */
+>>>>>>> remotes/origin/main
 STATIC INLINE BOOL OsProcessIsUserMode(const LosProcessCB *processCB)
 {
     return (processCB->processMode == OS_USER_MODE);
 }
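The two priority bands above nest: kernel tasks may use 0 through 31, while user-mode processes are restricted to 10 through 31 (a smaller number means a higher priority). A quick range check in the obvious way (the helper itself is hypothetical):

    STATIC INLINE BOOL IsValidUserPrio(UINT16 prio)
    {
        /* 10 = OS_USER_PROCESS_PRIORITY_HIGHEST .. 31 = OS_USER_PROCESS_PRIORITY_LOWEST */
        return (prio >= OS_USER_PROCESS_PRIORITY_HIGHEST) &&
               (prio <= OS_USER_PROCESS_PRIORITY_LOWEST);
    }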
+<<<<<<< HEAD
+#define LOS_PRIO_PROCESS 0U
+#define LOS_PRIO_PGRP 1U
+#define LOS_PRIO_USER 2U
+=======
 #define LOS_PRIO_PROCESS 0U ///< addresses a process
 #define LOS_PRIO_PGRP 1U ///< addresses a process group
 #define LOS_PRIO_USER 2U ///< addresses a user
+>>>>>>> remotes/origin/main

 #define OS_USER_PRIVILEGE_PROCESS_GROUP ((UINTPTR)OsGetUserInitProcess())
 #define OS_KERNEL_PROCESS_GROUP ((UINTPTR)OsGetKernelInitProcess())
@@ -333,6 +523,25 @@ STATIC INLINE BOOL OsProcessIsUserMode(const LosProcessCB *processCB)
  * 31    15          8  7          0
  * |     | exit code | core dump | signal |
  */
+<<<<<<< HEAD
+#define OS_PRO_EXIT_OK 0
+
+STATIC INLINE VOID OsProcessExitCodeCoreDumpSet(LosProcessCB *processCB)
+{
+    processCB->exitCode |= 0x80U;
+}
+
+STATIC INLINE VOID OsProcessExitCodeSignalSet(LosProcessCB *processCB, UINT32 signal)
+{
+    processCB->exitCode |= signal & 0x7FU;
+}
+
+STATIC INLINE VOID OsProcessExitCodeSignalClear(LosProcessCB *processCB)
+{
+    processCB->exitCode &= (~0x7FU);
+}
+
+=======
 #define OS_PRO_EXIT_OK 0 ///< the process exited normally
 /// Set bit 7 (the core-dump flag) of the exit code.
 STATIC INLINE VOID OsProcessExitCodeCoreDumpSet(LosProcessCB *processCB)
@@ -350,23 +559,36 @@ STATIC INLINE VOID OsProcessExitCodeSignalClear(LosProcessCB *processCB)
     processCB->exitCode &= (~0x7FU); // clear the low 7 bits
 }
 /// Whether a signal is recorded in the exit code: it defaults to 0, so a zero result of & 0x7FU means none was ever set.
+>>>>>>> remotes/origin/main
 STATIC INLINE BOOL OsProcessExitCodeSignalIsSet(LosProcessCB *processCB)
 {
     return (processCB->exitCode) & 0x7FU;
 }
+<<<<<<< HEAD
+
+=======
 /// Store the exit code in bits 8..15.
+>>>>>>> remotes/origin/main
 STATIC INLINE VOID OsProcessExitCodeSet(LosProcessCB *processCB, UINT32 code)
 {
     processCB->exitCode |= ((code & 0x000000FFU) << 8U) & 0x0000FF00U; /* 8: Move 8 bits to the left, exitCode */
 }
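Read together, the helpers above fill the three fields diagrammed earlier (| exit code | core dump | signal |). A worked example:

    /* Process killed by signal 9 after dumping core, user exit code 1: */
    OsProcessExitCodeSet(processCB, 1);       /* bits 8..15 <- 0x01 */
    OsProcessExitCodeCoreDumpSet(processCB);  /* bit 7      <- 1    */
    OsProcessExitCodeSignalSet(processCB, 9); /* bits 0..6  <- 9    */
    /* exitCode is now (1 << 8) | 0x80 | 9 = 0x0189. */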
 #define OS_PID_CHECK_INVALID(pid) (((UINT32)(pid)) >= g_processMaxNum)
+<<<<<<< HEAD
+
+=======
 /*! Inline helper: is the process ID valid? */
+>>>>>>> remotes/origin/main
 STATIC INLINE BOOL OsProcessIDUserCheckInvalid(UINT32 pid)
 {
     return ((pid >= g_processMaxNum) || (pid == 0));
 }
+<<<<<<< HEAD
+
+=======
 /*! Get the PCB of the current process. */
+>>>>>>> remotes/origin/main
 STATIC INLINE LosProcessCB *OsCurrProcessGet(VOID)
 {
     UINT32 intSave;
@@ -378,7 +600,10 @@ STATIC INLINE LosProcessCB *OsCurrProcessGet(VOID)
 }

 #ifdef LOSCFG_SECURITY_CAPABILITY
+<<<<<<< HEAD
+=======
 /*! Get the user that owns the current process. */
+>>>>>>> remotes/origin/main
 STATIC INLINE User *OsCurrUserGet(VOID)
 {
     User *user = NULL;
@@ -450,14 +675,22 @@ STATIC INLINE UINT32 OsGetRootPid(const LosProcessCB *processCB)

 /*
  * return immediately if no child has exited.
  */
+<<<<<<< HEAD
+#define LOS_WAIT_WNOHANG (1 << 0U)
+=======
 #define LOS_WAIT_WNOHANG (1 << 0U) ///< return immediately instead of blocking when no child has exited; if one has, return its PID.
+>>>>>>> remotes/origin/main

 /*
  * return if a child has stopped (but not traced via ptrace(2)).
  * Status for traced children which have stopped is provided even
  * if this option is not specified.
  */
+<<<<<<< HEAD
+#define LOS_WAIT_WUNTRACED (1 << 1U)
+=======
 #define LOS_WAIT_WUNTRACED (1 << 1U) ///< also return when a child has stopped, regardless of its exit status (untraced).
+>>>>>>> remotes/origin/main
 #define LOS_WAIT_WSTOPPED (1 << 1U)

 /*
@@ -469,7 +702,11 @@ STATIC INLINE UINT32 OsGetRootPid(const LosProcessCB *processCB)
  * return if a stopped child has been resumed by delivery of SIGCONT.
  * (For Linux-only options, see below.)
  */
+<<<<<<< HEAD
+#define LOS_WAIT_WCONTINUED (1 << 3U)
+=======
 #define LOS_WAIT_WCONTINUED (1 << 3U) ///< also report a child that was resumed by SIGCONT (the "continued" state).
+>>>>>>> remotes/origin/main

 /*
  * Leave the child in a waitable state;
@@ -480,21 +717,44 @@

 /*
  * Indicates that you are already in a wait state
  */
+<<<<<<< HEAD
+#define OS_PROCESS_WAIT (1 << 15U)
+=======
 #define OS_PROCESS_WAIT (1 << 15U) ///< the caller is already in a wait state
+>>>>>>> remotes/origin/main

 /*
  * Wait for any child process to finish
  */
+<<<<<<< HEAD
+#define OS_PROCESS_WAIT_ANY OS_TASK_WAIT_ANYPROCESS
+=======
 #define OS_PROCESS_WAIT_ANY OS_TASK_WAIT_ANYPROCESS ///< wait for any child process to finish
+>>>>>>> remotes/origin/main

 /*
  * Wait for the child process specified by the pid to finish
  */
+<<<<<<< HEAD
+#define OS_PROCESS_WAIT_PRO OS_TASK_WAIT_PROCESS
+=======
 #define OS_PROCESS_WAIT_PRO OS_TASK_WAIT_PROCESS ///< wait for the child given by pid
+>>>>>>> remotes/origin/main

 /*
  * Waits for any child process in the specified process group to finish.
  */
+<<<<<<< HEAD
+#define OS_PROCESS_WAIT_GID OS_TASK_WAIT_GID
+
+#define OS_PROCESS_INFO_ALL 1
+#define OS_PROCESS_DEFAULT_UMASK 0022
+
+extern UINTPTR __user_init_entry;
+extern UINTPTR __user_init_bss;
+extern UINTPTR __user_init_end;
+extern UINTPTR __user_init_load_addr;
+=======
 #define OS_PROCESS_WAIT_GID OS_TASK_WAIT_GID ///< wait for any child in the given process group

 #define OS_PROCESS_INFO_ALL 1
@@ -504,6 +764,7 @@
 extern UINTPTR __user_init_entry;     ///< entry point of the first user-mode process (init)
 extern UINTPTR __user_init_bss;       ///< see LITE_USER_SEC_BSS; assigned by liteos.ld
 extern UINTPTR __user_init_end;       ///< end address of the init process's user-space image
 extern UINTPTR __user_init_load_addr; ///< load address of the init process, assigned by the linker
+>>>>>>> remotes/origin/main
 extern UINT32 OsProcessInit(VOID);
 extern UINT32 OsSystemProcessCreate(VOID);
 extern VOID OsProcessNaturalExit(LosProcessCB *processCB, UINT32 status);
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_event.c b/src/kernel_liteos_a/kernel/base/ipc/los_event.c
index ccea383b..42a02f6f 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_event.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_event.c
@@ -1,6 +1,10 @@
 /*
  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+<<<<<<< HEAD
+ * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
+=======
  * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+>>>>>>> remotes/origin/main
  *
  * Redistribution and use in source and binary forms, with or without modification,
  * are permitted provided that the following conditions are met:
@@ -40,7 +44,10 @@
 #include "los_exc.h"
 #endif

+<<<<<<< HEAD
+=======
 /// Initialize an event control block.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventInit(PEVENT_CB_S eventCB)
 {
     UINT32 intSave;
@@ -49,6 +56,16 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventInit(PEVENT_CB_S eventCB)
         return LOS_ERRNO_EVENT_PTR_NULL;
     }

+<<<<<<< HEAD
+    intSave = LOS_IntLock();
+    eventCB->uwEventID = 0;
+    LOS_ListInit(&eventCB->stEventList);
+    LOS_IntRestore(intSave);
+    OsHookCall(LOS_HOOK_TYPE_EVENT_INIT, eventCB);
+    return LOS_OK;
+}
+
+=======
     intSave = LOS_IntLock();             // lock interrupts
     eventCB->uwEventID = 0;              // clear the event bits
     LOS_ListInit(&eventCB->stEventList); // initialize the waiter list
@@ -57,6 +74,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventInit(PEVENT_CB_S eventCB)
     return LOS_OK;
 }
 /// Validate the event parameters.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT STATIC UINT32 OsEventParamCheck(const VOID *ptr, UINT32 eventMask, UINT32 mode)
 {
     if (ptr == NULL) {
@@ -78,11 +96,26 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventParamCheck(const VOID *ptr, UINT32 eventMa
     }
     return LOS_OK;
 }
+<<<<<<< HEAD
+
+=======
 /// Given the event word, mask, and mode, report whether the expected events have occurred.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT UINT32 OsEventPoll(UINT32 *eventID, UINT32 eventMask, UINT32 mode)
 {
     UINT32 ret = 0;

+<<<<<<< HEAD
+    LOS_ASSERT(OsIntLocked());
+    LOS_ASSERT(LOS_SpinHeld(&g_taskSpin));
+
+    if (mode & LOS_WAITMODE_OR) {
+        if ((*eventID & eventMask) != 0) {
+            ret = *eventID & eventMask;
+        }
+    } else {
+        if ((eventMask != 0) && (eventMask == (*eventID & eventMask))) {
+=======
     LOS_ASSERT(OsIntLocked());             // interrupts must already be locked
     LOS_ASSERT(LOS_SpinHeld(&g_taskSpin)); // the task spinlock must be held

@@ -92,27 +125,54 @@ LITE_OS_SEC_TEXT UINT32 OsEventPoll(UINT32 *eventID, UINT32 eventMask, UINT32 mo
         }
     } else { // waiting for ALL of the events
         if ((eventMask != 0) && (eventMask == (*eventID & eventMask))) { // every requested bit must be present
+>>>>>>> remotes/origin/main
             ret = *eventID & eventMask;
         }
     }

+<<<<<<< HEAD
+    if (ret && (mode & LOS_WAITMODE_CLR)) {
+=======
     if (ret && (mode & LOS_WAITMODE_CLR)) { // clear the events once they are read
+>>>>>>> remotes/origin/main
         *eventID = *eventID & ~ret;
     }

     return ret;
 }
+<<<<<<< HEAD
+
+=======
 /// Pre-checks for an event read.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadCheck(const PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode)
 {
     UINT32 ret;
     LosTaskCB *runTask = NULL;
+<<<<<<< HEAD
+    ret = OsEventParamCheck(eventCB, eventMask, mode);
+=======
     ret = OsEventParamCheck(eventCB, eventMask, mode); // validate the parameters
+>>>>>>> remotes/origin/main
     if (ret != LOS_OK) {
         return ret;
     }

+<<<<<<< HEAD
+    if (OS_INT_ACTIVE) {
+        return LOS_ERRNO_EVENT_READ_IN_INTERRUPT;
+    }
+
+    runTask = OsCurrTaskGet();
+    if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
+        OsBackTrace();
+        return LOS_ERRNO_EVENT_READ_IN_SYSTEM_TASK;
+    }
+    return LOS_OK;
+}
+
+=======
     if (OS_INT_ACTIVE) { // an interrupt is being handled
         return LOS_ERRNO_EVENT_READ_IN_INTERRUPT; // events cannot be read from interrupt context
     }
@@ -125,6 +185,7 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadCheck(const PEVENT_CB_S eventCB, UINT3
     return LOS_OK;
 }
 /// Implementation of the event read; the timeout is relative, in ticks.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadImp(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode,
                                               UINT32 timeout, BOOL once)
 {
@@ -133,6 +194,24 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadImp(PEVENT_CB_S eventCB, UINT32 eventM
     OsHookCall(LOS_HOOK_TYPE_EVENT_READ, eventCB, eventMask, mode, timeout);

     if (once == FALSE) {
+<<<<<<< HEAD
+        ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);
+    }
+
+    if (ret == 0) {
+        if (timeout == 0) {
+            return ret;
+        }
+
+        if (!OsPreemptableInSched()) {
+            return LOS_ERRNO_EVENT_READ_IN_LOCK;
+        }
+
+        runTask->eventMask = eventMask;
+        runTask->eventMode = mode;
+        runTask->taskEvent = eventCB;
+        OsTaskWaitSetPendMask(OS_TASK_WAIT_EVENT, eventMask, timeout);
+=======
         ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode); // check whether the expected events have occurred
     }

@@ -149,28 +228,56 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadImp(PEVENT_CB_S eventCB, UINT32 eventM
         runTask->eventMode = mode;    // wait mode
         runTask->taskEvent = eventCB; // the event control block
         OsTaskWaitSetPendMask(OS_TASK_WAIT_EVENT, eventMask, timeout); // park the task to wait for the event, recording the mask and timeout
+>>>>>>> remotes/origin/main
         ret = runTask->ops->wait(runTask, &eventCB->stEventList, timeout);
         if (ret == LOS_ERRNO_TSK_TIMEOUT) {
             return LOS_ERRNO_EVENT_READ_TIMEOUT;
         }

+<<<<<<< HEAD
+        ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);
+    }
+    return ret;
+}
+
+=======
         ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode); // re-check the events after waking
     }
     return ret;
 }
 /// Read the given event types; the timeout is relative, in ticks.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT STATIC UINT32 OsEventRead(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode, UINT32 timeout,
                                            BOOL once)
 {
     UINT32 ret;
     UINT32 intSave;

+<<<<<<< HEAD
+    ret = OsEventReadCheck(eventCB, eventMask, mode);
+=======
     ret = OsEventReadCheck(eventCB, eventMask, mode); // pre-read checks
+>>>>>>> remotes/origin/main
     if (ret != LOS_OK) {
         return ret;
     }

     SCHEDULER_LOCK(intSave);
+<<<<<<< HEAD
+    ret = OsEventReadImp(eventCB, eventMask, mode, timeout, once);
+    SCHEDULER_UNLOCK(intSave);
+    return ret;
+}
+
+LITE_OS_SEC_TEXT STATIC UINT8 OsEventResume(LosTaskCB *resumedTask, const PEVENT_CB_S eventCB, UINT32 events)
+{
+    UINT8 exitFlag = 0;
+
+    if (((resumedTask->eventMode & LOS_WAITMODE_OR) && ((resumedTask->eventMask & events) != 0)) ||
+        ((resumedTask->eventMode & LOS_WAITMODE_AND) &&
+         ((resumedTask->eventMask & eventCB->uwEventID) == resumedTask->eventMask))) {
+        exitFlag = 1;
+=======
     ret = OsEventReadImp(eventCB, eventMask, mode, timeout, once); // the read implementation
     SCHEDULER_UNLOCK(intSave);
     return ret;
@@ -184,6 +291,7 @@ LITE_OS_SEC_TEXT STATIC UINT8 OsEventResume(LosTaskCB *resumedTask, const PEVENT
         ((resumedTask->eventMode & LOS_WAITMODE_AND) &&
          ((resumedTask->eventMask & eventCB->uwEventID) == resumedTask->eventMask))) { // handles both the AND and the OR wait modes
         exitFlag = 1;
+>>>>>>> remotes/origin/main

         resumedTask->taskEvent = NULL;
         OsTaskWakeClearPendMask(resumedTask);
@@ -192,13 +300,39 @@ LITE_OS_SEC_TEXT STATIC UINT8 OsEventResume(LosTaskCB *resumedTask, const PEVENT
     return exitFlag;
 }
+<<<<<<< HEAD
+
+=======
 /// Write events without taking the scheduler lock (the caller already holds it).
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT VOID OsEventWriteUnsafe(PEVENT_CB_S eventCB, UINT32 events, BOOL once, UINT8 *exitFlag)
 {
     LosTaskCB *resumedTask = NULL;
     LosTaskCB *nextTask = NULL;
     BOOL schedFlag = FALSE;
     OsHookCall(LOS_HOOK_TYPE_EVENT_WRITE, eventCB, events);
+<<<<<<< HEAD
+    eventCB->uwEventID |= events;
+    if (!LOS_ListEmpty(&eventCB->stEventList)) {
+        for (resumedTask = LOS_DL_LIST_ENTRY((&eventCB->stEventList)->pstNext, LosTaskCB, pendList);
+             &resumedTask->pendList != &eventCB->stEventList;) {
+            nextTask = LOS_DL_LIST_ENTRY(resumedTask->pendList.pstNext, LosTaskCB, pendList);
+            if (OsEventResume(resumedTask, eventCB, events)) {
+                schedFlag = TRUE;
+            }
+            if (once == TRUE) {
+                break;
+            }
+            resumedTask = nextTask;
+        }
+    }
+
+    if ((exitFlag != NULL) && (schedFlag == TRUE)) {
+        *exitFlag = 1;
+    }
+}
+
+=======
     eventCB->uwEventID |= events; // set the corresponding bits
     if (!LOS_ListEmpty(&eventCB->stEventList)) { // walk the waiter list and process the pending tasks
         for (resumedTask = LOS_DL_LIST_ENTRY((&eventCB->stEventList)->pstNext, LosTaskCB, pendList);
@@ -219,6 +353,7 @@ LITE_OS_SEC_TEXT VOID OsEventWriteUnsafe(PEVENT_CB_S eventCB, UINT32 events, BOO
         }
     }
 }
 /// Write the given events.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT STATIC UINT32 OsEventWrite(PEVENT_CB_S eventCB, UINT32 events, BOOL once)
 {
     UINT32 intSave;
@@ -232,6 +367,19 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventWrite(PEVENT_CB_S eventCB, UINT32 events,
         return LOS_ERRNO_EVENT_SETBIT_INVALID;
     }

+<<<<<<< HEAD
+    SCHEDULER_LOCK(intSave);
+    OsEventWriteUnsafe(eventCB, events, once, &exitFlag);
+    SCHEDULER_UNLOCK(intSave);
+
+    if (exitFlag == 1) {
+        LOS_MpSchedule(OS_MP_CPU_ALL);
+        LOS_Schedule();
+    }
+    return LOS_OK;
+}
+
+=======
     SCHEDULER_LOCK(intSave);   // lock the scheduler
     OsEventWriteUnsafe(eventCB, events, once, &exitFlag); // write the events
     SCHEDULER_UNLOCK(intSave); // unlock the scheduler
@@ -243,43 +391,72 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventWrite(PEVENT_CB_S eventCB, UINT32 events,
     return LOS_OK;
 }
 /// Public wrapper: report whether the expected events have occurred.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT UINT32 LOS_EventPoll(UINT32 *eventID, UINT32 eventMask, UINT32 mode)
 {
     UINT32 ret;
     UINT32 intSave;
+<<<<<<< HEAD
+
+=======
     // validate the event parameters
+>>>>>>> remotes/origin/main
     ret = OsEventParamCheck((VOID *)eventID, eventMask, mode);
     if (ret != LOS_OK) {
         return ret;
     }

+<<<<<<< HEAD
+    SCHEDULER_LOCK(intSave);
+=======
     SCHEDULER_LOCK(intSave); // take the task spinlock
+>>>>>>> remotes/origin/main
     ret = OsEventPoll(eventID, eventMask, mode);
     SCHEDULER_UNLOCK(intSave);
     return ret;
 }
+<<<<<<< HEAD
+
+=======
 /// Read the given event types; the timeout is relative, in ticks.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT UINT32 LOS_EventRead(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode, UINT32 timeout)
 {
     return OsEventRead(eventCB, eventMask, mode, timeout, FALSE);
 }
+<<<<<<< HEAD
+
+=======
 /// Write the given event types.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT UINT32 LOS_EventWrite(PEVENT_CB_S eventCB, UINT32 events)
 {
     return OsEventWrite(eventCB, events, FALSE);
 }
+<<<<<<< HEAD
+
+=======
 /// One-shot event read.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_MINOR UINT32 OsEventReadOnce(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode,
                                               UINT32 timeout)
 {
     return OsEventRead(eventCB, eventMask, mode, timeout, TRUE);
 }
+<<<<<<< HEAD
+
+=======
 /// One-shot event write (wakes at most one waiter).
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_MINOR UINT32 OsEventWriteOnce(PEVENT_CB_S eventCB, UINT32 events)
 {
     return OsEventWrite(eventCB, events, TRUE);
 }
+<<<<<<< HEAD
+
+=======
 /// Destroy the given event control block.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventDestroy(PEVENT_CB_S eventCB)
 {
     UINT32 intSave;
@@ -300,7 +477,11 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventDestroy(PEVENT_CB_S eventCB)
     OsHookCall(LOS_HOOK_TYPE_EVENT_DESTROY, eventCB);
     return LOS_OK;
 }
+<<<<<<< HEAD
+
+=======
 /// Clear the given event bits.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_MINOR UINT32 LOS_EventClear(PEVENT_CB_S eventCB, UINT32 eventMask)
 {
     UINT32 intSave;
@@ -315,7 +496,11 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_EventClear(PEVENT_CB_S eventCB, UINT32 eventMa
     return LOS_OK;
 }

+<<<<<<< HEAD
+
+=======
 /// Conditional event read.
+>>>>>>> remotes/origin/main
 #ifdef LOSCFG_COMPAT_POSIX
 LITE_OS_SEC_TEXT UINT32 OsEventReadWithCond(const EventCond *cond, PEVENT_CB_S eventCB,
                                             UINT32 eventMask, UINT32 mode, UINT32 timeout)
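OsEventPoll above implements the three wait-mode bits used throughout this file; LOS_EventPoll merely adds locking around it. A worked illustration of the semantics (the values are chosen for the example):

    UINT32 id = 0x5;                                        /* bits 0 and 2 set */
    UINT32 r1 = LOS_EventPoll(&id, 0x6, LOS_WAITMODE_OR);   /* -> 0x4: any overlap satisfies OR */
    UINT32 r2 = LOS_EventPoll(&id, 0x6, LOS_WAITMODE_AND);  /* -> 0: bit 1 is missing, AND fails */
    UINT32 r3 = LOS_EventPoll(&id, 0x5,
                              LOS_WAITMODE_AND | LOS_WAITMODE_CLR); /* -> 0x5, and id is cleared to 0 */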
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_futex.c b/src/kernel_liteos_a/kernel/base/ipc/los_futex.c
index ea69c554..ce2f7288 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_futex.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_futex.c
@@ -1,6 +1,10 @@
 /*
  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+<<<<<<< HEAD
+ * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
+=======
  * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+>>>>>>> remotes/origin/main
  *
  * Redistribution and use in source and binary forms, with or without modification,
  * are permitted provided that the following conditions are met:
@@ -43,6 +47,30 @@

 #ifdef LOSCFG_KERNEL_VM

+<<<<<<< HEAD
+#define OS_FUTEX_FROM_FUTEXLIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, futexList)
+#define OS_FUTEX_FROM_QUEUELIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, queueList)
+#define OS_FUTEX_KEY_BASE USER_ASPACE_BASE
+#define OS_FUTEX_KEY_MAX (USER_ASPACE_BASE + USER_ASPACE_SIZE)
+
+/* private: 0~63 hash index_num
+ * shared: 64~79 hash index_num */
+#define FUTEX_INDEX_PRIVATE_MAX 64
+#define FUTEX_INDEX_SHARED_MAX 16
+#define FUTEX_INDEX_MAX (FUTEX_INDEX_PRIVATE_MAX + FUTEX_INDEX_SHARED_MAX)
+
+#define FUTEX_INDEX_SHARED_POS FUTEX_INDEX_PRIVATE_MAX
+#define FUTEX_HASH_PRIVATE_MASK (FUTEX_INDEX_PRIVATE_MAX - 1)
+#define FUTEX_HASH_SHARED_MASK (FUTEX_INDEX_SHARED_MAX - 1)
+
+typedef struct {
+    LosMux listLock;
+    LOS_DL_LIST lockList;
+} FutexHash;
+
+FutexHash g_futexHash[FUTEX_INDEX_MAX];
+
+=======
 #define OS_FUTEX_FROM_FUTEXLIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, futexList) // recover the FutexNode from its futexList link
 #define OS_FUTEX_FROM_QUEUELIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, queueList) // recover the FutexNode from its queueList link
 #define OS_FUTEX_KEY_BASE USER_ASPACE_BASE // base of the process user address space
@@ -67,6 +95,7 @@ typedef struct {

 FutexHash g_futexHash[FUTEX_INDEX_MAX]; ///< the 80 hash buckets
 /// Thin wrapper around the mutex lock.
+>>>>>>> remotes/origin/main
 STATIC INT32 OsFutexLock(LosMux *lock)
 {
     UINT32 ret = LOS_MuxLock(lock, LOS_WAIT_FOREVER);
@@ -86,15 +115,26 @@ STATIC INT32 OsFutexUnlock(LosMux *lock)
     }
     return LOS_OK;
 }
+<<<<<<< HEAD
+
+=======
 ///< Initialize the futex (fast user-space mutex) module.
+>>>>>>> remotes/origin/main
 UINT32 OsFutexInit(VOID)
 {
     INT32 count;
     UINT32 ret;
+<<<<<<< HEAD
+
+    for (count = 0; count < FUTEX_INDEX_MAX; count++) {
+        LOS_ListInit(&g_futexHash[count].lockList);
+        ret = LOS_MuxInit(&(g_futexHash[count].listLock), NULL);
+=======
     // initialize the 80 hash buckets
     for (count = 0; count < FUTEX_INDEX_MAX; count++) {
         LOS_ListInit(&g_futexHash[count].lockList); // init the doubly linked list that will hold FutexNodes
         ret = LOS_MuxInit(&(g_futexHash[count].listLock), NULL); // init the bucket mutex
+>>>>>>> remotes/origin/main
         if (ret) {
             return ret;
         }
@@ -103,7 +143,11 @@ UINT32 OsFutexInit(VOID)
     return LOS_OK;
 }

+<<<<<<< HEAD
+LOS_MODULE_INIT(OsFutexInit, LOS_INIT_LEVEL_KMOD_EXTENDED);
+=======
 LOS_MODULE_INIT(OsFutexInit, LOS_INIT_LEVEL_KMOD_EXTENDED); ///< register the futex module for module init
+>>>>>>> remotes/origin/main

 #ifdef LOS_FUTEX_DEBUG
 STATIC VOID OsFutexShowTaskNodeAttr(const LOS_DL_LIST *futexList)
@@ -154,19 +198,41 @@ VOID OsFutexHashShow(VOID)
     }
 }
 #endif
+<<<<<<< HEAD
+
+=======
 /// Derive the hash key from the user-space address.
+>>>>>>> remotes/origin/main
 STATIC INLINE UINTPTR OsFutexFlagsToKey(const UINT32 *userVaddr, const UINT32 flags)
 {
     UINTPTR futexKey;

     if (flags & FUTEX_PRIVATE) {
+<<<<<<< HEAD
+        futexKey = (UINTPTR)userVaddr;
+    } else {
+        futexKey = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);
+=======
         futexKey = (UINTPTR)userVaddr; // private lock: hash the virtual address
     } else {
         futexKey = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr); // shared lock: hash the physical address
+>>>>>>> remotes/origin/main
     }

     return futexKey;
 }
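Key derivation and bucket selection are a two-step hash. A worked sketch using the calls shown in this file (the address is a made-up example):

    /* Private lock at user VA 0x20001000: the key is the VA itself. */
    UINTPTR key  = 0x20001000;
    UINT32 index = LOS_HashFNV32aBuf(&key, sizeof(UINTPTR), FNV1_32A_INIT);
    index &= FUTEX_HASH_PRIVATE_MASK;  /* private locks land in buckets 0..63 */
    /* A shared lock hashes the physical address instead and lands in 64..79: */
    /* index = (hash & FUTEX_HASH_SHARED_MASK) + FUTEX_INDEX_SHARED_POS;      */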
+<<<<<<< HEAD
+
+STATIC INLINE UINT32 OsFutexKeyToIndex(const UINTPTR futexKey, const UINT32 flags)
+{
+    UINT32 index = LOS_HashFNV32aBuf(&futexKey, sizeof(UINTPTR), FNV1_32A_INIT);
+
+    if (flags & FUTEX_PRIVATE) {
+        index &= FUTEX_HASH_PRIVATE_MASK;
+    } else {
+        index &= FUTEX_HASH_SHARED_MASK;
+        index += FUTEX_INDEX_SHARED_POS;
+=======
 /// Map a hash key to a bucket index.
 STATIC INLINE UINT32 OsFutexKeyToIndex(const UINTPTR futexKey, const UINT32 flags)
 {
@@ -177,10 +243,21 @@ STATIC INLINE UINT32 OsFutexKeyToIndex(const UINTPTR futexKey, const UINT32 flag
     } else {
         index &= FUTEX_HASH_SHARED_MASK;
         index += FUTEX_INDEX_SHARED_POS; // shared-lock index, pinned to buckets 64..79
+>>>>>>> remotes/origin/main
     }

     return index;
 }
+<<<<<<< HEAD
+
+STATIC INLINE VOID OsFutexSetKey(UINTPTR futexKey, UINT32 flags, FutexNode *node)
+{
+    node->key = futexKey;
+    node->index = OsFutexKeyToIndex(futexKey, flags);
+    node->pid = (flags & FUTEX_PRIVATE) ? LOS_GetCurrProcessID() : OS_INVALID;
+}
+
+=======
 /// Set the futex hash key.
 STATIC INLINE VOID OsFutexSetKey(UINTPTR futexKey, UINT32 flags, FutexNode *node)
 {
@@ -189,12 +266,26 @@ STATIC INLINE VOID OsFutexSetKey(UINTPTR futexKey, UINT32 flags, FutexNode *node
     node->pid = (flags & FUTEX_PRIVATE) ? LOS_GetCurrProcessID() : OS_INVALID; // record the PID; a shared futex node carries no process ID
 }
 // Tear down the given node.
+>>>>>>> remotes/origin/main
 STATIC INLINE VOID OsFutexDeinitFutexNode(FutexNode *node)
 {
     node->index = OS_INVALID_VALUE;
     node->pid = 0;
     LOS_ListDelete(&node->queueList);
 }
+<<<<<<< HEAD
+
+STATIC INLINE VOID OsFutexReplaceQueueListHeadNode(FutexNode *oldHeadNode, FutexNode *newHeadNode)
+{
+    LOS_DL_LIST *futexList = oldHeadNode->futexList.pstPrev;
+    LOS_ListDelete(&oldHeadNode->futexList);
+    LOS_ListHeadInsert(futexList, &newHeadNode->futexList);
+    if ((newHeadNode->queueList.pstNext == NULL) || (newHeadNode->queueList.pstPrev == NULL)) {
+        LOS_ListInit(&newHeadNode->queueList);
+    }
+}
+
+=======
 /// Swap the old and new head nodes' positions on futexList.
 STATIC INLINE VOID OsFutexReplaceQueueListHeadNode(FutexNode *oldHeadNode, FutexNode *newHeadNode)
 {
@@ -206,11 +297,16 @@ STATIC INLINE VOID OsFutexReplaceQueueListHeadNode(FutexNode *oldHeadNode, Futex
     }
 }
 /// Unhook the given node from futexList.
+>>>>>>> remotes/origin/main
 STATIC INLINE VOID OsFutexDeleteKeyFromFutexList(FutexNode *node)
 {
     LOS_ListDelete(&node->futexList);
 }
+<<<<<<< HEAD
+
+=======
 /// Remove a futex node from its hash bucket.
+>>>>>>> remotes/origin/main
 STATIC VOID OsFutexDeleteKeyNodeFromHash(FutexNode *node, BOOL isDeleteHead, FutexNode **headNode, BOOL *queueFlags)
 {
     FutexNode *nextNode = NULL;
@@ -219,8 +315,13 @@ STATIC VOID OsFutexDeleteKeyNodeFromHash(FutexNode *node, BOOL isDeleteHead, Fut
         return;
     }

+<<<<<<< HEAD
+    if (LOS_ListEmpty(&node->queueList)) {
+        OsFutexDeleteKeyFromFutexList(node);
+=======
     if (LOS_ListEmpty(&node->queueList)) { // no task is waiting on the lock
         OsFutexDeleteKeyFromFutexList(node); // unhook it from the futex list
+>>>>>>> remotes/origin/main
         if (queueFlags != NULL) {
             *queueFlags = TRUE;
         }
@@ -228,10 +329,17 @@ STATIC VOID OsFutexDeleteKeyNodeFromHash(FutexNode *node, BOOL isDeleteHead, Fut
     }

     /* FutexList is not NULL, but the header node of queueList */
+<<<<<<< HEAD
+    if (node->futexList.pstNext != NULL) {
+        if (isDeleteHead == TRUE) {
+            nextNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_FIRST(&node->queueList));
+            OsFutexReplaceQueueListHeadNode(node, nextNode);
+=======
     if (node->futexList.pstNext != NULL) { // this is a head node
         if (isDeleteHead == TRUE) { // should the head be removed?
            nextNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_FIRST(&node->queueList)); // take the first queued node
            OsFutexReplaceQueueListHeadNode(node, nextNode); // and swap the two nodes
+>>>>>>> remotes/origin/main
             if (headNode != NULL) {
                 *headNode = nextNode;
             }
@@ -244,22 +352,38 @@
 EXIT:
     OsFutexDeinitFutexNode(node);
     return;
 }
+<<<<<<< HEAD
+
+VOID OsFutexNodeDeleteFromFutexHash(FutexNode *node, BOOL isDeleteHead, FutexNode **headNode, BOOL *queueFlags)
+{
+    FutexHash *hashNode = NULL;
+
+=======
 /// Remove a futex from its hash bucket.
 VOID OsFutexNodeDeleteFromFutexHash(FutexNode *node, BOOL isDeleteHead, FutexNode **headNode, BOOL *queueFlags)
 {
     FutexHash *hashNode = NULL;
     // map the key to a bucket number
+>>>>>>> remotes/origin/main
     UINT32 index = OsFutexKeyToIndex(node->key, (node->pid == OS_INVALID) ? 0 : FUTEX_PRIVATE);
     if (index >= FUTEX_INDEX_MAX) {
         return;
     }

+<<<<<<< HEAD
+    hashNode = &g_futexHash[index];
+=======
     hashNode = &g_futexHash[index]; // locate the hash bucket
+>>>>>>> remotes/origin/main
     if (OsMuxLockUnsafe(&hashNode->listLock, LOS_WAIT_FOREVER)) {
         return;
     }

+<<<<<<< HEAD
+    if (node->index != index) {
+=======
     if (node->index != index) { // the node's bucket number must match the hash bucket
+>>>>>>> remotes/origin/main
         goto EXIT;
     }
@@ -272,6 +396,10 @@
 EXIT:
     return;
 }

+<<<<<<< HEAD
+
+=======
+>>>>>>> remotes/origin/main
 STATIC FutexNode *OsFutexDeleteAlreadyWakeTaskAndGetNext(const FutexNode *node, FutexNode **headNode, BOOL isDeleteHead)
 {
     FutexNode *tempNode = (FutexNode *)node;
@@ -293,7 +421,11 @@ STATIC FutexNode *OsFutexDeleteAlreadyWakeTaskAndGetNext(const FutexNode *node,
     return tempNode;
 }
+<<<<<<< HEAD
+
+=======
 /// Insert a new futex key into the hash bucket; only a genuinely new key is inserted, since several FutexNodes can share one key.
+>>>>>>> remotes/origin/main
 STATIC VOID OsFutexInsertNewFutexKeyToHash(FutexNode *node)
 {
     FutexNode *headNode = NULL;
@@ -323,16 +455,27 @@ STATIC VOID OsFutexInsertNewFutexKeyToHash(FutexNode *node)
          futexList != &(hashNode->lockList);
          futexList = futexList->pstNext) {
         headNode = OS_FUTEX_FROM_FUTEXLIST(futexList);
+<<<<<<< HEAD
+        if (node->key <= headNode->key) {
+            LOS_ListTailInsert(&(headNode->futexList), &(node->futexList));
+            break;
+        }
+=======
         if (node->key <= headNode->key) {
             LOS_ListTailInsert(&(headNode->futexList), &(node->futexList));
             break;
         }
+>>>>>>> remotes/origin/main
     }

 EXIT:
     return;
 }
+<<<<<<< HEAD
+
+=======
+>>>>>>> remotes/origin/main
 STATIC INT32 OsFutexInsertFindFormBackToFront(LOS_DL_LIST *queueList, const LosTaskCB *runTask, FutexNode *node)
 {
     LOS_DL_LIST *listHead = queueList;
@@ -408,15 +551,49 @@ STATIC INT32 OsFutexRecycleAndFindHeadNode(FutexNode *headNode, FutexNode *node,
     return LOS_OK;
 }

+<<<<<<< HEAD
+
+STATIC INT32 OsFutexInsertTasktoPendList(FutexNode **firstNode, FutexNode *node, const LosTaskCB *run)
+{
+    LosTaskCB *taskHead = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&((*firstNode)->pendList)));
+=======
 ///< Queue the futex node on the pend list, ordered by task priority.
 STATIC INT32 OsFutexInsertTasktoPendList(FutexNode **firstNode, FutexNode *node, const LosTaskCB *run)
 {
     LosTaskCB *taskHead = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&((*firstNode)->pendList))); // the first task on the pend list
+>>>>>>> remotes/origin/main
     LOS_DL_LIST *queueList = &((*firstNode)->queueList);

     INT32 ret1 = OsSchedParamCompare(run, taskHead);
     if (ret1 < 0) {
         /* The one with the highest priority is inserted at the top of the queue */
+<<<<<<< HEAD
+        LOS_ListTailInsert(queueList, &(node->queueList));
+        OsFutexReplaceQueueListHeadNode(*firstNode, node);
+        *firstNode = node;
+        return LOS_OK;
+    }
+
+    if (LOS_ListEmpty(queueList) && (ret1 >= 0)) {
+        /* Insert the next position in the queue with equal priority */
+        LOS_ListHeadInsert(queueList, &(node->queueList));
+        return LOS_OK;
+    }
+
+    FutexNode *tailNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_LAST(queueList));
+    LosTaskCB *taskTail = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(tailNode->pendList)));
+    INT32 ret2 = OsSchedParamCompare(taskTail, run);
+    if ((ret2 <= 0) || (ret1 > ret2)) {
+        return OsFutexInsertFindFormBackToFront(queueList, run, node);
+    }
+
+    return OsFutexInsertFindFromFrontToBack(queueList, run, node);
+}
+
+STATIC FutexNode *OsFindFutexNode(const FutexNode *node)
+{
+    FutexHash *hashNode = &g_futexHash[node->index];
+=======
         LOS_ListTailInsert(queueList, &(node->queueList)); // append at the tail of queueList
         OsFutexReplaceQueueListHeadNode(*firstNode, node); // and swap the head position on futexList as well
         *firstNode = node;
@@ -442,20 +619,34 @@ STATIC INT32 OsFutexInsertTasktoPendList(FutexNode **firstNode, FutexNode *node,

 STATIC FutexNode *OsFindFutexNode(const FutexNode *node)
 {
     FutexHash *hashNode = &g_futexHash[node->index]; // locate the bucket first
+>>>>>>> remotes/origin/main
     LOS_DL_LIST *futexList = &(hashNode->lockList);
     FutexNode *headNode = NULL;

     for (futexList = futexList->pstNext;
+<<<<<<< HEAD
+         futexList != &(hashNode->lockList);
+         futexList = futexList->pstNext) {
+        headNode = OS_FUTEX_FROM_FUTEXLIST(futexList);
+        if ((headNode->key == node->key) && (headNode->pid == node->pid)) {
+            return headNode;
+        }
+=======
          futexList != &(hashNode->lockList); // the loop ends once we are back at the list head
          futexList = futexList->pstNext) {
         headNode = OS_FUTEX_FROM_FUTEXLIST(futexList); // get the futex node entity
         if ((headNode->key == node->key) && (headNode->pid == node->pid)) { // the node already exists; note that key and pid
             return headNode; // are compared together, since only the pair identifies a lock uniquely
+>>>>>>> remotes/origin/main
         }
     }

     return NULL;
 }
+<<<<<<< HEAD
+
+=======
 ///< Look up the futex, inserting it into the bucket if it is new.
+>>>>>>> remotes/origin/main
 STATIC INT32 OsFindAndInsertToHash(FutexNode *node)
 {
     FutexNode *headNode = NULL;
@@ -464,7 +655,11 @@ STATIC INT32 OsFindAndInsertToHash(FutexNode *node)
     INT32 ret;

     headNode = OsFindFutexNode(node);
+<<<<<<< HEAD
+    if (headNode == NULL) {
+=======
     if (headNode == NULL) { // not found: this is a new lock
+>>>>>>> remotes/origin/main
         OsFutexInsertNewFutexKeyToHash(node);
         LOS_ListInit(&(node->queueList));
         return LOS_OK;
     }
@@ -483,14 +678,23 @@ STATIC INT32 OsFindAndInsertToHash(FutexNode *node)
     return ret;
 }
+<<<<<<< HEAD
+
+=======
 /// Shared-memory permission check.
+>>>>>>> remotes/origin/main
 STATIC INT32 OsFutexKeyShmPermCheck(const UINT32 *userVaddr, const UINT32 flags)
 {
     PADDR_T paddr;

     /* Check whether the futexKey is a shared lock */
+<<<<<<< HEAD
+    if (!(flags & FUTEX_PRIVATE)) {
+        paddr = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);
+=======
     if (!(flags & FUTEX_PRIVATE)) { // not a private futex
         paddr = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr); // it must resolve to a physical address
+>>>>>>> remotes/origin/main
         if (paddr == 0) return LOS_NOK;
     }

@@ -549,6 +753,15 @@ STATIC INT32 OsFutexDeleteTimeoutTaskNode(FutexHash *hashNode, FutexNode *node)
     }
     return LOS_ETIMEDOUT;
 }
+<<<<<<< HEAD
+
+STATIC INT32 OsFutexInsertTaskToHash(LosTaskCB **taskCB, FutexNode **node, const UINTPTR futexKey, const UINT32 flags)
+{
+    INT32 ret;
+    *taskCB = OsCurrTaskGet();
+    *node = &((*taskCB)->futex);
+    OsFutexSetKey(futexKey, flags, *node);
+=======
 /// Attach the current task's futex node and insert it into the hash table.
 STATIC INT32 OsFutexInsertTaskToHash(LosTaskCB **taskCB, FutexNode **node, const UINTPTR futexKey, const UINT32 flags)
 {
@@ -556,6 +769,7 @@ STATIC INT32 OsFutexInsertTaskToHash(LosTaskCB **taskCB, FutexNode **node, const
     *taskCB = OsCurrTaskGet();   // the current task
     *node = &((*taskCB)->futex); // the task's embedded futex node
     OsFutexSetKey(futexKey, flags, *node); // fill in key, index, and pid
+>>>>>>> remotes/origin/main

     ret = OsFindAndInsertToHash(*node);
     if (ret) {
@@ -565,13 +779,28 @@ STATIC INT32 OsFutexInsertTaskToHash(LosTaskCB **taskCB, FutexNode **node, const
     LOS_ListInit(&((*node)->pendList));
     return LOS_OK;
 }
+<<<<<<< HEAD
+
+=======
 /// Park the current task on the wait list.
+>>>>>>> remotes/origin/main
 STATIC INT32 OsFutexWaitTask(const UINT32 *userVaddr, const UINT32 flags, const UINT32 val, const UINT32 timeout)
 {
     INT32 futexRet;
     UINT32 intSave, lockVal;
     LosTaskCB *taskCB = NULL;
     FutexNode *node = NULL;
+<<<<<<< HEAD
+    UINTPTR futexKey = OsFutexFlagsToKey(userVaddr, flags);
/// Put the current task on the futex wait list
STATIC INT32 OsFutexWaitTask(const UINT32 *userVaddr, const UINT32 flags, const UINT32 val, const UINT32 timeout)
{
    INT32 futexRet;
    UINT32 intSave, lockVal;
    LosTaskCB *taskCB = NULL;
    FutexNode *node = NULL;
    UINTPTR futexKey = OsFutexFlagsToKey(userVaddr, flags); // derive the key from the address and flags
    UINT32 index = OsFutexKeyToIndex(futexKey, flags);      // map the key to its hash bucket
    FutexHash *hashNode = &g_futexHash[index];

    if (OsFutexLock(&hashNode->listLock)) {
        return LOS_EINVAL;
    }
@@ -581,17 +810,27 @@
    }
    // userVaddr must be a user-space virtual address
    if (LOS_ArchCopyFromUser(&lockVal, userVaddr, sizeof(UINT32))) { // copy the word into kernel space
        PRINT_ERR("Futex wait param check failed! copy from user failed!\n");
        futexRet = LOS_EINVAL;
        goto EXIT_ERR;
    }

    if (lockVal != val) { // the word no longer holds the expected value, so do not sleep
        futexRet = LOS_EBADF;
        goto EXIT_ERR;
    }
    // note the second argument: on success, node == taskCB->futex
    if (OsFutexInsertTaskToHash(&taskCB, &node, futexKey, flags)) {
        futexRet = LOS_NOK;
        goto EXIT_ERR;
    }
@@ -602,7 +841,11 @@
    taskCB->ops->wait(taskCB, &(node->pendList), timeout);
    LOS_SpinUnlock(&g_taskSpin);

    futexRet = OsFutexUnlock(&hashNode->listLock);
    if (futexRet) {
        OsSchedUnlock();
        LOS_IntRestore(intSave);
@@ -632,12 +875,27 @@ EXIT_ERR:
EXIT_UNLOCK_ERR:
    return futexRet;
}

/// Block the calling thread: insert a node representing it into the futex table
INT32 OsFutexWait(const UINT32 *userVaddr, UINT32 flags, UINT32 val, UINT32 absTime)
{
    INT32 ret;
    UINT32 timeout = LOS_WAIT_FOREVER;

    ret = OsFutexWaitParamCheck(userVaddr, flags, absTime); // parameter check
    if (ret) {
        return ret;
    }
    if (absTime != LOS_WAIT_FOREVER) {
        timeout = OsNS2Tick((UINT64)absTime * OS_SYS_NS_PER_US);
    }

    return OsFutexWaitTask(userVaddr, flags, val, timeout); // suspend the task for up to timeout ticks
}

STATIC INT32 OsFutexWakeParamCheck(const UINT32 *userVaddr, UINT32 flags)
{
@@ -657,12 +916,20 @@ STATIC INT32 OsFutexWakeParamCheck(const UINT32 *userVaddr, UINT32 flags)
        PRINT_ERR("Futex wake param check failed! error flags: 0x%x\n", flags);
        return LOS_EINVAL;
    }
    // the address must be UINT32-aligned and lie in user space
    if ((vaddr % sizeof(INT32)) || (vaddr < OS_FUTEX_KEY_BASE) || (vaddr >= OS_FUTEX_KEY_MAX)) {
        PRINT_ERR("Futex wake param check failed! error userVaddr: 0x%x\n", userVaddr);
        return LOS_EINVAL;
    }
    // a shared futex must refer to a mapped shared-memory address
    if (flags && (OsFutexKeyShmPermCheck(userVaddr, flags) != LOS_OK)) {
        PRINT_ERR("Futex wake param check failed! error shared memory perm userVaddr: 0x%x\n", userVaddr);
        return LOS_EINVAL;
@@ -672,8 +939,12 @@ STATIC INT32 OsFutexWakeParamCheck(const UINT32 *userVaddr, UINT32 flags)
    return LOS_OK;
}
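The contract OsFutexWaitTask() enforces is the classic futex one: the kernel re-reads the user word under the bucket lock and sleeps the task only if it still equals val. A user-space lock built on top of it looks roughly like this sketch; the two sys_futex_* wrappers are hypothetical names standing in for the syscall layer, which this diff does not show.

```c
#include <stdint.h>

/* Assumed syscall wrappers - hypothetical names, not APIs from this diff. */
extern int sys_futex_wait(uint32_t *addr, uint32_t val, uint32_t timeoutUs);
extern int sys_futex_wake(uint32_t *addr, int wakeNumber);

static void FutexLock(uint32_t *word) /* 0 = free, 1 = held */
{
    while (__atomic_exchange_n(word, 1, __ATOMIC_ACQUIRE) != 0) {
        /* The kernel sleeps us only if *word is still 1 when it looks;
         * otherwise we raced with an unlock and simply retry. This is
         * exactly the lockVal != val -> LOS_EBADF path above. */
        sys_futex_wait(word, 1, UINT32_MAX);
    }
}

static void FutexUnlock(uint32_t *word)
{
    __atomic_store_n(word, 0, __ATOMIC_RELEASE);
    sys_futex_wake(word, 1); /* wake one waiter, cf. OsFutexWake() below */
}
```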
/* Check to see if the task to be awakened has timed out;
 * if it has, wake the next pended task instead.
 */
STATIC VOID OsFutexCheckAndWakePendTask(FutexNode *headNode, const INT32 wakeNumber,
                                        FutexHash *hashNode, FutexNode **nextNode, BOOL *wakeAny)
@@ -708,7 +979,10 @@
    }
    return;
}

STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, FutexNode **newHeadNode, BOOL *wakeAny)
{
@@ -717,13 +991,21 @@
    FutexNode *headNode = NULL;
    UINT32 index = OsFutexKeyToIndex(futexKey, flags);
    FutexHash *hashNode = &g_futexHash[index];
    FutexNode tempNode = { // build a throwaway node just to probe the bucket for a match
        .key = futexKey,
        .index = index,
        .pid = (flags & FUTEX_PRIVATE) ? LOS_GetCurrProcessID() : OS_INVALID,
    };

    node = OsFindFutexNode(&tempNode); // look up the futex node
    if (node == NULL) {
        return LOS_EBADF;
    }
@@ -731,7 +1013,11 @@
    headNode = node;

    SCHEDULER_LOCK(intSave);
    OsFutexCheckAndWakePendTask(headNode, wakeNumber, hashNode, newHeadNode, wakeAny); // wake up to wakeNumber waiters of this futex
    if ((*newHeadNode) != NULL) {
        OsFutexReplaceQueueListHeadNode(headNode, *newHeadNode);
        OsFutexDeinitFutexNode(headNode);
@@ -743,7 +1029,11 @@
    return LOS_OK;
}

/// Wake threads blocked on the given futex
INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
{
    INT32 ret, futexRet;
@@ -752,11 +1042,19 @@
    FutexHash *hashNode = NULL;
    FutexNode *headNode = NULL;
    BOOL wakeAny = FALSE;

    // 1. validate the parameters
    if (OsFutexWakeParamCheck(userVaddr, flags)) {
        return LOS_EINVAL;
    }
    // 2. map the user-space address to its bucket
    futexKey = OsFutexFlagsToKey(userVaddr, flags);
    index = OsFutexKeyToIndex(futexKey, flags);
@@ -764,7 +1062,11 @@
    if (OsFutexLock(&hashNode->listLock)) {
        return LOS_EINVAL;
    }
    // 3. wake the tasks waiting on this futex
    ret = OsFutexWakeTask(futexKey, flags, wakeNumber, &headNode, &wakeAny);
    if (ret) {
        goto EXIT_ERR;
@@ -778,7 +1080,11 @@
    if (futexRet) {
        goto EXIT_UNLOCK_ERR;
    }
    // 4. reschedule only if something was actually woken
    if (wakeAny == TRUE) {
        LOS_MpSchedule(OS_MP_CPU_ALL);
        LOS_Schedule();
@@ -887,7 +1193,11 @@ STATIC VOID OsFutexRequeueSplitTwoLists(FutexHash *oldHashNode, FutexNode *oldHe
    tailNode->queueList.pstNext = &newHeadNode->queueList;
    return;
}

/// Drop the old key and return the list head node
STATIC FutexNode *OsFutexRequeueRemoveOldKeyAndGetHead(UINTPTR oldFutexKey, UINT32 flags, INT32 wakeNumber,
                                                       UINTPTR newFutexKey, INT32 requeueCount, BOOL *wakeAny)
{
@@ -923,7 +1233,11 @@
    return oldHeadNode;
}

/// Validate the requeue parameters
STATIC INT32 OsFutexRequeueParamCheck(const UINT32 *oldUserVaddr, UINT32 flags, const UINT32 *newUserVaddr)
{
    VADDR_T oldVaddr = (VADDR_T)(UINTPTR)oldUserVaddr;
@@ -932,12 +1246,20 @@
    if (oldVaddr == newVaddr) {
        return LOS_EINVAL;
    }
    // check the flags
    if ((flags & (~FUTEX_PRIVATE)) != FUTEX_REQUEUE) {
        PRINT_ERR("Futex requeue param check failed! error flags: 0x%x\n", flags);
        return LOS_EINVAL;
    }
    // check the address range; it must lie in user space
    if ((oldVaddr % sizeof(INT32)) || (oldVaddr < OS_FUTEX_KEY_BASE) || (oldVaddr >= OS_FUTEX_KEY_MAX)) {
        PRINT_ERR("Futex requeue param check failed! error old userVaddr: 0x%x\n", oldUserVaddr);
        return LOS_EINVAL;
@@ -950,7 +1272,11 @@
    return LOS_OK;
}

/// Move waiters of one futex onto another within the table (requeue)
INT32 OsFutexRequeue(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber, INT32 count, const UINT32 *newUserVaddr)
{
    INT32 ret;
@@ -967,12 +1293,21 @@
        return LOS_EINVAL;
    }

    oldFutexKey = OsFutexFlagsToKey(userVaddr, flags); // derive both keys first
    newFutexKey = OsFutexFlagsToKey(newUserVaddr, flags);
    oldIndex = OsFutexKeyToIndex(oldFutexKey, flags);  // then both bucket positions (the table has 80 buckets)
    newIndex = OsFutexKeyToIndex(newFutexKey, flags);

    oldHashNode = &g_futexHash[oldIndex];              // and finally the old bucket itself
    if (OsFutexLock(&oldHashNode->listLock)) {
        return LOS_EINVAL;
    }
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_mux.c b/src/kernel_liteos_a/kernel/base/ipc/los_mux.c
index 45f43d6b..55ed73c5 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_mux.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_mux.c
@@ -1,6 +1,10 @@
 /*
  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
  * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modification,
  * are permitted provided that the following conditions are met:
@@ -40,19 +44,32 @@
#ifdef LOSCFG_BASE_IPC_MUX
#define MUTEXATTR_TYPE_MASK 0x0FU
/// Initialise mutex attributes
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrInit(LosMuxAttr *attr)
{
    if (attr == NULL) {
        return LOS_EINVAL;
    }

    attr->protocol = LOS_MUX_PRIO_INHERIT;       // inheritance by default: if task A (prio 4) waits on B (prio 19), B is boosted to 4 until it releases the lock
    attr->prioceiling = OS_TASK_PRIORITY_LOWEST; // lowest priority as the ceiling
    attr->type = LOS_MUX_DEFAULT;                // the default resolves to LOS_MUX_RECURSIVE
    return LOS_OK;
}

/// Destroy mutex attributes; note that beyond the NULL check this currently does nothing
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrDestroy(LosMuxAttr *attr)
{
    if (attr == NULL) {
@@ -61,7 +78,11 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrDestroy(LosMuxAttr *attr)
    return LOS_OK;
}

/// Get the mutex type attribute, returned through outType
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetType(const LosMuxAttr *attr, INT32 *outType)
{
    INT32 type;
@@ -79,7 +100,11 @@
    return LOS_OK;
}

/// Set the mutex type attribute
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetType(LosMuxAttr *attr, INT32 type)
{
    if ((attr == NULL) || (type < LOS_MUX_NORMAL) || (type > LOS_MUX_ERRORCHECK)) {
@@ -89,7 +114,11 @@
    attr->type = (UINT8)((attr->type & ~MUTEXATTR_TYPE_MASK) | (UINT32)type);
    return LOS_OK;
}

/// Get the mutex protocol attribute
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetProtocol(const LosMuxAttr *attr, INT32 *protocol)
{
    if ((attr != NULL) && (protocol != NULL)) {
@@ -100,7 +129,11 @@
    return LOS_OK;
}

/// Set the mutex protocol attribute
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetProtocol(LosMuxAttr *attr, INT32 protocol)
{
    if (attr == NULL) {
@@ -117,7 +150,11 @@
        return LOS_EINVAL;
    }
}

/// Get the priority-ceiling attribute
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetPrioceiling(const LosMuxAttr *attr, INT32 *prioceiling)
{
    if (attr == NULL) {
@@ -130,7 +167,11 @@
    return LOS_OK;
}

/// Set the priority-ceiling attribute
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetPrioceiling(LosMuxAttr *attr, INT32 prioceiling)
{
    if ((attr == NULL) ||
@@ -143,7 +184,11 @@
    return LOS_OK;
}

/// Set a mutex's priority ceiling; the previous ceiling is handed back through oldPrioceiling
LITE_OS_SEC_TEXT UINT32 LOS_MuxSetPrioceiling(LosMux *mutex, INT32 prioceiling, INT32 *oldPrioceiling)
{
    INT32 ret;
@@ -172,7 +217,11 @@
    return ret;
}

/// Get a mutex's priority ceiling
LITE_OS_SEC_TEXT UINT32 LOS_MuxGetPrioceiling(const LosMux *mutex, INT32 *prioceiling)
{
    if ((mutex != NULL) && (prioceiling != NULL) && (mutex->magic == OS_MUX_MAGIC)) {
@@ -182,7 +231,11 @@
    return LOS_EINVAL;
}

/// Is the mutex valid?
LITE_OS_SEC_TEXT BOOL LOS_MuxIsValid(const LosMux *mutex)
{
    if ((mutex != NULL) && (mutex->magic == OS_MUX_MAGIC)) {
@@ -191,7 +244,11 @@
    return FALSE;
}

/// Validate a mutex attribute block
STATIC UINT32 OsCheckMutexAttr(const LosMuxAttr *attr)
{
    if (((INT8)(attr->type) < LOS_MUX_NORMAL) || (attr->type > LOS_MUX_ERRORCHECK)) {
@@ -205,7 +262,11 @@
    }
    return LOS_OK;
}
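Taken together, the attribute API above is used like this; a minimal sketch, assuming the usual los_mux.h declarations, with error handling trimmed to the essentials.

```c
#include "los_mux.h" /* assumed header providing LosMux/LosMuxAttr and the APIs above */

UINT32 MakeErrorCheckMutex(LosMux *mux)
{
    LosMuxAttr attr;
    UINT32 ret = LOS_MuxAttrInit(&attr); /* defaults: PRIO_INHERIT protocol, recursive type */
    if (ret != LOS_OK) {
        return ret;
    }
    /* switch to error-check semantics: relocking by the owner fails with LOS_EDEADLK */
    ret = LOS_MuxAttrSetType(&attr, LOS_MUX_ERRORCHECK);
    if (ret != LOS_OK) {
        return ret;
    }
    return LOS_MuxInit(mux, &attr); /* defined just below */
}
```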
/// Initialise a mutex
LITE_OS_SEC_TEXT UINT32 LOS_MuxInit(LosMux *mutex, const LosMuxAttr *attr)
{
    UINT32 intSave;
@@ -215,6 +276,26 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxInit(LosMux *mutex, const LosMuxAttr *attr)
    }

    if (attr == NULL) {
        (VOID)LOS_MuxAttrInit(&mutex->attr); // no attributes supplied: use the defaults
    } else {
        (VOID)memcpy_s(&mutex->attr, sizeof(LosMuxAttr), attr, sizeof(LosMuxAttr)); // copy attr into mutex->attr
    }

    if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {
        return LOS_EINVAL;
    }

    SCHEDULER_LOCK(intSave);
    mutex->muxCount = 0;
    mutex->owner = NULL;
    LOS_ListInit(&mutex->muxList);
    mutex->magic = OS_MUX_MAGIC;
    SCHEDULER_UNLOCK(intSave);
    return LOS_OK;
}

/// Destroy a mutex
LITE_OS_SEC_TEXT UINT32 LOS_MuxDestroy(LosMux *mutex)
{
    UINT32 intSave;
@@ -241,13 +323,30 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxDestroy(LosMux *mutex)
        return LOS_EINVAL;
    }

    SCHEDULER_LOCK(intSave); // take the scheduler spinlock
    if (mutex->magic != OS_MUX_MAGIC) {
        SCHEDULER_UNLOCK(intSave);
        return LOS_EBADF;
    }

    if (mutex->muxCount != 0) {
        SCHEDULER_UNLOCK(intSave);
        return LOS_EBUSY;
    }

    (VOID)memset_s(mutex, sizeof(LosMux), 0, sizeof(LosMux));
    SCHEDULER_UNLOCK(intSave);
    return LOS_OK;
}

/// Record the mutex in the priority-inheritance bitmap
STATIC VOID OsMuxBitmapSet(const LosMux *mutex, const LosTaskCB *runTask)
{
    if (mutex->attr.protocol != LOS_MUX_PRIO_INHERIT) {
@@ -271,7 +371,11 @@ STATIC VOID OsMuxBitmapSet(const LosMux *mutex, const LosTaskCB *runTask)
        owner->ops->priorityInheritance(owner, &param);
    }
}

/// Undo the priority-inheritance bitmap entry
VOID OsMuxBitmapRestore(const LosMux *mutex, const LOS_DL_LIST *list, const LosTaskCB *runTask)
{
    if (mutex->attr.protocol != LOS_MUX_PRIO_INHERIT) {
@@ -284,11 +388,26 @@
    owner->ops->priorityRestore(owner, list, &param);
}
/// Worst case: the lock is unavailable, the task yields the CPU and blocks, running again only once the holder releases the lock and its turn comes
STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
{
    UINT32 ret;

    if ((mutex->muxList.pstPrev == NULL) || (mutex->muxList.pstNext == NULL)) { // the list has never been linked
        /* This is for mutex macro initialization. */
        mutex->muxCount = 0; // reset the hold counter
        mutex->owner = NULL;
        LOS_ListInit(&mutex->muxList);
    }

    if (mutex->muxCount == 0) {         // nobody holds it: the lock is ours
        mutex->muxCount++;              // bump the hold count
        mutex->owner = (VOID *)runTask; // the current task becomes the owner
        LOS_ListTailInsert(&runTask->lockList, &mutex->holdList);
        if (mutex->attr.protocol == LOS_MUX_PRIO_PROTECT) {
            SchedParam param = { 0 };
@@ -308,6 +428,25 @@ STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
        }
        return LOS_OK;
    }
    // muxCount > 0: either we already own a recursive lock, or another task holds it
    if (((LosTaskCB *)mutex->owner == runTask) && (mutex->attr.type == LOS_MUX_RECURSIVE)) { // case 1: we are the owner
        mutex->muxCount++; // deepen the recursion; recursive locking avoids self-deadlock and is the default (LOS_MUX_DEFAULT == LOS_MUX_RECURSIVE)
        return LOS_OK;
    }

    if (!timeout) {
        return LOS_EINVAL;
    }

    if (!OsPreemptableInSched()) {
        return LOS_EDEADLK;
    }

    OsMuxBitmapSet(mutex, runTask); // boost the holder's priority as far as possible

    runTask->taskMux = (VOID *)mutex; // record which mutex this task is about to wait on
    LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &mutex->muxList);
    if (node == NULL) {
        ret = LOS_NOK;
@@ -333,10 +473,17 @@
    OsTaskWaitSetPendMask(OS_TASK_WAIT_MUTEX, (UINTPTR)mutex, timeout);
    ret = runTask->ops->wait(runTask, node, timeout);
    if (ret == LOS_ERRNO_TSK_TIMEOUT) { // although this sits right after the wait call, it runs much later: the CPU switched contexts inside it
        OsMuxBitmapRestore(mutex, NULL, runTask);
        runTask->taskMux = NULL; // by the time control returns here, the wait may already have timed out
        ret = LOS_ETIMEDOUT;
    }

    return ret;
@@ -344,7 +491,11 @@
}

UINT32 OsMuxLockUnsafe(LosMux *mutex, UINT32 timeout)
{
    LosTaskCB *runTask = OsCurrTaskGet(); // the current task

    if (mutex->magic != OS_MUX_MAGIC) {
        return LOS_EBADF;
@@ -353,13 +504,29 @@ UINT32 OsMuxLockUnsafe(LosMux *mutex, UINT32 timeout)
    if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {
        return LOS_EINVAL;
    }
    // with LOS_MUX_ERRORCHECK, relocking by the current owner is reported as deadlock (the default type is LOS_MUX_RECURSIVE)
    if ((mutex->attr.type == LOS_MUX_ERRORCHECK) && (mutex->owner == (VOID *)runTask)) {
        return LOS_EDEADLK;
    }

    return OsMuxPendOp(runTask, mutex, timeout);
}

/// Try to take the lock without pending
UINT32 OsMuxTrylockUnsafe(LosMux *mutex, UINT32 timeout)
{
    LosTaskCB *runTask = OsCurrTaskGet();

    if (mutex->magic != OS_MUX_MAGIC) {
        return LOS_EBADF;
    }

    if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) { // validate the attributes
@@ -370,6 +537,7 @@ UINT32 OsMuxTrylockUnsafe(LosMux *mutex, UINT32 timeout)
        return LOS_EINVAL;
    }
@@ -378,9 +546,15 @@
        return LOS_EBUSY;
    }

    return OsMuxPendOp(runTask, mutex, timeout); // attempt the lock, waiting at most timeout
}

/// Take a mutex
LITE_OS_SEC_TEXT UINT32 LOS_MuxLock(LosMux *mutex, UINT32 timeout)
{
    LosTaskCB *runTask = NULL;
@@ -395,6 +569,21 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxLock(LosMux *mutex, UINT32 timeout)
        return LOS_EINTR;
    }

    runTask = (LosTaskCB *)OsCurrTaskGet();
    /* DO NOT Call blocking API in system tasks */
    if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
        PRINTK("Warning: DO NOT call %s in system tasks.\n", __FUNCTION__);
        OsBackTrace();
    }

    SCHEDULER_LOCK(intSave);
    ret = OsMuxLockUnsafe(mutex, timeout);
    SCHEDULER_UNLOCK(intSave);
    return ret;
}

/// Try once; if the lock is unavailable, return immediately instead of pending
LITE_OS_SEC_TEXT UINT32 LOS_MuxTrylock(LosMux *mutex)
{
    LosTaskCB *runTask = NULL;
@@ -422,28 +612,59 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxTrylock(LosMux *mutex)
        return LOS_EINTR;
    }

    runTask = (LosTaskCB *)OsCurrTaskGet();
    /* DO NOT Call blocking API in system tasks */
    if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
        PRINTK("Warning: DO NOT call %s in system tasks.\n", __FUNCTION__);
        OsBackTrace();
    }

    SCHEDULER_LOCK(intSave);
    ret = OsMuxTrylockUnsafe(mutex, 0); // timeout 0: no waiting; give up if the lock is taken
    SCHEDULER_UNLOCK(intSave);
    return ret;
}
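Why the recursive default matters in practice: the owner may re-enter without deadlocking, and each unlock peels off one level. A sketch, assuming a mutex already initialised with LOS_MuxInit() and default attributes.

```c
#include "los_mux.h" /* assumed header */

VOID RecursiveLockDemo(LosMux *mux)
{
    (VOID)LOS_MuxLock(mux, LOS_WAIT_FOREVER); /* muxCount 0 -> 1, owner = current task */
    (VOID)LOS_MuxLock(mux, LOS_WAIT_FOREVER); /* owner re-enters: muxCount 1 -> 2, no pend */

    (VOID)LOS_MuxUnlock(mux); /* muxCount 2 -> 1, lock still held */
    (VOID)LOS_MuxUnlock(mux); /* muxCount 1 -> 0, next waiter (if any) is woken */
}
```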
STATIC UINT32 OsMuxPostOp(LosTaskCB *taskCB, LosMux *mutex, BOOL *needSched)
{
    if (LOS_ListEmpty(&mutex->muxList)) { // no waiters:
        LOS_ListDelete(&mutex->holdList); // unhook the hold node and clear ownership
        mutex->owner = NULL;
        return LOS_OK;
    }

    LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(mutex->muxList))); // the first waiter; this is the task to wake next
    OsMuxBitmapRestore(mutex, &mutex->muxList, resumedTask);

    mutex->muxCount = 1;
    mutex->owner = (VOID *)resumedTask;
    LOS_ListDelete(&mutex->holdList);
    LOS_ListTailInsert(&resumedTask->lockList, &mutex->holdList);
    OsTaskWakeClearPendMask(resumedTask);
    resumedTask->ops->wake(resumedTask);
    resumedTask->taskMux = NULL;
    if (needSched != NULL) {
        *needSched = TRUE; // ask the caller to trigger a reschedule
@@ -456,6 +677,7 @@ STATIC UINT32 OsMuxPostOp(LosTaskCB *taskCB, LosMux *mutex, BOOL *needSched)
    }

    return LOS_OK;
@@ -478,21 +700,37 @@ UINT32 OsMuxUnlockUnsafe(LosTaskCB *taskCB, LosMux *mutex, BOOL *needSched)
    if (mutex->muxCount == 0) {
        return LOS_EPERM;
    }
    // note the pre-decrement: with LOS_MUX_RECURSIVE the count may legitimately stay non-zero
    if ((--mutex->muxCount != 0) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {
        return LOS_OK;
    }

    if (mutex->attr.protocol == LOS_MUX_PRIO_PROTECT) { // undo the priority-protect boost
        SchedParam param = { 0 };
        taskCB->ops->schedParamGet(taskCB, &param);
        taskCB->ops->priorityRestore(taskCB, NULL, &param);
    }

    /* Whether a task blocks on the mutex lock. */
    return OsMuxPostOp(taskCB, mutex, needSched); // wake the next waiter, if any
}

/// Release the mutex
LITE_OS_SEC_TEXT UINT32 LOS_MuxUnlock(LosMux *mutex)
{
    LosTaskCB *runTask = NULL;
@@ -508,9 +746,15 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxUnlock(LosMux *mutex)
        return LOS_EINTR;
    }

    runTask = (LosTaskCB *)OsCurrTaskGet();
    /* DO NOT Call blocking API in system tasks */
    if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) { // it is easy to deadlock a system task this way
        PRINTK("Warning: DO NOT call %s in system tasks.\n", __FUNCTION__);
        OsBackTrace();
    }
@@ -518,12 +762,22 @@
    SCHEDULER_LOCK(intSave);
    ret = OsMuxUnlockUnsafe(runTask, mutex, &needSched);
    SCHEDULER_UNLOCK(intSave);
    if (needSched == TRUE) {
        LOS_MpSchedule(OS_MP_CPU_ALL); // tell every CPU to reschedule
        LOS_Schedule();
    }
    return ret;
}

#endif /* LOSCFG_BASE_IPC_MUX */
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_queue.c b/src/kernel_liteos_a/kernel/base/ipc/los_queue.c
index 40a4c197..9924eb73 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_queue.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_queue.c
@@ -1,6 +1,10 @@
 /*
  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
  * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modification,
  * are permitted provided that the following conditions are met:
@@ -47,8 +51,13 @@
#endif /* LOSCFG_BASE_IPC_QUEUE_LIMIT <= 0 */
#ifndef LOSCFG_IPC_CONTAINER
LITE_OS_SEC_BSS LosQueueCB *g_allQueue = NULL;      ///< pool of message-queue control blocks
LITE_OS_SEC_BSS STATIC LOS_DL_LIST g_freeQueueList; ///< free list; new queues are claimed from here
#define FREE_QUEUE_LIST g_freeQueueList
#endif
@@ -83,6 +92,13 @@ LITE_OS_SEC_TEXT_INIT LosQueueCB *OsAllQueueCBInit(LOS_DL_LIST *freeQueueList)
    return allQueue;
}

/*
 * Description : queue initial
 * Return      : LOS_OK on success or error code on failure
 */
LITE_OS_SEC_TEXT_INIT UINT32 OsQueueInit(VOID)
{
#ifndef LOSCFG_IPC_CONTAINER
@@ -93,7 +109,11 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsQueueInit(VOID)
#endif
    return LOS_OK;
}
/// Create a queue: storage is sized from the caller's queue length and message size; the new ID is returned through queueID
LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueCreate(CHAR *queueName, UINT16 len, UINT32 *queueID,
                                             UINT32 flags, UINT16 maxMsgSize)
{
@@ -110,7 +130,11 @@
        return LOS_ERRNO_QUEUE_CREAT_PTR_NULL;
    }

    if (maxMsgSize > (OS_NULL_SHORT - sizeof(UINT32))) { // each slot reserves a trailing UINT32 for the actual message length, hence the subtraction
        return LOS_ERRNO_QUEUE_SIZE_TOO_BIG;
    }
@@ -118,6 +142,61 @@
        return LOS_ERRNO_QUEUE_PARA_ISZERO;
    }

    msgSize = maxMsgSize + sizeof(UINT32); // slot size = payload bytes + the UINT32 length field
    /*
     * Memory allocation is time-consuming, to shorten the time of disable interrupt,
     * move the memory allocation to here.
     */
    queue = (UINT8 *)LOS_MemAlloc(m_aucSysMem1, (UINT32)len * msgSize);
    if (queue == NULL) {
        return LOS_ERRNO_QUEUE_CREATE_NO_MEMORY;
    }

    SCHEDULER_LOCK(intSave);
    if (LOS_ListEmpty(&FREE_QUEUE_LIST)) {
        SCHEDULER_UNLOCK(intSave);
        OsQueueCheckHook();
        (VOID)LOS_MemFree(m_aucSysMem1, queue);
        return LOS_ERRNO_QUEUE_CB_UNAVAILABLE;
    }

    unusedQueue = LOS_DL_LIST_FIRST(&FREE_QUEUE_LIST);
    LOS_ListDelete(unusedQueue);
    queueCB = GET_QUEUE_LIST(unusedQueue);
    queueCB->queueLen = len;
    queueCB->queueSize = msgSize;
    queueCB->queueHandle = queue;
    queueCB->queueState = OS_QUEUE_INUSED;
    queueCB->readWriteableCnt[OS_QUEUE_READ] = 0;
    queueCB->readWriteableCnt[OS_QUEUE_WRITE] = len;
    queueCB->queueHead = 0;
    queueCB->queueTail = 0;
    LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_READ]);
    LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_WRITE]);
    LOS_ListInit(&queueCB->memList);

    OsQueueDbgUpdateHook(queueCB->queueID, OsCurrTaskGet()->taskEntry);
    SCHEDULER_UNLOCK(intSave);

    *queueID = queueCB->queueID;
    OsHookCall(LOS_HOOK_TYPE_QUEUE_CREATE, queueCB);
    return LOS_OK;
}

STATIC LITE_OS_SEC_TEXT UINT32 OsQueueReadParameterCheck(UINT32 queueID, const VOID *bufferAddr,
                                                         const UINT32 *bufferSize, UINT32 timeout)
{
    if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
        return LOS_ERRNO_QUEUE_INVALID;
    }
    if ((bufferAddr == NULL) || (bufferSize == NULL)) {
        return LOS_ERRNO_QUEUE_READ_PTR_NULL;
    }

    if ((*bufferSize == 0) || (*bufferSize > (OS_NULL_SHORT - sizeof(UINT32)))) { // the read size is capped just below 64K, again minus the UINT32 length field
        return LOS_ERRNO_QUEUE_READSIZE_IS_INVALID;
    }

    OsQueueDbgTimeUpdateHook(queueID);

    if (timeout != LOS_NO_WAIT) { // a blocking read...
        if (OS_INT_ACTIVE) {      // ...may not be issued from interrupt context
            return LOS_ERRNO_QUEUE_READ_IN_INTERRUPT;
        }
    }
    return LOS_OK;
}

/// Validate the write parameters
STATIC LITE_OS_SEC_TEXT UINT32 OsQueueWriteParameterCheck(UINT32 queueID, const VOID *bufferAddr,
                                                          const UINT32 *bufferSize, UINT32 timeout)
{
    if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
        return LOS_ERRNO_QUEUE_INVALID;
    }

    if (bufferAddr == NULL) {
        return LOS_ERRNO_QUEUE_WRITE_PTR_NULL;
    }

    if (*bufferSize == 0) { // no upper bound here; an oversized write is rejected later, in OsQueueOperateParamCheck()
        return LOS_ERRNO_QUEUE_WRITESIZE_ISZERO;
    }
@@ -207,13 +309,32 @@
    return LOS_OK;
}
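A create/delete round trip for reference; a hedged sketch assuming los_queue.h and the error codes above. The slot math follows the code: 8 slots of (64 + sizeof(UINT32)) bytes are allocated.

```c
#include "los_queue.h" /* assumed header for the queue API */

UINT32 QueueLifecycleDemo(VOID)
{
    UINT32 queueId;
    /* name, len = 8 slots, out ID, flags, maxMsgSize = 64 payload bytes per slot */
    UINT32 ret = LOS_QueueCreate("demo", 8, &queueId, 0, 64);
    if (ret != LOS_OK) {
        return ret;
    }
    /* ... LOS_QueueWriteCopy()/LOS_QueueReadCopy(), shown further down ... */
    return LOS_QueueDelete(queueId); /* fails with LOS_ERRNO_QUEUE_IN_TSKUSE while tasks still pend */
}
```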
/// Queue buffer operation. Data is consumed strictly in order, from the head or the tail and never the middle, so the whole ring is described by head and tail alone
STATIC VOID OsQueueBufferOperate(LosQueueCB *queueCB, UINT32 operateType, VOID *bufferAddr, UINT32 *bufferSize)
{
    UINT8 *queueNode = NULL;
    UINT32 msgDataSize;
    UINT16 queuePosition;

    /* get the queue position */
    switch (OS_QUEUE_OPERATE_GET(operateType)) {
        case OS_QUEUE_READ_HEAD: // consume from the head
            queuePosition = queueCB->queueHead;
            ((queueCB->queueHead + 1) == queueCB->queueLen) ? (queueCB->queueHead = 0) : (queueCB->queueHead++);
            break;
        case OS_QUEUE_WRITE_HEAD: // push in front of the head
            (queueCB->queueHead == 0) ? (queueCB->queueHead = queueCB->queueLen - 1) : (--queueCB->queueHead);
            queuePosition = queueCB->queueHead;
            break;
        case OS_QUEUE_WRITE_TAIL: // append at the tail
            queuePosition = queueCB->queueTail;
            ((queueCB->queueTail + 1) == queueCB->queueLen) ? (queueCB->queueTail = 0) : (queueCB->queueTail++);
            break;
        default: /* read tail, reserved. */
            PRINT_ERR("invalid queue operate type!\n");
            return;
    }
    // queueHandle is the buffer allocated at create time; compute this slot's address
    queueNode = &(queueCB->queueHandle[(queuePosition * (queueCB->queueSize))]);

    if (OS_QUEUE_IS_READ(operateType)) { // a read happens in two steps:
        if (memcpy_s(&msgDataSize, sizeof(UINT32), queueNode + queueCB->queueSize - sizeof(UINT32),
                     sizeof(UINT32)) != EOK) { // 1. fetch the stored message length from the slot's trailing UINT32
            PRINT_ERR("get msgdatasize failed\n");
            return;
        }
        msgDataSize = (*bufferSize < msgDataSize) ? *bufferSize : msgDataSize;
        if (memcpy_s(bufferAddr, *bufferSize, queueNode, msgDataSize) != EOK) { // 2. copy the message out into bufferAddr
            PRINT_ERR("copy message to buffer failed\n");
            return;
        }

        *bufferSize = msgDataSize; // hand the actual message size back through the parameter
    } else { // the only other operation is a write, also in two steps (the annotator suggests an explicit OS_QUEUE_IS_WRITE check here)
        if (memcpy_s(queueNode, queueCB->queueSize, bufferAddr, *bufferSize) != EOK) { // 1. store the payload
            PRINT_ERR("store message failed\n");
            return;
        }
        if (memcpy_s(queueNode + queueCB->queueSize - sizeof(UINT32), sizeof(UINT32), bufferSize,
                     sizeof(UINT32)) != EOK) { // 2. record its length in the trailing UINT32
            PRINT_ERR("store message size failed\n");
            return;
        }
    }
}
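The head/tail arithmetic above is a plain ring buffer; this standalone, runnable model reproduces the same wrap-around ternaries. In the kernel the readWriteableCnt counters (checked in OsQueueOperate() below) keep writes from overrunning unread slots; the model omits that guard on purpose to show the raw wrap.

```c
#include <stdio.h>

#define QUEUE_LEN 4U

int main(void)
{
    unsigned head = 0, tail = 0, pos;

    /* OS_QUEUE_WRITE_TAIL: claim the tail slot, then advance with wrap */
    for (int i = 0; i < 5; i++) { /* the 5th write wraps back to slot 0 */
        pos = tail;
        ((tail + 1) == QUEUE_LEN) ? (tail = 0) : (tail++);
        printf("write #%d -> slot %u\n", i, pos);
    }

    /* OS_QUEUE_READ_HEAD: consume the head slot, then advance with wrap */
    pos = head;
    ((head + 1) == QUEUE_LEN) ? (head = 0) : (head++);
    printf("read -> slot %u\n", pos);
    return 0;
}
```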
/// Validate an operate call against the queue's current state
STATIC UINT32 OsQueueOperateParamCheck(const LosQueueCB *queueCB, UINT32 queueID,
                                       UINT32 operateType, const UINT32 *bufferSize)
{
    if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
        return LOS_ERRNO_QUEUE_NOT_CREATE;
    }

    if (OS_QUEUE_IS_WRITE(operateType) && (*bufferSize > (queueCB->queueSize - sizeof(UINT32)))) { // on write, reject payloads larger than a slot can hold
        return LOS_ERRNO_QUEUE_WRITE_SIZE_TOO_BIG;
    }
@@ -270,31 +428,79 @@
    return LOS_OK;
}

UINT32 OsQueueOperate(UINT32 queueID, UINT32 operateType, VOID *bufferAddr, UINT32 *bufferSize, UINT32 timeout)
{
    UINT32 ret;
    UINT32 readWrite = OS_QUEUE_READ_WRITE_GET(operateType); // extract the read/write flag
    UINT32 intSave;
    OsHookCall(LOS_HOOK_TYPE_QUEUE_READ, (LosQueueCB *)GET_QUEUE_HANDLE(queueID), operateType, *bufferSize, timeout);

    SCHEDULER_LOCK(intSave);
    LosQueueCB *queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID); // fetch the queue control block
    ret = OsQueueOperateParamCheck(queueCB, queueID, operateType, bufferSize); // parameter check
    if (ret != LOS_OK) {
        goto QUEUE_END;
    }

    if (queueCB->readWriteableCnt[readWrite] == 0) { // nothing readable / no free slot,
        if (timeout == LOS_NO_WAIT) {                // and the caller will not wait
            ret = OS_QUEUE_IS_READ(operateType) ? LOS_ERRNO_QUEUE_ISEMPTY : LOS_ERRNO_QUEUE_ISFULL;
            goto QUEUE_END;
        }

        if (!OsPreemptableInSched()) { // cannot pend while preemption is locked
            ret = LOS_ERRNO_QUEUE_PEND_IN_LOCK;
            goto QUEUE_END;
        }

        LosTaskCB *runTask = OsCurrTaskGet();
        OsTaskWaitSetPendMask(OS_TASK_WAIT_QUEUE, queueCB->queueID, timeout);
        ret = runTask->ops->wait(runTask, &queueCB->readWriteList[readWrite], timeout);
        if (ret == LOS_ERRNO_TSK_TIMEOUT) {
            ret = LOS_ERRNO_QUEUE_TIMEOUT;
            goto QUEUE_END;
        }
    } else {
        queueCB->readWriteableCnt[readWrite]--;
    }

    OsQueueBufferOperate(queueCB, operateType, bufferAddr, bufferSize);

    if (!LOS_ListEmpty(&queueCB->readWriteList[!readWrite])) { // someone is pending on the opposite operation: wake them
        LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&queueCB->readWriteList[!readWrite]));
        OsTaskWakeClearPendMask(resumedTask);
        resumedTask->ops->wake(resumedTask);
        SCHEDULER_UNLOCK(intSave);
        LOS_MpSchedule(OS_MP_CPU_ALL);
        LOS_Schedule();
        return LOS_OK;
    } else {
        queueCB->readWriteableCnt[!readWrite]++; // otherwise just bump the opposite counter
    }

QUEUE_END:
    SCHEDULER_UNLOCK(intSave);
    return ret;
}

/// Public API: timed read from a queue
LITE_OS_SEC_TEXT UINT32 LOS_QueueReadCopy(UINT32 queueID,
                                          VOID *bufferAddr,
                                          UINT32 *bufferSize,
@@ -338,15 +549,26 @@
    UINT32 ret;
    UINT32 operateType;

    ret = OsQueueReadParameterCheck(queueID, bufferAddr, bufferSize, timeout); // parameter check
    if (ret != LOS_OK) {
        return ret;
    }

    operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_READ, OS_QUEUE_HEAD); // read from the head
    return OsQueueOperate(queueID, operateType, bufferAddr, bufferSize, timeout); // perform the timed read
}

/// Public API: write at the queue head
LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHeadCopy(UINT32 queueID,
                                               VOID *bufferAddr,
                                               UINT32 bufferSize,
@@ -355,15 +577,26 @@
    UINT32 ret;
    UINT32 operateType;

    ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout); // parameter check
    if (ret != LOS_OK) {
        return ret;
    }

    operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_HEAD); // write in front of the head
    return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);
}
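Putting the copy APIs together, producer and consumer side by side; a sketch assuming a queue created as in the earlier example (LOS_QueueWriteCopy(), the tail-write variant, is defined next).

```c
#include "los_queue.h" /* assumed header */

UINT32 PingPongDemo(UINT32 queueId)
{
    CHAR msg[] = "ping";
    /* tail write; blocks while the queue is full */
    UINT32 ret = LOS_QueueWriteCopy(queueId, msg, sizeof(msg), LOS_WAIT_FOREVER);
    if (ret != LOS_OK) {
        return ret;
    }

    CHAR buf[64];
    UINT32 size = sizeof(buf); /* in: buffer capacity; out: actual message length */
    return LOS_QueueReadCopy(queueId, buf, &size, LOS_WAIT_FOREVER);
}
```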
/// Public API: write at the queue tail
LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteCopy(UINT32 queueID,
                                           VOID *bufferAddr,
                                           UINT32 bufferSize,
@@ -372,16 +605,27 @@
    UINT32 ret;
    UINT32 operateType;

    ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout); // parameter check
    if (ret != LOS_OK) {
        return ret;
    }

    operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_TAIL); // write at the tail
    return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);
}

LITE_OS_SEC_TEXT UINT32 LOS_QueueRead(UINT32 queueID, VOID *bufferAddr, UINT32 bufferSize, UINT32 timeout)
{
    return LOS_QueueReadCopy(queueID, bufferAddr, &bufferSize, timeout);
@@ -396,7 +640,10 @@ LITE_OS_SEC_TEXT UINT32 LOS_QueueWrite(UINT32 queueID, VOID *bufferAddr, UINT32
    return LOS_QueueWriteCopy(queueID, &bufferAddr, bufferSize, timeout);
}

LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHead(UINT32 queueID,
                                           VOID *bufferAddr,
                                           UINT32 bufferSize,
@@ -409,7 +656,10 @@ LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHead(UINT32 queueID,
    return LOS_QueueWriteHeadCopy(queueID, &bufferAddr, bufferSize, timeout);
}

LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueDelete(UINT32 queueID)
{
    LosQueueCB *queueCB = NULL;
@@ -422,33 +672,65 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueDelete(UINT32 queueID)
    }

    SCHEDULER_LOCK(intSave);
    queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID); // fetch the queue entity
    if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
        ret = LOS_ERRNO_QUEUE_NOT_CREATE;
        goto QUEUE_END;
    }

    if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_READ])) { // tasks are still pending to read
        ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
        goto QUEUE_END;
    }

    if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_WRITE])) { // tasks are still pending to write
        ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
        goto QUEUE_END;
    }

    if (!LOS_ListEmpty(&queueCB->memList)) { // the mailbox list is still in use
        ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
        goto QUEUE_END;
    }

    if ((queueCB->readWriteableCnt[OS_QUEUE_WRITE] + queueCB->readWriteableCnt[OS_QUEUE_READ]) !=
        queueCB->queueLen) { // the counters do not add back up to queueLen: operations are still in flight
        ret = LOS_ERRNO_QUEUE_IN_TSKWRITE;
        goto QUEUE_END;
    }

    queue = queueCB->queueHandle; // the message buffer
    queueCB->queueHandle = NULL;
    queueCB->queueState = OS_QUEUE_UNUSED; // reset the state
    queueCB->queueID = SET_QUEUE_ID(GET_QUEUE_COUNT(queueCB->queueID) + 1, GET_QUEUE_INDEX(queueCB->queueID));
    OsQueueDbgUpdateHook(queueCB->queueID, NULL);

    LOS_ListTailInsert(&FREE_QUEUE_LIST, &queueCB->readWriteList[OS_QUEUE_WRITE]);
    SCHEDULER_UNLOCK(intSave);
    OsHookCall(LOS_HOOK_TYPE_QUEUE_DELETE, queueCB);
    ret = LOS_MemFree(m_aucSysMem1, (VOID *)queue); // release the message storage
@@ -459,13 +741,18 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueDelete(UINT32 queueID)
    return ret;

QUEUE_END:
    SCHEDULER_UNLOCK(intSave);
    return ret;
}
/// Public API: snapshot queue state into queueInfo (QUEUE_INFO_S is the public wrapper around LosQueueCB)
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_QueueInfoGet(UINT32 queueID, QUEUE_INFO_S *queueInfo)
{
    UINT32 intSave;
@@ -477,6 +764,16 @@
        return LOS_ERRNO_QUEUE_PTR_NULL;
    }

    if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
        return LOS_ERRNO_QUEUE_INVALID;
    }

    (VOID)memset_s((VOID *)queueInfo, sizeof(QUEUE_INFO_S), 0, sizeof(QUEUE_INFO_S));
    SCHEDULER_LOCK(intSave);

    queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID); // look up the QCB from the ID
    if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
        ret = LOS_ERRNO_QUEUE_NOT_CREATE;
        goto QUEUE_END;
@@ -495,6 +793,21 @@
    queueInfo->usQueueSize = queueCB->queueSize;
    queueInfo->usQueueHead = queueCB->queueHead;
    queueInfo->usQueueTail = queueCB->queueTail;
    queueInfo->usReadableCnt = queueCB->readWriteableCnt[OS_QUEUE_READ];  // readable count
    queueInfo->usWritableCnt = queueCB->readWriteableCnt[OS_QUEUE_WRITE]; // writable count

    LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_READ], LosTaskCB, pendList) {
        queueInfo->uwWaitReadTask |= 1ULL << tskCB->taskID;
    }

    LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_WRITE], LosTaskCB, pendList) {
        queueInfo->uwWaitWriteTask |= 1ULL << tskCB->taskID;
    }

    LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->memList, LosTaskCB, pendList) { // same pattern; this list is used by the mailbox module
        queueInfo->uwWaitMemTask |= 1ULL << tskCB->taskID;
    }

QUEUE_END:
@@ -515,5 +829,9 @@ QUEUE_END:
    return ret;
}

#endif /* LOSCFG_BASE_IPC_QUEUE */
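Reading the snapshot back; a sketch only, using the QUEUE_INFO_S fields populated above (the bitmask fields record which task IDs are pending on each list).

```c
#include "los_queue.h" /* assumed header */

VOID DumpQueueState(UINT32 queueId)
{
    QUEUE_INFO_S info;
    if (LOS_QueueInfoGet(queueId, &info) == LOS_OK) {
        PRINTK("size=%u readable=%u writable=%u\n",
               info.usQueueSize, info.usReadableCnt, info.usWritableCnt);
    }
}
```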
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_rwlock.c b/src/kernel_liteos_a/kernel/base/ipc/los_rwlock.c
index 5ddd3b81..21b12e7e 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_rwlock.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_rwlock.c
@@ -1,6 +1,10 @@
 /*
  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
  * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modification,
  * are permitted provided that the following conditions are met:
@@ -37,10 +41,16 @@
#include "los_exc.h"
#include "los_sched_pri.h"

#ifdef LOSCFG_BASE_IPC_RWLOCK
#define RWLOCK_COUNT_MASK 0x00FFFFFFU

/// Check that a rwlock is valid
BOOL LOS_RwlockIsValid(const LosRwlock *rwlock)
{
    if ((rwlock != NULL) && ((rwlock->magic & RWLOCK_COUNT_MASK) == OS_RWLOCK_MAGIC)) {
@@ -49,7 +59,11 @@ BOOL LOS_RwlockIsValid(const LosRwlock *rwlock)
    return FALSE;
}

/// Create a rwlock: initialise its bookkeeping
UINT32 LOS_RwlockInit(LosRwlock *rwlock)
{
    UINT32 intSave;
@@ -72,7 +86,11 @@
    SCHEDULER_UNLOCK(intSave);
    return LOS_OK;
}

/// Destroy the given rwlock
UINT32 LOS_RwlockDestroy(LosRwlock *rwlock)
{
    UINT32 intSave;
@@ -96,18 +114,30 @@
    SCHEDULER_UNLOCK(intSave);
    return LOS_OK;
}

/// Common validity checks
STATIC UINT32 OsRwlockCheck(const LosRwlock *rwlock)
{
    if (rwlock == NULL) {
        return LOS_EINVAL;
    }

    if (OS_INT_ACTIVE) { // rwlocks may block, so they cannot be used from an ISR
        return LOS_EINTR;
    }

    /* DO NOT Call blocking API in system tasks */
    LosTaskCB *runTask = (LosTaskCB *)OsCurrTaskGet();
    if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
        return LOS_EPERM;
@@ -115,23 +145,36 @@
    return LOS_OK;
}

/// Is the given task's priority lower than that of the highest-priority pended writer?
STATIC BOOL OsRwlockPriCompare(LosTaskCB *runTask, LOS_DL_LIST *rwList)
{
    if (!LOS_ListEmpty(rwList)) {
        LosTaskCB *highestTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(rwList)); // the first pended writer has the highest priority
        if (OsSchedParamCompare(runTask, highestTask) < 0) { // the current task ranks below the waiting writer
            return TRUE;
        }
        return FALSE;
    }
    return TRUE;
}
/* Take the lock in read mode. Three cases:
 * 1. Nobody holds the lock: the reader acquires it.
 * 2. Readers hold the lock: this reader acquires it too, ordered by task priority.
 * 3. Another task holds it in write mode: the reader pends until that write lock is released.
 */
STATIC UINT32 OsRwlockRdPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 timeout)
{
    UINT32 ret;
@@ -140,12 +183,21 @@
     * When the rwlock mode is read mode or free mode and the priority of the current read task
     * is higher than the first pended write task, current read task can obtain this rwlock.
     */
    if (rwlock->rwCount >= 0) { // cases 1 and 2
        if (OsRwlockPriCompare(runTask, &(rwlock->writeList))) { // pended writers of higher priority still go first
            if (rwlock->rwCount == INT8_MAX) { // the reader count is at its ceiling
                return LOS_EINVAL;
            }
            rwlock->rwCount++; // read lock acquired
            return LOS_OK;
        }
    }
@@ -154,6 +206,14 @@
        return LOS_EINVAL;
    }

    if (!OsPreemptableInSched()) { // cannot pend while preemption is locked
        return LOS_EDEADLK;
    }

    /* The current task is not allowed to obtain the read lock while it holds the write lock. */
    if ((LosTaskCB *)(rwlock->writeOwner) == runTask) { // we already own it in write mode
        return LOS_EINVAL;
    }

    /*
     * When the rwlock mode is write mode, or the priority of the current read task
     * is lower than the first pended write task, the current read task is pended;
     * in short, writers win.
     */
    LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->readList)); // find the slot: with priorities 0 3 8 9 23 queued and ours 10, the node after 9 is returned
    ret = runTask->ops->wait(runTask, node, timeout); // pend on the read list, giving 0 3 8 9 10 23
    if (ret == LOS_ERRNO_TSK_TIMEOUT) {
        return LOS_ETIMEDOUT;
    }

    return ret;
}

/// Take the lock in write mode
STATIC UINT32 OsRwlockWrPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 timeout)
{
    UINT32 ret;

    /* When the rwlock is in free mode, the current write task can obtain it immediately. */
    if (rwlock->rwCount == 0) {
        rwlock->rwCount = -1;
        rwlock->writeOwner = (VOID *)runTask;
        return LOS_OK;
    }

    /* The current write task can take the rwlock again if it is already the owner. */
@@ -194,11 +275,16 @@
    if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) == runTask)) {
        if (rwlock->rwCount == INT8_MIN) {
            return LOS_EINVAL;
        }
        rwlock->rwCount--; // note: re-acquisition counts as a second write hold
        return LOS_OK;
    }
@@ -212,9 +298,15 @@

    /*
     * When the rwlock is in read mode, or another write task holds it, the current
     * write task is pended until the lock is released.
     */
    LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->writeList)); // find the slot to pend on
    ret = runTask->ops->wait(runTask, node, timeout);
    if (ret == LOS_ERRNO_TSK_TIMEOUT) {
        ret = LOS_ETIMEDOUT;
@@ -276,22 +368,36 @@ UINT32 OsRwlockTryWrUnsafe(LosRwlock *rwlock, UINT32 timeout)
        return LOS_EBADF;
    }

    /* When the rwlock is in read mode, the current write task would have to pend. */
    if (rwlock->rwCount > 0) {
        return LOS_EBUSY;
    }

    /* When another write task holds this rwlock, the current write task would have to pend. */
    LosTaskCB *runTask = OsCurrTaskGet();
    if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) != runTask)) {
        return LOS_EBUSY;
    }

    return OsRwlockWrPendOp(runTask, rwlock, timeout);
}

/// Acquire the lock in read mode
UINT32 LOS_RwlockRdLock(LosRwlock *rwlock, UINT32 timeout)
{
    UINT32 intSave;
@@ -306,7 +412,11 @@
    SCHEDULER_UNLOCK(intSave);
    return ret;
}

/// Try to acquire the lock in read mode
UINT32 LOS_RwlockTryRdLock(LosRwlock *rwlock)
{
    UINT32 intSave;
@@ -317,11 +427,19 @@
    }

    SCHEDULER_LOCK(intSave);
    ret = OsRwlockTryRdUnsafe(rwlock, 0); // "try" means return at once if the lock is unavailable; the caller is never pended
    SCHEDULER_UNLOCK(intSave);
    return ret;
}

/// Acquire the lock in write mode
UINT32 LOS_RwlockWrLock(LosRwlock *rwlock, UINT32 timeout)
{
    UINT32 intSave;
@@ -336,7 +454,11 @@
    SCHEDULER_UNLOCK(intSave);
    return ret;
}

/// Try to acquire the lock in write mode
UINT32 LOS_RwlockTryWrLock(LosRwlock *rwlock)
{
    UINT32 intSave;
@@ -347,15 +469,32 @@
    }

    SCHEDULER_LOCK(intSave);
    ret = OsRwlockTryWrUnsafe(rwlock, 0); // non-blocking attempt, same as the read variant
    SCHEDULER_UNLOCK(intSave);
    return ret;
}
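The public pairs above in use; a hedged sketch assuming los_rwlock.h and a lock initialised elsewhere with LOS_RwlockInit().

```c
#include "los_rwlock.h" /* assumed header */

LosRwlock g_cfgLock; /* initialised once with LOS_RwlockInit(&g_cfgLock) */

VOID ConfigReader(VOID)
{
    (VOID)LOS_RwlockRdLock(&g_cfgLock, LOS_WAIT_FOREVER); /* rwCount > 0: shared with other readers */
    /* ... read the shared state ... */
    (VOID)LOS_RwlockUnLock(&g_cfgLock); /* defined further down */
}

VOID ConfigWriter(VOID)
{
    if (LOS_RwlockTryWrLock(&g_cfgLock) != LOS_OK) {          /* opportunistic, never pends */
        (VOID)LOS_RwlockWrLock(&g_cfgLock, LOS_WAIT_FOREVER); /* rwCount = -1: exclusive */
    }
    /* ... mutate the shared state ... */
    (VOID)LOS_RwlockUnLock(&g_cfgLock);
}
```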
/* Get the current rwlock mode. */
STATIC UINT32 OsRwlockGetMode(LOS_DL_LIST *readList, LOS_DL_LIST *writeList)
{
    BOOL isReadEmpty = LOS_ListEmpty(readList);
    BOOL isWriteEmpty = LOS_ListEmpty(writeList);
    if (isReadEmpty && isWriteEmpty) { /* neither readers nor writers are pended */
        return RWLOCK_NONE_MODE; /* free mode */
    }
    if (!isReadEmpty && isWriteEmpty) {
        return RWLOCK_READ_MODE;
    }
@@ -363,16 +502,25 @@ STATIC UINT32 OsRwlockGetMode(LOS_DL_LIST *readList, LOS_DL_LIST *writeList)
    if (isReadEmpty && !isWriteEmpty) { /* writers pended, no readers */
        return RWLOCK_WRITE_MODE;
    }
    LosTaskCB *pendedReadTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(readList));
    LosTaskCB *pendedWriteTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(writeList));
    if (OsSchedParamCompare(pendedWriteTask, pendedReadTask) <= 0) {
        return RWLOCK_WRITEFIRST_MODE; /* write-first: the first pended writer has the higher priority */
    }
    return RWLOCK_READFIRST_MODE; /* read-first: the first pended reader has the higher priority */
}

/* Release the lock and pick the next holder(s). */
STATIC UINT32 OsRwlockPostOp(LosRwlock *rwlock, BOOL *needSched)
{
    UINT32 rwlockMode;
@@ -380,6 +528,17 @@ STATIC UINT32 OsRwlockPostOp(LosRwlock *rwlock, BOOL *needSched)
    rwlock->rwCount = 0;
    rwlock->writeOwner = NULL;
    rwlockMode = OsRwlockGetMode(&(rwlock->readList), &(rwlock->writeList)); /* classify the pend lists first */
    if (rwlockMode == RWLOCK_NONE_MODE) { /* nothing is pended; done */
        return LOS_OK;
    }
    /* In this case, the rwlock wakes the first pended write task. */
    if ((rwlockMode == RWLOCK_WRITE_MODE) || (rwlockMode == RWLOCK_WRITEFIRST_MODE)) {
        resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->writeList))); /* fetch the task control block */
        rwlock->rwCount = -1; /* set to -1 outright (not decremented) */
        rwlock->writeOwner = (VOID *)resumedTask; /* hand the lock to that writer, then wake it */
        resumedTask->ops->wake(resumedTask);
        if (needSched != NULL) {
            *needSched = TRUE;
        }
@@ -396,30 +556,51 @@ STATIC UINT32 OsRwlockPostOp(LosRwlock *rwlock, BOOL *needSched)
        return LOS_OK;
    }

    rwlock->rwCount = 1; /* set to 1 outright: the lock is being re-granted to readers */
    resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->readList)));
    resumedTask->ops->wake(resumedTask);
    while (!LOS_ListEmpty(&(rwlock->readList))) { /* keep waking readers; in read-first mode only those outranking the first pended writer */
        resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->readList)));
        if (rwlockMode == RWLOCK_READFIRST_MODE) {
            LosTaskCB *pendedWriteTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->writeList)));
            if (OsSchedParamCompare(resumedTask, pendedWriteTask) >= 0) {
                break; /* the next reader no longer outranks the first writer */
            }
        }
        if (rwlock->rwCount == INT8_MAX) {
            return EINVAL;
        }
        rwlock->rwCount++; /* one more read holder */
        resumedTask->ops->wake(resumedTask); /* waking reader after reader is what lets reads run concurrently; on SMP the resumed task often runs on another core */
    }
    if (needSched != NULL) {
        *needSched = TRUE;
    }
    return LOS_OK;
}

/* Release the lock and wake waiters. */
UINT32 OsRwlockUnlockUnsafe(LosRwlock *rwlock, BOOL *needSched)
{
    if ((rwlock->magic & RWLOCK_COUNT_MASK) != OS_RWLOCK_MAGIC) {
@@ -431,28 +612,45 @@ UINT32 OsRwlockUnlockUnsafe(LosRwlock *rwlock, BOOL *needSched)
    }

    LosTaskCB *runTask = OsCurrTaskGet();
    if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) != runTask)) { /* write mode, but the lock is not held by the current task */
        return LOS_EPERM;
    }
    /*
     * When the rwCount of the rwlock is greater than 1 or less than -1, this unlock
     * does not change the rwlock mode, so no pended task can be woken yet.
     */
    if (rwlock->rwCount > 1) { /* read mode, still shared after this unlock */
        rwlock->rwCount--;
        return LOS_OK;
    }

    if (rwlock->rwCount < -1) { /* write mode, still held recursively */
        rwlock->rwCount++;
        return LOS_OK;
    }

    return OsRwlockPostOp(rwlock, needSched);
}

/* Release the specified rwlock. */
UINT32 LOS_RwlockUnLock(LosRwlock *rwlock)
{
    UINT32 intSave;
@@ -466,9 +664,15 @@ UINT32 LOS_RwlockUnLock(LosRwlock *rwlock)
    SCHEDULER_LOCK(intSave);
    ret = OsRwlockUnlockUnsafe(rwlock, &needSched);
    SCHEDULER_UNLOCK(intSave);
    LOS_MpSchedule(OS_MP_CPU_ALL); /* let every CPU take part in the reschedule */
    if (needSched == TRUE) {
        LOS_Schedule(); /* trigger a schedule to switch tasks */
    }
    return ret;
}
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_sem.c b/src/kernel_liteos_a/kernel/base/ipc/los_sem.c
index a161a85c..bbc2f25c 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_sem.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_sem.c
@@ -1,6 +1,10 @@
 /*
  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
  * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modification,
  * are permitted provided that the following conditions are met:
@@ -40,18 +44,29 @@
 #include "los_percpu_pri.h"
 #include "los_hook.h"

 #ifdef LOSCFG_BASE_IPC_SEM

 #if (LOSCFG_BASE_IPC_SEM_LIMIT <= 0)
 #error "sem maxnum cannot be zero"
 #endif /* LOSCFG_BASE_IPC_SEM_LIMIT <= 0 */

LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_unusedSemList; ///< list of unused semaphores (in effect, the free list)
LITE_OS_SEC_BSS LosSemCB *g_allSem = NULL; ///< semaphore pool: LOSCFG_BASE_IPC_SEM_LIMIT control blocks allocated in one block

/*
 * Description : Initialize the semaphore doubly linked list
 * Return      : LOS_OK on success, or error code on failure
 */
LITE_OS_SEC_TEXT_INIT UINT32 OsSemInit(VOID)
{
    LosSemCB *semNode = NULL;
    UINT32 index;

    LOS_ListInit(&g_unusedSemList); /* unused semaphores are chained here for allocation; the total is fixed (1024 by default) */
    /* system resident memory, don't free */
    g_allSem = (LosSemCB *)LOS_MemAlloc(m_aucSysMem0, (LOSCFG_BASE_IPC_SEM_LIMIT * sizeof(LosSemCB))); /* allocate the semaphore pool */
    if (g_allSem == NULL) {
        return LOS_ERRNO_SEM_NO_MEMORY;
    }

    for (index = 0; index < LOSCFG_BASE_IPC_SEM_LIMIT; index++) {
        semNode = ((LosSemCB *)g_allSem) + index; /* take the control block, i.e. g_allSem[index] */
        semNode->semID = SET_SEM_ID(0, index); /* record the ID */
        semNode->semStat = OS_SEM_UNUSED; /* mark it unused */
        LOS_ListTailInsert(&g_unusedSemList, &semNode->semList); /* chain it onto the unused list via semList */
    }

    if (OsSemDbgInitHook() != LOS_OK) {
@@ -79,7 +107,17 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSemInit(VOID)
    return LOS_OK;
}
/*
 * Description : Create a semaphore,
 * Input       : count     --- semaphore count,
 *               maxCount  --- Max number of available semaphores,
 *               semHandle --- Index of semaphore,
 * Return      : LOS_OK on success, or error code on failure
 */
LITE_OS_SEC_TEXT_INIT UINT32 OsSemCreate(UINT16 count, UINT16 maxCount, UINT32 *semHandle)
{
    UINT32 intSave;
@@ -92,6 +130,15 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSemCreate(UINT16 count, UINT16 maxCount, UINT32 *
        return LOS_ERRNO_SEM_PTR_NULL;
    }

    if (count > maxCount) { /* the initial count may not exceed the maximum; both are caller-supplied */
        OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_OVERFLOW);
    }

    SCHEDULER_LOCK(intSave); /* enter the critical section (spinlock) */

    if (LOS_ListEmpty(&g_unusedSemList)) { /* no free semaphore left to hand out */
        SCHEDULER_UNLOCK(intSave);
        OsSemInfoGetFullDataHook();
        OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_ALL_BUSY);
    }

    unusedSem = LOS_DL_LIST_FIRST(&g_unusedSemList); /* take the first unused semaphore */
    LOS_ListDelete(unusedSem); /* detach it from the free list */
    SCHEDULER_UNLOCK(intSave);
    semCreated = GET_SEM_LIST(unusedSem);
    semCreated->semCount = count;
    semCreated->semStat = OS_SEM_USED;
    semCreated->maxSemCount = maxCount;
    LOS_ListInit(&semCreated->semList);
    *semHandle = semCreated->semID;
    OsHookCall(LOS_HOOK_TYPE_SEM_CREATE, semCreated);
    OsSemDbgUpdateHook(semCreated->semID, OsCurrTaskGet()->taskEntry, count);
@@ -116,22 +177,35 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSemCreate(UINT16 count, UINT16 maxCount, UINT32 *
    return LOS_OK;

ERR_HANDLER:
    OS_RETURN_ERROR_P2(errLine, errNo);
}

/* Public API: create a semaphore. */
LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemCreate(UINT16 count, UINT32 *semHandle)
{
    return OsSemCreate(count, OS_SEM_COUNT_MAX, semHandle);
}

/* Public API: create a binary semaphore; its count is at most 1, so it can serve as a mutex. */
LITE_OS_SEC_TEXT_INIT UINT32 LOS_BinarySemCreate(UINT16 count, UINT32 *semHandle)
{
    return OsSemCreate(count, OS_SEM_BINARY_COUNT_MAX, semHandle);
}

/* Public API: delete the semaphore identified by semHandle (a semID). */
LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)
{
    UINT32 intSave;
@@ -143,15 +217,33 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)
        OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_INVALID);
    }

    semDeleted = GET_SEM(semHandle); /* map the handle to its control block */

    SCHEDULER_LOCK(intSave);

    if ((semDeleted->semStat == OS_SEM_UNUSED) || (semDeleted->semID != semHandle)) { /* validate the handle */
        SCHEDULER_UNLOCK(intSave);
        OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_INVALID);
    }

    if (!LOS_ListEmpty(&semDeleted->semList)) { /* tasks are still pended on this semaphore, so it cannot be deleted */
        SCHEDULER_UNLOCK(intSave);
        OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_PENDED); /* the macro jumps to ERR_HANDLER */
    }

    LOS_ListTailInsert(&g_unusedSemList, &semDeleted->semList); /* tail-insert back into the unused list via semList */
    semDeleted->semStat = OS_SEM_UNUSED; /* mark it unused again */
    semDeleted->semID = SET_SEM_ID(GET_SEM_COUNT(semDeleted->semID) + 1, GET_SEM_INDEX(semDeleted->semID)); /* bump the ID sequence so stale handles fail validation */
@@ -160,6 +252,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)
    OsHookCall(LOS_HOOK_TYPE_SEM_DELETE, semDeleted);
    OsSemDbgUpdateHook(semDeleted->semID, NULL, 0);
@@ -170,11 +263,19 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)

ERR_HANDLER:
    OS_RETURN_ERROR_P2(errLine, errNo);
}
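A minimal producer/consumer sketch over the public API in this file. Hedged: the header name and the task plumbing around it are assumptions, not part of this diff:

    #include "los_sem.h"

    VOID Producer(UINT32 semHandle)
    {
        (VOID)LOS_SemPost(semHandle); /* add one resource; may wake a pended consumer */
    }

    VOID Consumer(UINT32 semHandle)
    {
        if (LOS_SemPend(semHandle, LOS_WAIT_FOREVER) == LOS_OK) { /* pend until a resource arrives */
            /* ... consume the resource ... */
        }
    }

    UINT32 Demo(VOID)
    {
        UINT32 semHandle;
        UINT32 ret = LOS_SemCreate(0, &semHandle); /* start with zero resources */
        if (ret != LOS_OK) {
            return ret;
        }
        /* ... create producer/consumer tasks here ... */
        return LOS_SemDelete(semHandle); /* fails with LOS_ERRNO_SEM_PENDED while tasks still wait */
    }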
/* Public API: pend on the given semaphore, with a timeout. */
LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
{
    UINT32 intSave;
    LosSemCB *semPended = GET_SEM(semHandle); /* map the handle to its control block */
    UINT32 retErr = LOS_OK;
    LosTaskCB *runTask = NULL;
@@ -188,7 +289,11 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
        return LOS_ERRNO_SEM_PEND_INTERR;
    }

    runTask = OsCurrTaskGet(); /* the pending task is the current one */
    if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
        OsBackTrace();
        return LOS_ERRNO_SEM_PEND_IN_SYSTEM_TASK;
    }
@@ -200,6 +305,15 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
        retErr = LOS_ERRNO_SEM_INVALID;
        goto OUT;
    }

    /* Update the operate time, no matter the actual Pend success or not */
    OsSemDbgTimeUpdateHook(semHandle);

    if (semPended->semCount > 0) {
        semPended->semCount--; /* one resource consumed */
        OsHookCall(LOS_HOOK_TYPE_SEM_PEND, semPended, runTask, timeout);
        goto OUT; /* retErr is still LOS_OK here, so the call succeeds */
    } else if (!timeout) {
        retErr = LOS_ERRNO_SEM_UNAVAILABLE;
        goto OUT;
    }

    if (!OsPreemptableInSched()) { /* cannot block here: the scheduler is locked */
        PRINT_ERR("!!!LOS_ERRNO_SEM_PEND_IN_LOCK!!!\n");
        OsBackTrace();
        retErr = LOS_ERRNO_SEM_PEND_IN_LOCK;
@@ -223,7 +342,11 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
    OsHookCall(LOS_HOOK_TYPE_SEM_PEND, semPended, runTask, timeout);
    OsTaskWaitSetPendMask(OS_TASK_WAIT_SEM, semPended->semID, timeout);
    retErr = runTask->ops->wait(runTask, &semPended->semList, timeout); /* this switches tasks: the caller pends itself and another task runs */
    if (retErr == LOS_ERRNO_TSK_TIMEOUT) {
        retErr = LOS_ERRNO_SEM_TIMEOUT;
    }

OUT:
    SCHEDULER_UNLOCK(intSave);
    return retErr;
}

/* Post the given semaphore without taking the scheduler lock ("unsafe": the caller must hold it). */
LITE_OS_SEC_TEXT UINT32 OsSemPostUnsafe(UINT32 semHandle, BOOL *needSched)
{
    LosTaskCB *resumedTask = NULL;
@@ -243,6 +370,20 @@ LITE_OS_SEC_TEXT UINT32 OsSemPostUnsafe(UINT32 semHandle, BOOL *needSched)

    /* Update the operate time, no matter the actual Post success or not */
    OsSemDbgTimeUpdateHook(semHandle);

    if (semPosted->semCount == OS_SEM_COUNT_MAX) { /* the count may not exceed its maximum */
        return LOS_ERRNO_SEM_OVERFLOW;
    }
    if (!LOS_ListEmpty(&semPosted->semList)) {
        resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(semPosted->semList)));
        OsTaskWakeClearPendMask(resumedTask);
        resumedTask->ops->wake(resumedTask);
        if (needSched != NULL) {
            *needSched = TRUE;
        }
    } else { /* no task is pended on semList */
        semPosted->semCount++; /* one more resource */
    }
    OsHookCall(LOS_HOOK_TYPE_SEM_POST, semPosted, resumedTask);

    return LOS_OK;
}

/* Public API: post the given semaphore. */
LITE_OS_SEC_TEXT UINT32 LOS_SemPost(UINT32 semHandle)
{
    UINT32 intSave;
@@ -269,15 +415,29 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPost(UINT32 semHandle)
    if (GET_SEM_INDEX(semHandle) >= LOSCFG_BASE_IPC_SEM_LIMIT) {
        return LOS_ERRNO_SEM_INVALID;
    }

    SCHEDULER_LOCK(intSave);
    ret = OsSemPostUnsafe(semHandle, &needSched);
    SCHEDULER_UNLOCK(intSave);
    if (needSched) { /* a task was woken */
        LOS_MpSchedule(OS_MP_CPU_ALL); /* tell every CPU to reschedule */
        LOS_Schedule(); /* and switch tasks */
    }
    return ret;
}
#endif /* LOSCFG_BASE_IPC_SEM */
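The timeout path above surfaces to callers as LOS_ERRNO_SEM_TIMEOUT. A small hedged sketch of a bounded wait, using only the API in this file:

    UINT32 WaitWithDeadline(UINT32 semHandle, UINT32 ticks)
    {
        UINT32 ret = LOS_SemPend(semHandle, ticks);
        if (ret == LOS_ERRNO_SEM_TIMEOUT) {
            /* no post arrived within 'ticks': the task pended and was woken by the tick timer */
        }
        return ret;
    }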
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_sem_debug.c b/src/kernel_liteos_a/kernel/base/ipc/los_sem_debug.c
index 284a800d..e19a272d 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_sem_debug.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_sem_debug.c
@@ -78,11 +78,19 @@ STATIC VOID OsSemPendedTaskNamePrint(LosSemCB *semNode)
 #ifdef LOSCFG_DEBUG_SEMAPHORE

 typedef struct {
    UINT16 origSemCount;    /* Number of original available semaphores */
    UINT64 lastAccessTime;  /* The last operation time */
    TSK_ENTRY_FUNC creator; /* The task entry who created this sem */
} SemDebugCB;
STATIC SemDebugCB *g_semDebugArray = NULL; /* debug pool: LOSCFG_BASE_IPC_SEM_LIMIT (1024 by default) SemDebugCB entries */

STATIC BOOL SemCompareValue(const IpcSortParam *sortParam, UINT32 left, UINT32 right)
{
@@ -102,6 +110,25 @@ UINT32 OsSemDbgInit(VOID)
    (VOID)memset_s(g_semDebugArray, size, 0, size);
    return LOS_OK;
}

/* Update the last access time. */
VOID OsSemDbgTimeUpdate(UINT32 semID)
{
    SemDebugCB *semDebug = &g_semDebugArray[GET_SEM_INDEX(semID)];
    semDebug->lastAccessTime = LOS_TickCountGet();
    return;
}

VOID OsSemDbgUpdate(UINT32 semID, TSK_ENTRY_FUNC creator, UINT16 count)
{
    SemDebugCB *semDebug = &g_semDebugArray[GET_SEM_INDEX(semID)];
    semDebug->creator = creator;
    semDebug->lastAccessTime = LOS_TickCountGet();
    semDebug->origSemCount = count;
    return;
}

/* Sort semaphores by last access time. */
STATIC VOID OsSemSort(UINT32 *semIndexArray, UINT32 usedCount)
{
    UINT32 i, intSave;
@@ -296,6 +324,10 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdSemInfoGet(UINT32 argc, const CHAR **arg
    return ret;
}

SHELLCMD_ENTRY(sem_shellcmd, CMD_TYPE_EX, "sem", 1, (CmdCallBackFunc)OsShellCmdSemInfoGet); /* registered statically as the "sem" shell command */
#endif
diff --git a/src/kernel_liteos_a/kernel/base/ipc/los_signal.c b/src/kernel_liteos_a/kernel/base/ipc/los_signal.c
index 55e34cd9..73c13860 100644
--- a/src/kernel_liteos_a/kernel/base/ipc/los_signal.c
+++ b/src/kernel_liteos_a/kernel/base/ipc/los_signal.c
@@ -1,6 +1,10 @@
 /*
  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
  * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modification,
  * are permitted provided that the following conditions are met:
@@ -53,6 +57,17 @@ int raise(int sig)

 #define GETUNMASKSET(procmask, pendFlag) ((~(procmask)) & (sigset_t)(pendFlag))
 #define UINT64_BIT_SIZE 64

int OsSigIsMember(const sigset_t *set, int signo)
{
    int ret = LOS_NOK;
    /* In musl, sig No bits 00000100 present sig No 3, but 1 << 3 = 00001000, so signo needs minus 1 */
    signo -= 1;
    /* Verify the signal */
    if (GOOD_SIGNO(signo)) {
        /* Check if the signal is in the set */
        ret = ((*set & SIGNO2SET((unsigned int)signo)) != 0);
    }

    return ret;
}
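The signo - 1 adjustment maps signal numbers 1..64 onto bits 0..63. A standalone sketch of the same mapping, using demo types rather than the kernel's sigset_t:

    #include <stdint.h>

    typedef uint64_t DemoSigset;

    static int DemoSigIsMember(const DemoSigset *set, int signo)
    {
        signo -= 1;                         /* signal N lives in bit N-1 */
        if ((signo < 0) || (signo >= 64)) { /* stand-in for the GOOD_SIGNO() check */
            return 0;
        }
        return ((*set >> (unsigned int)signo) & 1ULL) != 0;
    }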
@@ -122,6 +138,10 @@ VOID OsClearSigInfoTmpList(sig_cb *sigcb)
        (VOID)LOS_MemFree(m_aucSysMem0, tmpInfoNode);
    }
}

STATIC INLINE VOID OsSigWaitTaskWake(LosTaskCB *taskCB, INT32 signo)
{
    sig_cb *sigcb = &taskCB->sig;
@@ -133,14 +153,22 @@ STATIC INLINE VOID OsSigWaitTaskWake(LosTaskCB *taskCB, INT32 signo)
        OsSigEmptySet(&sigcb->sigwaitmask);
    }
}

/* Wake a pended task that is waiting for the given signal. */
STATIC UINT32 OsPendingTaskWake(LosTaskCB *taskCB, INT32 signo)
{
    if (!OsTaskIsPending(taskCB) || !OsProcessIsUserMode(OS_PCB_FROM_TCB(taskCB))) {
        return 0;
    }

    if ((signo != SIGKILL) && (taskCB->waitFlag != OS_TASK_WAIT_SIGNAL)) { /* reviewer note: could this check be a problem? */
        return 0;
    }
@@ -154,6 +182,18 @@ STATIC UINT32 OsPendingTaskWake(LosTaskCB *taskCB, INT32 signo)
            OsTaskWakeClearPendMask(taskCB);
            taskCB->ops->wake(taskCB);
            break;
        case OS_TASK_WAIT_SIGNAL: /* waiting for a plain signal */
            OsSigWaitTaskWake(taskCB, signo);
            break;
        case OS_TASK_WAIT_LITEIPC:
            OsTaskWakeClearPendMask(taskCB);
            taskCB->ops->wake(taskCB);
            break;
        case OS_TASK_WAIT_FUTEX: /* waiting on a futex */
            OsFutexNodeDeleteFromFutexHash(&taskCB->futex, TRUE, NULL, NULL); /* remove the futex node from its hash bucket */
            OsTaskWakeClearPendMask(taskCB); /* reset the task's pend information */
            taskCB->ops->wake(taskCB);
            break;
        default:
@@ -172,7 +213,11 @@ STATIC UINT32 OsPendingTaskWake(LosTaskCB *taskCB, INT32 signo)
    return 0;
}

/* Deliver a signal to one task (thread). */
int OsTcbDispatch(LosTaskCB *stcb, siginfo_t *info)
{
    bool masked = FALSE;
@@ -180,6 +225,21 @@ int OsTcbDispatch(LosTaskCB *stcb, siginfo_t *info)
    OS_RETURN_IF_NULL(sigcb);

    /* If signo is 0, not send signal, just check process or pthread exist */
    if (info->si_signo == 0) { /* signo 0 only probes whether the process/thread exists */
        return 0;
    }
    masked = (bool)OsSigIsMember(&sigcb->sigprocmask, info->si_signo);
    if (masked) {
        /* If signal is in wait list and mask list, need unblock it */
        if (LOS_ListEmpty(&sigcb->waitList) ||
            (!LOS_ListEmpty(&sigcb->waitList) && !OsSigIsMember(&sigcb->sigwaitmask, info->si_signo))) {
            OsSigAddSet(&sigcb->sigPendFlag, info->si_signo);
        }
    } else {
        /* unmasked signal actions */
        OsSigAddSet(&sigcb->sigFlag, info->si_signo); /* record it in the unmasked pending set */
    }

    if (OsAddSigInfoToTmpList(sigcb, info) == LOS_NOK) {
@@ -207,6 +268,16 @@ void OsSigMaskSwitch(LosTaskCB * const rtcb, sigset_t set)
    sigset_t unmaskset;

    rtcb->sig.sigprocmask = set;
    unmaskset = GETUNMASKSET(rtcb->sig.sigprocmask, rtcb->sig.sigPendFlag); /* compute the pending-but-no-longer-masked set */
    if (unmaskset != NULL_SIGNAL_SET) {
        /* pendlist do */
        rtcb->sig.sigFlag |= unmaskset;
        rtcb->sig.sigPendFlag ^= unmaskset;
    }
}

int OsSigprocMask(int how, const sigset_t_l *setl, sigset_t_l *oldsetl)
{
    LosTaskCB *spcb = NULL;
@@ -225,11 +297,19 @@ int OsSigprocMask(int how, const sigset_t_l *setl, sigset_t_l *oldsetl)
    SCHEDULER_LOCK(intSave);
    spcb = OsCurrTaskGet();
    /* If requested, copy the old mask to user. */
    if (oldsetl != NULL) {
        *(sigset_t *)oldsetl = spcb->sig.sigprocmask;
    }
    /* If requested, modify the current signal mask. */
    if (setl != NULL) {
        set = *(sigset_t *)setl;
        /* Okay, determine what we are supposed to do */
@@ -238,35 +318,68 @@ int OsSigprocMask(int how, const sigset_t_l *setl, sigset_t_l *oldsetl)
            /* Set the union of the current set and the
             * set pointed to by set as the new sigprocmask.
             */
            case SIG_BLOCK:
                spcb->sig.sigprocmask |= set; /* add bits to the mask */
                break;
            /* Set the intersection of the current set and the
             * signal set pointed to by set as the new sigprocmask.
             */
            case SIG_UNBLOCK:
                spcb->sig.sigprocmask &= ~(set); /* clear bits from the mask */
                break;
            /* Set the signal set pointed to by set as the new sigprocmask. */
            case SIG_SETMASK:
                spcb->sig.sigprocmask = set; /* install the new mask wholesale */
                break;
            default:
                ret = -EINVAL;
                break;
        }
        /* If pending mask not in sigmask, need set sigflag. */
        OsSigMaskSwitch(spcb, spcb->sig.sigprocmask); /* refresh the mask-related pending state */
    }
    SCHEDULER_UNLOCK(intSave);

    return ret;
}
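The SIG_BLOCK / SIG_UNBLOCK / SIG_SETMASK semantics mirror POSIX sigprocmask(). A userspace sketch of the same contract (standard API, not part of this diff):

    #include <signal.h>

    int BlockUsr1(sigset_t *old)
    {
        sigset_t set;
        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        return sigprocmask(SIG_BLOCK, &set, old); /* 'old' receives the previous mask, like oldsetl above */
    }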
/* Run a handler for every task of a process. */
int OsSigProcessForeachChild(LosProcessCB *spcb, ForEachTaskCB handler, void *arg)
{
    int ret;

    /* Visit the main thread last (if present) */
    LosTaskCB *taskCB = NULL;
    LOS_DL_LIST_FOR_EACH_ENTRY(taskCB, &(spcb->threadSiblingList), LosTaskCB, threadList) { /* walk the process's thread list */
        ret = handler(taskCB, arg); /* invoke the callback */
        OS_RETURN_IF(ret != 0, ret);
    }
    return LOS_OK;
}

static int SigProcessSignalHandler(LosTaskCB *tcb, void *arg)
{
    struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg; /* unpack the argument */
    int ret;
    int isMember;
@@ -285,6 +399,15 @@ static int SigProcessSignalHandler(LosTaskCB *tcb, void *arg)
        return 0;
    }

    /* If the default tcb is not set, then set this one as default. */
    if (!info->defaultTcb) {
        info->defaultTcb = tcb;
    }

    isMember = OsSigIsMember(&tcb->sig.sigwaitmask, info->sigInfo->si_signo); /* is this task waiting for the signal? */
    if (isMember && (!info->awakenedTcb)) { /* it is waiting, and no task has been awakened yet */
        /* This means the task is waiting for this signal. Stop looking for it and use this tcb.
         * The requirement is: if more than one task in this task group is waiting for the signal,
         * then only one indeterminate task in the group will receive the signal.
         */
        ret = OsTcbDispatch(tcb, info->sigInfo); /* dispatch to that task; note tcb is not the current task */
        OS_RETURN_IF(ret < 0, ret);

        /* set this tcb as awakenedTcb */
        info->awakenedTcb = tcb;
        OS_RETURN_IF(info->receivedTcb != NULL, SIG_STOP_VISIT); /* Stop search */
    }

    /* Is this signal unblocked on this thread? */
    isMember = OsSigIsMember(&tcb->sig.sigprocmask, info->sigInfo->si_signo); /* has this task masked the signal? */
    if ((!isMember) && (!info->receivedTcb) && (tcb != info->awakenedTcb)) { /* unmasked, and no receiver chosen yet */
        /* if unblockedTcb of this signal is not set, then set it. */
        if (!info->unblockedTcb) {
            info->unblockedTcb = tcb;
        }

        ret = OsTcbDispatch(tcb, info->sigInfo); /* deliver to this task */
        OS_RETURN_IF(ret < 0, ret);
        /* set this tcb as receivedTcb */
        info->receivedTcb = tcb;
        OS_RETURN_IF(info->awakenedTcb != NULL, SIG_STOP_VISIT); /* Stop search */
    }
    return 0; /* Keep searching */
}

/* On SIGKILL, have each pended task of the process handle it. */
static int SigProcessKillSigHandler(LosTaskCB *tcb, void *arg)
{
    struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg; /* unpack the argument */

    return OsPendingTaskWake(tcb, info->sigInfo->si_signo);
}

/* Pick the final delivery target when the walk found no waiter or receiver. */
static void SigProcessLoadTcb(struct ProcessSignalInfo *info, siginfo_t *sigInfo)
{
    LosTaskCB *tcb = NULL;

    if (info->awakenedTcb == NULL && info->receivedTcb == NULL) { /* neither an awakened nor a receiving task was found */
        if (info->unblockedTcb) { /* prefer a task that has the signal unblocked */
            tcb = info->unblockedTcb;
        } else if (info->defaultTcb) { /* otherwise fall back to the default task */
            tcb = info->defaultTcb;
        } else {
            return;
        }
        /* Deliver the signal to the selected task */
        (void)OsTcbDispatch(tcb, sigInfo);
    }
}
/* Send the given signal to the given process. */
int OsSigProcessSend(LosProcessCB *spcb, siginfo_t *sigInfo)
{
    int ret;
    struct ProcessSignalInfo info = {
        .sigInfo = sigInfo,  /* the signal payload */
        .defaultTcb = NULL,  /* the four TCB fields are filled in while walking the process's tasks */
        .unblockedTcb = NULL,
        .awakenedTcb = NULL,
        .receivedTcb = NULL
    };
    /* The goal is to find at least one task in the process to take the signal,
     * preferring awakenedTcb > receivedTcb > unblockedTcb > defaultTcb. */

    if (info.sigInfo == NULL) {
        return -EFAULT;
    }

    /* visit all taskcb and dispatch signal */
    if (info.sigInfo->si_signo == SIGKILL) {
        OsSigAddSet(&spcb->sigShare, info.sigInfo->si_signo);
        (void)OsSigProcessForeachChild(spcb, SigProcessKillSigHandler, &info);
        return 0;
    } else {
        ret = OsSigProcessForeachChild(spcb, SigProcessSignalHandler, &info); /* let every task see the signal */
    }
    if (ret < 0) {
        return ret;
    }
    SigProcessLoadTcb(&info, sigInfo); /* make sure some task ends up receiving it */
    return 0;
}

/* Clear the whole signal set. */
int OsSigEmptySet(sigset_t *set)
{
    *set = NULL_SIGNAL_SET;
    return 0;
}

/* Privilege process can't send to kernel and privilege process */
static int OsSignalPermissionToCheck(const LosProcessCB *spcb)
{
    UINTPTR gid = (UINTPTR)OS_GET_PGROUP_LEADER(spcb->pgroup);
    if (gid == OS_KERNEL_PROCESS_GROUP) { /* the kernel process group may not be signalled */
        return -EPERM;
    } else if (gid == OS_USER_PRIVILEGE_PROCESS_GROUP) { /* nor may the user privilege process group */
        return -EPERM;
    }

    return 0;
}
/* Permission / process-group filtering before a signal is dispatched. */
STATIC int SendSigPermissionCheck(LosProcessCB *spcb, int permission)
{
    if (spcb == NULL) {
        return -ESRCH;
    }

    if (OsProcessIsUnused(spcb)) { /* the PCB must belong to a live process (not necessarily the current one) */
        return -ESRCH;
    }

#ifdef LOSCFG_SECURITY_CAPABILITY
    LosProcessCB *current = OsCurrProcessGet(); /* the sender's permission is checked against the current process */
    /* Kernel process always has kill permission and user process should check permission */
    if (OsProcessIsUserMode(current) && !(current->processStatus & OS_PROCESS_FLAG_EXIT)) { /* user-mode senders are capability-checked */
        if ((current != spcb) && (!IsCapPermit(CAP_KILL)) && (current->user->userID != spcb->user->userID)) {
            return -EPERM;
        }
    }
@@ -441,7 +659,11 @@ int OsSendSigToProcess(LosProcessCB *spcb, int sig, int permission)
    info.si_code = SI_USER;
    info.si_value.sival_ptr = NULL;

    return OsSigProcessSend(spcb, &info); /* deliver to the target process */
}

int OsDispatch(pid_t pid, siginfo_t *info, int permission)
@@ -474,14 +696,24 @@ int OsKill(pid_t pid, int sig, int permission)
        return -EINVAL;
    }

    /* Create the siginfo structure */
    info.si_signo = sig;            /* signal number */
    info.si_code = SI_USER;         /* originated from a user process */
    info.si_value.sival_ptr = NULL;

    if (pid > 0) {
        /* Send the signal to the specify process */
        ret = OsDispatch(pid, &info, permission); /* dispatch it */
    } else if (pid == -1) {
        /* Send SIG to all processes */
        ret = OsSendSignalToAllProcess(&info, permission);
@@ -493,17 +725,29 @@ int OsKill(pid_t pid, int sig, int permission)
    }
    return ret;
}

/* Locked wrapper around the send path. */
int OsKillLock(pid_t pid, int sig)
{
    int ret;
    unsigned int intSave;

    SCHEDULER_LOCK(intSave);
    ret = OsKill(pid, sig, OS_USER_KILL_PERMISSION); /* send with user permission */
    SCHEDULER_UNLOCK(intSave);
    return ret;
}

INT32 OsTaskKillUnsafe(UINT32 taskID, INT32 signo)
{
    siginfo_t info;
@@ -522,7 +766,11 @@ INT32 OsTaskKillUnsafe(UINT32 taskID, INT32 signo)
     * dispatch rules. */
    return OsTcbDispatch(taskCB, &info);
}

/* Send a signal to a thread. */
int OsPthreadKill(UINT32 tid, int signo)
{
    int ret;
@@ -540,7 +788,11 @@ int OsPthreadKill(UINT32 tid, int signo)
    SCHEDULER_UNLOCK(intSave);
    return ret;
}
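In userspace these entry points back the standard kill()/pthread_kill() calls. A POSIX sketch (standard API, not part of this diff):

    #include <signal.h>
    #include <unistd.h>

    int NotifyPeer(pid_t pid)
    {
        return kill(pid, SIGUSR1); /* routed through OsDispatch -> OsSigProcessSend in this kernel */
    }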
/* Add signo to the given signal set. */
int OsSigAddSet(sigset_t *set, int signo)
{
    /* Verify the signal */
@@ -548,6 +800,15 @@ int OsSigAddSet(sigset_t *set, int signo)
        return -EINVAL;
    } else {
        /* In musl, sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
        signo -= 1; /* signals are numbered 1..64 but stored in bits 0..63 */
        /* Add the signal to the set */
        *set |= SIGNO2SET((unsigned int)signo); /* set its bit */
        return LOS_OK;
    }
}

/* Fetch the set of signals pending for the current task. */
int OsSigPending(sigset_t *set)
{
    LosTaskCB *tcb = NULL;
@@ -566,7 +828,11 @@ int OsSigPending(sigset_t *set)
    SCHEDULER_LOCK(intSave);
    tcb = OsCurrTaskGet();
    *set = tcb->sig.sigPendFlag; /* the blocked-pending set */
    SCHEDULER_UNLOCK(intSave);
    return LOS_OK;
}
@@ -581,7 +847,11 @@ STATIC int FindFirstSetedBit(UINT64 n)
    for (count = 0; (count < UINT64_BIT_SIZE) && (n ^ 1ULL); n >>= 1, count++) {}
    return (count < UINT64_BIT_SIZE) ? count : (-1);
}

/* Timed wait for a set of signals. */
int OsSigTimedWaitNoLock(sigset_t *set, siginfo_t *info, unsigned int timeout)
{
    LosTaskCB *task = NULL;
@@ -592,19 +862,32 @@ int OsSigTimedWaitNoLock(sigset_t *set, siginfo_t *info, unsigned int timeout)
    sigcb = &task->sig;

    if (sigcb->waitList.pstNext == NULL) {
        LOS_ListInit(&sigcb->waitList); /* initialize the signal wait list on first use */
    }
    /* If pendingflag & set > 0, should clear pending flag */
    sigset_t clear = sigcb->sigPendFlag & *set;
    if (clear) {
        sigcb->sigPendFlag ^= clear;
        ret = FindFirstSetedBit((UINT64)clear) + 1;
        OsMoveTmpInfoToUnbInfo(sigcb, ret);
    } else {
        OsSigAddSet(set, SIGKILL); /* SIGKILL must always be waited for */
        OsSigAddSet(set, SIGSTOP); /* and so must SIGSTOP */

        sigcb->sigwaitmask |= *set; /* OR into the wait mask: any signal in sigwaitmask wakes the task */
        OsTaskWaitSetPendMask(OS_TASK_WAIT_SIGNAL, sigcb->sigwaitmask, timeout);
        ret = task->ops->wait(task, &sigcb->waitList, timeout);
        if (ret == LOS_ERRNO_TSK_TIMEOUT) {
@@ -617,7 +900,11 @@ int OsSigTimedWaitNoLock(sigset_t *set, siginfo_t *info, unsigned int timeout)
    }
    return ret;
}

/* Wait for signals on behalf of the current task. */
int OsSigTimedWait(sigset_t *set, siginfo_t *info, unsigned int timeout)
{
    int ret;
@@ -625,12 +912,20 @@ int OsSigTimedWait(sigset_t *set, siginfo_t *info, unsigned int timeout)

    SCHEDULER_LOCK(intSave);
    ret = OsSigTimedWaitNoLock(set, info, timeout); /* the no-lock variant does the work */
    SCHEDULER_UNLOCK(intSave);

    return ret;
}
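The userspace analogue of this timed wait is POSIX sigtimedwait; a sketch using only the standard API (not part of this diff):

    #include <signal.h>
    #include <time.h>

    int WaitForUsr1(void)
    {
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { 1, 0 }; /* one second */
        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        return sigtimedwait(&set, &info, &ts); /* the signal number, or -1 with errno == EAGAIN on timeout */
    }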
/* Suspend the current task until a signal arrives. */
int OsPause(void)
{
    LosTaskCB *spcb = NULL;
@@ -640,7 +935,11 @@ int OsPause(void)
    oldSigprocmask = spcb->sig.sigprocmask;
    return OsSigSuspend(&oldSigprocmask);
}

/* Replace the mask with *set and suspend until a signal arrives; the old mask is restored afterwards. */
int OsSigSuspend(const sigset_t *set)
{
    unsigned int intSave;
@@ -680,7 +979,10 @@ int OsSigSuspend(const sigset_t *set)
    return -EINTR;
}

int OsSigAction(int sig, const sigaction_t *act, sigaction_t *oact)
{
    UINTPTR addr;
@@ -689,6 +991,16 @@ int OsSigAction(int sig, const sigaction_t *act, sigaction_t *oact)
    if (!GOOD_SIGNO(sig) || sig < 1 || act == NULL) {
        return -EINVAL;
    }
    /* copy the user-supplied action into kernel space */
    if (LOS_ArchCopyFromUser(&action, act, sizeof(sigaction_t)) != LOS_OK) {
        return -EFAULT;
    }

    if (sig == SIGSYS) {
        addr = OsGetSigHandler();
        if (addr == 0) {
            OsSetSigHandler((unsigned long)(UINTPTR)action.sa_handler); /* install the process signal handler */
            /* void (*sa_handler)(int);                        is the plain handler form */
            /* void (*sa_sigaction)(int, siginfo_t *, void *); is the extended form */
            return LOS_OK;
        }
        return -EINVAL;
@@ -724,11 +1037,18 @@ VOID OsSigIntUnlock(VOID)
    (VOID)LOS_AtomicSub((Atomic *)&sigcb->sigIntLock, 1);
}

VOID *OsSaveSignalContext(VOID *sp, VOID *newSp)
{
    UINTPTR sigHandler;
    UINT32 intSave;

    LosTaskCB *task = OsCurrTaskGet();
    LosProcessCB *process = OsCurrProcessGet();
    sig_cb *sigcb = &task->sig;
@@ -761,7 +1081,11 @@ VOID *OsSaveSignalContext(VOID *sp, VOID *newSp)
    OsProcessExitCodeSignalSet(process, signo);
    sigcb->sigContext = sp;

    OsInitSignalContext(sp, newSp, sigHandler, signo, sigVal); /* set up the signal context */

    /* sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
    sigcb->sigFlag ^= 1ULL << (signo - 1);
@@ -774,7 +1098,10 @@ VOID *OsSaveSignalContext(VOID *sp, VOID *newSp)
    return sp;
}

VOID *OsRestorSignalContext(VOID *sp)
{
    UINT32 intSave;
@@ -793,8 +1120,13 @@ VOID *OsRestorSignalContext(VOID *sp)
    VOID *saveContext = sigcb->sigContext;
    sigcb->sigContext = NULL;
    sigcb->count--;
    process->sigShare = 0; /* back to user mode: clear the shared signal set */
    OsProcessExitCodeSignalClear(process); /* and the exit-code signal bits */
    SCHEDULER_UNLOCK(intSave);
    return saveContext;
}
diff --git a/src/kernel_liteos_a/kernel/base/mem/common/los_memstat.c b/src/kernel_liteos_a/kernel/base/mem/common/los_memstat.c
index 03815a3f..fa219606 100644
--- a/src/kernel_liteos_a/kernel/base/mem/common/los_memstat.c
+++ b/src/kernel_liteos_a/kernel/base/mem/common/los_memstat.c
@@ -32,9 +32,15 @@
 #include "los_memstat_pri.h"
 #include "los_task_pri.h"

/* Per-task memory usage accounting. */
LITE_OS_SEC_BSS_MINOR STATIC TskMemUsedInfo g_tskMemUsedInfo[LOSCFG_BASE_CORE_TSK_LIMIT];

/* Charge an allocation to the given task. */
LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemUsedInc(UINT32 usedSize, UINT32 taskID)
{
    if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
@@ -43,9 +49,15 @@ LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemUsedInc(UINT32 usedSize, UINT32 taskID)
    if (OS_INT_ACTIVE) {
        return;
    }
    g_tskMemUsedInfo[taskID].memUsed += usedSize; /* accumulate */
}

/* Subtract a freed allocation from the given task. */
LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemUsedDec(UINT32 usedSize, UINT32 taskID)
{
    if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
@@ -59,9 +71,15 @@ LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemUsedDec(UINT32 usedSize, UINT32 taskID)
        OsCurrTaskGet()->taskName, g_tskMemUsedInfo[taskID].memUsed, usedSize);
        return;
    }
    g_tskMemUsedInfo[taskID].memUsed -= usedSize; /* decrement */
}

/* Query how much memory the given task is using. */
LITE_OS_SEC_TEXT_MINOR UINT32 OsTaskMemUsage(UINT32 taskID)
{
    if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
@@ -70,7 +88,11 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsTaskMemUsage(UINT32 taskID)
    return g_tskMemUsedInfo[taskID].memUsed;
}

/* Reset the given task's memory usage record. */
LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemClear(UINT32 taskID)
{
    if (taskID >= LOSCFG_BASE_CORE_TSK_LIMIT) {
@@ -82,8 +104,13 @@ LITE_OS_SEC_TEXT_MINOR VOID OsTaskMemClear(UINT32 taskID)
    }
    g_tskMemUsedInfo[taskID].memUsed = 0;
}

/* Slab: an allocator that carves memory into fixed-size object caches, used for kernel objects. */
#ifdef LOS_MEM_SLAB
LITE_OS_SEC_BSS_MINOR STATIC TskSlabUsedInfo g_tskSlabUsedInfo[LOSCFG_BASE_CORE_TSK_LIMIT];

LITE_OS_SEC_TEXT_MINOR VOID OsTaskSlabUsedInc(UINT32 usedSize, UINT32 taskID)
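A hedged sketch of how an allocator hooks this accounting. The wrapper names are hypothetical; LOS_CurTaskIDGet is assumed available, as the TLSF code further below suggests:

    VOID OnAllocAccounted(UINT32 size)
    {
        OsTaskMemUsedInc(size, LOS_CurTaskIDGet()); /* charge the running task */
    }

    VOID OnFreeAccounted(UINT32 size)
    {
        OsTaskMemUsedDec(size, LOS_CurTaskIDGet()); /* refund it on free */
    }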
diff --git a/src/kernel_liteos_a/kernel/base/mem/membox/los_membox.c b/src/kernel_liteos_a/kernel/base/mem/membox/los_membox.c
index 4180d3bd..aed57bd8 100644
--- a/src/kernel_liteos_a/kernel/base/mem/membox/los_membox.c
+++ b/src/kernel_liteos_a/kernel/base/mem/membox/los_membox.c
@@ -16,6 +16,8 @@
  * to endorse or promote products derived from this software without specific prior written
  * permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
@@ -51,6 +54,8 @@
 #include "los_hwi.h"
 #include "los_spinlock.h"

 #ifdef LOSCFG_AARCH64
 #define OS_MEMBOX_MAGIC 0xa55a5aa5a55a5aa5
 #else
diff --git a/src/kernel_liteos_a/kernel/base/mem/tlsf/los_memory.c b/src/kernel_liteos_a/kernel/base/mem/tlsf/los_memory.c
index 5086ca7b..ef8e8856 100644
--- a/src/kernel_liteos_a/kernel/base/mem/tlsf/los_memory.c
+++ b/src/kernel_liteos_a/kernel/base/mem/tlsf/los_memory.c
@@ -43,7 +43,11 @@
 #include "los_lms_pri.h"
 #endif

/* Used to cut non-essential functions. */
#define OS_MEM_FREE_BY_TASKID 0
#ifdef LOSCFG_KERNEL_VM
#define OS_MEM_EXPAND_ENABLE 1
@@ -56,16 +60,42 @@

/* column num of the output info of mem node */
#define OS_MEM_COLUMN_NUM 8

UINT8 *m_aucSysMem0 = NULL; ///< start of the exception-interaction heap; equals m_aucSysMem1 when that feature is disabled
UINT8 *m_aucSysMem1 = NULL; ///< start of the (kernel-mode) system dynamic memory pool

#ifdef LOSCFG_MEM_MUL_POOL
VOID *g_poolHead = NULL; ///< head of the chain that links multiple memory pools
#endif

/* The following is the macro definition and interface implementation related to the TLSF. */

/* Supposing a Second Level Index: SLI = 3. */
#define OS_MEM_SLI 3 ///< number of second-level index bits
/* Giving 1 free list for each small bucket: 4, 8, 12, up to 124. */
#define OS_MEM_SMALL_BUCKET_COUNT 31 ///< small buckets step by 4 bytes, from 4 up to 124
#define OS_MEM_SMALL_BUCKET_MAX_SIZE 128
/* Giving OS_MEM_FREE_LIST_NUM free lists for each large bucket. */
#define OS_MEM_LARGE_BUCKET_COUNT 24
#define OS_MEM_FREE_LIST_NUM (1 << OS_MEM_SLI)
/* OS_MEM_SMALL_BUCKET_MAX_SIZE to the power of 2 is 7. */
#define OS_MEM_LARGE_START_BUCKET 7

/* The count of free list. */
#define OS_MEM_FREE_LIST_COUNT (OS_MEM_SMALL_BUCKET_COUNT + (OS_MEM_LARGE_BUCKET_COUNT << OS_MEM_SLI))
/* The bitmap is used to indicate whether the free list is empty, 1: not empty, 0: empty. */
#define OS_MEM_BITMAP_WORDS ((OS_MEM_FREE_LIST_COUNT >> 5) + 1) ///< one bit per free list; >> 5 because each 32-bit word holds 32 bits, so a handful of words covers every list. This bitmap is the key to how TLSF finds a non-empty list in O(1).

#define OS_MEM_BITMAP_MASK 0x1FU ///< the low 5 bits: a bit index within one 32-bit bitmap word

/* Used to find the first bit of 1 in bitmap. */
STATIC INLINE UINT16 OsMemFFS(UINT32 bitmap)
@@ -101,7 +132,11 @@ STATIC INLINE UINT32 OsMemLog2(UINT32 size)
    return OsMemFLS(size);
}

/* Get the first level: f = log2(size). */
STATIC INLINE UINT32 OsMemFlGet(UINT32 size)
{
    if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
@@ -110,13 +145,62 @@ STATIC INLINE UINT32 OsMemFlGet(UINT32 size)
    return OsMemLog2(size);
}

/* Get the second level: s = (size - 2^f) * 2^SLI / 2^f. */
STATIC INLINE UINT32 OsMemSlGet(UINT32 size, UINT32 fl)
{
    return (((size << OS_MEM_SLI) >> fl) - OS_MEM_FREE_LIST_NUM);
}
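To make the two-level indexing concrete, here is a standalone sketch reproducing the arithmetic above for a 520-byte request. The small-bucket branch body is elided in this hunk; `(size >> 2) - 1` is an assumption consistent with "one list per 4-byte bucket":

    #include <assert.h>
    #include <stdint.h>

    #define DEMO_SLI 3
    #define DEMO_SMALL_COUNT 31
    #define DEMO_SMALL_MAX 128
    #define DEMO_LIST_NUM (1U << DEMO_SLI)
    #define DEMO_LARGE_START 7

    static uint32_t DemoLog2(uint32_t v) { uint32_t r = 0; while (v >>= 1) { r++; } return r; }

    static uint32_t DemoFreeListIndex(uint32_t size)
    {
        if (size < DEMO_SMALL_MAX) {
            return (size >> 2) - 1; /* small buckets 4, 8, ..., 124 (assumed branch body) */
        }
        uint32_t fl = DemoLog2(size);                             /* first level */
        uint32_t sl = ((size << DEMO_SLI) >> fl) - DEMO_LIST_NUM; /* second level */
        return DEMO_SMALL_COUNT + ((fl - DEMO_LARGE_START) << DEMO_SLI) + sl;
    }

    int main(void)
    {
        /* 520: fl = 9, sl = ((520 << 3) >> 9) - 8 = 0, so index = 31 + (2 << 3) + 0 = 47 */
        assert(DemoFreeListIndex(520) == 47);
        return 0;
    }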
/* The following is the memory algorithm related macro definition and interface implementation. */

struct OsMemNodeHead {              /* a node in the memory pool */
    UINT32 magic;                   /* magic number, OS_MEM_NODE_MAGIC */
    union {
        struct OsMemNodeHead *prev; /* The prev is used for current node points to the previous node */
        struct OsMemNodeHead *next; /* The next is used for last node points to the expand node */
    } ptr;
#ifdef LOSCFG_MEM_LEAKCHECK
    UINTPTR linkReg[LOS_RECORD_LR_CNT];
#endif
    UINT32 sizeAndFlag;
};

struct OsMemUsedNodeHead {
    struct OsMemNodeHead header;
#if OS_MEM_FREE_BY_TASKID
    UINT32 taskID;
#endif
};

struct OsMemFreeNodeHead {
    struct OsMemNodeHead header;
    struct OsMemFreeNodeHead *prev;
    struct OsMemFreeNodeHead *next;
};

struct OsMemPoolInfo {
    VOID *pool;
    UINT32 totalSize;
    UINT32 attr;
#ifdef LOSCFG_MEM_WATERLINE
    UINT32 waterLine;   /* Maximum usage size in a memory pool */
    UINT32 curUsedSize; /* Current usage size in a memory pool */
#endif
};

struct OsMemPoolHead {
    struct OsMemPoolInfo info;
    UINT32 freeListBitmap[OS_MEM_BITMAP_WORDS];
    struct OsMemFreeNodeHead *freeList[OS_MEM_FREE_LIST_COUNT];
    SPIN_LOCK_S spinlock; /* guards this pool; a spinlock because multiple CPUs may contend for it */
#ifdef LOSCFG_MEM_MUL_POOL
    VOID *nextPool;       /* next pool in the chain (an OsMemPoolHead) */
#endif
};

@@ -168,6 +253,18 @@ struct OsMemPoolHead {
#define MEM_UNLOCK(pool, state) LOS_SpinUnlockRestore(&(pool)->spinlock, (state))

/* The memory pool support expand. */
#define OS_MEM_POOL_EXPAND_ENABLE 0x01
/* The memory pool support no lock. */
#define OS_MEM_POOL_LOCK_ENABLE 0x02

#define OS_MEM_NODE_MAGIC 0xABCDDCBA
#define OS_MEM_MIN_ALLOC_SIZE (sizeof(struct OsMemFreeNodeHead) - sizeof(struct OsMemUsedNodeHead))

#define OS_MEM_NODE_USED_FLAG 0x80000000U    /* "in use" flag */
#define OS_MEM_NODE_ALIGNED_FLAG 0x40000000U /* "aligned" flag */
#define OS_MEM_NODE_LAST_FLAG 0x20000000U    /* Sentinel Node */

#define OS_MEM_NODE_ALIGNED_AND_USED_FLAG (OS_MEM_NODE_USED_FLAG | OS_MEM_NODE_ALIGNED_FLAG | OS_MEM_NODE_LAST_FLAG)

#define OS_MEM_NODE_GET_ALIGNED_FLAG(sizeAndFlag) \
@@ -226,33 +324,56 @@ STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave)
#if OS_MEM_FREE_BY_TASKID
STATIC INLINE VOID OsMemNodeSetTaskID(struct OsMemUsedNodeHead *node)
{
    node->taskID = LOS_CurTaskIDGet(); /* stamp the allocating task's ID on the node */
}
#endif

#ifdef LOSCFG_MEM_WATERLINE
STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
{
    pool->info.curUsedSize += size; /* grow the current usage */
    if (pool->info.curUsedSize > pool->info.waterLine) {
        pool->info.waterLine = pool->info.curUsedSize; /* and raise the high-water mark */
    }
}
#else
STATIC INLINE VOID OsMemWaterUsedRecord(struct OsMemPoolHead *pool, UINT32 size)
{
    (VOID)pool;
    (VOID)size;
}
#endif

#if OS_MEM_EXPAND_ENABLE
/* Walk to the last sentinel node. */
STATIC INLINE struct OsMemNodeHead *OsMemLastSentinelNodeGet(const struct OsMemNodeHead *sentinelNode)
{
    struct OsMemNodeHead *node = NULL;
    VOID *ptr = sentinelNode->ptr.next; /* follow the chain of discontiguous memory chunks */
    UINT32 size = OS_MEM_NODE_GET_SIZE(sentinelNode->sizeAndFlag); /* and their sizes */

    while ((ptr != NULL) && (size != 0)) {
        node = OS_MEM_END_NODE(ptr, size);
@@ -262,7 +383,11 @@ STATIC INLINE struct OsMemNodeHead *OsMemLastSentinelNodeGet(const struct OsMemN
    return node;
}

/* Validate a sentinel node. */
STATIC INLINE BOOL OsMemSentinelNodeCheck(struct OsMemNodeHead *sentinelNode)
{
    if (!OS_MEM_NODE_GET_USED_FLAG(sentinelNode->sizeAndFlag)) {
@@ -275,7 +400,11 @@ STATIC INLINE BOOL OsMemSentinelNodeCheck(struct OsMemNodeHead *sentinelNode)
    return TRUE;
}

/* Is this the last sentinel node? */
STATIC INLINE BOOL OsMemIsLastSentinelNode(struct OsMemNodeHead *sentinelNode)
{
    if (OsMemSentinelNodeCheck(sentinelNode) == FALSE) {
@@ -290,11 +419,19 @@ STATIC INLINE BOOL OsMemIsLastSentinelNode(struct OsMemNodeHead *sentinelNode)
    return FALSE;
}

/* Set a sentinel node's contents. */
STATIC INLINE VOID OsMemSentinelNodeSet(struct OsMemNodeHead *sentinelNode, VOID *newNode, UINT32 size)
{
    if (sentinelNode->ptr.next != NULL) { /* this sentinel already chains to a discontiguous chunk */
        sentinelNode = OsMemLastSentinelNodeGet(sentinelNode); /* so walk to the last sentinel first */
    }

    sentinelNode->sizeAndFlag = size;
@@ -330,6 +467,16 @@ STATIC INLINE struct OsMemNodeHead *PreSentinelNodeGet(const VOID *pool, const s
    return NULL;
}
/* Free a large, page-backed allocation. */
UINT32 OsMemLargeNodeFree(const VOID *ptr)
{
    LosVmPage *page = OsVmVaddrToPage((VOID *)ptr);
    if ((page == NULL) || (page->nPages == 0)) {
        return LOS_NOK;
    }
    LOS_PhysPagesFreeContiguous((VOID *)ptr, page->nPages); /* release the contiguous physical pages */

    return LOS_OK;
}
@@ -376,7 +524,11 @@ STATIC INLINE BOOL TryShrinkPool(const VOID *pool, const struct OsMemNodeHead *n
#endif
    return TRUE;
}

/* Pool expansion. */
STATIC INLINE INT32 OsMemPoolExpandSub(VOID *pool, UINT32 size, UINT32 intSave)
{
    UINT32 tryCount = MAX_SHRINK_PAGECACHE_TRY;
@@ -384,11 +536,19 @@ STATIC INLINE INT32 OsMemPoolExpandSub(VOID *pool, UINT32 size, UINT32 intSave)
    struct OsMemNodeHead *newNode = NULL;
    struct OsMemNodeHead *endNode = NULL;

    size = ROUNDUP(size + OS_MEM_NODE_HEAD_SIZE, PAGE_SIZE); /* round up to whole pages */
    endNode = OS_MEM_END_NODE(pool, poolInfo->info.totalSize); /* locate the pool's sentinel node */

RETRY:
    newNode = (struct OsMemNodeHead *)LOS_PhysPagesAllocContiguous(size >> PAGE_SHIFT); /* grab contiguous physical pages for the new region */
    if (newNode == NULL) {
        if (tryCount > 0) {
            tryCount--;
@@ -412,6 +572,28 @@ RETRY:
        size = (resize == 0) ? size : resize;
    }
#endif
    newNode->sizeAndFlag = (size - OS_MEM_NODE_HEAD_SIZE); /* size of the new region */
    newNode->ptr.prev = OS_MEM_END_NODE(newNode, size); /* its prev points at its own sentinel */
    OsMemSentinelNodeSet(endNode, newNode, size); /* chain the old pool's sentinel to the new region */
    OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode);

    endNode = OS_MEM_END_NODE(newNode, size);
    (VOID)memset(endNode, 0, sizeof(*endNode));
    endNode->ptr.next = NULL;
    endNode->magic = OS_MEM_NODE_MAGIC;
    OsMemSentinelNodeSet(endNode, NULL, 0);
    OsMemWaterUsedRecord(poolInfo, OS_MEM_NODE_HEAD_SIZE);

    return 0;
}

STATIC INLINE INT32 OsMemPoolExpand(VOID *pool, UINT32 allocSize, UINT32 intSave)
{
    UINT32 expandDefault = MEM_EXPAND_SIZE(LOS_MemPoolSizeGet(pool)); /* expand by at least 1/8 of the current pool size */
    UINT32 expandSize = MAX(expandDefault, allocSize);
    UINT32 tryCount = 1; /* retry budget */
    UINT32 ret;

    do {
@@ -448,7 +631,11 @@ STATIC INLINE INT32 OsMemPoolExpand(VOID *pool, UINT32 allocSize, UINT32 intSave
    return -1;
}

/* Allow the given pool to expand. */
VOID LOS_MemExpandEnable(VOID *pool)
{
    if (pool == NULL) {
@@ -487,7 +674,11 @@ STATIC INLINE VOID OsLmsAllocAlignMark(VOID *ptr, VOID *alignedPtr, UINT32 size)
        g_lms->simpleMark((UINTPTR)ptr + sizeof(UINT32), (UINTPTR)alignedPtr, LMS_SHADOW_REDZONE_U8);
    }

    /* mark remaining as redzone */
    g_lms->simpleMark(LMS_ADDR_ALIGN((UINTPTR)alignedPtr + size), (UINTPTR)OS_MEM_NEXT_NODE(allocNode), LMS_SHADOW_REDZONE_U8);
}
(UINTPTR)OS_MEM_NEXT_NODE(allocNode), LMS_SHADOW_REDZONE_U8);
}
@@ -523,7 +714,12 @@ STATIC INLINE VOID OsLmsReallocResizeMark(struct OsMemNodeHead *node, UINT32 res
     g_lms->simpleMark((UINTPTR)node + resize, (UINTPTR)OS_MEM_NEXT_NODE(node), LMS_SHADOW_REDZONE_U8);
 }
 #endif
+<<<<<<< HEAD
+
+#ifdef LOSCFG_MEM_LEAKCHECK
+=======
 #ifdef LOSCFG_MEM_LEAKCHECK // memory leak checking
+>>>>>>> remotes/origin/main
 STATIC INLINE VOID OsMemLinkRegisterRecord(struct OsMemNodeHead *node)
 {
     LOS_RecordLR(node->linkReg, LOS_RECORD_LR_CNT, LOS_RECORD_LR_CNT, LOS_OMIT_LR_CNT);
@@ -549,7 +745,11 @@ STATIC INLINE VOID OsMemUsedNodePrint(struct OsMemNodeHead *node)
         PRINTK("\n");
     }
 }
+<<<<<<< HEAD
+
+=======
 /// print the nodes currently in use
+>>>>>>> remotes/origin/main
 VOID OsMemUsedNodeShow(VOID *pool)
 {
     if (pool == NULL) {
@@ -626,17 +826,29 @@ STATIC VOID OsMemNodeBacktraceInfo(const struct OsMemNodeHead *tmpNode,
 
 STATIC INLINE UINT32 OsMemFreeListIndexGet(UINT32 size)
 {
+<<<<<<< HEAD
+    UINT32 fl = OsMemFlGet(size);
+=======
     UINT32 fl = OsMemFlGet(size); // get the first-level bitmap index
+>>>>>>> remotes/origin/main
     if (size < OS_MEM_SMALL_BUCKET_MAX_SIZE) {
         return fl;
     }
 
+<<<<<<< HEAD
+    UINT32 sl = OsMemSlGet(size, fl);
+=======
     UINT32 sl = OsMemSlGet(size, fl); // get the second-level bitmap index
+>>>>>>> remotes/origin/main
     return (OS_MEM_SMALL_BUCKET_COUNT + ((fl - OS_MEM_LARGE_START_BUCKET) << OS_MEM_SLI) + sl);
 }
 
 STATIC INLINE struct OsMemFreeNodeHead *OsMemFindCurSuitableBlock(struct OsMemPoolHead *poolHead,
+<<<<<<< HEAD
+                                                                  UINT32 index, UINT32 size)
+=======
                                                                   UINT32 index, UINT32 size)
+>>>>>>> remotes/origin/main
 {
     struct OsMemFreeNodeHead *node = NULL;
 
@@ -663,7 +875,11 @@ STATIC INLINE UINT32 OsMemNotEmptyIndexGet(struct OsMemPoolHead *poolHead, UINT3
 
     return OS_MEM_FREE_LIST_COUNT;
 }
+<<<<<<< HEAD
+
+=======
 /// find the next suitable free block
+>>>>>>> remotes/origin/main
 STATIC INLINE struct OsMemFreeNodeHead *OsMemFindNextSuitableBlock(VOID *pool, UINT32 size, UINT32 *outIndex)
 {
     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
@@ -730,13 +946,22 @@ STATIC INLINE VOID OsMemListAdd(struct OsMemPoolHead *pool, UINT32 listIndex, st
     OsMemSetFreeListBit(pool, listIndex);
     node->header.magic = OS_MEM_NODE_MAGIC;
 }
+<<<<<<< HEAD
+
+=======
 /// delete a node from the free list
+>>>>>>> remotes/origin/main
 STATIC INLINE VOID OsMemListDelete(struct OsMemPoolHead *pool, UINT32 listIndex, struct OsMemFreeNodeHead *node)
 {
     if (node == pool->freeList[listIndex]) {
         pool->freeList[listIndex] = node->next;
+<<<<<<< HEAD
+        if (node->next == NULL) {
+            OsMemClearFreeListBit(pool, listIndex);
+=======
         if (node->next == NULL) { // the list is now empty
             OsMemClearFreeListBit(pool, listIndex); // clear the bitmap bit to 0
+>>>>>>> remotes/origin/main
         } else {
             node->next->prev = NULL;
         }
@@ -748,27 +973,47 @@ STATIC INLINE VOID OsMemListDelete(struct OsMemPoolHead *pool, UINT32 listIndex,
     }
     node->header.magic = OS_MEM_NODE_MAGIC;
 }
+<<<<<<< HEAD
+
+STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node)
+{
+    UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
+=======
 /// add a free node
 STATIC INLINE VOID OsMemFreeNodeAdd(VOID *pool, struct OsMemFreeNodeHead *node)
 {
     UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag); // locate the list index by node size
+>>>>>>> remotes/origin/main
     if (index >= OS_MEM_FREE_LIST_COUNT) {
         LOS_Panic("The index of free lists is error, index = %u\n", index);
         return;
     }
+<<<<<<< HEAD
+    OsMemListAdd(pool, index, node);
+}
+
+STATIC INLINE VOID OsMemFreeNodeDelete(VOID *pool, struct OsMemFreeNodeHead *node)
+{
+    UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag);
+=======
     OsMemListAdd(pool, index, node); // link into the list
 }
 /// remove a node from the free list
 STATIC INLINE VOID OsMemFreeNodeDelete(VOID *pool, struct OsMemFreeNodeHead *node)
 {
     UINT32 index = OsMemFreeListIndexGet(node->header.sizeAndFlag); // locate the list index by node size
+>>>>>>> remotes/origin/main
     if (index >= OS_MEM_FREE_LIST_COUNT) {
         LOS_Panic("The index of free lists is error, index = %u\n", index);
         return;
     }
     OsMemListDelete(pool, index, node);
 }
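
The fl/sl pair in OsMemFreeListIndexGet above is a TLSF-style two-level lookup: sizes below OS_MEM_SMALL_BUCKET_MAX_SIZE map linearly to one bucket each, while larger sizes map to a power-of-two group (fl) plus a second-level slot (sl). A minimal self-contained sketch of the idea follows; the bucket constants and helper names here are illustrative stand-ins, not the LiteOS values:

    #include <stdint.h>
    #include <stdio.h>

    #define SMALL_BUCKET_COUNT 31  /* assumed: sizes < 128 get one bucket per 4 bytes */
    #define LARGE_START_BUCKET 7   /* log2(128), where the two-level scheme starts */
    #define SLI 3                  /* 2^3 = 8 second-level slots per power-of-two group */

    static uint32_t fl_get(uint32_t size)  /* first level: floor(log2(size)) */
    {
        return 31 - (uint32_t)__builtin_clz(size); /* GCC/Clang builtin */
    }

    static uint32_t list_index(uint32_t size)  /* size assumed >= 4 and 4-byte aligned */
    {
        if (size < 128) {
            return (size >> 2) - 1;            /* small sizes: linear buckets 0..30 */
        }
        uint32_t fl = fl_get(size);
        uint32_t sl = (size >> (fl - SLI)) & ((1U << SLI) - 1); /* slot inside the group */
        return SMALL_BUCKET_COUNT + ((fl - LARGE_START_BUCKET) << SLI) + sl;
    }

    int main(void)
    {
        printf("%u\n", list_index(128)); /* 31: first slot of the log2=7 group */
        printf("%u\n", list_index(200)); /* 35: group log2=7, slot 4 ([192,208)) */
        return 0;
    }
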
+<<<<<<< HEAD
+
+=======
 // get a free node
+>>>>>>> remotes/origin/main
 STATIC INLINE struct OsMemNodeHead *OsMemFreeNodeGet(VOID *pool, UINT32 size)
 {
     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
@@ -782,11 +1027,24 @@ STATIC INLINE struct OsMemNodeHead *OsMemFreeNodeGet(VOID *pool, UINT32 size)
 
     return &firstNode->header;
 }
+<<<<<<< HEAD
+
+=======
 /// merge a node with its predecessor; the node itself disappears
+>>>>>>> remotes/origin/main
 STATIC INLINE VOID OsMemMergeNode(struct OsMemNodeHead *node)
 {
     struct OsMemNodeHead *nextNode = NULL;
 
+<<<<<<< HEAD
+    node->ptr.prev->sizeAndFlag += node->sizeAndFlag;
+    nextNode = (struct OsMemNodeHead *)((UINTPTR)node + node->sizeAndFlag);
+    if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
+        nextNode->ptr.prev = node->ptr.prev;
+    }
+}
+
+=======
     node->ptr.prev->sizeAndFlag += node->sizeAndFlag; // the previous node grows
     nextNode = (struct OsMemNodeHead *)((UINTPTR)node + node->sizeAndFlag); // position of the next node
     if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) { // not a sentinel node
@@ -794,11 +1052,39 @@ STATIC INLINE VOID OsMemMergeNode(struct OsMemNodeHead *node)
     }
 }
 /// split a node
+>>>>>>> remotes/origin/main
 STATIC INLINE VOID OsMemSplitNode(VOID *pool, struct OsMemNodeHead *allocNode, UINT32 allocSize)
 {
     struct OsMemFreeNodeHead *newFreeNode = NULL;
     struct OsMemNodeHead *nextNode = NULL;
 
+<<<<<<< HEAD
+    newFreeNode = (struct OsMemFreeNodeHead *)(VOID *)((UINT8 *)allocNode + allocSize);
+    newFreeNode->header.ptr.prev = allocNode;
+    newFreeNode->header.sizeAndFlag = allocNode->sizeAndFlag - allocSize;
+    allocNode->sizeAndFlag = allocSize;
+    nextNode = OS_MEM_NEXT_NODE(&newFreeNode->header);
+    if (!OS_MEM_NODE_GET_LAST_FLAG(nextNode->sizeAndFlag)) {
+        nextNode->ptr.prev = &newFreeNode->header;
+        if (!OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) {
+            OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);
+            OsMemMergeNode(nextNode);
+        }
+    }
+
+    OsMemFreeNodeAdd(pool, newFreeNode);
+}
+
+STATIC INLINE VOID *OsMemCreateUsedNode(VOID *addr)
+{
+    struct OsMemUsedNodeHead *node = (struct OsMemUsedNodeHead *)addr;
+
+#if OS_MEM_FREE_BY_TASKID
+    OsMemNodeSetTaskID(node);
+#endif
+
+#ifdef LOSCFG_KERNEL_LMS
+=======
     newFreeNode = (struct OsMemFreeNodeHead *)(VOID *)((UINT8 *)allocNode + allocSize); // the new free node created by the split sits to the right of the allocated node
     newFreeNode->header.ptr.prev = allocNode; // the new node points back at its predecessor, i.e. the split walks left to right
     newFreeNode->header.sizeAndFlag = allocNode->sizeAndFlag - allocSize; // size of the new free node
@@ -824,11 +1110,17 @@ STATIC INLINE VOID *OsMemCreateUsedNode(VOID *addr)
 #endif
 
 #ifdef LOSCFG_KERNEL_LMS // memory sanitizer check
+>>>>>>> remotes/origin/main
     struct OsMemNodeHead *newNode = (struct OsMemNodeHead *)node;
     if (g_lms != NULL) {
         g_lms->mallocMark(newNode, OS_MEM_NEXT_NODE(newNode), OS_MEM_NODE_HEAD_SIZE);
     }
 #endif
+<<<<<<< HEAD
+    return node + 1;
+}
+
+=======
     return node + 1; // @note_good a nice trick: extending the struct by one word turns it into a used node, and the returned address is exactly what gets handed to the application
 }
 
@@ -843,6 +1135,7 @@ STATIC INLINE VOID *OsMemCreateUsedNode(VOID *addr)
 *
 * @see
 */
+>>>>>>> remotes/origin/main
 STATIC UINT32 OsMemPoolInit(VOID *pool, UINT32 size)
 {
     struct OsMemPoolHead *poolHead = (struct OsMemPoolHead *)pool;
@@ -864,6 +1157,32 @@ STATIC UINT32 OsMemPoolInit(VOID *pool, UINT32 size)
 #endif
     LOS_SpinInit(&poolHead->spinlock);
+<<<<<<<
HEAD + poolHead->info.pool = pool; + poolHead->info.totalSize = size; + poolHead->info.attr = OS_MEM_POOL_LOCK_ENABLE; /* default attr: lock, not expand. */ + + newNode = OS_MEM_FIRST_NODE(pool); + newNode->sizeAndFlag = (size - sizeof(struct OsMemPoolHead) - OS_MEM_NODE_HEAD_SIZE); + newNode->ptr.prev = NULL; + newNode->magic = OS_MEM_NODE_MAGIC; + OsMemFreeNodeAdd(pool, (struct OsMemFreeNodeHead *)newNode); + + /* The last mem node */ + endNode = OS_MEM_END_NODE(pool, size); + endNode->magic = OS_MEM_NODE_MAGIC; +#if OS_MEM_EXPAND_ENABLE + endNode->ptr.next = NULL; + OsMemSentinelNodeSet(endNode, NULL, 0); +#else + endNode->sizeAndFlag = 0; + endNode->ptr.prev = newNode; + OS_MEM_NODE_SET_USED_FLAG(endNode->sizeAndFlag); +#endif +#ifdef LOSCFG_MEM_WATERLINE + poolHead->info.curUsedSize = sizeof(struct OsMemPoolHead) + OS_MEM_NODE_HEAD_SIZE; + poolHead->info.waterLine = poolHead->info.curUsedSize; +======= poolHead->info.pool = pool; //内存池的起始地址,但注意真正的内存并不是从此处分配,它只是用来记录这个内存块的开始位置而已. poolHead->info.totalSize = size;//内存池总大小 poolHead->info.attr = OS_MEM_POOL_LOCK_ENABLE; /* default attr: lock, not expand. | 默认是上锁,不支持扩展,需扩展得另外设置*/ @@ -890,6 +1209,7 @@ STATIC UINT32 OsMemPoolInit(VOID *pool, UINT32 size) poolHead->info.curUsedSize = sizeof(struct OsMemPoolHead) + OS_MEM_NODE_HEAD_SIZE;//内存池已使用了这么多空间,这些都是存内存池自身数据的空间, //但此处是否还要算是 endNode ? @note_thinking poolHead->info.waterLine = poolHead->info.curUsedSize; //设置吃水线 +>>>>>>> remotes/origin/main #endif #ifdef LOSCFG_KERNEL_LMS if (resize != 0) { @@ -909,13 +1229,21 @@ STATIC VOID OsMemPoolDeinit(const VOID *pool, UINT32 size) #endif (VOID)memset_s(pool, size, 0, sizeof(struct OsMemPoolHead)); } +<<<<<<< HEAD + +======= /// 新增内存池 +>>>>>>> remotes/origin/main STATIC UINT32 OsMemPoolAdd(VOID *pool, UINT32 size) { VOID *nextPool = g_poolHead; VOID *curPool = g_poolHead; UINTPTR poolEnd; +<<<<<<< HEAD + while (nextPool != NULL) { +======= while (nextPool != NULL) {//单链表遍历方式 +>>>>>>> remotes/origin/main poolEnd = (UINTPTR)nextPool + LOS_MemPoolSizeGet(nextPool); if (((pool <= nextPool) && (((UINTPTR)pool + size) > (UINTPTR)nextPool)) || (((UINTPTR)pool < poolEnd) && (((UINTPTR)pool + size) >= poolEnd))) { @@ -929,6 +1257,17 @@ STATIC UINT32 OsMemPoolAdd(VOID *pool, UINT32 size) } if (g_poolHead == NULL) { +<<<<<<< HEAD + g_poolHead = pool; + } else { + ((struct OsMemPoolHead *)curPool)->nextPool = pool; + } + + ((struct OsMemPoolHead *)pool)->nextPool = NULL; + return LOS_OK; +} + +======= g_poolHead = pool; //首个内存池 } else { ((struct OsMemPoolHead *)curPool)->nextPool = pool; //两池扯上关系 @@ -938,6 +1277,7 @@ STATIC UINT32 OsMemPoolAdd(VOID *pool, UINT32 size) return LOS_OK; } /// 删除内存池 +>>>>>>> remotes/origin/main STATIC UINT32 OsMemPoolDelete(const VOID *pool) { UINT32 ret = LOS_NOK; @@ -968,6 +1308,8 @@ STATIC UINT32 OsMemPoolDelete(const VOID *pool) } #endif +<<<<<<< HEAD +======= /*! 
* @brief LOS_MemInit 初始化一块指定的动态内存池,大小为size * 初始一个内存池后生成一个内存池控制头、尾节点EndNode,剩余的内存被标记为FreeNode内存节点。 @@ -977,30 +1319,46 @@ STATIC UINT32 OsMemPoolDelete(const VOID *pool) * @attention EndNode作为内存池末尾的节点,size为0。 * @see */ +>>>>>>> remotes/origin/main UINT32 LOS_MemInit(VOID *pool, UINT32 size) { if ((pool == NULL) || (size <= OS_MEM_MIN_POOL_SIZE)) { return OS_ERROR; } +<<<<<<< HEAD + size = OS_MEM_ALIGN(size, OS_MEM_ALIGN_SIZE); +======= size = OS_MEM_ALIGN(size, OS_MEM_ALIGN_SIZE);//4个字节对齐 +>>>>>>> remotes/origin/main if (OsMemPoolInit(pool, size)) { return OS_ERROR; } +<<<<<<< HEAD +#ifdef LOSCFG_MEM_MUL_POOL +======= #ifdef LOSCFG_MEM_MUL_POOL //多内存池开关 +>>>>>>> remotes/origin/main if (OsMemPoolAdd(pool, size)) { (VOID)OsMemPoolDeInit(pool, size); return OS_ERROR; } #endif +<<<<<<< HEAD + OsHookCall(LOS_HOOK_TYPE_MEM_INIT, pool, size); +======= OsHookCall(LOS_HOOK_TYPE_MEM_INIT, pool, size);//打印日志 +>>>>>>> remotes/origin/main return LOS_OK; } #ifdef LOSCFG_MEM_MUL_POOL +<<<<<<< HEAD +======= /// 删除指定内存池 +>>>>>>> remotes/origin/main UINT32 LOS_MemDeInit(VOID *pool) { struct OsMemPoolHead *tmpPool = (struct OsMemPoolHead *)pool; @@ -1020,7 +1378,11 @@ UINT32 LOS_MemDeInit(VOID *pool) OsHookCall(LOS_HOOK_TYPE_MEM_DEINIT, tmpPool); return LOS_OK; } +<<<<<<< HEAD + +======= /// 打印系统中已初始化的所有内存池,包括内存池的起始地址、内存池大小、空闲内存总大小、已使用内存总大小、最大的空闲内存块大小、空闲内存块数量、已使用的内存块数量。 +>>>>>>> remotes/origin/main UINT32 LOS_MemPoolList(VOID) { VOID *nextPool = g_poolHead; @@ -1034,7 +1396,11 @@ UINT32 LOS_MemPoolList(VOID) return index; } #endif +<<<<<<< HEAD + +======= /// 从指定动态内存池中申请size长度的内存 +>>>>>>> remotes/origin/main STATIC INLINE VOID *OsMemAlloc(struct OsMemPoolHead *pool, UINT32 size, UINT32 intSave) { struct OsMemNodeHead *allocNode = NULL; @@ -1047,6 +1413,17 @@ STATIC INLINE VOID *OsMemAlloc(struct OsMemPoolHead *pool, UINT32 size, UINT32 i UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE); #if OS_MEM_EXPAND_ENABLE +<<<<<<< HEAD +retry: +#endif + allocNode = OsMemFreeNodeGet(pool, allocSize); + if (allocNode == NULL) { +#if OS_MEM_EXPAND_ENABLE + if (pool->info.attr & OS_MEM_POOL_EXPAND_ENABLE) { + INT32 ret = OsMemPoolExpand(pool, allocSize, intSave); + if (ret == 0) { + goto retry; +======= retry: //这种写法也挺赞的 @note_good #endif allocNode = OsMemFreeNodeGet(pool, allocSize);//获取空闲节点 @@ -1056,6 +1433,7 @@ retry: //这种写法也挺赞的 @note_good INT32 ret = OsMemPoolExpand(pool, allocSize, intSave);//扩展内存池 if (ret == 0) { goto retry;//再来一遍 +>>>>>>> remotes/origin/main } } #endif @@ -1070,6 +1448,24 @@ retry: //这种写法也挺赞的 @note_good return NULL; } +<<<<<<< HEAD + if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= allocNode->sizeAndFlag) { + OsMemSplitNode(pool, allocNode, allocSize); + } + + OS_MEM_NODE_SET_USED_FLAG(allocNode->sizeAndFlag); + OsMemWaterUsedRecord(pool, OS_MEM_NODE_GET_SIZE(allocNode->sizeAndFlag)); + +#ifdef LOSCFG_MEM_LEAKCHECK + OsMemLinkRegisterRecord(allocNode); +#endif + return OsMemCreateUsedNode((VOID *)allocNode); +} + +VOID *LOS_MemAlloc(VOID *pool, UINT32 size) +{ + if ((pool == NULL) || (size == 0)) { +======= if ((allocSize + OS_MEM_NODE_HEAD_SIZE + OS_MEM_MIN_ALLOC_SIZE) <= allocNode->sizeAndFlag) {//所需小于内存池可供分配量 OsMemSplitNode(pool, allocNode, allocSize);//劈开内存池 } @@ -1086,6 +1482,7 @@ retry: //这种写法也挺赞的 @note_good VOID *LOS_MemAlloc(VOID *pool, UINT32 size) { if ((pool == NULL) || (size == 0)) {//没提供内存池时 +>>>>>>> remotes/origin/main return (size > 0) ? 
OsVmBootMemAlloc(size) : NULL; } @@ -1102,6 +1499,16 @@ VOID *LOS_MemAlloc(VOID *pool, UINT32 size) break; } MEM_LOCK(poolHead, intSave); +<<<<<<< HEAD + ptr = OsMemAlloc(poolHead, size, intSave); + MEM_UNLOCK(poolHead, intSave); + } while (0); + + OsHookCall(LOS_HOOK_TYPE_MEM_ALLOC, pool, ptr, size); + return ptr; +} + +======= ptr = OsMemAlloc(poolHead, size, intSave);//真正的分配内存函数,详细查看 鸿蒙内核源码分析(内存池篇) MEM_UNLOCK(poolHead, intSave); } while (0); @@ -1110,6 +1517,7 @@ VOID *LOS_MemAlloc(VOID *pool, UINT32 size) return ptr; } /// 从指定内存池中申请size长度的内存且地址按boundary字节对齐的内存 +>>>>>>> remotes/origin/main VOID *LOS_MemAllocAlign(VOID *pool, UINT32 size, UINT32 boundary) { UINT32 gapSize; @@ -1166,10 +1574,17 @@ VOID *LOS_MemAllocAlign(VOID *pool, UINT32 size, UINT32 boundary) ptr = alignedPtr; } while (0); +<<<<<<< HEAD + OsHookCall(LOS_HOOK_TYPE_MEM_ALLOCALIGN, pool, ptr, size, boundary); + return ptr; +} + +======= OsHookCall(LOS_HOOK_TYPE_MEM_ALLOCALIGN, pool, ptr, size, boundary);//打印对齐日志,表示程序曾临幸过此处 return ptr; } /// 内存池有效性检查 +>>>>>>> remotes/origin/main STATIC INLINE BOOL OsMemAddrValidCheck(const struct OsMemPoolHead *pool, const VOID *addr) { UINT32 size; @@ -1183,7 +1598,11 @@ STATIC INLINE BOOL OsMemAddrValidCheck(const struct OsMemPoolHead *pool, const V if (OS_MEM_MIDDLE_ADDR_OPEN_END(pool + 1, addr, (UINTPTR)pool + size)) { return TRUE; } +<<<<<<< HEAD +#if OS_MEM_EXPAND_ENABLE +======= #if OS_MEM_EXPAND_ENABLE //如果支持可扩展 +>>>>>>> remotes/origin/main struct OsMemNodeHead *node = NULL; struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, size); while (OsMemIsLastSentinelNode(sentinel) == FALSE) { @@ -1273,7 +1692,11 @@ STATIC UINT32 OsMemCheckUsedNode(const struct OsMemPoolHead *pool, const struct return LOS_OK; } +<<<<<<< HEAD + +======= /// 释放内存 +>>>>>>> remotes/origin/main STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead *node) { UINT32 ret = OsMemCheckUsedNode(pool, node); @@ -1283,10 +1706,17 @@ STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead } #ifdef LOSCFG_MEM_WATERLINE +<<<<<<< HEAD + pool->info.curUsedSize -= OS_MEM_NODE_GET_SIZE(node->sizeAndFlag); +#endif + + node->sizeAndFlag = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag); +======= pool->info.curUsedSize -= OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);//降低水位线 #endif node->sizeAndFlag = OS_MEM_NODE_GET_SIZE(node->sizeAndFlag);//获取大小和标记 +>>>>>>> remotes/origin/main #ifdef LOSCFG_MEM_LEAKCHECK OsMemLinkRegisterRecord(node); #endif @@ -1297,6 +1727,19 @@ STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead g_lms->check((UINTPTR)node + OS_MEM_NODE_HEAD_SIZE, TRUE); } #endif +<<<<<<< HEAD + struct OsMemNodeHead *preNode = node->ptr.prev; /* merage preNode */ + if ((preNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) { + OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)preNode); + OsMemMergeNode(node); + node = preNode; + } + + struct OsMemNodeHead *nextNode = OS_MEM_NEXT_NODE(node); /* merage nextNode */ + if ((nextNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) { + OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode); + OsMemMergeNode(nextNode); +======= struct OsMemNodeHead *preNode = node->ptr.prev; /* merage preNode | 合并前一个节点*/ if ((preNode != NULL) && !OS_MEM_NODE_GET_USED_FLAG(preNode->sizeAndFlag)) { OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)preNode);//删除前节点的信息 @@ -1308,6 +1751,7 @@ STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead if ((nextNode != NULL) 
&& !OS_MEM_NODE_GET_USED_FLAG(nextNode->sizeAndFlag)) { OsMemFreeNodeDelete(pool, (struct OsMemFreeNodeHead *)nextNode);//删除后节点信息 OsMemMergeNode(nextNode);//合并节点 +>>>>>>> remotes/origin/main } #if OS_MEM_EXPAND_ENABLE @@ -1328,7 +1772,11 @@ STATIC INLINE UINT32 OsMemFree(struct OsMemPoolHead *pool, struct OsMemNodeHead #endif return ret; } +<<<<<<< HEAD + +======= /// 释放从指定动态内存中申请的内存 +>>>>>>> remotes/origin/main UINT32 LOS_MemFree(VOID *pool, VOID *ptr) { UINT32 intSave; @@ -1344,13 +1792,21 @@ UINT32 LOS_MemFree(VOID *pool, VOID *ptr) struct OsMemNodeHead *node = NULL; do { +<<<<<<< HEAD + UINT32 gapSize = *(UINT32 *)((UINTPTR)ptr - sizeof(UINT32)); +======= UINT32 gapSize = *(UINT32 *)((UINTPTR)ptr - sizeof(UINT32));//获取节点大小和标签 即: sizeAndFlag +>>>>>>> remotes/origin/main if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize) && OS_MEM_NODE_GET_USED_FLAG(gapSize)) { PRINT_ERR("[%s:%d]gapSize:0x%x error\n", __FUNCTION__, __LINE__, gapSize); break; } +<<<<<<< HEAD + node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE); +======= node = (struct OsMemNodeHead *)((UINTPTR)ptr - OS_MEM_NODE_HEAD_SIZE);//定位到节点开始位置 +>>>>>>> remotes/origin/main if (OS_MEM_NODE_GET_ALIGNED_FLAG(gapSize)) { gapSize = OS_MEM_NODE_GET_ALIGNED_GAPSIZE(gapSize); @@ -1438,7 +1894,11 @@ STATIC INLINE VOID *OsGetRealPtr(const VOID *pool, VOID *ptr) } STATIC INLINE VOID *OsMemRealloc(struct OsMemPoolHead *pool, const VOID *ptr, +<<<<<<< HEAD + struct OsMemNodeHead *node, UINT32 size, UINT32 intSave) +======= struct OsMemNodeHead *node, UINT32 size, UINT32 intSave) +>>>>>>> remotes/origin/main { struct OsMemNodeHead *nextNode = NULL; UINT32 allocSize = OS_MEM_ALIGN(size + OS_MEM_NODE_HEAD_SIZE, OS_MEM_ALIGN_SIZE); @@ -1469,7 +1929,11 @@ STATIC INLINE VOID *OsMemRealloc(struct OsMemPoolHead *pool, const VOID *ptr, } return tmpPtr; } +<<<<<<< HEAD + +======= /// 按size大小重新分配内存块,并将原内存块内容拷贝到新内存块。如果新内存块申请成功,则释放原内存块 +>>>>>>> remotes/origin/main VOID *LOS_MemRealloc(VOID *pool, VOID *ptr, UINT32 size) { if ((pool == NULL) || OS_MEM_NODE_GET_USED_FLAG(size) || OS_MEM_NODE_GET_ALIGNED_FLAG(size)) { @@ -1562,7 +2026,11 @@ UINT32 LOS_MemFreeByTaskID(VOID *pool, UINT32 taskID) return LOS_OK; } #endif +<<<<<<< HEAD + +======= /// 获取指定动态内存池的总大小 +>>>>>>> remotes/origin/main UINT32 LOS_MemPoolSizeGet(const VOID *pool) { UINT32 count = 0; @@ -1571,6 +2039,20 @@ UINT32 LOS_MemPoolSizeGet(const VOID *pool) return LOS_NOK; } +<<<<<<< HEAD + count += ((struct OsMemPoolHead *)pool)->info.totalSize; + +#if OS_MEM_EXPAND_ENABLE + UINT32 size; + struct OsMemNodeHead *node = NULL; + struct OsMemNodeHead *sentinel = OS_MEM_END_NODE(pool, count); + + while (OsMemIsLastSentinelNode(sentinel) == FALSE) { + size = OS_MEM_NODE_GET_SIZE(sentinel->sizeAndFlag); + node = OsMemSentinelNodeGet(sentinel); + sentinel = OS_MEM_END_NODE(node, size); + count += size; +======= count += ((struct OsMemPoolHead *)pool)->info.totalSize; // 这里的 += 好像没必要吧?, = 就可以了, @note_thinking #if OS_MEM_EXPAND_ENABLE //支持扩展 @@ -1583,11 +2065,16 @@ UINT32 LOS_MemPoolSizeGet(const VOID *pool) node = OsMemSentinelNodeGet(sentinel);//再获取哨兵节点 sentinel = OS_MEM_END_NODE(node, size);//获取尾节点 count += size; //内存池大小变大 +>>>>>>> remotes/origin/main } #endif return count; } +<<<<<<< HEAD + +======= /// 获取指定动态内存池的总使用量大小 +>>>>>>> remotes/origin/main UINT32 LOS_MemTotalUsedGet(VOID *pool) { struct OsMemNodeHead *tmpNode = NULL; @@ -1660,7 +2147,11 @@ STATIC UINT32 OsMemAddrValidCheckPrint(const VOID *pool, struct OsMemFreeNodeHea } STATIC UINT32 OsMemIntegrityCheckSub(struct OsMemNodeHead **tmpNode, 
const VOID *pool, +<<<<<<< HEAD + const struct OsMemNodeHead *endNode) +======= const struct OsMemNodeHead *endNode) +>>>>>>> remotes/origin/main { if (!OS_MEM_MAGIC_VALID(*tmpNode)) { OsMemMagicCheckPrint(tmpNode); @@ -1676,7 +2167,11 @@ STATIC UINT32 OsMemIntegrityCheckSub(struct OsMemNodeHead **tmpNode, const VOID } STATIC UINT32 OsMemFreeListNodeCheck(const struct OsMemPoolHead *pool, +<<<<<<< HEAD + const struct OsMemFreeNodeHead *node) +======= const struct OsMemFreeNodeHead *node) +>>>>>>> remotes/origin/main { if (!OsMemAddrValidCheck(pool, node) || !OsMemAddrValidCheck(pool, node->prev) || @@ -1737,9 +2232,15 @@ OUT: #endif } } +<<<<<<< HEAD + +STATIC UINT32 OsMemIntegrityCheck(const struct OsMemPoolHead *pool, struct OsMemNodeHead **tmpNode, + struct OsMemNodeHead **preNode) +======= //对指定内存池做完整性检查, STATIC UINT32 OsMemIntegrityCheck(const struct OsMemPoolHead *pool, struct OsMemNodeHead **tmpNode, struct OsMemNodeHead **preNode) +>>>>>>> remotes/origin/main { struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize); @@ -1862,7 +2363,11 @@ STATIC INLINE UINT32 OsMemAllocCheck(struct OsMemPoolHead *pool, UINT32 intSave) return LOS_OK; } #endif +<<<<<<< HEAD + +======= /// 对指定内存池做完整性检查 +>>>>>>> remotes/origin/main UINT32 LOS_MemIntegrityCheck(const VOID *pool) { if (pool == NULL) { @@ -1887,7 +2392,11 @@ ERROR_OUT: } STATIC INLINE VOID OsMemInfoGet(struct OsMemPoolHead *poolInfo, struct OsMemNodeHead *node, +<<<<<<< HEAD + LOS_MEM_POOL_STATUS *poolStatus) +======= LOS_MEM_POOL_STATUS *poolStatus) +>>>>>>> remotes/origin/main { UINT32 totalUsedSize = 0; UINT32 totalFreeSize = 0; @@ -1916,6 +2425,10 @@ STATIC INLINE VOID OsMemInfoGet(struct OsMemPoolHead *poolInfo, struct OsMemNode poolStatus->freeNodeNum += freeNodeNum; } +<<<<<<< HEAD +UINT32 LOS_MemInfoGet(VOID *pool, LOS_MEM_POOL_STATUS *poolStatus) +{ +======= /*! 
* @brief LOS_MemInfoGet
* Get the memory structure information of the specified pool: free memory size, used memory size,
* number of free blocks, number of used blocks, and the size of the largest free block.
* @param pool
* @param poolStatus
* @return
*
* @see
*/
UINT32 LOS_MemInfoGet(VOID *pool, LOS_MEM_POOL_STATUS *poolStatus)
{ // Fragmentation rate: LOS_MemInfoGet also yields the remaining pool size and the largest free block, so fragment = 100 - largest free block size / remaining free size gives the pool's current fragmentation percentage.
+>>>>>>> remotes/origin/main
     struct OsMemPoolHead *poolInfo = pool;
 
     if (poolStatus == NULL) {
@@ -2007,7 +2521,11 @@ STATIC VOID OsMemInfoPrint(VOID *pool)
         status.freeNodeNum);
 #endif
 }
+<<<<<<< HEAD
+
+=======
 /// print the size and number of the free blocks in the specified pool
+>>>>>>> remotes/origin/main
 UINT32 LOS_MemFreeNodeShow(VOID *pool)
 {
     struct OsMemPoolHead *poolInfo = (struct OsMemPoolHead *)pool;
@@ -2054,7 +2572,11 @@ UINT32 LOS_MemFreeNodeShow(VOID *pool)
 
     return LOS_OK;
 }
+<<<<<<< HEAD
+
+=======
 /// kernel-space dynamic memory (heap) initialization: sets up the system dynamic memory pool
+>>>>>>> remotes/origin/main
 STATUS_T OsKHeapInit(size_t size)
 {
     STATUS_T ret;
@@ -2073,14 +2595,45 @@ STATUS_T OsKHeapInit(size_t size)
         return -1;
     }
 
+<<<<<<< HEAD
+    m_aucSysMem0 = m_aucSysMem1 = ptr;
+    ret = LOS_MemInit(m_aucSysMem0, size);
+=======
     m_aucSysMem0 = m_aucSysMem1 = ptr; // location of the kernel memory pool
     ret = LOS_MemInit(m_aucSysMem0, size); // initialize the pool the kernel allocates dynamic memory from
+>>>>>>> remotes/origin/main
     if (ret != LOS_OK) {
         PRINT_ERR("vmm_kheap_init LOS_MemInit failed!\n");
         g_vmBootMemBase -= size;
         return ret;
     }
 #if OS_MEM_EXPAND_ENABLE
+<<<<<<< HEAD
+    LOS_MemExpandEnable(OS_SYS_MEM_ADDR);
+#endif
+    return LOS_OK;
+}
+
+BOOL OsMemIsHeapNode(const VOID *ptr)
+{
+    struct OsMemPoolHead *pool = (struct OsMemPoolHead *)m_aucSysMem1;
+    struct OsMemNodeHead *firstNode = OS_MEM_FIRST_NODE(pool);
+    struct OsMemNodeHead *endNode = OS_MEM_END_NODE(pool, pool->info.totalSize);
+
+    if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) {
+        return TRUE;
+    }
+
+#if OS_MEM_EXPAND_ENABLE
+    UINT32 intSave;
+    UINT32 size;
+    MEM_LOCK(pool, intSave);
+    while (OsMemIsLastSentinelNode(endNode) == FALSE) {
+        size = OS_MEM_NODE_GET_SIZE(endNode->sizeAndFlag);
+        firstNode = OsMemSentinelNodeGet(endNode);
+        endNode = OS_MEM_END_NODE(firstNode, size);
+        if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) {
+=======
     LOS_MemExpandEnable(OS_SYS_MEM_ADDR); // allow the system dynamic memory pool to expand
 #endif
     return LOS_OK;
@@ -2105,6 +2658,7 @@ BOOL OsMemIsHeapNode(const VOID *ptr)
         firstNode = OsMemSentinelNodeGet(endNode); // start address of the next block
         endNode = OS_MEM_END_NODE(firstNode, size); // end node of the next block
         if (OS_MEM_MIDDLE_ADDR(firstNode, ptr, endNode)) { // does the address fall inside this block
+>>>>>>> remotes/origin/main
             MEM_UNLOCK(pool, intSave);
             return TRUE;
         }
@@ -2113,5 +2667,8 @@ BOOL OsMemIsHeapNode(const VOID *ptr)
 #endif
     return FALSE;
 }
+<<<<<<< HEAD
+=======
+>>>>>>> remotes/origin/main
diff --git a/src/kernel_liteos_a/kernel/base/misc/kill_shellcmd.c b/src/kernel_liteos_a/kernel/base/misc/kill_shellcmd.c
index c0a7adfb..ebefbc76 100644
--- a/src/kernel_liteos_a/kernel/base/misc/kill_shellcmd.c
+++ b/src/kernel_liteos_a/kernel/base/misc/kill_shellcmd.c
@@ -48,6 +48,8 @@
 #include "shell.h"
 #endif
 
+<<<<<<< HEAD
+=======
 /*********************************************
 Command function
 The command sends a specific signal to a specified process.
@@ -65,6 +67,7 @@
 pid   process ID, value range [1, MAX_INT]
 Both the signal number to send and the process ID must be specified.
 The valid pid range depends on the system configuration; for example, if the largest supported pid is 256, the range narrows to [1-256].
 *********************************************/
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_MINOR VOID OsPrintKillUsage(VOID)
 {
     PRINTK("\nkill: usage: kill [sigspec] [pid]\n");
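
The fragmentation formula quoted in the LOS_MemInfoGet comment above (fragment = 100 - largest free block / remaining free memory) with assumed numbers, as a tiny worked example:

    #include <stdio.h>

    int main(void)
    {
        unsigned totalFreeSize   = 1000; /* assumed: KiB of free memory in the pool */
        unsigned maxFreeNodeSize = 400;  /* assumed: KiB, largest single free block */
        /* many small scattered blocks -> high fragmentation */
        unsigned fragment = 100 - (maxFreeNodeSize * 100 / totalFreeSize);
        printf("fragmentation: %u%%\n", fragment); /* prints 60%% */
        return 0;
    }
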
diff --git a/src/kernel_liteos_a/kernel/base/misc/los_misc.c b/src/kernel_liteos_a/kernel/base/misc/los_misc.c
index f8b7cd85..85fa9021 100644
--- a/src/kernel_liteos_a/kernel/base/misc/los_misc.c
+++ b/src/kernel_liteos_a/kernel/base/misc/los_misc.c
@@ -31,10 +31,14 @@
 #include "los_task_pri.h"
 
+<<<<<<< HEAD
+
+=======
 // Takes an address (addr) and an alignment boundary and returns the aligned address.
 // If addr + boundary - 1 is still greater than addr (no overflow), it returns
 // (addr + boundary - 1) & ~((UINTPTR)(boundary - 1)); otherwise it returns
 // addr & ~((UINTPTR)(boundary - 1)). In effect this rounds the address up to the boundary.
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT UINTPTR LOS_Align(UINTPTR addr, UINT32 boundary)
 {
     if ((addr + boundary - 1) > addr) {
@@ -44,6 +48,15 @@ LITE_OS_SEC_TEXT UINTPTR LOS_Align(UINTPTR addr, UINT32 boundary)
     }
 }
 
+<<<<<<< HEAD
+LITE_OS_SEC_TEXT_MINOR VOID LOS_Msleep(UINT32 msecs)
+{
+    UINT32 interval;
+
+    if (msecs == 0) {
+        interval = 0;
+    } else {
+=======
 LITE_OS_SEC_TEXT_MINOR VOID LOS_Msleep(UINT32 msecs)
 {
@@ -52,6 +65,7 @@ LITE_OS_SEC_TEXT_MINOR VOID LOS_Msleep(UINT32 msecs)
     if (msecs == 0) {
         interval = 0;
     } else { // otherwise convert the milliseconds to ticks (the OS time unit); if the converted interval is 0, use 1, then call LOS_TaskDelay to delay for that interval
+>>>>>>> remotes/origin/main
         interval = LOS_MS2Tick(msecs);
         if (interval == 0) {
             interval = 1;
diff --git a/src/kernel_liteos_a/kernel/base/misc/los_stackinfo.c b/src/kernel_liteos_a/kernel/base/misc/los_stackinfo.c
index 49c46afd..08bf636e 100644
--- a/src/kernel_liteos_a/kernel/base/misc/los_stackinfo.c
+++ b/src/kernel_liteos_a/kernel/base/misc/los_stackinfo.c
@@ -37,6 +37,12 @@
 #include "shcmd.h"
 #include "shell.h"
 #endif
+<<<<<<< HEAD
+
+const StackInfo *g_stackInfo = NULL;
+UINT32 g_stackNum;
+
+=======
 /**
 * @file  los_stackinfo.c
 * @brief stack contents
@@ -66,10 +72,27 @@
 const StackInfo *g_stackInfo = NULL; ///< stack info for every CPU working mode
 UINT32 g_stackNum; ///< number of stacks across the CPU working modes
 /// get the stack waterline (peak usage)
+>>>>>>> remotes/origin/main
 UINT32 OsStackWaterLineGet(const UINTPTR *stackBottom, const UINTPTR *stackTop, UINT32 *peakUsed)
 {
     UINT32 size;
     const UINTPTR *tmp = NULL;
+<<<<<<< HEAD
+    if (*stackTop == OS_STACK_MAGIC_WORD) {
+        tmp = stackTop + 1;
+        while ((tmp < stackBottom) && (*tmp == OS_STACK_INIT)) {
+            tmp++;
+        }
+        size = (UINT32)((UINTPTR)stackBottom - (UINTPTR)tmp);
+        *peakUsed = (size == 0) ? size : (size + sizeof(CHAR *));
+        return LOS_OK;
+    } else {
+        *peakUsed = OS_INVALID_WATERLINE;
+        return LOS_NOK;
+    }
+}
+
+=======
     if (*stackTop == OS_STACK_MAGIC_WORD) { // is the stack-top value still the magic word 0xCCCCCCCC
         tmp = stackTop + 1;
         while ((tmp < stackBottom) && (*tmp == OS_STACK_INIT)) { // count the consecutive 0xCACACACA words from the stack top toward the bottom
@@ -84,6 +107,7 @@ UINT32 OsStackWaterLineGet(const UINTPTR *stackBottom, const UINTPTR *stackTop,
     }
 }
 /// stack check on exception: mainly verifies that the stack-top magic word has not been overwritten
+>>>>>>> remotes/origin/main
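
A host-side sketch of the waterline scan above, using a plain array in place of a real task stack; note the kernel version additionally adds sizeof(CHAR *) to a nonzero result, which this sketch omits:

    #include <stdint.h>
    #include <stdio.h>

    #define STACK_MAGIC 0xCCCCCCCCU  /* magic word written at the stack top */
    #define STACK_INIT  0xCACACACAU  /* fill pattern for untouched stack words */
    #define WORDS 8

    int main(void)
    {
        /* simulated stack: index 0 is the top (magic word), higher indexes
         * grow toward the bottom; the three words near the bottom were used */
        uint32_t stk[WORDS] = { STACK_MAGIC, STACK_INIT, STACK_INIT, STACK_INIT,
                                STACK_INIT, 0x1234, 0x5678, 0x9abc };
        uint32_t i = 1;
        while (i < WORDS && stk[i] == STACK_INIT) {
            i++;                                /* skip untouched fill words */
        }
        /* peak usage = distance from the first dirtied word to the bottom */
        uint32_t used = (WORDS - i) * sizeof(uint32_t);
        printf("peak used: %u bytes\n", used);  /* prints 12 bytes */
        return 0;
    }
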
 VOID OsExcStackCheck(VOID)
 {
     UINT32 index;
@@ -96,7 +120,11 @@ VOID OsExcStackCheck(VOID)
     for (index = 0; index < g_stackNum; index++) {
         for (cpuid = 0; cpuid < LOSCFG_KERNEL_CORE_NUM; cpuid++) {
             stackTop = (UINTPTR *)((UINTPTR)g_stackInfo[index].stackTop + cpuid * g_stackInfo[index].stackSize);
+<<<<<<< HEAD
+            if (*stackTop != OS_STACK_MAGIC_WORD) {
+=======
             if (*stackTop != OS_STACK_MAGIC_WORD) { // any value other than 0xCCCCCCCC at the stack top means an overflow
+>>>>>>> remotes/origin/main
                 PRINT_ERR("cpu:%u %s overflow , magic word changed to 0x%x\n",
                           LOSCFG_KERNEL_CORE_NUM - 1 - cpuid, g_stackInfo[index].stackName, *stackTop);
             }
@@ -104,7 +132,10 @@ VOID OsExcStackCheck(VOID)
     }
 }
 
+<<<<<<< HEAD
+=======
 /// print the stack information of every CPU
+>>>>>>> remotes/origin/main
 VOID OsExcStackInfo(VOID)
 {
     UINT32 index;
@@ -119,17 +150,43 @@ VOID OsExcStackInfo(VOID)
 
     PrintExcInfo("\n stack name cpu id stack addr total size used size\n"
                  " ---------- ------ --------- -------- --------\n");
+<<<<<<< HEAD
+
+    for (index = 0; index < g_stackNum; index++) {
+        for (cpuid = 0; cpuid < LOSCFG_KERNEL_CORE_NUM; cpuid++) {
+            stackTop = (UINTPTR *)((UINTPTR)g_stackInfo[index].stackTop + cpuid * g_stackInfo[index].stackSize);
+            stack = (UINTPTR *)((UINTPTR)stackTop + g_stackInfo[index].stackSize);
+            (VOID)OsStackWaterLineGet(stack, stackTop, &size);
+=======
     for (index = 0; index < g_stackNum; index++) {
         for (cpuid = 0; cpuid < LOSCFG_KERNEL_CORE_NUM; cpuid++) { // the per-CPU stacks are laid out back to back
             stackTop = (UINTPTR *)((UINTPTR)g_stackInfo[index].stackTop + cpuid * g_stackInfo[index].stackSize);
             stack = (UINTPTR *)((UINTPTR)stackTop + g_stackInfo[index].stackSize);
             (VOID)OsStackWaterLineGet(stack, stackTop, &size); // get the waterline; "waterline" is a nicely chosen word here
+>>>>>>> remotes/origin/main
             PrintExcInfo("%11s %-5d %-10p 0x%-8x 0x%-4x\n", g_stackInfo[index].stackName,
                          LOSCFG_KERNEL_CORE_NUM - 1 - cpuid, stackTop, g_stackInfo[index].stackSize, size);
         }
     }
+<<<<<<< HEAD
+    OsExcStackCheck();
+}
+
+VOID OsExcStackInfoReg(const StackInfo *stackInfo, UINT32 stackNum)
+{
+    g_stackInfo = stackInfo;
+    g_stackNum = stackNum;
+}
+
+VOID OsStackInit(VOID *stacktop, UINT32 stacksize)
+{
+    /* initialize the task stack, write magic num to stack top */
+    errno_t ret = memset_s(stacktop, stacksize, (INT32)OS_STACK_INIT, stacksize);
+    if (ret == EOK) {
+        *((UINTPTR *)stacktop) = OS_STACK_MAGIC_WORD;
+=======
     OsExcStackCheck(); // check the stacks when an exception occurs
 }
 
@@ -147,9 +204,14 @@ VOID OsStackInit(VOID *stacktop, UINT32 stacksize)
     errno_t ret = memset_s(stacktop, stacksize, (INT32)OS_STACK_INIT, stacksize); // fill everything with 0xCACACACA
     if (ret == EOK) {
         *((UINTPTR *)stacktop) = OS_STACK_MAGIC_WORD; // 0xCCCCCCCC; anyone with a little computing background will recognize this pattern ("烫烫烫烫")
+>>>>>>> remotes/origin/main } } #ifdef LOSCFG_SHELL_CMD_DEBUG +<<<<<<< HEAD +SHELLCMD_ENTRY(stack_shellcmd, CMD_TYPE_EX, "stack", 1, (CmdCallBackFunc)OsExcStackInfo); +======= SHELLCMD_ENTRY(stack_shellcmd, CMD_TYPE_EX, "stack", 1, (CmdCallBackFunc)OsExcStackInfo);//采用shell命令静态注册方式 +>>>>>>> remotes/origin/main #endif diff --git a/src/kernel_liteos_a/kernel/base/misc/mempt_shellcmd.c b/src/kernel_liteos_a/kernel/base/misc/mempt_shellcmd.c index 7177ace6..113cac35 100644 --- a/src/kernel_liteos_a/kernel/base/misc/mempt_shellcmd.c +++ b/src/kernel_liteos_a/kernel/base/misc/mempt_shellcmd.c @@ -193,7 +193,11 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdUname(INT32 argc, const CHAR *argv[]) if (argc == 1) { if (strcmp(argv[0], "-a") == 0) { +<<<<<<< HEAD + PRINTK("%s %d.%d.%d.%d %s %s\n", KERNEL_NAME, KERNEL_MAJOR, KERNEL_MINOR, KERNEL_PATCH, KERNEL_ITRE, \ +======= PRINTK("%s %d.%d.%d.%d %s %s\n", KERNEL_NAME, KERNEL_MAJOR, KERNEL_MINOR, KERNEL_PATCH, KERNEL_ITRE,\ +>>>>>>> remotes/origin/main __DATE__, __TIME__); return 0; } else if (strcmp(argv[0], "-s") == 0) { diff --git a/src/kernel_liteos_a/kernel/base/misc/swtmr_shellcmd.c b/src/kernel_liteos_a/kernel/base/misc/swtmr_shellcmd.c index 49c8349c..e6814e45 100644 --- a/src/kernel_liteos_a/kernel/base/misc/swtmr_shellcmd.c +++ b/src/kernel_liteos_a/kernel/base/misc/swtmr_shellcmd.c @@ -58,6 +58,15 @@ STATIC VOID OsPrintSwtmrMsg(const SWTMR_CTRL_S *swtmr) (VOID)LOS_SwtmrTimeGet(swtmr->usTimerID, &ticks); PRINTK("%7u%10s%8s%12u%7u%#12x%#12x\n", +<<<<<<< HEAD + swtmr->usTimerID % LOSCFG_BASE_CORE_SWTMR_LIMIT, + g_shellSwtmrStatus[swtmr->ucState], + g_shellSwtmrMode[swtmr->ucMode], + swtmr->uwInterval, + ticks, + swtmr->uwArg, + swtmr->pfnHandler); +======= swtmr->usTimerID % LOSCFG_BASE_CORE_SWTMR_LIMIT, //软件定时器ID。 g_shellSwtmrStatus[swtmr->ucState], //软件定时器状态,状态可能为:"UnUsed", "Created", "Ticking"。 g_shellSwtmrMode[swtmr->ucMode], //软件定时器模式。模式可能为:"Once", "Period", "NSD(单次定时器,定时结束后不会自动删除)" @@ -65,14 +74,19 @@ STATIC VOID OsPrintSwtmrMsg(const SWTMR_CTRL_S *swtmr) ticks, swtmr->uwArg, //传入的参数。 swtmr->pfnHandler); //回调函数的地址。 +>>>>>>> remotes/origin/main } STATIC INLINE VOID OsPrintSwtmrMsgHead(VOID) { PRINTK("\r\nSwTmrID State Mode Interval Count Arg handlerAddr\n"); } +<<<<<<< HEAD + +======= ///shell命令之swtmr 命令用于查询系统软件定时器相关信息。 //参数缺省时,默认显示所有软件定时器的相关信息。 +>>>>>>> remotes/origin/main STATIC UINT32 SwtmrBaseInfoGet(UINT32 timerID) { SWTMR_CTRL_S *swtmr = g_swtmrCBArray; @@ -174,6 +188,11 @@ SWTMR_HELP: PRINTK(" swtmr ID --- Specifies information about a software timer.\n"); return LOS_OK; } +<<<<<<< HEAD + +SHELLCMD_ENTRY(swtmr_shellcmd, CMD_TYPE_EX, "swtmr", 1, (CmdCallBackFunc)OsShellCmdSwtmrInfoGet); +======= SHELLCMD_ENTRY(swtmr_shellcmd, CMD_TYPE_EX, "swtmr", 1, (CmdCallBackFunc)OsShellCmdSwtmrInfoGet);//采用shell命令静态注册方式 +>>>>>>> remotes/origin/main #endif /* LOSCFG_SHELL */ diff --git a/src/kernel_liteos_a/kernel/base/misc/sysinfo_shellcmd.c b/src/kernel_liteos_a/kernel/base/misc/sysinfo_shellcmd.c index 1d3849b2..70b30ed1 100644 --- a/src/kernel_liteos_a/kernel/base/misc/sysinfo_shellcmd.c +++ b/src/kernel_liteos_a/kernel/base/misc/sysinfo_shellcmd.c @@ -118,7 +118,11 @@ UINT32 OsShellCmdSwtmrCntGet(VOID) LOS_IntRestore(intSave); return swtmrCnt; } +<<<<<<< HEAD + +======= ///查看系统资源使用情况 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_MINOR VOID OsShellCmdSystemInfoGet(VOID) { UINT8 isTaskEnable = TRUE; @@ -137,6 +141,29 @@ LITE_OS_SEC_TEXT_MINOR VOID OsShellCmdSystemInfoGet(VOID) #else UINT8 isSwtmrEnable = FALSE; #endif +<<<<<<< HEAD + + 
PRINTK("\n Module Used Total Enabled\n"); + PRINTK("--------------------------------------------\n"); + PRINTK(" Task %-10u%-10d%s\n", + OsShellCmdTaskCntGet(), + LOSCFG_BASE_CORE_TSK_LIMIT, + SYSINFO_ENABLED(isTaskEnable)); + PRINTK(" Sem %-10u%-10d%s\n", + OsShellCmdSemCntGet(), + LOSCFG_BASE_IPC_SEM_LIMIT, + SYSINFO_ENABLED(isSemEnable)); + PRINTK(" Queue %-10u%-10d%s\n", + OsShellCmdQueueCntGet(), + LOSCFG_BASE_IPC_QUEUE_LIMIT, + SYSINFO_ENABLED(isQueueEnable)); + PRINTK(" SwTmr %-10u%-10d%s\n", + OsShellCmdSwtmrCntGet(), + LOSCFG_BASE_CORE_SWTMR_LIMIT, + SYSINFO_ENABLED(isSwtmrEnable)); +} + +======= //模块名称 当前使用量 最大可用量 模块是否开启 PRINTK("\n Module Used Total Enabled\n"); PRINTK("--------------------------------------------\n"); @@ -158,6 +185,7 @@ LITE_OS_SEC_TEXT_MINOR VOID OsShellCmdSystemInfoGet(VOID) SYSINFO_ENABLED(isSwtmrEnable)); //定时器是否失效 YES or NO } ///systeminfo命令用于显示当前操作系统内资源使用情况,包括任务、信号量、互斥量、队列、定时器等。 +>>>>>>> remotes/origin/main INT32 OsShellCmdSystemInfo(INT32 argc, const CHAR **argv) { if (argc == 0) { diff --git a/src/kernel_liteos_a/kernel/base/misc/vm_shellcmd.c b/src/kernel_liteos_a/kernel/base/misc/vm_shellcmd.c index b8455e8a..c55e8610 100644 --- a/src/kernel_liteos_a/kernel/base/misc/vm_shellcmd.c +++ b/src/kernel_liteos_a/kernel/base/misc/vm_shellcmd.c @@ -54,7 +54,11 @@ #define VMM_CMD "vmm" #define OOM_CMD "oom" #define VMM_PMM_CMD "v2p" +<<<<<<< HEAD + +======= //dump内核空间 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_MINOR VOID OsDumpKernelAspace(VOID) { LosVmSpace *kAspace = LOS_GetKVmSpace(); @@ -104,6 +108,28 @@ LITE_OS_SEC_TEXT_MINOR VOID OsDoDumpVm(pid_t pid) PRINTK("\tThe process [%d] not active\n", pid); } } +<<<<<<< HEAD + +LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdDumpVm(INT32 argc, const CHAR *argv[]) +{ + if (argc == 0) { + OsDumpAllAspace(); + } else if (argc == 1) { + pid_t pid = OsPid(argv[0]); + if (strcmp(argv[0], "-a") == 0) { + OsDumpAllAspace(); + } else if (strcmp(argv[0], "-k") == 0) { + OsDumpKernelAspace(); + } else if (pid >= 0) { + OsDoDumpVm(pid); + } else if (strcmp(argv[0], "-h") == 0 || strcmp(argv[0], "--help") == 0) { + OsPrintUsage(); + } else { + PRINTK("%s: invalid option: %s\n", VMM_CMD, argv[0]); + OsPrintUsage(); + } + } else { +======= ///查看进程的虚拟内存使用情况。vmm [-a / -h / --help], vmm [pid] LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdDumpVm(INT32 argc, const CHAR *argv[]) { @@ -124,6 +150,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdDumpVm(INT32 argc, const CHAR *argv[]) OsPrintUsage(); } } else { //多于一个参数 例如 # vmm 3 9 +>>>>>>> remotes/origin/main OsPrintUsage(); } @@ -135,7 +162,11 @@ LITE_OS_SEC_TEXT_MINOR VOID V2PPrintUsage(VOID) PRINTK("pid vaddr(0x1000000~0x3e000000), print physical address of virtual address\n" "-h | --help, print v2p command usage\n"); } +<<<<<<< HEAD + +======= ///v2p 虚拟内存对应的物理内存 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdV2P(INT32 argc, const CHAR *argv[]) { UINT32 vaddr; @@ -180,7 +211,11 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdV2P(INT32 argc, const CHAR *argv[]) return LOS_OK; } +<<<<<<< HEAD + +======= ///查看系统内存物理页及pagecache物理页使用情况 , Debug版本才具备的命令 # pmm +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdDumpPmm(VOID) { OsVmPhysDump(); @@ -192,6 +227,14 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdDumpPmm(VOID) LITE_OS_SEC_TEXT_MINOR VOID OomPrintUsage(VOID) { +<<<<<<< HEAD + PRINTK("\t-i [interval], set oom check interval (ms)\n" + "\t-m [mem byte], set oom low memory threshold (Byte)\n" + "\t-r [mem byte], set page cache reclaim memory threshold (Byte)\n" + "\t-h 
| --help, print vmm command usage\n"); +} + +======= PRINTK("\t-i [interval], set oom check interval (ms)\n" //设置oom线程任务检查的时间间隔。 "\t-m [mem byte], set oom low memory threshold (Byte)\n" //设置低内存阈值。 "\t-r [mem byte], set page cache reclaim memory threshold (Byte)\n" //设置pagecache内存回收阈值。 @@ -199,6 +242,7 @@ LITE_OS_SEC_TEXT_MINOR VOID OomPrintUsage(VOID) } ///查看和设置低内存阈值以及pagecache内存回收阈值。参数缺省时,显示oom功能当前配置信息。 //当系统内存不足时,会打印出内存不足的提示信息。 +>>>>>>> remotes/origin/main LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[]) { UINT32 lowMemThreshold; @@ -220,7 +264,11 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[]) PRINTK("[oom] low mem threshold %s(byte) invalid.\n", argv[1]); return OS_ERROR; } else { +<<<<<<< HEAD + OomSetLowMemThreashold(lowMemThreshold); +======= OomSetLowMemThreashold(lowMemThreshold);//设置低内存阈值 +>>>>>>> remotes/origin/main } } else if (strcmp(argv[0], "-i") == 0) { checkInterval = strtoul((CHAR *)argv[1], &endPtr, 0); @@ -228,7 +276,11 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[]) PRINTK("[oom] check interval %s(us) invalid.\n", argv[1]); return OS_ERROR; } else { +<<<<<<< HEAD + OomSetCheckInterval(checkInterval); +======= OomSetCheckInterval(checkInterval);//设置oom线程任务检查的时间间隔 +>>>>>>> remotes/origin/main } } else if (strcmp(argv[0], "-r") == 0) { reclaimMemThreshold = strtoul((CHAR *)argv[1], &endPtr, 0); @@ -236,7 +288,11 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[]) PRINTK("[oom] reclaim mem threshold %s(byte) invalid.\n", argv[1]); return OS_ERROR; } else { +<<<<<<< HEAD + OomSetReclaimMemThreashold(reclaimMemThreshold); +======= OomSetReclaimMemThreashold(reclaimMemThreshold);//设置pagecache内存回收阈值 +>>>>>>> remotes/origin/main } } else { PRINTK("%s: invalid option: %s %s\n", OOM_CMD, argv[0], argv[1]); @@ -251,6 +307,15 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdOom(INT32 argc, const CHAR *argv[]) } #ifdef LOSCFG_SHELL_CMD_DEBUG +<<<<<<< HEAD +SHELLCMD_ENTRY(oom_shellcmd, CMD_TYPE_SHOW, OOM_CMD, 2, (CmdCallBackFunc)OsShellCmdOom); +SHELLCMD_ENTRY(vm_shellcmd, CMD_TYPE_SHOW, VMM_CMD, 1, (CmdCallBackFunc)OsShellCmdDumpVm); +SHELLCMD_ENTRY(v2p_shellcmd, CMD_TYPE_SHOW, VMM_PMM_CMD, 1, (CmdCallBackFunc)OsShellCmdV2P); +#endif + +#ifdef LOSCFG_SHELL +SHELLCMD_ENTRY(pmm_shellcmd, CMD_TYPE_SHOW, "pmm", 0, (CmdCallBackFunc)OsShellCmdDumpPmm); +======= SHELLCMD_ENTRY(oom_shellcmd, CMD_TYPE_SHOW, OOM_CMD, 2, (CmdCallBackFunc)OsShellCmdOom);//采用shell命令静态注册方式 SHELLCMD_ENTRY(vm_shellcmd, CMD_TYPE_SHOW, VMM_CMD, 1, (CmdCallBackFunc)OsShellCmdDumpVm);//采用shell命令静态注册方式 vmm SHELLCMD_ENTRY(v2p_shellcmd, CMD_TYPE_SHOW, VMM_PMM_CMD, 1, (CmdCallBackFunc)OsShellCmdV2P);//采用shell命令静态注册方式 v2p @@ -258,6 +323,7 @@ SHELLCMD_ENTRY(v2p_shellcmd, CMD_TYPE_SHOW, VMM_PMM_CMD, 1, (CmdCallBackFunc)OsS #ifdef LOSCFG_SHELL SHELLCMD_ENTRY(pmm_shellcmd, CMD_TYPE_SHOW, "pmm", 0, (CmdCallBackFunc)OsShellCmdDumpPmm);//采用shell命令静态注册方式 +>>>>>>> remotes/origin/main #endif #endif diff --git a/src/kernel_liteos_a/kernel/base/mp/los_mp.c b/src/kernel_liteos_a/kernel/base/mp/los_mp.c index e2a13431..cfaf4331 100644 --- a/src/kernel_liteos_a/kernel/base/mp/los_mp.c +++ b/src/kernel_liteos_a/kernel/base/mp/los_mp.c @@ -36,14 +36,29 @@ #include "los_swtmr.h" #include "los_task_pri.h" +<<<<<<< HEAD +#ifdef LOSCFG_KERNEL_SMP + +======= #ifdef LOSCFG_KERNEL_SMP //给参数CPU发送调度信号 +>>>>>>> remotes/origin/main #ifdef LOSCFG_KERNEL_SMP_CALL LITE_OS_SEC_BSS SPIN_LOCK_INIT(g_mpCallSpin); #define MP_CALL_LOCK(state) 
LOS_SpinLockSave(&g_mpCallSpin, &(state))
 #define MP_CALL_UNLOCK(state) LOS_SpinUnlockRestore(&g_mpCallSpin, (state))
 #endif
+<<<<<<< HEAD
+
+VOID LOS_MpSchedule(UINT32 target)
+{
+    UINT32 cpuid = ArchCurrCpuid();
+    target &= ~(1U << cpuid);
+    HalIrqSendIpi(target, LOS_MP_IPI_SCHEDULE);
+}
+
+=======
 VOID LOS_MpSchedule(UINT32 target) // each bit of target corresponds to one CPU core
 {
     UINT32 cpuid = ArchCurrCpuid();
@@ -51,19 +66,37 @@ VOID LOS_MpSchedule(UINT32 target)
     HalIrqSendIpi(target, LOS_MP_IPI_SCHEDULE); // send the schedule signal to the target CPUs via an inter-processor interrupt (IPI)
 }
 /// hard-interrupt wake handler
+>>>>>>> remotes/origin/main
 VOID OsMpWakeHandler(VOID)
 {
     /* generic wakeup ipi, do nothing */
 }
+<<<<<<< HEAD
+
+VOID OsMpScheduleHandler(VOID)
+{
+=======
 /// hard-interrupt schedule handler
 VOID OsMpScheduleHandler(VOID)
 { // set the schedule flag differently from the wake handler so the scheduler can be triggered at the end of the hard interrupt
+>>>>>>> remotes/origin/main
     /*
      * set schedule flag to differ from wake function,
     * so that the scheduler can be triggered at the end of irq.
     */
     OsSchedRunqueuePendingSet();
 }
+<<<<<<< HEAD
+
+VOID OsMpHaltHandler(VOID)
+{
+    (VOID)LOS_IntLock();
+    OsPercpuGet()->excFlag = CPU_HALT;
+
+    while (1) {}
+}
+
+=======
 /// hard-interrupt halt handler
 VOID OsMpHaltHandler(VOID)
 {
@@ -73,6 +106,7 @@ VOID OsMpHaltHandler(VOID)
     while (1) {} // fall into an empty loop, i.e. stay idle
 }
 /// MP timer handler: recursively checks all available tasks
+>>>>>>> remotes/origin/main
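
Worked example of the target-mask handling in LOS_MpSchedule above: target is a per-CPU bitmask and the caller strips its own bit before raising the IPI. The mask values here are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t target = 0xF;         /* CPUs 0-3 requested */
        uint32_t cpuid  = 2;           /* pretend we are running on CPU 2 */
        target &= ~(1U << cpuid);      /* never send an IPI to yourself */
        printf("IPI mask: 0x%x\n", target); /* 0xB -> CPUs 0, 1 and 3 */
        return 0;
    }
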
 VOID OsMpCollectTasks(VOID)
 {
     LosTaskCB *taskCB = NULL;
@@ -80,19 +114,32 @@ VOID OsMpCollectTasks(VOID)
     UINT32 ret;
 
     /* recursive checking all the available task */
+<<<<<<< HEAD
+    for (; taskID <= g_taskMaxNum; taskID++) {
+=======
     for (; taskID <= g_taskMaxNum; taskID++) { // recursively check all available tasks
+>>>>>>> remotes/origin/main
         taskCB = &g_taskCBArray[taskID];
         if (OsTaskIsUnused(taskCB) || OsTaskIsRunning(taskCB)) {
             continue;
         }
+<<<<<<< HEAD
+        /*
+         * though task status is not atomic, this check may success but not accomplish
+         * the deletion; this deletion will be handled until the next run.
+         */
+        if (taskCB->signal & SIGNAL_KILL) {
+            ret = LOS_TaskDelete(taskID);
+=======
        /* the task status is not atomic, so this check may succeed and yet not complete the deletion; it is then handled on the next run
         * though task status is not atomic, this check may success but not accomplish
         * the deletion; this deletion will be handled until the next run.
         */
        if (taskCB->signal & SIGNAL_KILL) { // the task received a kill signal
            ret = LOS_TaskDelete(taskID); // delete the task and return it to the task pool
+>>>>>>> remotes/origin/main
             if (ret != LOS_OK) {
                 PRINT_WARN("GC collect task failed err:0x%x\n", ret);
             }
@@ -101,7 +148,10 @@ VOID OsMpCollectTasks(VOID)
 }
 
 #ifdef LOSCFG_KERNEL_SMP_CALL
+<<<<<<< HEAD
+=======
+>>>>>>> remotes/origin/main
 VOID OsMpFuncCall(UINT32 target, SMP_FUNC_CALL func, VOID *args)
 {
     UINT32 index;
@@ -111,6 +161,15 @@ VOID OsMpFuncCall(UINT32 target, SMP_FUNC_CALL func, VOID *args)
         return;
     }
 
+<<<<<<< HEAD
+    if (!(target & OS_MP_CPU_ALL)) {
+        return;
+    }
+
+    for (index = 0; index < LOSCFG_KERNEL_CORE_NUM; index++) {
+        if (CPUID_TO_AFFI_MASK(index) & target) {
+            MpCallFunc *mpCallFunc = (MpCallFunc *)LOS_MemAlloc(m_aucSysMem0, sizeof(MpCallFunc));
+=======
     if (!(target & OS_MP_CPU_ALL)) { // check that the target CPU mask is valid
         return;
     }
 
@@ -118,6 +177,7 @@ VOID OsMpFuncCall(UINT32 target, SMP_FUNC_CALL func, VOID *args)
     for (index = 0; index < LOSCFG_KERNEL_CORE_NUM; index++) { // iterate over all cores
         if (CPUID_TO_AFFI_MASK(index) & target) {
             MpCallFunc *mpCallFunc = (MpCallFunc *)LOS_MemAlloc(m_aucSysMem0, sizeof(MpCallFunc)); // allocate the callback node from kernel space
+>>>>>>> remotes/origin/main
             if (mpCallFunc == NULL) {
                 PRINT_ERR("smp func call malloc failed\n");
                 return;
@@ -126,6 +186,19 @@ VOID OsMpFuncCall(UINT32 target, SMP_FUNC_CALL func, VOID *args)
             mpCallFunc->args = args;
 
             MP_CALL_LOCK(intSave);
+<<<<<<< HEAD
+            LOS_ListAdd(&g_percpu[index].funcLink, &(mpCallFunc->node));
+            MP_CALL_UNLOCK(intSave);
+        }
+    }
+    HalIrqSendIpi(target, LOS_MP_IPI_FUNC_CALL);
+}
+
+VOID OsMpFuncCallHandler(VOID)
+{
+    UINT32 intSave;
+    UINT32 cpuid = ArchCurrCpuid();
+=======
             LOS_ListAdd(&g_percpu[index].funcLink, &(mpCallFunc->node)); // hang the callback node on the target CPU's list
             MP_CALL_UNLOCK(intSave);
         }
@@ -144,10 +217,21 @@ VOID OsMpFuncCallHandler(VOID)
 {
     UINT32 intSave;
     UINT32 cpuid = ArchCurrCpuid(); // the current CPU
+>>>>>>> remotes/origin/main
     LOS_DL_LIST *list = NULL;
     MpCallFunc *mpCallFunc = NULL;
 
     MP_CALL_LOCK(intSave);
+<<<<<<< HEAD
+    while (!LOS_ListEmpty(&g_percpu[cpuid].funcLink)) {
+        list = LOS_DL_LIST_FIRST(&g_percpu[cpuid].funcLink);
+        LOS_ListDelete(list);
+        MP_CALL_UNLOCK(intSave);
+
+        mpCallFunc = LOS_DL_LIST_ENTRY(list, MpCallFunc, node);
+        mpCallFunc->func(mpCallFunc->args);
+        (VOID)LOS_MemFree(m_aucSysMem0, mpCallFunc);
+=======
     while (!LOS_ListEmpty(&g_percpu[cpuid].funcLink)) { // drain the callback list until it is empty
         list = LOS_DL_LIST_FIRST(&g_percpu[cpuid].funcLink); // take the first entry
         LOS_ListDelete(list); // unlink it from the list
@@ -156,11 +240,25 @@ VOID OsMpFuncCallHandler(VOID)
         mpCallFunc = LOS_DL_LIST_ENTRY(list, MpCallFunc, node); // recover the callback node
         mpCallFunc->func(mpCallFunc->args); // invoke the callback with its argument
         (VOID)LOS_MemFree(m_aucSysMem0, mpCallFunc); // free the callback node
+>>>>>>> remotes/origin/main
         MP_CALL_LOCK(intSave);
     }
     MP_CALL_UNLOCK(intSave);
 }
+<<<<<<< HEAD
+
+VOID OsMpFuncCallInit(VOID)
+{
+    UINT32 index;
+    /* init funclink for each core */
+    for (index = 0; index < LOSCFG_KERNEL_CORE_NUM; index++) {
+        LOS_ListInit(&g_percpu[index].funcLink);
+    }
+}
+#endif /* LOSCFG_KERNEL_SMP_CALL */
+
+=======
 /// per-CPU callback module initialization
 VOID OsMpFuncCallInit(VOID)
 {
@@ -172,20 +270,31 @@ VOID OsMpFuncCallInit(VOID)
 }
 #endif /* LOSCFG_KERNEL_SMP_CALL */
 // MP (multiprocessing) initialization
+>>>>>>> remotes/origin/main
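
A user-space sketch of the per-CPU call queue that OsMpFuncCall and OsMpFuncCallHandler implement above. handler() stands in for the IPI-triggered drain on the target CPU, and the kernel's doubly linked per-CPU list is simplified to a singly linked queue with head/tail pointers:

    #include <stdio.h>
    #include <stdlib.h>

    typedef void (*smp_func)(void *);

    struct call_node {
        smp_func func;
        void *args;
        struct call_node *next;
    };

    static struct call_node *head, *tail;   /* stand-in for one CPU's funcLink */

    static void func_call(smp_func f, void *a)   /* plays OsMpFuncCall */
    {
        struct call_node *n = malloc(sizeof(*n));
        n->func = f; n->args = a; n->next = NULL;
        if (tail != NULL) { tail->next = n; } else { head = n; }
        tail = n;          /* enqueue; the kernel would now raise the IPI */
    }

    static void handler(void)                    /* plays OsMpFuncCallHandler */
    {
        while (head != NULL) {
            struct call_node *n = head;
            head = n->next;
            if (head == NULL) { tail = NULL; }
            n->func(n->args);                    /* invoke the queued callback */
            free(n);                             /* free the node, as the kernel does */
        }
    }

    static void hello(void *arg) { printf("called with %s\n", (char *)arg); }

    int main(void)
    {
        func_call(hello, "args");
        handler();
        return 0;
    }
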
 UINT32 OsMpInit(VOID)
 {
     UINT16 swtmrId;
 
+<<<<<<< HEAD
+    (VOID)LOS_SwtmrCreate(OS_MP_GC_PERIOD, LOS_SWTMR_MODE_PERIOD,
+                          (SWTMR_PROC_FUNC)OsMpCollectTasks, &swtmrId, 0);
+    (VOID)LOS_SwtmrStart(swtmrId);
+=======
     (VOID)LOS_SwtmrCreate(OS_MP_GC_PERIOD, LOS_SWTMR_MODE_PERIOD, // create a periodic timer with a 100-tick period
                           (SWTMR_PROC_FUNC)OsMpCollectTasks, &swtmrId, 0); // OsMpCollectTasks is the timeout callback
     (VOID)LOS_SwtmrStart(swtmrId); // start the timed task
+>>>>>>> remotes/origin/main
 
 #ifdef LOSCFG_KERNEL_SMP_CALL
     OsMpFuncCallInit();
 #endif
     return LOS_OK;
 }
 
+<<<<<<< HEAD
+LOS_MODULE_INIT(OsMpInit, LOS_INIT_LEVEL_KMOD_TASK);
+=======
 LOS_MODULE_INIT(OsMpInit, LOS_INIT_LEVEL_KMOD_TASK); // multiprocessor module initialization
+>>>>>>> remotes/origin/main
 
 #endif
diff --git a/src/kernel_liteos_a/kernel/base/mp/los_percpu.c b/src/kernel_liteos_a/kernel/base/mp/los_percpu.c
index 527c028f..241a8993 100644
--- a/src/kernel_liteos_a/kernel/base/mp/los_percpu.c
+++ b/src/kernel_liteos_a/kernel/base/mp/los_percpu.c
@@ -33,7 +33,11 @@
 #include "los_printf.h"
 
 #ifdef LOSCFG_KERNEL_SMP
+<<<<<<< HEAD
+Percpu g_percpu[LOSCFG_KERNEL_CORE_NUM];
+=======
 Percpu g_percpu[LOSCFG_KERNEL_CORE_NUM]; ///< CPU pool, sized by the number of CPU cores
+>>>>>>> remotes/origin/main
 
 VOID OsAllCpuStatusOutput(VOID)
 {
diff --git a/src/kernel_liteos_a/kernel/base/om/los_err.c b/src/kernel_liteos_a/kernel/base/om/los_err.c
index f3ad8a21..3ce5b013 100644
--- a/src/kernel_liteos_a/kernel/base/om/los_err.c
+++ b/src/kernel_liteos_a/kernel/base/om/los_err.c
@@ -31,6 +31,11 @@
 #include "los_err.h"
 
+<<<<<<< HEAD
+
+LITE_OS_SEC_BSS STATIC LOS_ERRORHANDLE_FUNC g_errHandleHook = NULL;
+
+=======
 /** Basic concept
 Error handling: when a program runs into an error, it calls the interface of the error-handling module, reports the error information, and invokes the registered hook function
@@ -54,6 +59,7 @@
 LITE_OS_SEC_BSS STATIC LOS_ERRORHANDLE_FUNC g_errHandleHook = NULL; ///< error
 * @param para  when called internally by the error-label system, the argument is NULL
 * @return LITE_OS_SEC_TEXT_INIT
 */
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_INIT UINT32 LOS_ErrHandle(CHAR *fileName, UINT32 lineNo, UINT32 errorNo,
                                            UINT32 paraLen, VOID *para)
 {
@@ -63,7 +69,11 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_ErrHandle(CHAR *fileName, UINT32 lineNo, UINT32
     return LOS_OK;
 }
+<<<<<<< HEAD
+
+=======
 /// set the hook function that handles errors
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_INIT VOID LOS_SetErrHandleHook(LOS_ERRORHANDLE_FUNC fun)
 {
     g_errHandleHook = fun;
diff --git a/src/kernel_liteos_a/kernel/base/sched/los_idle.c b/src/kernel_liteos_a/kernel/base/sched/los_idle.c
index 0ba434dc..da114c76 100644
--- a/src/kernel_liteos_a/kernel/base/sched/los_idle.c
+++ b/src/kernel_liteos_a/kernel/base/sched/los_idle.c
@@ -44,7 +44,11 @@ STATIC VOID IdleTimeSliceUpdate(SchedRunqueue *rq, LosTaskCB *taskCB, UINT64 cur
 STATIC INT32 IdleParamCompare(const SchedPolicy *sp1, const SchedPolicy *sp2);
 STATIC VOID IdlePriorityInheritance(LosTaskCB *owner, const SchedParam *param);
 STATIC VOID IdlePriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param);
+<<<<<<< HEAD
+
+=======
 // idle scheduling
+>>>>>>> remotes/origin/main
 const STATIC SchedOps g_idleOps = {
     .dequeue = IdleDequeue,
     .enqueue = IdleEnqueue,
diff --git a/src/kernel_liteos_a/kernel/base/sched/los_priority.c b/src/kernel_liteos_a/kernel/base/sched/los_priority.c
index 5f1c858d..8bef4dc4 100644
--- a/src/kernel_liteos_a/kernel/base/sched/los_priority.c
+++ b/src/kernel_liteos_a/kernel/base/sched/los_priority.c
@@ -1,5 +1,9 @@
 /*
+<<<<<<< HEAD
+ * Copyright (c) 2022-2023 Huawei Device Co., Ltd. All rights reserved.
+=======
 * Copyright (c) 2022-2022 Huawei Device Co., Ltd. All rights reserved.
+>>>>>>> remotes/origin/main * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: @@ -43,8 +47,11 @@ #define OS_SCHED_READY_MAX 30 #define OS_TIME_SLICE_MIN (INT32)((50 * OS_SYS_NS_PER_US) / OS_NS_PER_CYCLE) /* 50us */ +<<<<<<< HEAD +======= //基于优先数调度算法 Highest-Priority-First (HPF) +>>>>>>> remotes/origin/main STATIC HPFRunqueue g_schedHPF; STATIC VOID HPFDequeue(SchedRunqueue *rq, LosTaskCB *taskCB); @@ -65,7 +72,11 @@ STATIC VOID HPFTimeSliceUpdate(SchedRunqueue *rq, LosTaskCB *taskCB, UINT64 curr STATIC INT32 HPFParamCompare(const SchedPolicy *sp1, const SchedPolicy *sp2); STATIC VOID HPFPriorityInheritance(LosTaskCB *owner, const SchedParam *param); STATIC VOID HPFPriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param); +<<<<<<< HEAD + +======= //优先级调度算法操作 +>>>>>>> remotes/origin/main const STATIC SchedOps g_priorityOps = { .dequeue = HPFDequeue, .enqueue = HPFEnqueue, @@ -245,7 +256,11 @@ STATIC INLINE VOID PriQueInsert(HPFRunqueue *rq, LosTaskCB *taskCB) taskCB->taskStatus &= ~OS_TASK_STATUS_BLOCKED; taskCB->taskStatus |= OS_TASK_STATUS_READY; } +<<<<<<< HEAD + +======= //入就绪队列 +>>>>>>> remotes/origin/main STATIC VOID HPFEnqueue(SchedRunqueue *rq, LosTaskCB *taskCB) { #ifdef LOSCFG_SCHED_HPF_DEBUG @@ -255,14 +270,24 @@ STATIC VOID HPFEnqueue(SchedRunqueue *rq, LosTaskCB *taskCB) #endif PriQueInsert(rq->hpfRunqueue, taskCB); } +<<<<<<< HEAD + +======= //出就绪队列 +>>>>>>> remotes/origin/main STATIC VOID HPFDequeue(SchedRunqueue *rq, LosTaskCB *taskCB) { SchedHPF *sched = (SchedHPF *)&taskCB->sp; +<<<<<<< HEAD + if (taskCB->taskStatus & OS_TASK_STATUS_READY) { + PriQueDelete(rq->hpfRunqueue, sched->basePrio, &taskCB->pendList, sched->priority); + taskCB->taskStatus &= ~OS_TASK_STATUS_READY; +======= if (taskCB->taskStatus & OS_TASK_STATUS_READY) {//是否有就绪状态 PriQueDelete(rq->hpfRunqueue, sched->basePrio, &taskCB->pendList, sched->priority); taskCB->taskStatus &= ~OS_TASK_STATUS_READY;//更新成非就绪状态 +>>>>>>> remotes/origin/main } } @@ -477,7 +502,11 @@ STATIC VOID HPFPriorityInheritance(LosTaskCB *owner, const SchedParam *param) LOS_BitmapSet(&sp->priBitmap, sp->priority); sp->priority = param->priority; } +<<<<<<< HEAD + +======= /// 恢复任务优先级 +>>>>>>> remotes/origin/main STATIC VOID HPFPriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param) { UINT16 priority; @@ -500,8 +529,13 @@ STATIC VOID HPFPriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const } if ((list != NULL) && !LOS_ListEmpty((LOS_DL_LIST *)list)) { +<<<<<<< HEAD + priority = LOS_HighBitGet(sp->priBitmap); + LOS_DL_LIST_FOR_EACH_ENTRY(pendedTask, list, LosTaskCB, pendList) { +======= priority = LOS_HighBitGet(sp->priBitmap);//获取在历史调度中最高优先级 LOS_DL_LIST_FOR_EACH_ENTRY(pendedTask, list, LosTaskCB, pendList) {//遍历链表 +>>>>>>> remotes/origin/main SchedHPF *pendSp = (SchedHPF *)&pendedTask->sp; if ((pendedTask->ops == owner->ops) && (priority != pendSp->priority)) { LOS_BitmapClr(&sp->priBitmap, pendSp->priority); @@ -539,7 +573,11 @@ VOID HPFProcessDefaultSchedParamGet(SchedParam *param) { param->basePrio = OS_USER_PROCESS_PRIORITY_HIGHEST; } +<<<<<<< HEAD + +======= //HPF 调度策略初始化 +>>>>>>> remotes/origin/main VOID HPFSchedPolicyInit(SchedRunqueue *rq) { if (ArchCurrCpuid() > 0) { diff --git a/src/kernel_liteos_a/kernel/base/sched/los_sortlink.c b/src/kernel_liteos_a/kernel/base/sched/los_sortlink.c index 7dcfce04..2eca1624 100644 --- 
a/src/kernel_liteos_a/kernel/base/sched/los_sortlink.c
+++ b/src/kernel_liteos_a/kernel/base/sched/los_sortlink.c
@@ -1,6 +1,10 @@
 /*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
+<<<<<<< HEAD
+ * Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
+=======
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
+>>>>>>> remotes/origin/main
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
@@ -30,7 +34,11 @@
 */
 #include "los_sortlink_pri.h"
+<<<<<<< HEAD
+
+=======
 /// sorted-list initialization
+>>>>>>> remotes/origin/main
 VOID OsSortLinkInit(SortLinkAttribute *sortLinkHeader)
 {
     LOS_ListInit(&sortLinkHeader->sortLink);
@@ -38,6 +46,33 @@ VOID OsSortLinkInit(SortLinkAttribute *sortLinkHeader)
     sortLinkHeader->nodeNum = 0;
 }
 
+<<<<<<< HEAD
+STATIC INLINE VOID AddNode2SortLink(SortLinkAttribute *sortLinkHeader, SortLinkList *sortList)
+{
+    LOS_DL_LIST *head = (LOS_DL_LIST *)&sortLinkHeader->sortLink;
+
+    if (LOS_ListEmpty(head)) {
+        LOS_ListHeadInsert(head, &sortList->sortLinkNode);
+        sortLinkHeader->nodeNum++;
+        return;
+    }
+
+    SortLinkList *listSorted = LOS_DL_LIST_ENTRY(head->pstNext, SortLinkList, sortLinkNode);
+    if (listSorted->responseTime > sortList->responseTime) {
+        LOS_ListAdd(head, &sortList->sortLinkNode);
+        sortLinkHeader->nodeNum++;
+        return;
+    } else if (listSorted->responseTime == sortList->responseTime) {
+        LOS_ListAdd(head->pstNext, &sortList->sortLinkNode);
+        sortLinkHeader->nodeNum++;
+        return;
+    }
+
+    LOS_DL_LIST *prevNode = head->pstPrev;
+    do {
+        listSorted = LOS_DL_LIST_ENTRY(prevNode, SortLinkList, sortLinkNode);
+        if (listSorted->responseTime <= sortList->responseTime) {
+=======
 /*!
 * @brief OsAddNode2SortLink  insert a node into the list, keeping it ordered by time
 *
@@ -72,13 +107,19 @@ STATIC INLINE VOID AddNode2SortLink(SortLinkAttribute *sortLinkHeader, SortLinkL
     do { // @note_good neatly written; this is the charm of a doubly linked list
         listSorted = LOS_DL_LIST_ENTRY(prevNode, SortLinkList, sortLinkNode); // walk the nodes one by one, from the largest responseTime downward
         if (listSorted->responseTime <= sortList->responseTime) { // if its time is smaller or equal, insert after it
+>>>>>>> remotes/origin/main
             LOS_ListAdd(prevNode, &sortList->sortLinkNode);
             sortLinkHeader->nodeNum++;
             break;
         }
+<<<<<<< HEAD
+        prevNode = prevNode->pstPrev;
+    } while (1);
+=======
         prevNode = prevNode->pstPrev; // then compare against the previous, smaller responseTime
     } while (1); // endless loop until the break
+>>>>>>> remotes/origin/main
 }
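
The tail-first ordered insert of AddNode2SortLink, replayed on a plain array standing in for the doubly linked list: scanning from the tail finds the first element whose time is less than or equal to the new value, and the new node goes right after it, keeping the list sorted by expiry time:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t times[8] = { 10, 20, 40, 80 };  /* existing responseTimes, sorted */
        int n = 4;
        uint64_t newTime = 30;                   /* node to insert */

        int i = n - 1;
        while (i >= 0 && times[i] > newTime) {
            times[i + 1] = times[i];             /* shift larger entries toward the tail */
            i--;
        }
        times[i + 1] = newTime;                  /* insert after the first smaller-or-equal entry */
        n++;

        for (i = 0; i < n; i++) {
            printf("%llu ", (unsigned long long)times[i]); /* 10 20 30 40 80 */
        }
        printf("\n");
        return 0;
    }
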
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_dump.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_dump.c
index 1fbc80b9..05d64221 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_dump.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_dump.c
@@ -159,26 +159,44 @@ UINT32 OsKProcessPmUsage(LosVmSpace *kSpace, UINT32 *actualPm)

    /* Kernel resident memory, include default heap memory */
    memUsed = SYS_MEM_SIZE_DEFAULT - (totalCount << PAGE_SHIFT);

    spaceList = LOS_GetVmSpaceList(); // the list that every virtual space hangs off
    LosMux *vmSpaceListMux = OsGVmSpaceMuxGet();
    (VOID)LOS_MuxAcquire(vmSpaceListMux);
    LOS_DL_LIST_FOR_EACH_ENTRY(space, spaceList, LosVmSpace, node) { // walk the list
        if (space == LOS_GetKVmSpace()) { // the kernel space is not counted
            continue;
        }
        UProcessUsed += OsUProcessPmUsage(space, NULL, NULL);
    }
    (VOID)LOS_MuxRelease(vmSpaceListMux);

    /* Kernel dynamic memory, include extended heap memory */
    memUsed += ((usedCount << PAGE_SHIFT) - UProcessUsed);
    /* Remaining heap memory */
    memUsed -= freeMem;

    *actualPm = memUsed;
    return memUsed;
}

/// Physical memory usage of the shell task
UINT32 OsShellCmdProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actualPm)
{
    if (space == NULL) {
@@ -194,7 +212,11 @@ UINT32 OsShellCmdProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actu
    }
    return OsUProcessPmUsage(space, sharePm, actualPm);
}

/// Physical memory usage of a virtual space; sharePm and actualPm also carry back the shared portion, both in bytes
UINT32 OsUProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actualPm)
{
    LosVmMapRegion *region = NULL;
@@ -230,10 +252,17 @@ UINT32 OsUProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actualPm)
            continue;
        }

        shareRef = LOS_AtomicRead(&page->refCounts); // a refCounts above 1 means other spaces also reference the page
        if (shareRef > 1) {
            if (sharePm != NULL) {
                *sharePm += PAGE_SIZE; // one page is 4K bytes
            }
            pmSize += PAGE_SIZE / shareRef;
        } else {
@@ -250,9 +279,13 @@ UINT32 OsUProcessPmUsage(LosVmSpace *space, UINT32 *sharePm, UINT32 *actualPm)
    return pmSize;
}

/// @brief Find the owning process from a virtual space
/// @param space
/// @return
LosProcessCB *OsGetPIDByAspace(const LosVmSpace *space)
{
    UINT32 pid;
@@ -260,6 +293,15 @@ LosProcessCB *OsGetPIDByAspace(const LosVmSpace *space)
    LosProcessCB *processCB = NULL;

    SCHEDULER_LOCK(intSave);
    for (pid = 0; pid < g_processMaxNum; ++pid) { // scan the process pool, which is really an array
        processCB = g_processCBArray + pid;
        if (OsProcessIsUnused(processCB)) { // this slot has not been handed out yet
            continue;
        }

        if (processCB->vmSpace == space) { // found it
            SCHEDULER_UNLOCK(intSave);
            return processCB;
        }
@@ -274,11 +317,15 @@ LosProcessCB *OsGetPIDByAspace(const LosVmSpace *space)
    SCHEDULER_UNLOCK(intSave);
    return NULL;
}

/// @brief Count the pages of one region in a virtual space
/// @param space
/// @param region
/// @param pssPages
/// @return
UINT32 OsCountRegionPages(LosVmSpace *space, LosVmMapRegion *region, UINT32 *pssPages)
{
    UINT32 regionPages = 0;
@@ -312,7 +359,11 @@ UINT32 OsCountRegionPages(LosVmSpace *space, LosVmMapRegion *region, UINT32 *pss
    return regionPages;
}

/// Count the total pages of a virtual space
UINT32 OsCountAspacePages(LosVmSpace *space)
{
    UINT32 spacePages = 0;
@@ -396,30 +447,49 @@ VOID OsDumpRegion2(LosVmSpace *space, LosVmMapRegion *region)
        region->range.size, flagsStr, regionPages, pssPages);
    (VOID)LOS_MemFree(m_aucSysMem0, flagsStr);
}
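Note: OsUProcessPmUsage charges a shared page to each mapper proportionally (PAGE_SIZE / refCounts), which is the usual PSS (proportional set size) metric. A minimal numeric sketch of that accounting (standalone, invented names):

#include <stdio.h>

#define PAGE_SIZE 4096u

/* PSS charge for one mapped page: split the page evenly among all mappers */
static unsigned pss_charge(unsigned ref_counts)
{
    return (ref_counts > 1) ? PAGE_SIZE / ref_counts : PAGE_SIZE;
}

int main(void)
{
    /* a page mapped by 4 processes costs each of them 1K; a private page costs the full 4K */
    printf("%u %u\n", pss_charge(4), pss_charge(1)); /* prints: 1024 4096 */
    return 0;
}

Summing the PSS of every process therefore approximates total used memory without double-counting shared pages.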
/// Dump the information of one virtual space
VOID OsDumpAspace(LosVmSpace *space)
{
    LosVmMapRegion *region = NULL;
    LosRbNode *pstRbNode = NULL;
    LosRbNode *pstRbNodeNext = NULL;
    UINT32 spacePages;
    LosProcessCB *pcb = OsGetPIDByAspace(space); // find the owning process from the virtual space
    if (pcb == NULL) {
        return;
    }

    // process ID | address of the VM control block | VM base address | VM size | physical pages in use
    spacePages = OsCountAspacePages(space); // page count of the space
    PRINTK("\r\n PID    aspace     name       base       size     pages \n");
    PRINTK(" ----   ------     ----       ----       -----     ----\n");
    PRINTK(" %-4d %#010x %-10.10s %#010x %#010x     %d\n", pcb->processID, space, pcb->processName,
        space->base, space->size, spacePages);

    // region control block | region type | region base | region size | mmu flags | pages used (incl. shared) | pages used
    PRINTK("\r\n\t region      name                base       size       mmu_flags      pages   pg/ref\n");
    PRINTK("\t ------      ----                ----       ----       ---------      -----   -----\n");
    RB_SCAN_SAFE(&space->regionRbTree, pstRbNode, pstRbNodeNext) // go over the tree region by region
        region = (LosVmMapRegion *)pstRbNode;
        if (region != NULL) {
            OsDumpRegion2(space, region);
@@ -430,6 +500,16 @@ VOID OsDumpAspace(LosVmSpace *space)
    RB_SCAN_SAFE_END(&space->regionRbTree, pstRbNode, pstRbNodeNext)
    return;
}

/// Dump the virtual memory usage of every process
VOID OsDumpAllAspace(VOID)
{
    LosVmSpace *space = NULL;
    LOS_DL_LIST *aspaceList = LOS_GetVmSpaceList();
    LOS_DL_LIST_FOR_EACH_ENTRY(space, aspaceList, LosVmSpace, node) { // take each process's virtual space in turn
        (VOID)LOS_MuxAcquire(&space->regionMux);
        OsDumpAspace(space); // dump the space
        (VOID)LOS_MuxRelease(&space->regionMux);
    }
    return;
@@ -456,11 +537,19 @@ STATUS_T OsRegionOverlapCheck(LosVmSpace *space, LosVmMapRegion *region)
    (VOID)LOS_MuxRelease(&space->regionMux);
    return ret;
}
/// Dump a page table entry
VOID OsDumpPte(VADDR_T vaddr)
{
    UINT32 l1Index = vaddr >> MMU_DESCRIPTOR_L1_SMALL_SHIFT;
    LosVmSpace *space = LOS_SpaceGet(vaddr); // get the space from the address; the kernel keeps three: kernel process space, kernel heap space, user process space
    UINT32 ttEntry;
    LosVmPage *page = NULL;
    PTE_T *l2Table = NULL;
@@ -470,6 +559,16 @@ VOID OsDumpPte(VADDR_T vaddr)
        return;
    }

    ttEntry = space->archMmu.virtTtb[l1Index]; // locate the L1 table entry
    if (ttEntry) {
        l2Table = LOS_PaddrToKVaddr(MMU_DESCRIPTOR_L1_PAGE_TABLE_ADDR(ttEntry)); // find the L2 table behind the L1 entry
        l2Index = (vaddr % MMU_DESCRIPTOR_L1_SMALL_SIZE) >> PAGE_SHIFT;
        if (l2Table == NULL) {
            goto ERR;
        }
        page = LOS_VmPageGet(l2Table[l2Index] & ~(PAGE_SIZE - 1)); // fetch the physical page frame
        if (page == NULL) {
            goto ERR;
        }
        PRINTK("vaddr %p, l1Index %d, ttEntry %p, l2Table %p, l2Index %d, pfn %p count %d\n",
               vaddr, l1Index, ttEntry, l2Table, l2Index, l2Table[l2Index], LOS_AtomicRead(&page->refCounts)); // print the L1 and L2 entries
    } else { // not present in the L1 table
        PRINTK("vaddr %p, l1Index %d, ttEntry %p\n", vaddr, l1Index, ttEntry);
    }
    return;
ERR:
    PRINTK("%s, error vaddr: %#x, l2Table: %#x, l2Index: %#x\n", __FUNCTION__, vaddr, l2Table, l2Index);
}

/// Number of free page frames left in a physical segment
UINT32 OsVmPhySegPagesGet(LosVmPhysSeg *seg)
{
    UINT32 intSave;
@@ -498,6 +607,16 @@ UINT32 OsVmPhySegPagesGet(LosVmPhysSeg *seg)
    UINT32 segFreePages = 0;

    LOS_SpinLockSave(&seg->freeListLock, &intSave);
    for (flindex = 0; flindex < VM_LIST_ORDER_MAX; flindex++) { // walk the block-order lists
        segFreePages += ((1 << flindex) * seg->freeList[flindex].listCnt); // (1 << flindex) pages per block, times the block count, gives the order's total pages
    }
    LOS_SpinUnlockRestore(&seg->freeListLock, intSave);

    return segFreePages;
}

@@ -517,6 +636,7 @@ UINT32 OsVmPhySegPagesGet(LosVmPhysSeg *seg)
 * inactive file: number of inactive file pages in the page cache
 * pmm pages total: total physical pages; used: physical pages in use; free: free physical pages
 ************************************************************/
VOID OsVmPhysDump(VOID)
{
    LosVmPhysSeg *seg = NULL;
@@ -528,7 +648,11 @@ VOID OsVmPhysDump(VOID)
    UINT32 flindex;
    UINT32 listCount[VM_LIST_ORDER_MAX] = {0};

    for (segIndex = 0; segIndex < g_vmPhysSegNum; segIndex++) { // take each segment in turn
        seg = &g_vmPhysSeg[segIndex];
        if (seg->size > 0) {
            segFreePages = OsVmPhySegPagesGet(seg);
@@ -558,7 +682,11 @@ VOID OsVmPhysDump(VOID)
    PRINTK("\n\rpmm pages: total = %u, used = %u, free = %u\n",
           totalPages, (totalPages - totalFreePages), totalFreePages);
}

/// Fetch physical memory usage; the two out-parameters carry the data back
VOID OsVmPhysUsedInfoGet(UINT32 *usedCount, UINT32 *totalCount)
{
    UINT32 index;
@@ -571,12 +699,21 @@ VOID OsVmPhysUsedInfoGet(UINT32 *usedCount, UINT32 *totalCount)
    *usedCount = 0;
    *totalCount = 0;

    for (index = 0; index < g_vmPhysSegNum; index++) { // take each segment in turn
        physSeg = &g_vmPhysSeg[index];
        if (physSeg->size > 0) {
            *totalCount += physSeg->size >> PAGE_SHIFT; // accumulate the segment's total pages
            segFreePages = OsVmPhySegPagesGet(physSeg); // free pages left in the segment
            *usedCount += (*totalCount - segFreePages); // accumulate the segment's used pages
        }
    }
}
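Note: OsDumpPte above walks ARM short-descriptor tables: the top bits of the virtual address index the L1 table, whose entry points at an L2 table indexed by the middle bits. A simplified standalone model of that walk (parameters invented: 4KiB pages, 1MiB L1 sections; the real code also decodes descriptor type bits):

#include <stdint.h>

#define PAGE_SHIFT 12                         /* 4 KiB pages */
#define L1_SHIFT   20                         /* each L1 entry covers 1 MiB */
#define L2_ENTRIES (1u << (L1_SHIFT - PAGE_SHIFT))

/* returns the physical address for vaddr, or 0 if unmapped */
static uint32_t walk(const uint32_t *l1, uint32_t *const *l2_tables, uint32_t vaddr)
{
    uint32_t l1_index = vaddr >> L1_SHIFT;
    if (l1[l1_index] == 0) {
        return 0;                             /* no L2 table behind this entry: fault */
    }
    const uint32_t *l2 = l2_tables[l1_index];
    uint32_t l2_index = (vaddr >> PAGE_SHIFT) & (L2_ENTRIES - 1);
    uint32_t pte = l2[l2_index];
    if (pte == 0) {
        return 0;                             /* page not present: fault */
    }
    return (pte & ~((1u << PAGE_SHIFT) - 1))  /* frame base taken from the PTE */
         | (vaddr & ((1u << PAGE_SHIFT) - 1));/* plus the offset within the page */
}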
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_fault.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_fault.c
index 902f84ea..9118a18f 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_fault.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_fault.c
@@ -54,7 +54,11 @@
extern char __exc_table_start[];
extern char __exc_table_end[];

// Region access-permission check
STATIC STATUS_T OsVmRegionPermissionCheck(LosVmMapRegion *region, UINT32 flags)
{
    if ((region->regionFlags & VM_MAP_REGION_FLAG_PERM_READ) != VM_MAP_REGION_FLAG_PERM_READ) {
@@ -62,14 +66,22 @@ STATIC STATUS_T OsVmRegionPermissionCheck(LosVmMapRegion *region, UINT32 flags)
        return LOS_NOK;
    }

    if ((flags & VM_MAP_PF_FLAG_WRITE) == VM_MAP_PF_FLAG_WRITE) { // write permission requested
        if ((region->regionFlags & VM_MAP_REGION_FLAG_PERM_WRITE) != VM_MAP_REGION_FLAG_PERM_WRITE) {
            VM_ERR("write permission check failed operation flags %x, region flags %x", flags, region->regionFlags);
            return LOS_NOK;
        }
    }

    if ((flags & VM_MAP_PF_FLAG_INSTRUCTION) == VM_MAP_PF_FLAG_INSTRUCTION) { // instruction fetch
        if ((region->regionFlags & VM_MAP_REGION_FLAG_PERM_EXECUTE) != VM_MAP_REGION_FLAG_PERM_EXECUTE) {
            VM_ERR("exec permission check failed operation flags %x, region flags %x", flags, region->regionFlags);
            return LOS_NOK;
@@ -97,8 +109,12 @@ STATIC VOID OsFaultTryFixup(ExcContext *frame, VADDR_T excVaddr, STATUS_T *statu
}

#ifdef LOSCFG_FS_VFS
// Handle a page fault raised while reading a page
STATIC STATUS_T OsDoReadFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
{
    status_t ret;
    PADDR_T paddr;
@@ -106,16 +122,36 @@ STATIC STATUS_T OsDoReadFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
    VADDR_T vaddr = (VADDR_T)vmPgFault->vaddr;
    LosVmSpace *space = region->space;

    ret = LOS_ArchMmuQuery(&space->archMmu, vaddr, NULL, NULL); // is the page really missing?
    if (ret == LOS_OK) { // note: LOS_OK means the query found a physical address, so the page exists;
        return LOS_OK;   // a fault only ever means the virtual address had no mapping to a physical one
    }
    if (region->unTypeData.rf.vmFOps == NULL || region->unTypeData.rf.vmFOps->fault == NULL) { // the region must implement the fault hook
        VM_ERR("region args invalid, file path: %s", region->unTypeData.rf.vnode->filePath);
        return LOS_ERRNO_VM_INVALID_ARGS;
    }

    (VOID)LOS_MuxAcquire(&region->unTypeData.rf.vnode->mapping.mux_lock);
    ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault); // a function pointer; what runs is g_commVmOps.OsVmmFileFault
    if (ret == LOS_OK) {
        paddr = LOS_PaddrQuery(vmPgFault->pageKVaddr); // look up the physical address
        page = LOS_VmPageGet(paddr);
        if (page != NULL) { /* just incase of page null */
            LOS_AtomicInc(&page->refCounts);
            OsCleanPageLocked(page);
        }
        ret = LOS_ArchMmuMap(&space->archMmu, vaddr, paddr, 1,
                             region->regionFlags & (~VM_MAP_REGION_FLAG_PERM_WRITE)); // remap as non-writable
        if (ret < 0) {
            VM_ERR("LOS_ArchMmuMap failed");
            OsDelMapInfo(region, vmPgFault, false);
@@ -141,7 +178,11 @@ STATIC STATUS_T OsDoReadFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
    return LOS_ERRNO_VM_NO_MEMORY;
}

/* unmap a page when cow happened only */
STATIC LosVmPage *OsCowUnmapOrg(LosArchMmu *archMmu, LosVmMapRegion *region, LosVmPgFault *vmf)
{
    UINT32 intSave;
@@ -169,7 +210,11 @@ STATIC LosVmPage *OsCowUnmapOrg(LosArchMmu *archMmu, LosVmMapRegion *region, Los
    return oldPage;
}
#endif
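Note: the permission check above is pure bitmask testing: every right the faulting access needs must already be present in the region's flags. A compact standalone equivalent (flag values invented):

#include <stdbool.h>
#include <stdint.h>

enum { PERM_READ = 1u << 0, PERM_WRITE = 1u << 1, PERM_EXECUTE = 1u << 2 };

/* true iff every right in 'wanted' is granted in 'granted' */
static bool perm_ok(uint32_t granted, uint32_t wanted)
{
    return (granted & wanted) == wanted;
}

/* e.g. perm_ok(PERM_READ | PERM_WRITE, PERM_WRITE) == true,
        perm_ok(PERM_READ, PERM_READ | PERM_WRITE) == false */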
// Handle a page fault raised by writing a file mapped into a private region
status_t OsDoCowFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
{
    STATUS_T ret;
@@ -187,23 +232,40 @@ status_t OsDoCowFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
    }
    space = region->space;

    ret = LOS_ArchMmuQuery(&space->archMmu, (VADDR_T)vmPgFault->vaddr, &oldPaddr, NULL); // look up the old physical address
    if (ret == LOS_OK) {
        oldPage = OsCowUnmapOrg(&space->archMmu, region, vmPgFault); // drop the old page mapping
    }

    newPage = LOS_PhysPageAlloc(); // allocate a fresh page
    if (newPage == NULL) {
        VM_ERR("LOS_PhysPageAlloc failed");
        ret = LOS_ERRNO_VM_NO_MEMORY;
        goto ERR_OUT;
    }

    newPaddr = VM_PAGE_TO_PHYS(newPage); // the new page's physical address
    kvaddr = OsVmPageToVaddr(newPage);   // the new page's kernel virtual address

    (VOID)LOS_MuxAcquire(&region->unTypeData.rf.vnode->mapping.mux_lock);
    ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault); // function pointer: g_commVmOps.OsVmmFileFault
    if (ret != LOS_OK) {
        VM_ERR("call region->vm_ops->fault fail");
        (VOID)LOS_MuxRelease(&region->unTypeData.rf.vnode->mapping.mux_lock);
@@ -215,6 +277,17 @@ status_t OsDoCowFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
     * we can take it as a normal file cow map. 2.this page has done file cow map,
     * we can take it as a anonymous cow map.
     */
    if ((oldPaddr == 0) || (LOS_PaddrToKVaddr(oldPaddr) == vmPgFault->pageKVaddr)) { // no mapping yet, or already mapped in the page cache
        (VOID)memcpy_s(kvaddr, PAGE_SIZE, vmPgFault->pageKVaddr, PAGE_SIZE); // copy straight into the new page
        LOS_AtomicInc(&newPage->refCounts); // bump the reference count
        OsCleanPageLocked(LOS_VmPageGet(LOS_PaddrQuery(vmPgFault->pageKVaddr)));
    } else {
        OsPhysSharePageCopy(oldPaddr, &newPaddr, newPage);
        /* use old page free the new one */
        if (newPaddr == oldPaddr) { // note newPaddr may have been changed: its address was passed in above
            LOS_PhysPageFree(newPage); // free the new page rather than waste it; the kernel makes every byte count
            newPage = NULL;
        }
    }

    ret = LOS_ArchMmuMap(&space->archMmu, (VADDR_T)vmPgFault->vaddr, newPaddr, 1, region->regionFlags); // map the new physical page at the faulting virtual address, so it no longer faults
    if (ret < 0) {
        VM_ERR("LOS_ArchMmuMap failed");
        ret = LOS_ERRNO_VM_NO_MEMORY;
@@ -253,7 +331,11 @@ ERR_OUT:
    return ret;
}

/// Handle a page fault raised by writing a file mapped into a shared region (the region is shared, hence the different rules)
status_t OsDoSharedFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
{
    STATUS_T ret;
@@ -269,10 +351,17 @@ status_t OsDoSharedFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
        return LOS_ERRNO_VM_INVALID_ARGS;
    }

    ret = LOS_ArchMmuQuery(&space->archMmu, vmPgFault->vaddr, &paddr, NULL); // look up the physical address
    if (ret == LOS_OK) {
        LOS_ArchMmuUnmap(&space->archMmu, vmPgFault->vaddr, 1); // unmap first,
        ret = LOS_ArchMmuMap(&space->archMmu, vaddr, paddr, 1, region->regionFlags); // then remap; this dance is needed because regionFlags has changed
        if (ret < 0) {
            VM_ERR("LOS_ArchMmuMap failed. ret=%d", ret);
            return LOS_ERRNO_VM_NO_MEMORY;
@@ -280,16 +369,27 @@ status_t OsDoSharedFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)

        LOS_SpinLockSave(&region->unTypeData.rf.vnode->mapping.list_lock, &intSave);
        fpage = OsFindGetEntry(&region->unTypeData.rf.vnode->mapping, vmPgFault->pgoff);
        if (fpage) { // found in the page cache
            OsMarkPageDirty(fpage, region, 0, 0); // mark the page dirty
        }
        LOS_SpinUnlockRestore(&region->unTypeData.rf.vnode->mapping.list_lock, intSave);

        return LOS_OK;
    }

    // from here on: the address had no physical mapping yet
    (VOID)LOS_MuxAcquire(&region->unTypeData.rf.vnode->mapping.mux_lock);
    ret = region->unTypeData.rf.vmFOps->fault(region, vmPgFault); // function pointer: g_commVmOps.OsVmmFileFault
    if (ret == LOS_OK) {
        paddr = LOS_PaddrQuery(vmPgFault->pageKVaddr);
        page = LOS_VmPageGet(paddr);
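Note: OsDoCowFault above is classic copy-on-write: the writer gets a private copy of the shared page and the MMU entry is switched over to it. A toy standalone model of the core decision (no MMU, invented names):

#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u

struct page { unsigned ref; unsigned char data[PAGE_SIZE]; };

/* on a write fault: a page shared with others gets copied; a private one is reused */
static struct page *cow_break(struct page *old)
{
    if (old->ref == 1) {
        return old;                           /* sole owner: safe to write in place */
    }
    struct page *copy = malloc(sizeof(*copy));
    if (copy == NULL) {
        return NULL;                          /* OOM: the fault fails */
    }
    memcpy(copy->data, old->data, PAGE_SIZE); /* private copy for the writer */
    copy->ref = 1;
    old->ref--;                               /* one fewer mapper of the original */
    return copy;
}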
@@ -320,11 +420,29 @@ status_t OsDoSharedFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault)
 * For COW fault, pagecache is copied to private anonyous pages and the changes on this page
 * won't write through to the underlying file. For SHARED fault, pagecache is mapping with
 * region->arch_mmu_flags and the changes on this page will write through to the underlying file
 */
// Dispatch a page fault taken while operating on a file
STATIC STATUS_T OsDoFileFault(LosVmMapRegion *region, LosVmPgFault *vmPgFault, UINT32 flags)
{
    STATUS_T ret;

    if (flags & VM_MAP_PF_FLAG_WRITE) { // the fault was taken on a write
        if (region->regionFlags & VM_MAP_REGION_FLAG_SHARED) { // a shared region
            ret = OsDoSharedFault(region, vmPgFault); // shared write fault: the most complex case; the changes are written through to the disk file
        } else {
            ret = OsDoCowFault(region, vmPgFault); // private write fault: copy-on-write
        }
    } else {
        ret = OsDoReadFault(region, vmPgFault); // read fault
    }
    return ret;
}
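Note: the dispatch in OsDoFileFault boils down to a two-bit decision: write vs. read, shared vs. private. A table-like standalone rendering of the same logic (all names invented, handlers stubbed):

typedef int (*fault_fn)(void *region, void *fault);

static int do_read_fault(void *r, void *f)   { (void)r; (void)f; return 0; }
static int do_cow_fault(void *r, void *f)    { (void)r; (void)f; return 0; }
static int do_shared_fault(void *r, void *f) { (void)r; (void)f; return 0; }

enum { PF_WRITE = 1, REGION_SHARED = 2 };

static fault_fn pick_handler(unsigned pf_flags, unsigned region_flags)
{
    if (!(pf_flags & PF_WRITE)) {
        return do_read_fault;                          /* read fault: map read-only */
    }
    return (region_flags & REGION_SHARED) ? do_shared_fault /* write-through to the file */
                                          : do_cow_fault;   /* private copy for the writer */
}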
status=%d", status); goto CHECK_FAILED; @@ -422,13 +574,29 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame) goto DONE; } #endif +<<<<<<< HEAD + + newPage = LOS_PhysPageAlloc(); +======= //请求调页:推迟到不能再推迟为止 newPage = LOS_PhysPageAlloc();//分配一个新的物理页 +>>>>>>> remotes/origin/main if (newPage == NULL) { status = LOS_ERRNO_VM_NO_MEMORY; goto CHECK_FAILED; } +<<<<<<< HEAD + newPaddr = VM_PAGE_TO_PHYS(newPage); + (VOID)memset_s(OsVmPageToVaddr(newPage), PAGE_SIZE, 0, PAGE_SIZE); + status = LOS_ArchMmuQuery(&space->archMmu, vaddr, &oldPaddr, NULL); + if (status >= 0) { + LOS_ArchMmuUnmap(&space->archMmu, vaddr, 1); + OsPhysSharePageCopy(oldPaddr, &newPaddr, newPage); + /* use old page free the new one */ + if (newPaddr == oldPaddr) { + LOS_PhysPageFree(newPage); +======= newPaddr = VM_PAGE_TO_PHYS(newPage);//获取物理地址 (VOID)memset_s(OsVmPageToVaddr(newPage), PAGE_SIZE, 0, PAGE_SIZE);//获取虚拟地址 清0 status = LOS_ArchMmuQuery(&space->archMmu, vaddr, &oldPaddr, NULL);//通过虚拟地址查询老物理地址 @@ -438,11 +606,16 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame) /* use old page free the new one */ if (newPaddr == oldPaddr) {//新老物理地址一致 LOS_PhysPageFree(newPage);//继续使用旧页释放新页 +>>>>>>> remotes/origin/main newPage = NULL; } /* map all of the pages */ +<<<<<<< HEAD + status = LOS_ArchMmuMap(&space->archMmu, vaddr, newPaddr, 1, region->regionFlags); +======= status = LOS_ArchMmuMap(&space->archMmu, vaddr, newPaddr, 1, region->regionFlags);//重新映射新物理地址 +>>>>>>> remotes/origin/main if (status < 0) { VM_ERR("failed to map replacement page, status:%d", status); status = LOS_ERRNO_VM_MAP_FAILED; @@ -453,8 +626,13 @@ STATUS_T OsVmPageFaultHandler(VADDR_T vaddr, UINT32 flags, ExcContext *frame) goto DONE; } else { /* map all of the pages */ +<<<<<<< HEAD + LOS_AtomicInc(&newPage->refCounts); + status = LOS_ArchMmuMap(&space->archMmu, vaddr, newPaddr, 1, region->regionFlags); +======= LOS_AtomicInc(&newPage->refCounts);//引用数自增 status = LOS_ArchMmuMap(&space->archMmu, vaddr, newPaddr, 1, region->regionFlags);//映射新物理地址,如此下次就不会缺页了 +>>>>>>> remotes/origin/main if (status < 0) { VM_ERR("failed to map page, status:%d", status); status = LOS_ERRNO_VM_MAP_FAILED; diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_filemap.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_filemap.c index 2da700d8..cc8e97d0 100644 --- a/src/kernel_liteos_a/kernel/base/vm/los_vm_filemap.c +++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_filemap.c @@ -66,6 +66,11 @@ VOID ResetPageCacheHitInfo(int *try, int *hit) #define TRACE_TRY_CACHE() #define TRACE_HIT_CACHE() #endif +<<<<<<< HEAD + +#ifdef LOSCFG_KERNEL_VM + +======= #ifdef LOSCFG_KERNEL_VM /** @@ -80,28 +85,63 @@ VOID ResetPageCacheHitInfo(int *try, int *hit) * @param pgoff * @return STATIC */ +>>>>>>> remotes/origin/main STATIC VOID OsPageCacheAdd(LosFilePage *page, struct page_mapping *mapping, VM_OFFSET_T pgoff) { LosFilePage *fpage = NULL; +<<<<<<< HEAD + LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) { + if (fpage->pgoff > pgoff) { + LOS_ListTailInsert(&fpage->node, &page->node); +======= LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) {//遍历page_list链表 if (fpage->pgoff > pgoff) {//插入的条件,这样插入保证了按pgoff 从小到大排序 LOS_ListTailInsert(&fpage->node, &page->node);//等于挂到fpage节点的前面了 +>>>>>>> remotes/origin/main goto done_add; } } +<<<<<<< HEAD + LOS_ListTailInsert(&mapping->page_list, &page->node); + +done_add: + mapping->nrpages++; +} + +======= LOS_ListTailInsert(&mapping->page_list, 
&page->node);//将页挂到文件映射的链表上,相当于挂到了最后 done_add: mapping->nrpages++; //文件在缓存中多了一个 文件页 } ///将页面加到活动文件页LRU链表上 +>>>>>>> remotes/origin/main VOID OsAddToPageacheLru(LosFilePage *page, struct page_mapping *mapping, VM_OFFSET_T pgoff) { OsPageCacheAdd(page, mapping, pgoff); OsLruCacheAdd(page, VM_LRU_ACTIVE_FILE); } +<<<<<<< HEAD + +VOID OsPageCacheDel(LosFilePage *fpage) +{ + /* delete from file cache list */ + LOS_ListDelete(&fpage->node); + fpage->mapping->nrpages--; + + /* unmap and remove map info */ + if (OsIsPageMapped(fpage)) { + OsUnmapAllLocked(fpage); + } + + LOS_PhysPageFree(fpage->vmPage); + + LOS_MemFree(m_aucSysMem0, fpage); +} + +======= ///从页高速缓存上删除页 VOID OsPageCacheDel(LosFilePage *fpage) { @@ -124,15 +164,52 @@ VOID OsPageCacheDel(LosFilePage *fpage) 在两个地方会被被空间映射 1.缺页中断 2.克隆地址空间 **************************************************************************************************/ +>>>>>>> remotes/origin/main VOID OsAddMapInfo(LosFilePage *page, LosArchMmu *archMmu, VADDR_T vaddr) { LosMapInfo *info = NULL; +<<<<<<< HEAD + info = (LosMapInfo *)LOS_MemAlloc(m_aucSysMem0, sizeof(LosMapInfo)); +======= info = (LosMapInfo *)LOS_MemAlloc(m_aucSysMem0, sizeof(LosMapInfo));//分配一个映射信息 +>>>>>>> remotes/origin/main if (info == NULL) { VM_ERR("OsAddMapInfo alloc memory failed!"); return; } +<<<<<<< HEAD + info->page = page; + info->archMmu = archMmu; + info->vaddr = vaddr; + + LOS_ListAdd(&page->i_mmap, &info->node); + page->n_maps++; +} + +LosMapInfo *OsGetMapInfo(const LosFilePage *page, const LosArchMmu *archMmu, VADDR_T vaddr) +{ + LosMapInfo *info = NULL; + const LOS_DL_LIST *immap = &page->i_mmap; + + LOS_DL_LIST_FOR_EACH_ENTRY(info, immap, LosMapInfo, node) { + if ((info->archMmu == archMmu) && (info->vaddr == vaddr) && (info->page == page)) { + return info; + } + } + + return NULL; +} + +VOID OsDeletePageCacheLru(LosFilePage *page) +{ + /* delete from lru list */ + OsLruCacheDel(page); + /* delete from cache list and free pmm if needed */ + OsPageCacheDel(page); +} + +======= info->page = page; //文件页 info->archMmu = archMmu;//进程MMU,完成虚实地址转换 info->vaddr = vaddr; //虚拟地址 @@ -163,17 +240,26 @@ VOID OsDeletePageCacheLru(LosFilePage *page) } //解除文件页和进程的映射关系 +>>>>>>> remotes/origin/main STATIC VOID OsPageCacheUnmap(LosFilePage *fpage, LosArchMmu *archMmu, VADDR_T vaddr) { UINT32 intSave; LosMapInfo *info = NULL; LOS_SpinLockSave(&fpage->physSeg->lruLock, &intSave); +<<<<<<< HEAD + info = OsGetMapInfo(fpage, archMmu, vaddr); + if (info == NULL) { + VM_ERR("OsPageCacheUnmap get map info failed!"); + } else { + OsUnmapPageLocked(fpage, info); +======= info = OsGetMapInfo(fpage, archMmu, vaddr);//获取文件页在进程的映射信息 if (info == NULL) { VM_ERR("OsPageCacheUnmap get map info failed!"); } else { OsUnmapPageLocked(fpage, info);//解除进程和文件页映射关系 +>>>>>>> remotes/origin/main } if (!(OsIsPageMapped(fpage) && ((fpage->flags & VM_MAP_REGION_FLAG_PERM_EXECUTE) || OsIsPageDirty(fpage->vmPage)))) { @@ -182,7 +268,11 @@ STATIC VOID OsPageCacheUnmap(LosFilePage *fpage, LosArchMmu *archMmu, VADDR_T va LOS_SpinUnlockRestore(&fpage->physSeg->lruLock, intSave); } +<<<<<<< HEAD + +======= ///删除文件 +>>>>>>> remotes/origin/main VOID OsVmmFileRemove(LosVmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T pgoff) { UINT32 intSave; @@ -195,6 +285,15 @@ VOID OsVmmFileRemove(LosVmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T pg LosVmPage *mapPage = NULL; if (!LOS_IsRegionFileValid(region) || (region->unTypeData.rf.vnode == NULL)) { +<<<<<<< HEAD + return; + } + vnode = region->unTypeData.rf.vnode; + mapping = &vnode->mapping; 
/// Remove a file page mapping
VOID OsVmmFileRemove(LosVmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T pgoff)
{
    UINT32 intSave;
@@ -195,6 +285,15 @@ VOID OsVmmFileRemove(LosVmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T pg
    LosVmPage *mapPage = NULL;

    if (!LOS_IsRegionFileValid(region) || (region->unTypeData.rf.vnode == NULL)) {
        return; // only file-backed, already mapped regions apply
    }
    vnode = region->unTypeData.rf.vnode;
    mapping = &vnode->mapping;
    vaddr = region->range.base + ((UINT32)(pgoff - region->pgOff) << PAGE_SHIFT); // compute the virtual address

    status_t status = LOS_ArchMmuQuery(archMmu, vaddr, &paddr, NULL); // fetch the physical address
    if (status != LOS_OK) {
        return;
    }

    mapPage = LOS_VmPageGet(paddr); // fetch the physical page frame

    /* is page is in cache list */
    LOS_SpinLockSave(&mapping->list_lock, &intSave);
    fpage = OsFindGetEntry(mapping, pgoff);
    /* no cache or have cache but not map(cow), free it direct */
    if ((fpage == NULL) || (fpage->vmPage != mapPage)) {
        LOS_PhysPageFree(mapPage);
        LOS_ArchMmuUnmap(archMmu, vaddr, 1);
    /* this is a page cache map! */
    } else {
        OsPageCacheUnmap(fpage, archMmu, vaddr); // drop the mapping kept in the cache
        if (OsIsPageDirty(fpage->vmPage)) { // dirty-page handling
            tmpPage = OsDumpDirtyPage(fpage); // duplicate the dirty page
        }
    }
    LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
@@ -229,6 +346,17 @@ VOID OsVmmFileRemove(LosVmMapRegion *region, LosArchMmu *archMmu, VM_OFFSET_T pg
    }
    return;
}

/// Mark a page dirty: the kernel tags a page dirty once a process has modified its cached data
VOID OsMarkPageDirty(LosFilePage *fpage, const LosVmMapRegion *region, INT32 off, INT32 len)
{
    if (region != NULL) {
        OsSetPageDirty(fpage->vmPage); // set the dirty tag
        fpage->dirtyOff = off; // where the dirty range starts
        fpage->dirtyEnd = len; // where the dirty range ends
    } else {
        OsSetPageDirty(fpage->vmPage); // set the dirty tag
        if ((off + len) > fpage->dirtyEnd) {
            fpage->dirtyEnd = off + len;
        }
@@ -274,22 +403,37 @@ STATIC UINT32 GetDirtySize(LosFilePage *fpage, struct Vnode *vnode)
    return PAGE_SIZE;
}

/// Flush a dirty page: write it back to disk
STATIC INT32 OsFlushDirtyPage(LosFilePage *fpage)
{
    UINT32 ret;
    size_t len;
    char *buff = NULL;
    struct Vnode *vnode = fpage->mapping->host; /* owner of this mapping */ // the file this mapping belongs to; note it is a 1:1 relation
    if (vnode == NULL) {
        VM_ERR("page cache vnode error");
        return LOS_NOK;
    }

    len = fpage->dirtyEnd - fpage->dirtyOff; // length of the dirty data
    len = (len == 0) ? GetDirtySize(fpage, vnode) : len;
    if (len == 0) { // no dirty data at all
        OsCleanPageDirty(fpage->vmPage); // strip the page's dirty tag
        return LOS_OK;
    }
@@ -306,7 +450,11 @@ STATIC INT32 OsFlushDirtyPage(LosFilePage *fpage)
    return ret;
}

/// Duplicate a dirty page; the old page has its dirty tag torn off
LosFilePage *OsDumpDirtyPage(LosFilePage *oldFPage)
{
    LosFilePage *newFPage = NULL;
@@ -318,11 +466,19 @@ LosFilePage *OsDumpDirtyPage(LosFilePage *oldFPage)
    }

    OsCleanPageDirty(oldFPage->vmPage);
    (VOID)memcpy_s(newFPage, sizeof(LosFilePage), oldFPage, sizeof(LosFilePage)); // a plain memory copy

    return newFPage;
}

/// Flush a dirty page's data: write it back to disk
VOID OsDoFlushDirtyPage(LosFilePage *fpage)
{
    if (fpage == NULL) {
@@ -344,7 +500,11 @@ STATIC VOID OsReleaseFpage(struct page_mapping *mapping, LosFilePage *fpage)
    LOS_SpinUnlockRestore(lruLock, lruSave);
    LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
}

/// Delete a mapping record
VOID OsDelMapInfo(LosVmMapRegion *region, LosVmPgFault *vmf, BOOL cleanDirty)
{
    UINT32 intSave;
@@ -365,9 +525,15 @@ VOID OsDelMapInfo(LosVmMapRegion *region, LosVmPgFault *vmf, BOOL cleanDirty)
    }

    if (cleanDirty) {
        OsCleanPageDirty(fpage->vmPage); // restore the page to a clean state
    }
    info = OsGetMapInfo(fpage, &region->space->archMmu, (vaddr_t)vmf->vaddr); // fetch the mapping record behind the virtual address
    if (info != NULL) {
        fpage->n_maps--;
        LOS_ListDelete(&info->node);
@@ -378,10 +544,14 @@ VOID OsDelMapInfo(LosVmMapRegion *region, LosVmPgFault *vmf, BOOL cleanDirty)
    }
    LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
}
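Note: OsFlushDirtyPage only writes the dirty byte range [dirtyOff, dirtyEnd) back to the file rather than the whole page. A standalone sketch of that bookkeeping, under assumed semantics inferred from the code (invented types; write_back stands in for the vnode write hook):

#include <stdint.h>

#define PAGE_SIZE 4096u

struct fpage {
    unsigned char data[PAGE_SIZE];
    uint32_t dirty_off, dirty_end; /* dirty byte range; empty when dirty == 0 */
    int dirty;
};

/* widen the dirty range to cover a new modification */
static void mark_dirty(struct fpage *p, uint32_t off, uint32_t len)
{
    if (!p->dirty) {
        p->dirty = 1;
        p->dirty_off = off;
        p->dirty_end = off + len;
        return;
    }
    if (off < p->dirty_off)       p->dirty_off = off;
    if (off + len > p->dirty_end) p->dirty_end = off + len;
}

/* flush: write only the dirty bytes, then clear the tag */
static void flush(struct fpage *p, void (*write_back)(const void *buf, uint32_t off, uint32_t len))
{
    if (p->dirty && p->dirty_end > p->dirty_off) {
        write_back(p->data + p->dirty_off, p->dirty_off, p->dirty_end - p->dirty_off);
    }
    p->dirty = 0;
}

Tracking the sub-page range keeps small writes from turning into full-page disk writes.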
/*! Handle a fault on a file page: first read the data in from disk, then serve the page again.
 Called by OsDoReadFault(...), OsDoCowFault(...), OsDoSharedFault(...) and friends. */
INT32 OsVmmFileFault(LosVmMapRegion *region, LosVmPgFault *vmf)
{
    INT32 ret;
@@ -393,7 +563,11 @@ INT32 OsVmmFileFault(LosVmMapRegion *region, LosVmPgFault *vmf)
    struct page_mapping *mapping = NULL;
    LosFilePage *fpage = NULL;

    if (!LOS_IsRegionFileValid(region) || (region->unTypeData.rf.vnode == NULL) || (vmf == NULL)) { // is the file actually mapped into memory?
        VM_ERR("Input param is NULL");
        return LOS_NOK;
    }
@@ -402,6 +576,15 @@ INT32 OsVmmFileFault(LosVmMapRegion *region, LosVmPgFault *vmf)

    /* get or create a new cache node */
    LOS_SpinLockSave(&mapping->list_lock, &intSave);
    fpage = OsFindGetEntry(mapping, vmf->pgoff); // look the file page up
    TRACE_TRY_CACHE();
    if (fpage != NULL) { // found: the page is already in the page cache
        TRACE_HIT_CACHE();
        OsPageRefIncLocked(fpage);
    } else { // a genuine miss: the page cache does not have it
        fpage = OsPageCacheAlloc(mapping, vmf->pgoff); // allocate a file page, fully initialized, including the vmPage (physical frame)
        if (fpage == NULL) {
            LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
            VM_ERR("Failed to alloc a page frame");
            return LOS_NOK;
        }
        newCache = true; // a new file page was allocated
    }
    OsSetPageLocked(fpage->vmPage); // lock the vmPage
    LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
    kvaddr = OsVmPageToVaddr(fpage->vmPage);

    /* read file to new page cache */
    if (newCache) { // a fresh cache page
        ret = vnode->vop->ReadPage(vnode, kvaddr, fpage->pgoff << PAGE_SHIFT);
        if (ret == 0) {
            VM_ERR("Failed to read from file!");
@@ -429,18 +624,37 @@ INT32 OsVmmFileFault(LosVmMapRegion *region, LosVmPgFault *vmf)
            return LOS_NOK;
        }
        LOS_SpinLockSave(&mapping->list_lock, &intSave);
        OsAddToPageacheLru(fpage, mapping, vmf->pgoff); // hang fpage into both the page cache and the LRU cache
        LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
    }

    LOS_SpinLockSave(&mapping->list_lock, &intSave);
    /* cow fault case no need to save mapinfo */
    if (!((vmf->flags & VM_MAP_PF_FLAG_WRITE) && !(region->regionFlags & VM_MAP_REGION_FLAG_SHARED))) {
        OsAddMapInfo(fpage, &region->space->archMmu, (vaddr_t)vmf->vaddr); // record the <virtual address, file page> mapping, so the process can reach the file page through the virtual address from now on
        fpage->flags = region->regionFlags;
    }

    /* share page fault, mark the page dirty */
    if ((vmf->flags & VM_MAP_PF_FLAG_WRITE) && (region->regionFlags & VM_MAP_REGION_FLAG_SHARED)) { // a write happened on a shared region
        OsMarkPageDirty(fpage, region, 0, 0); // tag the page dirty; the kernel will write it back to disk at a suitable time
    }

    vmf->pageKVaddr = kvaddr;
    LOS_SpinUnlockRestore(&mapping->list_lock, intSave);
    return LOS_OK;
}

/// Flush the file cache: go over every fpage once, washing the dirty ones onto dirtyList; read together with OsFileCacheRemove
VOID OsFileCacheFlush(struct page_mapping *mapping)
{
    UINT32 intSave;
    UINT32 lruLock;
    LOS_DL_LIST_HEAD(dirtyList); // expands to: LOS_DL_LIST list = { &(list), &(list) };
    LosFilePage *ftemp = NULL;
    LosFilePage *fpage = NULL;
@@ -462,18 +681,34 @@ VOID OsFileCacheFlush(struct page_mapping *mapping)
        return;
    }
    LOS_SpinLockSave(&mapping->list_lock, &intSave);
    LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) { // pull each node of page_list into fpage
        LOS_SpinLockSave(&fpage->physSeg->lruLock, &lruLock);
        if (OsIsPageDirty(fpage->vmPage)) { // is it dirty?
            ftemp = OsDumpDirtyPage(fpage); // a neat move: copy out a new page, and the old one goes on serving, no longer dirty
            if (ftemp != NULL) {
                LOS_ListTailInsert(&dirtyList, &ftemp->node); // queue the copy on the dirty list, waiting to be written back
            }
        }
        LOS_SpinUnlockRestore(&fpage->physSeg->lruLock, lruLock);
    }
    LOS_SpinUnlockRestore(&mapping->list_lock, intSave);

    LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, ftemp, &dirtyList, LosFilePage, node) { // study this macro closely; the key is &(item)->member != (list);
        OsDoFlushDirtyPage(fpage); // flushed right away, which is why dirtyList need not be a global
    }
}

/******************************************************************************
 Remove the file cache: wipe every trace of the file from the page cache.
 The mapping argument can be thought of as the file's identity card in memory.
 ******************************************************************************/
VOID OsFileCacheRemove(struct page_mapping *mapping)
{
    UINT32 intSave;
    UINT32 lruSave;
    SPIN_LOCK_S *lruLock = NULL;
    LOS_DL_LIST_HEAD(dirtyList); // define and initialize a circular doubly linked list named dirtyList, used to hold dirty pages
    LosFilePage *ftemp = NULL;
    LosFilePage *fpage = NULL;
    LosFilePage *fnext = NULL;

    LOS_SpinLockSave(&mapping->list_lock, &intSave); // several processes may operate on this concurrently, so the lock is a must
    LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &mapping->page_list, LosFilePage, node) { // walk every file page the file has in memory (say pages 1, 4, 8): not necessarily contiguous, it depends on the user's read pattern
        lruLock = &fpage->physSeg->lruLock;
        LOS_SpinLockSave(lruLock, &lruSave);
        if (OsIsPageDirty(fpage->vmPage)) {
            ftemp = OsDumpDirtyPage(fpage);
            if (ftemp != NULL) {
                LOS_ListTailInsert(&dirtyList, &ftemp->node);
            }
        }

        OsDeletePageCacheLru(fpage);
        LOS_SpinUnlockRestore(lruLock, lruSave);
    }
    LOS_SpinUnlockRestore(&mapping->list_lock, intSave);

    LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &dirtyList, LosFilePage, node) {
        OsDoFlushDirtyPage(fpage);
    }
}

LosVmFileOps g_commVmOps = {
    .open = NULL,
    .close = NULL,
    .fault = OsVmmFileFault,
    .remove = OsVmmFileRemove,
};

INT32 OsVfsFileMmap(struct file *filep, LosVmMapRegion *region)
{
    region->unTypeData.rf.vmFOps = &g_commVmOps; // the file operations
    region->unTypeData.rf.vnode = filep->f_vnode;
    region->unTypeData.rf.f_oflags = filep->f_oflags;

    return ENOERR;
}

/*! Named mapping, i.e. file mapping, as opposed to anonymous mapping.
 The filep argument is a file in the broad sense: in the HarmonyOS kernel, directories,
 regular files, character devices, block devices, sockets, pipes and links are all files. */
STATUS_T OsNamedMMap(struct file *filep, LosVmMapRegion *region)
{
    struct Vnode *vnode = NULL;
@@ -545,10 +823,17 @@ STATUS_T OsNamedMMap(struct file *filep, LosVmMapRegion *region)
    vnode->useCount++;
    VnodeDrop();
    if (filep->ops != NULL && filep->ops->mmap != NULL) {
        if (vnode->type == VNODE_TYPE_CHR || vnode->type == VNODE_TYPE_BLK) { // a block or character device, /dev/...
            LOS_SetRegionTypeDev(region); // tag the region as a device
        } else {
            LOS_SetRegionTypeFile(region); // tag the region as a file
        }
        int ret = filep->ops->mmap(filep, region);
        if (ret != LOS_OK) {
@@ -564,14 +849,26 @@ STATUS_T OsNamedMMap(struct file *filep, LosVmMapRegion *region)
    return LOS_OK;
}

/**************************************************************************************************
 Find the given file page by position among the file's mapped pages.
 Example: mapping->page_list may hold the data of pages 1, 3, 4 and 6 of the file only,
 so looking for page 5 will come up empty.
 **************************************************************************************************/
LosFilePage *OsFindGetEntry(struct page_mapping *mapping, VM_OFFSET_T pgoff)
{
    LosFilePage *fpage = NULL;

    LOS_DL_LIST_FOR_EACH_ENTRY(fpage, &mapping->page_list, LosFilePage, node) { // walk the file pages
        if (fpage->pgoff == pgoff) { // found the wanted page
            return fpage;
        }

        if (fpage->pgoff > pgoff) { // already past it without a match: it is not on the list, so no point looking further,
            break; // because the nodes on mapping->page_list are sorted by fpage->pgoff, ascending
        }
    }

    return NULL;
}
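Note: OsFindGetEntry exploits the pgoff ordering that OsPageCacheAdd maintains, so a lookup can stop as soon as it passes the target offset. A standalone sketch (singly linked for brevity; invented names):

#include <stddef.h>

struct fpage { struct fpage *next; unsigned long pgoff; };

/* the list is sorted by pgoff ascending, so a miss can bail out early */
static struct fpage *find_page(struct fpage *head, unsigned long pgoff)
{
    for (struct fpage *p = head; p != NULL; p = p->next) {
        if (p->pgoff == pgoff) {
            return p;        /* exact hit */
        }
        if (p->pgoff > pgoff) {
            break;           /* went past it: the page cannot be further down the list */
        }
    }
    return NULL;
}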
/* need mutex & change memory to dma zone. */
/*! Allocate a file page (LosFilePage) for the page cache.
 Direct Memory Access (DMA) is a high-speed data-transfer technique that lets data be read
 and written directly between a peripheral and memory. The whole transfer runs under a
 "DMA controller"; the CPU only does a little work when the transfer starts and ends
 (interrupt handling at both points). */
LosFilePage *OsPageCacheAlloc(struct page_mapping *mapping, VM_OFFSET_T pgoff)
{
    VOID *kvaddr = NULL;
@@ -598,26 +899,58 @@ LosFilePage *OsPageCacheAlloc(struct page_mapping *mapping, VM_OFFSET_T pgoff)
    LosVmPage *vmPage = NULL;
    LosFilePage *fpage = NULL;

    vmPage = LOS_PhysPageAlloc(); // first grab a physical page
    if (vmPage == NULL) {
        VM_ERR("alloc vm page failed");
        return NULL;
    }

    physSeg = OsVmPhysSegGet(vmPage); // the segment the page lives in
    kvaddr = OsVmPageToVaddr(vmPage); // its kernel virtual address; see that function's comment and make sure you understand it thoroughly
    if ((physSeg == NULL) || (kvaddr == NULL)) {
        LOS_PhysPageFree(vmPage); // on any failure the vmPage must be given back
        VM_ERR("alloc vm page failed!");
        return NULL;
    }

    fpage = (LosFilePage *)LOS_MemAlloc(m_aucSysMem0, sizeof(LosFilePage)); // carve a LosFilePage out of the memory pool
    if (fpage == NULL) {
        LOS_PhysPageFree(vmPage); // on any failure the vmPage must be given back
        VM_ERR("Failed to allocate for page!");
        return NULL;
    }

    (VOID)memset_s((VOID *)fpage, sizeof(LosFilePage), 0, sizeof(LosFilePage)); // a standard-library call: zero it

    LOS_ListInit(&fpage->i_mmap); // init the mapping list; MapInfo records hang off it
    LOS_ListInit(&fpage->node);
    LOS_ListInit(&fpage->lru);
    fpage->n_maps = 0;
    fpage->dirtyOff = PAGE_SIZE;
    fpage->dirtyEnd = 0;
    fpage->physSeg = physSeg;
    fpage->vmPage = vmPage;
    fpage->mapping = mapping; // remember every file-page mapping
    fpage->pgoff = pgoff;     // the file is cut into pages; this is the page index
    (VOID)memset_s(kvaddr, PAGE_SIZE, 0, PAGE_SIZE); // zero the page contents

    return fpage;
}
@@ -644,4 +978,8 @@ INT32 OsVfsFileMmap(struct file *filep, LosVmMapRegion *region)
}

#endif
-#endif
\ No newline at end of file
+#endif
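Note on the initialization above: dirtyOff = PAGE_SIZE with dirtyEnd = 0 encodes an empty dirty range, so the first dirtying can only shrink dirtyOff and grow dirtyEnd. A tiny standalone check of that convention, under semantics inferred from the code (an assumption, not a guarantee):

#include <assert.h>

#define PAGE_SIZE 4096u

int main(void)
{
    unsigned dirty_off = PAGE_SIZE, dirty_end = 0;  /* empty range: off > end, nothing to flush */
    assert(dirty_end <= dirty_off);

    /* dirtying bytes [100, 300) pulls the bounds inward/outward */
    unsigned off = 100, len = 200;
    if (off < dirty_off)       dirty_off = off;
    if (off + len > dirty_end) dirty_end = off + len;
    assert(dirty_off == 100 && dirty_end == 300);
    return 0;
}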
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_iomap.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_iomap.c
index 0d076a53..bb521607 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_iomap.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_iomap.c
@@ -1,4 +1,6 @@
/*
 Direct memory access. Direct Memory Access (DMA) is a memory-access technique in computing:
 it lets certain hardware subsystems inside a computer (peripherals) read and write system
 memory directly and independently, without the central processor (CPU) stepping in.
@@ -38,6 +40,7 @@
 */
/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
 *
@@ -75,7 +78,11 @@
#include "los_vm_map.h"
#include "los_memory.h"

/// Allocate DMA memory
VOID *LOS_DmaMemAlloc(DMA_ADDR_T *dmaAddr, size_t size, size_t align, enum DmaMemType type)
{
    VOID *kVaddr = NULL;
@@ -92,24 +99,42 @@ VOID *LOS_DmaMemAlloc(DMA_ADDR_T *dmaAddr, size_t size, size_t align, enum DmaMe
#ifdef LOSCFG_KERNEL_VM
    kVaddr = LOS_KernelMallocAlign(size, align);
#else
    kVaddr = LOS_MemAllocAlign(OS_SYS_MEM_ADDR, size, align); // bypass the memory pool and take physical pages directly
#endif
    if (kVaddr == NULL) {
        VM_ERR("failed, size = %u, align = %u", size, align);
        return NULL;
    }

    if (dmaAddr != NULL) {
        *dmaAddr = (DMA_ADDR_T)LOS_PaddrQuery(kVaddr); // look up the physical address; DMA pours data straight into physical memory
    }

    if (type == DMA_NOCACHE) { // uncached mode: compute the matching uncached virtual address
        kVaddr = (VOID *)VMM_TO_UNCACHED_ADDR((UINTPTR)kVaddr);
    }

    return kVaddr;
}

/// Free a DMA pointer
VOID LOS_DmaMemFree(VOID *vaddr)
{
    UINTPTR addr;
@@ -118,6 +143,15 @@ VOID LOS_DmaMemFree(VOID *vaddr)
        return;
    }
    addr = (UINTPTR)vaddr;

    // the uncached window
    if ((addr >= UNCACHED_VMM_BASE) && (addr < UNCACHED_VMM_BASE + UNCACHED_VMM_SIZE)) {
        addr = UNCACHED_TO_VMM_ADDR(addr); // convert back to the cached (VMM) address
#ifdef LOSCFG_KERNEL_VM
        LOS_KernelFree((VOID *)addr);
#else
        LOS_MemFree(OS_SYS_MEM_ADDR, (VOID *)addr); // free through the memory pool
#endif
    } else if ((addr >= KERNEL_VMM_BASE) && (addr < KERNEL_VMM_BASE + KERNEL_VMM_SIZE)) {
#ifdef LOSCFG_KERNEL_VM
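Note: LOS_DmaMemAlloc/LOS_DmaMemFree translate one physical buffer between two kernel virtual windows, a cached one and an uncached alias at a fixed offset. A standalone sketch of that address arithmetic (base addresses invented for illustration):

#include <stdint.h>

/* two fixed virtual windows aliasing the same physical memory (example values) */
#define CACHED_BASE   0x40000000u
#define UNCACHED_BASE 0xA0000000u
#define WINDOW_SIZE   0x10000000u

static uintptr_t to_uncached(uintptr_t va) { return va - CACHED_BASE + UNCACHED_BASE; }
static uintptr_t to_cached(uintptr_t va)   { return va - UNCACHED_BASE + CACHED_BASE; }

static int is_uncached(uintptr_t va)
{
    return va >= UNCACHED_BASE && va < UNCACHED_BASE + WINDOW_SIZE;
}

/* the free path mirrors the alloc path: normalize to the cached address first */
static uintptr_t normalize_for_free(uintptr_t va)
{
    return is_uncached(va) ? to_cached(va) : va;
}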
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_map.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_map.c
index 4aeef716..21b8809e 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_map.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_map.c
@@ -1,4 +1,6 @@
/*
 Basic concepts: virtual memory management is the technique by which a computer system
 manages memory. Every process gets a contiguous virtual address space whose size is set
 by the CPU's word width; a 32-bit hardware platform can address at most 0-4GiB. That
 4GiB space is split in two parts, with the LiteOS-A kernel occupying the upper 3GiB,
@@ -39,6 +41,7 @@
 */
/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
 *
@@ -91,6 +94,14 @@

#ifdef LOSCFG_KERNEL_VM

#define VM_MAP_WASTE_MEM_LEVEL (PAGE_SIZE >> 2) ///< the wasted-memory threshold (1K)
LosMux g_vmSpaceListMux;                        ///< mutex protecting g_vmSpaceList
LOS_DL_LIST_HEAD(g_vmSpaceList);                ///< the global list node; every virtual space hangs off it
LosVmSpace g_kVmSpace;
LosVmSpace g_vMallocSpace;                      ///< the kernel allocation space, used for kernel memory allocation
@@ -101,10 +112,22 @@ LosVmSpace g_vMallocSpace;

/************************************************************
 * Interfaces for fetching process spaces
 ************************************************************/
/// Get the current process's space structure
LosVmSpace *LOS_CurrSpaceGet(VOID)
{
    return OsCurrProcessGet()->vmSpace;
}

/// Get the space structure that owns a virtual address
LosVmSpace *LOS_SpaceGet(VADDR_T vaddr)
{
    if (LOS_IsKernelAddress(vaddr)) {
        return LOS_GetKVmSpace();
    } else if (LOS_IsUserAddress(vaddr)) {
        return LOS_CurrSpaceGet();
    } else if (LOS_IsVmallocAddress(vaddr)) { // is it a kernel allocation (vmalloc) address?
        return LOS_GetVmallocSpace(); // get the kernel allocation space
    } else {
        return NULL;
    }
}

/// There is only one kernel space, g_kVmSpace; all kernel processes share it
LosVmSpace *LOS_GetKVmSpace(VOID)
{
    return &g_kVmSpace;
}

/// Get the space list; g_vmSpaceList holds g_kVmSpace, g_vMallocSpace, and every user process's own space
LOS_DL_LIST *LOS_GetVmSpaceList(VOID)
{
    return &g_vmSpaceList;
}

/// Get the kernel heap space global
LosVmSpace *LOS_GetVmallocSpace(VOID)
{
    return &g_vMallocSpace;
}

/************************************************************
 * Operations on virtual address regions
 ************************************************************/
/// Free a node hung on the red-black tree, which amounts to freeing the region
ULONG_T OsRegionRbFreeFn(LosRbNode *pstNode)
{
    LOS_MemFree(m_aucSysMem0, pstNode);
    return LOS_OK;
}

/// Get the region behind a red-black tree node
VOID *OsRegionRbGetKeyFn(LosRbNode *pstNode)
{
    LosVmMapRegion *region = (LosVmMapRegion *)LOS_DL_LIST_ENTRY(pstNode, LosVmMapRegion, rbNode);
    return (VOID *)&region->range;
}

/// Compare two red-black tree nodes
ULONG_T OsRegionRbCmpKeyFn(const VOID *pNodeKeyA, const VOID *pNodeKeyB)
{
    LosVmMapRange rangeA = *(LosVmMapRange *)pNodeKeyA;
@@ -159,6 +206,24 @@ ULONG_T OsRegionRbCmpKeyFn(const VOID *pNodeKeyA, const VOID *pNodeKeyB)
    UINT32 startB = rangeB.base;
    UINT32 endB = rangeB.base + rangeB.size - 1;

    if (startA > endB) { // A's base lies beyond B's end
        return RB_BIGGER; // so region A is the bigger one, to the right
    } else if (startA >= startB) {
        if (endA <= endB) {
            return RB_EQUAL;
        } else {
            return RB_BIGGER;
        }
    } else if (startA <= startB) {
        if (endA >= endB) {
            return RB_EQUAL;
        } else {
            return RB_SMALLER;
        }
    } else if (endA < startB) { // A's end lies before B's base
        return RB_SMALLER; // so A is to the left
    }
    return RB_EQUAL;
}
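Note: the comparator above orders regions by address interval: disjoint intervals compare by position, and containment counts as equality, so looking up any enclosed range finds its region. A standalone version of the same 3-way comparison (invented enum):

#include <stdint.h>

enum cmp { SMALLER = -1, EQUAL = 0, BIGGER = 1 };

struct range { uint32_t base, size; };

static enum cmp range_cmp(struct range a, struct range b)
{
    uint32_t endA = a.base + a.size - 1;
    uint32_t endB = b.base + b.size - 1;

    if (a.base > endB) return BIGGER;                   /* disjoint, A entirely to the right */
    if (endA < b.base) return SMALLER;                  /* disjoint, A entirely to the left  */
    if (a.base >= b.base && endA <= endB) return EQUAL; /* A inside B: treat as a match */
    if (b.base >= a.base && endB <= endA) return EQUAL; /* B inside A: treat as a match */
    return a.base < b.base ? SMALLER : BIGGER;          /* partial overlap: order by base */
}

Treating containment as equality is what lets a point query (a 1-byte range) land on the region that covers it.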
/*!
 * @brief OsVmSpaceInitCommon Initialize a process virtual space; the virtual address of the L1 table must be supplied
 *
@@ -193,12 +266,24 @@ STATIC BOOL OsVmSpaceInitCommon(LosVmSpace *vmSpace, VADDR_T *virtTtb)
{
    LOS_RbInitTree(&vmSpace->regionRbTree, OsRegionRbCmpKeyFn, OsRegionRbFreeFn, OsRegionRbGetKeyFn); // the space organizes its regions as a red-black tree

    status_t retval = LOS_MuxInit(&vmSpace->regionMux, NULL); // init the mutex
    if (retval != LOS_OK) {
        VM_ERR("Create mutex for vm space failed, status: %d", retval);
        return FALSE;
    }

    (VOID)LOS_MuxAcquire(&g_vmSpaceListMux);
    LOS_ListAdd(&g_vmSpaceList, &vmSpace->node); // hang the space onto the global doubly linked list of virtual spaces
    (VOID)LOS_MuxRelease(&g_vmSpaceListMux);

    return OsArchMmuInit(&vmSpace->archMmu, virtTtb); // set up the MMU, laying the groundwork for the virtual-physical mappings to come
}

VOID OsVmMapInit(VOID)
{
    status_t retval = LOS_MuxInit(&g_vmSpaceListMux, NULL); // init the space-list mutex
    if (retval != LOS_OK) {
        VM_ERR("Create mutex for g_vmSpaceList failed, status: %d", retval);
    }
}

/// Initialize the kernel virtual space
BOOL OsKernVmSpaceInit(LosVmSpace *vmSpace, VADDR_T *virtTtb) // the kernel page table is placed in the bss segment at build time; the L1 table is shared
{
    vmSpace->base = KERNEL_ASPACE_BASE;
    vmSpace->size = KERNEL_ASPACE_SIZE;
    vmSpace->mapBase = KERNEL_VMM_BASE;
    vmSpace->mapSize = KERNEL_VMM_SIZE;
#ifdef LOSCFG_DRIVERS_TZDRIVER
    vmSpace->codeStart = 0;
    vmSpace->codeEnd = 0;
#endif
    return OsVmSpaceInitCommon(vmSpace, virtTtb);
}

/// Initialize the kernel dynamic (heap) space
BOOL OsVMallocSpaceInit(LosVmSpace *vmSpace, VADDR_T *virtTtb)
{
@@ -232,10 +327,45 @@ BOOL OsVMallocSpaceInit(LosVmSpace *vmSpace, VADDR_T *virtTtb)
    vmSpace->base = VMALLOC_START;    // base of the kernel heap space
    vmSpace->size = VMALLOC_SIZE;     // size of the kernel heap space
    vmSpace->mapBase = VMALLOC_START; // base of its mapping area
    vmSpace->mapSize = VMALLOC_SIZE;  // size of its mapping area
#ifdef LOSCFG_DRIVERS_TZDRIVER
    vmSpace->codeStart = 0;
    vmSpace->codeEnd = 0;
#endif
    return OsVmSpaceInitCommon(vmSpace, virtTtb); // set up the MMU, laying the groundwork for the virtual-physical mappings to come
}

VOID OsKSpaceInit(VOID)
{
    OsVmMapInit();
    OsKernVmSpaceInit(&g_kVmSpace, OsGFirstTableGet());
    OsVMallocSpaceInit(&g_vMallocSpace, OsGFirstTableGet());
}

BOOL OsUserVmSpaceInit(LosVmSpace *vmSpace, VADDR_T *virtTtb)
{
    vmSpace->base = USER_ASPACE_BASE;
    vmSpace->size = USER_ASPACE_SIZE;
    vmSpace->mapBase = USER_MAP_BASE;
    vmSpace->mapSize = USER_MAP_SIZE;
@@ -263,17 +393,40 @@ BOOL OsUserVmSpaceInit(LosVmSpace *vmSpace, VADDR_T *virtTtb)
    vmSpace->heapBase = USER_HEAP_BASE; // base of the user heap; only user processes set this; it is where dynamic memory starts
    vmSpace->heapNow = USER_HEAP_BASE;  // the heap's current top; user heap space can be grown through the do_brk() system call
    vmSpace->heap = NULL;               // the most recently allocated heap region
#ifdef LOSCFG_DRIVERS_TZDRIVER
    vmSpace->codeStart = 0;
    vmSpace->codeEnd = 0;
#endif
    return OsVmSpaceInitCommon(vmSpace, virtTtb); // set up the MMU, laying the groundwork for the virtual-physical mappings to come
}

/// Create a user process space
LosVmSpace *OsCreateUserVmSpace(VOID)
{
    BOOL retVal = FALSE;

    LosVmSpace *space = LOS_MemAlloc(m_aucSysMem0, sizeof(LosVmSpace)); // the user space object itself is allocated from kernel memory
/// Create a user process space
LosVmSpace *OsCreateUserVmSpace(VOID)
{
    BOOL retVal = FALSE;

    LosVmSpace *space = LOS_MemAlloc(m_aucSysMem0, sizeof(LosVmSpace)); // the space descriptor itself lives in kernel memory
    if (space == NULL) {
        return NULL;
    }

    VADDR_T *ttb = LOS_PhysPagesAllocContiguous(1); // one physical page for the L1 translation table
    if (ttb == NULL) {
        (VOID)LOS_MemFree(m_aucSysMem0, space);
        return NULL;
    }

    (VOID)memset_s(ttb, PAGE_SIZE, 0, PAGE_SIZE); // zero the 4K table
    retVal = OsUserVmSpaceInit(space, ttb);       // init the user space and its MMU
    LosVmPage *vmPage = OsVmVaddrToPage(ttb);     // locate the page frame holding the table
    if ((retVal == FALSE) || (vmPage == NULL)) {
        (VOID)LOS_MemFree(m_aucSysMem0, space);
        LOS_PhysPagesFreeContiguous(ttb, 1);
        return NULL;
    }
    LOS_ListAdd(&space->archMmu.ptList, &(vmPage->node)); // page-table list: the L1 table goes on first, L2 tables will follow

    return space;
}

STATIC BOOL OsVmSpaceParamCheck(const LosVmSpace *vmSpace) // does something this simple really need its own function?
{
    if (vmSpace == NULL) {
        return FALSE;
    }
    return TRUE;
}
/// Clone a virtual memory space; used when forking a process
STATUS_T LOS_VmSpaceClone(UINT32 cloneFlags, LosVmSpace *oldVmSpace, LosVmSpace *newVmSpace)
{
    LosRbNode *pstRbNode = NULL;
    LosRbNode *pstRbNodeNext = NULL;
@@ -321,6 +486,18 @@ STATUS_T LOS_VmSpaceClone(UINT32 cloneFlags, LosVmSpace *oldVmSpace, LosVmSpace
        return LOS_ERRNO_VM_INVALID_ARGS;
    }

    if ((OsIsVmRegionEmpty(oldVmSpace) == TRUE) || (oldVmSpace == &g_kVmSpace)) { // the kernel space is unique and must never be cloned
        return LOS_ERRNO_VM_INVALID_ARGS;
    }

    /* search the region list */
    newVmSpace->mapBase = oldVmSpace->mapBase;
    newVmSpace->heapBase = oldVmSpace->heapBase;
    newVmSpace->heapNow = oldVmSpace->heapNow; // copy how far the heap has grown
    (VOID)LOS_MuxAcquire(&oldVmSpace->regionMux);
    RB_SCAN_SAFE(&oldVmSpace->regionRbTree, pstRbNode, pstRbNodeNext) // walk every region in the tree
        LosVmMapRegion *oldRegion = (LosVmMapRegion *)pstRbNode;
#if defined(LOSCFG_KERNEL_SHM) && defined(LOSCFG_IPC_CONTAINER)
        if ((oldRegion->regionFlags & VM_MAP_REGION_FLAG_SHM) && (cloneFlags & CLONE_NEWIPC)) {
            continue;
        }
#endif
        LosVmMapRegion *newRegion = OsVmRegionDup(newVmSpace, oldRegion, oldRegion->range.base, oldRegion->range.size); // duplicate the region
        if (newRegion == NULL) {
            VM_ERR("dup new region failed");
            ret = LOS_ERRNO_VM_NO_MEMORY;
            break;
        }

#ifdef LOSCFG_KERNEL_SHM
        if (oldRegion->regionFlags & VM_MAP_REGION_FLAG_SHM) { // a shared-memory region is forked as-is:
            OsShmFork(newVmSpace, oldRegion, newRegion);       // both spaces use the same pages, no remapping below is needed
            continue;
        }
#endif

        if (oldRegion == oldVmSpace->heap) { // remember which region is the heap
            newVmSpace->heap = newRegion;
        }

        numPages = newRegion->range.size >> PAGE_SHIFT;
        for (i = 0; i < numPages; i++) { // map the new region page by page, copy-on-write style
            vaddr = newRegion->range.base + (i << PAGE_SHIFT);
            if (LOS_ArchMmuQuery(&oldVmSpace->archMmu, vaddr, &paddr, &flags) != LOS_OK) {
                continue; // page not mapped in the parent, nothing to share
            }

            page = LOS_VmPageGet(paddr);
            if (page != NULL) {
                LOS_AtomicInc(&page->refCounts); // one more space references this physical page
            }
            if (flags & VM_MAP_REGION_FLAG_PERM_WRITE) { // drop write permission in the parent as well,
                LOS_ArchMmuUnmap(&oldVmSpace->archMmu, vaddr, 1); // so the first write in either space faults and copies
                LOS_ArchMmuMap(&oldVmSpace->archMmu, vaddr, paddr, 1, flags & ~VM_MAP_REGION_FLAG_PERM_WRITE);
            }
            LOS_ArchMmuMap(&newVmSpace->archMmu, vaddr, paddr, 1, flags & ~VM_MAP_REGION_FLAG_PERM_WRITE);

#ifdef LOSCFG_FS_VFS // file system enabled
            if (LOS_IsRegionFileValid(oldRegion)) { // a file-backed region
                LosFilePage *fpage = NULL;
                LOS_SpinLockSave(&oldRegion->unTypeData.rf.vnode->mapping.list_lock, &intSave);
                fpage = OsFindGetEntry(&oldRegion->unTypeData.rf.vnode->mapping, newRegion->pgOff + i);
                if ((fpage != NULL) && (fpage->vmPage == page)) { /* cow page no need map */
                    OsAddMapInfo(fpage, &newVmSpace->archMmu, vaddr); // record that this process maps the file page
                }
                LOS_SpinUnlockRestore(&oldRegion->unTypeData.rf.vnode->mapping.list_lock, intSave);
            }
#endif
        }
    RB_SCAN_SAFE_END(&oldVmSpace->regionRbTree, pstRbNode, pstRbNodeNext)
    (VOID)LOS_MuxRelease(&oldVmSpace->regionMux);
    return ret;
}

/// Find the region an address range belongs to by searching the red-black tree
LosVmMapRegion *OsFindRegion(LosRbTree *regionRbTree, VADDR_T vaddr, size_t len)
{
    LosVmMapRegion *regionRst = NULL;
@@ -402,18 +628,30 @@ LosVmMapRegion *OsFindRegion(LosRbTree *regionRbTree, VADDR_T vaddr, size_t len)
    }
    return regionRst;
}
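The per-page loop in LOS_VmSpaceClone is the copy-on-write setup: both spaces end up mapping the same frame without write permission, and the frame's reference count records how many spaces use it. A minimal standalone model of that bookkeeping; the permission bit and the frame struct are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

#define PERM_WRITE 0x4u /* illustrative permission bit */

typedef struct {
    unsigned long paddr;
    int refCounts;
} Frame;

/* share one frame into a child mapping, COW style */
static unsigned int CowShare(Frame *frame, unsigned int parentFlags,
                             unsigned int *childFlags)
{
    frame->refCounts++;                      /* one more space references the frame */
    *childFlags = parentFlags & ~PERM_WRITE; /* child maps it read-only */
    return parentFlags & ~PERM_WRITE;        /* parent is downgraded too, so either
                                                side's first write faults and copies */
}

int main(void)
{
    Frame f = { 0x80200000ul, 1 };
    unsigned int parent = 0x7u; /* read|write|exec, illustrative */
    unsigned int child;

    parent = CowShare(&f, parent, &child);
    printf("refCounts=%d parent=%#x child=%#x\n", f.refCounts, parent, child);
    /* prints: refCounts=2 parent=0x3 child=0x3 */
    return 0;
}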
/// Look up the region, if any, that contains a start address within a process space
LosVmMapRegion *LOS_RegionFind(LosVmSpace *vmSpace, VADDR_T addr)
{
    LosVmMapRegion *region = NULL;

    /* spaces are isolated, so only tasks (threads) of one process contend here;
     * a mutex suffices, while spinlocks are reserved for contention between CPU cores */
    (VOID)LOS_MuxAcquire(&vmSpace->regionMux);
    region = OsFindRegion(&vmSpace->regionRbTree, addr, 1);
    (VOID)LOS_MuxRelease(&vmSpace->regionMux);

    return region;
}

/// Look up the region, if any, that contains a whole address range
LosVmMapRegion *LOS_RegionRangeFind(LosVmSpace *vmSpace, VADDR_T addr, size_t len)
{
    LosVmMapRegion *region = NULL;
@@ -424,14 +662,22 @@ LosVmMapRegion *LOS_RegionRangeFind(LosVmSpace *vmSpace, VADDR_T addr, size_t le
    return region;
}

/// Allocate an address range of the given length
VADDR_T OsAllocRange(LosVmSpace *vmSpace, size_t len)
{
    LosVmMapRegion *curRegion = NULL;
    LosRbNode *pstRbNode = NULL;
    LosRbNode *pstRbNodeTmp = NULL;
    LosRbTree *regionRbTree = &vmSpace->regionRbTree;
    VADDR_T curEnd = vmSpace->mapBase; // start scanning from the mapping base
    VADDR_T nextStart;

    curRegion = LOS_RegionFind(vmSpace, vmSpace->mapBase);
@@ -450,7 +696,11 @@ VADDR_T OsAllocRange(LosVmSpace *vmSpace, size_t len)
            curEnd = curRegion->range.base + curRegion->range.size;
        }
        RB_MID_SCAN_END(regionRbTree, pstRbNode)
    } else { // the red-black tree scan is sorted, small to big
        /* rbtree scan is sorted, from small to big */
        RB_SCAN_SAFE(regionRbTree, pstRbNode, pstRbNodeTmp)
            curRegion = (LosVmMapRegion *)pstRbNode;
@@ -473,34 +723,58 @@ VADDR_T OsAllocRange(LosVmSpace *vmSpace, size_t len)
    return 0;
}

/// Allocate an address range with a caller-specified start address
VADDR_T OsAllocSpecificRange(LosVmSpace *vmSpace, VADDR_T vaddr, size_t len, UINT32 regionFlags)
{
    STATUS_T status;

    if (LOS_IsRangeInSpace(vmSpace, vaddr, len) == FALSE) { // the range must lie inside the space
        return 0;
    }

    if ((LOS_RegionFind(vmSpace, vaddr) != NULL) ||
        (LOS_RegionFind(vmSpace, vaddr + len - 1) != NULL) ||
        (LOS_RegionRangeFind(vmSpace, vaddr, len - 1) != NULL)) { // some part of the range is already occupied
        if ((regionFlags & VM_MAP_REGION_FLAG_FIXED_NOREPLACE) != 0) {
            return 0;
        } else if ((regionFlags & VM_MAP_REGION_FLAG_FIXED) != 0) { // fixed mapping: evict whatever overlaps
            status = LOS_UnMMap(vaddr, len); // undo the existing mappings
            if (status != LOS_OK) {
                VM_ERR("unmap specific range va: %#x, len: %#x failed, status: %d", vaddr, len, status);
                return 0;
            }
        } else {
            return OsAllocRange(vmSpace, len); // fall back to an automatically chosen range
        }
    }
    return vaddr;
}

/// Is a file-mapped region valid?
BOOL LOS_IsRegionFileValid(LosVmMapRegion *region)
{
    if ((region != NULL) && (LOS_IsRegionTypeFile(region)) &&
@@ -509,7 +783,11 @@ BOOL LOS_IsRegionFileValid(LosVmMapRegion *region)
    }
    return FALSE;
}
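OsAllocRange above is, in essence, a first-fit scan over regions sorted by base address: keep the end of the previous region as the scan point and stop at the first gap of the requested length. A standalone sketch over a plain sorted array; the kernel walks its red-black tree instead, and the final bounds check is omitted here:

#include <stddef.h>
#include <stdio.h>

typedef struct { unsigned long base; size_t size; } Region;

/* first gap of at least len, scanning upward from mapBase */
static unsigned long AllocRange(const Region *sorted, size_t n,
                                unsigned long mapBase, size_t len)
{
    unsigned long curEnd = mapBase;
    for (size_t i = 0; i < n; i++) {
        if (sorted[i].base + sorted[i].size <= curEnd) {
            continue; /* region lies entirely below the scan point */
        }
        if (sorted[i].base >= curEnd + len) {
            return curEnd; /* the gap before this region is big enough */
        }
        curEnd = sorted[i].base + sorted[i].size;
    }
    return curEnd; /* space after the last region (space-limit check omitted) */
}

int main(void)
{
    Region regions[] = { { 0x1000, 0x1000 }, { 0x3000, 0x1000 } };
    /* the gap [0x2000, 0x2FFF] is 0x1000 bytes: first fit for a 0x800 request */
    printf("%#lx\n", AllocRange(regions, 2, 0x1000, 0x800));  /* 0x2000 */
    printf("%#lx\n", AllocRange(regions, 2, 0x1000, 0x2000)); /* 0x4000 */
    return 0;
}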
/// Insert a region into the red-black tree
BOOL OsInsertRegion(LosRbTree *regionRbTree, LosVmMapRegion *region)
{
    if (LOS_RbAddNode(regionRbTree, (LosRbNode *)region) == FALSE) {
@@ -519,14 +797,35 @@ BOOL OsInsertRegion(LosRbTree *regionRbTree, LosVmMapRegion *region)
    }
    return TRUE;
}

/// Create a region descriptor
LosVmMapRegion *OsCreateRegion(VADDR_T vaddr, size_t len, UINT32 regionFlags, unsigned long offset)
{
    LosVmMapRegion *region = LOS_MemAlloc(m_aucSysMem0, sizeof(LosVmMapRegion)); // only the descriptor is allocated here
    if (region == NULL) {
        VM_ERR("memory allocate for LosVmMapRegion failed");
        return region;
    }

    /* creating a region is pure bookkeeping, a promise of memory; no physical pages are touched yet */
    (void)memset_s(region, sizeof(LosVmMapRegion), 0, sizeof(LosVmMapRegion));
    region->range.base = vaddr; // the virtual address becomes the region base
    region->range.size = len;
    region->pgOff = offset;
    region->regionFlags = regionFlags;
    region->regionType = VM_MAP_REGION_TYPE_NONE;
    region->forkFlags = 0;
    region->shmid = -1;
    return region;
}

/// Query the physical address mapped to a virtual address
PADDR_T LOS_PaddrQuery(VOID *vaddr)
{
    PADDR_T paddr = 0;
    STATUS_T status;
    LosVmSpace *space = NULL;
    LosArchMmu *archMmu = NULL;

    /* first pick the MMU of the space this address belongs to */
    if (LOS_IsKernelAddress((VADDR_T)(UINTPTR)vaddr)) { // a kernel-space address?
        archMmu = &g_kVmSpace.archMmu;
    } else if (LOS_IsUserAddress((VADDR_T)(UINTPTR)vaddr)) {
        space = OsCurrProcessGet()->vmSpace;
        archMmu = &space->archMmu;
    } else if (LOS_IsVmallocAddress((VADDR_T)(UINTPTR)vaddr)) { // a vmalloc (kernel heap) address?
        archMmu = &g_vMallocSpace.archMmu;
    } else {
        VM_ERR("vaddr is beyond range");
        return 0;
    }

    status = LOS_ArchMmuQuery(archMmu, (VADDR_T)(UINTPTR)vaddr, &paddr, 0); // walk the page tables
    if (status == LOS_OK) {
        return paddr;
    } else {
@@ -566,10 +880,13 @@ PADDR_T LOS_PaddrQuery(VOID *vaddr)
    }
}
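LOS_PaddrQuery, like LOS_SpaceGet earlier, begins by classifying the address to pick the right space and MMU. A standalone model of that dispatch; the layout constants below are made up for illustration, the real bounds come from the board configuration:

#include <stdio.h>

/* illustrative layout, not the real board configuration */
#define USER_BASE    0x01000000ul
#define USER_TOP     0x3F000000ul
#define KERNEL_BASE  0x40000000ul
#define KERNEL_TOP   0x80000000ul
#define VMALLOC_BASE 0x90000000ul
#define VMALLOC_TOP  0xA0000000ul

static const char *SpaceOf(unsigned long va)
{
    if (va >= KERNEL_BASE && va < KERNEL_TOP) {
        return "kernel space (g_kVmSpace)";
    }
    if (va >= USER_BASE && va < USER_TOP) {
        return "current process space";
    }
    if (va >= VMALLOC_BASE && va < VMALLOC_TOP) {
        return "vmalloc space (g_vMallocSpace)";
    }
    return "out of range";
}

int main(void)
{
    printf("%s\n", SpaceOf(0x48000000ul)); /* kernel space */
    printf("%s\n", SpaceOf(0x02000000ul)); /* current process space */
    printf("%s\n", SpaceOf(0xF0000000ul)); /* out of range */
    return 0;
}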
/*!
 * This does not allocate physical memory. It only carves out a contiguous logical
 * range and marks it as taken; the physical pages behind it are faulted in lazily,
 * on first access, by the page-fault handler.
 */
LosVmMapRegion *LOS_RegionAlloc(LosVmSpace *vmSpace, VADDR_T vaddr, size_t len, UINT32 regionFlags, VM_OFFSET_T pgoff)
{
    VADDR_T rstVaddr;
@@ -580,33 +897,62 @@ LosVmMapRegion *LOS_RegionAlloc(LosVmSpace *vmSpace, VADDR_T vaddr, size_t len,
     * this is the most portable method of creating a new mapping. If addr is not NULL,
     * then the kernel takes it as where to place the mapping;
     */
    (VOID)LOS_MuxAcquire(&vmSpace->regionMux);
    if (vaddr == 0) { // no address given: let the region bookkeeping pick one, the most portable way to create a mapping
        rstVaddr = OsAllocRange(vmSpace, len);
    } else {
        /* if it is already mmapped here, we unmmap it */
        rstVaddr = OsAllocSpecificRange(vmSpace, vaddr, len, regionFlags); // rstVaddr may or may not equal vaddr
        if (rstVaddr == 0) {
            VM_ERR("alloc specific range va: %#x, len: %#x failed", vaddr, len);
            goto OUT;
        }
    }
    if (rstVaddr == 0) { // no usable virtual range
        goto OUT;
    }

    newRegion = OsCreateRegion(rstVaddr, len, regionFlags, pgoff); // a region starting at rstVaddr
    if (newRegion == NULL) {
        goto OUT;
    }
    newRegion->space = vmSpace;
    isInsertSucceed = OsInsertRegion(&vmSpace->regionRbTree, newRegion); // track it in the red-black tree
    if (isInsertSucceed == FALSE) {
        (VOID)LOS_MemFree(m_aucSysMem0, newRegion); // give the descriptor back to the pool
        newRegion = NULL;
    }

OUT:
    (VOID)LOS_MuxRelease(&vmSpace->regionMux);
    return newRegion;
}

/*!
 * Remove anonymous pages (plain memory-mapped pages):
 * 1. undo the mapping  2. free the physical memory
 */
STATIC VOID OsAnonPagesRemove(LosArchMmu *archMmu, VADDR_T vaddr, UINT32 count)
{
    status_t status;
@@ -625,20 +972,36 @@ STATIC VOID OsAnonPagesRemove(LosArchMmu *archMmu, VADDR_T vaddr, UINT32 count)
        return;
    }

    while (count > 0) { // page by page
        count--;
        status = LOS_ArchMmuQuery(archMmu, vaddr, &paddr, NULL); // virtual to physical
        if (status != LOS_OK) { // not mapped, move on to the next page
            vaddr += PAGE_SIZE;
            continue;
        }

        LOS_ArchMmuUnmap(archMmu, vaddr, 1); // unmap one page

        page = LOS_VmPageGet(paddr); // page frame that owns this physical address
        if (page != NULL) {
            if (!OsIsPageShared(page)) { // shared pages carry a share tag; sharing means several processes reference the page
                LOS_PhysPageFree(page); // free the frame
            }
        }
        vaddr += PAGE_SIZE;
@@ -682,7 +1045,11 @@ STATIC VOID OsFilePagesRemove(LosVmSpace *space, LosVmMapRegion *region)
    }
}
#endif
/// Free the given region of a process space
STATUS_T LOS_RegionFree(LosVmSpace *space, LosVmMapRegion *region)
{
    if ((space == NULL) || (region == NULL)) {
@@ -692,14 +1059,43 @@ STATUS_T LOS_RegionFree(LosVmSpace *space, LosVmMapRegion *region)
    }

    (VOID)LOS_MuxAcquire(&space->regionMux);

#ifdef LOSCFG_FS_VFS // file system enabled
    if (LOS_IsRegionFileValid(region)) { // a file-backed region: drop its file pages
        OsFilePagesRemove(space, region);
        VnodeHold();
        region->unTypeData.rf.vnode->useCount--;
        VnodeDrop();
    } else
#endif

#ifdef LOSCFG_KERNEL_SHM // shared memory enabled
    if (OsIsShmRegion(region)) { // a shared-memory region
        OsShmRegionFree(space, region);
    } else if (LOS_IsRegionTypeDev(region)) {
#else
    if (LOS_IsRegionTypeDev(region)) {
#endif
        OsDevPagesRemove(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);
    } else {
        OsAnonPagesRemove(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);
    }

    /* remove it from space */
    LOS_RbDelNode(&space->regionRbTree, &region->rbNode);
    /* free it */
    LOS_MemFree(m_aucSysMem0, region);
    (VOID)LOS_MuxRelease(&space->regionMux);
    return LOS_OK;
}

/// Duplicate a region
LosVmMapRegion *OsVmRegionDup(LosVmSpace *space, LosVmMapRegion *oldRegion, VADDR_T vaddr, size_t size)
{
    LosVmMapRegion *newRegion = NULL;
    UINT32 regionFlags;

    (VOID)LOS_MuxAcquire(&space->regionMux);
    regionFlags = oldRegion->regionFlags;
    if (vaddr == 0) { // no fixed address requested: strip both FIXED flags
        regionFlags &= ~(VM_MAP_REGION_FLAG_FIXED | VM_MAP_REGION_FLAG_FIXED_NOREPLACE);
    } else {
        regionFlags |= VM_MAP_REGION_FLAG_FIXED; // keep the exact address
    }
    newRegion = LOS_RegionAlloc(space, vaddr, size, regionFlags, oldRegion->pgOff); // carve out the duplicate
    if (newRegion == NULL) {
        VM_ERR("LOS_RegionAlloc failed");
        goto REGIONDUPOUT;
    }
    newRegion->regionType = oldRegion->regionType; // copy the type (file, device or anonymous)

#ifdef LOSCFG_KERNEL_SHM
    if (OsIsShmRegion(oldRegion)) {
        newRegion->shmid = oldRegion->shmid;
    }
#endif

#ifdef LOSCFG_FS_VFS
    if (LOS_IsRegionTypeFile(oldRegion)) {
        newRegion->unTypeData.rf.vmFOps = oldRegion->unTypeData.rf.vmFOps;
        newRegion->unTypeData.rf.vnode = oldRegion->unTypeData.rf.vnode;
        newRegion->unTypeData.rf.f_oflags = oldRegion->unTypeData.rf.f_oflags; // access flags
        VnodeHold();
        newRegion->unTypeData.rf.vnode->useCount++; // one more user of the vnode
        VnodeDrop();
    }
#endif

REGIONDUPOUT:
    (VOID)LOS_MuxRelease(&space->regionMux);
    return newRegion;
}
/// Split a region in two at newRegionStart
STATIC LosVmMapRegion *OsVmRegionSplit(LosVmMapRegion *oldRegion, VADDR_T newRegionStart)
{
    LosVmMapRegion *newRegion = NULL;
    LosVmSpace *space = oldRegion->space;
    size_t size = LOS_RegionSize(newRegionStart, LOS_RegionEndAddr(oldRegion)); // size of the tail part

    oldRegion->range.size = LOS_RegionSize(oldRegion->range.base, newRegionStart - 1); // shrink the head part
    if (oldRegion->range.size == 0) {
        LOS_RbDelNode(&space->regionRbTree, &oldRegion->rbNode);
    }
@@ -782,14 +1216,22 @@ STATIC LosVmMapRegion *OsVmRegionSplit(LosVmMapRegion *oldRegion, VADDR_T newReg
#endif
    return newRegion;
}

/// Adjust regions so that [newRegionStart, newRegionStart + size) falls on region boundaries
STATUS_T OsVmRegionAdjust(LosVmSpace *space, VADDR_T newRegionStart, size_t size)
{
    LosVmMapRegion *region = NULL;
    VADDR_T nextRegionBase = newRegionStart + size;
    LosVmMapRegion *newRegion = NULL;

    region = LOS_RegionFind(space, newRegionStart); // find the region containing the start
    if ((region != NULL) && (newRegionStart > region->range.base)) {
        newRegion = OsVmRegionSplit(region, newRegionStart);
        if (newRegion == NULL) {
@@ -809,7 +1251,11 @@ STATUS_T OsVmRegionAdjust(LosVmSpace *space, VADDR_T newRegionStart, size_t size
    return LOS_OK;
}

/// Remove all regions overlapping [regionBase, regionBase + size)
STATUS_T OsRegionsRemove(LosVmSpace *space, VADDR_T regionBase, size_t size)
{
    STATUS_T status;
@@ -820,12 +1266,20 @@ STATUS_T OsRegionsRemove(LosVmSpace *space, VADDR_T regionBase, size_t size)
    (VOID)LOS_MuxAcquire(&space->regionMux);

    status = OsVmRegionAdjust(space, regionBase, size); // split regions at the range boundaries first
    if (status != LOS_OK) {
        goto ERR_REGION_SPLIT;
    }

    RB_SCAN_SAFE(&space->regionRbTree, pstRbNodeTemp, pstRbNodeNext) // then walk the space and free what falls inside
        regionTemp = (LosVmMapRegion *)pstRbNodeTemp;
        if (regionTemp->range.base > regionEnd) {
            break;
        }
@@ -844,7 +1298,11 @@ ERR_REGION_SPLIT:
    (VOID)LOS_MuxRelease(&space->regionMux);
    return status;
}

/// Free the physical memory that backs the user heap inside [addr, addr + len)
INT32 OsUserHeapFree(LosVmSpace *vmSpace, VADDR_T addr, size_t len)
{
    LosVmMapRegion *vmRegion = NULL;
@@ -853,15 +1311,35 @@ INT32 OsUserHeapFree(LosVmSpace *vmSpace, VADDR_T addr, size_t len)
    VADDR_T vaddr;
    STATUS_T ret;

    if (vmSpace == LOS_GetKVmSpace() || vmSpace->heap == NULL) { // only non-kernel spaces have a heap
        return -1;
    }

    vmRegion = LOS_RegionFind(vmSpace, addr); // the region whose range contains addr
    if (vmRegion == NULL) {
        return -1;
    }

    if (vmRegion == vmSpace->heap) { // it must be the heap region
        vaddr = addr;
        while (len > 0) {
            if (LOS_ArchMmuQuery(&vmSpace->archMmu, vaddr, &paddr, 0) == LOS_OK) { // the final 0 means no flags wanted
                ret = LOS_ArchMmuUnmap(&vmSpace->archMmu, vaddr, 1);
                if (ret <= 0) {
                    VM_ERR("unmap failed, ret = %d", ret);
                }
                vmPage = LOS_VmPageGet(paddr); // page frame info
                LOS_PhysPageFree(vmPage);      // release the page
            }
            vaddr += PAGE_SIZE;
            len -= PAGE_SIZE;
@@ -881,7 +1360,11 @@ INT32 OsUserHeapFree(LosVmSpace *vmSpace, VADDR_T addr, size_t len)
    return -1;
}
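OsVmRegionSplit above relies on inclusive-end arithmetic: a region covering [base, end] cut at newStart becomes [base, newStart-1] and [newStart, end]. A standalone check of that size math; LOS_RegionSize is modeled here as end - start + 1, which matches how the code above uses it:

#include <assert.h>
#include <stdio.h>

typedef struct { unsigned long base; unsigned long size; } Range;

static unsigned long RegionSize(unsigned long start, unsigned long end)
{
    return end - start + 1; /* inclusive end */
}

int main(void)
{
    Range old = { 0x1000, 0x3000 };              /* covers [0x1000, 0x3FFF] */
    unsigned long end = old.base + old.size - 1; /* 0x3FFF */
    unsigned long newStart = 0x2000;

    unsigned long tail = RegionSize(newStart, end); /* 0x2000 bytes */
    old.size = RegionSize(old.base, newStart - 1);  /* 0x1000 bytes */

    assert(old.size + tail == 0x3000); /* no bytes lost or duplicated */
    printf("head=%#lx tail=%#lx\n", old.size, tail);
    return 0;
}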
/// Can the region be expanded by size bytes?
STATUS_T OsIsRegionCanExpand(LosVmSpace *space, LosVmMapRegion *region, size_t size)
{
    LosVmMapRegion *nextRegion = NULL;
@@ -898,13 +1381,21 @@ STATUS_T OsIsRegionCanExpand(LosVmSpace *space, LosVmMapRegion *region, size_t s
    return LOS_NOK;
}

/// Undo the mappings of a virtual address range
STATUS_T OsUnMMap(LosVmSpace *space, VADDR_T addr, size_t size)
{
    size = LOS_Align(size, PAGE_SIZE);
    addr = LOS_Align(addr, PAGE_SIZE);
    (VOID)LOS_MuxAcquire(&space->regionMux);
    STATUS_T status = OsRegionsRemove(space, addr, size); // drop the covered regions
    if (status != LOS_OK) {
        status = -EINVAL;
        VM_ERR("region_split failed");
@@ -915,19 +1406,39 @@ ERR_REGION_SPLIT:
    (VOID)LOS_MuxRelease(&space->regionMux);
    return status;
}

/// Free every region of a space
STATIC VOID OsVmSpaceAllRegionFree(LosVmSpace *space)
{
    LosRbNode *pstRbNode = NULL;
    LosRbNode *pstRbNodeNext = NULL;

    /* free all of the regions */
    RB_SCAN_SAFE(&space->regionRbTree, pstRbNode, pstRbNodeNext) // walk the red-black tree
        LosVmMapRegion *region = (LosVmMapRegion *)pstRbNode;
        if (region->range.size == 0) {
            VM_ERR("space free, region: %#x flags: %#x, base:%#x, size: %#x",
                   region, region->regionFlags, region->range.base, region->range.size);
        }
        STATUS_T ret = LOS_RegionFree(space, region);
        if (ret != LOS_OK) {
            VM_ERR("free region error, space %p, region %p", space, region);
        }
    RB_SCAN_SAFE_END(&space->regionRbTree, pstRbNode, pstRbNodeNext)

    return;
}

/// Free the regions of a space
STATUS_T OsVmSpaceRegionFree(LosVmSpace *space)
{
    if (space == NULL) {
@@ -954,21 +1466,34 @@ STATUS_T OsVmSpaceRegionFree(LosVmSpace *space)
    return LOS_OK;
}

/// Free a whole space; note that the kernel space is memory-resident and can never be freed
STATUS_T LOS_VmSpaceFree(LosVmSpace *space)
{
    if (space == NULL) {
        return LOS_ERRNO_VM_INVALID_ARGS;
    }

    if (space == &g_kVmSpace) { // the kernel space stays in memory forever
        VM_ERR("try to free kernel aspace, not allowed");
        return LOS_OK;
    }

    /* pop it out of the global aspace list */
    (VOID)LOS_MuxAcquire(&space->regionMux);

    LOS_ListDelete(&space->node); // unlink from g_vmSpaceList, which records every space

    OsVmSpaceAllRegionFree(space);
@@ -991,7 +1516,11 @@ STATUS_T LOS_VmSpaceFree(LosVmSpace *space)
    LOS_MemFree(m_aucSysMem0, space);
    return LOS_OK;
}

/// Is the range [vaddr, vaddr + size) inside the space?
BOOL LOS_IsRangeInSpace(const LosVmSpace *space, VADDR_T vaddr, size_t size)
{
    /* is the starting address within the address space */
@@ -1011,7 +1540,11 @@ BOOL LOS_IsRangeInSpace(const LosVmSpace *space, VADDR_T vaddr, size_t size)
    }
    return TRUE;
}
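OsUnMMap page-aligns both the size and the address before touching any region. That is the usual power-of-two rounding, shown here as a standalone check; AlignUp is modeled after LOS_Align as a round-up, which is an assumption to verify against the kernel's own macro definition:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000u

/* round up to a power-of-two boundary (modeled after LOS_Align) */
static uintptr_t AlignUp(uintptr_t v, uintptr_t boundary)
{
    return (v + boundary - 1) & ~(boundary - 1);
}

int main(void)
{
    printf("%#lx\n", (unsigned long)AlignUp(0x1234, PAGE_SIZE)); /* 0x2000 */
    printf("%#lx\n", (unsigned long)AlignUp(0x1000, PAGE_SIZE)); /* 0x1000, already aligned */
    return 0;
}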
/// Reserve a chunk of address space in a process space
STATUS_T LOS_VmSpaceReserve(LosVmSpace *space, size_t size, VADDR_T vaddr)
{
    UINT32 regionFlags = 0;
@@ -1032,7 +1565,11 @@ STATUS_T LOS_VmSpaceReserve(LosVmSpace *space, size_t size, VADDR_T vaddr)
    return region ? LOS_OK : LOS_ERRNO_VM_NO_MEMORY;
}

/// Map a virtual address range onto a physical range of the given length; the physical range must have been allocated beforehand
STATUS_T LOS_VaddrToPaddrMmap(LosVmSpace *space, VADDR_T vaddr, PADDR_T paddr, size_t len, UINT32 flags)
{
    STATUS_T ret;
@@ -1047,19 +1584,34 @@ STATUS_T LOS_VaddrToPaddrMmap(LosVmSpace *space, VADDR_T vaddr, PADDR_T paddr, s
    }

    if (space == NULL) {
        space = OsCurrProcessGet()->vmSpace; // default to the current process space
    }

    region = LOS_RegionFind(space, vaddr); // already mapped?
    if (region != NULL) {
        VM_ERR("vaddr : 0x%x already used!", vaddr);
        return LOS_ERRNO_VM_BUSY;
    }

    region = LOS_RegionAlloc(space, vaddr, len, flags, 0); // carve a region at the requested virtual address
    if (region == NULL) {
        VM_ERR("failed");
        return LOS_ERRNO_VM_NO_MEMORY;
    }

    while (len > 0) {
@@ -1069,9 +1621,15 @@ STATUS_T LOS_VaddrToPaddrMmap(LosVmSpace *space, VADDR_T vaddr, PADDR_T paddr, s
            VM_ERR("Page is NULL");
            return LOS_ERRNO_VM_NOT_VALID;
        }
        LOS_AtomicInc(&vmPage->refCounts); // count the new reference

        ret = LOS_ArchMmuMap(&space->archMmu, vaddr, paddr, 1, region->regionFlags); // install the MMU entry
        if (ret <= 0) {
            VM_ERR("LOS_ArchMmuMap failed: %d", ret);
            LOS_RegionFree(space, region);
@@ -1085,10 +1643,16 @@ STATUS_T LOS_VaddrToPaddrMmap(LosVmSpace *space, VADDR_T vaddr, PADDR_T paddr, s
    return LOS_OK;
}
/// Public interface: allocate memory from the kernel vmalloc space
VOID *LOS_VMalloc(size_t size)
{
    LosVmSpace *space = &g_vMallocSpace; // allocate from the kernel dynamic space
    LosVmMapRegion *region = NULL;
    size_t sizeCount;
    size_t count;
    LosVmPage *vmPage = NULL;
    VADDR_T va;
    PADDR_T pa;
    STATUS_T ret;

    size = LOS_Align(size, PAGE_SIZE);
    if ((size == 0) || (size > space->size)) {
        return NULL;
    }
    sizeCount = size >> PAGE_SHIFT;

    LOS_DL_LIST_HEAD(pageList);
    (VOID)LOS_MuxAcquire(&space->regionMux);

    count = LOS_PhysPagesAlloc(sizeCount, &pageList); // grab pages one by one, appending them to pageList
    if (count < sizeCount) {
        VM_ERR("failed to allocate enough pages (ask %zu, got %zu)", sizeCount, count);
        goto ERROR;
    }

    /* allocate a readable/writable region and hang it on the space; note the second argument is vaddr = 0 */
    region = LOS_RegionAlloc(space, 0, size, VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE, 0);
    if (region == NULL) {
        VM_ERR("alloc region failed, size = %x", size);
        goto ERROR;
    }

    va = region->range.base; // the region base is where the virtual range starts; seeing where va comes from is the key to linear addresses
    while ((vmPage = LOS_ListRemoveHeadType(&pageList, LosVmPage, node))) { // take the pages off the list one by one
        pa = vmPage->physAddr; // set by LOS_PhysPagesAlloc above
        LOS_AtomicInc(&vmPage->refCounts);
        ret = LOS_ArchMmuMap(&space->archMmu, va, pa, 1, region->regionFlags); // map page by page; the frames need not be physically contiguous
        if (ret != 1) {
            VM_ERR("LOS_ArchMmuMap failed!, err;%d", ret);
        }
        va += PAGE_SIZE;
    }

    (VOID)LOS_MuxRelease(&space->regionMux);
    return (VOID *)(UINTPTR)region->range.base;

ERROR:
    (VOID)LOS_PhysPagesFree(&pageList);
    (VOID)LOS_MuxRelease(&space->regionMux);
    return NULL;
}

/// Public interface: free memory back to the kernel vmalloc space
VOID LOS_VFree(const VOID *addr)
{
    LosVmSpace *space = &g_vMallocSpace;
@@ -1152,13 +1757,21 @@ VOID LOS_VFree(const VOID *addr)
    (VOID)LOS_MuxAcquire(&space->regionMux);

    region = LOS_RegionFind(space, (VADDR_T)(UINTPTR)addr); // find the region first
    if (region == NULL) {
        VM_ERR("find region failed");
        goto DONE;
    }

    ret = LOS_RegionFree(space, region); // freeing the region unmaps and releases the pages
    if (ret) {
        VM_ERR("free region failed, ret = %d", ret);
    }
@@ -1169,9 +1782,16 @@ DONE:

LosMux *OsGVmSpaceMuxGet(VOID)
{
    return &g_vmSpaceListMux;
}

/// Is this a large allocation, i.e. one where serving it from the heap pool would waste more than 1KiB?
STATIC INLINE BOOL OsMemLargeAlloc(UINT32 size)
{
    if (g_kHeapInited == FALSE) {
        return FALSE;
    }
@@ -1193,6 +1813,20 @@ PADDR_T LOS_PaddrQuery(VOID *vaddr)
    return (PADDR_T)VMM_TO_DMA_ADDR((VADDR_T)vaddr);
}
#endif
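Per the comments around OsMemLargeAlloc and LOS_KernelMalloc, small requests come from the heap pool and large ones become whole pages. A standalone sketch of the page-rounding arithmetic behind that decision; the 16KiB pool limit and the 1KiB waste figure are quoted from the comments, and the exact condition in the elided code may differ:

#include <stdio.h>

#define PAGE_SIZE 0x1000u /* 4KiB */
#define ROUNDUP(x, align) (((x) + (align) - 1) & ~((align) - 1))

int main(void)
{
    unsigned int sizes[] = { 1024, 15 * 1024, 17 * 1024 };

    for (int i = 0; i < 3; i++) {
        unsigned int size = sizes[i];
        unsigned int pages = ROUNDUP(size, PAGE_SIZE) / PAGE_SIZE;
        unsigned int waste = pages * PAGE_SIZE - size; /* slack if served as whole pages */
        printf("size=%-6u pages=%u waste=%u\n", size, pages, waste);
    }
    return 0;
}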
/// Kernel allocation: requests smaller than 16KiB come from the heap pool, larger ones get contiguous physical pages
VOID *LOS_KernelMalloc(UINT32 size)
{
    VOID *ptr = NULL;

#ifdef LOSCFG_KERNEL_VM
    if (OsMemLargeAlloc(size)) {
        ptr = LOS_PhysPagesAllocContiguous(ROUNDUP(size, PAGE_SIZE) >> PAGE_SHIFT);
    } else
#endif
    {
        ptr = LOS_MemAlloc(OS_SYS_MEM_ADDR, size); // allocate from the memory pool
    }

    return ptr;
}

/// Aligned kernel allocation; the same small/large split as LOS_KernelMalloc applies
VOID *LOS_KernelMallocAlign(UINT32 size, UINT32 boundary)
{
    VOID *ptr = NULL;

#ifdef LOSCFG_KERNEL_VM
    if (OsMemLargeAlloc(size) && IS_ALIGNED(PAGE_SIZE, boundary)) {
        ptr = LOS_PhysPagesAllocContiguous(ROUNDUP(size, PAGE_SIZE) >> PAGE_SHIFT);
    } else
#endif
@@ -1225,7 +1868,11 @@ VOID *LOS_KernelMallocAlign(UINT32 size, UINT32 boundary)
    return ptr;
}

/// Re-allocate kernel memory
VOID *LOS_KernelRealloc(VOID *ptr, UINT32 size)
{
    VOID *tmpPtr = NULL;

#ifdef LOSCFG_KERNEL_VM
    LosVmPage *page = NULL;
    errno_t ret;

    if (ptr == NULL) {
        tmpPtr = LOS_KernelMalloc(size);
    } else {
@@ -1270,7 +1920,11 @@ VOID LOS_KernelFree(VOID *ptr)
{
#ifdef LOSCFG_KERNEL_VM
    UINT32 ret;
    if (OsMemIsHeapNode(ptr) == FALSE) { // not a heap-pool node: it was allocated as whole pages
        ret = OsMemLargeNodeFree(ptr);
        if (ret != LOS_OK) {
            VM_ERR("KernelFree %p failed", ptr);
        }
@@ -1279,6 +1933,12 @@ VOID LOS_KernelFree(VOID *ptr)
    } else
#endif
    {
        (VOID)LOS_MemFree(OS_SYS_MEM_ADDR, ptr); // give it back to the pool
    }
}
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_page.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_page.c
index a711ea7c..17eba645 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_page.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_page.c
@@ -40,6 +40,19 @@

#ifdef LOSCFG_KERNEL_VM

LosVmPage *g_vmPageArray = NULL; // array of physical page frames
size_t g_vmPageArraySize;        // size of the page-frame array in bytes

/// Initialize one page frame
STATIC VOID OsVmPageInit(LosVmPage *page, paddr_t pa, UINT8 segID)
{
    LOS_ListInit(&page->node);
    page->flags = FILE_PAGE_FREE;
    LOS_AtomicSet(&page->refCounts, 0);
    page->physAddr = pa;             // physical address
    page->segID = segID;             // physical memory is managed in segments; this is the owning segment
    page->order = VM_LIST_ORDER_MAX; // initial value: belongs to no buddy block group yet
    page->nPages = 0;
#ifdef LOSCFG_PAGE_TABLE_FINE_LOCK
    LOS_SpinInit(&page->lock);
#endif
}

/// Buddy-system initialization
STATIC INLINE VOID OsVmPageOrderListInit(LosVmPage *page, size_t nPages)
{ // @note_why at this point every page still has order VM_LIST_ORDER_MAX; can they be hooked into the buddy lists like this?
    OsVmPhysPagesFreeContiguous(page, nPages); // freeing the contiguous run populates the buddy free lists
}

#define VMPAGEINIT(page, pa, segID) do {    \
@@ -68,10 +89,13 @@ STATIC INLINE VOID OsVmPageOrderListInit(LosVmPage *page, size_t nPages)
    (pa) += PAGE_SIZE;                      \
} while (0)
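OsVmPageStartup below sizes the frame array with nPage = pageNum * PAGE_SIZE / (sizeof(LosVmPage) + PAGE_SIZE): each described page must pay for itself plus one LosVmPage of metadata, and the total must fit in the raw pageNum * PAGE_SIZE bytes. A standalone check of that arithmetic; the sizeof(LosVmPage) value below is an assumption, it varies with the kernel configuration:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
    unsigned int pageNum = 16384; /* 64MiB of raw physical memory */
    unsigned int vmPageSize = 56; /* assumed sizeof(LosVmPage); config-dependent */

    /* frames that fit once each one also pays for its own descriptor */
    unsigned long nPage = (unsigned long)pageNum * PAGE_SIZE / (vmPageSize + PAGE_SIZE);
    unsigned long arrayBytes = nPage * vmPageSize;

    printf("describable frames: %lu\n", nPage);
    printf("metadata array:     %lu bytes (~%lu pages)\n",
           arrayBytes, (arrayBytes + PAGE_SIZE - 1) / PAGE_SIZE);
    /* nPage * (vmPageSize + PAGE_SIZE) never exceeds the raw total */
    return 0;
}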
/*!
 * Initialize physical memory as a whole; this function runs with the MMU still off.
 * 1. Allocate one big block, g_vmPageArray, holding the LosVmPage entries that
 *    describe physical memory sliced into 4K pages.
 */
VOID OsVmPageStartup(VOID)
{
    struct VmPhysSeg *seg = NULL;
@@ -80,7 +104,11 @@ VOID OsVmPageStartup(VOID)
    UINT32 nPage;
    INT32 segID;

    OsVmPhysAreaSizeAdjust(ROUNDUP((g_vmBootMemBase - KERNEL_ASPACE_BASE), PAGE_SIZE)); // trim g_physArea by what the boot allocator already used

    /*
     * Pages getting from OsVmPhysPageNumGet() interface here contain the memory
@@ -89,6 +117,15 @@ VOID OsVmPageStartup(VOID)
     */
    UINT32 pageNum = OsVmPhysPageNumGet();
    nPage = pageNum * PAGE_SIZE / (sizeof(LosVmPage) + PAGE_SIZE); // frames left once each also pays for its own descriptor
    g_vmPageArraySize = nPage * sizeof(LosVmPage);                 // total metadata size
    g_vmPageArray = (LosVmPage *)OsVmBootMemAlloc(g_vmPageArraySize); // boot-time allocation; the MMU is not up yet

    OsVmPhysAreaSizeAdjust(ROUNDUP(g_vmPageArraySize, PAGE_SIZE));

    OsVmPhysSegAdd(); // set up the segments
    OsVmPhysInit();   // set up the free lists and the LRU (least recently used) replacement lists
@@ -96,13 +133,20 @@ VOID OsVmPageStartup(VOID)
#ifdef LOSCFG_KERNEL_PLIMITS
    OsMemLimitSetLimit(pageNum * PAGE_SIZE);
#endif
    for (segID = 0; segID < g_vmPhysSegNum; segID++) { // walk the segments, slicing each into pages
        seg = &g_vmPhysSeg[segID];
        nPage = seg->size >> PAGE_SHIFT; // total pages in this segment
        UINT32 count = nPage >> 3; /* 3: 2 ^ 3, nPage / 8, cycle count */
        UINT32 left = nPage & 0x7; /* 0x7: nPage % 8, left page */

@@ -120,17 +164,29 @@ VOID OsVmPageStartup(VOID)
        for (; left > 0; left--) {
            VMPAGEINIT(page, pa, segID);
        }
        OsVmPageOrderListInit(seg->pageBase, nPage); // buddy init: put every page on the free lists for allocation
    }
}

/// Get the page frame that owns a physical address
LosVmPage *LOS_VmPageGet(PADDR_T paddr)
{
    INT32 segID;
    LosVmPage *page = NULL;

    for (segID = 0; segID < g_vmPhysSegNum; segID++) { // physical memory is managed segment by segment
        page = OsVmPhysToPage(paddr, segID); // resolve the frame from the address and the segment
        if (page != NULL) {
            break;
        }
    }
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_phys.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_phys.c
index 3dc182e6..c36052ea 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_phys.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_phys.c
@@ -1,3 +1,5 @@
/*!
 Basic concepts:
 Physical memory is one of the machine's most important resources: the address space
 provided by the actual memory devices, directly addressable over the CPU bus.
@@ -24,6 +26,7 @@
 ... then hang the merged block on the order-1 free list and check again whether that
 block has a free buddy, repeating the merge upwards.
 */
/*
 * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
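A standalone miniature of the buddy rules sketched in the header comment above: an order-k block's buddy differs in exactly bit k of the page part of the address, so an XOR finds the buddy and a mask aligns the merged block. This mirrors the pa ^= and pa &= lines in OsVmPhysPagesFree further down in this file:

#include <stdio.h>

#define PAGE_SHIFT 12

/* physical address of the buddy of the order-k block at pa */
static unsigned long BuddyOf(unsigned long pa, unsigned int order)
{
    return pa ^ (1ul << (PAGE_SHIFT + order));
}

/* base of the order-(k+1) block produced by merging */
static unsigned long MergedBase(unsigned long pa, unsigned int order)
{
    return pa & ~((1ul << (PAGE_SHIFT + order + 1)) - 1);
}

int main(void)
{
    unsigned long pa = 0x3000; /* page frame 3, an order-0 block */

    printf("buddy at order 0: %#lx\n", BuddyOf(pa, 0));     /* 0x2000 */
    printf("merged base:      %#lx\n", MergedBase(pa, 0));  /* 0x2000, now an order-1 block */
    printf("buddy at order 1: %#lx\n", BuddyOf(0x2000, 1)); /* 0x0000 */
    return 0;
}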
@@ -67,6 +70,19 @@

#define ONE_PAGE 1

/* Physical memory area array; a single area here, so exactly one segment gets created */
STATIC struct VmPhysArea g_physArea[] = {
    {
        .start = SYS_MEM_BASE,
        .size = SYS_MEM_SIZE_DEFAULT,
    },
};

struct VmPhysSeg g_vmPhysSeg[VM_PHYS_SEG_MAX]; ///< at most 32 segments
INT32 g_vmPhysSegNum = 0;                      ///< number of segments

/// Get the segment array, a global variable living in .bss
LosVmPhysSeg *OsGVmPhysSegGet(void)
{
    return g_vmPhysSeg;
}

/// Initialize the LRU replacement lists of a segment
STATIC VOID OsVmPhysLruInit(struct VmPhysSeg *seg)
{
    INT32 i;
    UINT32 intSave;
    LOS_SpinInit(&seg->lruLock); // a spinlock, for synchronization across CPU cores

    LOS_SpinLockSave(&seg->lruLock, &intSave);
    for (i = 0; i < VM_NR_LRU_LISTS; i++) {
        seg->lruSize[i] = 0;
        LOS_ListInit(&seg->lruList[i]);
    }
    LOS_SpinUnlockRestore(&seg->lruLock, intSave);
}

/// Create a physical segment; areas are turned into segments for management
STATIC INT32 OsVmPhysSegCreate(paddr_t start, size_t size)
{
    struct VmPhysSeg *seg = NULL;
@@ -105,8 +139,13 @@ STATIC INT32 OsVmPhysSegCreate(paddr_t start, size_t size)
        return -1;
    }

    seg = &g_vmPhysSeg[g_vmPhysSegNum++]; // take the next slot
    for (; (seg > g_vmPhysSeg) && ((seg - 1)->start > (start + size)); seg--) { // keep the array sorted by start address
        *seg = *(seg - 1);
    }
    seg->start = start;
@@ -114,21 +153,35 @@ STATIC INT32 OsVmPhysSegCreate(paddr_t start, size_t size)
    return 0;
}

/// Add the physical segments
VOID OsVmPhysSegAdd(VOID)
{
    INT32 i, ret;

    LOS_ASSERT(g_vmPhysSegNum < VM_PHYS_SEG_MAX);

    for (i = 0; i < (sizeof(g_physArea) / sizeof(g_physArea[0])); i++) { // one segment per area
        ret = OsVmPhysSegCreate(g_physArea[i].start, g_physArea[i].size);
        if (ret != 0) {
            VM_ERR("create phys seg failed");
        }
    }
}

/// Trim the size of the physical area
VOID OsVmPhysAreaSizeAdjust(size_t size)
{
    /*
@@ -139,25 +192,50 @@ VOID OsVmPhysAreaSizeAdjust(size_t size)
    g_physArea[0].size -= size;
}

/// Total number of physical pages
UINT32 OsVmPhysPageNumGet(VOID)
{
    UINT32 nPages = 0;
    INT32 i;

    for (i = 0; i < (sizeof(g_physArea) / sizeof(g_physArea[0])); i++) {
        nPages += g_physArea[i].size >> PAGE_SHIFT; // shift right by 12, i.e. divide by 4K, to count pages
    }

    return nPages; // total pages over all physical memory
}
/// Initialize the free lists; page frames are handed out from them by the buddy allocator
STATIC INLINE VOID OsVmPhysFreeListInit(struct VmPhysSeg *seg)
{
    int i;
    UINT32 intSave;
    struct VmFreeList *list = NULL;

    LOS_SpinInit(&seg->freeListLock); // spinlock protecting allocation

    LOS_SpinLockSave(&seg->freeListLock, &intSave);
    for (i = 0; i < VM_LIST_ORDER_MAX; i++) {
        list = &seg->freeList[i];
        LOS_ListInit(&list->node);
        list->listCnt = 0;
    }
    LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
}

/// Initialize the physical segments
VOID OsVmPhysInit(VOID)
{
    struct VmPhysSeg *seg = NULL;
@@ -177,6 +256,15 @@ VOID OsVmPhysInit(VOID)
    for (i = 0; i < g_vmPhysSegNum; i++) {
        seg = &g_vmPhysSeg[i];
        seg->pageBase = &g_vmPageArray[nPages]; // first page frame of this segment
        nPages += seg->size >> PAGE_SHIFT;      // shift by 12: pages of 4K in this segment
        OsVmPhysFreeListInit(seg); // buddy free lists
        OsVmPhysLruInit(seg);      // LRU replacement lists
    }
}

/// Hook a page frame block into a free list; allocations are served from these lists
STATIC VOID OsVmPhysFreeListAddUnsafe(LosVmPage *page, UINT8 order)
{
    struct VmPhysSeg *seg = NULL;
@@ -200,12 +289,29 @@ STATIC VOID OsVmPhysFreeListAddUnsafe(LosVmPage *page, UINT8 order)
    LOS_ListTailInsert(&list->node, &page->node);
    list->listCnt++;
}

/// Unhook a page frame block from its free list, as happens when it gets allocated
STATIC VOID OsVmPhysFreeListDelUnsafe(LosVmPage *page)
{
    struct VmPhysSeg *seg = NULL;
    struct VmFreeList *list = NULL;

    if ((page->segID >= VM_PHYS_SEG_MAX) || (page->order >= VM_LIST_ORDER_MAX)) { // order == VM_LIST_ORDER_MAX is invalid too: the buddy system tops out at 2^8 pages
        LOS_Panic("The page segment id(%u) or order(%u) is invalid\n", page->segID, page->order);
    }

    seg = &g_vmPhysSeg[page->segID];
    list = &seg->freeList[page->order];
    list->listCnt--;
    LOS_ListDelete(&page->node);
    page->order = VM_LIST_ORDER_MAX;
}

/*!
 * Split a larger free block down to the requested order, e.g. when only a 2^8
 * block was found for a smaller request; each surplus piece (2^7, 2^6, ...)
 * goes back onto its own free list
 */
STATIC VOID OsVmPhysPagesSpiltUnsafe(LosVmPage *page, UINT8 oldOrder, UINT8 newOrder)
{
    UINT32 order;
    LosVmPage *buddyPage = NULL;

    for (order = newOrder; order > oldOrder;) { // keep halving down to the requested order
        order--;
        buddyPage = &page[VM_ORDER_TO_PAGES(order)]; // carve off the surplus half; page[n] works because all frames live in one big array
        LOS_ASSERT(buddyPage->order == VM_LIST_ORDER_MAX);
        OsVmPhysFreeListAddUnsafe(buddyPage, order);
    }
}

/// Get the page frame of a physical address within a given segment
LosVmPage *OsVmPhysToPage(paddr_t pa, UINT8 segID)
{
    struct VmPhysSeg *seg = NULL;
@@ -251,8 +369,13 @@ LosVmPage *OsVmPhysToPage(paddr_t pa, UINT8 segID)
        return NULL;
    }

    offset = pa - seg->start;                        // offset of the address inside the segment
    return (seg->pageBase + (offset >> PAGE_SHIFT)); // index into the segment's frame array
}

LosVmPage *OsVmPaddrToPage(paddr_t paddr)
{
@@ -268,6 +391,27 @@ LosVmPage *OsVmPaddrToPage(paddr_t paddr)
    }
    return NULL;
}
/*!
 * Get the kernel virtual address of a page frame; kernel physical memory is
 * linearly mapped, a constant offset from SYS_MEM_BASE (the physical memory
 * base) to KERNEL_ASPACE_BASE; see OsArchMmuInit
 */
VOID *OsVmPageToVaddr(LosVmPage *page)
{
    VADDR_T vaddr;
    vaddr = KERNEL_ASPACE_BASE + page->physAddr - SYS_MEM_BASE;

    return (VOID *)(UINTPTR)vaddr;
}

/// Get the page frame behind a kernel virtual address
LosVmPage *OsVmVaddrToPage(VOID *ptr)
{
    struct VmPhysSeg *seg = NULL;
    PADDR_T pa = LOS_PaddrQuery(ptr);
    UINT32 segID;

    for (segID = 0; segID < g_vmPhysSegNum; segID++) {
        seg = &g_vmPhysSeg[segID];
        if ((pa >= seg->start) && (pa < (seg->start + seg->size))) { // the segment owning the address
            return seg->pageBase + ((pa - seg->start) >> PAGE_SHIFT); // segment base plus page index
        }
    }
    return NULL;
}

/// Recycle the extra page frames of an oversized allocation
STATIC INLINE VOID OsVmRecycleExtraPages(LosVmPage *page, size_t startPage, size_t endPage)
{
    if (startPage >= endPage) {
@@ -307,7 +456,11 @@ STATIC INLINE VOID OsVmRecycleExtraPages(LosVmPage *page, size_t startPage, size
    OsVmPhysPagesFreeContiguous(page, endPage - startPage);
}

/// Allocation of large runs of physical memory
STATIC LosVmPage *OsVmPhysLargeAlloc(struct VmPhysSeg *seg, size_t nPages)
{
    struct VmFreeList *list = NULL;
@@ -317,11 +470,19 @@ STATIC LosVmPage *OsVmPhysLargeAlloc(struct VmPhysSeg *seg, size_t nPages)
    PADDR_T paEnd;
    size_t size = nPages << PAGE_SHIFT;

    list = &seg->freeList[VM_LIST_ORDER_MAX - 1]; // start from the largest buddy blocks
    LOS_DL_LIST_FOR_EACH_ENTRY(page, &list->node, LosVmPage, node) { // walk the list
        paStart = page->physAddr;
        paEnd = paStart + size;
        if (paEnd > (seg->start + seg->size)) { // the run must stay inside the segment
            continue;
        }
@@ -343,7 +504,11 @@ STATIC LosVmPage *OsVmPhysLargeAlloc(struct VmPhysSeg *seg, size_t nPages)
    return NULL;
}
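OsVmPageToVaddr above works because kernel physical memory is linearly mapped: vaddr = KERNEL_ASPACE_BASE + (paddr - SYS_MEM_BASE), and the kernel's inverse helper later in the file (OsKVaddrToPaddr) just subtracts the same offset. A standalone round-trip with made-up base addresses; the real constants come from the board configuration:

#include <assert.h>
#include <stdio.h>

/* illustrative constants, not the real board configuration */
#define SYS_MEM_BASE       0x80000000ul
#define KERNEL_ASPACE_BASE 0x40000000ul

static unsigned long PaToKva(unsigned long pa)
{
    return KERNEL_ASPACE_BASE + pa - SYS_MEM_BASE;
}

static unsigned long KvaToPa(unsigned long va)
{
    return va - KERNEL_ASPACE_BASE + SYS_MEM_BASE;
}

int main(void)
{
    unsigned long pa = 0x80230000ul;
    unsigned long va = PaToKva(pa);

    assert(KvaToPa(va) == pa); /* the mapping is a pure offset, so it round-trips */
    printf("pa=%#lx <-> va=%#lx\n", pa, va);
    return 0;
}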
/// Allocate physical pages from a segment's free lists
STATIC LosVmPage *OsVmPhysPagesAlloc(struct VmPhysSeg *seg, size_t nPages)
{
    struct VmFreeList *list = NULL;
@@ -353,6 +518,15 @@ STATIC LosVmPage *OsVmPhysPagesAlloc(struct VmPhysSeg *seg, size_t nPages)
    UINT32 newOrder;

    order = OsVmPagesToOrder(nPages);
    if (order < VM_LIST_ORDER_MAX) { // the regular buddy path
        for (newOrder = order; newOrder < VM_LIST_ORDER_MAX; newOrder++) { // search from the smallest fitting order upwards
            list = &seg->freeList[newOrder];
            if (LOS_ListEmpty(&list->node)) {
                continue; // nothing here, try a bigger order
            }
            page = LOS_DL_LIST_ENTRY(LOS_DL_LIST_FIRST(&list->node), LosVmPage, node); // found: take the first block
            goto DONE;
        }
    } else {
@@ -380,7 +555,11 @@ DONE:
    return page;
}

/// Free page frames; freeing a block means hanging it back on a free list
VOID OsVmPhysPagesFree(LosVmPage *page, UINT8 order)
{
    paddr_t pa;
@@ -390,6 +569,17 @@ VOID OsVmPhysPagesFree(LosVmPage *page, UINT8 order)
        return;
    }

    if (order < VM_LIST_ORDER_MAX - 1) { // order in [0, 7]
        pa = VM_PAGE_TO_PHYS(page); // physical address of the block
        do {
            pa ^= VM_ORDER_TO_PHYS(order); // XOR flips exactly the buddy bit, yielding the buddy block's address
            buddyPage = OsVmPhysToPage(pa, page->segID);
            if ((buddyPage == NULL) || (buddyPage->order != order)) {
                break; // the buddy is not free at this order: stop merging
            }
            OsVmPhysFreeListDelUnsafe(buddyPage); // the buddy is a contiguous run of frames, e.g. 4 frames form one order-2 block
            order++;
            pa &= ~(VM_ORDER_TO_PHYS(order) - 1); // align down to the merged block
            page = OsVmPhysToPage(pa, page->segID);
        } while (order < VM_LIST_ORDER_MAX - 1);
    }

    OsVmPhysFreeListAddUnsafe(page, order); // one more free block of this order
}

/// Free a contiguous run of page frames, releasing it in power-of-two chunks (8 contiguous pages go back as one block)
VOID OsVmPhysPagesFreeContiguous(LosVmPage *page, size_t nPages)
{
    paddr_t pa;
    UINT32 order;
    size_t n;

    while (TRUE) {
        pa = VM_PAGE_TO_PHYS(page);   // physical address of the first frame
        order = VM_PHYS_TO_ORDER(pa); // the largest order this address is aligned to
        n = VM_ORDER_TO_PAGES(order);
        if (n > nPages) {
            break;
        }
        OsVmPhysPagesFree(page, order);
        nPages -= n;
        page += n;
    }

    while (nPages > 0) { // release the tail in decreasing power-of-two chunks
        order = LOS_HighBitGet(nPages);
        n = VM_ORDER_TO_PAGES(order);
        OsVmPhysPagesFree(page, order);
        nPages -= n;
        page += n;
    }
}
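The second loop of OsVmPhysPagesFreeContiguous frees an arbitrary tail by peeling off the highest power of two each round. A standalone trace of that decomposition; HighBit is a local stand-in for LOS_HighBitGet:

#include <stdio.h>

/* index of the highest set bit, a stand-in for LOS_HighBitGet */
static unsigned int HighBit(unsigned long n)
{
    unsigned int bit = 0;
    while (n >>= 1) {
        bit++;
    }
    return bit;
}

int main(void)
{
    unsigned long nPages = 13; /* 13 = 8 + 4 + 1 */

    while (nPages > 0) {
        unsigned int order = HighBit(nPages);
        unsigned long chunk = 1ul << order;
        printf("free order %u (%lu pages)\n", order, chunk);
        nPages -= chunk;
    }
    /* prints orders 3, 2, 0: the run goes back as blocks of 8, 4 and 1 pages */
    return 0;
}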
/// Get nPages of physical memory from whichever segment can serve it
STATIC LosVmPage *OsVmPhysPagesGet(size_t nPages)
{
    UINT32 intSave;
@@ -453,11 +674,19 @@ STATIC LosVmPage *OsVmPhysPagesGet(size_t nPages)
    for (segID = 0; segID < g_vmPhysSegNum; segID++) {
        seg = &g_vmPhysSeg[segID];
        LOS_SpinLockSave(&seg->freeListLock, &intSave);
        page = OsVmPhysPagesAlloc(seg, nPages); // nPages must not exceed what the buddy allocator can serve at once
        if (page != NULL) {
            /* the first page of continuous physical addresses holds refCounts */
            LOS_AtomicSet(&page->refCounts, 0);
            page->nPages = nPages;
            LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
            return page;
        }
@@ -465,7 +694,11 @@ STATIC LosVmPage *OsVmPhysPagesGet(size_t nPages)
    }
    return NULL;
}

/// Allocate physically contiguous pages; kernel-mode requests are capped at 2^8 = 256 pages (1MiB), user mode is not limited this way
VOID *LOS_PhysPagesAllocContiguous(size_t nPages)
{
    LosVmPage *page = NULL;

    if (nPages == 0) {
        return NULL;
    }

    page = OsVmPhysPagesGet(nPages); // a physically contiguous run from the buddy allocator
    if (page == NULL) {
        return NULL;
    }

    return OsVmPageToVaddr(page); // hand back the kernel virtual address
}

/// Free nPages of address-contiguous physical memory
VOID LOS_PhysPagesFreeContiguous(VOID *ptr, size_t nPages)
{
    UINT32 intSave;
@@ -492,17 +736,29 @@ VOID LOS_PhysPagesFreeContiguous(VOID *ptr, size_t nPages)
        return;
    }

    page = OsVmVaddrToPage(ptr); // page frame behind the virtual address
    if (page == NULL) {
        VM_ERR("vm page of ptr(%#x) is null", ptr);
        return;
    }
    page->nPages = 0; // no longer counted as an allocation

    seg = &g_vmPhysSeg[page->segID];
    LOS_SpinLockSave(&seg->freeListLock, &intSave);

    OsVmPhysPagesFreeContiguous(page, nPages); // the actual release
    LOS_SpinUnlockRestore(&seg->freeListLock, intSave);

#ifdef LOSCFG_KERNEL_PLIMITS
@@ -517,7 +773,11 @@ PADDR_T OsKVaddrToPaddr(VADDR_T kvaddr)
    }
    return (kvaddr - KERNEL_ASPACE_BASE + SYS_MEM_BASE);
}

/// Get the kernel virtual address for a physical address
VADDR_T *LOS_PaddrToKVaddr(PADDR_T paddr)
{
    struct VmPhysSeg *seg = NULL;
@@ -533,10 +793,17 @@ VADDR_T *LOS_PaddrToKVaddr(PADDR_T paddr)
            return (VADDR_T *)(UINTPTR)(paddr - SYS_MEM_BASE + KERNEL_ASPACE_BASE);
        }
    }

    return (VADDR_T *)(UINTPTR)(paddr - SYS_MEM_BASE + KERNEL_ASPACE_BASE); // fall back to the linear kernel mapping
}

/// Free a single page frame
VOID LOS_PhysPageFree(LosVmPage *page)
{
    UINT32 intSave;
@@ -546,12 +813,21 @@ VOID LOS_PhysPageFree(LosVmPage *page)
        return;
    }

    if (LOS_AtomicDecRet(&page->refCounts) <= 0) { // the last reference is gone
        seg = &g_vmPhysSeg[page->segID];
        LOS_SpinLockSave(&seg->freeListLock, &intSave);

        OsVmPhysPagesFreeContiguous(page, ONE_PAGE); // free the one page
        LOS_AtomicSet(&page->refCounts, 0); // once the memory is freed, the refcount must be reset to 0

        LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
    }

#ifdef LOSCFG_KERNEL_PLIMITS
    OsMemLimitMemFree(PAGE_SIZE);
#endif
}

/// Allocate a single page frame
LosVmPage *LOS_PhysPageAlloc(VOID)
{
    return OsVmPhysPagesGet(ONE_PAGE);
}

/// Allocate nPages page frames one by one, appending each to the given list
size_t LOS_PhysPagesAlloc(size_t nPages, LOS_DL_LIST *list)
{
    LosVmPage *page = NULL;
@@ -585,17 +870,29 @@ size_t LOS_PhysPagesAlloc(size_t nPages, LOS_DL_LIST *list)
    }

    while (nPages--) {
        page = OsVmPhysPagesGet(ONE_PAGE); // one page at a time from the buddy allocator
        if (page == NULL) {
            break;
        }
        LOS_ListTailInsert(list, &page->node); // append the new page node to the caller's list
        count++;
    }

    return count;
}
/// Copy a shared page (the copy-on-write path)
VOID OsPhysSharePageCopy(PADDR_T oldPaddr, PADDR_T *newPaddr, LosVmPage *newPage)
{
    UINT32 intSave;
@@ -609,12 +906,35 @@ VOID OsPhysSharePageCopy(PADDR_T oldPaddr, PADDR_T *newPaddr, LosVmPage *newPage
        return;
    }

    oldPage = LOS_VmPageGet(oldPaddr); // page frame of the old physical address
    if (oldPage == NULL) {
        VM_ERR("invalid oldPaddr %p", oldPaddr);
        return;
    }

    seg = &g_vmPhysSeg[oldPage->segID]; // the owning segment
    LOS_SpinLockSave(&seg->freeListLock, &intSave);
    if (LOS_AtomicRead(&oldPage->refCounts) == 1) { // only one user: no copy needed, just reuse the old page
        *newPaddr = oldPaddr;
    } else {
        newMem = LOS_PaddrToKVaddr(*newPaddr);
        oldMem = LOS_PaddrToKVaddr(oldPaddr);
        if ((newMem == NULL) || (oldMem == NULL)) {
            LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
            return;
        }
        if (memcpy_s(newMem, PAGE_SIZE, oldMem, PAGE_SIZE) != EOK) {
            VM_ERR("memcpy_s failed");
        }

        LOS_AtomicInc(&newPage->refCounts); // the new page gains a reference
        LOS_AtomicDec(&oldPage->refCounts); // the old page loses one
    }
    LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
    return;
}

/// Get the segment that a page frame belongs to
struct VmPhysSeg *OsVmPhysSegGet(LosVmPage *page)
{
    if ((page == NULL) || (page->segID >= VM_PHYS_SEG_MAX)) {
        return NULL;
    }

    return (OsGVmPhysSegGet() + page->segID); // equivalent to OsGVmPhysSegGet()[page->segID]
}

/// Smallest buddy order covering nPages, e.g. 7 pages -> order 3 (2^3 = 8)
UINT32 OsVmPagesToOrder(size_t nPages)
{
    UINT32 order;
@@ -654,7 +985,11 @@ UINT32 OsVmPagesToOrder(size_t nPages)
    return order;
}

/// Free every node on the list; in essence the frames return to the buddy order lists
size_t LOS_PhysPagesFree(LOS_DL_LIST *list)
{
    UINT32 intSave;
@@ -667,6 +1002,18 @@ size_t LOS_PhysPagesFree(LOS_DL_LIST *list)
        return 0;
    }

    LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(page, nPage, list, LosVmPage, node) {
        LOS_ListDelete(&page->node); // unlink the page first
        if (LOS_AtomicDecRet(&page->refCounts) <= 0) { // no references left
            seg = &g_vmPhysSeg[page->segID];
            LOS_SpinLockSave(&seg->freeListLock, &intSave);
            OsVmPhysPagesFreeContiguous(page, ONE_PAGE);
            LOS_AtomicSet(&page->refCounts, 0);
            LOS_SpinUnlockRestore(&seg->freeListLock, intSave);
        }
        count++; // on to the next node
    }

    return count;
@@ -691,3 +1039,7 @@ VADDR_T *LOS_PaddrToKVaddr(PADDR_T paddr)
    return (VADDR_T *)DMA_TO_VMM_ADDR(paddr);
}
#endif
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_scan.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_scan.c
index 01624eac..7c79155d 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_scan.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_scan.c
@@ -37,10 +40,13 @@

#ifdef LOSCFG_KERNEL_VM

/* unmap a lru page by map record info caller need lru lock */
/*!
 * Remove the mapping between a file page and one process (its MMU);
 * the info argument records that process's MMU
 */
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_scan.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_scan.c
index 01624eac..7c79155d 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_scan.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_scan.c
@@ -37,10 +37,13 @@
 #ifdef LOSCFG_KERNEL_VM
 /* unmap a lru page by map record info caller need lru lock */
+<<<<<<< HEAD
+=======
/**************************************************************************************************
 Unmap a file page from a process's MMU; the info parameter records that process's MMU
**************************************************************************************************/
+>>>>>>> remotes/origin/main
 VOID OsUnmapPageLocked(LosFilePage *page, LosMapInfo *info)
 {
     if (page == NULL || info == NULL) {
@@ -51,21 +54,101 @@ VOID OsUnmapPageLocked(LosFilePage *page, LosMapInfo *info)
     LOS_ListDelete(&info->node);
     LOS_AtomicDec(&page->vmPage->refCounts);
     LOS_ArchMmuUnmap(info->archMmu, info->vaddr, 1);
+<<<<<<< HEAD
+    LOS_MemFree(m_aucSysMem0, info);
+}
+
+=======
     LOS_MemFree(m_aucSysMem0, info); // free the map record
 }
/// Unmap a file page from every process that maps it
+>>>>>>> remotes/origin/main
 VOID OsUnmapAllLocked(LosFilePage *page)
 {
     LosMapInfo *info = NULL;
     LosMapInfo *next = NULL;
     LOS_DL_LIST *immap = &page->i_mmap;
+<<<<<<< HEAD
+
+    LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(info, next, immap, LosMapInfo, node) {
+=======
     LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(info, next, immap, LosMapInfo, node) { // iterate over the immap info list
+>>>>>>> remotes/origin/main
         OsUnmapPageLocked(page, info);
     }
 }
 /* add a new lru node to lru list, lruType can be file or anon */
+<<<<<<< HEAD
+VOID OsLruCacheAdd(LosFilePage *fpage, enum OsLruList lruType)
+{
+    UINT32 intSave;
+    LosVmPhysSeg *physSeg = fpage->physSeg;
+    LosVmPage *page = fpage->vmPage;
+
+    LOS_SpinLockSave(&physSeg->lruLock, &intSave);
+    OsSetPageActive(page);
+    OsCleanPageReferenced(page);
+    physSeg->lruSize[lruType]++;
+    LOS_ListTailInsert(&physSeg->lruList[lruType], &fpage->lru);
+
+    LOS_SpinUnlockRestore(&physSeg->lruLock, intSave);
+}
+
+/* delete a lru node, caller need hold lru_lock */
+VOID OsLruCacheDel(LosFilePage *fpage)
+{
+    LosVmPhysSeg *physSeg = fpage->physSeg;
+    int type = OsIsPageActive(fpage->vmPage) ? VM_LRU_ACTIVE_FILE : VM_LRU_INACTIVE_FILE;
+
+    physSeg->lruSize[type]--;
+    LOS_ListDelete(&fpage->lru);
+}
+
+BOOL OsInactiveListIsLow(LosVmPhysSeg *physSeg)
+{
+    return (physSeg->lruSize[VM_LRU_ACTIVE_FILE] >
+            physSeg->lruSize[VM_LRU_INACTIVE_FILE]) ? TRUE : FALSE;
+}
+
+/* move a page from inactive list to active list head */
+STATIC INLINE VOID OsMoveToActiveList(LosFilePage *fpage)
+{
+    LosVmPhysSeg *physSeg = fpage->physSeg;
+
+    physSeg->lruSize[VM_LRU_ACTIVE_FILE]++;
+    physSeg->lruSize[VM_LRU_INACTIVE_FILE]--;
+    LOS_ListDelete(&fpage->lru);
+    LOS_ListTailInsert(&physSeg->lruList[VM_LRU_ACTIVE_FILE], &fpage->lru);
+}
+
+/* move a page from active list to inactive list head */
+STATIC INLINE VOID OsMoveToInactiveList(LosFilePage *fpage)
+{
+    LosVmPhysSeg *physSeg = fpage->physSeg;
+
+    physSeg->lruSize[VM_LRU_ACTIVE_FILE]--;
+    physSeg->lruSize[VM_LRU_INACTIVE_FILE]++;
+    LOS_ListDelete(&fpage->lru);
+    LOS_ListTailInsert(&physSeg->lruList[VM_LRU_INACTIVE_FILE], &fpage->lru);
+}
+
+/* move a page to the most active pos in lru list(active head) */
+STATIC INLINE VOID OsMoveToActiveHead(LosFilePage *fpage)
+{
+    LosVmPhysSeg *physSeg = fpage->physSeg;
+    LOS_ListDelete(&fpage->lru);
+    LOS_ListTailInsert(&physSeg->lruList[VM_LRU_ACTIVE_FILE], &fpage->lru);
+}
+
+/* move a page to the most active pos in lru list(inactive head) */
+STATIC INLINE VOID OsMoveToInactiveHead(LosFilePage *fpage)
+{
+    LosVmPhysSeg *physSeg = fpage->physSeg;
+    LOS_ListDelete(&fpage->lru);
+    LOS_ListTailInsert(&physSeg->lruList[VM_LRU_INACTIVE_FILE], &fpage->lru);
+=======
 VOID OsLruCacheAdd(LosFilePage *fpage, enum OsLruList lruType) // add a new lru node to the lru list; lruType can be file or anon
 {
     UINT32 intSave;
@@ -133,6 +216,7 @@ STATIC INLINE VOID OsMoveToInactiveHead(LosFilePage *fpage) // HarmonyOS reclaims from the inactive list first
     LosVmPhysSeg *physSeg = fpage->physSeg; // get the segment the page belongs to
     LOS_ListDelete(&fpage->lru); // take the page off its lru list
     LOS_ListTailInsert(&physSeg->lruList[VM_LRU_INACTIVE_FILE], &fpage->lru); // insert into the inactive-page circular list
+>>>>>>> remotes/origin/main
 }
 /* page referenced add: (call by page cache get)
@@ -142,7 +226,11 @@
 ref:0, act:0 --> ref:1, act:0
 ref:1, act:0 --> ref:0, act:1
 ref:0, act:1 --> ref:1, act:1
 */
+<<<<<<< HEAD
+VOID OsPageRefIncLocked(LosFilePage *fpage)
+=======
 VOID OsPageRefIncLocked(LosFilePage *fpage) // ref/act flag transition logic
+>>>>>>> remotes/origin/main
 {
     BOOL isOrgActive;
     UINT32 intSave;
@@ -152,6 +240,18 @@ VOID OsPageRefIncLocked(LosFilePage *fpage)
         return;
     }
+<<<<<<< HEAD
+    LOS_SpinLockSave(&fpage->physSeg->lruLock, &intSave);
+
+    page = fpage->vmPage;
+    isOrgActive = OsIsPageActive(page);
+
+    if (OsIsPageReferenced(page) && !OsIsPageActive(page)) {
+        OsCleanPageReferenced(page);
+        OsSetPageActive(page);
+    } else if (!OsIsPageReferenced(page)) {
+        OsSetPageReferenced(page);
+=======
     LOS_SpinLockSave(&fpage->physSeg->lruLock, &intSave); // take the lock before touching the lru list
     page = fpage->vmPage; // get the physical page frame
@@ -162,6 +262,7 @@ VOID OsPageRefIncLocked(LosFilePage *fpage)
         OsSetPageActive(page); // tag the page active
     } else if (!OsIsPageReferenced(page)) {
         OsSetPageReferenced(page); // ref:0, act:0 --> ref:1, act:0
+>>>>>>> remotes/origin/main
     }
     if (!isOrgActive && OsIsPageActive(page)) {
@@ -179,14 +280,22 @@ VOID OsPageRefIncLocked(LosFilePage *fpage)
     LOS_SpinUnlockRestore(&fpage->physSeg->lruLock, intSave);
 }
+<<<<<<< HEAD
+/* page referenced dec: (call by shrinker)
+=======
 /* page referenced dec: (call by shrinker)
+>>>>>>> remotes/origin/main
 ----------inactive----------|----------active------------
 [ref:0,act:0], [ref:1,act:0]|[ref:0,act:1], [ref:1,act:1]
 ref:1, act:1 --> ref:0, act:1
 ref:0, act:1 --> ref:1, act:0
 ref:1, act:0 --> ref:0, act:0
 */
+<<<<<<< HEAD
+VOID OsPageRefDecNoLock(LosFilePage *fpage)
+=======
 VOID OsPageRefDecNoLock(LosFilePage *fpage) // ref/act flag transition logic
+>>>>>>> remotes/origin/main
 {
     BOOL isOrgActive;
     LosVmPage *page = NULL;
@@ -198,7 +307,11 @@ VOID OsPageRefDecNoLock(LosFilePage *fpage)
     page = fpage->vmPage;
     isOrgActive = OsIsPageActive(page);
+<<<<<<< HEAD
+    if (!OsIsPageReferenced(page) && OsIsPageActive(page)) {
+=======
     if (!OsIsPageReferenced(page) && OsIsPageActive(page)) { // the [ref:0, act:1] case
+>>>>>>> remotes/origin/main
         OsCleanPageActive(page);
         OsSetPageReferenced(page);
     } else if (OsIsPageReferenced(page)) {
@@ -209,13 +322,38 @@ VOID OsPageRefDecNoLock(LosFilePage *fpage)
         OsMoveToInactiveList(fpage);
     }
 }
+<<<<<<< HEAD
+
+=======
/// Shrink the active page list
+>>>>>>> remotes/origin/main
 VOID OsShrinkActiveList(LosVmPhysSeg *physSeg, int nScan)
 {
     LosFilePage *fpage = NULL;
     LosFilePage *fnext = NULL;
     LOS_DL_LIST *activeFile = &physSeg->lruList[VM_LRU_ACTIVE_FILE];
+<<<<<<< HEAD
+    LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, activeFile, LosFilePage, lru) {
+        if (LOS_SpinTrylock(&fpage->mapping->list_lock) != LOS_OK) {
+            continue;
+        }
+
+        /* happened when caller hold cache lock and try reclaim this page */
+        if (OsIsPageLocked(fpage->vmPage)) {
+            LOS_SpinUnlock(&fpage->mapping->list_lock);
+            continue;
+        }
+
+        if (OsIsPageMapped(fpage) && (fpage->flags & VM_MAP_REGION_FLAG_PERM_EXECUTE)) {
+            LOS_SpinUnlock(&fpage->mapping->list_lock);
+            continue;
+        }
+
+        OsPageRefDecNoLock(fpage);
+
+        LOS_SpinUnlock(&fpage->mapping->list_lock);
+=======
     LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, activeFile, LosFilePage, lru) { // process page by page
         if (LOS_SpinTrylock(&fpage->mapping->list_lock) != LOS_OK) { // try to take the page_mapping lock of the file page
             continue; // move on to the next file page
@@ -235,13 +373,18 @@ VOID OsShrinkActiveList(LosVmPhysSeg *physSeg, int nScan)
         OsPageRefDecNoLock(fpage); // age the page toward the inactive file list
         LOS_SpinUnlock(&fpage->mapping->list_lock); // release the page_mapping lock
+>>>>>>> remotes/origin/main
         if (--nScan <= 0) {
             break;
         }
     }
 }
+<<<<<<< HEAD
+
+=======
/// Shrink the inactive page list
+>>>>>>> remotes/origin/main
 int OsShrinkInactiveList(LosVmPhysSeg *physSeg, int nScan, LOS_DL_LIST *list)
 {
     UINT32 nrReclaimed = 0;
@@ -252,6 +395,38 @@ int OsShrinkInactiveList(LosVmPhysSeg *physSeg, int nScan, LOS_DL_LIST *list)
     LosFilePage *ftemp = NULL;
     LOS_DL_LIST *inactive_file = &physSeg->lruList[VM_LRU_INACTIVE_FILE];
+<<<<<<< HEAD
+    LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, inactive_file, LosFilePage, lru) {
+        flock = &fpage->mapping->list_lock;
+
+        if (LOS_SpinTrylock(flock) != LOS_OK) {
+            continue;
+        }
+
+        page = fpage->vmPage;
+        if (OsIsPageLocked(page)) {
+            LOS_SpinUnlock(flock);
+            continue;
+        }
+
+        if (OsIsPageMapped(fpage) && (OsIsPageDirty(page) || (fpage->flags & VM_MAP_REGION_FLAG_PERM_EXECUTE))) {
+            LOS_SpinUnlock(flock);
+            continue;
+        }
+
+        if (OsIsPageDirty(page)) {
+            ftemp = OsDumpDirtyPage(fpage);
+            if (ftemp != NULL) {
+                LOS_ListTailInsert(list, &ftemp->node);
+            }
+        }
+
+        OsDeletePageCacheLru(fpage);
+        LOS_SpinUnlock(flock);
+        nrReclaimed++;
+
+        if (--nScan <= 0) {
+=======
     LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, inactive_file, LosFilePage, lru) { // walk the list one page at a time
         flock = &fpage->mapping->list_lock;
@@ -282,6 +457,7 @@ int OsShrinkInactiveList(LosVmPhysSeg *physSeg, int nScan, LOS_DL_LIST *list)
         nrReclaimed++; // one page successfully reclaimed
         if (--nScan <= 0) { // stop once the scan budget is used up
+>>>>>>> remotes/origin/main
             break;
         }
     }
@@ -290,25 +466,62 @@ int OsShrinkInactiveList(LosVmPhysSeg *physSeg, int nScan, LOS_DL_LIST *list)
 }
 #ifdef LOSCFG_FS_VFS
+<<<<<<< HEAD
+int OsTryShrinkMemory(size_t nPage)
+=======
 int OsTryShrinkMemory(size_t nPage) // try to shrink file pages
+>>>>>>> remotes/origin/main
 {
     UINT32 intSave;
     size_t totalPages;
     size_t nReclaimed = 0;
     LosVmPhysSeg *physSeg = NULL;
     UINT32 index;
+<<<<<<< HEAD
+    LOS_DL_LIST_HEAD(dirtyList);
+=======
     LOS_DL_LIST_HEAD(dirtyList); // init the dirty list; all dirty pages are hung here to be synced to disk and then reclaimed
+>>>>>>> remotes/origin/main
     LosFilePage *fpage = NULL;
     LosFilePage *fnext = NULL;
     if (nPage == 0) {
+<<<<<<< HEAD
+        nPage = VM_FILEMAP_MIN_SCAN;
+=======
         nPage = VM_FILEMAP_MIN_SCAN;
+>>>>>>> remotes/origin/main
     }
     if (nPage > VM_FILEMAP_MAX_SCAN) {
         nPage = VM_FILEMAP_MAX_SCAN;
     }
+<<<<<<< HEAD
+    for (index = 0; index < g_vmPhysSegNum; index++) {
+        physSeg = &g_vmPhysSeg[index];
+        LOS_SpinLockSave(&physSeg->lruLock, &intSave);
+        totalPages = physSeg->lruSize[VM_LRU_ACTIVE_FILE] + physSeg->lruSize[VM_LRU_INACTIVE_FILE];
+        if (totalPages < VM_FILEMAP_MIN_SCAN) {
+            LOS_SpinUnlockRestore(&physSeg->lruLock, intSave);
+            continue;
+        }
+
+        if (OsInactiveListIsLow(physSeg)) {
+            OsShrinkActiveList(physSeg, (nPage < VM_FILEMAP_MIN_SCAN) ? VM_FILEMAP_MIN_SCAN : nPage);
+        }
+
+        nReclaimed += OsShrinkInactiveList(physSeg, nPage, &dirtyList);
+        LOS_SpinUnlockRestore(&physSeg->lruLock, intSave);
+
+        if (nReclaimed >= nPage) {
+            break;
+        }
+    }
+
+    LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &dirtyList, LosFilePage, node) {
+        OsDoFlushDirtyPage(fpage);
+=======
     for (index = 0; index < g_vmPhysSegNum; index++) { // iterate over all physical segments
         physSeg = &g_vmPhysSeg[index]; // one segment at a time
         LOS_SpinLockSave(&physSeg->lruLock, &intSave);
@@ -332,6 +545,7 @@ int OsTryShrinkMemory(size_t nPage) // try to shrink file pages
     LOS_DL_LIST_FOR_EACH_ENTRY_SAFE(fpage, fnext, &dirtyList, LosFilePage, node) { // walk and process the dirty pages
         OsDoFlushDirtyPage(fpage); // flush the dirty page, writing its data back to disk
+>>>>>>> remotes/origin/main
     }
     return nReclaimed;
@@ -344,4 +558,8 @@ int OsTryShrinkMemory(size_t nPage)
 #endif
 #endif
-#endif
\ No newline at end of file
+<<<<<<< HEAD
+#endif
+=======
+#endif
+>>>>>>> remotes/origin/main
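The ref/act tables in los_vm_scan.c above define a two-bit aging scheme: an access promotes a page one step toward active, and a shrinker pass demotes it one step toward reclaimable. A standalone model of just those transitions (REF and ACT are illustrative flags, not the kernel's real page-flag layout):

    /* Two-bit page age: bit 0 = referenced, bit 1 = active (illustrative encoding). */
    enum { REF = 1, ACT = 2 };

    static unsigned RefInc(unsigned s) /* page-cache get */
    {
        if ((s & REF) && !(s & ACT)) {
            return ACT;          /* ref:1,act:0 -> ref:0,act:1 (promoted to active) */
        }
        if (!(s & REF)) {
            return s | REF;      /* ref:0,act:x -> ref:1,act:x */
        }
        return s;                /* ref:1,act:1 is already the hottest state */
    }

    static unsigned RefDec(unsigned s) /* shrinker pass */
    {
        if (!(s & REF) && (s & ACT)) {
            return REF;          /* ref:0,act:1 -> ref:1,act:0 (demoted to inactive) */
        }
        if (s & REF) {
            return s & ~REF;     /* ref:1,act:x -> ref:0,act:x */
        }
        return s;                /* ref:0,act:0 is ready to reclaim */
    }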
diff --git a/src/kernel_liteos_a/kernel/base/vm/los_vm_syscall.c b/src/kernel_liteos_a/kernel/base/vm/los_vm_syscall.c
index 72bdd10b..b0ea89e0 100644
--- a/src/kernel_liteos_a/kernel/base/vm/los_vm_syscall.c
+++ b/src/kernel_liteos_a/kernel/base/vm/los_vm_syscall.c
@@ -65,7 +65,11 @@ STATUS_T OsCheckMMapParams(VADDR_T *vaddr, unsigned long flags, size_t len, unsi
         return -EINVAL;
     }
+<<<<<<< HEAD
+    if ((flags & MAP_SUPPORT_MASK) == 0) {
+=======
     if ((flags & MAP_SUPPORT_MASK) == 0) { // restrict to the supported mapping flags
+>>>>>>> remotes/origin/main
         return -EINVAL;
     }
     if (((flags & MAP_SHARED_PRIVATE) == 0) || ((flags & MAP_SHARED_PRIVATE) == MAP_SHARED_PRIVATE)) {
@@ -95,13 +99,19 @@ STATUS_T OsNamedMmapingPermCheck(struct file *filep, unsigned long flags, unsign
     return LOS_OK;
 }
+<<<<<<< HEAD
+
+=======
/// Anonymous mapping
+>>>>>>> remotes/origin/main
 STATUS_T OsAnonMMap(LosVmMapRegion *region)
 {
     LOS_SetRegionTypeAnon(region);
     return LOS_OK;
 }
+<<<<<<< HEAD
+=======
/**
 mmap basics:
 A way of memory-mapping files: a file (or another object) is mapped into the process's address
 space, establishing a one-to-one correspondence between the file's on-disk locations and a range
 of virtual addresses in the process's address space.
@@ -136,13 +146,19 @@ STATUS_T OsAnonMMap(LosVmMapRegion *region)
 On success: returns the virtual memory address, which is page aligned.
 On failure: returns (void *)-1.
*/
+>>>>>>> remotes/origin/main
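A minimal user-space use of the call documented above, under the usual POSIX contract (the file path is an arbitrary example):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/etc/profile", O_RDONLY); /* any readable file */
        if (fd < 0) {
            return 1;
        }
        /* Map one page; on success the returned address is page aligned. */
        void *p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) { /* i.e. (void *)-1 */
            close(fd);
            return 1;
        }
        printf("first byte: 0x%02x\n", *(unsigned char *)p);
        munmap(p, 4096);
        close(fd);
        return 0;
    }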
 VADDR_T LOS_MMap(VADDR_T vaddr, size_t len, unsigned prot, unsigned long flags, int fd, unsigned long pgoff)
 {
     STATUS_T status;
     VADDR_T resultVaddr;
     UINT32 regionFlags;
+<<<<<<< HEAD
+    LosVmMapRegion *newRegion = NULL;
+    struct file *filep = NULL;
+=======
     LosVmMapRegion *newRegion = NULL; // a user-space allocation maps, inside the kernel, to allocating a region
     struct file *filep = NULL; // inode : file = 1:N; one inode can be opened by many processes, each getting its own file that points to the same inode
+>>>>>>> remotes/origin/main
     LosVmSpace *vmSpace = OsCurrProcessGet()->vmSpace;
     len = ROUNDUP(len, PAGE_SIZE);
@@ -150,9 +166,15 @@ VADDR_T LOS_MMap(VADDR_T vaddr, size_t len, unsigned prot, unsigned long flags,
     if (checkRst != LOS_OK) {
         return checkRst;
     }
+<<<<<<< HEAD
+
+    if (LOS_IsNamedMapping(flags)) {
+        status = fs_getfilep(fd, &filep);
+=======
     if (LOS_IsNamedMapping(flags)) { // is this a file-backed mapping?
         status = fs_getfilep(fd, &filep); // get the file pointer for the descriptor
+>>>>>>> remotes/origin/main
         if (status < 0) {
             return -EBADF;
         }
@@ -165,6 +187,32 @@ VADDR_T LOS_MMap(VADDR_T vaddr, size_t len, unsigned prot, unsigned long flags,
     (VOID)LOS_MuxAcquire(&vmSpace->regionMux);
     /* user mode calls mmap to release heap physical memory without releasing heap virtual space */
+<<<<<<< HEAD
+    status = OsUserHeapFree(vmSpace, vaddr, len);
+    if (status == LOS_OK) {
+        resultVaddr = vaddr;
+        goto MMAP_DONE;
+    }
+
+    regionFlags = OsCvtProtFlagsToRegionFlags(prot, flags);
+    newRegion = LOS_RegionAlloc(vmSpace, vaddr, len, regionFlags, pgoff);
+    if (newRegion == NULL) {
+        resultVaddr = (VADDR_T)-ENOMEM;
+        goto MMAP_DONE;
+    }
+    newRegion->regionFlags |= VM_MAP_REGION_FLAG_MMAP;
+    resultVaddr = newRegion->range.base;
+
+    if (LOS_IsNamedMapping(flags)) {
+        status = OsNamedMMap(filep, newRegion);
+    } else {
+        status = OsAnonMMap(newRegion);
+    }
+
+    if (status != LOS_OK) {
+        LOS_RbDelNode(&vmSpace->regionRbTree, &newRegion->rbNode);
+        LOS_RegionFree(vmSpace, newRegion);
+=======
     status = OsUserHeapFree(vmSpace, vaddr, len); // user mode frees the heap's physical memory without freeing its virtual space
     if (status == LOS_OK) { // OsUserHeapFree does two things: 1. remove the mappings 2. free the physical pages
         resultVaddr = vaddr;
@@ -189,6 +237,7 @@ VADDR_T LOS_MMap(VADDR_T vaddr, size_t len, unsigned prot, unsigned long flags,
     if (status != LOS_OK) {
         LOS_RbDelNode(&vmSpace->regionRbTree, &newRegion->rbNode); // remove from the red-black tree and the doubly linked list
         LOS_RegionFree(vmSpace, newRegion); // free it
+>>>>>>> remotes/origin/main
         resultVaddr = (VADDR_T)-ENOMEM;
         goto MMAP_DONE;
     }
@@ -197,7 +246,11 @@ MMAP_DONE:
     (VOID)LOS_MuxRelease(&vmSpace->regionMux);
     return resultVaddr;
 }
+<<<<<<< HEAD
+
+=======
/// Remove a mapping
+>>>>>>> remotes/origin/main
 STATUS_T LOS_UnMMap(VADDR_T addr, size_t size)
 {
     if ((addr <= 0) || (size == 0)) {
@@ -206,6 +259,10 @@ STATUS_T LOS_UnMMap(VADDR_T addr, size_t size)
     return OsUnMMap(OsCurrProcessGet()->vmSpace, addr, size);
 }
+<<<<<<< HEAD
+
+=======
+>>>>>>> remotes/origin/main
 STATIC INLINE BOOL OsProtMprotectPermCheck(unsigned long prot, LosVmMapRegion *region)
 {
     UINT32 protFlags = 0;
@@ -219,11 +276,26 @@ STATIC INLINE BOOL OsProtMprotectPermCheck(unsigned long prot, LosVmMapRegion *r
     return ((protFlags & permFlags) == protFlags);
 }
+<<<<<<< HEAD
+
+=======
/// Shrink the heap
+>>>>>>> remotes/origin/main
 VOID *OsShrinkHeap(VOID *addr, LosVmSpace *space)
 {
     VADDR_T newBrk, oldBrk;
+<<<<<<< HEAD
+    newBrk = LOS_Align((VADDR_T)(UINTPTR)addr, PAGE_SIZE);
+    oldBrk = LOS_Align(space->heapNow, PAGE_SIZE);
+    if (LOS_UnMMap(newBrk, (oldBrk - newBrk)) < 0) {
+        return (void *)(UINTPTR)space->heapNow;
+    }
+    space->heapNow = (VADDR_T)(UINTPTR)addr;
+    return addr;
+}
+
+=======
     newBrk = LOS_Align((VADDR_T)(UINTPTR)addr, PAGE_SIZE); // the new heap top
     oldBrk = LOS_Align(space->heapNow, PAGE_SIZE); // the old heap top
     if (LOS_UnMMap(newBrk, (oldBrk - newBrk)) < 0) { // unmap the range between the two
@@ -241,6 +313,7 @@ VOID *OsShrinkHeap(VOID *addr, LosVmSpace *space)
 This produces fragmented regions; when reclaiming a region the kernel checks whether it can be
 merged with its neighbours into a single larger region for allocation.
*/
+>>>>>>> remotes/origin/main
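LOS_DoBrk below implements the classic break-pointer contract: passed NULL it reports the current heap top, a lower address shrinks the heap through OsShrinkHeap, and a higher one grows it. From user space this is normally reached through the libc's brk/sbrk; note that musl (which OpenHarmony uses) largely restricts sbrk, so treat this as a sketch of the contract rather than portable code:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        void *top = sbrk(0);            /* query only; kernel side: LOS_DoBrk(NULL) */
        printf("heap top: %p\n", top);

        if (sbrk(4096) == (void *)-1) { /* grow the heap by one page */
            return 1;
        }
        sbrk(-4096);                    /* shrink again; the kernel unmaps the difference */
        return 0;
    }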
 VOID *LOS_DoBrk(VOID *addr)
 {
     LosVmSpace *space = OsCurrProcessGet()->vmSpace;
@@ -250,6 +323,24 @@ VOID *LOS_DoBrk(VOID *addr)
     VOID *alignAddr = NULL;
     VOID *shrinkAddr = NULL;
+<<<<<<< HEAD
+    if (addr == NULL) {
+        return (void *)(UINTPTR)space->heapNow;
+    }
+
+    if ((UINTPTR)addr < (UINTPTR)space->heapBase) {
+        return (VOID *)-ENOMEM;
+    }
+
+    size = (UINTPTR)addr - (UINTPTR)space->heapBase;
+    size = ROUNDUP(size, PAGE_SIZE);
+    alignAddr = (CHAR *)(UINTPTR)(space->heapBase) + size;
+    PRINT_INFO("brk addr %p , size 0x%x, alignAddr %p, align %d\n", addr, size, alignAddr, PAGE_SIZE);
+
+    (VOID)LOS_MuxAcquire(&space->regionMux);
+    if (addr < (VOID *)(UINTPTR)space->heapNow) {
+        shrinkAddr = OsShrinkHeap(addr, space);
+=======
     if (addr == NULL) { // no address passed
         return (void *)(UINTPTR)space->heapNow; // extend from the current heap pointer
     }
@@ -266,25 +357,46 @@ VOID *LOS_DoBrk(VOID *addr)
     (VOID)LOS_MuxAcquire(&space->regionMux);
     if (addr < (VOID *)(UINTPTR)space->heapNow) { // the address is below the current heap top
         shrinkAddr = OsShrinkHeap(addr, space); // shrink the heap
+>>>>>>> remotes/origin/main
         (VOID)LOS_MuxRelease(&space->regionMux);
         return shrinkAddr;
     }
+<<<<<<< HEAD
+    if ((UINTPTR)alignAddr >= space->mapBase) {
+        VM_ERR("Process heap memory space is insufficient");
+=======
     if ((UINTPTR)alignAddr >= space->mapBase) { // the address reaches into the mapping area
         VM_ERR("Process heap memory space is insufficient"); // insufficient process heap space
+>>>>>>> remotes/origin/main
         ret = (VOID *)-ENOMEM;
         goto REGION_ALLOC_FAILED;
     }
+<<<<<<< HEAD
+    if (space->heapBase == space->heapNow) {
+        region = LOS_RegionAlloc(space, space->heapBase, size,
+                                 VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE |
+=======
     if (space->heapBase == space->heapNow) { // usually only on the first call, since heapBase == heapNow after init
         region = LOS_RegionAlloc(space, space->heapBase, size, // allocate one readable/writable/user-accessible region, once only
                                  VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE | // the region's extent is given by range.size
+>>>>>>> remotes/origin/main
                                  VM_MAP_REGION_FLAG_FIXED | VM_MAP_REGION_FLAG_PERM_USER, 0);
         if (region == NULL) {
             ret = (VOID *)-ENOMEM;
             VM_ERR("LOS_RegionAlloc failed");
             goto REGION_ALLOC_FAILED;
         }
+<<<<<<< HEAD
+        region->regionFlags |= VM_MAP_REGION_FLAG_HEAP;
+        space->heap = region;
+    }
+
+    space->heapNow = (VADDR_T)(UINTPTR)alignAddr;
+    space->heap->range.size = size;
+    ret = (VOID *)(UINTPTR)space->heapNow;
+=======
         region->regionFlags |= VM_MAP_REGION_FLAG_HEAP; // tag the region as heap; note that a region may carry several tags
         space->heap = region; // designate this region as the heap
     }
@@ -292,18 +404,28 @@ VOID *LOS_DoBrk(VOID *addr)
     space->heapNow = (VADDR_T)(UINTPTR)alignAddr; // update the heap top
     space->heap->range.size = size; // update the heap size; the region grows or shrinks with it
     ret = (VOID *)(UINTPTR)space->heapNow; // return the heap top
+>>>>>>> remotes/origin/main
REGION_ALLOC_FAILED:
     (VOID)LOS_MuxRelease(&space->regionMux);
     return ret;
 }
+<<<<<<< HEAD
+
+=======
/// Inherit the old region's type tags
+>>>>>>> remotes/origin/main
 STATIC UINT32 OsInheritOldRegionName(UINT32 oldRegionFlags)
 {
     UINT32 vmFlags = 0;
+<<<<<<< HEAD
+    if (oldRegionFlags & VM_MAP_REGION_FLAG_HEAP) {
+        vmFlags |= VM_MAP_REGION_FLAG_HEAP;
+=======
     if (oldRegionFlags & VM_MAP_REGION_FLAG_HEAP) { // if it was carved out of the heap region
         vmFlags |= VM_MAP_REGION_FLAG_HEAP; // tag the new region as heap too
+>>>>>>> remotes/origin/main
     } else if (oldRegionFlags & VM_MAP_REGION_FLAG_STACK) {
         vmFlags |= VM_MAP_REGION_FLAG_STACK;
     } else if (oldRegionFlags & VM_MAP_REGION_FLAG_TEXT) {
@@ -318,7 +440,11 @@ STATIC UINT32 OsInheritOldRegionName(UINT32 oldRegionFlags)
     return vmFlags;
 }
+<<<<<<< HEAD
+
+=======
/// Change the access permissions of a memory region
+>>>>>>> remotes/origin/main
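LOS_DoMprotect below enforces the constraints spelled out in its checks: the address must be page aligned, the range must stay within one region, and heap or VDSO regions are refused. A user-space sketch of the happy path on a mapping the caller owns:

    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return 1;
        }
        memset(p, 0, len);
        /* Drop the write permission; later writes through p would fault. */
        if (mprotect(p, len, PROT_READ) != 0) {
            munmap(p, len);
            return 1;
        }
        munmap(p, len);
        return 0;
    }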
 INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
 {
     LosVmSpace *space = OsCurrProcessGet()->vmSpace;
@@ -328,7 +454,11 @@ INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
     int ret;
     (VOID)LOS_MuxAcquire(&space->regionMux);
+<<<<<<< HEAD
+    region = LOS_RegionFind(space, vaddr);
+=======
     region = LOS_RegionFind(space, vaddr); // find the region for the virtual address
+>>>>>>> remotes/origin/main
     if (!IS_ALIGNED(vaddr, PAGE_SIZE) || (region == NULL) || (vaddr > vaddr + len)) {
         ret = -EINVAL;
         goto OUT_MPROTECT;
@@ -338,18 +468,30 @@ INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
         ret = -EINVAL;
         goto OUT_MPROTECT;
     }
+<<<<<<< HEAD
+
+=======
     // heap and VDSO regions must not have their protection changed
+>>>>>>> remotes/origin/main
     if ((region->regionFlags & VM_MAP_REGION_FLAG_VDSO) || (region->regionFlags & VM_MAP_REGION_FLAG_HEAP)) {
         ret = -EPERM;
         goto OUT_MPROTECT;
     }
+<<<<<<< HEAD
+
+=======
     // shared file-backed mappings need an extra permission check
+>>>>>>> remotes/origin/main
     if (LOS_IsRegionTypeFile(region) && (region->regionFlags & VM_MAP_REGION_FLAG_SHARED)) {
         if (!OsProtMprotectPermCheck(prot, region)) {
             ret = -EACCES;
             goto OUT_MPROTECT;
         }
     }
+<<<<<<< HEAD
+
+=======
+>>>>>>> remotes/origin/main
     len = LOS_Align(len, PAGE_SIZE);
     /* can't operate across regions */
     if ((region->range.base + region->range.size) < (vaddr + len)) {
@@ -358,11 +500,19 @@ INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
     }
     /* if only move some part of region, we need to split first */
+<<<<<<< HEAD
+    if (region->range.size > len) {
+        OsVmRegionAdjust(space, vaddr, len);
+    }
+
+    vmFlags = OsCvtProtFlagsToRegionFlags(prot, 0);
+=======
     if (region->range.size > len) { // if only part of the region changes, split it first
         OsVmRegionAdjust(space, vaddr, len); // adjust the region's range
     }
     vmFlags = OsCvtProtFlagsToRegionFlags(prot, 0); // convert the flags
+>>>>>>> remotes/origin/main
     vmFlags |= (region->regionFlags & VM_MAP_REGION_FLAG_SHARED) ? VM_MAP_REGION_FLAG_SHARED : 0;
     vmFlags |= OsInheritOldRegionName(region->regionFlags);
     region = LOS_RegionFind(space, vaddr);
@@ -372,7 +522,11 @@ INT32 LOS_DoMprotect(VADDR_T vaddr, size_t len, unsigned long prot)
     }
     region->regionFlags = vmFlags;
     count = len >> PAGE_SHIFT;
+<<<<<<< HEAD
+    ret = LOS_ArchMmuChangeProt(&space->archMmu, vaddr, count, region->regionFlags);
+=======
     ret = LOS_ArchMmuChangeProt(&space->archMmu, vaddr, count, region->regionFlags); // the function that actually changes the protection
+>>>>>>> remotes/origin/main
     if (ret) {
         ret = -ENOMEM;
         goto OUT_MPROTECT;
@@ -427,7 +581,11 @@ STATUS_T OsMremapCheck(VADDR_T addr, size_t oldLen, VADDR_T newAddr, size_t newL
         }
     }
+<<<<<<< HEAD
+    /* avoid new region overlapping with the old one */
+=======
     /* avoid new region overlapping with the old one */
+>>>>>>> remotes/origin/main
     if (flags & MREMAP_FIXED) {
         if (((region->range.base + region->range.size) > newAddr) &&
             (region->range.base < (newAddr + newLen))) {
@@ -441,7 +599,11 @@ STATUS_T OsMremapCheck(VADDR_T addr, size_t oldLen, VADDR_T newAddr, size_t newL
     return LOS_OK;
 }
+<<<<<<< HEAD
+
+=======
/// Remap a virtual memory address.
+>>>>>>> remotes/origin/main
 VADDR_T LOS_DoMremap(VADDR_T oldAddress, size_t oldSize, size_t newSize, int flags, VADDR_T newAddr)
 {
     LosVmMapRegion *regionOld = NULL;
@@ -536,7 +698,11 @@ OUT_MREMAP:
     (VOID)LOS_MuxRelease(&space->regionMux);
     return ret;
 }
+<<<<<<< HEAD
+
+=======
/// Dump a memory region
+>>>>>>> remotes/origin/main
 VOID LOS_DumpMemRegion(VADDR_T vaddr)
 {
     LosVmSpace *space = NULL;
@@ -546,6 +712,17 @@ VOID LOS_DumpMemRegion(VADDR_T vaddr)
         return;
     }
+<<<<<<< HEAD
+    if (LOS_IsRangeInSpace(space, ROUNDDOWN(vaddr, MB), MB) == FALSE) {
+        return;
+    }
+
+    OsDumpPte(vaddr);
+    OsDumpAspace(space);
+}
+#endif
+
+=======
     if (LOS_IsRangeInSpace(space, ROUNDDOWN(vaddr, MB), MB) == FALSE) { // is the range within the address space?
         return;
     }
@@ -553,4 +730,5 @@ VOID LOS_DumpMemRegion(VADDR_T vaddr)
     OsDumpPte(vaddr); // dump the L1 and L2 page tables
     OsDumpAspace(space); // dump the address space
 }
-#endif
\ No newline at end of file
+#endif
+>>>>>>> remotes/origin/main
diff --git a/src/kernel_liteos_a/kernel/base/vm/oom.c b/src/kernel_liteos_a/kernel/base/vm/oom.c
index 60d80128..192b3276 100644
--- a/src/kernel_liteos_a/kernel/base/vm/oom.c
+++ b/src/kernel_liteos_a/kernel/base/vm/oom.c
@@ -47,8 +47,13 @@
 #ifdef LOSCFG_KERNEL_VM
+<<<<<<< HEAD
+LITE_OS_SEC_BSS OomCB *g_oomCB = NULL;
+static SPIN_LOCK_INIT(g_oomSpinLock);
+=======
 LITE_OS_SEC_BSS OomCB *g_oomCB = NULL; // the global OOM control block
 static SPIN_LOCK_INIT(g_oomSpinLock); // the OOM spinlock
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomScoreProcess(LosProcessCB *candidateProcess)
 {
@@ -57,20 +62,32 @@ LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomScoreProcess(LosProcessCB *candidateProc
 #ifndef LOSCFG_KERNEL_SMP
     (VOID)LOS_MuxAcquire(&candidateProcess->vmSpace->regionMux);
 #endif
+<<<<<<< HEAD
+    /* we only consider actual physical memory here. */
+=======
     /* we only consider actual physical memory here. */
+>>>>>>> remotes/origin/main
     OsUProcessPmUsage(candidateProcess->vmSpace, NULL, &actualPm);
 #ifndef LOSCFG_KERNEL_SMP
     (VOID)LOS_MuxRelease(&candidateProcess->vmSpace->regionMux);
 #endif
     return actualPm;
 }
+<<<<<<< HEAD
+
+=======
/// Installed as the g_oomCB->processVictimCB callback
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomKillProcess(UINTPTR param)
 {
     /* we will not kill process, and do nothing here */
     return LOS_OK;
 }
+<<<<<<< HEAD
+
+=======
/// Force memory shrinking
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomForceShrinkMemory(VOID)
 {
     UINT32 i;
@@ -80,14 +97,21 @@ LITE_OS_SEC_TEXT_MINOR STATIC UINT32 OomForceShrinkMemory(VOID)
     * TryShrinkMemory maybe reclaim 0 pages in the first time from active list
     * to inactive list, and in the second time reclaim memory from inactive list.
     */
+<<<<<<< HEAD
+=======
     // TryShrinkMemory may reclaim 0 pages on the first pass, only moving pages from the active to the inactive list, and then reclaim memory from the inactive list on the second pass.
+>>>>>>> remotes/origin/main
     for (i = 0; i < MAX_SHRINK_PAGECACHE_TRY; i++) {
         reclaimMemPages += OsTryShrinkMemory(0);
     }
     return reclaimMemPages;
 }
+<<<<<<< HEAD
+
+=======
/// Reclaim the page cache when memory runs low
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_MINOR STATIC BOOL OomReclaimPageCache(VOID)
 {
     UINT32 totalPm = 0;
@@ -97,14 +121,38 @@ LITE_OS_SEC_TEXT_MINOR STATIC BOOL OomReclaimPageCache(VOID)
     UINT32 i;
     for (i = 0; i < MAX_SHRINK_PAGECACHE_TRY; i++) {
+<<<<<<< HEAD
+        OsVmPhysUsedInfoGet(&usedPm, &totalPm);
+        isReclaimMemory = ((totalPm - usedPm) << PAGE_SHIFT) < g_oomCB->reclaimMemThreshold;
+        if (isReclaimMemory) {
+=======
         OsVmPhysUsedInfoGet(&usedPm, &totalPm); // get the total and used physical page counts
         isReclaimMemory = ((totalPm - usedPm) << PAGE_SHIFT) < g_oomCB->reclaimMemThreshold; // check whether the reclaim threshold has been crossed
         if (isReclaimMemory) { // time to reclaim
+>>>>>>> remotes/origin/main
             /*
             * we do force memory reclaim from page cache here.
             * if we get memory, we will reclaim pagecache memory again.
             * if there is no memory to reclaim, we will return.
             */
+<<<<<<< HEAD
+            reclaimMemPages = OomForceShrinkMemory();
+            if (reclaimMemPages > 0) {
+                continue;
+            }
+        }
+        break;
+    }
+
+    return isReclaimMemory;
+}
+
+/*
+ * check is low memory or not, if low memory, try to kill process.
+ * return is kill process or not.
+ */
+LITE_OS_SEC_TEXT_MINOR BOOL OomCheckProcess(VOID)
+=======
             // force memory reclaim from the page cache here
             reclaimMemPages = OomForceShrinkMemory(); // force the reclaim
             if (reclaimMemPages > 0) { // if we got memory back, try reclaiming pagecache memory again
@@ -122,19 +170,29 @@ LITE_OS_SEC_TEXT_MINOR STATIC BOOL OomReclaimPageCache(VOID)
 * return is kill process or not.
 */
 LITE_OS_SEC_TEXT_MINOR BOOL OomCheckProcess(VOID) // check for low memory; if memory is low, try to kill a process; returns whether one was killed
+>>>>>>> remotes/origin/main
 {
     UINT32 totalPm;
     UINT32 usedPm;
     BOOL isLowMemory = FALSE;
     /*
+<<<<<<< HEAD
+     * spinlock the current core schedule, make sure oom process atomic
+     * spinlock other place entering OomCheckProcess, make sure oom process mutex
+=======
     * spinlock the current core schedule, make sure oom process atomic
     * spinlock other place entering OomCheckProcess, make sure oom process mutex
+>>>>>>> remotes/origin/main
     */
     LOS_SpinLock(&g_oomSpinLock);
     /* first we will check if we need to reclaim pagecache memory */
+<<<<<<< HEAD
+    if (OomReclaimPageCache() == FALSE) {
+=======
     if (OomReclaimPageCache() == FALSE) {
+>>>>>>> remotes/origin/main
         LOS_SpinUnlock(&g_oomSpinLock);
         goto NO_VICTIM_PROCESS;
     }
@@ -142,7 +200,13 @@ LITE_OS_SEC_TEXT_MINOR BOOL OomCheckProcess(VOID)
     /* get free bytes */
     OsVmPhysUsedInfoGet(&usedPm, &totalPm);
     isLowMemory = ((totalPm - usedPm) << PAGE_SHIFT) < g_oomCB->lowMemThreshold;
+<<<<<<< HEAD
+
+    LOS_SpinUnlock(&g_oomSpinLock);
+
+=======
     LOS_SpinUnlock(&g_oomSpinLock);
+>>>>>>> remotes/origin/main
     if (isLowMemory) {
         PRINTK("[oom] OS is in low memory state\n"
                "total physical memory: %#x(byte), used: %#x(byte),"
@@ -155,6 +219,16 @@ NO_VICTIM_PROCESS:
     return isLowMemory;
 }
+<<<<<<< HEAD
+#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK
+STATIC VOID OomWriteEvent(VOID)
+{
+    OsWriteResourceEvent(OS_RESOURCE_EVENT_OOM);
+}
+#endif
+
+LITE_OS_SEC_TEXT_MINOR VOID OomInfodump(VOID)
+=======
 #ifdef LOSCFG_ENABLE_OOM_LOOP_TASK // switch for the OOM monitoring task
 STATIC VOID OomWriteEvent(VOID) // timer callback created in OomTaskInit
 {
@@ -163,6 +237,7 @@ STATIC VOID OomWriteEvent(VOID)
 #endif
 // print information when memory runs low
 LITE_OS_SEC_TEXT_MINOR VOID OomInfodump(VOID) // dump the out-of-memory information
+>>>>>>> remotes/origin/main
 {
     PRINTK("[oom] oom loop task status: %s\n"
            "      oom low memory threshold: %#x(byte)\n"
@@ -172,7 +247,11 @@ LITE_OS_SEC_TEXT_MINOR VOID OomInfodump(VOID)
            g_oomCB->lowMemThreshold, g_oomCB->reclaimMemThreshold, g_oomCB->checkInterval);
 }
+<<<<<<< HEAD
+
+=======
/// Set the low-memory threshold
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_MINOR VOID OomSetLowMemThreashold(UINT32 lowMemThreshold)
 {
     if ((lowMemThreshold > OOM_DEFAULT_LOW_MEM_THRESHOLD_MAX)) {
@@ -186,7 +265,11 @@ LITE_OS_SEC_TEXT_MINOR VOID OomSetLowMemThreashold(UINT32 lowMemThreshold)
                g_oomCB->lowMemThreshold);
     }
 }
+<<<<<<< HEAD
+
+=======
/// Set the memory-reclaim threshold
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_MINOR VOID OomSetReclaimMemThreashold(UINT32 reclaimMemThreshold)
 {
     UINT32 totalPm = 0;
@@ -204,7 +287,11 @@ LITE_OS_SEC_TEXT_MINOR VOID OomSetReclaimMemThreashold(UINT32 reclaimMemThreshol
                g_oomCB->reclaimMemThreshold);
     }
 }
+<<<<<<< HEAD
+
+=======
/// Set the check interval
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_MINOR VOID OomSetCheckInterval(UINT32 checkInterval)
 {
     if ((checkInterval >= OOM_CHECK_MIN) && (checkInterval <= OOM_CHECK_MAX)) {
@@ -216,7 +303,11 @@ LITE_OS_SEC_TEXT_MINOR VOID OomSetCheckInterval(UINT32 checkInterval)
                g_oomCB->checkInterval, OOM_CHECK_MIN, OOM_CHECK_MAX);
     }
 }
+<<<<<<< HEAD
+
+=======
/// Initialize the OOM monitoring task; OOM checks memory usage through a software timer
+>>>>>>> remotes/origin/main
 LITE_OS_SEC_TEXT_MINOR UINT32 OomTaskInit(VOID)
 {
     g_oomCB = (OomCB *)LOS_MemAlloc(m_aucSysMem0, sizeof(OomCB));
@@ -225,6 +316,19 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OomTaskInit(VOID)
         return LOS_NOK;
     }
+<<<<<<< HEAD
+    g_oomCB->lowMemThreshold = OOM_DEFAULT_LOW_MEM_THRESHOLD;
+    g_oomCB->reclaimMemThreshold = OOM_DEFAULT_RECLAIM_MEM_THRESHOLD;
+    g_oomCB->checkInterval = OOM_DEFAULT_CHECK_INTERVAL;
+    g_oomCB->processVictimCB = (OomFn)OomKillProcess;
+    g_oomCB->scoreCB = (OomFn)OomScoreProcess;
+    g_oomCB->enabled = FALSE;
+
+#ifdef LOSCFG_ENABLE_OOM_LOOP_TASK
+    g_oomCB->enabled = TRUE;
+    UINT32 ret = LOS_SwtmrCreate(g_oomCB->checkInterval, LOS_SWTMR_MODE_PERIOD, (SWTMR_PROC_FUNC)OomWriteEvent,
+                                 &g_oomCB->swtmrID, (UINTPTR)g_oomCB);
+=======
     g_oomCB->lowMemThreshold = OOM_DEFAULT_LOW_MEM_THRESHOLD; // the low-memory threshold
     g_oomCB->reclaimMemThreshold = OOM_DEFAULT_RECLAIM_MEM_THRESHOLD; // the memory-reclaim threshold
     g_oomCB->checkInterval = OOM_DEFAULT_CHECK_INTERVAL; // the check interval, 1s
@@ -236,16 +340,28 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OomTaskInit(VOID)
     g_oomCB->enabled = TRUE;
     UINT32 ret = LOS_SwtmrCreate(g_oomCB->checkInterval, LOS_SWTMR_MODE_PERIOD, (SWTMR_PROC_FUNC)OomWriteEvent,
                                  &g_oomCB->swtmrID, (UINTPTR)g_oomCB); // create the check timer
+>>>>>>> remotes/origin/main
     if (ret != LOS_OK) {
         return ret;
     }
+<<<<<<< HEAD
+    return LOS_SwtmrStart(g_oomCB->swtmrID);
+=======
     return LOS_SwtmrStart(g_oomCB->swtmrID); // start the timer
+>>>>>>> remotes/origin/main
 #else
     return LOS_OK;
 #endif
 }
+<<<<<<< HEAD
+LOS_MODULE_INIT(OomTaskInit, LOS_INIT_LEVEL_KMOD_TASK);
+
+#endif
+
+=======
 LOS_MODULE_INIT(OomTaskInit, LOS_INIT_LEVEL_KMOD_TASK); // register the memory monitor for module init
-#endif
\ No newline at end of file
+#endif
+>>>>>>> remotes/origin/main
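Both OOM thresholds above compare free memory in bytes against a byte threshold, converting free page counts with << PAGE_SHIFT. A standalone sketch of that arithmetic (the names and the 4 KiB page size are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_PAGE_SHIFT 12U /* 4 KiB pages assumed */

    static bool DemoIsLowMemory(uint32_t totalPm, uint32_t usedPm, uint32_t thresholdBytes)
    {
        uint32_t freeBytes = (totalPm - usedPm) << DEMO_PAGE_SHIFT;
        return freeBytes < thresholdBytes;
    }

    /* Example: 0x80 = 128 free pages -> 512 KiB; with a 1 MiB threshold,
     * DemoIsLowMemory(total, total - 0x80, 0x100000) returns true. */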
diff --git a/src/kernel_liteos_a/kernel/base/vm/shm.c b/src/kernel_liteos_a/kernel/base/vm/shm.c
index a61cb0f4..6582d99f 100644
--- a/src/kernel_liteos_a/kernel/base/vm/shm.c
+++ b/src/kernel_liteos_a/kernel/base/vm/shm.c
@@ -1,3 +1,5 @@
+<<<<<<< HEAD
+=======
/*! What is shared memory?
 As the name implies, shared memory allows two unrelated processes to access the same physical
 memory. Shared memory is, between two running processes,
@@ -12,6 +14,7 @@
 A shared region can be created by any process; every use of a shared region goes through mapping.
*/
+>>>>>>> remotes/origin/main
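The overview above is the standard System V shared memory model, and the whole lifecycle is visible from user space. A minimal round trip (0x1234 is an arbitrary demo key):

    #include <string.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        int shmid = shmget((key_t)0x1234, 4096, IPC_CREAT | 0666); /* create or look up */
        if (shmid < 0) {
            return 1;
        }
        char *p = (char *)shmat(shmid, NULL, 0); /* attach: map into this process */
        if (p == (char *)-1) {
            return 1;
        }
        strcpy(p, "hello");            /* visible to every process attached to the segment */
        shmdt(p);                      /* detach: shm_nattch--, the memory itself survives */
        shmctl(shmid, IPC_RMID, NULL); /* freed once the last user detaches */
        return 0;
    }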
 /*
  * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
  * Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
@@ -69,9 +72,15 @@
 #ifdef LOSCFG_KERNEL_SHM
+<<<<<<< HEAD
+#define SHM_SEG_FREE    0x2000
+#define SHM_SEG_USED    0x4000
+#define SHM_SEG_REMOVE  0x8000
+=======
 #define SHM_SEG_FREE    0x2000 // free, unused
 #define SHM_SEG_USED    0x4000 // in use
 #define SHM_SEG_REMOVE  0x8000 // marked for removal
+>>>>>>> remotes/origin/main
 #ifndef SHM_M
 #define SHM_M   010000
 #endif
@@ -80,17 +89,34 @@
 #ifndef SHM_X
 #define SHM_X   0100
 #endif
+<<<<<<< HEAD
+
+#ifndef ACCESSPERMS
+#define ACCESSPERMS (S_IRWXU | S_IRWXG | S_IRWXO)
+#endif
+=======
 #ifndef ACCESSPERMS
 #define ACCESSPERMS (S_IRWXU | S_IRWXG | S_IRWXO) // read/write/execute permission bits for user, group and other
 #endif // U: user, G: group, O: other
+>>>>>>> remotes/origin/main
 #define SHM_S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
 #define SHM_S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)
 #define SHM_S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)
+<<<<<<< HEAD
+
+#define SHM_GROUPE_TO_USER  3
+#define SHM_OTHER_TO_USER   6
+
+#ifndef LOSCFG_IPC_CONTAINER
+STATIC LosMux g_sysvShmMux;
+
+=======
 #define SHM_GROUPE_TO_USER  3
 #define SHM_OTHER_TO_USER   6
 #ifndef LOSCFG_IPC_CONTAINER
 STATIC LosMux g_sysvShmMux;
+>>>>>>> remotes/origin/main
 /* private data */
 STATIC struct shminfo g_shmInfo;
 STATIC struct shmIDSource *g_shmSegs = NULL;
@@ -106,6 +132,8 @@ STATIC UINT32 g_shmUsedPageCount;
 #define SYSV_SHM_LOCK()     (VOID)LOS_MuxLock(&IPC_SHM_SYS_VSHM_MUTEX, LOS_WAIT_FOREVER)
 #define SYSV_SHM_UNLOCK()   (VOID)LOS_MuxUnlock(&IPC_SHM_SYS_VSHM_MUTEX)
+<<<<<<< HEAD
+=======
 #if 0 // @note_#if0
 // the kernel keeps an ipc_perm struct for every IPC object, describing the object's permissions and owner
@@ -160,6 +188,7 @@ STATIC UINT32 g_shmUsedPageCount;
 #endif
 // shared memory initialization
+>>>>>>> remotes/origin/main
 struct shmIDSource *OsShmCBInit(LosMux *sysvShmMux, struct shminfo *shmInfo, UINT32 *shmUsedPageCount)
 {
     UINT32 ret;
@@ -168,6 +197,10 @@ struct shmIDSource *OsShmCBInit(LosMux *sysvShmMux, struct shminfo *shmInfo, UIN
     if ((sysvShmMux == NULL) || (shmInfo == NULL) || (shmUsedPageCount == NULL)) {
         return NULL;
     }
+<<<<<<< HEAD
+
+=======
+>>>>>>> remotes/origin/main
     ret = LOS_MuxInit(sysvShmMux, NULL);
     if (ret != LOS_OK) {
         goto ERROR;
     }
@@ -178,6 +211,10 @@ struct shmIDSource *OsShmCBInit(LosMux *sysvShmMux, struct shminfo *shmInfo, UIN
     shmInfo->shmmni = SHM_MNI;
     shmInfo->shmseg = SHM_SEG;
     shmInfo->shmall = SHM_ALL;
+<<<<<<< HEAD
+
+=======
+>>>>>>> remotes/origin/main
     struct shmIDSource *shmSegs = LOS_MemAlloc((VOID *)OS_SYS_MEM_ADDR, sizeof(struct shmIDSource) * shmInfo->shmmni);
     if (shmSegs == NULL) {
         (VOID)LOS_MuxDestroy(sysvShmMux);
@@ -187,9 +224,15 @@ struct shmIDSource *OsShmCBInit(LosMux *sysvShmMux, struct shminfo *shmInfo, UIN
            0, (sizeof(struct shmIDSource) * shmInfo->shmmni));
     for (i = 0; i < shmInfo->shmmni; i++) {
+<<<<<<< HEAD
+        shmSegs[i].status = SHM_SEG_FREE;
+        shmSegs[i].ds.shm_perm.seq = i + 1;
+        LOS_ListInit(&shmSegs[i].node);
+=======
         shmSegs[i].status = SHM_SEG_FREE; // every node starts out free
         shmSegs[i].ds.shm_perm.seq = i + 1; // struct ipc_perm shm_perm; the system keeps an ipc_perm struct for every IPC object, describing its permissions and owner
         LOS_ListInit(&shmSegs[i].node); // initialize the node
+>>>>>>> remotes/origin/main
     }
     *shmUsedPageCount = 0;
@@ -199,6 +242,10 @@ ERROR:
     VM_ERR("ShmInit fail\n");
     return NULL;
 }
+<<<<<<< HEAD
+
+=======
+>>>>>>> remotes/origin/main
 UINT32 ShmInit(VOID)
 {
 #ifndef LOSCFG_IPC_CONTAINER
@@ -210,9 +257,14 @@ UINT32 ShmInit(VOID)
     return LOS_OK;
 }
+<<<<<<< HEAD
+LOS_MODULE_INIT(ShmInit, LOS_INIT_LEVEL_VM_COMPLETE);
+
+=======
 LOS_MODULE_INIT(ShmInit, LOS_INIT_LEVEL_VM_COMPLETE); // shared memory module init
 // shared memory deinitialization
+>>>>>>> remotes/origin/main
 UINT32 ShmDeinit(VOID)
 {
     UINT32 ret;
@@ -227,7 +279,11 @@ UINT32 ShmDeinit(VOID)
     return 0;
 }
+<<<<<<< HEAD
+
+=======
/// Mark every physical page frame of the shared segment as shared
+>>>>>>> remotes/origin/main
 STATIC inline VOID ShmSetSharedFlag(struct shmIDSource *seg)
 {
     LosVmPage *page = NULL;
@@ -236,7 +292,11 @@ STATIC inline VOID ShmSetSharedFlag(struct shmIDSource *seg)
         OsSetPageShared(page);
     }
 }
+<<<<<<< HEAD
+
+=======
/// Strip the shared flag from every physical page frame of the segment
+>>>>>>> remotes/origin/main
 STATIC inline VOID ShmClearSharedFlag(struct shmIDSource *seg)
 {
     LosVmPage *page = NULL;
@@ -245,7 +305,11 @@ STATIC inline VOID ShmClearSharedFlag(struct shmIDSource *seg)
         OsCleanPageShared(page);
     }
 }
+<<<<<<< HEAD
+
+=======
/// Drop one reference on every shared page under the segment
+>>>>>>> remotes/origin/main
 STATIC VOID ShmPagesRefDec(struct shmIDSource *seg)
 {
     LosVmPage *page = NULL;
@@ -255,6 +319,8 @@ STATIC VOID ShmPagesRefDec(struct shmIDSource *seg)
     }
 }
+<<<<<<< HEAD
+=======
 /**
 * @brief Allocate physical memory for a shared segment.
 For example: size = 4097, then LOS_Align(size, PAGE_SIZE) = 8192
@@ -264,6 +330,7 @@ STATIC VOID ShmPagesRefDec(struct shmIDSource *seg)
 * @param shmflg
 * @return STATIC
 */
+>>>>>>> remotes/origin/main
 STATIC INT32 ShmAllocSegCheck(key_t key, size_t *size, INT32 *segNum)
 {
     INT32 i;
@@ -272,7 +339,11 @@ STATIC INT32 ShmAllocSegCheck(key_t key, size_t *size, INT32 *segNum)
         return -EINVAL;
     }
+<<<<<<< HEAD
+    *size = LOS_Align(*size, PAGE_SIZE);
+=======
     *size = LOS_Align(*size, PAGE_SIZE); // must be page aligned
+>>>>>>> remotes/origin/main
     if ((IPC_SHM_USED_PAGE_COUNT + (*size >> PAGE_SHIFT)) > IPC_SHM_INFO.shmall) {
         return -ENOMEM;
     }
@@ -282,10 +353,18 @@ STATIC INT32 ShmAllocSegCheck(key_t key, size_t *size, INT32 *segNum)
         return -ENOMEM;
     }
 #endif
+<<<<<<< HEAD
+
+    for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {
+        if (IPC_SHM_SEGS[i].status & SHM_SEG_FREE) {
+            IPC_SHM_SEGS[i].status &= ~SHM_SEG_FREE;
+            *segNum = i;
+=======
     for (i = 0; i < IPC_SHM_INFO.shmmni; i++) { // look for a free segment to bind to the key
         if (IPC_SHM_SEGS[i].status & SHM_SEG_FREE) { // found a free one
             IPC_SHM_SEGS[i].status &= ~SHM_SEG_FREE; // mark it as no longer free
             *segNum = i; // record its index
+>>>>>>> remotes/origin/main
             break;
         }
     }
@@ -306,16 +385,45 @@ STATIC INT32 ShmAllocSeg(key_t key, size_t size, INT32 shmflg)
     if (ret < 0) {
         return ret;
     }
+<<<<<<< HEAD
+
+    seg = &IPC_SHM_SEGS[segNum];
+    count = LOS_PhysPagesAlloc(size >> PAGE_SHIFT, &seg->node);
+    if (count != (size >> PAGE_SHIFT)) {
+        (VOID)LOS_PhysPagesFree(&seg->node);
+        seg->status = SHM_SEG_FREE;
+=======
     seg = &IPC_SHM_SEGS[segNum];
     count = LOS_PhysPagesAlloc(size >> PAGE_SHIFT, &seg->node); // allocate the shared pages; they are linked onto node internally
     if (count != (size >> PAGE_SHIFT)) { // not enough memory: the policy is all-or-nothing, so discard the partial allocation
         (VOID)LOS_PhysPagesFree(&seg->node); // free the page frames on the node
         seg->status = SHM_SEG_FREE; // the segment becomes free again
+>>>>>>> remotes/origin/main
 #ifdef LOSCFG_KERNEL_IPC_PLIMIT
         OsIPCLimitShmFree(size);
 #endif
         return -ENOMEM;
     }
+<<<<<<< HEAD
+
+    ShmSetSharedFlag(seg);
+    IPC_SHM_USED_PAGE_COUNT += size >> PAGE_SHIFT;
+
+    seg->status |= SHM_SEG_USED;
+    seg->ds.shm_perm.mode = (UINT32)shmflg & ACCESSPERMS;
+    seg->ds.shm_perm.key = key;
+    seg->ds.shm_segsz = size;
+    seg->ds.shm_perm.cuid = LOS_GetUserID();
+    seg->ds.shm_perm.uid = LOS_GetUserID();
+    seg->ds.shm_perm.cgid = LOS_GetGroupID();
+    seg->ds.shm_perm.gid = LOS_GetGroupID();
+    seg->ds.shm_lpid = 0;
+    seg->ds.shm_nattch = 0;
+    seg->ds.shm_cpid = LOS_GetCurrProcessID();
+    seg->ds.shm_atime = 0;
+    seg->ds.shm_dtime = 0;
+    seg->ds.shm_ctime = time(NULL);
+=======
     ShmSetSharedFlag(seg); // mark every page on node as shared
     IPC_SHM_USED_PAGE_COUNT += size >> PAGE_SHIFT;
@@ -333,20 +441,31 @@ STATIC INT32 ShmAllocSeg(key_t key, size_t size, INT32 shmflg)
     seg->ds.shm_atime = 0; // access time
     seg->ds.shm_dtime = 0; // detach time; when done with the segment a process detaches it from its address space -- detaching does not delete it, it merely makes it unavailable to that process
     seg->ds.shm_ctime = time(NULL); // creation time
+>>>>>>> remotes/origin/main
 #ifdef LOSCFG_SHELL
     (VOID)memcpy_s(seg->ownerName, OS_PCB_NAME_LEN, OsCurrProcessGet()->processName, OS_PCB_NAME_LEN);
 #endif
     return segNum;
 }
+<<<<<<< HEAD
+
+=======
/// Free the physical page frames held by seg->node and reset the segment itself
+>>>>>>> remotes/origin/main
 STATIC INLINE VOID ShmFreeSeg(struct shmIDSource *seg, UINT32 *shmUsedPageCount)
 {
     UINT32 count;
+<<<<<<< HEAD
+    ShmClearSharedFlag(seg);
+    count = LOS_PhysPagesFree(&seg->node);
+    if (count != (seg->ds.shm_segsz >> PAGE_SHIFT)) {
+=======
     ShmClearSharedFlag(seg); // first strip the shared flag from the vm pages on seg->node
     count = LOS_PhysPagesFree(&seg->node); // then free the page frames one by one
     if (count != (seg->ds.shm_segsz >> PAGE_SHIFT)) { // the counts must match, otherwise something is wrong
+>>>>>>> remotes/origin/main
         VM_ERR("free physical pages failed, count = %d, size = %d", count, seg->ds.shm_segsz >> PAGE_SHIFT);
         return;
     }
@@ -356,31 +475,54 @@ STATIC INLINE VOID ShmFreeSeg(struct shmIDSource *seg, UINT32 *shmUsedPageCount)
     if (shmUsedPageCount != NULL) {
         (*shmUsedPageCount) -= seg->ds.shm_segsz >> PAGE_SHIFT;
     }
+<<<<<<< HEAD
+    seg->status = SHM_SEG_FREE;
+    LOS_ListInit(&seg->node);
+}
+
+=======
     seg->status = SHM_SEG_FREE; // the segment is free again
     LOS_ListInit(&seg->node); // reset the node
 }
/// Find a shmid by key
+>>>>>>> remotes/origin/main
 STATIC INT32 ShmFindSegByKey(key_t key)
 {
     INT32 i;
     struct shmIDSource *seg = NULL;
+<<<<<<< HEAD
+    for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {
+        seg = &IPC_SHM_SEGS[i];
+        if ((seg->status & SHM_SEG_USED) &&
+            (seg->ds.shm_perm.key == key)) {
+=======
     for (i = 0; i < IPC_SHM_INFO.shmmni; i++) { // walk the segment pool for the id bound to the key
         seg = &IPC_SHM_SEGS[i];
         if ((seg->status & SHM_SEG_USED) &&
             (seg->ds.shm_perm.key == key)) { // both conditions must hold
+>>>>>>> remotes/origin/main
             return i;
         }
     }
     return -1;
 }
+<<<<<<< HEAD
+
+STATIC INT32 ShmSegValidCheck(INT32 segNum, size_t size, INT32 shmFlg)
+{
+    struct shmIDSource *seg = &IPC_SHM_SEGS[segNum];
+
+    if (size > seg->ds.shm_segsz) {
+=======
/// Validity check for a shared memory segment
 STATIC INT32 ShmSegValidCheck(INT32 segNum, size_t size, INT32 shmFlg)
 {
     struct shmIDSource *seg = &IPC_SHM_SEGS[segNum]; // get the segment
     if (size > seg->ds.shm_segsz) { // the requested size exceeds the segment size
+>>>>>>> remotes/origin/main
         return -EINVAL;
     }
@@ -391,7 +533,11 @@ STATIC INT32 ShmSegValidCheck(INT32 segNum, size_t size, INT32 shmFlg)
     return segNum;
 }
+<<<<<<< HEAD
+
+=======
/// Find the shared memory resource by id
+>>>>>>> remotes/origin/main
 STATIC struct shmIDSource *ShmFindSeg(int shmid)
 {
     struct shmIDSource *seg = NULL;
@@ -409,7 +555,11 @@ STATIC struct shmIDSource *ShmFindSeg(int shmid)
     return seg;
 }
+<<<<<<< HEAD
+
+=======
/// Map the shared memory
+>>>>>>> remotes/origin/main
 STATIC VOID ShmVmmMapping(LosVmSpace *space, LOS_DL_LIST *pageList, VADDR_T vaddr, UINT32 regionFlags)
 {
     LosVmPage *vmPage = NULL;
@@ -417,29 +567,53 @@ STATIC VOID ShmVmmMapping(LosVmSpace *space, LOS_DL_LIST *pageList, VADDR_T vadd
     PADDR_T pa;
     STATUS_T ret;
+<<<<<<< HEAD
+    LOS_DL_LIST_FOR_EACH_ENTRY(vmPage, pageList, LosVmPage, node) {
+        pa = VM_PAGE_TO_PHYS(vmPage);
+        LOS_AtomicInc(&vmPage->refCounts);
+        ret = LOS_ArchMmuMap(&space->archMmu, va, pa, 1, regionFlags);
+=======
     LOS_DL_LIST_FOR_EACH_ENTRY(vmPage, pageList, LosVmPage, node) { // map page by page
         pa = VM_PAGE_TO_PHYS(vmPage); // get the physical address
         LOS_AtomicInc(&vmPage->refCounts); // bump the reference count
         ret = LOS_ArchMmuMap(&space->archMmu, va, pa, 1, regionFlags); // map virtual to physical
+>>>>>>> remotes/origin/main
         if (ret != 1) {
             VM_ERR("LOS_ArchMmuMap failed, ret = %d", ret);
         }
         va += PAGE_SIZE;
     }
 }
+<<<<<<< HEAD
+
+=======
/// Fork a shared region
+>>>>>>> remotes/origin/main
 VOID OsShmFork(LosVmSpace *space, LosVmMapRegion *oldRegion, LosVmMapRegion *newRegion)
 {
     struct shmIDSource *seg = NULL;
     SYSV_SHM_LOCK();
+<<<<<<< HEAD
+    seg = ShmFindSeg(oldRegion->shmid);
+=======
     seg = ShmFindSeg(oldRegion->shmid); // get the shared resource behind the old region's id
+>>>>>>> remotes/origin/main
     if (seg == NULL) {
         SYSV_SHM_UNLOCK();
         VM_ERR("shm fork failed!");
         return;
     }
+<<<<<<< HEAD
+    newRegion->shmid = oldRegion->shmid;
+    newRegion->forkFlags = oldRegion->forkFlags;
+    ShmVmmMapping(space, &seg->node, newRegion->range.base, newRegion->regionFlags);
+    seg->ds.shm_nattch++;
+    SYSV_SHM_UNLOCK();
+}
+
+=======
     newRegion->shmid = oldRegion->shmid; // same shared region id
     newRegion->forkFlags = oldRegion->forkFlags; // same fork flags
     ShmVmmMapping(space, &seg->node, newRegion->range.base, newRegion->regionFlags); // map the new region onto the shared memory
@@ -447,34 +621,55 @@ VOID OsShmFork(LosVmSpace *space, LosVmMapRegion *oldRegion, LosVmMapRegion *new
     SYSV_SHM_UNLOCK();
 }
/// Free a shared region
+>>>>>>> remotes/origin/main
 VOID OsShmRegionFree(LosVmSpace *space, LosVmMapRegion *region)
 {
     struct shmIDSource *seg = NULL;
     SYSV_SHM_LOCK();
+<<<<<<< HEAD
+    seg = ShmFindSeg(region->shmid);
+=======
     seg = ShmFindSeg(region->shmid); // get the shared resource behind the region's id
+>>>>>>> remotes/origin/main
     if (seg == NULL) {
         SYSV_SHM_UNLOCK();
         return;
     }
+<<<<<<< HEAD
+    LOS_ArchMmuUnmap(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);
+    ShmPagesRefDec(seg);
+    seg->ds.shm_nattch--;
+    if (seg->ds.shm_nattch <= 0 && (seg->status & SHM_SEG_REMOVE)) {
+        ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);
+=======
     LOS_ArchMmuUnmap(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT); // unmap the region
     ShmPagesRefDec(seg); // drop the page references
     seg->ds.shm_nattch--; // one fewer process attached to the shared region
     if (seg->ds.shm_nattch <= 0 && (seg->status & SHM_SEG_REMOVE)) {
         ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT); // free the backing memory -- note: the physical memory
+>>>>>>> remotes/origin/main
     } else {
         seg->ds.shm_dtime = time(NULL);
         seg->ds.shm_lpid = LOS_GetCurrProcessID(); /* may not be the space's PID. */
     }
     SYSV_SHM_UNLOCK();
 }
+<<<<<<< HEAD
+
+=======
/// Is this a shared region, i.e. does it carry the SHM tag?
+>>>>>>> remotes/origin/main
 BOOL OsIsShmRegion(LosVmMapRegion *region)
 {
     return (region->regionFlags & VM_MAP_REGION_FLAG_SHM) ? TRUE : FALSE;
 }
+<<<<<<< HEAD
+
+=======
/// Count the used segments in the shared memory pool
+>>>>>>> remotes/origin/main
 STATIC INT32 ShmSegUsedCount(VOID)
 {
     INT32 i;
@@ -483,16 +678,27 @@ STATIC INT32 ShmSegUsedCount(VOID)
     for (i = 0; i < IPC_SHM_INFO.shmmni; i++) {
         seg = &IPC_SHM_SEGS[i];
+<<<<<<< HEAD
+        if (seg->status & SHM_SEG_USED) {
+=======
         if (seg->status & SHM_SEG_USED) { // found one in use
+>>>>>>> remotes/origin/main
             count++;
         }
     }
     return count;
 }
+<<<<<<< HEAD
+
+STATIC INT32 ShmPermCheck(struct shmIDSource *seg, mode_t mode)
+{
+    INT32 uid = LOS_GetUserID();
+=======
/// Permission check for a shared memory segment
 STATIC INT32 ShmPermCheck(struct shmIDSource *seg, mode_t mode)
 {
     INT32 uid = LOS_GetUserID(); // the current process's user id
+>>>>>>> remotes/origin/main
     UINT32 tmpMode = 0;
     mode_t privMode = seg->ds.shm_perm.mode;
     mode_t accMode;
@@ -534,6 +740,8 @@ STATIC INT32 ShmPermCheck(struct shmIDSource *seg, mode_t mode)
     }
 }
+<<<<<<< HEAD
+=======
 /*!
 * @brief ShmGet
 * Get a shared memory identifier, or create a shared memory object
@@ -550,6 +758,7 @@ STATIC INT32 ShmPermCheck(struct shmIDSource *seg, mode_t mode)
 *
 * @see
 */
+>>>>>>> remotes/origin/main
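The lookup logic documented above -- IPC_PRIVATE always allocates, while a named key is looked up first and IPC_CREAT decides whether a miss creates the segment -- reads from user space as follows (DemoShmOpen is an illustrative helper):

    #include <errno.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    /* Hypothetical helper: open-or-create semantics on a named key. */
    int DemoShmOpen(key_t key, size_t size)
    {
        int id = shmget(key, size, 0666); /* lookup only: fails with ENOENT on a miss */
        if (id < 0 && errno == ENOENT) {
            id = shmget(key, size, IPC_CREAT | 0666); /* create on miss */
        }
        /* IPC_CREAT | IPC_EXCL instead fails with EEXIST when the key is taken. */
        return id;
    }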
 INT32 ShmGet(key_t key, size_t size, INT32 shmflg)
 {
     INT32 ret;
@@ -560,6 +769,15 @@ INT32 ShmGet(key_t key, size_t size, INT32 shmflg)
     if (key == IPC_PRIVATE) {
         ret = ShmAllocSeg(key, size, shmflg);
     } else {
+<<<<<<< HEAD
+        ret = ShmFindSegByKey(key);
+        if (ret < 0) {
+            if (((UINT32)shmflg & IPC_CREAT) == 0) {
+                ret = -ENOENT;
+                goto ERROR;
+            } else {
+                ret = ShmAllocSeg(key, size, shmflg);
+=======
         ret = ShmFindSegByKey(key); // look up the resource id by key
         if (ret < 0) {
             if (((UINT32)shmflg & IPC_CREAT) == 0) { // not asked to create
@@ -567,6 +785,7 @@ INT32 ShmGet(key_t key, size_t size, INT32 shmflg)
                 goto ERROR;
             } else {
                 ret = ShmAllocSeg(key, size, shmflg); // allocate a shared segment
+>>>>>>> remotes/origin/main
             }
         } else {
             shmid = ret;
@@ -575,7 +794,11 @@ INT32 ShmGet(key_t key, size_t size, INT32 shmflg)
                 ret = -EEXIST;
                 goto ERROR;
             }
+<<<<<<< HEAD
+            ret = ShmPermCheck(ShmFindSeg(shmid), (UINT32)shmflg & ACCESSPERMS);
+=======
             ret = ShmPermCheck(ShmFindSeg(shmid), (UINT32)shmflg & ACCESSPERMS); // check the access permissions
+>>>>>>> remotes/origin/main
             if (ret != 0) {
                 ret = -ret;
                 goto ERROR;
             }
@@ -610,13 +833,21 @@ INT32 ShmatParamCheck(const VOID *shmaddr, INT32 shmflg)
     return 0;
 }
+<<<<<<< HEAD
+
+=======
/// Allocate a shared region and map it
+>>>>>>> remotes/origin/main
 LosVmMapRegion *ShmatVmmAlloc(struct shmIDSource *seg, const VOID *shmaddr, INT32 shmflg, UINT32 prot)
 {
     LosVmSpace *space = OsCurrProcessGet()->vmSpace;
     LosVmMapRegion *region = NULL;
+<<<<<<< HEAD
+    UINT32 flags = MAP_ANONYMOUS | MAP_SHARED;
+=======
     UINT32 flags = MAP_ANONYMOUS | MAP_SHARED; // the region is tagged shared + anonymous
+>>>>>>> remotes/origin/main
     UINT32 mapFlags = flags | MAP_FIXED;
     VADDR_T vaddr;
     UINT32 regionFlags;
@@ -627,29 +858,48 @@ LosVmMapRegion *ShmatVmmAlloc(struct shmIDSource *seg, const VOID *shmaddr,
     }
     regionFlags = OsCvtProtFlagsToRegionFlags(prot, flags);
     (VOID)LOS_MuxAcquire(&space->regionMux);
+<<<<<<< HEAD
+    if (shmaddr == NULL) {
+        region = LOS_RegionAlloc(space, 0, seg->ds.shm_segsz, regionFlags, 0);
+    } else {
+=======
     if (shmaddr == NULL) { // no attach address was specified
         region = LOS_RegionAlloc(space, 0, seg->ds.shm_segsz, regionFlags, 0); // allocate a region anywhere
     } else { // an address was given, so the containing region must be found first
+>>>>>>> remotes/origin/main
         if ((UINT32)shmflg & SHM_RND) {
             vaddr = ROUNDDOWN((VADDR_T)(UINTPTR)shmaddr, SHMLBA);
         } else {
             vaddr = (VADDR_T)(UINTPTR)shmaddr;
+<<<<<<< HEAD
+        }
+=======
         } // find the region and remap; with an explicit address the remap flag is required
+>>>>>>> remotes/origin/main
         if (!((UINT32)shmflg & SHM_REMAP) && (LOS_RegionFind(space, vaddr) ||
             LOS_RegionFind(space, vaddr + seg->ds.shm_segsz - 1) ||
             LOS_RegionRangeFind(space, vaddr, seg->ds.shm_segsz - 1))) {
             ret = EINVAL;
             goto ERROR;
         }
+<<<<<<< HEAD
+        vaddr = (VADDR_T)LOS_MMap(vaddr, seg->ds.shm_segsz, prot, mapFlags, -1, 0);
+        region = LOS_RegionFind(space, vaddr);
+=======
         vaddr = (VADDR_T)LOS_MMap(vaddr, seg->ds.shm_segsz, prot, mapFlags, -1, 0); // establish the mapping
         region = LOS_RegionFind(space, vaddr); // look the region up again so it can be returned
+>>>>>>> remotes/origin/main
     }
     if (region == NULL) {
         ret = ENOMEM;
         goto ERROR;
     }
+<<<<<<< HEAD
+    ShmVmmMapping(space, &seg->node, region->range.base, regionFlags);
+=======
     ShmVmmMapping(space, &seg->node, region->range.base, regionFlags); // map the shared pages into the region
+>>>>>>> remotes/origin/main
     (VOID)LOS_MuxRelease(&space->regionMux);
     return region;
ERROR:
@@ -658,6 +908,8 @@ ERROR:
     return NULL;
 }
+<<<<<<< HEAD
+=======
 /*!
 * @brief ShmAt
 * Start access to the shared memory: attach it to the current process's address space.
@@ -669,6 +921,7 @@ ERROR:
 shmid is just an index, like a process or thread id: g_shmSegs[shmid], with at most 192 segments
 * @see
 */
+>>>>>>> remotes/origin/main
 VOID *ShmAt(INT32 shmid, const VOID *shmaddr, INT32 shmflg)
 {
     INT32 ret;
@@ -677,13 +930,21 @@ VOID *ShmAt(INT32 shmid, const VOID *shmaddr, INT32 shmflg)
     struct shmIDSource *seg = NULL;
     LosVmMapRegion *r = NULL;
+<<<<<<< HEAD
+    ret = ShmatParamCheck(shmaddr, shmflg);
+=======
     ret = ShmatParamCheck(shmaddr, shmflg); // parameter check
+>>>>>>> remotes/origin/main
     if (ret != 0) {
         set_errno(ret);
         return (VOID *)-1;
     }
+<<<<<<< HEAD
+    if ((UINT32)shmflg & SHM_EXEC) {
+=======
     if ((UINT32)shmflg & SHM_EXEC) { // convert the flags
+>>>>>>> remotes/origin/main
         prot |= PROT_EXEC;
         acc_mode |= SHM_S_IXUGO;
     } else if (((UINT32)shmflg & SHM_RDONLY) == 0) {
@@ -692,7 +953,11 @@ VOID *ShmAt(INT32 shmid, const VOID *shmaddr, INT32 shmflg)
     }
     SYSV_SHM_LOCK();
+<<<<<<< HEAD
+    seg = ShmFindSeg(shmid);
+=======
     seg = ShmFindSeg(shmid); // find the segment
+>>>>>>> remotes/origin/main
     if (seg == NULL) {
         SYSV_SHM_UNLOCK();
         return (VOID *)-1;
@@ -703,18 +968,30 @@ VOID *ShmAt(INT32 shmid, const VOID *shmaddr, INT32 shmflg)
         goto ERROR;
     }
+<<<<<<< HEAD
+    seg->ds.shm_nattch++;
+    r = ShmatVmmAlloc(seg, shmaddr, shmflg, prot);
+=======
     seg->ds.shm_nattch++; // record that one more process attaches to the segment
     r = ShmatVmmAlloc(seg, shmaddr, shmflg, prot); // allocate a region in the current process and map it onto the shared memory
+>>>>>>> remotes/origin/main
     if (r == NULL) {
         seg->ds.shm_nattch--;
         SYSV_SHM_UNLOCK();
         return (VOID *)-1;
     }
+<<<<<<< HEAD
+    r->shmid = shmid;
+    r->regionFlags |= VM_MAP_REGION_FLAG_SHM;
+    seg->ds.shm_atime = time(NULL);
+    seg->ds.shm_lpid = LOS_GetCurrProcessID();
+=======
     r->shmid = shmid; // hand the id to the region's shmid
     r->regionFlags |= VM_MAP_REGION_FLAG_SHM; // this is a shared region
     seg->ds.shm_atime = time(NULL); // access time
     seg->ds.shm_lpid = LOS_GetCurrProcessID(); // the operating process's id
+>>>>>>> remotes/origin/main
     SYSV_SHM_UNLOCK();
     return (VOID *)(UINTPTR)r->range.base;
@@ -725,6 +1002,8 @@ ERROR:
     return (VOID *)-1;
 }
+<<<<<<< HEAD
+=======
 /*!
 * @brief ShmCtl
 * Perform various operations on the shared memory identified by shmid
 (delete it, query information, lock, unlock, and so on)
@@ -738,6 +1017,7 @@ ERROR:
 *
 * @see
 */
+>>>>>>> remotes/origin/main
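A short user-space pass over the two most common commands handled below, IPC_STAT and IPC_RMID (shmid is assumed to come from an earlier shmget):

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int DemoShmStatAndRemove(int shmid)
    {
        struct shmid_ds ds;
        if (shmctl(shmid, IPC_STAT, &ds) != 0) { /* copies shmid_ds out of the kernel */
            return -1;
        }
        printf("size=%lu nattch=%lu\n",
               (unsigned long)ds.shm_segsz, (unsigned long)ds.shm_nattch);
        return shmctl(shmid, IPC_RMID, NULL); /* sets SHM_SEG_REMOVE; freed at last detach */
    }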
 INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
 {
     struct shmIDSource *seg = NULL;
@@ -750,7 +1030,11 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
     SYSV_SHM_LOCK();
     if ((cmd != IPC_INFO) && (cmd != SHM_INFO)) {
+<<<<<<< HEAD
+        seg = ShmFindSeg(shmid);
+=======
         seg = ShmFindSeg(shmid); // find the segment by its index id
+>>>>>>> remotes/origin/main
         if (seg == NULL) {
             SYSV_SHM_UNLOCK();
             return -1;
         }
@@ -764,13 +1048,21 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
     switch (cmd) {
         case IPC_STAT:
+<<<<<<< HEAD
+        case SHM_STAT:
+=======
         case SHM_STAT: // fetch the segment structure
+>>>>>>> remotes/origin/main
             ret = ShmPermCheck(seg, SHM_S_IRUGO);
             if (ret != 0) {
                 goto ERROR;
             }
+<<<<<<< HEAD
+            ret = LOS_ArchCopyToUser(buf, &seg->ds, sizeof(struct shmid_ds));
+=======
             ret = LOS_ArchCopyToUser(buf, &seg->ds, sizeof(struct shmid_ds)); // copy the data from kernel space to user space
+>>>>>>> remotes/origin/main
             if (ret != 0) {
                 ret = EFAULT;
                 goto ERROR;
             }
@@ -779,13 +1071,21 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
                 ret = (unsigned int)((unsigned int)seg->ds.shm_perm.seq << 16) | (unsigned int)((unsigned int)shmid & 0xffff); /* 16: use the seq as the upper 16 bits */
             }
             break;
+<<<<<<< HEAD
+        case IPC_SET:
+=======
         case IPC_SET: // reset the shared segment
+>>>>>>> remotes/origin/main
             ret = ShmPermCheck(seg, SHM_M);
             if (ret != 0) {
                 ret = EPERM;
                 goto ERROR;
             }
+<<<<<<< HEAD
+
+=======
             // copy the data from user space into kernel space
+>>>>>>> remotes/origin/main
             ret = LOS_ArchCopyFromUser(&shm_perm, &buf->shm_perm, sizeof(struct ipc_perm));
             if (ret != 0) {
                 ret = EFAULT;
                 goto ERROR;
             }
@@ -794,14 +1094,22 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
             seg->ds.shm_perm.uid = shm_perm.uid;
             seg->ds.shm_perm.gid = shm_perm.gid;
             seg->ds.shm_perm.mode = (seg->ds.shm_perm.mode & ~ACCESSPERMS) |
+<<<<<<< HEAD
+                                    (shm_perm.mode & ACCESSPERMS);
+=======
                                     (shm_perm.mode & ACCESSPERMS); // keep only the access bits
+>>>>>>> remotes/origin/main
             seg->ds.shm_ctime = time(NULL);
 #ifdef LOSCFG_SHELL
             (VOID)memcpy_s(seg->ownerName, OS_PCB_NAME_LEN, OS_PCB_FROM_PID(shm_perm.uid)->processName, OS_PCB_NAME_LEN);
 #endif
             break;
+<<<<<<< HEAD
+        case IPC_RMID:
+=======
         case IPC_RMID: // remove the shared segment
+>>>>>>> remotes/origin/main
             ret = ShmPermCheck(seg, SHM_M);
             if (ret != 0) {
                 ret = EPERM;
                 goto ERROR;
             }
             seg->status |= SHM_SEG_REMOVE;
+<<<<<<< HEAD
+            if (seg->ds.shm_nattch <= 0) {
+                ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);
+            }
+            break;
+        case IPC_INFO:
+=======
             if (seg->ds.shm_nattch <= 0) { // no process is using it any more
                 ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);
             }
             break;
         case IPC_INFO: // copy the limits from kernel space to user space
+>>>>>>> remotes/origin/main
             ret = LOS_ArchCopyToUser(buf, &IPC_SHM_INFO, sizeof(struct shminfo));
             if (ret != 0) {
                 ret = EFAULT;
                 goto ERROR;
             }
@@ -827,8 +1143,13 @@ INT32 ShmCtl(INT32 shmid, INT32 cmd, struct shmid_ds *buf)
             shmInfo.shm_tot = 0;
             shmInfo.swap_attempts = 0;
             shmInfo.swap_successes = 0;
+<<<<<<< HEAD
+            shmInfo.used_ids = ShmSegUsedCount();
+            ret = LOS_ArchCopyToUser(buf, &shmInfo, sizeof(struct shm_info));
+=======
             shmInfo.used_ids = ShmSegUsedCount(); // number of segments in use
             ret = LOS_ArchCopyToUser(buf, &shmInfo, sizeof(struct shm_info)); // copy the info from kernel space to user space
+>>>>>>> remotes/origin/main
             if (ret != 0) {
                 ret = EFAULT;
                 goto ERROR;
             }
@@ -851,6 +1172,11 @@ ERROR:
     return -1;
 }
+<<<<<<< HEAD
+INT32 ShmDt(const VOID *shmaddr)
+{
+    LosVmSpace *space = OsCurrProcessGet()->vmSpace;
+=======
 /**
 * @brief When the work on the shared memory is finished, call shmdt to detach from the segment.
 If shmat succeeded, the kernel decrements the shm_nattch counter in the shmid_ds structure associated with the shared memory by 1
@@ -862,30 +1188,56 @@ ERROR:
 INT32 ShmDt(const VOID *shmaddr)
 {
     LosVmSpace *space = OsCurrProcessGet()->vmSpace; // get the process's address space
+>>>>>>> remotes/origin/main
     struct shmIDSource *seg = NULL;
     LosVmMapRegion *region = NULL;
     INT32 shmid;
     INT32 ret;
+<<<<<<< HEAD
+    if (IS_PAGE_ALIGNED(shmaddr) == 0) {
+=======
     if (IS_PAGE_ALIGNED(shmaddr) == 0) { // the address must be page aligned
+>>>>>>> remotes/origin/main
         ret = EINVAL;
         goto ERROR;
     }
     (VOID)LOS_MuxAcquire(&space->regionMux);
+<<<<<<< HEAD
+    region = LOS_RegionFind(space, (VADDR_T)(UINTPTR)shmaddr);
+=======
     region = LOS_RegionFind(space, (VADDR_T)(UINTPTR)shmaddr); // find the region
+>>>>>>> remotes/origin/main
     if (region == NULL) {
         ret = EINVAL;
         goto ERROR_WITH_LOCK;
     }
+<<<<<<< HEAD
+    shmid = region->shmid;
+
+    if (region->range.base != (VADDR_T)(UINTPTR)shmaddr) {
+        ret = EINVAL;
+=======
     shmid = region->shmid; // the region's shared memory id
     if (region->range.base != (VADDR_T)(UINTPTR)shmaddr) { // this unbinds user space from kernel space
         ret = EINVAL; // shmaddr must equal region->range.base
+>>>>>>> remotes/origin/main
         goto ERROR_WITH_LOCK;
     }
     /* remove it from aspace */
+<<<<<<< HEAD
+    LOS_RbDelNode(&space->regionRbTree, &region->rbNode);
+    LOS_ArchMmuUnmap(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT);
+    (VOID)LOS_MuxRelease(&space->regionMux);
+    /* free it */
+    free(region);
+
+    SYSV_SHM_LOCK();
+    seg = ShmFindSeg(shmid);
+=======
     LOS_RbDelNode(&space->regionRbTree, &region->rbNode); // unhook the node from the red-black tree and the list
     LOS_ArchMmuUnmap(&space->archMmu, region->range.base, region->range.size >> PAGE_SHIFT); // unmap the region
     (VOID)LOS_MuxRelease(&space->regionMux);
@@ -894,12 +1246,23 @@ INT32 ShmDt(const VOID *shmaddr)
     SYSV_SHM_LOCK();
     seg = ShmFindSeg(shmid); // find the segment; regions and segments are N:1, so regions from other spaces may also be bound to this segment
+>>>>>>> remotes/origin/main
     if (seg == NULL) {
         ret = EINVAL;
         SYSV_SHM_UNLOCK();
         goto ERROR;
     }
+<<<<<<< HEAD
+    ShmPagesRefDec(seg);
+    seg->ds.shm_nattch--;
+    if ((seg->ds.shm_nattch <= 0) &&
+        (seg->status & SHM_SEG_REMOVE)) {
+        ShmFreeSeg(seg, &IPC_SHM_USED_PAGE_COUNT);
+    } else {
+        seg->ds.shm_dtime = time(NULL);
+        seg->ds.shm_lpid = LOS_GetCurrProcessID();
+=======
     ShmPagesRefDec(seg); // drop the page references
     seg->ds.shm_nattch--; // one fewer process uses the shared memory
     if ((seg->ds.shm_nattch <= 0) && // no process uses the shared memory any more
@@ -908,6 +1271,7 @@ INT32 ShmDt(const VOID *shmaddr)
     } else {
         seg->ds.shm_dtime = time(NULL); // record the detach time
         seg->ds.shm_lpid = LOS_GetCurrProcessID(); // record the operating process's id
+>>>>>>> remotes/origin/main
     }
     SYSV_SHM_UNLOCK();
@@ -963,6 +1327,10 @@ STATIC VOID OsShmInfoCmd(VOID)
     }
     SYSV_SHM_UNLOCK();
 }
+<<<<<<< HEAD
+
+=======
+>>>>>>> remotes/origin/main
 STATIC VOID OsShmDeleteCmd(INT32 shmid)
 {
     struct shmIDSource *seg = NULL;
@@ -991,7 +1359,11 @@ STATIC VOID OsShmCmdUsage(VOID)
            "\t-r [shmid], Recycle the specified shared memory about shmid\n"
            "\t-h | --help, print shm command usage\n");
 }
+<<<<<<< HEAD
+
+=======
/// The shm shell command
+>>>>>>> remotes/origin/main
 UINT32 OsShellCmdShm(INT32 argc, const CHAR *argv[])
 {
     INT32 shmid;
@@ -1026,3 +1398,7 @@ DONE:
 SHELLCMD_ENTRY(shm_shellcmd, CMD_TYPE_SHOW, "shm", 2, (CmdCallBackFunc)OsShellCmdShm);
 #endif
 #endif
+<<<<<<< HEAD
+
+=======
+>>>>>>> remotes/origin/main