Compare commits

...

26 Commits

Author SHA1 Message Date
m4l6qprxo 6ebf33facf Merge pull request 'finale version' (#41) from zhoumingyang_branch into main
1 year ago
eee d96e84d42e finale version
1 year ago
eee f005e88968 fdsf
1 year ago
eee c3359dcc46 add smg
1 year ago
eee fe4c734b0c apps code reading partially
1 year ago
piyl8cs5f 1500194ec8 Merge pull request '想不到吧,头文件我也能读' (#37) from gonghao_branch into main
1 year ago
Dio 5f874bbc3d 不是哥们,include你也读啊?
1 year ago
piyl8cs5f 9c207a7b97 Merge pull request 'ipc修改完毕' (#36) from gonghao_branch into main
1 year ago
Dio 03118dec50 ipc尽数拿下
1 year ago
m4l6qprxo fd5d66210a Merge pull request '1' (#35) from zhoumingyang_branch into main
1 year ago
eee 3a42194ac0 report 90% finished
1 year ago
m4l6qprxo 41a63c0f32 Merge pull request '0' (#34) from zhoumingyang_branch into main
1 year ago
eee 86cbb88fc2 心得体会完成
1 year ago
m4l6qprxo 706942896d Merge pull request 'eeee' (#33) from zhoumingyang_branch into main
1 year ago
eee 3e36d446af 软件体系架构图完成
1 year ago
m4l6qprxo 3a81ef790d Merge pull request 'limited' (#32) from zhoumingyang_branch into main
1 year ago
eee 3ca4b7410a misc content update
1 year ago
piyl8cs5f 88242a3652 Merge pull request 'some ipc' (#31) from gonghao_branch into main
1 year ago
Dio 1bebb56d46 ipc to be continued
1 year ago
piyl8cs5f 6ee979fcf4 Merge pull request 'have to say that it's easy to tell it's funny' (#30) from gonghao_branch into main
1 year ago
Dio 1cc04a3818 something cheaty added
1 year ago
m4l6qprxo 771c129ed9 Merge pull request 'ahaha' (#29) from zhoumingyang_branch into main
1 year ago
eee aa3d903163 用例交互图
1 year ago
piyl8cs5f 7397569d05 Merge pull request 'sched大概差不多' (#28) from gonghao_branch into main
1 year ago
Dio aa1fab3ff7 sched大概差不多
1 year ago
m4l6qprxo 99cc02b3cb Merge pull request 'vm解救' (#25) from danwanhao_branch into main
1 year ago

24
.gitignore vendored

@ -1,24 +0,0 @@
# General ignored file types
*.o
*.a
*.so
*.swp
# IDE settings
.vscode
.idea
.settings
.cproject
.project
# VIM files
cscope*
tags
# Menuconfig temp files
/config.h
/.config
/.config.old
# Build temp files
/out

@ -1,7 +0,0 @@
{
"files.associations": {
"los_swtmr_pri.h": "c",
"los_vm_syscall.h": "c",
"regex": "c"
}
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

@ -32,7 +32,10 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/// @brief 缓冲区写入测试
/// @param buf
/// @param start
/// @param end
static void BufWriteTest(void *buf, int start, int end)
{
for (int i = start; i <= end; i++) {
@ -40,6 +43,10 @@ static void BufWriteTest(void *buf, int start, int end)
}
}
/// @brief 缓冲区读入测试
/// @param buf
/// @param start
/// @param end
static void BufReadTest(void *buf, int start, int end)
{
char tmp;
@ -47,7 +54,8 @@ static void BufReadTest(void *buf, int start, int end)
tmp = ((char *)buf)[i];
}
}
/// @brief Lms分配测试
/// @param
static void LmsMallocTest(void)
{
#define TEST_SIZE 16
@ -64,7 +72,8 @@ static void LmsMallocTest(void)
free(buf);
printf("\n-------- LmsMallocTest End --------\n");
}
/// @brief Lms回收测试
/// @param
static void LmsReallocTest(void)
{
#define TEST_SIZE 64
@ -85,6 +94,8 @@ static void LmsReallocTest(void)
printf("\n-------- LmsReallocTest End --------\n");
}
/// @brief 分配存储器测试
/// @param
static void LmsCallocTest(void)
{
#define TEST_SIZE 16
@ -99,6 +110,8 @@ static void LmsCallocTest(void)
printf("\n-------- LmsCallocTest End --------\n");
}
/// @brief 分配虚拟存储测试
/// @param
static void LmsVallocTest(void)
{
#define TEST_SIZE 4096
@ -112,7 +125,8 @@ static void LmsVallocTest(void)
free(buf);
printf("\n-------- LmsVallocTest End --------\n");
}
/// @brief 线性分配测试
/// @param
static void LmsAlignedAllocTest(void)
{
#define TEST_ALIGN_SIZE 64
@ -127,7 +141,8 @@ static void LmsAlignedAllocTest(void)
free(buf);
printf("\n-------- LmsAlignedAllocTest End --------\n");
}
/// @brief memset测试
/// @param
static void LmsMemsetTest(void)
{
#define TEST_SIZE 32

@ -40,7 +40,10 @@
#include "perf_list.h"
#include "perf_stat.h"
#include "perf_record.h"
/// @brief 性能分析工具perf的代码
/// @param argc
/// @param argv
/// @return
int main(int argc, char **argv)
{
#define TWO_ARGS 2

@ -33,21 +33,29 @@
#include <string.h>
#include "option.h"
#include "perf_list.h"
/// @brief 解析命令行参数,并根据参数的类型将其值赋给相应的变量。
/// @param argv
/// @param index
/// @param opts
/// @return
static int ParseOption(char **argv, int *index, PerfOption *opts)
{
int ret = 0;
const char *str = NULL;
// 使用while循环遍历 opts 结构体数组,直到遇到一个空指针。
while ((opts->name != NULL) && (*opts->name != 0)) {
if (strcmp(argv[*index], opts->name) == 0) {
switch (opts->type) {
switch (opts->type) {
//如果选项类型是 OPTION_TYPE_UINT则将命令行参数转换为无符号整数并存储到当前选项的值中。
case OPTION_TYPE_UINT:
*opts->value = strtoul(argv[++(*index)], NULL, 0);
break;
//如果选项类型是 OPTION_TYPE_STRING则将命令行参数赋值给当前选项的字符串值。
case OPTION_TYPE_STRING:
*opts->str = argv[++(*index)];
break;
// 如果选项类型是 OPTION_TYPE_CALLBACK则调用当前选项的回调函数并将命令行参数作为参数传递给回调函数。
//如果回调函数返回值不为0则打印解析错误信息并将 ret 设置为-1。
case OPTION_TYPE_CALLBACK:
str = argv[++(*index)];
if ((*opts->cb)(str) != 0) {
@ -55,6 +63,7 @@ static int ParseOption(char **argv, int *index, PerfOption *opts)
ret = -1;
}
break;
//如果在while循环中没有找到匹配的选项则打印无效选项信息并将 ret 设置为-1。
default:
printf("invalid option\n");
ret = -1;
@ -67,19 +76,27 @@ static int ParseOption(char **argv, int *index, PerfOption *opts)
return -1;
}
/// @brief 这段代码是一个命令行参数解析函数它的目的是从命令行参数中提取选项和子命令并将它们存储在opts cmd中
/// @param argc
/// @param argv
/// @param opts
/// @param cmd
/// @return
int ParseOptions(int argc, char **argv, PerfOption *opts, SubCmd *cmd)
{
int i;
// 定义一个变量 index初始值为0用于表示当前正在处理的命令行参数的索引。
int index = 0;
//使用while循环遍历命令行参数直到索引超出范围或者遇到非选项参数。
//在循环中,调用 ParseOption 函数解析当前选项,并将解析结果存储在 opts 结构体中。如果解析失败,则返回-1。
while ((index < argc) && (argv[index] != NULL) && (*argv[index] == '-')) {
if (ParseOption(argv, &index, opts) != 0) {
return -1;
}
index++;
}
//如果在while循环结束后还有剩余的命令行参数则将第一个参数赋值给 cmd 结构体的 path 成员,
//并将其余参数存储在 cmd 结构体的 params 数组中。如果缺少子命令参数,则打印错误信息并返回-1。
if ((index < argc) && (argv[index] != NULL)) {
cmd->path = argv[index];
cmd->params[0] = argv[index];
@ -88,7 +105,7 @@ int ParseOptions(int argc, char **argv, PerfOption *opts, SubCmd *cmd)
printf("no subcmd to execute\n");
return -1;
}
// 使用for循环遍历剩余的命令行参数并将它们存储在 cmd 结构体的 params 数组中。循环会一直执行直到达到最大参数数量或者索引超出范围
for (i = 1; (index < argc) && (i < CMD_MAX_PARAMS); index++, i++) {
cmd->params[i] = argv[index];
}
@ -128,7 +145,10 @@ EXIT:
free(list);
return ret;
}
/// @brief 这段代码是一个C语言函数其功能是将一个字符串转换为对应的PerfEvent结构体指针。
///这个函数对于输入的字符串在全局的g_events数组中进行查找并返回匹配的PerfEvent结构体指针。
/// @param str
/// @return
static inline const PerfEvent *StrToEvent(const char *str)
{
const PerfEvent *evt = &g_events[0];
@ -140,7 +160,11 @@ static inline const PerfEvent *StrToEvent(const char *str)
}
return NULL;
}
/// @brief 这段代码定义了一个名为 ParseEvents 的函数,其目的是解析一个以逗号分隔的字符串,并将其转换为 PerfEventConfig 结构体中的事件配置。
/// @param argv
/// @param eventsCfg
/// @param len
/// @return
int ParseEvents(const char *argv, PerfEventConfig *eventsCfg, unsigned int *len)
{
int ret;

@ -57,10 +57,10 @@
typedef enum {
CONTAINER = 0,
PID_CONTAINER,
PID_CHILD_CONTAINER,
UTS_CONTAINER,
MNT_CONTAINER,
PID_CONTAINER, //进程容器
PID_CHILD_CONTAINER, //子进程容器
UTS_CONTAINER, //
MNT_CONTAINER, //挂载容器
IPC_CONTAINER,
USER_CONTAINER,
TIME_CONTAINER,
@ -70,29 +70,29 @@ typedef enum {
} ContainerType;
typedef struct Container {
Atomic rc;
Atomic rc; //原子操作
#ifdef LOSCFG_PID_CONTAINER
struct PidContainer *pidContainer;
struct PidContainer *pidForChildContainer;
struct PidContainer *pidContainer; //进程容器
struct PidContainer *pidForChildContainer;//进程的孩子容器
#endif
#ifdef LOSCFG_UTS_CONTAINER
struct UtsContainer *utsContainer;
struct UtsContainer *utsContainer; //
#endif
#ifdef LOSCFG_MNT_CONTAINER
struct MntContainer *mntContainer;
struct MntContainer *mntContainer; //挂载容器
#endif
#ifdef LOSCFG_IPC_CONTAINER
struct IpcContainer *ipcContainer;
struct IpcContainer *ipcContainer; //IPC容器
#endif
#ifdef LOSCFG_TIME_CONTAINER
struct TimeContainer *timeContainer;
struct TimeContainer *timeForChildContainer;
struct TimeContainer *timeContainer; //时间容器
struct TimeContainer *timeForChildContainer;
#endif
#ifdef LOSCFG_NET_CONTAINER
struct NetContainer *netContainer;
struct NetContainer *netContainer; //网络容器
#endif
} Container;
//容器数量上限
typedef struct TagContainerLimit {
#ifdef LOSCFG_PID_CONTAINER
UINT32 pidLimit;

@ -40,78 +40,21 @@ extern "C" {
#endif /* __cplusplus */
#endif /* __cplusplus */
/**
* @ingroup los_err
* Define the error magic word.
*/
#define OS_ERR_MAGIC_WORD 0xa1b2c3f8
/**
* @ingroup los_err
* @brief Error handling macro capable of returning error codes.
*
* @par Description:
* This API is used to call the error handling function by using an error code and return the same error code.
* @attention
* <ul>
* <li>None.</li>
* </ul>
*
* @param errNo [IN] Error code.
*
* @retval errNo
* @par Dependency:
* <ul><li>los_err_pri.h: the header file that contains the API declaration.</li></ul>
* @see None.
*/
#define OS_RETURN_ERROR(errNo) do { \
(VOID)LOS_ErrHandle("os_unspecific_file", OS_ERR_MAGIC_WORD, errNo, 0, NULL); \
return errNo; \
} while (0)
/**
* @ingroup los_err
* @brief Error handling macro capable of returning error codes.
*
* @par Description:
* This API is used to call the error handling function by using an error code and the line number of
* the erroneous line, and return the same error code.
* @attention
* <ul>
* <li>None.</li>
* </ul>
*
* @param errLine [IN] Line number of the erroneous line.
* @param errNo [IN] Error code.
*
* @retval errNo
* @par Dependency:
* <ul><li>los_err_pri.h: the header file that contains the API declaration.</li></ul>
* @see None.
*/
#define OS_RETURN_ERROR_P2(errLine, errNo) do { \
(VOID)LOS_ErrHandle("os_unspecific_file", errLine, errNo, 0, NULL); \
return errNo; \
} while (0)
/**
* @ingroup los_err
* @brief Macro for jumping to error handler.
*
* @par Description:
* This API is used to call the error handling function by using an error code.
* @attention
* <ul>
* <li>None.</li>
* </ul>
*
* @param errorNo [IN] Error code.
*
* @retval None.
* @par Dependency:
* <ul><li>los_err_pri.h: the header file that contains the API declaration.</li></ul>
* @see None.
*/
#define OS_GOTO_ERR_HANDLER(errorNo) do { \
errNo = errorNo; \
errLine = OS_ERR_MAGIC_WORD; \

@ -33,25 +33,27 @@
#define _LOS_FUTEX_PRI_H
#include "los_list.h"
#define FUTEX_WAIT 0
#define FUTEX_WAKE 1
#define FUTEX_REQUEUE 3
#define FUTEX_WAKE_OP 5
#define FUTEX_WAIT 0 ///< 原子性的检查 uaddr 中计数器的值是否为 val如果是则让任务休眠直到 FUTEX_WAKE 或者超时time-out
//也就是把任务挂到 uaddr 相对应的等待队列上去。
#define FUTEX_WAKE 1 ///< 最多唤醒 val 个等待在 uaddr 上任务。
#define FUTEX_REQUEUE 3 ///< 调整指定锁在Futex表中的位置
#define FUTEX_WAKE_OP 5
#define FUTEX_LOCK_PI 6
#define FUTEX_UNLOCK_PI 7
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_WAIT_BITSET 9
#define FUTEX_PRIVATE 128
#define FUTEX_PRIVATE 128 //私有快锁(以虚拟地址进行哈希)
#define FUTEX_MASK 0x3U
/// 每个futex node对应一个被挂起的task key值唯一标识一把用户态锁具有相同key值的node被queue_list串联起来表示被同一把锁阻塞的task队列。
typedef struct {
UINTPTR key; /* private:uvaddr shared:paddr */
UINT32 index; /* hash bucket index */
UINT32 pid; /* private:process id shared:OS_INVALID(-1) */
LOS_DL_LIST pendList; /* point to pendList in TCB struct */
LOS_DL_LIST queueList; /* thread list blocked by this lock */
LOS_DL_LIST futexList; /* point to the next FutexNode */
UINTPTR key; /* private:uvaddr | 私有锁,用虚拟地址 shared:paddr | 共享锁,用物理地址*/
UINT32 index; /* hash bucket index | 哈希桶索引 OsFutexKeyToIndex */
UINT32 pid; /* private:process id shared:OS_INVALID(-1) | 私有锁:进程ID , 共享锁为 -1 */
LOS_DL_LIST pendList; /* point to pendList in TCB struct | 指向 TCB 结构中的 pendList, 通过它找到任务*/
LOS_DL_LIST queueList; /* thread list blocked by this lock | 挂等待这把锁的任务其实这里挂到是FutexNode.queueList ,
queueList pendList , pendList*/
LOS_DL_LIST futexList; /* point to the next FutexNode | 下一把Futex锁*/
} FutexNode;
extern UINT32 OsFutexInit(VOID);

@ -42,13 +42,13 @@ typedef struct TagQueueCB LosQueueCB;
typedef struct OsMux LosMux;
typedef LosMux pthread_mutex_t;
typedef struct ProcessCB LosProcessCB;
//IPC容器
typedef struct IpcContainer {
Atomic rc;
LosQueueCB *allQueue;
LOS_DL_LIST freeQueueList;
LosQueueCB *allQueue; //队列控制块(读写分离模式)
LOS_DL_LIST freeQueueList;//空闲队列链表
fd_set queueFdSet;
struct mqarray queueTable[LOSCFG_BASE_IPC_QUEUE_LIMIT];
struct mqarray queueTable[LOSCFG_BASE_IPC_QUEUE_LIMIT];//队列池
pthread_mutex_t mqueueMutex;
struct mqpersonal *mqPrivBuf[MAX_MQ_FD];
struct shminfo shmInfo;

@ -42,7 +42,7 @@ extern "C" {
#endif /* __cplusplus */
typedef struct {
UINT32 memUsed;
UINT32 memUsed; ///< 记录任务内存使用量
} TskMemUsedInfo;
extern VOID OsTaskMemUsedInc(UINT32 usedSize, UINT32 taskID);
@ -53,7 +53,7 @@ extern VOID OsTaskMemClear(UINT32 taskID);
#ifdef LOS_MEM_SLAB
typedef struct {
UINT32 slabUsed;
UINT32 slabUsed; ///< 任务占用以slab分配方式内存量
} TskSlabUsedInfo;
extern VOID OsTaskSlabUsedInc(UINT32 usedSize, UINT32 taskID);

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:

@ -43,9 +43,9 @@ extern "C" {
#ifdef LOSCFG_KERNEL_SMP
typedef enum {
CPU_RUNNING = 0, /* cpu is running */
CPU_HALT, /* cpu in the halt */
CPU_EXC /* cpu in the exc */
CPU_RUNNING = 0, ///< cpu is running | CPU正在运行状态
CPU_HALT, ///< cpu in the halt | CPU处于暂停状态
CPU_EXC ///< cpu in the exc | CPU处于异常状态
} ExcFlag;
typedef struct {
@ -55,14 +55,14 @@ typedef struct {
#endif
} Percpu;
/* the kernel per-cpu structure */
/*! the kernel per-cpu structure | 每个cpu的内核描述符 */
extern Percpu g_percpu[LOSCFG_KERNEL_CORE_NUM];
/*! 获得当前运行CPU的信息 */
STATIC INLINE Percpu *OsPercpuGet(VOID)
{
return &g_percpu[ArchCurrCpuid()];
return &g_percpu[ArchCurrCpuid()];
}
/*! 获得参数CPU的信息 */
STATIC INLINE Percpu *OsPercpuGetByID(UINT32 cpuid)
{
return &g_percpu[cpuid];

@ -38,29 +38,29 @@ typedef struct TagTaskCB LosTaskCB;
typedef struct ProcessCB LosProcessCB;
struct ProcessGroup;
struct Container;
//虚拟进程/任务 信息
typedef struct {
UINT32 vid; /* Virtual ID */
UINT32 vpid; /* Virtual parent ID */
UINTPTR cb; /* Control block */
LosProcessCB *realParent; /* process real parent */
LOS_DL_LIST node;
UINT32 vid; /* Virtual ID | 虚拟ID*/
UINT32 vpid; /* Virtual parent ID | 父进程虚拟ID*/
UINTPTR cb; /* Control block | 控制块*/
LosProcessCB *realParent; /* process real parent | 进程真实的父进程 */
LOS_DL_LIST node;//用于挂入 PidContainer.pidFreeList | tidFreeList
} ProcessVid;
#define PID_CONTAINER_LEVEL_LIMIT 3
//进程容器
typedef struct PidContainer {
Atomic rc;
Atomic level;
Atomic lock;
BOOL referenced;
UINT32 containerID;
struct PidContainer *parent;
struct ProcessGroup *rootPGroup;
LOS_DL_LIST tidFreeList;
ProcessVid tidArray[LOSCFG_BASE_CORE_TSK_LIMIT];
LOS_DL_LIST pidFreeList;
ProcessVid pidArray[LOSCFG_BASE_CORE_PROCESS_LIMIT];
Atomic rc; //原子操作
Atomic level; //等级0为最高级父比子高一级
Atomic lock; //锁
BOOL referenced; //是否被引用
UINT32 containerID; //容器ID
struct PidContainer *parent; //父进程容器
struct ProcessGroup *rootPGroup; //进程组
LOS_DL_LIST tidFreeList; //任务空闲链表
ProcessVid tidArray[LOSCFG_BASE_CORE_TSK_LIMIT];//虚拟任务池
LOS_DL_LIST pidFreeList; //进程空闲链表
ProcessVid pidArray[LOSCFG_BASE_CORE_PROCESS_LIMIT];//虚拟进程池
} PidContainer;
#define OS_PID_CONTAINER_FROM_PCB(processCB) ((processCB)->container->pidContainer)

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -65,82 +65,86 @@ extern "C" {
#ifdef LOSCFG_SECURITY_CAPABILITY
#define OS_GROUPS_NUMBER_MAX 256
/*! 用户描述体*/
typedef struct {
UINT32 userID;
UINT32 userID; ///<用户ID [0,60000],0为root用户
UINT32 effUserID;
UINT32 gid;
UINT32 gid; ///<用户组ID [0,60000],0为root用户组
UINT32 effGid;
UINT32 groupNumber;
UINT32 groups[1];
UINT32 groupNumber;///< 用户组数量
UINT32 groups[1]; //所属用户组列表,一个用户可属多个用户组
} User;
#endif
/*! 进程组结构体*/
typedef struct ProcessGroup {
UINTPTR pgroupLeader; /**< Process group leader is the the process that created the group */
LOS_DL_LIST processList; /**< List of processes under this process group */
LOS_DL_LIST exitProcessList; /**< List of closed processes (zombie processes) under this group */
LOS_DL_LIST groupList; /**< Process group list */
UINTPTR pgroupLeader; /**< Process group leader is the the process that created the group | 负责创建进程组的进程首地址*/
LOS_DL_LIST processList; /**< List of processes under this process group | 属于该进程组的进程链表*/
LOS_DL_LIST exitProcessList; /**< List of closed processes (zombie processes) under this group | 进程组的僵死进程链表*/
LOS_DL_LIST groupList; /**< Process group list | 进程组链表,上面挂的都是进程组*/
} ProcessGroup;
/**
* Process control block (PCB): per-process bookkeeping structure of the kernel.
*/
typedef struct ProcessCB {
CHAR processName[OS_PCB_NAME_LEN]; /**< Process name */
UINT32 processID; /**< Process ID */
CHAR processName[OS_PCB_NAME_LEN]; /**< Process name | 进程名称 */
UINT32 processID; /**< Process ID = leader thread ID | 进程ID,由进程池分配,范围[0,64] */
UINT16 processStatus; /**< [15:4] Process Status; [3:0] The number of threads currently
running in the process */
UINT16 consoleID; /**< The console id of task belongs */
UINT16 processMode; /**< Kernel Mode:0; User Mode:1; */ // 用戶模式1 内核模式0
running in the process | . ,! @note_good */
UINT16 consoleID; /**< The console id of task belongs | 任务的控制台id归属 */
UINT16 processMode; /**< Kernel Mode:0; User Mode:1; | 模式指定为内核还是用户进程 */
struct ProcessCB *parentProcess; /**< Parent process */
UINT32 exitCode; /**< Process exit status */
LOS_DL_LIST pendList; /**< Block list to which the process belongs */
LOS_DL_LIST childrenList; /**< Children process list */ // 存放子進程,链表
LOS_DL_LIST exitChildList; /**< Exit children process list */ // 要退出的子进程
LOS_DL_LIST siblingList; /**< Linkage in parent's children list */ //兄弟进程链表
ProcessGroup *pgroup; /**< Process group to which a process belongs */ //所属进程组
LOS_DL_LIST subordinateGroupList; /**< Linkage in group list */ // 进程是组长时有哪些组员进程
LosTaskCB *threadGroup; // 哪个线程组是进程的主线程ID
LOS_DL_LIST threadSiblingList; /**< List of threads under this process */ // 进程的线程任务列表
volatile UINT32 threadNumber; /**< Number of threads alive under this process */ // 此进程下的活动线程数量
UINT32 threadCount; /**< Total number of threads created under this process */ // 在此进程下创建的线程总数
LOS_DL_LIST waitList; /**< The process holds the waitLits to support wait/waitpid */ // 进程持有等待链表以支持wait/waitpid
UINT32 exitCode; /**< Process exit status | 进程退出状态码*/
LOS_DL_LIST pendList; /**< Block list to which the process belongs | 进程所在的阻塞列表,进程因阻塞挂入相应的链表.*/
LOS_DL_LIST childrenList; /**< Children process list | 孩子进程都挂到这里,形成双循环链表*/
LOS_DL_LIST exitChildList; /**< Exit children process list | 要退出的孩子进程链表,白发人要送黑发人.*/
LOS_DL_LIST siblingList; /**< Linkage in parent's children list | 兄弟进程链表, 56个民族是一家,来自同一个父进程.*/
ProcessGroup *pgroup; /**< Process group to which a process belongs | 所属进程组*/
LOS_DL_LIST subordinateGroupList; /**< Linkage in group list | 进程组员链表*/
LosTaskCB *threadGroup;
LOS_DL_LIST threadSiblingList; /**< List of threads under this process | 进程的线程(任务)列表 */
volatile UINT32 threadNumber; /**< Number of threads alive under this process | 此进程下的活动线程数*/
UINT32 threadCount; /**< Total number of threads created under this process | 在此进程下创建的线程总数*/ //
LOS_DL_LIST waitList; /**< The process holds the waitLits to support wait/waitpid | 父进程通过进程等待的方式,回收子进程资源,获取子进程退出信息*/
#ifdef LOSCFG_KERNEL_SMP
UINT32 timerCpu; /**< CPU core number of this task is delayed or pended */ // 统计各个线程被延迟或者阻塞的时间
UINT32 timerCpu; /**< CPU core number of this task is delayed or pended | 统计各线程被延期或阻塞的时间*/
#endif
UINTPTR sigHandler; /**< Signal handler */ // 信号处理函数
sigset_t sigShare; /**< Signal share bit */ // 信号共享位
UINTPTR sigHandler; /**< Signal handler | 信号处理函数,处理如 SIGSYS 等信号*/
sigset_t sigShare; /**< Signal share bit | 信号共享位 sigset_t是个64位的变量,对应64种信号*/
#ifdef LOSCFG_KERNEL_LITEIPC
ProcIpcInfo *ipcInfo; /**< Memory pool for lite ipc */ //用于进程间通讯的虚拟设备文件系统,设备装载点为 /dev/lite_ipc
ProcIpcInfo *ipcInfo; /**< Memory pool for lite ipc | 用于进程间通讯的虚拟设备文件系统,设备装载点为 /dev/lite_ipc*/
#endif
#ifdef LOSCFG_KERNEL_VM
LosVmSpace *vmSpace; /**< VMM space for processes */ //虚拟空间描述进程虚拟内存的数据结构linux称为内存描述符
LosVmSpace *vmSpace; /**< VMM space for processes | 虚拟空间,描述进程虚拟内存的数据结构linux称为内存描述符 */
#endif
#ifdef LOSCFG_FS_VFS
struct files_struct *files; /**< Files held by the process */
#endif
timer_t timerID; /**< ITimer */
struct files_struct *files; /**< Files held by the process | 进程所持有的所有文件,注者称之为进程的文件管理器*/
#endif //每个进程都有属于自己的文件管理器,记录对文件的操作. 注意:一个文件可以被多个进程操作
timer_t timerID; /**< iTimer */
#ifdef LOSCFG_SECURITY_CAPABILITY
User *user; // 进程的拥有者
UINT32 capability; // 安全能力范围
#ifdef LOSCFG_SECURITY_CAPABILITY //安全能力
User *user; ///< 进程的拥有者
UINT32 capability; ///< 安全能力范围 对应 CAP_SETGID
#endif
#ifdef LOSCFG_SECURITY_VID
TimerIdMap timerIdMap;
#ifdef LOSCFG_SECURITY_VID //虚拟ID映射功能
TimerIdMap timerIdMap;
#endif
#ifdef LOSCFG_DRIVERS_TZDRIVER
struct Vnode *execVnode; /**< Exec bin of the process */
struct Vnode *execVnode; /**< Exec bin of the process | 进程的可执行文件 */
#endif
mode_t umask;
mode_t umask; ///< umask(user file-creatiopn mode mask)为用户文件创建掩码,是创建文件或文件夹时默认权限的基础。
#ifdef LOSCFG_KERNEL_CPUP
OsCpupBase *processCpup; /**< Process cpu usage */
OsCpupBase *processCpup; /**< Process cpu usage | 进程占用CPU情况统计*/
#endif
struct rlimit *resourceLimit;
struct rlimit *resourceLimit; ///< 每个进程在运行时系统不会无限制的允许单个进程不断的消耗资源,因此都会设置资源限制。
#ifdef LOSCFG_KERNEL_CONTAINER
Container *container;
Container *container; ///< 内核容器
#ifdef LOSCFG_USER_CONTAINER
struct Credentials *credentials;
struct Credentials *credentials; ///< 用户身份证
#endif
#endif
#ifdef LOSCFG_PROC_PROCESS_DIR
struct ProcDirEntry *procDir;
struct ProcDirEntry *procDir; ///< 目录文件项
#endif
#ifdef LOSCFG_KERNEL_PLIMITS
ProcLimiterSet *plimits;
@ -161,8 +165,8 @@ extern UINT32 g_processMaxNum;
#define OS_PCB_FROM_TCB(taskCB) ((LosProcessCB *)((taskCB)->processCB))
#define OS_PCB_FROM_TID(taskID) ((LosProcessCB *)(OS_TCB_FROM_TID(taskID)->processCB))
#define OS_GET_PGROUP_LEADER(pgroup) ((LosProcessCB *)((pgroup)->pgroupLeader))
#define OS_PCB_FROM_SIBLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, siblingList)
#define OS_PCB_FROM_PENDLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, pendList)
#define OS_PCB_FROM_SIBLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, siblingList)///< 通过siblingList节点找到 LosProcessCB
#define OS_PCB_FROM_PENDLIST(ptr) LOS_DL_LIST_ENTRY((ptr), LosProcessCB, pendList) ///< 通过pendlist节点找到 LosProcessCB
/**
* @ingroup los_process
@ -202,7 +206,7 @@ extern UINT32 g_processMaxNum;
*
* The process is run out but the resources occupied by the process are not recovered.
*/
#define OS_PROCESS_STATUS_ZOMBIES 0x0100U
#define OS_PROCESS_STATUS_ZOMBIES 0x0100U ///< 进程状态: 僵死
/**
* @ingroup los_process
@ -211,7 +215,7 @@ extern UINT32 g_processMaxNum;
* The process status equal this is process control block unused,
* coexisting with OS_PROCESS_STATUS_ZOMBIES means that the control block is not recovered.
*/
#define OS_PROCESS_FLAG_UNUSED 0x0200U
#define OS_PROCESS_FLAG_UNUSED 0x0200U ///< 进程未使用标签,一般用于进程的初始状态 freelist里面都是这种标签
/**
* @ingroup los_process
@ -219,7 +223,7 @@ extern UINT32 g_processMaxNum;
*
* The process has been call exit, it only works with multiple cores.
*/
#define OS_PROCESS_FLAG_EXIT 0x0400U
#define OS_PROCESS_FLAG_EXIT 0x0400U ///< 进程退出标签,退出的进程进入回收链表等待回收资源
/**
* @ingroup los_process
@ -227,7 +231,7 @@ extern UINT32 g_processMaxNum;
*
* The process is the leader of the process group.
*/
#define OS_PROCESS_FLAG_GROUP_LEADER 0x0800U
#define OS_PROCESS_FLAG_GROUP_LEADER 0x0800U ///< 进程当了进程组领导标签
/**
* @ingroup los_process
@ -235,21 +239,21 @@ extern UINT32 g_processMaxNum;
*
* The process has performed the exec operation.
*/
#define OS_PROCESS_FLAG_ALREADY_EXEC 0x1000U
#define OS_PROCESS_FLAG_ALREADY_EXEC 0x1000U ///< 进程已执行exec操作 load elf时使用
/**
* @ingroup los_process
* Flag that indicates the process or process control block status.
*
* The process is dying or already dying.
*/
#define OS_PROCESS_STATUS_INACTIVE (OS_PROCESS_FLAG_EXIT | OS_PROCESS_STATUS_ZOMBIES)
*/ /// 进程不活跃状态定义: 身上贴有退出便签且状态为僵死的进程
#define OS_PROCESS_STATUS_INACTIVE (OS_PROCESS_FLAG_EXIT | OS_PROCESS_STATUS_ZOMBIES)
/**
* @ingroup los_process
* Used to check if the process control block is unused.
*/
STATIC INLINE BOOL OsProcessIsUnused(const LosProcessCB *processCB)
STATIC INLINE BOOL OsProcessIsUnused(const LosProcessCB *processCB)// Check whether this PCB is unused, i.e. still carries OS_PROCESS_FLAG_UNUSED (typically sitting in the free list).
{
return ((processCB->processStatus & OS_PROCESS_FLAG_UNUSED) != 0);
}
@ -257,8 +261,8 @@ STATIC INLINE BOOL OsProcessIsUnused(const LosProcessCB *processCB)
/**
* @ingroup los_process
* Used to check if the process is inactive.
*/
STATIC INLINE BOOL OsProcessIsInactive(const LosProcessCB *processCB)
*/ /// 进程不活跃函数定义:身上贴有不使用且不活跃标签的进程
STATIC INLINE BOOL OsProcessIsInactive(const LosProcessCB *processCB)// Check whether the process is inactive: either unused, or flagged OS_PROCESS_STATUS_INACTIVE (EXIT | ZOMBIES).
{
return ((processCB->processStatus & (OS_PROCESS_FLAG_UNUSED | OS_PROCESS_STATUS_INACTIVE)) != 0);
}
@ -266,8 +270,8 @@ STATIC INLINE BOOL OsProcessIsInactive(const LosProcessCB *processCB)
/**
* @ingroup los_process
* Used to check if the process is dead.
*/
STATIC INLINE BOOL OsProcessIsDead(const LosProcessCB *processCB)
*/ /// 进程死啦死啦的定义: 身上贴有不使用且状态为僵死的进程
STATIC INLINE BOOL OsProcessIsDead(const LosProcessCB *processCB)// Check whether the process is dead: its status carries the ZOMBIES flag (exited, resources not yet reclaimed).
{
return ((processCB->processStatus & OS_PROCESS_STATUS_ZOMBIES) != 0);
}
@ -282,68 +286,44 @@ STATIC INLINE BOOL OsProcessIsPGroupLeader(const LosProcessCB *processCB)
return ((processCB->processStatus & OS_PROCESS_FLAG_GROUP_LEADER) != 0);
}
/**
* @ingroup los_process
* The highest priority of a kernel mode process.
*/
#define OS_PROCESS_PRIORITY_HIGHEST 0
/**
* @ingroup los_process
* The lowest priority of a kernel mode process
*/
#define OS_PROCESS_PRIORITY_LOWEST 31
#define OS_PROCESS_PRIORITY_HIGHEST 0 ///< 进程最高优先级
/**
* @ingroup los_process
* The highest priority of a user mode process.
*/
#define OS_USER_PROCESS_PRIORITY_HIGHEST 10
/**
* @ingroup los_process
* The lowest priority of a user mode process
*/
#define OS_USER_PROCESS_PRIORITY_LOWEST OS_PROCESS_PRIORITY_LOWEST
#define OS_PROCESS_PRIORITY_LOWEST 31 ///< 进程最低优先级
/**
* @ingroup los_process
* User state root process default priority
*/
#define OS_PROCESS_USERINIT_PRIORITY 28
/**
* @ingroup los_process
* ID of the kernel idle process
*/
#define OS_KERNEL_IDLE_PROCESS_ID 0U
#define OS_USER_PROCESS_PRIORITY_HIGHEST 10 ///< 内核模式和用户模式的优先级分割线 10-31 用户级, 0-9内核级
/**
* @ingroup los_process
* ID of the user root process
*/
#define OS_USER_ROOT_PROCESS_ID 1U
/**
* @ingroup los_process
* ID of the kernel root process
*/
#define OS_KERNEL_ROOT_PROCESS_ID 2U
#define OS_USER_PROCESS_PRIORITY_LOWEST OS_PROCESS_PRIORITY_LOWEST ///< 用户进程的最低优先级
#define OS_PROCESS_USERINIT_PRIORITY 28 ///< 用户进程默认的优先级,28级好低啊
#define OS_TASK_DEFAULT_STACK_SIZE 0x2000
#define OS_USER_TASK_SYSCALL_STACK_SIZE 0x3000
#define OS_USER_TASK_STACK_SIZE 0x100000
#define OS_KERNEL_IDLE_PROCESS_ID 0U //0号进程为空闲进程
#define OS_KERNEL_MODE 0x0U
#define OS_USER_MODE 0x1U
#define OS_USER_ROOT_PROCESS_ID 1U //1号为用户态根进程
#define OS_KERNEL_ROOT_PROCESS_ID 2U //1号为内核态根进程
#define OS_TASK_DEFAULT_STACK_SIZE 0x2000 ///< task默认栈大小 8K
#define OS_USER_TASK_SYSCALL_STACK_SIZE 0x3000 ///< 用户通过系统调用的栈大小 12K ,这时是运行在内核模式下
#define OS_USER_TASK_STACK_SIZE 0x100000 ///< 用户任务运行在用户空间的栈大小 1M
#define OS_KERNEL_MODE 0x0U ///< 内核态
#define OS_USER_MODE 0x1U ///< 用户态
/*! Check whether the process runs in user mode (processMode == OS_USER_MODE). */
STATIC INLINE BOOL OsProcessIsUserMode(const LosProcessCB *processCB)
{
return (processCB->processMode == OS_USER_MODE);
}
#define LOS_PRIO_PROCESS 0U
#define LOS_PRIO_PGRP 1U
#define LOS_PRIO_USER 2U
#define LOS_PRIO_PROCESS 0U ///< 进程标识
#define LOS_PRIO_PGRP 1U ///< 进程组标识
#define LOS_PRIO_USER 2U ///< 用户标识
#define OS_USER_PRIVILEGE_PROCESS_GROUP ((UINTPTR)OsGetUserInitProcess())
#define OS_KERNEL_PROCESS_GROUP ((UINTPTR)OsGetKernelInitProcess())
@ -353,40 +333,40 @@ STATIC INLINE BOOL OsProcessIsUserMode(const LosProcessCB *processCB)
* 31 15 8 7 0
* | | exit code | core dump | signal |
*/
#define OS_PRO_EXIT_OK 0
#define OS_PRO_EXIT_OK 0 ///< 进程正常退出
/// 置进程退出码第七位为1
STATIC INLINE VOID OsProcessExitCodeCoreDumpSet(LosProcessCB *processCB)
{
processCB->exitCode |= 0x80U;
processCB->exitCode |= 0x80U; // 0b10000000
}
/// 设置进程退出信号(0 ~ 7)
STATIC INLINE VOID OsProcessExitCodeSignalSet(LosProcessCB *processCB, UINT32 signal)
{
processCB->exitCode |= signal & 0x7FU;
processCB->exitCode |= signal & 0x7FU;// 0b01111111
}
/// 清除进程退出信号(0 ~ 7)
STATIC INLINE VOID OsProcessExitCodeSignalClear(LosProcessCB *processCB)
{
processCB->exitCode &= (~0x7FU);
processCB->exitCode &= (~0x7FU);// 低7位全部清0
}
/// Check whether an exit signal has been recorded in the low 7 bits of the exit code.
/// Returns 0 (false) when no signal has been set — the default exit code is 0.
STATIC INLINE BOOL OsProcessExitCodeSignalIsSet(LosProcessCB *processCB)
{
return (processCB->exitCode) & 0x7FU;
}
/// Store the (8-bit) exit code into bits 8..15 of the PCB's exit status word,
/// leaving the signal/core-dump bits (0..7) untouched.
STATIC INLINE VOID OsProcessExitCodeSet(LosProcessCB *processCB, UINT32 code)
{
processCB->exitCode |= ((code & 0x000000FFU) << 8U) & 0x0000FF00U; /* 8: Move 8 bits to the left, exitCode */
}
#define OS_PID_CHECK_INVALID(pid) (((UINT32)(pid)) >= g_processMaxNum)
/*! Inline check: is this user-visible process ID invalid?
 * Invalid when out of range (>= g_processMaxNum) or 0 (PID 0 is the kernel idle process). */
STATIC INLINE BOOL OsProcessIDUserCheckInvalid(UINT32 pid)
{
return ((pid >= g_processMaxNum) || (pid == 0));
}
/*! 获取当前进程PCB */
STATIC INLINE LosProcessCB *OsCurrProcessGet(VOID)
{
UINT32 intSave;
@ -398,6 +378,7 @@ STATIC INLINE LosProcessCB *OsCurrProcessGet(VOID)
}
#ifdef LOSCFG_SECURITY_CAPABILITY
/*! 获取当前进程的所属用户 */
STATIC INLINE User *OsCurrUserGet(VOID)
{
User *user = NULL;
@ -469,14 +450,14 @@ STATIC INLINE UINT32 OsGetRootPid(const LosProcessCB *processCB)
/*
* return immediately if no child has exited.
*/
#define LOS_WAIT_WNOHANG (1 << 0U)
#define LOS_WAIT_WNOHANG (1 << 0U) ///< 如果没有孩子进程退出,则立即返回,而不是阻塞在这个函数上等待;如果结束了,则返回该子进程的进程号。
/*
* return if a child has stopped (but not traced via ptrace(2)).
* Status for traced children which have stopped is provided even
* if this option is not specified.
*/
#define LOS_WAIT_WUNTRACED (1 << 1U)
#define LOS_WAIT_WUNTRACED (1 << 1U) ///< 如果子进程进入暂停情况则马上返回不予以理会结束状态。untraced
#define LOS_WAIT_WSTOPPED (1 << 1U)
/*
@ -488,7 +469,7 @@ STATIC INLINE UINT32 OsGetRootPid(const LosProcessCB *processCB)
* return if a stopped child has been resumed by delivery of SIGCONT.
* (For Linux-only options, see below.)
*/
#define LOS_WAIT_WCONTINUED (1 << 3U)
#define LOS_WAIT_WCONTINUED (1 << 3U) ///< 可获取子进程恢复执行的状态也就是可获取continued状态 continued
/*
* Leave the child in a waitable state;
@ -499,30 +480,30 @@ STATIC INLINE UINT32 OsGetRootPid(const LosProcessCB *processCB)
/*
* Indicates that you are already in a wait state
*/
#define OS_PROCESS_WAIT (1 << 15U)
#define OS_PROCESS_WAIT (1 << 15U) ///< 表示已经处于等待状态
/*
* Wait for any child process to finish
*/
#define OS_PROCESS_WAIT_ANY OS_TASK_WAIT_ANYPROCESS
#define OS_PROCESS_WAIT_ANY OS_TASK_WAIT_ANYPROCESS ///< 等待任意子进程完成
/*
* Wait for the child process specified by the pid to finish
*/
#define OS_PROCESS_WAIT_PRO OS_TASK_WAIT_PROCESS
#define OS_PROCESS_WAIT_PRO OS_TASK_WAIT_PROCESS ///< 等待pid指定的子进程完成
/*
* Waits for any child process in the specified process group to finish.
*/
#define OS_PROCESS_WAIT_GID OS_TASK_WAIT_GID
#define OS_PROCESS_WAIT_GID OS_TASK_WAIT_GID ///< 等待指定进程组中的任意子进程完成
#define OS_PROCESS_INFO_ALL 1
#define OS_PROCESS_DEFAULT_UMASK 0022
extern UINTPTR __user_init_entry;
extern UINTPTR __user_init_bss;
extern UINTPTR __user_init_end;
extern UINTPTR __user_init_load_addr;
#define OS_PROCESS_DEFAULT_UMASK 0022 ///< 系统默认的用户掩码(umask),大多数的Linux系统的默认掩码为022。
//用户掩码的作用是用户在创建文件时从文件的默认权限中去除掩码中的权限。所以文件创建之后的权限实际为:创建文件的权限为0666-0022=0644。创建文件夹的权限为0777-0022=0755
extern UINTPTR __user_init_entry; ///< 第一个用户态进程(init)的入口地址 查看 LITE_USER_SEC_ENTRY
extern UINTPTR __user_init_bss; ///< 查看 LITE_USER_SEC_BSS ,赋值由liteos.ld完成
extern UINTPTR __user_init_end; ///< init 进程的用户空间初始化结束地址
extern UINTPTR __user_init_load_addr;///< init 进程的加载地址 ,由链接器赋值
extern UINT32 OsProcessInit(VOID);
extern UINT32 OsSystemProcessCreate(VOID);
extern VOID OsProcessNaturalExit(LosProcessCB *processCB, UINT32 status);

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -39,8 +39,8 @@
#ifdef LOSCFG_BASE_CORE_SWTMR_ENABLE
#include "los_exc.h"
#endif
/*
*/
/// 初始化一个事件控制块
LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventInit(PEVENT_CB_S eventCB)
{
UINT32 intSave;
@ -49,14 +49,14 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventInit(PEVENT_CB_S eventCB)
return LOS_ERRNO_EVENT_PTR_NULL;
}
intSave = LOS_IntLock();
eventCB->uwEventID = 0;
LOS_ListInit(&eventCB->stEventList);
LOS_IntRestore(intSave);
intSave = LOS_IntLock();//锁中断
eventCB->uwEventID = 0;//事件类型初始化
LOS_ListInit(&eventCB->stEventList);//事件链表初始化
LOS_IntRestore(intSave);//恢复中断
OsHookCall(LOS_HOOK_TYPE_EVENT_INIT, eventCB);
return LOS_OK;
}
///事件参数检查
LITE_OS_SEC_TEXT STATIC UINT32 OsEventParamCheck(const VOID *ptr, UINT32 eventMask, UINT32 mode)
{
if (ptr == NULL) {
@ -78,52 +78,53 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventParamCheck(const VOID *ptr, UINT32 eventMa
}
return LOS_OK;
}
///根据用户传入的事件值、事件掩码及校验模式,返回用户传入的事件是否符合预期
LITE_OS_SEC_TEXT UINT32 OsEventPoll(UINT32 *eventID, UINT32 eventMask, UINT32 mode)
{
UINT32 ret = 0;
LOS_ASSERT(OsIntLocked());
LOS_ASSERT(LOS_SpinHeld(&g_taskSpin));
LOS_ASSERT(OsIntLocked());//断言不允许中断了
LOS_ASSERT(LOS_SpinHeld(&g_taskSpin));//任务自旋锁
if (mode & LOS_WAITMODE_OR) {
if (mode & LOS_WAITMODE_OR) {//如果模式是读取掩码中任意事件
if ((*eventID & eventMask) != 0) {
ret = *eventID & eventMask;
}
} else {
if ((eventMask != 0) && (eventMask == (*eventID & eventMask))) {
} else {//等待全部事件发生
if ((eventMask != 0) && (eventMask == (*eventID & eventMask))) {//必须满足全部事件发生
ret = *eventID & eventMask;
}
}
if (ret && (mode & LOS_WAITMODE_CLR)) {
if (ret && (mode & LOS_WAITMODE_CLR)) {//读取完成后清除事件
*eventID = *eventID & ~ret;
}
return ret;
}
///检查读事件
LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadCheck(const PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode)
{
UINT32 ret;
LosTaskCB *runTask = NULL;
ret = OsEventParamCheck(eventCB, eventMask, mode);
ret = OsEventParamCheck(eventCB, eventMask, mode);//事件参数检查
if (ret != LOS_OK) {
return ret;
}
if (OS_INT_ACTIVE) {
return LOS_ERRNO_EVENT_READ_IN_INTERRUPT;
if (OS_INT_ACTIVE) {//中断正在进行
return LOS_ERRNO_EVENT_READ_IN_INTERRUPT;//不能在中断发送时读事件
}
runTask = OsCurrTaskGet();
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
runTask = OsCurrTaskGet();//获取当前任务
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {//任务属于系统任务
OsBackTrace();
return LOS_ERRNO_EVENT_READ_IN_SYSTEM_TASK;
return LOS_ERRNO_EVENT_READ_IN_SYSTEM_TASK;//不能在系统任务中读取事件
}
return LOS_OK;
}
/// 读取指定事件类型的实现函数超时时间为相对时间单位为Tick
LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadImp(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode,
UINT32 timeout, BOOL once)
{
@ -132,57 +133,57 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventReadImp(PEVENT_CB_S eventCB, UINT32 eventM
OsHookCall(LOS_HOOK_TYPE_EVENT_READ, eventCB, eventMask, mode, timeout);
if (once == FALSE) {
ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);
ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);//检测事件是否符合预期
}
if (ret == 0) {
if (timeout == 0) {
if (ret == 0) {//不符合预期时
if (timeout == 0) {//不等待的情况
return ret;
}
if (!OsPreemptableInSched()) {
if (!OsPreemptableInSched()) {//不能抢占式调度
return LOS_ERRNO_EVENT_READ_IN_LOCK;
}
runTask->eventMask = eventMask;
runTask->eventMode = mode;
runTask->taskEvent = eventCB;
OsTaskWaitSetPendMask(OS_TASK_WAIT_EVENT, eventMask, timeout);
runTask->eventMask = eventMask; //等待事件
runTask->eventMode = mode; //事件模式
runTask->taskEvent = eventCB; //事件控制块
OsTaskWaitSetPendMask(OS_TASK_WAIT_EVENT, eventMask, timeout);//任务进入等待状态,等待事件的到来并设置时长和掩码
ret = runTask->ops->wait(runTask, &eventCB->stEventList, timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
return LOS_ERRNO_EVENT_READ_TIMEOUT;
}
ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);
ret = OsEventPoll(&eventCB->uwEventID, eventMask, mode);//检测事件是否符合预期
}
return ret;
}
///读取指定事件类型超时时间为相对时间单位为Tick
LITE_OS_SEC_TEXT STATIC UINT32 OsEventRead(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode, UINT32 timeout,
BOOL once)
{
UINT32 ret;
UINT32 intSave;
ret = OsEventReadCheck(eventCB, eventMask, mode);
ret = OsEventReadCheck(eventCB, eventMask, mode);//读取事件检查
if (ret != LOS_OK) {
return ret;
}
SCHEDULER_LOCK(intSave);
ret = OsEventReadImp(eventCB, eventMask, mode, timeout, once);
ret = OsEventReadImp(eventCB, eventMask, mode, timeout, once);//读事件实现函数
SCHEDULER_UNLOCK(intSave);
return ret;
}
///事件恢复操作
LITE_OS_SEC_TEXT STATIC UINT8 OsEventResume(LosTaskCB *resumedTask, const PEVENT_CB_S eventCB, UINT32 events)
{
UINT8 exitFlag = 0;
UINT8 exitFlag = 0;//是否唤醒
if (((resumedTask->eventMode & LOS_WAITMODE_OR) && ((resumedTask->eventMask & events) != 0)) ||
((resumedTask->eventMode & LOS_WAITMODE_AND) &&
((resumedTask->eventMask & eventCB->uwEventID) == resumedTask->eventMask))) {
exitFlag = 1;
((resumedTask->eventMask & eventCB->uwEventID) == resumedTask->eventMask))) {//逻辑与 和 逻辑或 的处理
exitFlag = 1;
resumedTask->taskEvent = NULL;
OsTaskWakeClearPendMask(resumedTask);
@ -191,33 +192,33 @@ LITE_OS_SEC_TEXT STATIC UINT8 OsEventResume(LosTaskCB *resumedTask, const PEVENT
return exitFlag;
}
///以不安全的方式写事件
LITE_OS_SEC_TEXT VOID OsEventWriteUnsafe(PEVENT_CB_S eventCB, UINT32 events, BOOL once, UINT8 *exitFlag)
{
LosTaskCB *resumedTask = NULL;
LosTaskCB *nextTask = NULL;
BOOL schedFlag = FALSE;
OsHookCall(LOS_HOOK_TYPE_EVENT_WRITE, eventCB, events);
eventCB->uwEventID |= events;
if (!LOS_ListEmpty(&eventCB->stEventList)) {
eventCB->uwEventID |= events;//对应位贴上标签
if (!LOS_ListEmpty(&eventCB->stEventList)) {//等待事件链表判断,处理等待事件的任务
for (resumedTask = LOS_DL_LIST_ENTRY((&eventCB->stEventList)->pstNext, LosTaskCB, pendList);
&resumedTask->pendList != &eventCB->stEventList;) {
nextTask = LOS_DL_LIST_ENTRY(resumedTask->pendList.pstNext, LosTaskCB, pendList);
if (OsEventResume(resumedTask, eventCB, events)) {
schedFlag = TRUE;
&resumedTask->pendList != &eventCB->stEventList;) {//循环获取任务链表
nextTask = LOS_DL_LIST_ENTRY(resumedTask->pendList.pstNext, LosTaskCB, pendList);//获取任务实体
if (OsEventResume(resumedTask, eventCB, events)) {//是否恢复任务
schedFlag = TRUE;//任务已加至就绪队列,申请发生一次调度
}
if (once == TRUE) {
break;
if (once == TRUE) {//是否只处理一次任务
break;//退出循环
}
resumedTask = nextTask;
resumedTask = nextTask;//检查链表中下一个任务
}
}
if ((exitFlag != NULL) && (schedFlag == TRUE)) {
if ((exitFlag != NULL) && (schedFlag == TRUE)) {//是否让外面调度
*exitFlag = 1;
}
}
///写入事件
LITE_OS_SEC_TEXT STATIC UINT32 OsEventWrite(PEVENT_CB_S eventCB, UINT32 events, BOOL once)
{
UINT32 intSave;
@ -231,54 +232,54 @@ LITE_OS_SEC_TEXT STATIC UINT32 OsEventWrite(PEVENT_CB_S eventCB, UINT32 events,
return LOS_ERRNO_EVENT_SETBIT_INVALID;
}
SCHEDULER_LOCK(intSave);
OsEventWriteUnsafe(eventCB, events, once, &exitFlag);
SCHEDULER_UNLOCK(intSave);
SCHEDULER_LOCK(intSave); //禁止调度
OsEventWriteUnsafe(eventCB, events, once, &exitFlag);//写入事件
SCHEDULER_UNLOCK(intSave); //允许调度
if (exitFlag == 1) {
LOS_MpSchedule(OS_MP_CPU_ALL);
LOS_Schedule();
if (exitFlag == 1) { //需要发生调度
LOS_MpSchedule(OS_MP_CPU_ALL);//通知所有CPU调度
LOS_Schedule();//执行调度
}
return LOS_OK;
}
///根据用户传入的事件值、事件掩码及校验模式,返回用户传入的事件是否符合预期
LITE_OS_SEC_TEXT UINT32 LOS_EventPoll(UINT32 *eventID, UINT32 eventMask, UINT32 mode)
{
UINT32 ret;
UINT32 intSave;
//事件参数检查
ret = OsEventParamCheck((VOID *)eventID, eventMask, mode);
if (ret != LOS_OK) {
return ret;
}
SCHEDULER_LOCK(intSave);
SCHEDULER_LOCK(intSave);//申请任务自旋锁
ret = OsEventPoll(eventID, eventMask, mode);
SCHEDULER_UNLOCK(intSave);
return ret;
}
///读取指定事件类型超时时间为相对时间单位为Tick
LITE_OS_SEC_TEXT UINT32 LOS_EventRead(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode, UINT32 timeout)
{
return OsEventRead(eventCB, eventMask, mode, timeout, FALSE);
}
///写指定的事件类型
LITE_OS_SEC_TEXT UINT32 LOS_EventWrite(PEVENT_CB_S eventCB, UINT32 events)
{
return OsEventWrite(eventCB, events, FALSE);
}
///只读一次事件
LITE_OS_SEC_TEXT_MINOR UINT32 OsEventReadOnce(PEVENT_CB_S eventCB, UINT32 eventMask, UINT32 mode,
UINT32 timeout)
{
return OsEventRead(eventCB, eventMask, mode, timeout, TRUE);
}
///只写一次事件
LITE_OS_SEC_TEXT_MINOR UINT32 OsEventWriteOnce(PEVENT_CB_S eventCB, UINT32 events)
{
return OsEventWrite(eventCB, events, TRUE);
}
///销毁指定的事件控制块
LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventDestroy(PEVENT_CB_S eventCB)
{
UINT32 intSave;
@ -299,7 +300,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_EventDestroy(PEVENT_CB_S eventCB)
OsHookCall(LOS_HOOK_TYPE_EVENT_DESTROY, eventCB);
return LOS_OK;
}
///清除指定的事件类型
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_EventClear(PEVENT_CB_S eventCB, UINT32 eventMask)
{
UINT32 intSave;
@ -314,7 +315,7 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_EventClear(PEVENT_CB_S eventCB, UINT32 eventMa
return LOS_OK;
}
///有条件式读事件
#ifdef LOSCFG_COMPAT_POSIX
LITE_OS_SEC_TEXT UINT32 OsEventReadWithCond(const EventCond *cond, PEVENT_CB_S eventCB,
UINT32 eventMask, UINT32 mode, UINT32 timeout)

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -43,28 +43,30 @@
#ifdef LOSCFG_KERNEL_VM
#define OS_FUTEX_FROM_FUTEXLIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, futexList)
#define OS_FUTEX_FROM_QUEUELIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, queueList)
#define OS_FUTEX_KEY_BASE USER_ASPACE_BASE
#define OS_FUTEX_KEY_MAX (USER_ASPACE_BASE + USER_ASPACE_SIZE)
#define OS_FUTEX_FROM_FUTEXLIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, futexList) // 通过快锁节点找到结构体
#define OS_FUTEX_FROM_QUEUELIST(ptr) LOS_DL_LIST_ENTRY(ptr, FutexNode, queueList) // 通过队列节点找到结构体
#define OS_FUTEX_KEY_BASE USER_ASPACE_BASE ///< 进程用户空间基址
#define OS_FUTEX_KEY_MAX (USER_ASPACE_BASE + USER_ASPACE_SIZE) ///< 进程用户空间尾址
/* private: 0~63 hash index_num
* shared: 64~79 hash index_num */
#define FUTEX_INDEX_PRIVATE_MAX 64
#define FUTEX_INDEX_SHARED_MAX 16
#define FUTEX_INDEX_MAX (FUTEX_INDEX_PRIVATE_MAX + FUTEX_INDEX_SHARED_MAX)
#define FUTEX_INDEX_PRIVATE_MAX 64 ///< 0~63号桶用于存放私有锁以虚拟地址进行哈希,同一进程不同线程共享futex变量表明变量在进程地址空间中的位置
///< 它告诉内核这个futex是进程专有的不可以与其他进程共享。它仅仅用作同一进程的线程间同步。
#define FUTEX_INDEX_SHARED_MAX 16 ///< 64~79号桶用于存放共享锁以物理地址进行哈希,不同进程间通过文件共享futex变量表明该变量在文件中的位置
#define FUTEX_INDEX_MAX (FUTEX_INDEX_PRIVATE_MAX + FUTEX_INDEX_SHARED_MAX) ///< 80个哈希桶
#define FUTEX_INDEX_SHARED_POS FUTEX_INDEX_PRIVATE_MAX
#define FUTEX_INDEX_SHARED_POS FUTEX_INDEX_PRIVATE_MAX ///< 共享锁开始位置
#define FUTEX_HASH_PRIVATE_MASK (FUTEX_INDEX_PRIVATE_MAX - 1)
#define FUTEX_HASH_SHARED_MASK (FUTEX_INDEX_SHARED_MAX - 1)
/// 单独哈希桶,上面挂了一个个 FutexNode
typedef struct {
LosMux listLock;
LOS_DL_LIST lockList;
LosMux listLock;///< 内核操作lockList的互斥锁
LOS_DL_LIST lockList;///< 用于挂载 FutexNode (Fast userspace mutex用户态快速互斥锁)
} FutexHash;
FutexHash g_futexHash[FUTEX_INDEX_MAX];
FutexHash g_futexHash[FUTEX_INDEX_MAX];///< 80个哈希桶
/// 对互斥锁封装
STATIC INT32 OsFutexLock(LosMux *lock)
{
UINT32 ret = LOS_MuxLock(lock, LOS_WAIT_FOREVER);
@ -84,15 +86,15 @@ STATIC INT32 OsFutexUnlock(LosMux *lock)
}
return LOS_OK;
}
///< 初始化Futex(Fast userspace mutex用户态快速互斥锁)模块
UINT32 OsFutexInit(VOID)
{
INT32 count;
UINT32 ret;
// 初始化 80个哈希桶
for (count = 0; count < FUTEX_INDEX_MAX; count++) {
LOS_ListInit(&g_futexHash[count].lockList);
ret = LOS_MuxInit(&(g_futexHash[count].listLock), NULL);
LOS_ListInit(&g_futexHash[count].lockList); // 初始化双向链表,上面挂 FutexNode
ret = LOS_MuxInit(&(g_futexHash[count].listLock), NULL);//初始化互斥锁
if (ret) {
return ret;
}
@ -101,7 +103,7 @@ UINT32 OsFutexInit(VOID)
return LOS_OK;
}
LOS_MODULE_INIT(OsFutexInit, LOS_INIT_LEVEL_KMOD_EXTENDED);
LOS_MODULE_INIT(OsFutexInit, LOS_INIT_LEVEL_KMOD_EXTENDED);///< 注册Futex模块
#ifdef LOS_FUTEX_DEBUG
STATIC VOID OsFutexShowTaskNodeAttr(const LOS_DL_LIST *futexList)
@ -152,63 +154,63 @@ VOID OsFutexHashShow(VOID)
}
}
#endif
/// 通过用户空间地址获取哈希key
STATIC INLINE UINTPTR OsFutexFlagsToKey(const UINT32 *userVaddr, const UINT32 flags)
{
UINTPTR futexKey;
if (flags & FUTEX_PRIVATE) {
futexKey = (UINTPTR)userVaddr;
futexKey = (UINTPTR)userVaddr;//私有锁(以虚拟地址进行哈希)
} else {
futexKey = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);
futexKey = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);//共享锁(以物理地址进行哈希)
}
return futexKey;
}
/// 通过哈希key获取索引
STATIC INLINE UINT32 OsFutexKeyToIndex(const UINTPTR futexKey, const UINT32 flags)
{
UINT32 index = LOS_HashFNV32aBuf(&futexKey, sizeof(UINTPTR), FNV1_32A_INIT);
UINT32 index = LOS_HashFNV32aBuf(&futexKey, sizeof(UINTPTR), FNV1_32A_INIT);//获取哈希桶索引
if (flags & FUTEX_PRIVATE) {
index &= FUTEX_HASH_PRIVATE_MASK;
index &= FUTEX_HASH_PRIVATE_MASK;//将index锁定在 0 ~ 63号
} else {
index &= FUTEX_HASH_SHARED_MASK;
index += FUTEX_INDEX_SHARED_POS;
index += FUTEX_INDEX_SHARED_POS;//共享锁索引,将index锁定在 64 ~ 79号
}
return index;
}
/// 设置快锁哈希key
STATIC INLINE VOID OsFutexSetKey(UINTPTR futexKey, UINT32 flags, FutexNode *node)
{
node->key = futexKey;
node->index = OsFutexKeyToIndex(futexKey, flags);
node->pid = (flags & FUTEX_PRIVATE) ? LOS_GetCurrProcessID() : OS_INVALID;
node->key = futexKey;//哈希key
node->index = OsFutexKeyToIndex(futexKey, flags);//哈希桶索引
node->pid = (flags & FUTEX_PRIVATE) ? LOS_GetCurrProcessID() : OS_INVALID;//获取进程ID,共享快锁时 快锁节点没有进程ID
}
//析构参数节点
STATIC INLINE VOID OsFutexDeinitFutexNode(FutexNode *node)
{
node->index = OS_INVALID_VALUE;
node->pid = 0;
LOS_ListDelete(&node->queueList);
}
/// 新旧两个节点交换 futexList 位置
STATIC INLINE VOID OsFutexReplaceQueueListHeadNode(FutexNode *oldHeadNode, FutexNode *newHeadNode)
{
LOS_DL_LIST *futexList = oldHeadNode->futexList.pstPrev;
LOS_ListDelete(&oldHeadNode->futexList);
LOS_ListHeadInsert(futexList, &newHeadNode->futexList);
if ((newHeadNode->queueList.pstNext == NULL) || (newHeadNode->queueList.pstPrev == NULL)) {
LOS_ListInit(&newHeadNode->queueList);
LOS_ListDelete(&oldHeadNode->futexList);//将旧节点从futexList链表上摘除
LOS_ListHeadInsert(futexList, &newHeadNode->futexList);//将新节点从头部插入futexList链表
if ((newHeadNode->queueList.pstNext == NULL) || (newHeadNode->queueList.pstPrev == NULL)) {//新节点前后没有等待这把锁的任务
LOS_ListInit(&newHeadNode->queueList);//初始化等锁任务链表
}
}
/// 将参数节点从futexList上摘除
STATIC INLINE VOID OsFutexDeleteKeyFromFutexList(FutexNode *node)
{
LOS_ListDelete(&node->futexList);
}
/// 从哈希桶中删除快锁节点
STATIC VOID OsFutexDeleteKeyNodeFromHash(FutexNode *node, BOOL isDeleteHead, FutexNode **headNode, BOOL *queueFlags)
{
FutexNode *nextNode = NULL;
@ -217,8 +219,8 @@ STATIC VOID OsFutexDeleteKeyNodeFromHash(FutexNode *node, BOOL isDeleteHead, Fut
return;
}
if (LOS_ListEmpty(&node->queueList)) {
OsFutexDeleteKeyFromFutexList(node);
if (LOS_ListEmpty(&node->queueList)) {//如果没有任务在等锁
OsFutexDeleteKeyFromFutexList(node);//从快锁链表上摘除
if (queueFlags != NULL) {
*queueFlags = TRUE;
}
@ -226,10 +228,10 @@ STATIC VOID OsFutexDeleteKeyNodeFromHash(FutexNode *node, BOOL isDeleteHead, Fut
}
/* FutexList is not NULL, but the header node of queueList */
if (node->futexList.pstNext != NULL) {
if (isDeleteHead == TRUE) {
nextNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_FIRST(&node->queueList));
OsFutexReplaceQueueListHeadNode(node, nextNode);
if (node->futexList.pstNext != NULL) {//是头节点
if (isDeleteHead == TRUE) {//是否要删除头节点
nextNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_FIRST(&node->queueList));//取出第一个快锁节点
OsFutexReplaceQueueListHeadNode(node, nextNode);//两个节点交换位置
if (headNode != NULL) {
*headNode = nextNode;
}
@ -242,22 +244,22 @@ EXIT:
OsFutexDeinitFutexNode(node);
return;
}
/// 从哈希桶上删除快锁
VOID OsFutexNodeDeleteFromFutexHash(FutexNode *node, BOOL isDeleteHead, FutexNode **headNode, BOOL *queueFlags)
{
FutexHash *hashNode = NULL;
//通过key找到桶号
UINT32 index = OsFutexKeyToIndex(node->key, (node->pid == OS_INVALID) ? 0 : FUTEX_PRIVATE);
if (index >= FUTEX_INDEX_MAX) {
return;
}
hashNode = &g_futexHash[index];
hashNode = &g_futexHash[index];//找到hash桶
if (OsMuxLockUnsafe(&hashNode->listLock, LOS_WAIT_FOREVER)) {
return;
}
if (node->index != index) {
if (node->index != index) {//快锁节点桶号需和哈希桶号一致
goto EXIT;
}
@ -270,7 +272,6 @@ EXIT:
return;
}
STATIC FutexNode *OsFutexDeleteAlreadyWakeTaskAndGetNext(const FutexNode *node, FutexNode **headNode, BOOL isDeleteHead)
{
FutexNode *tempNode = (FutexNode *)node;
@ -292,7 +293,7 @@ STATIC FutexNode *OsFutexDeleteAlreadyWakeTaskAndGetNext(const FutexNode *node,
return tempNode;
}
/// 插入一把新Futex锁到哈希桶中,只有是新的key时才会插入,因为其实存在多个FutexNode是一个key
STATIC VOID OsFutexInsertNewFutexKeyToHash(FutexNode *node)
{
FutexNode *headNode = NULL;
@ -322,16 +323,16 @@ STATIC VOID OsFutexInsertNewFutexKeyToHash(FutexNode *node)
futexList != &(hashNode->lockList);
futexList = futexList->pstNext) {
headNode = OS_FUTEX_FROM_FUTEXLIST(futexList);
if (node->key <= headNode->key) {
if (node->key <= headNode->key) {
LOS_ListTailInsert(&(headNode->futexList), &(node->futexList));
break;
}
}
EXIT:
return;
}
STATIC INT32 OsFutexInsertFindFormBackToFront(LOS_DL_LIST *queueList, const LosTaskCB *runTask, FutexNode *node)
{
LOS_DL_LIST *listHead = queueList;
@ -407,55 +408,54 @@ STATIC INT32 OsFutexRecycleAndFindHeadNode(FutexNode *headNode, FutexNode *node,
return LOS_OK;
}
///< 将快锁挂到任务的阻塞链表上
STATIC INT32 OsFutexInsertTasktoPendList(FutexNode **firstNode, FutexNode *node, const LosTaskCB *run)
{
LosTaskCB *taskHead = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&((*firstNode)->pendList)));
LosTaskCB *taskHead = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&((*firstNode)->pendList)));//获取阻塞链表首个任务
LOS_DL_LIST *queueList = &((*firstNode)->queueList);
INT32 ret1 = OsSchedParamCompare(run, taskHead);
if (ret1 < 0) {
/* The one with the highest priority is inserted at the top of the queue */
LOS_ListTailInsert(queueList, &(node->queueList));
OsFutexReplaceQueueListHeadNode(*firstNode, node);
LOS_ListTailInsert(queueList, &(node->queueList));//查到queueList的尾部
OsFutexReplaceQueueListHeadNode(*firstNode, node);//同时交换futexList链表上的位置
*firstNode = node;
return LOS_OK;
}
//如果等锁链表上没有任务或者当前任务大于链表首个任务
if (LOS_ListEmpty(queueList) && (ret1 >= 0)) {
/* Insert the next position in the queue with equal priority */
LOS_ListHeadInsert(queueList, &(node->queueList));
LOS_ListHeadInsert(queueList, &(node->queueList));//从头部插入当前任务,当前任务是要被挂起的
return LOS_OK;
}
FutexNode *tailNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_LAST(queueList));
LosTaskCB *taskTail = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(tailNode->pendList)));
FutexNode *tailNode = OS_FUTEX_FROM_QUEUELIST(LOS_DL_LIST_LAST(queueList));//获取尾部节点
LosTaskCB *taskTail = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(tailNode->pendList)));//获取阻塞任务的最后一个
INT32 ret2 = OsSchedParamCompare(taskTail, run);
if ((ret2 <= 0) || (ret1 > ret2)) {
return OsFutexInsertFindFormBackToFront(queueList, run, node);
return OsFutexInsertFindFormBackToFront(queueList, run, node);//从后往前插入
}
return OsFutexInsertFindFromFrontToBack(queueList, run, node);
return OsFutexInsertFindFromFrontToBack(queueList, run, node);//否则从前往后插入
}
/// 由指定快锁找到对应哈希桶
STATIC FutexNode *OsFindFutexNode(const FutexNode *node)
{
FutexHash *hashNode = &g_futexHash[node->index];
FutexHash *hashNode = &g_futexHash[node->index];//先找到所在哈希桶
LOS_DL_LIST *futexList = &(hashNode->lockList);
FutexNode *headNode = NULL;
for (futexList = futexList->pstNext;
futexList != &(hashNode->lockList);
futexList != &(hashNode->lockList);//判断循环结束条件,相等时说明跑完一轮了
futexList = futexList->pstNext) {
headNode = OS_FUTEX_FROM_FUTEXLIST(futexList);
if ((headNode->key == node->key) && (headNode->pid == node->pid)) {
return headNode;
}
headNode = OS_FUTEX_FROM_FUTEXLIST(futexList);//拿到快锁节点实体
if ((headNode->key == node->key) && (headNode->pid == node->pid)) {//已经存在这个节点,注意这里的比较
return headNode;//是key和pid 一起比较,因为只有这样才能确定唯一性
}
return NULL;
}
///< 查找快锁并插入哈希桶中
STATIC INT32 OsFindAndInsertToHash(FutexNode *node)
{
FutexNode *headNode = NULL;
@ -464,7 +464,7 @@ STATIC INT32 OsFindAndInsertToHash(FutexNode *node)
INT32 ret;
headNode = OsFindFutexNode(node);
if (headNode == NULL) {
if (headNode == NULL) {//没有找到,说明这是一把新锁
OsFutexInsertNewFutexKeyToHash(node);
LOS_ListInit(&(node->queueList));
return LOS_OK;
@ -483,14 +483,14 @@ STATIC INT32 OsFindAndInsertToHash(FutexNode *node)
return ret;
}
/// 共享内存检查
STATIC INT32 OsFutexKeyShmPermCheck(const UINT32 *userVaddr, const UINT32 flags)
{
PADDR_T paddr;
/* Check whether the futexKey is a shared lock */
if (!(flags & FUTEX_PRIVATE)) {
paddr = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);
if (!(flags & FUTEX_PRIVATE)) {//非私有快锁
paddr = (UINTPTR)LOS_PaddrQuery((UINT32 *)userVaddr);//能否查询到物理地址
if (paddr == 0) return LOS_NOK;
}
@ -549,13 +549,13 @@ STATIC INT32 OsFutexDeleteTimeoutTaskNode(FutexHash *hashNode, FutexNode *node)
}
return LOS_ETIMEDOUT;
}
/// 将快锁节点插入任务
STATIC INT32 OsFutexInsertTaskToHash(LosTaskCB **taskCB, FutexNode **node, const UINTPTR futexKey, const UINT32 flags)
{
INT32 ret;
*taskCB = OsCurrTaskGet();
*node = &((*taskCB)->futex);
OsFutexSetKey(futexKey, flags, *node);
*taskCB = OsCurrTaskGet(); //获取当前任务
*node = &((*taskCB)->futex); //获取当前任务的快锁节点
OsFutexSetKey(futexKey, flags, *node);//设置参数 key index pid
ret = OsFindAndInsertToHash(*node);
if (ret) {
@ -565,33 +565,33 @@ STATIC INT32 OsFutexInsertTaskToHash(LosTaskCB **taskCB, FutexNode **node, const
LOS_ListInit(&((*node)->pendList));
return LOS_OK;
}
/// 将当前任务挂入等待链表中
STATIC INT32 OsFutexWaitTask(const UINT32 *userVaddr, const UINT32 flags, const UINT32 val, const UINT32 timeout)
{
INT32 futexRet;
UINT32 intSave, lockVal;
LosTaskCB *taskCB = NULL;
FutexNode *node = NULL;
UINTPTR futexKey = OsFutexFlagsToKey(userVaddr, flags);
UINT32 index = OsFutexKeyToIndex(futexKey, flags);
UINTPTR futexKey = OsFutexFlagsToKey(userVaddr, flags);//通过地址和flags 找到 key
UINT32 index = OsFutexKeyToIndex(futexKey, flags);//通过key找到哈希桶
FutexHash *hashNode = &g_futexHash[index];
if (OsFutexLock(&hashNode->listLock)) {
if (OsFutexLock(&hashNode->listLock)) {//操作快锁节点链表前先上互斥锁
return LOS_EINVAL;
}
if (LOS_ArchCopyFromUser(&lockVal, userVaddr, sizeof(UINT32))) {
//userVaddr必须是用户空间虚拟地址
if (LOS_ArchCopyFromUser(&lockVal, userVaddr, sizeof(UINT32))) {//将值拷贝到内核空间
PRINT_ERR("Futex wait param check failed! copy from user failed!\n");
futexRet = LOS_EINVAL;
goto EXIT_ERR;
}
if (lockVal != val) {
if (lockVal != val) {//对参数内部逻辑检查
futexRet = LOS_EBADF;
goto EXIT_ERR;
}
if (OsFutexInsertTaskToHash(&taskCB, &node, futexKey, flags)) {
//注意第二个参数 FutexNode *node = NULL
if (OsFutexInsertTaskToHash(&taskCB, &node, futexKey, flags)) {// node = taskCB->futex
futexRet = LOS_NOK;
goto EXIT_ERR;
}
@ -602,7 +602,7 @@ STATIC INT32 OsFutexWaitTask(const UINT32 *userVaddr, const UINT32 flags, const
taskCB->ops->wait(taskCB, &(node->pendList), timeout);
LOS_SpinUnlock(&g_taskSpin);
futexRet = OsFutexUnlock(&hashNode->listLock);
futexRet = OsFutexUnlock(&hashNode->listLock);//
if (futexRet) {
OsSchedUnlock();
LOS_IntRestore(intSave);
@ -632,21 +632,21 @@ EXIT_ERR:
EXIT_UNLOCK_ERR:
return futexRet;
}
/// 设置线程等待 | 向Futex表中插入代表被阻塞的线程的node
INT32 OsFutexWait(const UINT32 *userVaddr, UINT32 flags, UINT32 val, UINT32 absTime)
{
INT32 ret;
UINT32 timeout = LOS_WAIT_FOREVER;
ret = OsFutexWaitParamCheck(userVaddr, flags, absTime);
ret = OsFutexWaitParamCheck(userVaddr, flags, absTime);//参数检查
if (ret) {
return ret;
}
if (absTime != LOS_WAIT_FOREVER) {
timeout = OsNS2Tick((UINT64)absTime * OS_SYS_NS_PER_US);
if (absTime != LOS_WAIT_FOREVER) {//转换时间 , 内核的时间单位是 tick
timeout = OsNS2Tick((UINT64)absTime * OS_SYS_NS_PER_US); //转成 tick
}
return OsFutexWaitTask(userVaddr, flags, val, timeout);
return OsFutexWaitTask(userVaddr, flags, val, timeout);//将任务挂起 timeOut 时长
}
STATIC INT32 OsFutexWakeParamCheck(const UINT32 *userVaddr, UINT32 flags)
@ -657,12 +657,12 @@ STATIC INT32 OsFutexWakeParamCheck(const UINT32 *userVaddr, UINT32 flags)
PRINT_ERR("Futex wake param check failed! error flags: 0x%x\n", flags);
return LOS_EINVAL;
}
//地址必须在用户空间
if ((vaddr % sizeof(INT32)) || (vaddr < OS_FUTEX_KEY_BASE) || (vaddr >= OS_FUTEX_KEY_MAX)) {
PRINT_ERR("Futex wake param check failed! error userVaddr: 0x%x\n", userVaddr);
return LOS_EINVAL;
}
//必须得是个共享内存地址
if (flags && (OsFutexKeyShmPermCheck(userVaddr, flags) != LOS_OK)) {
PRINT_ERR("Futex wake param check failed! error shared memory perm userVaddr: 0x%x\n", userVaddr);
return LOS_EINVAL;
@ -672,7 +672,8 @@ STATIC INT32 OsFutexWakeParamCheck(const UINT32 *userVaddr, UINT32 flags)
}
/* Check to see if the task to be awakened has timed out
* if time out, to weak next pend task.
* if time out, to weak next pend task.
* | ,
*/
STATIC VOID OsFutexCheckAndWakePendTask(FutexNode *headNode, const INT32 wakeNumber,
FutexHash *hashNode, FutexNode **nextNode, BOOL *wakeAny)
@ -707,6 +708,7 @@ STATIC VOID OsFutexCheckAndWakePendTask(FutexNode *headNode, const INT32 wakeNum
}
return;
}
STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, FutexNode **newHeadNode, BOOL *wakeAny)
{
@ -715,13 +717,13 @@ STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, F
FutexNode *headNode = NULL;
UINT32 index = OsFutexKeyToIndex(futexKey, flags);
FutexHash *hashNode = &g_futexHash[index];
FutexNode tempNode = {
FutexNode tempNode = { //先组成一个临时快锁节点,目的是为了找到哈希桶中是否有这个节点
.key = futexKey,
.index = index,
.pid = (flags & FUTEX_PRIVATE) ? LOS_GetCurrProcessID() : OS_INVALID,
};
node = OsFindFutexNode(&tempNode);
node = OsFindFutexNode(&tempNode);//找快锁节点
if (node == NULL) {
return LOS_EBADF;
}
@ -729,7 +731,7 @@ STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, F
headNode = node;
SCHEDULER_LOCK(intSave);
OsFutexCheckAndWakePendTask(headNode, wakeNumber, hashNode, newHeadNode, wakeAny);
OsFutexCheckAndWakePendTask(headNode, wakeNumber, hashNode, newHeadNode, wakeAny);//再找到等这把锁的唤醒指向数量的任务
if ((*newHeadNode) != NULL) {
OsFutexReplaceQueueListHeadNode(headNode, *newHeadNode);
OsFutexDeinitFutexNode(headNode);
@ -741,7 +743,7 @@ STATIC INT32 OsFutexWakeTask(UINTPTR futexKey, UINT32 flags, INT32 wakeNumber, F
return LOS_OK;
}
/// 唤醒一个被指定锁阻塞的线程
INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
{
INT32 ret, futexRet;
@ -750,11 +752,11 @@ INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
FutexHash *hashNode = NULL;
FutexNode *headNode = NULL;
BOOL wakeAny = FALSE;
//1.检查参数
if (OsFutexWakeParamCheck(userVaddr, flags)) {
return LOS_EINVAL;
}
//2.找到指定用户空间地址对应的桶
futexKey = OsFutexFlagsToKey(userVaddr, flags);
index = OsFutexKeyToIndex(futexKey, flags);
@ -762,7 +764,7 @@ INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
if (OsFutexLock(&hashNode->listLock)) {
return LOS_EINVAL;
}
//3.换起等待该锁的进程
ret = OsFutexWakeTask(futexKey, flags, wakeNumber, &headNode, &wakeAny);
if (ret) {
goto EXIT_ERR;
@ -776,7 +778,7 @@ INT32 OsFutexWake(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber)
if (futexRet) {
goto EXIT_UNLOCK_ERR;
}
//4.根据指定参数决定是否发起调度
if (wakeAny == TRUE) {
LOS_MpSchedule(OS_MP_CPU_ALL);
LOS_Schedule();
@ -885,7 +887,7 @@ STATIC VOID OsFutexRequeueSplitTwoLists(FutexHash *oldHashNode, FutexNode *oldHe
tailNode->queueList.pstNext = &newHeadNode->queueList;
return;
}
/// 删除旧key并获取头节点
STATIC FutexNode *OsFutexRequeueRemoveOldKeyAndGetHead(UINTPTR oldFutexKey, UINT32 flags, INT32 wakeNumber,
UINTPTR newFutexKey, INT32 requeueCount, BOOL *wakeAny)
{
@ -921,7 +923,7 @@ STATIC FutexNode *OsFutexRequeueRemoveOldKeyAndGetHead(UINTPTR oldFutexKey, UINT
return oldHeadNode;
}
/// 检查锁在Futex表中的状态
STATIC INT32 OsFutexRequeueParamCheck(const UINT32 *oldUserVaddr, UINT32 flags, const UINT32 *newUserVaddr)
{
VADDR_T oldVaddr = (VADDR_T)(UINTPTR)oldUserVaddr;
@ -930,12 +932,12 @@ STATIC INT32 OsFutexRequeueParamCheck(const UINT32 *oldUserVaddr, UINT32 flags,
if (oldVaddr == newVaddr) {
return LOS_EINVAL;
}
//检查标记
if ((flags & (~FUTEX_PRIVATE)) != FUTEX_REQUEUE) {
PRINT_ERR("Futex requeue param check failed! error flags: 0x%x\n", flags);
return LOS_EINVAL;
}
//检查地址范围,必须在用户空间
if ((oldVaddr % sizeof(INT32)) || (oldVaddr < OS_FUTEX_KEY_BASE) || (oldVaddr >= OS_FUTEX_KEY_MAX)) {
PRINT_ERR("Futex requeue param check failed! error old userVaddr: 0x%x\n", oldUserVaddr);
return LOS_EINVAL;
@ -948,7 +950,7 @@ STATIC INT32 OsFutexRequeueParamCheck(const UINT32 *oldUserVaddr, UINT32 flags,
return LOS_OK;
}
/// 调整指定锁在Futex表中的位置
INT32 OsFutexRequeue(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber, INT32 count, const UINT32 *newUserVaddr)
{
INT32 ret;
@ -965,12 +967,12 @@ INT32 OsFutexRequeue(const UINT32 *userVaddr, UINT32 flags, INT32 wakeNumber, IN
return LOS_EINVAL;
}
oldFutexKey = OsFutexFlagsToKey(userVaddr, flags);
oldFutexKey = OsFutexFlagsToKey(userVaddr, flags);//先拿key
newFutexKey = OsFutexFlagsToKey(newUserVaddr, flags);
oldIndex = OsFutexKeyToIndex(oldFutexKey, flags);
oldIndex = OsFutexKeyToIndex(oldFutexKey, flags);//再拿所在哈希桶位置,共有80个哈希桶
newIndex = OsFutexKeyToIndex(newFutexKey, flags);
oldHashNode = &g_futexHash[oldIndex];
oldHashNode = &g_futexHash[oldIndex];//拿到对应哈希桶实体
if (OsFutexLock(&oldHashNode->listLock)) {
return LOS_EINVAL;
}

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -40,19 +40,19 @@
#ifdef LOSCFG_BASE_IPC_MUX
#define MUTEXATTR_TYPE_MASK 0x0FU
///互斥属性初始化
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrInit(LosMuxAttr *attr)
{
if (attr == NULL) {
return LOS_EINVAL;
}
attr->protocol = LOS_MUX_PRIO_INHERIT;
attr->prioceiling = OS_TASK_PRIORITY_LOWEST;
attr->type = LOS_MUX_DEFAULT;
attr->protocol = LOS_MUX_PRIO_INHERIT; //协议默认用继承方式, A(4)task等B(19)释放锁时,B的调度优先级直接升到(4)
attr->prioceiling = OS_TASK_PRIORITY_LOWEST;//最低优先级
attr->type = LOS_MUX_DEFAULT; //默认 LOS_MUX_RECURSIVE
return LOS_OK;
}
/// ????? 销毁互斥属 ,这里啥也没干呀
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrDestroy(LosMuxAttr *attr)
{
if (attr == NULL) {
@ -61,7 +61,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrDestroy(LosMuxAttr *attr)
return LOS_OK;
}
///获取互斥锁的类型属性,由outType接走,不送!
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetType(const LosMuxAttr *attr, INT32 *outType)
{
INT32 type;
@ -79,7 +79,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetType(const LosMuxAttr *attr, INT32 *outTyp
return LOS_OK;
}
///设置互斥锁的类型属性
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetType(LosMuxAttr *attr, INT32 type)
{
if ((attr == NULL) || (type < LOS_MUX_NORMAL) || (type > LOS_MUX_ERRORCHECK)) {
@ -89,7 +89,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetType(LosMuxAttr *attr, INT32 type)
attr->type = (UINT8)((attr->type & ~MUTEXATTR_TYPE_MASK) | (UINT32)type);
return LOS_OK;
}
///获取互斥锁的类型属性
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetProtocol(const LosMuxAttr *attr, INT32 *protocol)
{
if ((attr != NULL) && (protocol != NULL)) {
@ -100,7 +100,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetProtocol(const LosMuxAttr *attr, INT32 *pr
return LOS_OK;
}
///设置互斥锁属性的协议
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetProtocol(LosMuxAttr *attr, INT32 protocol)
{
if (attr == NULL) {
@ -117,7 +117,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetProtocol(LosMuxAttr *attr, INT32 protocol)
return LOS_EINVAL;
}
}
///获取互斥锁属性优先级
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetPrioceiling(const LosMuxAttr *attr, INT32 *prioceiling)
{
if (attr == NULL) {
@ -130,7 +130,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrGetPrioceiling(const LosMuxAttr *attr, INT32
return LOS_OK;
}
///设置互斥锁属性的优先级的上限
LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetPrioceiling(LosMuxAttr *attr, INT32 prioceiling)
{
if ((attr == NULL) ||
@ -143,7 +143,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxAttrSetPrioceiling(LosMuxAttr *attr, INT32 prioce
return LOS_OK;
}
///设置互斥锁的优先级的上限,老优先级由oldPrioceiling带走
LITE_OS_SEC_TEXT UINT32 LOS_MuxSetPrioceiling(LosMux *mutex, INT32 prioceiling, INT32 *oldPrioceiling)
{
INT32 ret;
@ -172,7 +172,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxSetPrioceiling(LosMux *mutex, INT32 prioceiling,
return ret;
}
///获取互斥锁的优先级的上限
LITE_OS_SEC_TEXT UINT32 LOS_MuxGetPrioceiling(const LosMux *mutex, INT32 *prioceiling)
{
if ((mutex != NULL) && (prioceiling != NULL) && (mutex->magic == OS_MUX_MAGIC)) {
@ -182,7 +182,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxGetPrioceiling(const LosMux *mutex, INT32 *prioce
return LOS_EINVAL;
}
///互斥锁是否有效
LITE_OS_SEC_TEXT BOOL LOS_MuxIsValid(const LosMux *mutex)
{
if ((mutex != NULL) && (mutex->magic == OS_MUX_MAGIC)) {
@ -191,7 +191,7 @@ LITE_OS_SEC_TEXT BOOL LOS_MuxIsValid(const LosMux *mutex)
return FALSE;
}
///检查互斥锁属性是否OK,否则 no ok :|)
STATIC UINT32 OsCheckMutexAttr(const LosMuxAttr *attr)
{
if (((INT8)(attr->type) < LOS_MUX_NORMAL) || (attr->type > LOS_MUX_ERRORCHECK)) {
@ -205,7 +205,7 @@ STATIC UINT32 OsCheckMutexAttr(const LosMuxAttr *attr)
}
return LOS_OK;
}
/// 初始化互斥锁
LITE_OS_SEC_TEXT UINT32 LOS_MuxInit(LosMux *mutex, const LosMuxAttr *attr)
{
UINT32 intSave;
@ -215,24 +215,24 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxInit(LosMux *mutex, const LosMuxAttr *attr)
}
if (attr == NULL) {
(VOID)LOS_MuxAttrInit(&mutex->attr);
(VOID)LOS_MuxAttrInit(&mutex->attr);//属性初始化
} else {
(VOID)memcpy_s(&mutex->attr, sizeof(LosMuxAttr), attr, sizeof(LosMuxAttr));
(VOID)memcpy_s(&mutex->attr, sizeof(LosMuxAttr), attr, sizeof(LosMuxAttr));//把attr 拷贝到 mutex->attr
}
if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {
if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {//检查属性
return LOS_EINVAL;
}
SCHEDULER_LOCK(intSave);
mutex->muxCount = 0;
mutex->owner = NULL;
LOS_ListInit(&mutex->muxList);
mutex->magic = OS_MUX_MAGIC;
SCHEDULER_UNLOCK(intSave);
SCHEDULER_LOCK(intSave); //拿到调度自旋锁
mutex->muxCount = 0; //锁定互斥量的次数
mutex->owner = NULL; //谁持有该锁
LOS_ListInit(&mutex->muxList); //互斥量双循环链表
mutex->magic = OS_MUX_MAGIC; //固定标识,互斥锁的魔法数字
SCHEDULER_UNLOCK(intSave); //释放调度自旋锁
return LOS_OK;
}
///销毁互斥锁
LITE_OS_SEC_TEXT UINT32 LOS_MuxDestroy(LosMux *mutex)
{
UINT32 intSave;
@ -241,22 +241,22 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxDestroy(LosMux *mutex)
return LOS_EINVAL;
}
SCHEDULER_LOCK(intSave);
SCHEDULER_LOCK(intSave); //保存调度自旋锁
if (mutex->magic != OS_MUX_MAGIC) {
SCHEDULER_UNLOCK(intSave);
SCHEDULER_UNLOCK(intSave);//释放调度自旋锁
return LOS_EBADF;
}
if (mutex->muxCount != 0) {
SCHEDULER_UNLOCK(intSave);
SCHEDULER_UNLOCK(intSave);//释放调度自旋锁
return LOS_EBUSY;
}
(VOID)memset_s(mutex, sizeof(LosMux), 0, sizeof(LosMux));
SCHEDULER_UNLOCK(intSave);
(VOID)memset_s(mutex, sizeof(LosMux), 0, sizeof(LosMux));//很简单,全部清0处理.
SCHEDULER_UNLOCK(intSave); //释放调度自旋锁
return LOS_OK;
}
///设置互斥锁位图
STATIC VOID OsMuxBitmapSet(const LosMux *mutex, const LosTaskCB *runTask)
{
if (mutex->attr.protocol != LOS_MUX_PRIO_INHERIT) {
@ -271,7 +271,7 @@ STATIC VOID OsMuxBitmapSet(const LosMux *mutex, const LosTaskCB *runTask)
owner->ops->priorityInheritance(owner, &param);
}
}
///恢复互斥锁位图
VOID OsMuxBitmapRestore(const LosMux *mutex, const LOS_DL_LIST *list, const LosTaskCB *runTask)
{
if (mutex->attr.protocol != LOS_MUX_PRIO_INHERIT) {
@ -284,20 +284,21 @@ VOID OsMuxBitmapRestore(const LosMux *mutex, const LOS_DL_LIST *list, const LosT
owner->ops->priorityRestore(owner, list, &param);
}
/// 最坏情况就是拿锁失败,让出CPU,变成阻塞任务,等别的任务释放锁后排到自己了接着执行.
STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
{
UINT32 ret;
if ((mutex->muxList.pstPrev == NULL) || (mutex->muxList.pstNext == NULL)) {
if ((mutex->muxList.pstPrev == NULL) || (mutex->muxList.pstNext == NULL)) {//列表为空时的处理
/* This is for mutex macro initialization. */
mutex->muxCount = 0;
mutex->owner = NULL;
LOS_ListInit(&mutex->muxList);
mutex->muxCount = 0;//锁计数器清0
mutex->owner = NULL;//锁没有归属任务
LOS_ListInit(&mutex->muxList);//初始化锁的任务链表,后续申请这把锁任务都会挂上去
}
if (mutex->muxCount == 0) {
mutex->muxCount++;
mutex->owner = (VOID *)runTask;
if (mutex->muxCount == 0) {//无task用锁时,肯定能拿到锁了.在里面返回
mutex->muxCount++; //互斥锁计数器加1
mutex->owner = (VOID *)runTask; //当前任务拿到锁
LOS_ListTailInsert(&runTask->lockList, &mutex->holdList);
if (mutex->attr.protocol == LOS_MUX_PRIO_PROTECT) {
SchedParam param = { 0 };
@ -307,23 +308,23 @@ STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
}
return LOS_OK;
}
if (((LosTaskCB *)mutex->owner == runTask) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {
mutex->muxCount++;
return LOS_OK;
//递归锁muxCount>0 如果是递归锁就要处理两种情况 1.runtask持有锁 2.锁被别的任务拿走了
if (((LosTaskCB *)mutex->owner == runTask) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {//第一种情况 runtask是锁持有方
mutex->muxCount++; //递归锁计数器加1,递归锁的目的是防止死锁,鸿蒙默认用的就是递归锁(LOS_MUX_DEFAULT = LOS_MUX_RECURSIVE)
return LOS_OK; //成功退出
}
if (!timeout) {
return LOS_EINVAL;
//到了这里说明锁在别的任务那里,当前任务只能被阻塞了.
if (!timeout) {//参数timeout表示等待多久再来拿锁
return LOS_EINVAL;//timeout = 0表示不等了,没拿到锁就返回不纠结,返回错误.见于LOS_MuxTrylock
}
if (!OsPreemptableInSched()) {
return LOS_EDEADLK;
//自己要被阻塞,只能申请调度,让出CPU core 让别的任务上
if (!OsPreemptableInSched()) {//不能申请调度 (不能调度的原因是因为没有持有调度任务自旋锁)
return LOS_EDEADLK;//返回错误,自旋锁被别的CPU core 持有
}
OsMuxBitmapSet(mutex, runTask);
OsMuxBitmapSet(mutex, runTask);//设置锁位图,尽可能的提高锁持有任务的优先级
runTask->taskMux = (VOID *)mutex;
runTask->taskMux = (VOID *)mutex; //记下当前任务在等待这把锁
LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &mutex->muxList);
if (node == NULL) {
ret = LOS_NOK;
@ -332,10 +333,10 @@ STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
OsTaskWaitSetPendMask(OS_TASK_WAIT_MUTEX, (UINTPTR)mutex, timeout);
ret = runTask->ops->wait(runTask, node, timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
if (ret == LOS_ERRNO_TSK_TIMEOUT) {//这行代码虽和OsTaskWait挨在一起,但要过很久才会执行到,因为在OsTaskWait中CPU切换了任务上下文
OsMuxBitmapRestore(mutex, NULL, runTask);
runTask->taskMux = NULL;
ret = LOS_ETIMEDOUT;
runTask->taskMux = NULL;// 所以重新回到这里时可能已经超时了
ret = LOS_ETIMEDOUT;//返回超时
}
return ret;
@ -343,7 +344,7 @@ STATIC UINT32 OsMuxPendOp(LosTaskCB *runTask, LosMux *mutex, UINT32 timeout)
UINT32 OsMuxLockUnsafe(LosMux *mutex, UINT32 timeout)
{
LosTaskCB *runTask = OsCurrTaskGet();
LosTaskCB *runTask = OsCurrTaskGet();//获取当前任务
if (mutex->magic != OS_MUX_MAGIC) {
return LOS_EBADF;
@ -352,23 +353,23 @@ UINT32 OsMuxLockUnsafe(LosMux *mutex, UINT32 timeout)
if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {
return LOS_EINVAL;
}
//LOS_MUX_ERRORCHECK 时 muxCount是要等于0 ,当前任务持有锁就不能再lock了. 鸿蒙默认用的是递归锁LOS_MUX_RECURSIVE
if ((mutex->attr.type == LOS_MUX_ERRORCHECK) && (mutex->owner == (VOID *)runTask)) {
return LOS_EDEADLK;
}
return OsMuxPendOp(runTask, mutex, timeout);
}
/// 尝试加锁,
UINT32 OsMuxTrylockUnsafe(LosMux *mutex, UINT32 timeout)
{
LosTaskCB *runTask = OsCurrTaskGet();
LosTaskCB *runTask = OsCurrTaskGet();//获取当前任务
if (mutex->magic != OS_MUX_MAGIC) {
if (mutex->magic != OS_MUX_MAGIC) {//检查MAGIC有没有被改变
return LOS_EBADF;
}
if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {
if (OsCheckMutexAttr(&mutex->attr) != LOS_OK) {//检查互斥锁属性
return LOS_EINVAL;
}
@ -377,9 +378,9 @@ UINT32 OsMuxTrylockUnsafe(LosMux *mutex, UINT32 timeout)
return LOS_EBUSY;
}
return OsMuxPendOp(runTask, mutex, timeout);
return OsMuxPendOp(runTask, mutex, timeout);//当前任务去拿锁,拿不到就等timeout
}
/// 拿互斥锁,
LITE_OS_SEC_TEXT UINT32 LOS_MuxLock(LosMux *mutex, UINT32 timeout)
{
LosTaskCB *runTask = NULL;
@ -394,19 +395,19 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxLock(LosMux *mutex, UINT32 timeout)
return LOS_EINTR;
}
runTask = (LosTaskCB *)OsCurrTaskGet();
runTask = (LosTaskCB *)OsCurrTaskGet();//获取当前任务
/* DO NOT Call blocking API in system tasks */
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {//不要在内核任务里用mux锁
PRINTK("Warning: DO NOT call %s in system tasks.\n", __FUNCTION__);
OsBackTrace();
OsBackTrace();//打印task信息
}
SCHEDULER_LOCK(intSave);
ret = OsMuxLockUnsafe(mutex, timeout);
SCHEDULER_LOCK(intSave);//调度自旋锁
ret = OsMuxLockUnsafe(mutex, timeout);//如果任务没拿到锁,将进入阻塞队列一直等待,直到timeout或者持锁任务释放锁时唤醒它
SCHEDULER_UNLOCK(intSave);
return ret;
}
///尝试要锁,没拿到也不等,直接返回,不纠结
LITE_OS_SEC_TEXT UINT32 LOS_MuxTrylock(LosMux *mutex)
{
LosTaskCB *runTask = NULL;
@ -421,39 +422,40 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxTrylock(LosMux *mutex)
return LOS_EINTR;
}
runTask = (LosTaskCB *)OsCurrTaskGet();
runTask = (LosTaskCB *)OsCurrTaskGet();//获取当前执行的任务
/* DO NOT Call blocking API in system tasks */
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {//系统任务不能
PRINTK("Warning: DO NOT call %s in system tasks.\n", __FUNCTION__);
OsBackTrace();
}
SCHEDULER_LOCK(intSave);
ret = OsMuxTrylockUnsafe(mutex, 0);
ret = OsMuxTrylockUnsafe(mutex, 0);//timeout = 0,不等待,没拿到锁就算了
SCHEDULER_UNLOCK(intSave);
return ret;
}
STATIC UINT32 OsMuxPostOp(LosTaskCB *taskCB, LosMux *mutex, BOOL *needSched)
{
if (LOS_ListEmpty(&mutex->muxList)) {
LOS_ListDelete(&mutex->holdList);
if (LOS_ListEmpty(&mutex->muxList)) {//如果互斥锁列表为空
LOS_ListDelete(&mutex->holdList);//把持有互斥锁的节点摘掉
mutex->owner = NULL;
return LOS_OK;
}
LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(mutex->muxList)));
LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(mutex->muxList)));//拿到等待互斥锁链表的第一个任务实体,接下来要唤醒任务
OsMuxBitmapRestore(mutex, &mutex->muxList, resumedTask);
mutex->muxCount = 1;
mutex->owner = (VOID *)resumedTask;
LOS_ListDelete(&mutex->holdList);
LOS_ListTailInsert(&resumedTask->lockList, &mutex->holdList);
mutex->muxCount = 1;//互斥锁数量为1
mutex->owner = (VOID *)resumedTask;//互斥锁的持有人换了
LOS_ListDelete(&mutex->holdList);//自然要从等锁链表中把自己摘出去
LOS_ListTailInsert(&resumedTask->lockList, &mutex->holdList);//把锁挂到恢复任务的锁链表上,lockList是任务持有的所有锁记录
OsTaskWakeClearPendMask(resumedTask);
resumedTask->ops->wake(resumedTask);
resumedTask->taskMux = NULL;
if (needSched != NULL) {
*needSched = TRUE;
if (needSched != NULL) {//如果不为空
*needSched = TRUE;//就走起再次调度流程
}
return LOS_OK;
@ -476,21 +478,21 @@ UINT32 OsMuxUnlockUnsafe(LosTaskCB *taskCB, LosMux *mutex, BOOL *needSched)
if (mutex->muxCount == 0) {
return LOS_EPERM;
}
if ((--mutex->muxCount != 0) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {
//注意 --mutex->muxCount 先执行了-- 操作.
if ((--mutex->muxCount != 0) && (mutex->attr.type == LOS_MUX_RECURSIVE)) {//属性类型为LOS_MUX_RECURSIVE时,muxCount是可以不为0的
return LOS_OK;
}
if (mutex->attr.protocol == LOS_MUX_PRIO_PROTECT) {
if (mutex->attr.protocol == LOS_MUX_PRIO_PROTECT) {//属性协议为保护时
SchedParam param = { 0 };
taskCB->ops->schedParamGet(taskCB, &param);
taskCB->ops->priorityRestore(taskCB, NULL, &param);
}
/* Whether a task block the mutex lock. */
return OsMuxPostOp(taskCB, mutex, needSched);
/* Whether a task block the mutex lock. *///任务是否阻塞互斥锁
return OsMuxPostOp(taskCB, mutex, needSched);//一个任务去唤醒另一个在等锁的任务
}
///释放锁
LITE_OS_SEC_TEXT UINT32 LOS_MuxUnlock(LosMux *mutex)
{
LosTaskCB *runTask = NULL;
@ -506,9 +508,9 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxUnlock(LosMux *mutex)
return LOS_EINTR;
}
runTask = (LosTaskCB *)OsCurrTaskGet();
runTask = (LosTaskCB *)OsCurrTaskGet();//获取当前任务
/* DO NOT Call blocking API in system tasks */
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {//不能在系统任务里调用,因为很容易让系统任务发生死锁
PRINTK("Warning: DO NOT call %s in system tasks.\n", __FUNCTION__);
OsBackTrace();
}
@ -516,12 +518,12 @@ LITE_OS_SEC_TEXT UINT32 LOS_MuxUnlock(LosMux *mutex)
SCHEDULER_LOCK(intSave);
ret = OsMuxUnlockUnsafe(runTask, mutex, &needSched);
SCHEDULER_UNLOCK(intSave);
if (needSched == TRUE) {
LOS_MpSchedule(OS_MP_CPU_ALL);
LOS_Schedule();
if (needSched == TRUE) {//需要调度的情况
LOS_MpSchedule(OS_MP_CPU_ALL);//向所有CPU发送调度指令
LOS_Schedule();//发起调度
}
return ret;
}
#endif /* LOSCFG_BASE_IPC_MUX */
#endif /* (LOSCFG_BASE_IPC_MUX == YES) */

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -47,8 +47,8 @@
#endif /* LOSCFG_BASE_IPC_QUEUE_LIMIT <= 0 */
#ifndef LOSCFG_IPC_CONTAINER
LITE_OS_SEC_BSS LosQueueCB *g_allQueue = NULL;
LITE_OS_SEC_BSS STATIC LOS_DL_LIST g_freeQueueList;
LITE_OS_SEC_BSS LosQueueCB *g_allQueue = NULL;///< 消息队列池
LITE_OS_SEC_BSS STATIC LOS_DL_LIST g_freeQueueList;///< 空闲队列链表,管分配的,需要队列从这里申请
#define FREE_QUEUE_LIST g_freeQueueList
#endif
@ -83,10 +83,6 @@ LITE_OS_SEC_TEXT_INIT LosQueueCB *OsAllQueueCBInit(LOS_DL_LIST *freeQueueList)
return allQueue;
}
/*
* Description : queue initial
* Return : LOS_OK on success or error code on failure
*/
LITE_OS_SEC_TEXT_INIT UINT32 OsQueueInit(VOID)
{
#ifndef LOSCFG_IPC_CONTAINER
@ -97,7 +93,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsQueueInit(VOID)
#endif
return LOS_OK;
}
///创建一个队列,根据用户传入队列长度和消息节点大小来开辟相应的内存空间以供该队列使用参数queueID带走队列ID
LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueCreate(CHAR *queueName, UINT16 len, UINT32 *queueID,
UINT32 flags, UINT16 maxMsgSize)
{
@ -114,7 +110,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueCreate(CHAR *queueName, UINT16 len, UINT32
return LOS_ERRNO_QUEUE_CREAT_PTR_NULL;
}
if (maxMsgSize > (OS_NULL_SHORT - sizeof(UINT32))) {
if (maxMsgSize > (OS_NULL_SHORT - sizeof(UINT32))) {// maxMsgSize上限 为啥要减去 sizeof(UINT32) ,因为前面存的是队列的大小
return LOS_ERRNO_QUEUE_SIZE_TOO_BIG;
}
@ -122,83 +118,83 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueCreate(CHAR *queueName, UINT16 len, UINT32
return LOS_ERRNO_QUEUE_PARA_ISZERO;
}
msgSize = maxMsgSize + sizeof(UINT32);
msgSize = maxMsgSize + sizeof(UINT32);//总size = 消息体内容长度 + 消息大小(UINT32)
/*
* Memory allocation is time-consuming, to shorten the time of disable interrupt,
* move the memory allocation to here.
*/
queue = (UINT8 *)LOS_MemAlloc(m_aucSysMem1, (UINT32)len * msgSize);
if (queue == NULL) {
return LOS_ERRNO_QUEUE_CREATE_NO_MEMORY;
*///内存分配非常耗时,为了缩短禁用中断的时间,将内存分配移到此处,用的时候分配队列内存
queue = (UINT8 *)LOS_MemAlloc(m_aucSysMem1, (UINT32)len * msgSize);//从系统内存池中分配,由这里提供读写队列的内存
if (queue == NULL) {//这里是一次把队列要用到的所有最大内存都申请下来了,能保证不会出现后续使用过程中内存不够的问题出现
return LOS_ERRNO_QUEUE_CREATE_NO_MEMORY;//调用处有 OsSwtmrInit sys_mbox_new DoMqueueCreate ==
}
SCHEDULER_LOCK(intSave);
if (LOS_ListEmpty(&FREE_QUEUE_LIST)) {
SCHEDULER_UNLOCK(intSave);
if (LOS_ListEmpty(&FREE_QUEUE_LIST)) {//没有空余的队列ID的处理,注意软时钟定时器是由 g_swtmrCBArray统一管理的,里面有正在使用和可分配空闲的队列
SCHEDULER_UNLOCK(intSave);//g_freeQueueList是管理可用于分配的队列链表,申请消息队列的ID需要向它要
OsQueueCheckHook();
(VOID)LOS_MemFree(m_aucSysMem1, queue);
(VOID)LOS_MemFree(m_aucSysMem1, queue);//没有就要释放 queue申请的内存
return LOS_ERRNO_QUEUE_CB_UNAVAILABLE;
}
unusedQueue = LOS_DL_LIST_FIRST(&FREE_QUEUE_LIST);
LOS_ListDelete(unusedQueue);
queueCB = GET_QUEUE_LIST(unusedQueue);
queueCB->queueLen = len;
queueCB->queueSize = msgSize;
queueCB->queueHandle = queue;
queueCB->queueState = OS_QUEUE_INUSED;
queueCB->readWriteableCnt[OS_QUEUE_READ] = 0;
queueCB->readWriteableCnt[OS_QUEUE_WRITE] = len;
queueCB->queueHead = 0;
queueCB->queueTail = 0;
LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_READ]);
LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_WRITE]);
LOS_ListInit(&queueCB->memList);
OsQueueDbgUpdateHook(queueCB->queueID, OsCurrTaskGet()->taskEntry);
unusedQueue = LOS_DL_LIST_FIRST(&FREE_QUEUE_LIST);//找到一个没有被使用的队列
LOS_ListDelete(unusedQueue);//将自己从g_freeQueueList中摘除, unusedQueue只是个 LOS_DL_LIST 结点.
queueCB = GET_QUEUE_LIST(unusedQueue);//通过unusedQueue找到整个消息队列(LosQueueCB)
queueCB->queueLen = len; //队列中消息的总个数,注意这个一旦创建是不能变的.
queueCB->queueSize = msgSize;//消息节点的大小,注意这个一旦创建也是不能变的.
queueCB->queueHandle = queue; //队列句柄,队列内容存储区.
queueCB->queueState = OS_QUEUE_INUSED; //队列状态使用中
queueCB->readWriteableCnt[OS_QUEUE_READ] = 0;//可读资源计数OS_QUEUE_READ(0):可读.
queueCB->readWriteableCnt[OS_QUEUE_WRITE] = len;//可些资源计数 OS_QUEUE_WRITE(1):可写, 默认len可写.
queueCB->queueHead = 0;//队列头节点
queueCB->queueTail = 0;//队列尾节点
LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_READ]);//初始化可读队列任务链表
LOS_ListInit(&queueCB->readWriteList[OS_QUEUE_WRITE]);//初始化可写队列任务链表
LOS_ListInit(&queueCB->memList);//
OsQueueDbgUpdateHook(queueCB->queueID, OsCurrTaskGet()->taskEntry);//在创建或删除队列调试信息时更新任务条目
SCHEDULER_UNLOCK(intSave);
*queueID = queueCB->queueID;
*queueID = queueCB->queueID;//带走队列ID
OsHookCall(LOS_HOOK_TYPE_QUEUE_CREATE, queueCB);
return LOS_OK;
}
///读队列参数检查
STATIC LITE_OS_SEC_TEXT UINT32 OsQueueReadParameterCheck(UINT32 queueID, const VOID *bufferAddr,
const UINT32 *bufferSize, UINT32 timeout)
{
if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {//队列ID不能超上限
return LOS_ERRNO_QUEUE_INVALID;
}
if ((bufferAddr == NULL) || (bufferSize == NULL)) {
if ((bufferAddr == NULL) || (bufferSize == NULL)) {//缓存地址和大小参数判断
return LOS_ERRNO_QUEUE_READ_PTR_NULL;
}
if ((*bufferSize == 0) || (*bufferSize > (OS_NULL_SHORT - sizeof(UINT32)))) {
return LOS_ERRNO_QUEUE_READSIZE_IS_INVALID;
if ((*bufferSize == 0) || (*bufferSize > (OS_NULL_SHORT - sizeof(UINT32)))) {//限制了读取数据的上限64K, sizeof(UINT32)代表的是队列的长度
return LOS_ERRNO_QUEUE_READSIZE_IS_INVALID; //所以要减去
}
OsQueueDbgTimeUpdateHook(queueID);
if (timeout != LOS_NO_WAIT) {
if (OS_INT_ACTIVE) {
return LOS_ERRNO_QUEUE_READ_IN_INTERRUPT;
if (timeout != LOS_NO_WAIT) {//等待一定时间再读取
if (OS_INT_ACTIVE) {//如果碰上了硬中断
return LOS_ERRNO_QUEUE_READ_IN_INTERRUPT;//意思是:硬中断发生时是不能读消息队列的
}
}
return LOS_OK;
}
///写队列参数检查
STATIC LITE_OS_SEC_TEXT UINT32 OsQueueWriteParameterCheck(UINT32 queueID, const VOID *bufferAddr,
const UINT32 *bufferSize, UINT32 timeout)
{
if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {//队列ID不能超上限
return LOS_ERRNO_QUEUE_INVALID;
}
if (bufferAddr == NULL) {
if (bufferAddr == NULL) {//没有数据源
return LOS_ERRNO_QUEUE_WRITE_PTR_NULL;
}
if (*bufferSize == 0) {
if (*bufferSize == 0) {//这里没有限制写队列的大小,如果写入一个很大buf 会怎样?
return LOS_ERRNO_QUEUE_WRITESIZE_ISZERO;
}
@ -211,128 +207,129 @@ STATIC LITE_OS_SEC_TEXT UINT32 OsQueueWriteParameterCheck(UINT32 queueID, const
}
return LOS_OK;
}
///队列buf操作,注意队列数据是按顺序来读取的,要不从头,要不从尾部,不会出现从中间读写,所有可由 head 和 tail 来管理队列.
STATIC VOID OsQueueBufferOperate(LosQueueCB *queueCB, UINT32 operateType, VOID *bufferAddr, UINT32 *bufferSize)
{
UINT8 *queueNode = NULL;
UINT32 msgDataSize;
UINT16 queuePosition;
/* get the queue position */
switch (OS_QUEUE_OPERATE_GET(operateType)) {
case OS_QUEUE_READ_HEAD:
queuePosition = queueCB->queueHead;
((queueCB->queueHead + 1) == queueCB->queueLen) ? (queueCB->queueHead = 0) : (queueCB->queueHead++);
/* get the queue position | 先找到队列的位置*/
switch (OS_QUEUE_OPERATE_GET(operateType)) {//获取操作类型
case OS_QUEUE_READ_HEAD://从列队头开始读
queuePosition = queueCB->queueHead;//拿到头部位置
((queueCB->queueHead + 1) == queueCB->queueLen) ? (queueCB->queueHead = 0) : (queueCB->queueHead++);//调整队列头部位置
break;
case OS_QUEUE_WRITE_HEAD:
(queueCB->queueHead == 0) ? (queueCB->queueHead = queueCB->queueLen - 1) : (--queueCB->queueHead);
queuePosition = queueCB->queueHead;
case OS_QUEUE_WRITE_HEAD://从列队头开始写
(queueCB->queueHead == 0) ? (queueCB->queueHead = queueCB->queueLen - 1) : (--queueCB->queueHead);//调整队列头部位置
queuePosition = queueCB->queueHead;//拿到头部位置
break;
case OS_QUEUE_WRITE_TAIL:
queuePosition = queueCB->queueTail;
((queueCB->queueTail + 1) == queueCB->queueLen) ? (queueCB->queueTail = 0) : (queueCB->queueTail++);
case OS_QUEUE_WRITE_TAIL://从列队尾部开始写
queuePosition = queueCB->queueTail;//设置队列位置为尾部位置
((queueCB->queueTail + 1) == queueCB->queueLen) ? (queueCB->queueTail = 0) : (queueCB->queueTail++);//调整队列尾部位置
break;
default: /* read tail, reserved. */
PRINT_ERR("invalid queue operate type!\n");
return;
}
//queueHandle是create队列时,由外界参数申请的一块内存. 用于copy 使用
queueNode = &(queueCB->queueHandle[(queuePosition * (queueCB->queueSize))]);//拿到队列节点
queueNode = &(queueCB->queueHandle[(queuePosition * (queueCB->queueSize))]);
if (OS_QUEUE_IS_READ(operateType)) {
if (OS_QUEUE_IS_READ(operateType)) {//读操作处理,读队列分两步走
if (memcpy_s(&msgDataSize, sizeof(UINT32), queueNode + queueCB->queueSize - sizeof(UINT32),
sizeof(UINT32)) != EOK) {
sizeof(UINT32)) != EOK) {//1.先读出队列大小,由队列头四个字节表示
PRINT_ERR("get msgdatasize failed\n");
return;
}
msgDataSize = (*bufferSize < msgDataSize) ? *bufferSize : msgDataSize;
if (memcpy_s(bufferAddr, *bufferSize, queueNode, msgDataSize) != EOK) {
if (memcpy_s(bufferAddr, *bufferSize, queueNode, msgDataSize) != EOK) {//2.读表示读走已有数据,所以相当于bufferAddr接着了queueNode的数据
PRINT_ERR("copy message to buffer failed\n");
return;
}
*bufferSize = msgDataSize;
} else {
if (memcpy_s(queueNode, queueCB->queueSize, bufferAddr, *bufferSize) != EOK) {
PRINT_ERR("store message failed\n");
*bufferSize = msgDataSize;//通过入参 带走消息的大小
} else {//只有读写两种操作,这里就是写队列了.写也分两步走 , @note_thinking 这里建议鸿蒙加上 OS_QUEUE_IS_WRITE 判断
if (memcpy_s(queueNode, queueCB->queueSize, bufferAddr, *bufferSize) != EOK) {//1.写入消息内容
PRINT_ERR("store message failed\n");//表示把外面数据写进来,所以相当于queueNode接着了bufferAddr的数据
return;
}
if (memcpy_s(queueNode + queueCB->queueSize - sizeof(UINT32), sizeof(UINT32), bufferSize,
sizeof(UINT32)) != EOK) {
sizeof(UINT32)) != EOK) {//2.写入消息数据的长度,sizeof(UINT32)
PRINT_ERR("store message size failed\n");
return;
}
}
}
///队列操作参数检查
STATIC UINT32 OsQueueOperateParamCheck(const LosQueueCB *queueCB, UINT32 queueID,
UINT32 operateType, const UINT32 *bufferSize)
{
if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {//队列ID和状态判断
return LOS_ERRNO_QUEUE_NOT_CREATE;
}
if (OS_QUEUE_IS_WRITE(operateType) && (*bufferSize > (queueCB->queueSize - sizeof(UINT32)))) {
return LOS_ERRNO_QUEUE_WRITE_SIZE_TOO_BIG;
if (OS_QUEUE_IS_WRITE(operateType) && (*bufferSize > (queueCB->queueSize - sizeof(UINT32)))) {//写时判断
return LOS_ERRNO_QUEUE_WRITE_SIZE_TOO_BIG;//塞进来的数据太大,大于队列节点能承受的范围
}
return LOS_OK;
}
UINT32 OsQueueOperate(UINT32 queueID, UINT32 operateType, VOID *bufferAddr, UINT32 *bufferSize, UINT32 timeout)
{
UINT32 ret;
UINT32 readWrite = OS_QUEUE_READ_WRITE_GET(operateType);
UINT32 readWrite = OS_QUEUE_READ_WRITE_GET(operateType);//获取读/写操作标识
UINT32 intSave;
OsHookCall(LOS_HOOK_TYPE_QUEUE_READ, (LosQueueCB *)GET_QUEUE_HANDLE(queueID), operateType, *bufferSize, timeout);
SCHEDULER_LOCK(intSave);
LosQueueCB *queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);
ret = OsQueueOperateParamCheck(queueCB, queueID, operateType, bufferSize);
LosQueueCB *queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);//获取对应的队列控制块
ret = OsQueueOperateParamCheck(queueCB, queueID, operateType, bufferSize);//参数检查
if (ret != LOS_OK) {
goto QUEUE_END;
}
if (queueCB->readWriteableCnt[readWrite] == 0) {
if (timeout == LOS_NO_WAIT) {
if (queueCB->readWriteableCnt[readWrite] == 0) {//根据readWriteableCnt判断队列是否有消息读/写
if (timeout == LOS_NO_WAIT) {//不等待直接退出
ret = OS_QUEUE_IS_READ(operateType) ? LOS_ERRNO_QUEUE_ISEMPTY : LOS_ERRNO_QUEUE_ISFULL;
goto QUEUE_END;
}
if (!OsPreemptableInSched()) {
if (!OsPreemptableInSched()) {//不支持抢占式调度
ret = LOS_ERRNO_QUEUE_PEND_IN_LOCK;
goto QUEUE_END;
}
//任务等待,这里很重要啊,将自己从就绪列表摘除,让出了CPU并发起了调度,并挂在readWriteList[readWrite]上,挂的都等待读/写消息的task
LosTaskCB *runTask = OsCurrTaskGet();
OsTaskWaitSetPendMask(OS_TASK_WAIT_QUEUE, queueCB->queueID, timeout);
ret = runTask->ops->wait(runTask, &queueCB->readWriteList[readWrite], timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
if (ret == LOS_ERRNO_TSK_TIMEOUT) {//唤醒后如果超时了,返回读/写消息失败
ret = LOS_ERRNO_QUEUE_TIMEOUT;
goto QUEUE_END;
goto QUEUE_END;//
}
} else {
queueCB->readWriteableCnt[readWrite]--;
queueCB->readWriteableCnt[readWrite]--;//对应队列中计数器--,说明一条消息只能被读/写一次
}
OsQueueBufferOperate(queueCB, operateType, bufferAddr, bufferSize);
OsQueueBufferOperate(queueCB, operateType, bufferAddr, bufferSize);//发起读或写队列操作
if (!LOS_ListEmpty(&queueCB->readWriteList[!readWrite])) {
LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&queueCB->readWriteList[!readWrite]));
if (!LOS_ListEmpty(&queueCB->readWriteList[!readWrite])) {//如果还有任务在排着队等待读/写入消息(当时不能读/写的原因有可能当时队列满了==)
LosTaskCB *resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&queueCB->readWriteList[!readWrite]));//取出要读/写消息的任务
OsTaskWakeClearPendMask(resumedTask);
resumedTask->ops->wake(resumedTask);
SCHEDULER_UNLOCK(intSave);
LOS_MpSchedule(OS_MP_CPU_ALL);
LOS_Schedule();
LOS_MpSchedule(OS_MP_CPU_ALL);//让所有CPU发出调度申请,因为很可能那个要读/写消息的队列是由其他CPU执行
LOS_Schedule();//申请调度
return LOS_OK;
} else {
queueCB->readWriteableCnt[!readWrite]++;
queueCB->readWriteableCnt[!readWrite]++;//对应队列读/写中计数器++
}
QUEUE_END:
SCHEDULER_UNLOCK(intSave);
return ret;
}
///接口函数定时读取消息队列
LITE_OS_SEC_TEXT UINT32 LOS_QueueReadCopy(UINT32 queueID,
VOID *bufferAddr,
UINT32 *bufferSize,
@ -341,15 +338,15 @@ LITE_OS_SEC_TEXT UINT32 LOS_QueueReadCopy(UINT32 queueID,
UINT32 ret;
UINT32 operateType;
ret = OsQueueReadParameterCheck(queueID, bufferAddr, bufferSize, timeout);
ret = OsQueueReadParameterCheck(queueID, bufferAddr, bufferSize, timeout);//参数检查
if (ret != LOS_OK) {
return ret;
}
operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_READ, OS_QUEUE_HEAD);
return OsQueueOperate(queueID, operateType, bufferAddr, bufferSize, timeout);
operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_READ, OS_QUEUE_HEAD);//从头开始读
return OsQueueOperate(queueID, operateType, bufferAddr, bufferSize, timeout);//定时执行读操作
}
///接口函数从队列头开始写
LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHeadCopy(UINT32 queueID,
VOID *bufferAddr,
UINT32 bufferSize,
@ -358,15 +355,15 @@ LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHeadCopy(UINT32 queueID,
UINT32 ret;
UINT32 operateType;
ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout);
ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout);//参数检查
if (ret != LOS_OK) {
return ret;
}
operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_HEAD);
return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);
operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_HEAD);//从头开始写
return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);//执行写操作
}
///接口函数 从队列尾部开始写
LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteCopy(UINT32 queueID,
VOID *bufferAddr,
UINT32 bufferSize,
@ -375,15 +372,16 @@ LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteCopy(UINT32 queueID,
UINT32 ret;
UINT32 operateType;
ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout);
ret = OsQueueWriteParameterCheck(queueID, bufferAddr, &bufferSize, timeout);//参数检查
if (ret != LOS_OK) {
return ret;
}
operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_TAIL);
return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);
operateType = OS_QUEUE_OPERATE_TYPE(OS_QUEUE_WRITE, OS_QUEUE_TAIL);//从尾部开始写
return OsQueueOperate(queueID, operateType, bufferAddr, &bufferSize, timeout);//执行写操作
}
LITE_OS_SEC_TEXT UINT32 LOS_QueueRead(UINT32 queueID, VOID *bufferAddr, UINT32 bufferSize, UINT32 timeout)
{
return LOS_QueueReadCopy(queueID, bufferAddr, &bufferSize, timeout);
@ -398,6 +396,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_QueueWrite(UINT32 queueID, VOID *bufferAddr, UINT32
return LOS_QueueWriteCopy(queueID, &bufferAddr, bufferSize, timeout);
}
LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHead(UINT32 queueID,
VOID *bufferAddr,
UINT32 bufferSize,
@ -410,6 +409,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_QueueWriteHead(UINT32 queueID,
return LOS_QueueWriteHeadCopy(queueID, &bufferAddr, bufferSize, timeout);
}
LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueDelete(UINT32 queueID)
{
LosQueueCB *queueCB = NULL;
@ -422,50 +422,50 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_QueueDelete(UINT32 queueID)
}
SCHEDULER_LOCK(intSave);
queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);
queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);//拿到队列实体
if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
ret = LOS_ERRNO_QUEUE_NOT_CREATE;
goto QUEUE_END;
}
if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_READ])) {
if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_READ])) {//尚有任务要读数据
ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
goto QUEUE_END;
}
if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_WRITE])) {
if (!LOS_ListEmpty(&queueCB->readWriteList[OS_QUEUE_WRITE])) {//尚有任务要写数据
ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
goto QUEUE_END;
}
if (!LOS_ListEmpty(&queueCB->memList)) {
if (!LOS_ListEmpty(&queueCB->memList)) {//
ret = LOS_ERRNO_QUEUE_IN_TSKUSE;
goto QUEUE_END;
}
if ((queueCB->readWriteableCnt[OS_QUEUE_WRITE] + queueCB->readWriteableCnt[OS_QUEUE_READ]) !=
queueCB->queueLen) {
queueCB->queueLen) {//读写队列的内容长度不等于总长度
ret = LOS_ERRNO_QUEUE_IN_TSKWRITE;
goto QUEUE_END;
}
queue = queueCB->queueHandle;
queueCB->queueHandle = NULL;
queueCB->queueState = OS_QUEUE_UNUSED;
queueCB->queueID = SET_QUEUE_ID(GET_QUEUE_COUNT(queueCB->queueID) + 1, GET_QUEUE_INDEX(queueCB->queueID));
queue = queueCB->queueHandle; //队列buf
queueCB->queueHandle = NULL; //
queueCB->queueState = OS_QUEUE_UNUSED;//重置队列状态
queueCB->queueID = SET_QUEUE_ID(GET_QUEUE_COUNT(queueCB->queueID) + 1, GET_QUEUE_INDEX(queueCB->queueID));//@note_why 这里需要这样做吗?
OsQueueDbgUpdateHook(queueCB->queueID, NULL);
LOS_ListTailInsert(&FREE_QUEUE_LIST, &queueCB->readWriteList[OS_QUEUE_WRITE]);
SCHEDULER_UNLOCK(intSave);
LOS_ListTailInsert(&FREE_QUEUE_LIST, &queueCB->readWriteList[OS_QUEUE_WRITE]);//回收,将节点挂入可分配链表,等待重新被分配再利用
SCHEDULER_UNLOCK(intSave);
OsHookCall(LOS_HOOK_TYPE_QUEUE_DELETE, queueCB);
ret = LOS_MemFree(m_aucSysMem1, (VOID *)queue);
ret = LOS_MemFree(m_aucSysMem1, (VOID *)queue);//释放队列句柄
return ret;
QUEUE_END:
SCHEDULER_UNLOCK(intSave);
return ret;
}
///外部接口, 获取队列信息,用queueInfo 把 LosQueueCB数据接走,QUEUE_INFO_S对内部数据的封装
LITE_OS_SEC_TEXT_MINOR UINT32 LOS_QueueInfoGet(UINT32 queueID, QUEUE_INFO_S *queueInfo)
{
UINT32 intSave;
@ -477,14 +477,14 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_QueueInfoGet(UINT32 queueID, QUEUE_INFO_S *que
return LOS_ERRNO_QUEUE_PTR_NULL;
}
if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {
if (GET_QUEUE_INDEX(queueID) >= LOSCFG_BASE_IPC_QUEUE_LIMIT) {//1024
return LOS_ERRNO_QUEUE_INVALID;
}
(VOID)memset_s((VOID *)queueInfo, sizeof(QUEUE_INFO_S), 0, sizeof(QUEUE_INFO_S));
(VOID)memset_s((VOID *)queueInfo, sizeof(QUEUE_INFO_S), 0, sizeof(QUEUE_INFO_S));//接走数据之前先清0
SCHEDULER_LOCK(intSave);
queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);
queueCB = (LosQueueCB *)GET_QUEUE_HANDLE(queueID);//通过队列ID获取 QCB
if ((queueCB->queueID != queueID) || (queueCB->queueState == OS_QUEUE_UNUSED)) {
ret = LOS_ERRNO_QUEUE_NOT_CREATE;
goto QUEUE_END;
@ -495,19 +495,19 @@ LITE_OS_SEC_TEXT_MINOR UINT32 LOS_QueueInfoGet(UINT32 queueID, QUEUE_INFO_S *que
queueInfo->usQueueSize = queueCB->queueSize;
queueInfo->usQueueHead = queueCB->queueHead;
queueInfo->usQueueTail = queueCB->queueTail;
queueInfo->usReadableCnt = queueCB->readWriteableCnt[OS_QUEUE_READ];
queueInfo->usWritableCnt = queueCB->readWriteableCnt[OS_QUEUE_WRITE];
queueInfo->usReadableCnt = queueCB->readWriteableCnt[OS_QUEUE_READ];//可读数
queueInfo->usWritableCnt = queueCB->readWriteableCnt[OS_QUEUE_WRITE];//可写数
LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_READ], LosTaskCB, pendList) {
queueInfo->uwWaitReadTask |= 1ULL << tskCB->taskID;
}
LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_READ], LosTaskCB, pendList) {//找出哪些task需要读消息
queueInfo->uwWaitReadTask |= 1ULL << tskCB->taskID;//记录等待读消息的任务号, uwWaitReadTask 每一位代表一个任务编号
}//0b..011011011 代表 0,1,3,4,6,7 号任务有数据等待读消息.
LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_WRITE], LosTaskCB, pendList) {
queueInfo->uwWaitWriteTask |= 1ULL << tskCB->taskID;
}
LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->readWriteList[OS_QUEUE_WRITE], LosTaskCB, pendList) {//找出哪些task需要写消息
queueInfo->uwWaitWriteTask |= 1ULL << tskCB->taskID;//记录等待写消息的任务号, uwWaitWriteTask 每一位代表一个任务编号
}////0b..011011011 代表 0,1,3,4,6,7 号任务有数据等待写消息.
LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->memList, LosTaskCB, pendList) {
queueInfo->uwWaitMemTask |= 1ULL << tskCB->taskID;
LOS_DL_LIST_FOR_EACH_ENTRY(tskCB, &queueCB->memList, LosTaskCB, pendList) {//同上
queueInfo->uwWaitMemTask |= 1ULL << tskCB->taskID; //MailBox模块使用
}
QUEUE_END:
@ -515,5 +515,5 @@ QUEUE_END:
return ret;
}
#endif /* LOSCFG_BASE_IPC_QUEUE */
#endif /* (LOSCFG_BASE_IPC_QUEUE == YES) */

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -37,9 +37,10 @@
#include "los_exc.h"
#include "los_sched_pri.h"
#ifdef LOSCFG_BASE_IPC_RWLOCK
#define RWLOCK_COUNT_MASK 0x00FFFFFFU
/// 判断读写锁有效性
BOOL LOS_RwlockIsValid(const LosRwlock *rwlock)
{
if ((rwlock != NULL) && ((rwlock->magic & RWLOCK_COUNT_MASK) == OS_RWLOCK_MAGIC)) {
@ -48,7 +49,7 @@ BOOL LOS_RwlockIsValid(const LosRwlock *rwlock)
return FALSE;
}
/// 创建读写锁,初始化锁信息
UINT32 LOS_RwlockInit(LosRwlock *rwlock)
{
UINT32 intSave;
@ -71,7 +72,7 @@ UINT32 LOS_RwlockInit(LosRwlock *rwlock)
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
/// 删除指定的读写锁
UINT32 LOS_RwlockDestroy(LosRwlock *rwlock)
{
UINT32 intSave;
@ -95,18 +96,18 @@ UINT32 LOS_RwlockDestroy(LosRwlock *rwlock)
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
/// 读写锁检查
STATIC UINT32 OsRwlockCheck(const LosRwlock *rwlock)
{
if (rwlock == NULL) {
return LOS_EINVAL;
}
if (OS_INT_ACTIVE) {
if (OS_INT_ACTIVE) { // 读写锁不能在中断服务程序中使用。请想想为什么 ?
return LOS_EINTR;
}
/* DO NOT Call blocking API in system tasks */
/* DO NOT Call blocking API in system tasks | 系统任务不能使用读写锁 */
LosTaskCB *runTask = (LosTaskCB *)OsCurrTaskGet();
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
return LOS_EPERM;
@ -114,19 +115,23 @@ STATIC UINT32 OsRwlockCheck(const LosRwlock *rwlock)
return LOS_OK;
}
/// 指定任务优先级优先级是否低于 写锁任务最高优先级
STATIC BOOL OsRwlockPriCompare(LosTaskCB *runTask, LOS_DL_LIST *rwList)
{
if (!LOS_ListEmpty(rwList)) {
LosTaskCB *highestTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(rwList));
if (OsSchedParamCompare(runTask, highestTask) < 0) {
LosTaskCB *highestTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(rwList));//首个写锁任务优先级是最高的
if (OsSchedParamCompare(runTask, highestTask) < 0) {//如果当前任务优先级低于等待写锁任务
return TRUE;
}
return FALSE;
}
return TRUE;
}
/* 申请读模式下的锁,分三种情况:
1.
2.
3.
*/
STATIC UINT32 OsRwlockRdPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 timeout)
{
UINT32 ret;
@ -135,12 +140,12 @@ STATIC UINT32 OsRwlockRdPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 tim
* When the rwlock mode is read mode or free mode and the priority of the current read task
* is higher than the first pended write task. current read task can obtain this rwlock.
*/
if (rwlock->rwCount >= 0) {
if (OsRwlockPriCompare(runTask, &(rwlock->writeList))) {
if (rwlock->rwCount == INT8_MAX) {
if (rwlock->rwCount >= 0) {//第一和第二种情况
if (OsRwlockPriCompare(runTask, &(rwlock->writeList))) {//读优先级低于写优先级,意思就是必须先写再读
if (rwlock->rwCount == INT8_MAX) {//读锁任务达到上限
return LOS_EINVAL;
}
rwlock->rwCount++;
rwlock->rwCount++;//拿读锁成功
return LOS_OK;
}
}
@ -149,45 +154,51 @@ STATIC UINT32 OsRwlockRdPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 tim
return LOS_EINVAL;
}
if (!OsPreemptableInSched()) {
if (!OsPreemptableInSched()) {//不可抢占时
return LOS_EDEADLK;
}
/* The current task is not allowed to obtain the write lock when it obtains the read lock. */
if ((LosTaskCB *)(rwlock->writeOwner) == runTask) {
/* The current task is not allowed to obtain the write lock when it obtains the read lock.
| */
if ((LosTaskCB *)(rwlock->writeOwner) == runTask) { //拥有写锁任务是否为当前任务
return LOS_EINVAL;
}
/*
* When the rwlock mode is write mode or the priority of the current read task
* is lower than the first pended write task, current read task will be pended.
| rwlock
*/
LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->readList));
ret = runTask->ops->wait(runTask, node, timeout);
LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->readList));//找到要挂入的位置
//例如现有链表内任务优先级为 0 3 8 9 23 当前为 10 时, 返回的是 9 这个节点
ret = runTask->ops->wait(runTask, node, timeout);//从尾部插入读锁链表 由此变成了 0 3 8 9 10 23
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
return LOS_ETIMEDOUT;
}
return ret;
}
/// 申请写模式下的锁
STATIC UINT32 OsRwlockWrPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 timeout)
{
UINT32 ret;
/* When the rwlock is free mode, current write task can obtain this rwlock. */
/* When the rwlock is free mode, current write task can obtain this rwlock.
| */
if (rwlock->rwCount == 0) {
rwlock->rwCount = -1;
rwlock->writeOwner = (VOID *)runTask;
rwlock->writeOwner = (VOID *)runTask;//直接给当前进程锁
return LOS_OK;
}
/* Current write task can use one rwlock once again if the rwlock owner is it. */
/* Current write task can use one rwlock once again if the rwlock owner is it.
| rwlock 使*/
if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) == runTask)) {
if (rwlock->rwCount == INT8_MIN) {
return LOS_EINVAL;
}
rwlock->rwCount--;
rwlock->rwCount--;//注意再次拥有算是两把写锁了.
return LOS_OK;
}
@ -201,9 +212,9 @@ STATIC UINT32 OsRwlockWrPendOp(LosTaskCB *runTask, LosRwlock *rwlock, UINT32 tim
/*
* When the rwlock is read mode or other write task obtains this rwlock, current
* write task will be pended.
* write task will be pended. | rwlock rwlock
*/
LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->writeList));
LOS_DL_LIST *node = OsSchedLockPendFindPos(runTask, &(rwlock->writeList));//找到要挂入的位置
ret = runTask->ops->wait(runTask, node, timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
ret = LOS_ETIMEDOUT;
@ -265,20 +276,22 @@ UINT32 OsRwlockTryWrUnsafe(LosRwlock *rwlock, UINT32 timeout)
return LOS_EBADF;
}
/* When the rwlock is read mode, current write task will be pended. */
/* When the rwlock is read mode, current write task will be pended.
| rwlock */
if (rwlock->rwCount > 0) {
return LOS_EBUSY;
}
/* When other write task obtains this rwlock, current write task will be pended. */
/* When other write task obtains this rwlock, current write task will be pended.
| rwlock*/
LosTaskCB *runTask = OsCurrTaskGet();
if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) != runTask)) {
return LOS_EBUSY;
}
return OsRwlockWrPendOp(runTask, rwlock, timeout);
return OsRwlockWrPendOp(runTask, rwlock, timeout);//
}
/// 申请指定的读模式下的锁
UINT32 LOS_RwlockRdLock(LosRwlock *rwlock, UINT32 timeout)
{
UINT32 intSave;
@ -293,7 +306,7 @@ UINT32 LOS_RwlockRdLock(LosRwlock *rwlock, UINT32 timeout)
SCHEDULER_UNLOCK(intSave);
return ret;
}
/// 尝试申请指定的读模式下的锁
UINT32 LOS_RwlockTryRdLock(LosRwlock *rwlock)
{
UINT32 intSave;
@ -304,11 +317,11 @@ UINT32 LOS_RwlockTryRdLock(LosRwlock *rwlock)
}
SCHEDULER_LOCK(intSave);
ret = OsRwlockTryRdUnsafe(rwlock, 0);
ret = OsRwlockTryRdUnsafe(rwlock, 0);//所谓尝试就是没锁爷就返回,不等待,不纠结.当前任务也不会被挂起
SCHEDULER_UNLOCK(intSave);
return ret;
}
/// 申请指定的写模式下的锁
UINT32 LOS_RwlockWrLock(LosRwlock *rwlock, UINT32 timeout)
{
UINT32 intSave;
@ -323,7 +336,7 @@ UINT32 LOS_RwlockWrLock(LosRwlock *rwlock, UINT32 timeout)
SCHEDULER_UNLOCK(intSave);
return ret;
}
/// 尝试申请指定的写模式下的锁
UINT32 LOS_RwlockTryWrLock(LosRwlock *rwlock)
{
UINT32 intSave;
@ -334,32 +347,32 @@ UINT32 LOS_RwlockTryWrLock(LosRwlock *rwlock)
}
SCHEDULER_LOCK(intSave);
ret = OsRwlockTryWrUnsafe(rwlock, 0);
ret = OsRwlockTryWrUnsafe(rwlock, 0);//所谓尝试就是没锁爷就返回,不等待,不纠结.当前任务也不会被挂起
SCHEDULER_UNLOCK(intSave);
return ret;
}
/// 获取读写锁模式
STATIC UINT32 OsRwlockGetMode(LOS_DL_LIST *readList, LOS_DL_LIST *writeList)
{
BOOL isReadEmpty = LOS_ListEmpty(readList);
BOOL isWriteEmpty = LOS_ListEmpty(writeList);
if (isReadEmpty && isWriteEmpty) {
return RWLOCK_NONE_MODE;
if (isReadEmpty && isWriteEmpty) { //读写链表都没有内容
return RWLOCK_NONE_MODE; //自由模式
}
if (!isReadEmpty && isWriteEmpty) {
if (!isReadEmpty && isWriteEmpty) { //读链表有数据,写链表没有数据
return RWLOCK_READ_MODE;
}
if (isReadEmpty && !isWriteEmpty) {
if (isReadEmpty && !isWriteEmpty) { //写链表有数据,读链表没有数据
return RWLOCK_WRITE_MODE;
}
LosTaskCB *pendedReadTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(readList));
LosTaskCB *pendedWriteTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(writeList));
if (OsSchedParamCompare(pendedWriteTask, pendedReadTask) <= 0) {
return RWLOCK_WRITEFIRST_MODE;
return RWLOCK_WRITEFIRST_MODE; //写的优先级高时,为写优先模式
}
return RWLOCK_READFIRST_MODE;
return RWLOCK_READFIRST_MODE; //读的优先级高时,为读优先模式
}
/// 释放锁
STATIC UINT32 OsRwlockPostOp(LosRwlock *rwlock, BOOL *needSched)
{
UINT32 rwlockMode;
@ -367,15 +380,15 @@ STATIC UINT32 OsRwlockPostOp(LosRwlock *rwlock, BOOL *needSched)
rwlock->rwCount = 0;
rwlock->writeOwner = NULL;
rwlockMode = OsRwlockGetMode(&(rwlock->readList), &(rwlock->writeList));
if (rwlockMode == RWLOCK_NONE_MODE) {
rwlockMode = OsRwlockGetMode(&(rwlock->readList), &(rwlock->writeList));//先获取模式
if (rwlockMode == RWLOCK_NONE_MODE) {//自由模式则正常返回
return LOS_OK;
}
/* In this case, rwlock will wake the first pended write task. */
if ((rwlockMode == RWLOCK_WRITE_MODE) || (rwlockMode == RWLOCK_WRITEFIRST_MODE)) {
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->writeList)));
rwlock->rwCount = -1;
rwlock->writeOwner = (VOID *)resumedTask;
/* In this case, rwlock will wake the first pended write task. | 在这种情况下rwlock 将唤醒第一个挂起的写任务。 */
if ((rwlockMode == RWLOCK_WRITE_MODE) || (rwlockMode == RWLOCK_WRITEFIRST_MODE)) {//如果当前是写模式 (有任务在等写锁涅)
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->writeList)));//获取任务实体
rwlock->rwCount = -1;//直接干成-1,注意这里并不是 --
rwlock->writeOwner = (VOID *)resumedTask;//有锁了则唤醒等锁的任务(写模式)
resumedTask->ops->wake(resumedTask);
if (needSched != NULL) {
*needSched = TRUE;
@ -383,29 +396,30 @@ STATIC UINT32 OsRwlockPostOp(LosRwlock *rwlock, BOOL *needSched)
return LOS_OK;
}
rwlock->rwCount = 1;
rwlock->rwCount = 1; //直接干成1,因为是释放操作
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->readList)));
resumedTask->ops->wake(resumedTask);
while (!LOS_ListEmpty(&(rwlock->readList))) {
while (!LOS_ListEmpty(&(rwlock->readList))) {//遍历读链表,目的是要唤醒其他读模式的任务(优先级得要高于pendedWriteTaskPri才行)
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->readList)));
if (rwlockMode == RWLOCK_READFIRST_MODE) {
LosTaskCB *pendedWriteTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(rwlock->writeList)));
if (OsSchedParamCompare(resumedTask, pendedWriteTask) >= 0) {
break;
}
break;//跳出循环
}
}
if (rwlock->rwCount == INT8_MAX) {
return EINVAL;
}
rwlock->rwCount++;
resumedTask->ops->wake(resumedTask);
rwlock->rwCount++;//读锁任务数量增加
resumedTask->ops->wake(resumedTask);//不断唤醒读锁任务,由此实现了允许多个读操作并发,因为在多核情况下resumedTask很大可能
//与当前任务并不在同一个核上运行, 此处非常有意思,点赞! @note_good
}
if (needSched != NULL) {
*needSched = TRUE;
}
return LOS_OK;
}
/// 释放锁,唤醒任务
UINT32 OsRwlockUnlockUnsafe(LosRwlock *rwlock, BOOL *needSched)
{
if ((rwlock->magic & RWLOCK_COUNT_MASK) != OS_RWLOCK_MAGIC) {
@ -417,27 +431,28 @@ UINT32 OsRwlockUnlockUnsafe(LosRwlock *rwlock, BOOL *needSched)
}
LosTaskCB *runTask = OsCurrTaskGet();
if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) != runTask)) {
if ((rwlock->rwCount < 0) && ((LosTaskCB *)(rwlock->writeOwner) != runTask)) {//写模式时,当前任务未持有锁
return LOS_EPERM;
}
/*
* When the rwCount of the rwlock more than 1 or less than -1, the rwlock mode will
* not changed after current unlock operation, so pended tasks can not be waken.
| rwlock rwCount 1 -1 rwlock
*/
if (rwlock->rwCount > 1) {
if (rwlock->rwCount > 1) {//读模式
rwlock->rwCount--;
return LOS_OK;
}
if (rwlock->rwCount < -1) {
if (rwlock->rwCount < -1) {//写模式
rwlock->rwCount++;
return LOS_OK;
}
return OsRwlockPostOp(rwlock, needSched);
}
/// 释放指定读写锁
UINT32 LOS_RwlockUnLock(LosRwlock *rwlock)
{
UINT32 intSave;
@ -451,9 +466,9 @@ UINT32 LOS_RwlockUnLock(LosRwlock *rwlock)
SCHEDULER_LOCK(intSave);
ret = OsRwlockUnlockUnsafe(rwlock, &needSched);
SCHEDULER_UNLOCK(intSave);
LOS_MpSchedule(OS_MP_CPU_ALL);
if (needSched == TRUE) {
LOS_Schedule();
LOS_MpSchedule(OS_MP_CPU_ALL);//设置调度CPU的方式,所有CPU参与调度
if (needSched == TRUE) {//是否需要调度
LOS_Schedule();//产生调度,切换任务执行
}
return ret;
}

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -40,17 +40,18 @@
#include "los_percpu_pri.h"
#include "los_hook.h"
#ifdef LOSCFG_BASE_IPC_SEM
#if (LOSCFG_BASE_IPC_SEM_LIMIT <= 0)
#error "sem maxnum cannot be zero"
#endif /* LOSCFG_BASE_IPC_SEM_LIMIT <= 0 */
LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_unusedSemList;
LITE_OS_SEC_BSS LosSemCB *g_allSem = NULL;
LITE_OS_SEC_DATA_INIT STATIC LOS_DL_LIST g_unusedSemList; ///< 可用的信号量列表,干嘛不用freeList? 可以看出这里是另一个人写的代码
LITE_OS_SEC_BSS LosSemCB *g_allSem = NULL; ///< 信号池,一次分配 LOSCFG_BASE_IPC_SEM_LIMIT 个信号量
/*
* Description : Initialize the semaphore doubly linked list
* Description : Initialize the semaphore doubly linked list |
* Return : LOS_OK on success, or error code on failure
*/
LITE_OS_SEC_TEXT_INIT UINT32 OsSemInit(VOID)
@ -58,18 +59,18 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSemInit(VOID)
LosSemCB *semNode = NULL;
UINT32 index;
LOS_ListInit(&g_unusedSemList);
LOS_ListInit(&g_unusedSemList);//初始化链表,链表上挂未使用的信号量,用于分配信号量,鸿蒙信号量的个数是有限的,默认1024个
/* system resident memory, don't free */
g_allSem = (LosSemCB *)LOS_MemAlloc(m_aucSysMem0, (LOSCFG_BASE_IPC_SEM_LIMIT * sizeof(LosSemCB)));
g_allSem = (LosSemCB *)LOS_MemAlloc(m_aucSysMem0, (LOSCFG_BASE_IPC_SEM_LIMIT * sizeof(LosSemCB)));//分配信号池
if (g_allSem == NULL) {
return LOS_ERRNO_SEM_NO_MEMORY;
}
for (index = 0; index < LOSCFG_BASE_IPC_SEM_LIMIT; index++) {
semNode = ((LosSemCB *)g_allSem) + index;
semNode->semID = SET_SEM_ID(0, index);
semNode->semStat = OS_SEM_UNUSED;
LOS_ListTailInsert(&g_unusedSemList, &semNode->semList);
semNode = ((LosSemCB *)g_allSem) + index;//拿信号控制块, 可以直接g_allSem[index]来嘛
semNode->semID = SET_SEM_ID(0, index);//保存ID
semNode->semStat = OS_SEM_UNUSED;//标记未使用
LOS_ListTailInsert(&g_unusedSemList, &semNode->semList);//通过semList把 信号块挂到空闲链表上
}
if (OsSemDbgInitHook() != LOS_OK) {
@ -78,13 +79,7 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSemInit(VOID)
return LOS_OK;
}
/*
* Description : Create a semaphore,
* Input : count --- semaphore count,
* maxCount --- Max number of available semaphores,
* semHandle --- Index of semaphore,
* Return : LOS_OK on success ,or error code on failure
*/
LITE_OS_SEC_TEXT_INIT UINT32 OsSemCreate(UINT16 count, UINT16 maxCount, UINT32 *semHandle)
{
UINT32 intSave;
@ -97,45 +92,46 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsSemCreate(UINT16 count, UINT16 maxCount, UINT32 *
return LOS_ERRNO_SEM_PTR_NULL;
}
if (count > maxCount) {
if (count > maxCount) {//信号量不能大于最大值,两参数都是外面给的
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_OVERFLOW);
}
SCHEDULER_LOCK(intSave);
SCHEDULER_LOCK(intSave);//进入临界区,拿自旋锁
if (LOS_ListEmpty(&g_unusedSemList)) {
if (LOS_ListEmpty(&g_unusedSemList)) {//没有可分配的空闲信号提供
SCHEDULER_UNLOCK(intSave);
OsSemInfoGetFullDataHook();
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_ALL_BUSY);
}
unusedSem = LOS_DL_LIST_FIRST(&g_unusedSemList);
LOS_ListDelete(unusedSem);
unusedSem = LOS_DL_LIST_FIRST(&g_unusedSemList);//从未使用信号量池中取首个
LOS_ListDelete(unusedSem);//从空闲链表上摘除
SCHEDULER_UNLOCK(intSave);
semCreated = GET_SEM_LIST(unusedSem);
semCreated->semCount = count;
semCreated->semStat = OS_SEM_USED;
semCreated->maxSemCount = maxCount;
LOS_ListInit(&semCreated->semList);
*semHandle = semCreated->semID;
semCreated = GET_SEM_LIST(unusedSem);//通过semList挂到链表上的,这里也要通过它把LosSemCB头查到. 进程,线程等结构体也都是这么干的.
semCreated->semCount = count;//设置数量
semCreated->semStat = OS_SEM_USED;//设置可用状态
semCreated->maxSemCount = maxCount;//设置最大信号数量
LOS_ListInit(&semCreated->semList);//初始化链表,后续阻塞任务通过task->pendList挂到semList链表上,就知道哪些任务在等它了.
*semHandle = semCreated->semID;//参数带走 semID
OsHookCall(LOS_HOOK_TYPE_SEM_CREATE, semCreated);
OsSemDbgUpdateHook(semCreated->semID, OsCurrTaskGet()->taskEntry, count);
return LOS_OK;
ERR_HANDLER:
OS_RETURN_ERROR_P2(errLine, errNo);
}
///对外接口 创建信号量
LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemCreate(UINT16 count, UINT32 *semHandle)
{
return OsSemCreate(count, OS_SEM_COUNT_MAX, semHandle);
}
///对外接口 创建二值信号量其计数值最大为1可以当互斥锁用
LITE_OS_SEC_TEXT_INIT UINT32 LOS_BinarySemCreate(UINT16 count, UINT32 *semHandle)
{
return OsSemCreate(count, OS_SEM_BINARY_COUNT_MAX, semHandle);
}
///对外接口 删除指定的信号量,参数就是 semID
LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)
{
UINT32 intSave;
@ -147,23 +143,23 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_INVALID);
}
semDeleted = GET_SEM(semHandle);
semDeleted = GET_SEM(semHandle);//通过ID拿到信号量实体
SCHEDULER_LOCK(intSave);
if ((semDeleted->semStat == OS_SEM_UNUSED) || (semDeleted->semID != semHandle)) {
if ((semDeleted->semStat == OS_SEM_UNUSED) || (semDeleted->semID != semHandle)) {//参数判断
SCHEDULER_UNLOCK(intSave);
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_INVALID);
}
if (!LOS_ListEmpty(&semDeleted->semList)) {
if (!LOS_ListEmpty(&semDeleted->semList)) {//当前还有任务挂在这个信号上面,当然不能删除
SCHEDULER_UNLOCK(intSave);
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_PENDED);
OS_GOTO_ERR_HANDLER(LOS_ERRNO_SEM_PENDED);//这个宏很有意思,里面goto到ERR_HANDLER
}
LOS_ListTailInsert(&g_unusedSemList, &semDeleted->semList);
semDeleted->semStat = OS_SEM_UNUSED;
semDeleted->semID = SET_SEM_ID(GET_SEM_COUNT(semDeleted->semID) + 1, GET_SEM_INDEX(semDeleted->semID));
LOS_ListTailInsert(&g_unusedSemList, &semDeleted->semList);//通过semList从尾部插入空闲链表
semDeleted->semStat = OS_SEM_UNUSED;//状态变成了未使用
semDeleted->semID = SET_SEM_ID(GET_SEM_COUNT(semDeleted->semID) + 1, GET_SEM_INDEX(semDeleted->semID));//设置ID
OsHookCall(LOS_HOOK_TYPE_SEM_DELETE, semDeleted);
OsSemDbgUpdateHook(semDeleted->semID, NULL, 0);
@ -174,11 +170,11 @@ LITE_OS_SEC_TEXT_INIT UINT32 LOS_SemDelete(UINT32 semHandle)
ERR_HANDLER:
OS_RETURN_ERROR_P2(errLine, errNo);
}
///对外接口 申请指定的信号量,并设置超时时间
LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
{
UINT32 intSave;
LosSemCB *semPended = GET_SEM(semHandle);
LosSemCB *semPended = GET_SEM(semHandle);//通过ID拿到信号体
UINT32 retErr = LOS_OK;
LosTaskCB *runTask = NULL;
@ -192,7 +188,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
return LOS_ERRNO_SEM_PEND_INTERR;
}
runTask = OsCurrTaskGet();
runTask = OsCurrTaskGet();//获取当前任务
if (runTask->taskStatus & OS_TASK_FLAG_SYSTEM_TASK) {
OsBackTrace();
return LOS_ERRNO_SEM_PEND_IN_SYSTEM_TASK;
@ -204,19 +200,20 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
retErr = LOS_ERRNO_SEM_INVALID;
goto OUT;
}
/* Update the operate time, no matter the actual Pend success or not */
OsSemDbgTimeUpdateHook(semHandle);
if (semPended->semCount > 0) {
semPended->semCount--;
if (semPended->semCount > 0) {//还有资源可用,返回肯定得成功,semCount=0时代表没资源了,task会必须去睡眠了
semPended->semCount--;//资源少了一个
OsHookCall(LOS_HOOK_TYPE_SEM_PEND, semPended, runTask, timeout);
goto OUT;
goto OUT;//注意这里 retErr = LOS_OK ,所以返回是OK的
} else if (!timeout) {
retErr = LOS_ERRNO_SEM_UNAVAILABLE;
goto OUT;
}
if (!OsPreemptableInSched()) {
if (!OsPreemptableInSched()) {//不能申请调度 (不能调度的原因是因为没有持有调度任务自旋锁)
PRINT_ERR("!!!LOS_ERRNO_SEM_PEND_IN_LOCK!!!\n");
OsBackTrace();
retErr = LOS_ERRNO_SEM_PEND_IN_LOCK;
@ -226,7 +223,7 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPend(UINT32 semHandle, UINT32 timeout)
OsHookCall(LOS_HOOK_TYPE_SEM_PEND, semPended, runTask, timeout);
OsTaskWaitSetPendMask(OS_TASK_WAIT_SEM, semPended->semID, timeout);
retErr = runTask->ops->wait(runTask, &semPended->semList, timeout);
if (retErr == LOS_ERRNO_TSK_TIMEOUT) {
if (retErr == LOS_ERRNO_TSK_TIMEOUT) {//注意:这里是涉及到task切换的,把自己挂起,唤醒其他task
retErr = LOS_ERRNO_SEM_TIMEOUT;
}
@ -234,7 +231,7 @@ OUT:
SCHEDULER_UNLOCK(intSave);
return retErr;
}
///以不安全的方式释放指定的信号量,所谓不安全指的是不用自旋锁
LITE_OS_SEC_TEXT UINT32 OsSemPostUnsafe(UINT32 semHandle, BOOL *needSched)
{
LosTaskCB *resumedTask = NULL;
@ -246,23 +243,23 @@ LITE_OS_SEC_TEXT UINT32 OsSemPostUnsafe(UINT32 semHandle, BOOL *needSched)
/* Update the operate time, no matter the actual Post success or not */
OsSemDbgTimeUpdateHook(semHandle);
if (semPosted->semCount == OS_SEM_COUNT_MAX) {
if (semPosted->semCount == OS_SEM_COUNT_MAX) {//当前信号资源不能大于最大资源量
return LOS_ERRNO_SEM_OVERFLOW;
}
if (!LOS_ListEmpty(&semPosted->semList)) {
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(semPosted->semList)));
if (!LOS_ListEmpty(&semPosted->semList)) {//当前有任务挂在semList上,要去唤醒任务
resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(semPosted->semList)));//semList上面挂的都是task->pendlist节点,取第一个task下来唤醒
OsTaskWakeClearPendMask(resumedTask);
resumedTask->ops->wake(resumedTask);
if (needSched != NULL) {
*needSched = TRUE;
if (needSched != NULL) {//参数不为空,就返回需要调度的标签
*needSched = TRUE;//TRUE代表需要调度
}
} else {
semPosted->semCount++;
} else {//当前没有任务挂在semList上,
semPosted->semCount++;//信号资源多一个
}
OsHookCall(LOS_HOOK_TYPE_SEM_POST, semPosted, resumedTask);
return LOS_OK;
}
///对外接口 释放指定的信号量
LITE_OS_SEC_TEXT UINT32 LOS_SemPost(UINT32 semHandle)
{
UINT32 intSave;
@ -272,16 +269,15 @@ LITE_OS_SEC_TEXT UINT32 LOS_SemPost(UINT32 semHandle)
if (GET_SEM_INDEX(semHandle) >= LOSCFG_BASE_IPC_SEM_LIMIT) {
return LOS_ERRNO_SEM_INVALID;
}
SCHEDULER_LOCK(intSave);
ret = OsSemPostUnsafe(semHandle, &needSched);
SCHEDULER_UNLOCK(intSave);
if (needSched) {
LOS_MpSchedule(OS_MP_CPU_ALL);
LOS_Schedule();
if (needSched) {//需要调度的情况
LOS_MpSchedule(OS_MP_CPU_ALL);//向所有CPU发送调度指令
LOS_Schedule();////发起调度
}
return ret;
}
#endif /* LOSCFG_BASE_IPC_SEM */
#endif /* (LOSCFG_BASE_IPC_SEM == YES) */

@ -78,11 +78,11 @@ STATIC VOID OsSemPendedTaskNamePrint(LosSemCB *semNode)
#ifdef LOSCFG_DEBUG_SEMAPHORE
typedef struct {
UINT16 origSemCount; /* Number of original available semaphores */
UINT64 lastAccessTime; /* The last operation time */
TSK_ENTRY_FUNC creator; /* The task entry who created this sem */
UINT16 origSemCount; /* Number of orignal available semaphores *///原始可用信号量数
UINT64 lastAccessTime; /* The last operation time */ //最后操作时间
TSK_ENTRY_FUNC creator; /* The task entry who created this sem */ //由哪个task的入口函数创建了这个任务
} SemDebugCB;
STATIC SemDebugCB *g_semDebugArray = NULL;
STATIC SemDebugCB *g_semDebugArray = NULL;//默认1024个SemDebugCB debug信号量池
STATIC BOOL SemCompareValue(const IpcSortParam *sortParam, UINT32 left, UINT32 right)
{
@ -102,23 +102,23 @@ UINT32 OsSemDbgInit(VOID)
(VOID)memset_s(g_semDebugArray, size, 0, size);
return LOS_OK;
}
///更新最后访问时间
VOID OsSemDbgTimeUpdate(UINT32 semID)
{
SemDebugCB *semDebug = &g_semDebugArray[GET_SEM_INDEX(semID)];
semDebug->lastAccessTime = LOS_TickCountGet();
semDebug->lastAccessTime = LOS_TickCountGet();//获取tick总数
return;
}
///更新信号量
VOID OsSemDbgUpdate(UINT32 semID, TSK_ENTRY_FUNC creator, UINT16 count)
{
SemDebugCB *semDebug = &g_semDebugArray[GET_SEM_INDEX(semID)];
semDebug->creator = creator;
semDebug->lastAccessTime = LOS_TickCountGet();
semDebug->origSemCount = count;
semDebug->creator = creator; //改为由参数入口函数创建了这个任务
semDebug->lastAccessTime = LOS_TickCountGet();//获取tick总数
semDebug->origSemCount = count;//原始信号量改变
return;
}
///按信号量访问时间排序
STATIC VOID OsSemSort(UINT32 *semIndexArray, UINT32 usedCount)
{
UINT32 i, intSave;
@ -296,6 +296,6 @@ LITE_OS_SEC_TEXT_MINOR UINT32 OsShellCmdSemInfoGet(UINT32 argc, const CHAR **arg
return ret;
}
SHELLCMD_ENTRY(sem_shellcmd, CMD_TYPE_EX, "sem", 1, (CmdCallBackFunc)OsShellCmdSemInfoGet);
SHELLCMD_ENTRY(sem_shellcmd, CMD_TYPE_EX, "sem", 1, (CmdCallBackFunc)OsShellCmdSemInfoGet);//采用shell命令静态注册方式
#endif

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2023 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -53,15 +53,17 @@ int raise(int sig)
#define GETUNMASKSET(procmask, pendFlag) ((~(procmask)) & (sigset_t)(pendFlag))
#define UINT64_BIT_SIZE 64
int OsSigIsMember(const sigset_t *set, int signo)
{
int ret = LOS_NOK;
/* In musl, sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
/* In musl, sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
//在musl中sig No bits 00000100表示sig No 3但是在SIGNO2SET中 1<<3 = 00001000,因此signo需要减1
signo -= 1;
/* Verify the signal */
if (GOOD_SIGNO(signo)) {
if (GOOD_SIGNO(signo)) {//有效信号判断
/* Check if the signal is in the set */
ret = ((*set & SIGNO2SET((unsigned int)signo)) != 0);
ret = ((*set & SIGNO2SET((unsigned int)signo)) != 0);//检查信号是否还在集合中
}
return ret;
@ -120,7 +122,6 @@ VOID OsClearSigInfoTmpList(sig_cb *sigcb)
(VOID)LOS_MemFree(m_aucSysMem0, tmpInfoNode);
}
}
STATIC INLINE VOID OsSigWaitTaskWake(LosTaskCB *taskCB, INT32 signo)
{
sig_cb *sigcb = &taskCB->sig;
@ -132,14 +133,14 @@ STATIC INLINE VOID OsSigWaitTaskWake(LosTaskCB *taskCB, INT32 signo)
OsSigEmptySet(&sigcb->sigwaitmask);
}
}
///< 唤醒被挂起的处于等待指定信号的任务
STATIC UINT32 OsPendingTaskWake(LosTaskCB *taskCB, INT32 signo)
{
if (!OsTaskIsPending(taskCB) || !OsProcessIsUserMode(OS_PCB_FROM_TCB(taskCB))) {
return 0;
}
if ((signo != SIGKILL) && (taskCB->waitFlag != OS_TASK_WAIT_SIGNAL)) {
if ((signo != SIGKILL) && (taskCB->waitFlag != OS_TASK_WAIT_SIGNAL)) { // @note_thinking 这个判断会不会有问题 ?
return 0;
}
@ -153,16 +154,16 @@ STATIC UINT32 OsPendingTaskWake(LosTaskCB *taskCB, INT32 signo)
OsTaskWakeClearPendMask(taskCB);
taskCB->ops->wake(taskCB);
break;
case OS_TASK_WAIT_SIGNAL:
case OS_TASK_WAIT_SIGNAL://等待普通信号
OsSigWaitTaskWake(taskCB, signo);
break;
case OS_TASK_WAIT_LITEIPC:
OsTaskWakeClearPendMask(taskCB);
case OS_TASK_WAIT_LITEIPC://等待liteipc信号
OsTaskWakeClearPendMask(taskCB);//重置任务的等待信息
taskCB->ops->wake(taskCB);
break;
case OS_TASK_WAIT_FUTEX:
OsFutexNodeDeleteFromFutexHash(&taskCB->futex, TRUE, NULL, NULL);
OsTaskWakeClearPendMask(taskCB);
case OS_TASK_WAIT_FUTEX://等待快锁信号
OsFutexNodeDeleteFromFutexHash(&taskCB->futex, TRUE, NULL, NULL);//从哈希桶中删除快锁
OsTaskWakeClearPendMask(taskCB);//重置任务的等待信息
taskCB->ops->wake(taskCB);
break;
default:
@ -171,7 +172,7 @@ STATIC UINT32 OsPendingTaskWake(LosTaskCB *taskCB, INT32 signo)
return 0;
}
///给任务(线程)发送一个信号
int OsTcbDispatch(LosTaskCB *stcb, siginfo_t *info)
{
bool masked = FALSE;
@ -179,19 +180,19 @@ int OsTcbDispatch(LosTaskCB *stcb, siginfo_t *info)
OS_RETURN_IF_NULL(sigcb);
/* If signo is 0, not send signal, just check process or pthread exist */
if (info->si_signo == 0) {
if (info->si_signo == 0) {//如果信号为0,则不发送信号,只是作为检查进程和线程是否还存在.
return 0;
}
masked = (bool)OsSigIsMember(&sigcb->sigprocmask, info->si_signo);
if (masked) {
/* If signal is in wait list and mask list, need unblock it */
masked = (bool)OsSigIsMember(&sigcb->sigprocmask, info->si_signo);//@note_thinking 这里还有 masked= -1的情况要处理!!!
if (masked) {//如果信号被屏蔽了,要看等待信号集,sigwaitmask
/* If signal is in wait list and mask list, need unblock it */ //如果信号在等待列表和掩码列表中,需要解除阻止
if (LOS_ListEmpty(&sigcb->waitList) ||
(!LOS_ListEmpty(&sigcb->waitList) && !OsSigIsMember(&sigcb->sigwaitmask, info->si_signo))) {
OsSigAddSet(&sigcb->sigPendFlag, info->si_signo);
(!LOS_ListEmpty(&sigcb->waitList) && !OsSigIsMember(&sigcb->sigwaitmask, info->si_signo))) {
OsSigAddSet(&sigcb->sigPendFlag, info->si_signo);//将信号加入挂起/待办集
}
} else {
} else {//信号没有被屏蔽的处理
/* unmasked signal actions */
OsSigAddSet(&sigcb->sigFlag, info->si_signo);
OsSigAddSet(&sigcb->sigFlag, info->si_signo);//不屏蔽的信号集
}
if (OsAddSigInfoToTmpList(sigcb, info) == LOS_NOK) {
@ -206,14 +207,15 @@ void OsSigMaskSwitch(LosTaskCB * const rtcb, sigset_t set)
sigset_t unmaskset;
rtcb->sig.sigprocmask = set;
unmaskset = GETUNMASKSET(rtcb->sig.sigprocmask, rtcb->sig.sigPendFlag);
unmaskset = GETUNMASKSET(rtcb->sig.sigprocmask, rtcb->sig.sigPendFlag);//过滤出没有被屏蔽的信号集
if (unmaskset != NULL_SIGNAL_SET) {
/* pendlist do */
rtcb->sig.sigFlag |= unmaskset;
rtcb->sig.sigPendFlag ^= unmaskset;
rtcb->sig.sigFlag |= unmaskset; //加入不屏蔽信号集
rtcb->sig.sigPendFlag ^= unmaskset;//从挂起/待办集中去掉unmaskset
}
}
int OsSigprocMask(int how, const sigset_t_l *setl, sigset_t_l *oldsetl)
{
LosTaskCB *spcb = NULL;
@ -223,11 +225,11 @@ int OsSigprocMask(int how, const sigset_t_l *setl, sigset_t_l *oldsetl)
SCHEDULER_LOCK(intSave);
spcb = OsCurrTaskGet();
/* If requested, copy the old mask to user. */
/* If requested, copy the old mask to user. | 如果需要,请将旧掩码复制给用户*/
if (oldsetl != NULL) {
*(sigset_t *)oldsetl = spcb->sig.sigprocmask;
}
/* If requested, modify the current signal mask. */
/* If requested, modify the current signal mask. | 如有要求,修改当前信号屏蔽*/
if (setl != NULL) {
set = *(sigset_t *)setl;
/* Okay, determine what we are supposed to do */
@ -236,46 +238,46 @@ int OsSigprocMask(int how, const sigset_t_l *setl, sigset_t_l *oldsetl)
* set pointed to by set as the new sigprocmask.
*/
case SIG_BLOCK:
spcb->sig.sigprocmask |= set;
spcb->sig.sigprocmask |= set;//增加信号屏蔽位
break;
/* Set the intersection of the current set and the
* signal set pointed to by set as the new sigprocmask.
*/
case SIG_UNBLOCK:
spcb->sig.sigprocmask &= ~(set);
spcb->sig.sigprocmask &= ~(set);//解除信号屏蔽位
break;
/* Set the signal set pointed to by set as the new sigprocmask. */
case SIG_SETMASK:
spcb->sig.sigprocmask = set;
spcb->sig.sigprocmask = set;//设置一个新的屏蔽掩码
break;
default:
ret = -EINVAL;
break;
}
/* If pending mask not in sigmask, need set sigflag. */
OsSigMaskSwitch(spcb, spcb->sig.sigprocmask);
OsSigMaskSwitch(spcb, spcb->sig.sigprocmask);//更新与屏蔽信号相关的变量
}
SCHEDULER_UNLOCK(intSave);
return ret;
}
///让进程的每一个task执行参数函数
int OsSigProcessForeachChild(LosProcessCB *spcb, ForEachTaskCB handler, void *arg)
{
int ret;
/* Visit the main thread last (if present) */
LosTaskCB *taskCB = NULL;
LOS_DL_LIST_FOR_EACH_ENTRY(taskCB, &(spcb->threadSiblingList), LosTaskCB, threadList) {
ret = handler(taskCB, arg);
OS_RETURN_IF(ret != 0, ret);
LosTaskCB *taskCB = NULL;//遍历进程的 threadList 链表,里面存放的都是task节点
LOS_DL_LIST_FOR_EACH_ENTRY(taskCB, &(spcb->threadSiblingList), LosTaskCB, threadList) {//遍历进程的任务列表
ret = handler(taskCB, arg);//回调参数函数
OS_RETURN_IF(ret != 0, ret);//这个宏的意思就是只有ret = 0时,啥也不处理.其余就返回 ret
}
return LOS_OK;
}
///信号处理函数,这里就是上面的 handler = SigProcessSignalHandler,见于 OsSigProcessSend
static int SigProcessSignalHandler(LosTaskCB *tcb, void *arg)
{
struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg;
struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg;//先把参数解出来
int ret;
int isMember;
@ -283,128 +285,130 @@ static int SigProcessSignalHandler(LosTaskCB *tcb, void *arg)
return 0;
}
/* If the default tcb is not set, then set this one as default. */
if (!info->defaultTcb) {
/* If the default tcb is not setted, then set this one as default. */
if (!info->defaultTcb) {//如果没有默认发送方的任务,即默认参数任务.
info->defaultTcb = tcb;
}
isMember = OsSigIsMember(&tcb->sig.sigwaitmask, info->sigInfo->si_signo);
if (isMember && (!info->awakenedTcb)) {
isMember = OsSigIsMember(&tcb->sig.sigwaitmask, info->sigInfo->si_signo);//任务是否在等待这个信号
if (isMember && (!info->awakenedTcb)) {//是在等待,并尚未向该任务时发送信号时
/* This means the task is waiting for this signal. Stop looking for it and use this tcb.
* The requirement is: if more than one task in this task group is waiting for the signal,
* then only one indeterminate task in the group will receive the signal.
*/
ret = OsTcbDispatch(tcb, info->sigInfo);
OS_RETURN_IF(ret < 0, ret);
ret = OsTcbDispatch(tcb, info->sigInfo);//发送信号,注意这是给其他任务发送信号,tcb不是当前任务
OS_RETURN_IF(ret < 0, ret);//这种写法很有意思
/* set this tcb as awakenedTcb */
info->awakenedTcb = tcb;
OS_RETURN_IF(info->receivedTcb != NULL, SIG_STOP_VISIT); /* Stop search */
}
/* Is this signal unblocked on this thread? */
isMember = OsSigIsMember(&tcb->sig.sigprocmask, info->sigInfo->si_signo);
if ((!isMember) && (!info->receivedTcb) && (tcb != info->awakenedTcb)) {
/* if unblockedTcb of this signal is not set, then set it. */
isMember = OsSigIsMember(&tcb->sig.sigprocmask, info->sigInfo->si_signo);//任务是否屏蔽了这个信号
if ((!isMember) && (!info->receivedTcb) && (tcb != info->awakenedTcb)) {//没有屏蔽,有唤醒任务没有接收任务.
/* if unblockedTcb of this signal is not setted, then set it. */
if (!info->unblockedTcb) {
info->unblockedTcb = tcb;
}
ret = OsTcbDispatch(tcb, info->sigInfo);
ret = OsTcbDispatch(tcb, info->sigInfo);//向任务发送信号
OS_RETURN_IF(ret < 0, ret);
/* set this tcb as receivedTcb */
info->receivedTcb = tcb;
info->receivedTcb = tcb;//设置这个任务为接收任务
OS_RETURN_IF(info->awakenedTcb != NULL, SIG_STOP_VISIT); /* Stop search */
}
return 0; /* Keep searching */
}
///进程收到 SIGKILL 信号后,通知任务tcb处理.
static int SigProcessKillSigHandler(LosTaskCB *tcb, void *arg)
{
struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg;
struct ProcessSignalInfo *info = (struct ProcessSignalInfo *)arg;//转参
return OsPendingTaskWake(tcb, info->sigInfo->si_signo);
}
//处理信号发送
static void SigProcessLoadTcb(struct ProcessSignalInfo *info, siginfo_t *sigInfo)
{
LosTaskCB *tcb = NULL;
if (info->awakenedTcb == NULL && info->receivedTcb == NULL) {
if (info->unblockedTcb) {
tcb = info->unblockedTcb;
} else if (info->defaultTcb) {
if (info->awakenedTcb == NULL && info->receivedTcb == NULL) {//信号即没有指定接收task 也没有指定被唤醒task
if (info->unblockedTcb) {//如果进程信号信息体中有阻塞task
tcb = info->unblockedTcb;//
} else if (info->defaultTcb) {//如果有默认的发送方task
tcb = info->defaultTcb;
} else {
return;
}
/* Deliver the signal to the selected task */
(void)OsTcbDispatch(tcb, sigInfo);
(void)OsTcbDispatch(tcb, sigInfo);//向所选任务发送信号
}
}
///给参数进程发送参数信号
int OsSigProcessSend(LosProcessCB *spcb, siginfo_t *sigInfo)
{
int ret;
struct ProcessSignalInfo info = {
.sigInfo = sigInfo,
.defaultTcb = NULL,
.sigInfo = sigInfo, //信号内容
.defaultTcb = NULL, //以下四个值将在OsSigProcessForeachChild中根据条件完善
.unblockedTcb = NULL,
.awakenedTcb = NULL,
.receivedTcb = NULL
};
if (info.sigInfo == NULL) {
//总之是要从进程中找个至少一个任务来接受这个信号,优先级
//awakenedTcb > receivedTcb > unblockedTcb > defaultTcb
if (info.sigInfo == NULL){
return -EFAULT;
}
/* visit all taskcb and dispatch signal */
if (info.sigInfo->si_signo == SIGKILL) {
OsSigAddSet(&spcb->sigShare, info.sigInfo->si_signo);
/* visit all taskcb and dispatch signal */ //访问所有任务和分发信号
if (info.sigInfo->si_signo == SIGKILL) {//需要干掉进程时 SIGKILL = 9 #linux kill 9 14
OsSigAddSet(&spcb->sigShare, info.sigInfo->si_signo);//信号集中增加信号
(void)OsSigProcessForeachChild(spcb, SigProcessKillSigHandler, &info);
return 0;
} else {
ret = OsSigProcessForeachChild(spcb, SigProcessSignalHandler, &info);
ret = OsSigProcessForeachChild(spcb, SigProcessSignalHandler, &info);//进程通知所有task处理信号
}
if (ret < 0) {
return ret;
}
SigProcessLoadTcb(&info, sigInfo);
SigProcessLoadTcb(&info, sigInfo);//确保能给一个任务发送信号
return 0;
}
///信号集全部清0
int OsSigEmptySet(sigset_t *set)
{
*set = NULL_SIGNAL_SET;
return 0;
}
/* Privilege process can't send to kernel and privilege process */
/* Privilege process can't send to kernel and privilege process */ //内核进程组和用户特权进程组无法发送
static int OsSignalPermissionToCheck(const LosProcessCB *spcb)
{
UINTPTR gid = (UINTPTR)OS_GET_PGROUP_LEADER(spcb->pgroup);
if (gid == OS_KERNEL_PROCESS_GROUP) {
if (gid == OS_KERNEL_PROCESS_GROUP) {//内核进程组
return -EPERM;
} else if (gid == OS_USER_PRIVILEGE_PROCESS_GROUP) {
} else if (gid == OS_USER_PRIVILEGE_PROCESS_GROUP) {//用户特权进程组
return -EPERM;
}
return 0;
}
///信号分发,发送信号权限/进程组过滤.
STATIC int SendSigPermissionCheck(LosProcessCB *spcb, int permission)
{
if (spcb == NULL) {
return -ESRCH;
}
if (OsProcessIsUnused(spcb)) {
if (OsProcessIsUnused(spcb)) {//进程是否还在使用,不一定是当前进程但必须是个有效进程
return -ESRCH;
}
#ifdef LOSCFG_SECURITY_CAPABILITY
LosProcessCB *current = OsCurrProcessGet();
/* Kernel process always has kill permission and user process should check permission */
if (OsProcessIsUserMode(current) && !(current->processStatus & OS_PROCESS_FLAG_EXIT)) {
#ifdef LOSCFG_SECURITY_CAPABILITY //启用能力安全模式
LosProcessCB *current = OsCurrProcessGet();//获取当前进程,检查当前进程是否有发送信号的权限.
/* Kernel process always has kill permission and user process should check permission *///内核进程总是有kill权限用户进程需要检查权限
if (OsProcessIsUserMode(current) && !(current->processStatus & OS_PROCESS_FLAG_EXIT)) {//用户进程检查能力范围
if ((current != spcb) && (!IsCapPermit(CAP_KILL)) && (current->user->userID != spcb->user->userID)) {
return -EPERM;
}
@ -437,7 +441,7 @@ int OsSendSigToProcess(LosProcessCB *spcb, int sig, int permission)
info.si_code = SI_USER;
info.si_value.sival_ptr = NULL;
return OsSigProcessSend(spcb, &info);
return OsSigProcessSend(spcb, &info);//给参数进程发送信号
}
int OsDispatch(pid_t pid, siginfo_t *info, int permission)
@ -470,14 +474,14 @@ int OsKill(pid_t pid, int sig, int permission)
return -EINVAL;
}
/* Create the siginfo structure */
info.si_signo = sig;
info.si_code = SI_USER;
/* Create the siginfo structure */ //创建信号结构体
info.si_signo = sig; //信号编号
info.si_code = SI_USER; //来自用户进程信号
info.si_value.sival_ptr = NULL;
if (pid > 0) {
/* Send the signal to the specify process */
ret = OsDispatch(pid, &info, permission);
ret = OsDispatch(pid, &info, permission);//发送信号
} else if (pid == -1) {
/* Send SIG to all processes */
ret = OsSendSignalToAllProcess(&info, permission);
@ -489,18 +493,17 @@ int OsKill(pid_t pid, int sig, int permission)
}
return ret;
}
///给发送信号过程加锁
int OsKillLock(pid_t pid, int sig)
{
int ret;
unsigned int intSave;
SCHEDULER_LOCK(intSave);
ret = OsKill(pid, sig, OS_USER_KILL_PERMISSION);
ret = OsKill(pid, sig, OS_USER_KILL_PERMISSION);//用户权限向进程发送信号
SCHEDULER_UNLOCK(intSave);
return ret;
}
INT32 OsTaskKillUnsafe(UINT32 taskID, INT32 signo)
{
siginfo_t info;
@ -519,7 +522,7 @@ INT32 OsTaskKillUnsafe(UINT32 taskID, INT32 signo)
* dispatch rules. */
return OsTcbDispatch(taskCB, &info);
}
///发送信号
int OsPthreadKill(UINT32 tid, int signo)
{
int ret;
@ -537,7 +540,7 @@ int OsPthreadKill(UINT32 tid, int signo)
SCHEDULER_UNLOCK(intSave);
return ret;
}
///向信号集中加入signo信号
int OsSigAddSet(sigset_t *set, int signo)
{
/* Verify the signal */
@ -545,13 +548,13 @@ int OsSigAddSet(sigset_t *set, int signo)
return -EINVAL;
} else {
/* In musl, sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
signo -= 1;
signo -= 1;// 信号范围是 [1 ~ 64 ],而保存变量位的范围是[0 ~ 63]
/* Add the signal to the set */
*set |= SIGNO2SET((unsigned int)signo);
*set |= SIGNO2SET((unsigned int)signo);//填充信号集
return LOS_OK;
}
}
///获取阻塞当前任务的信号集
int OsSigPending(sigset_t *set)
{
LosTaskCB *tcb = NULL;
@ -563,7 +566,7 @@ int OsSigPending(sigset_t *set)
SCHEDULER_LOCK(intSave);
tcb = OsCurrTaskGet();
*set = tcb->sig.sigPendFlag;
*set = tcb->sig.sigPendFlag;//被阻塞的信号集
SCHEDULER_UNLOCK(intSave);
return LOS_OK;
}
@ -578,7 +581,7 @@ STATIC int FindFirstSetedBit(UINT64 n)
for (count = 0; (count < UINT64_BIT_SIZE) && (n ^ 1ULL); n >>= 1, count++) {}
return (count < UINT64_BIT_SIZE) ? count : (-1);
}
///等待信号时间
int OsSigTimedWaitNoLock(sigset_t *set, siginfo_t *info, unsigned int timeout)
{
LosTaskCB *task = NULL;
@ -589,19 +592,19 @@ int OsSigTimedWaitNoLock(sigset_t *set, siginfo_t *info, unsigned int timeout)
sigcb = &task->sig;
if (sigcb->waitList.pstNext == NULL) {
LOS_ListInit(&sigcb->waitList);
LOS_ListInit(&sigcb->waitList);//初始化信号等待链表
}
/* If pendingflag & set > 0, should clear pending flag */
/* If pendingflag & set > 0, shound clear pending flag */
sigset_t clear = sigcb->sigPendFlag & *set;
if (clear) {
sigcb->sigPendFlag ^= clear;
ret = FindFirstSetedBit((UINT64)clear) + 1;
OsMoveTmpInfoToUnbInfo(sigcb, ret);
} else {
OsSigAddSet(set, SIGKILL);
OsSigAddSet(set, SIGSTOP);
OsSigAddSet(set, SIGKILL);//kill 9 14 必须要处理
OsSigAddSet(set, SIGSTOP);//终止进程的信号也必须处理
sigcb->sigwaitmask |= *set;
sigcb->sigwaitmask |= *set;//按位加到等待集上,也就是说sigwaitmask的信号来了都是要处理的.
OsTaskWaitSetPendMask(OS_TASK_WAIT_SIGNAL, sigcb->sigwaitmask, timeout);
ret = task->ops->wait(task, &sigcb->waitList, timeout);
if (ret == LOS_ERRNO_TSK_TIMEOUT) {
@ -614,7 +617,7 @@ int OsSigTimedWaitNoLock(sigset_t *set, siginfo_t *info, unsigned int timeout)
}
return ret;
}
///让当前任务等待的信号
int OsSigTimedWait(sigset_t *set, siginfo_t *info, unsigned int timeout)
{
int ret;
@ -622,12 +625,12 @@ int OsSigTimedWait(sigset_t *set, siginfo_t *info, unsigned int timeout)
SCHEDULER_LOCK(intSave);
ret = OsSigTimedWaitNoLock(set, info, timeout);
ret = OsSigTimedWaitNoLock(set, info, timeout);//以不加锁的方式等待
SCHEDULER_UNLOCK(intSave);
return ret;
}
///通过信号挂起当前任务
int OsPause(void)
{
LosTaskCB *spcb = NULL;
@ -637,7 +640,7 @@ int OsPause(void)
oldSigprocmask = spcb->sig.sigprocmask;
return OsSigSuspend(&oldSigprocmask);
}
///用参数set代替进程的原有掩码并暂停进程执行直到收到信号再恢复原有掩码并继续执行进程。
int OsSigSuspend(const sigset_t *set)
{
unsigned int intSave;
@ -677,6 +680,7 @@ int OsSigSuspend(const sigset_t *set)
return -EINTR;
}
int OsSigAction(int sig, const sigaction_t *act, sigaction_t *oact)
{
UINTPTR addr;
@ -685,14 +689,17 @@ int OsSigAction(int sig, const sigaction_t *act, sigaction_t *oact)
if (!GOOD_SIGNO(sig) || sig < 1 || act == NULL) {
return -EINVAL;
}
if (LOS_ArchCopyFromUser(&action, act, sizeof(sigaction_t)) != LOS_OK) {
return -EFAULT;
}
if (sig == SIGSYS) {
addr = OsGetSigHandler();
if (addr == 0) {
OsSetSigHandler((unsigned long)(UINTPTR)action.sa_handler);
//将数据从用户空间拷贝到内核空间
if (LOS_ArchCopyFromUser(&action, act, sizeof(sigaction_t)) != LOS_OK) {
return -EFAULT;
}
if (sig == SIGSYS) {//鸿蒙此处通过错误的系统调用 来安装信号处理函数,有点巧妙.
addr = OsGetSigHandler();//是否已存在信号处理函数
if (addr == 0) {//进程没有设置信号处理函数时
OsSetSigHandler((unsigned long)(UINTPTR)action.sa_handler);//设置进程信号处理函数
//void (*sa_handler)(int); //信号处理函数——普通版
//void (*sa_sigaction)(int, siginfo_t *, void *);//信号处理函数——高级版
return LOS_OK;
}
return -EINVAL;
@ -717,11 +724,11 @@ VOID OsSigIntUnlock(VOID)
(VOID)LOS_AtomicSub((Atomic *)&sigcb->sigIntLock, 1);
}
VOID *OsSaveSignalContext(VOID *sp, VOID *newSp)
{
UINTPTR sigHandler;
UINT32 intSave;
LosTaskCB *task = OsCurrTaskGet();
LosProcessCB *process = OsCurrProcessGet();
sig_cb *sigcb = &task->sig;
@ -754,7 +761,7 @@ VOID *OsSaveSignalContext(VOID *sp, VOID *newSp)
OsProcessExitCodeSignalSet(process, signo);
sigcb->sigContext = sp;
OsInitSignalContext(sp, newSp, sigHandler, signo, sigVal);
OsInitSignalContext(sp, newSp, sigHandler, signo, sigVal);//初始化信号上下文
/* sig No bits 00000100 present sig No 3, but 1<< 3 = 00001000, so signo needs minus 1 */
sigcb->sigFlag ^= 1ULL << (signo - 1);
@ -767,6 +774,7 @@ VOID *OsSaveSignalContext(VOID *sp, VOID *newSp)
return sp;
}
VOID *OsRestorSignalContext(VOID *sp)
{
UINT32 intSave;
@ -785,8 +793,8 @@ VOID *OsRestorSignalContext(VOID *sp)
VOID *saveContext = sigcb->sigContext;
sigcb->sigContext = NULL;
sigcb->count--;
process->sigShare = 0;
OsProcessExitCodeSignalClear(process);
process->sigShare = 0; //回到用户态,信号共享清0
OsProcessExitCodeSignalClear(process);//清空进程退出码
SCHEDULER_UNLOCK(intSave);
return saveContext;
}

@ -48,6 +48,23 @@
#include "shell.h"
#endif
/*********************************************
kill [signo | -signo] [pid]
signo ID [1,30]
pid ID [1,MAX_INT]
signo[0,64][1,30]
使
pid256[1-256]
*********************************************/
LITE_OS_SEC_TEXT_MINOR VOID OsPrintKillUsage(VOID)
{
PRINTK("\nkill: usage: kill [sigspec] [pid]\n");

@ -31,7 +31,10 @@
#include "los_task_pri.h"
// 这个函数接受两个参数一个是地址addr另一个是对齐边界boundary。它返回一个对齐后的地址。
// 这个函数首先检查地址加上边界再减去1是否大于地址如果是
// 就返回地址加上边界再减去1然后与~((UINTPTR)(boundary - 1))进行位与运算的结果,
// 否则就返回地址与~((UINTPTR)(boundary - 1))进行位与运算的结果。这实际上是在对地址进行对齐。
LITE_OS_SEC_TEXT UINTPTR LOS_Align(UINTPTR addr, UINT32 boundary)
{
if ((addr + boundary - 1) > addr) {
@ -41,13 +44,14 @@ LITE_OS_SEC_TEXT UINTPTR LOS_Align(UINTPTR addr, UINT32 boundary)
}
}
LITE_OS_SEC_TEXT_MINOR VOID LOS_Msleep(UINT32 msecs)
{
UINT32 interval;
// 这个函数接受一个参数毫秒数msecs。这个函数首先检查毫秒数是否为0如果是就将间隔设置为0。
if (msecs == 0) {
interval = 0;
} else {
// } else { 否则它将毫秒数转换为tick数可能是操作系统的时间单位如果转换后的间隔为0就将间隔设置为1。然后它调用LOS_TaskDelay函数来延迟指定的间隔。
interval = LOS_MS2Tick(msecs);
if (interval == 0) {
interval = 1;

@ -37,7 +37,31 @@
#include "shcmd.h"
#include "shell.h"
#endif
/**
* @file los_stackinfo.c
* @brief
* @verbatim
@note_pic OsExcStackInfo CPU,,CPU
__undef_stack(SMP)
+-------------------+ <--- cpu1 top
| |
| CPU core1 |
| |
+--------------------<--- cpu2 top
| |
| cpu core 2 |
| |
+--------------------<--- cpu3 top
| |
| cpu core 3 |
| |
+--------------------<--- cpu4 top
| |
| cpu core 4 |
| |
+-------------------+
* @endverbatim
*/
const StackInfo *g_stackInfo = NULL; ///< CPU所有工作模式的栈信息
UINT32 g_stackNum; ///< CPU所有工作模式的栈数量

@ -44,7 +44,7 @@ STATIC VOID IdleTimeSliceUpdate(SchedRunqueue *rq, LosTaskCB *taskCB, UINT64 cur
STATIC INT32 IdleParamCompare(const SchedPolicy *sp1, const SchedPolicy *sp2);
STATIC VOID IdlePriorityInheritance(LosTaskCB *owner, const SchedParam *param);
STATIC VOID IdlePriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param);
//空闲调度
const STATIC SchedOps g_idleOps = {
.dequeue = IdleDequeue,
.enqueue = IdleEnqueue,

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022-2023 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2022-2022 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -43,6 +43,8 @@
#define OS_SCHED_READY_MAX 30
#define OS_TIME_SLICE_MIN (INT32)((50 * OS_SYS_NS_PER_US) / OS_NS_PER_CYCLE) /* 50us */
//基于优先数调度算法 Highest-Priority-First (HPF)
STATIC HPFRunqueue g_schedHPF;
STATIC VOID HPFDequeue(SchedRunqueue *rq, LosTaskCB *taskCB);
@ -63,7 +65,7 @@ STATIC VOID HPFTimeSliceUpdate(SchedRunqueue *rq, LosTaskCB *taskCB, UINT64 curr
STATIC INT32 HPFParamCompare(const SchedPolicy *sp1, const SchedPolicy *sp2);
STATIC VOID HPFPriorityInheritance(LosTaskCB *owner, const SchedParam *param);
STATIC VOID HPFPriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param);
//优先级调度算法操作
const STATIC SchedOps g_priorityOps = {
.dequeue = HPFDequeue,
.enqueue = HPFEnqueue,
@ -243,7 +245,7 @@ STATIC INLINE VOID PriQueInsert(HPFRunqueue *rq, LosTaskCB *taskCB)
taskCB->taskStatus &= ~OS_TASK_STATUS_BLOCKED;
taskCB->taskStatus |= OS_TASK_STATUS_READY;
}
//入就绪队列
STATIC VOID HPFEnqueue(SchedRunqueue *rq, LosTaskCB *taskCB)
{
#ifdef LOSCFG_SCHED_HPF_DEBUG
@ -253,14 +255,14 @@ STATIC VOID HPFEnqueue(SchedRunqueue *rq, LosTaskCB *taskCB)
#endif
PriQueInsert(rq->hpfRunqueue, taskCB);
}
//出就绪队列
STATIC VOID HPFDequeue(SchedRunqueue *rq, LosTaskCB *taskCB)
{
SchedHPF *sched = (SchedHPF *)&taskCB->sp;
if (taskCB->taskStatus & OS_TASK_STATUS_READY) {
if (taskCB->taskStatus & OS_TASK_STATUS_READY) {//是否有就绪状态
PriQueDelete(rq->hpfRunqueue, sched->basePrio, &taskCB->pendList, sched->priority);
taskCB->taskStatus &= ~OS_TASK_STATUS_READY;
taskCB->taskStatus &= ~OS_TASK_STATUS_READY;//更新成非就绪状态
}
}
@ -475,7 +477,7 @@ STATIC VOID HPFPriorityInheritance(LosTaskCB *owner, const SchedParam *param)
LOS_BitmapSet(&sp->priBitmap, sp->priority);
sp->priority = param->priority;
}
/// 恢复任务优先级
STATIC VOID HPFPriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const SchedParam *param)
{
UINT16 priority;
@ -498,8 +500,8 @@ STATIC VOID HPFPriorityRestore(LosTaskCB *owner, const LOS_DL_LIST *list, const
}
if ((list != NULL) && !LOS_ListEmpty((LOS_DL_LIST *)list)) {
priority = LOS_HighBitGet(sp->priBitmap);
LOS_DL_LIST_FOR_EACH_ENTRY(pendedTask, list, LosTaskCB, pendList) {
priority = LOS_HighBitGet(sp->priBitmap);//获取在历史调度中最高优先级
LOS_DL_LIST_FOR_EACH_ENTRY(pendedTask, list, LosTaskCB, pendList) {//遍历链表
SchedHPF *pendSp = (SchedHPF *)&pendedTask->sp;
if ((pendedTask->ops == owner->ops) && (priority != pendSp->priority)) {
LOS_BitmapClr(&sp->priBitmap, pendSp->priority);
@ -537,7 +539,7 @@ VOID HPFProcessDefaultSchedParamGet(SchedParam *param)
{
param->basePrio = OS_USER_PROCESS_PRIORITY_HIGHEST;
}
//HPF 调度策略初始化
VOID HPFSchedPolicyInit(SchedRunqueue *rq)
{
if (ArchCurrCpuid() > 0) {

@ -1,6 +1,6 @@
/*
* Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2020-2022 Huawei Device Co., Ltd. All rights reserved.
* Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@ -30,7 +30,7 @@
*/
#include "los_sortlink_pri.h"
/// 排序链表初始化
VOID OsSortLinkInit(SortLinkAttribute *sortLinkHeader)
{
LOS_ListInit(&sortLinkHeader->sortLink);
@ -38,38 +38,47 @@ VOID OsSortLinkInit(SortLinkAttribute *sortLinkHeader)
sortLinkHeader->nodeNum = 0;
}
/*!
* @brief OsAddNode2SortLink ,
*
* @param sortLinkHeader
* @param sortList
* @return
*
* @see
*/
STATIC INLINE VOID AddNode2SortLink(SortLinkAttribute *sortLinkHeader, SortLinkList *sortList)
{
LOS_DL_LIST *head = (LOS_DL_LIST *)&sortLinkHeader->sortLink;
LOS_DL_LIST *head = (LOS_DL_LIST *)&sortLinkHeader->sortLink; //获取双向链表
if (LOS_ListEmpty(head)) {
LOS_ListHeadInsert(head, &sortList->sortLinkNode);
sortLinkHeader->nodeNum++;
if (LOS_ListEmpty(head)) { //空链表,直接插入
LOS_ListHeadInsert(head, &sortList->sortLinkNode);//插入结点
sortLinkHeader->nodeNum++;//CPU的工作量增加了
return;
}
//链表不为空时,插入分三种情况, responseTime 大于,等于,小于的处理
SortLinkList *listSorted = LOS_DL_LIST_ENTRY(head->pstNext, SortLinkList, sortLinkNode);
if (listSorted->responseTime > sortList->responseTime) {
LOS_ListAdd(head, &sortList->sortLinkNode);
sortLinkHeader->nodeNum++;
return;
} else if (listSorted->responseTime == sortList->responseTime) {
LOS_ListAdd(head->pstNext, &sortList->sortLinkNode);
if (listSorted->responseTime > sortList->responseTime) {//如果要插入的节点 responseTime 最小
LOS_ListAdd(head, &sortList->sortLinkNode);//能跑进来说明是最小的,直接插入到第一的位置
sortLinkHeader->nodeNum++;//CPU的工作量增加了
return;//直接返回了
} else if (listSorted->responseTime == sortList->responseTime) {//相等的情况
LOS_ListAdd(head->pstNext, &sortList->sortLinkNode);//插到第二的位置
sortLinkHeader->nodeNum++;
return;
}
LOS_DL_LIST *prevNode = head->pstPrev;
do {
listSorted = LOS_DL_LIST_ENTRY(prevNode, SortLinkList, sortLinkNode);
if (listSorted->responseTime <= sortList->responseTime) {
//处理大于链表中第一个responseTime的情况,需要遍历链表
LOS_DL_LIST *prevNode = head->pstPrev;//注意这里用的前一个结点,也就是说前一个结点中的responseTime 是最大的
do { // @note_good 这里写的有点妙,也是双向链表的魅力所在
listSorted = LOS_DL_LIST_ENTRY(prevNode, SortLinkList, sortLinkNode);//一个个遍历,先比大的再比小的
if (listSorted->responseTime <= sortList->responseTime) {//如果时间比你小,就插到后面
LOS_ListAdd(prevNode, &sortList->sortLinkNode);
sortLinkHeader->nodeNum++;
break;
}
prevNode = prevNode->pstPrev;
} while (1);
prevNode = prevNode->pstPrev;//再拿上一个更小的responseTime进行比较
} while (1);//死循环
}
VOID OsAdd2SortLink(SortLinkAttribute *head, SortLinkList *node, UINT64 responseTime, UINT16 idleCpu)

Loading…
Cancel
Save