#!/bin/bash
#
# CodeDetect comprehensive test runner.
# Supports unit, integration, performance and regression tests.
#
# NOTE(review): the original paste began with web-UI chrome ("You can not
# select more than 25 topics", line/size counters) and stray "|" filler
# lines; they were removed so the shebang is on line 1 and the file is
# valid shell.

set -e

# ANSI color definitions (readonly: never reassigned below).
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly CYAN='\033[0;36m'
readonly MAGENTA='\033[0;35m'   # currently unused, kept for consistency
readonly NC='\033[0m' # No Color

# Script locations, resolved relative to this file rather than the CWD.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
TEST_DIR="$PROJECT_ROOT/tests"
REPORT_DIR="$PROJECT_ROOT/test_reports"

# Known test types and modules.
declare -a TEST_TYPES=("unit" "integration" "performance" "regression")
declare -a TEST_MODULES=("verify" "mutate" "ui" "freertos" "system")

# Global state, mutated by parse_arguments and the run_* functions.
VERBOSE=false
PARALLEL=false
COVERAGE=false
HTML_REPORT=false
JUNIT_REPORT=false
DRY_RUN=false   # fix: previously referenced but never initialized
FAILED_TESTS=0
TOTAL_TESTS=0
START_TIME=""
END_TIME=""
# ---------------------------------------------------------------------------
# Logging helpers: colored level tag, timestamp, then the message in $1.
# ---------------------------------------------------------------------------
log_info() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${BLUE}[INFO]${NC} ${ts} $1"
}
# Log a success-level message with timestamp.
log_success() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${GREEN}[SUCCESS]${NC} ${ts} $1"
}
# Log a warning-level message with timestamp.
log_warning() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${YELLOW}[WARNING]${NC} ${ts} $1"
}
# Log an error-level message with timestamp.
# NOTE: writes to stdout like the other helpers (not stderr), matching the
# original behavior; callers never separate the streams.
log_error() {
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${RED}[ERROR]${NC} ${ts} $1"
}
# Log a debug-level message with timestamp; silent unless VERBOSE=true.
log_debug() {
    [ "$VERBOSE" = true ] || return 0
    local ts
    ts=$(date '+%Y-%m-%d %H:%M:%S')
    echo -e "${CYAN}[DEBUG]${NC} ${ts} $1"
}
# Print usage information and the option summary.
# The heredoc below is user-facing runtime output and is intentionally
# left in Chinese. (Heredoc indentation was lost in the scraped paste;
# the layout here is reconstructed — TODO confirm against the original.)
show_help() {
    cat << EOF
CodeDetect测试运行脚本

用法: $0 [选项] [测试类型...]

测试类型:
  unit         - 运行单元测试
  integration  - 运行集成测试
  performance  - 运行性能测试
  regression   - 运行回归测试
  all          - 运行所有测试 (默认)

选项:
  -h, --help           显示帮助信息
  -v, --verbose        详细输出
  -p, --parallel       并行运行测试
  -c, --coverage       生成覆盖率报告
  -j, --junit          生成JUnit格式报告
  -m, --module=MOD     指定测试模块 (verify, mutate, ui, freertos, system)
  -r, --report-dir=DIR 指定报告目录
  --html               生成HTML报告
  --fail-fast          遇到失败时停止
  --dry-run            仅显示要运行的测试

示例:
  $0 --verbose --coverage unit integration
  $0 --module=verify --parallel
  $0 --html --junit all
EOF
}
# Parse command line arguments.
# Sets globals: VERBOSE, PARALLEL, COVERAGE, JUNIT_REPORT, HTML_REPORT,
#               DRY_RUN, REPORT_DIR, SELECTED_TEST_TYPES, SELECTED_MODULES.
# Exits: 0 after --help, 1 on an unknown or malformed option.
parse_arguments() {
    local test_types=()
    local modules=()

    while [[ $# -gt 0 ]]; do
        case $1 in
            -h|--help)
                show_help
                exit 0
                ;;
            -v|--verbose)
                VERBOSE=true
                shift
                ;;
            -p|--parallel)
                PARALLEL=true
                shift
                ;;
            -c|--coverage)
                COVERAGE=true
                shift
                ;;
            -j|--junit)
                JUNIT_REPORT=true
                shift
                ;;
            --html)
                HTML_REPORT=true
                shift
                ;;
            --fail-fast)
                # NOTE(review): 'set -e' is already active from the top of
                # the script, so this flag is effectively a no-op — confirm
                # whether a non-fail-fast default was ever intended.
                set -e
                shift
                ;;
            --dry-run)
                DRY_RUN=true
                shift
                ;;
            -m|--module)
                # Space-separated form: -m MOD / --module MOD.
                if [[ $# -lt 2 ]]; then
                    log_error "未知选项: $1"
                    exit 1
                fi
                modules+=("$2")
                shift 2
                ;;
            --module=*)
                # '=' form as advertised by --help: --module=MOD.
                modules+=("${1#*=}")
                shift
                ;;
            -r|--report-dir)
                if [[ $# -lt 2 ]]; then
                    log_error "未知选项: $1"
                    exit 1
                fi
                REPORT_DIR="$2"
                shift 2
                ;;
            --report-dir=*)
                # '=' form as advertised by --help: --report-dir=DIR.
                REPORT_DIR="${1#*=}"
                shift
                ;;
            unit|integration|performance|regression|all)
                if [ "$1" = "all" ]; then
                    test_types=("${TEST_TYPES[@]}")
                else
                    test_types+=("$1")
                fi
                shift
                ;;
            *)
                log_error "未知选项: $1"
                show_help
                exit 1
                ;;
        esac
    done

    # Default to all test types when none were given.
    if [ ${#test_types[@]} -eq 0 ]; then
        test_types=("${TEST_TYPES[@]}")
    fi

    SELECTED_TEST_TYPES=("${test_types[@]}")
    SELECTED_MODULES=("${modules[@]}")
}
# Check that python3, pip3, pytest and the required pytest plugins are
# installed. Exits with status 1 when anything is missing.
check_dependencies() {
    log_info "检查测试依赖..."

    local missing_deps=()

    # python3 / pip3 binaries.
    if ! command -v python3 &> /dev/null; then
        missing_deps+=("python3")
    fi

    if ! command -v pip3 &> /dev/null; then
        missing_deps+=("pip3")
    fi

    # pytest itself.
    if ! python3 -c "import pytest" &> /dev/null; then
        missing_deps+=("pytest")
    fi

    # pytest plugins. The pip package name differs from the importable
    # module name ("pytest-cov" installs "pytest_cov", "pytest-xdist"
    # installs "xdist", ...), so probe the module but report the package.
    # The original code ran `python3 -c "import pytest-cov"`, which is a
    # Python syntax error and therefore always flagged every plugin as
    # missing.
    local pkg mod
    for pkg in pytest-cov pytest-html pytest-xdist pytest-mock; do
        case $pkg in
            pytest-xdist) mod="xdist" ;;
            *)            mod="${pkg//-/_}" ;;
        esac
        if ! python3 -c "import $mod" &> /dev/null; then
            missing_deps+=("$pkg")
        fi
    done

    if [ ${#missing_deps[@]} -gt 0 ]; then
        log_error "缺少测试依赖: ${missing_deps[*]}"
        log_info "请运行: pip install ${missing_deps[*]}"
        exit 1
    fi

    log_success "所有依赖已安装"
}
# Create the report directory tree; mkdir -p creates $REPORT_DIR itself
# implicitly as the parent of each subdirectory.
create_report_directory() {
    local sub
    for sub in coverage junit html; do
        mkdir -p "$REPORT_DIR/$sub"
    done
}
# Run the unit test suite via pytest.
# Reads:  TEST_DIR, SELECTED_MODULES, VERBOSE, PARALLEL, COVERAGE,
#         HTML_REPORT, JUNIT_REPORT, REPORT_DIR, DRY_RUN.
# Writes: increments TOTAL_TESTS; increments FAILED_TESTS on failure.
run_unit_tests() {
    log_info "运行单元测试..."

    local unit_dir="$TEST_DIR/unit"
    local test_files=()

    # Collect test files: everything, or only the selected modules.
    if [ ${#SELECTED_MODULES[@]} -eq 0 ]; then
        test_files=("$unit_dir"/test_*.py)
    else
        local module
        for module in "${SELECTED_MODULES[@]}"; do
            case $module in
                verify)
                    test_files+=("$unit_dir/test_verify_module.py")
                    ;;
                mutate)
                    test_files+=("$unit_dir/test_mutate_module.py")
                    ;;
                ui)
                    test_files+=("$unit_dir/test_ui_api.py")
                    ;;
                *)
                    log_warning "未知的单元测试模块: $module"
                    ;;
            esac
        done
    fi

    # Keep only files that exist (also drops the literal "test_*.py"
    # pattern an unmatched glob leaves behind).
    local existing_files=() file
    for file in "${test_files[@]}"; do
        if [ -f "$file" ]; then
            existing_files+=("$file")
        fi
    done

    if [ ${#existing_files[@]} -eq 0 ]; then
        log_warning "没有找到单元测试文件"
        return 0
    fi

    log_debug "单元测试文件: ${existing_files[*]}"

    # Build the pytest command as an array so paths containing spaces
    # survive (the original concatenated a string and ran it via eval).
    local pytest_cmd=(python3 -m pytest)

    if [ "$VERBOSE" = true ]; then
        pytest_cmd+=(-v)
    fi

    if [ "$PARALLEL" = true ]; then
        pytest_cmd+=(-n auto)
    fi

    if [ "$COVERAGE" = true ]; then
        pytest_cmd+=(--cov=src)
        pytest_cmd+=(--cov-report=html:"$REPORT_DIR/coverage/html")
        pytest_cmd+=(--cov-report=term-missing)
        pytest_cmd+=(--cov-report=xml:"$REPORT_DIR/coverage/coverage.xml")
    fi

    if [ "$HTML_REPORT" = true ]; then
        pytest_cmd+=(--html="$REPORT_DIR/html/unit_tests.html")
        pytest_cmd+=(--self-contained-html)
    fi

    if [ "$JUNIT_REPORT" = true ]; then
        pytest_cmd+=(--junitxml="$REPORT_DIR/junit/unit_tests.xml")
    fi

    pytest_cmd+=("${existing_files[@]}")

    log_debug "执行命令: ${pytest_cmd[*]}"

    if [ "$DRY_RUN" = true ]; then
        log_info "[DRY RUN] 将运行单元测试: ${existing_files[*]}"
        return 0
    fi

    # Run the tests. The original used ((FAILED_TESTS++)) / ((TOTAL_TESTS++)),
    # which return status 1 when the pre-increment value is 0 and would
    # abort the whole script under 'set -e'; plain arithmetic assignment
    # always succeeds.
    if "${pytest_cmd[@]}"; then
        log_success "单元测试通过"
    else
        log_error "单元测试失败"
        FAILED_TESTS=$((FAILED_TESTS + 1))
    fi

    TOTAL_TESTS=$((TOTAL_TESTS + 1))
}
# Run the integration test suite via pytest.
# Reads:  TEST_DIR, SELECTED_MODULES, VERBOSE, PARALLEL, COVERAGE,
#         HTML_REPORT, JUNIT_REPORT, REPORT_DIR, DRY_RUN.
# Writes: increments TOTAL_TESTS; increments FAILED_TESTS on failure.
run_integration_tests() {
    log_info "运行集成测试..."

    local integration_dir="$TEST_DIR/integration"
    local test_files=()

    # Collect test files: everything, or only the selected modules.
    if [ ${#SELECTED_MODULES[@]} -eq 0 ]; then
        test_files=("$integration_dir"/test_*.py)
    else
        local module
        for module in "${SELECTED_MODULES[@]}"; do
            case $module in
                system)
                    test_files+=("$integration_dir/test_complete_pipeline.py")
                    ;;
                freertos)
                    test_files+=("$integration_dir/test_freertos_verification.py")
                    ;;
                *)
                    log_warning "未知的集成测试模块: $module"
                    ;;
            esac
        done
    fi

    # Keep only files that exist (also drops an unmatched glob pattern).
    local existing_files=() file
    for file in "${test_files[@]}"; do
        if [ -f "$file" ]; then
            existing_files+=("$file")
        fi
    done

    if [ ${#existing_files[@]} -eq 0 ]; then
        log_warning "没有找到集成测试文件"
        return 0
    fi

    log_debug "集成测试文件: ${existing_files[*]}"

    # Build the pytest command as an array (space-safe; no eval).
    local pytest_cmd=(python3 -m pytest)

    if [ "$VERBOSE" = true ]; then
        pytest_cmd+=(-v)
    fi

    if [ "$PARALLEL" = true ]; then
        pytest_cmd+=(-n auto)
    fi

    if [ "$COVERAGE" = true ]; then
        pytest_cmd+=(--cov=src)
        pytest_cmd+=(--cov-report=term-missing)
    fi

    if [ "$HTML_REPORT" = true ]; then
        pytest_cmd+=(--html="$REPORT_DIR/html/integration_tests.html")
        pytest_cmd+=(--self-contained-html)
    fi

    if [ "$JUNIT_REPORT" = true ]; then
        pytest_cmd+=(--junitxml="$REPORT_DIR/junit/integration_tests.xml")
    fi

    pytest_cmd+=("${existing_files[@]}")

    log_debug "执行命令: ${pytest_cmd[*]}"

    if [ "$DRY_RUN" = true ]; then
        log_info "[DRY RUN] 将运行集成测试: ${existing_files[*]}"
        return 0
    fi

    # Arithmetic assignment instead of ((VAR++)): the latter returns
    # status 1 when VAR is 0 and would kill the script under 'set -e'.
    if "${pytest_cmd[@]}"; then
        log_success "集成测试通过"
    else
        log_error "集成测试失败"
        FAILED_TESTS=$((FAILED_TESTS + 1))
    fi

    TOTAL_TESTS=$((TOTAL_TESTS + 1))
}
# Run the performance test suite via pytest.
# Performance tests are deliberately never parallelized, so PARALLEL is
# ignored here (timings would be skewed by contention).
# Writes: increments TOTAL_TESTS; increments FAILED_TESTS on failure.
run_performance_tests() {
    log_info "运行性能测试..."

    local performance_dir="$TEST_DIR/performance"
    local test_file="$performance_dir/test_system_performance.py"

    if [ ! -f "$test_file" ]; then
        log_warning "性能测试文件不存在: $test_file"
        return 0
    fi

    log_debug "性能测试文件: $test_file"

    # Build the pytest command as an array (space-safe; no eval).
    local pytest_cmd=(python3 -m pytest)

    if [ "$VERBOSE" = true ]; then
        pytest_cmd+=(-v)
    fi

    if [ "$HTML_REPORT" = true ]; then
        pytest_cmd+=(--html="$REPORT_DIR/html/performance_tests.html")
        pytest_cmd+=(--self-contained-html)
    fi

    if [ "$JUNIT_REPORT" = true ]; then
        pytest_cmd+=(--junitxml="$REPORT_DIR/junit/performance_tests.xml")
    fi

    pytest_cmd+=("$test_file")

    log_debug "执行命令: ${pytest_cmd[*]}"

    if [ "$DRY_RUN" = true ]; then
        log_info "[DRY RUN] 将运行性能测试: $test_file"
        return 0
    fi

    # Arithmetic assignment instead of ((VAR++)): the latter returns
    # status 1 when VAR is 0 and would kill the script under 'set -e'.
    if "${pytest_cmd[@]}"; then
        log_success "性能测试通过"
    else
        log_error "性能测试失败"
        FAILED_TESTS=$((FAILED_TESTS + 1))
    fi

    TOTAL_TESTS=$((TOTAL_TESTS + 1))
}
# Run the regression test suite via pytest.
# Writes: increments TOTAL_TESTS; increments FAILED_TESTS on failure.
run_regression_tests() {
    log_info "运行回归测试..."

    local regression_dir="$TEST_DIR/regression"
    local test_file="$regression_dir/test_regression_suite.py"

    if [ ! -f "$test_file" ]; then
        log_warning "回归测试文件不存在: $test_file"
        return 0
    fi

    log_debug "回归测试文件: $test_file"

    # Build the pytest command as an array (space-safe; no eval).
    local pytest_cmd=(python3 -m pytest)

    if [ "$VERBOSE" = true ]; then
        pytest_cmd+=(-v)
    fi

    if [ "$PARALLEL" = true ]; then
        pytest_cmd+=(-n auto)
    fi

    if [ "$HTML_REPORT" = true ]; then
        pytest_cmd+=(--html="$REPORT_DIR/html/regression_tests.html")
        pytest_cmd+=(--self-contained-html)
    fi

    if [ "$JUNIT_REPORT" = true ]; then
        pytest_cmd+=(--junitxml="$REPORT_DIR/junit/regression_tests.xml")
    fi

    pytest_cmd+=("$test_file")

    log_debug "执行命令: ${pytest_cmd[*]}"

    if [ "$DRY_RUN" = true ]; then
        log_info "[DRY RUN] 将运行回归测试: $test_file"
        return 0
    fi

    # Arithmetic assignment instead of ((VAR++)): the latter returns
    # status 1 when VAR is 0 and would kill the script under 'set -e'.
    if "${pytest_cmd[@]}"; then
        log_success "回归测试通过"
    else
        log_error "回归测试失败"
        FAILED_TESTS=$((FAILED_TESTS + 1))
    fi

    TOTAL_TESTS=$((TOTAL_TESTS + 1))
}
# Generate the markdown test summary report in $REPORT_DIR/test_summary.md.
# Reads: TOTAL_TESTS, FAILED_TESTS, SELECTED_TEST_TYPES, START/END_TIME
# and the option flags. No-op when no tests were run.
generate_test_report() {
    if [ "$TOTAL_TESTS" -eq 0 ]; then
        log_warning "没有运行任何测试"
        return 0
    fi

    local report_file="$REPORT_DIR/test_summary.md"
    local passed_tests=$((TOTAL_TESTS - FAILED_TESTS))
    # Safe: TOTAL_TESTS > 0 is guaranteed by the guard above.
    local success_rate=$((passed_tests * 100 / TOTAL_TESTS))

    cat > "$report_file" << EOF
# CodeDetect测试报告

## 测试摘要

- **测试总数**: $TOTAL_TESTS
- **通过测试**: $passed_tests
- **失败测试**: $FAILED_TESTS
- **成功率**: ${success_rate}%

## 测试详情

| 测试类型 | 状态 | 备注 |
|---------|------|------|
EOF

    # Per-type result rows. NOTE(review): failures are only tracked
    # globally, so when any suite fails, EVERY row is marked failed —
    # per-type tracking would be needed for accurate rows.
    local test_type
    for test_type in "${SELECTED_TEST_TYPES[@]}"; do
        local status="✅ 通过"
        if [ "$FAILED_TESTS" -gt 0 ]; then
            status="❌ 失败"
        fi
        echo "| $test_type | $status | - |" >> "$report_file"
    done

    # Execution time: computed with awk instead of bc — bc is never
    # verified by check_dependencies and is absent on many systems.
    cat >> "$report_file" << EOF

## 运行环境

- **系统**: $(uname -s)
- **架构**: $(uname -m)
- **Python版本**: $(python3 --version)
- **Pytest版本**: $(python3 -c "import pytest; print(pytest.__version__)")
- **测试时间**: $(date '+%Y-%m-%d %H:%M:%S')
- **执行时间**: $(awk -v s="$START_TIME" -v e="$END_TIME" 'BEGIN{printf "%.2f", e - s}') 秒

## 测试配置

- **详细输出**: $([ "$VERBOSE" = true ] && echo "是" || echo "否")
- **并行运行**: $([ "$PARALLEL" = true ] && echo "是" || echo "否")
- **覆盖率报告**: $([ "$COVERAGE" = true ] && echo "是" || echo "否")
- **HTML报告**: $([ "$HTML_REPORT" = true ] && echo "是" || echo "否")
- **JUnit报告**: $([ "$JUNIT_REPORT" = true ] && echo "是" || echo "否")

## 报告文件

EOF

    # Links to the optional report artifacts.
    if [ "$HTML_REPORT" = true ]; then
        echo "- HTML报告: $REPORT_DIR/html/" >> "$report_file"
    fi

    if [ "$COVERAGE" = true ]; then
        echo "- 覆盖率报告: $REPORT_DIR/coverage/" >> "$report_file"
    fi

    if [ "$JUNIT_REPORT" = true ]; then
        echo "- JUnit报告: $REPORT_DIR/junit/" >> "$report_file"
    fi

    log_success "测试报告已生成: $report_file"
}
# Print the final summary; exits with status 1 when any suite failed.
show_test_results() {
    local passed=$((TOTAL_TESTS - FAILED_TESTS))

    echo ""
    echo "======================================="
    echo "测试结果摘要"
    echo "======================================="
    echo ""
    echo "总测试套件: $TOTAL_TESTS"
    echo "通过: $passed"
    echo "失败: $FAILED_TESTS"
    echo ""

    if [ "$FAILED_TESTS" -gt 0 ]; then
        echo -e "${RED}❌ 有测试失败,请检查详细报告${NC}"
        exit 1
    fi

    echo -e "${GREEN}✅ 所有测试通过${NC}"
}
# Log the effective configuration (called only in verbose mode).
_print_config() {
    log_info "测试配置:"
    log_info " 测试类型: ${SELECTED_TEST_TYPES[*]}"
    log_info " 测试模块: ${SELECTED_MODULES[*]:-all}"
    log_info " 详细输出: $VERBOSE"
    log_info " 并行运行: $PARALLEL"
    log_info " 覆盖率: $COVERAGE"
    log_info " HTML报告: $HTML_REPORT"
    log_info " JUnit报告: $JUNIT_REPORT"
    log_info " 报告目录: $REPORT_DIR"
    echo ""
}

# Main driver: parse arguments, verify dependencies, run the selected
# test suites, then produce reports and the final summary.
main() {
    parse_arguments "$@"

    if [ "$VERBOSE" = true ]; then
        _print_config
    fi

    check_dependencies
    create_report_directory

    # NOTE(review): %N is a GNU date extension; on BSD/macOS it expands
    # to a literal 'N' — confirm the target platforms.
    START_TIME=$(date +%s.%N)

    # Dispatch each selected suite to its runner.
    local suite
    for suite in "${SELECTED_TEST_TYPES[@]}"; do
        case $suite in
            unit)        run_unit_tests ;;
            integration) run_integration_tests ;;
            performance) run_performance_tests ;;
            regression)  run_regression_tests ;;
            *)           log_error "未知的测试类型: $suite" ;;
        esac
    done

    END_TIME=$(date +%s.%N)

    generate_test_report
    show_test_results
}
# Script entry point: run main only when executed directly, not when the
# file is sourced (e.g. for testing individual functions).
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
    main "$@"
fi