# File: cbmc/codedetect/.github/workflows/ci.yml
name: CI/CD Pipeline

# Triggers: pushes/PRs to the main branches, plus a daily scheduled run.
on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main, develop]
  schedule:
    # Run daily at 02:00 (UTC — GitHub Actions cron is always UTC)
    - cron: '0 2 * * *'

env:
  # Quoted so YAML does not read 3.11 as the float 3.11 -> "3.11" stays exact.
  PYTHON_VERSION: '3.11'
  # NOTE(review): NODE_VERSION is defined but no job below references it — confirm before removing.
  NODE_VERSION: '18'
jobs:
  # Static analysis: lint, formatting, import order, and a bandit security scan.
  code-quality:
    name: Code Quality
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
      - name: Set up Python
        # v5: v4 runs on the deprecated Node 16 runtime.
        uses: actions/setup-python@v5
        with:
          python-version: ${{ env.PYTHON_VERSION }}
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements-dev.txt
      - name: Run flake8
        run: |
          flake8 src/ tests/ --max-line-length=120 --exclude=__pycache__
      - name: Run black formatting check
        run: |
          black --check --diff src/ tests/
      - name: Run isort import check
        run: |
          isort --check-only --diff src/ tests/
      - name: Run bandit security scan
        run: |
          # '|| true' keeps the job green; findings are reviewed via the uploaded artifact.
          bandit -r src/ -f json -o bandit-report.json || true
      - name: Upload security scan results
        # v4: artifact actions v3 were shut down by GitHub (Jan 2025) and now fail.
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: security-scan-results
          path: bandit-report.json
unit-tests:
name: Unit Tests
runs-on: ubuntu-latest
needs: code-quality
strategy:
matrix:
test-type: [unit, integration, performance]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install -r requirements-dev.txt
- name: Install CBMC
run: |
sudo apt-get update
sudo apt-get install -y cbmc
- name: Run tests
run: |
chmod +x scripts/run_tests.sh
./scripts/run_tests.sh --verbose --coverage --junit --html ${{ matrix.test-type }}
- name: Upload test results
uses: actions/upload-artifact@v3
if: always()
with:
name: test-results-${{ matrix.test-type }}
path: |
test_reports/
htmlcov/
junit-*.xml
coverage.xml
regression-tests:
name: Regression Tests
runs-on: ubuntu-latest
needs: unit-tests
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install -r requirements-dev.txt
- name: Setup FreeRTOS environment
run: |
chmod +x scripts/freertos-setup.sh
./scripts/freertos-setup.sh --dry-run || true
- name: Run regression tests
run: |
chmod +x scripts/run_tests.sh
./scripts/run_tests.sh --verbose --junit regression
- name: Upload regression results
uses: actions/upload-artifact@v3
if: always()
with:
name: regression-results
path: test_reports/
performance-benchmarks:
name: Performance Benchmarks
runs-on: ubuntu-latest
needs: unit-tests
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
pip install -r requirements-dev.txt
pip install matplotlib psutil
- name: Run performance benchmarks
run: |
chmod +x tools/run_benchmarks.py
python tools/run_benchmarks.py --output-dir benchmark_results --iterations 3
- name: Upload benchmark results
uses: actions/upload-artifact@v3
if: always()
with:
name: benchmark-results
path: benchmark_results/
documentation-build:
name: Documentation Build
runs-on: ubuntu-latest
needs: code-quality
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements-dev.txt
pip install mkdocs mkdocs-material pymdown-extensions
- name: Generate API documentation
run: |
chmod +x scripts/generate_api_docs.py
python scripts/generate_api_docs.py --output-dir docs/api
- name: Build documentation
run: |
if [ -f "mkdocs.yml" ]; then
mkdocs build
fi
- name: Upload documentation
uses: actions/upload-artifact@v3
with:
name: documentation
path: |
docs/api/
site/
security-scan:
name: Security Scan
runs-on: ubuntu-latest
needs: code-quality
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@master
with:
scan-type: 'fs'
scan-ref: '.'
format: 'sarif'
output: 'trivy-results.sarif'
- name: Upload Trivy scan results
uses: github/codeql-action/upload-sarif@v2
with:
sarif_file: 'trivy-results.sarif'
build-and-test-docker:
name: Build and Test Docker
runs-on: ubuntu-latest
needs: [unit-tests, regression-tests]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build Docker image
run: |
if [ -f "Dockerfile" ]; then
docker build -t codedetect:latest .
fi
- name: Test Docker image
run: |
if [ -f "Dockerfile" ]; then
docker run --rm codedetect:latest python -c "import sys; print('Docker image test successful')"
fi
coverage-report:
name: Coverage Report
runs-on: ubuntu-latest
needs: [unit-tests, regression-tests]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Download all artifacts
uses: actions/download-artifact@v3
- name: Generate coverage report
run: |
if [ -f "coverage.xml" ]; then
pip install coverage
coverage xml
fi
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
with:
file: ./coverage.xml
flags: unittests
name: codecov-umbrella
deployment-staging:
name: Deploy to Staging
runs-on: ubuntu-latest
needs: [unit-tests, regression-tests, documentation-build, security-scan]
if: github.ref == 'refs/heads/develop'
environment: staging
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Deploy to staging
run: |
echo "Deploying to staging environment..."
# 这里添加实际的部署步骤
- name: Run smoke tests
run: |
echo "Running smoke tests..."
# 这里添加冒烟测试步骤
deployment-production:
name: Deploy to Production
runs-on: ubuntu-latest
needs: [unit-tests, regression-tests, documentation-build, security-scan]
if: github.ref == 'refs/heads/main'
environment: production
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Deploy to production
run: |
echo "Deploying to production environment..."
# 这里添加实际的生产部署步骤
- name: Health check
run: |
echo "Running health checks..."
# 这里添加健康检查步骤
notify-results:
name: Notify Results
runs-on: ubuntu-latest
needs: [unit-tests, regression-tests, performance-benchmarks]
if: always()
steps:
- name: Download all artifacts
uses: actions/download-artifact@v3
- name: Generate summary report
run: |
echo "## CI/CD Pipeline Summary" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
if [ -f "test-results-unit/junit-unit_tests.xml" ]; then
echo "### Unit Tests" >> $GITHUB_STEP_SUMMARY
echo "✅ Completed successfully" >> $GITHUB_STEP_SUMMARY
fi
if [ -f "regression-results/junit-regression_tests.xml" ]; then
echo "### Regression Tests" >> $GITHUB_STEP_SUMMARY
echo "✅ Completed successfully" >> $GITHUB_STEP_SUMMARY
fi
if [ -f "benchmark-results/complete_benchmark_suite.json" ]; then
echo "### Performance Benchmarks" >> $GITHUB_STEP_SUMMARY
echo "✅ Completed successfully" >> $GITHUB_STEP_SUMMARY
fi
echo "" >> $GITHUB_STEP_SUMMARY
echo "Pipeline completed at: $(date)" >> $GITHUB_STEP_SUMMARY