# gtat-tech-career-kickstarter — .gitea/workflows/cd_tests.yml
name: Continuous Deployment Tests

on:
  push:
    branches:
      - main
  workflow_dispatch:
    inputs:
      deployment_dir:
        description: "Root deployment directory (overrides CK_DEPLOYMENT_DIR)"
        required: false
        default: ""

env:
  # Falls back to '~/deployment' when the dispatch input is absent or empty
  # (an empty string is falsy for the expression || operator; push events
  # have no inputs at all, so the fallback also covers them).
  CK_DEPLOYMENT_DIR: ${{ github.event.inputs.deployment_dir || '~/deployment' }}
  CK_TESTS_DIR: ${{ github.workspace }}/solution/tests
  # pytest-xdist worker count; quoted so the env value is an unambiguous string.
  WORKERS_COUNT: "4"
jobs:
  run_system_tests:
    runs-on: ubuntu-latest
    # A job that exceeds this limit is cancelled; the final-report step
    # detects that via job.status and records testing_timed_out=true.
    timeout-minutes: 25
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.11"

      # Scans CK_DEPLOYMENT_DIR/<user>/<deployment>/ for the oldest deployment
      # (by directory mtime) that has exactly one wheel and no started.flag,
      # then claims it by touching started.flag.
      - name: Find oldest untested deployment
        id: find_deployment
        shell: bash
        run: |
          set -euo pipefail
          DEPLOYMENT_DIR="${CK_DEPLOYMENT_DIR}"
          # Expand a leading '~' manually: tilde expansion does not happen
          # inside a quoted variable expansion.
          DEPLOYMENT_DIR="${DEPLOYMENT_DIR/#\~/$HOME}"
          oldest_time=""
          oldest_dir=""
          oldest_wheel=""
          for user_dir in "$DEPLOYMENT_DIR"/*/; do
            [ -d "$user_dir" ] || continue
            for deployment_dir in "$user_dir"*/; do
              [ -d "$deployment_dir" ] || continue
              # Skip deployments already claimed by a previous/concurrent run.
              [ -f "${deployment_dir}started.flag" ] && continue
              # Exactly one wheel is expected per deployment.
              wheel_count=$(find "$deployment_dir" -maxdepth 1 -name '*.whl' | wc -l)
              [ "$wheel_count" -eq 0 ] && continue
              if [ "$wheel_count" -gt 1 ]; then
                echo "ERROR: Multiple wheel files found in $deployment_dir" >&2
                exit 1
              fi
              wheel_file=$(find "$deployment_dir" -maxdepth 1 -name '*.whl')
              mtime=$(stat -c '%Y' "$deployment_dir")
              if [ -z "$oldest_time" ] || [ "$mtime" -lt "$oldest_time" ]; then
                oldest_time="$mtime"
                oldest_dir="$deployment_dir"
                oldest_wheel="$wheel_file"
              fi
            done
          done
          if [ -z "$oldest_dir" ]; then
            echo "No untested deployments found."
            echo "found=false" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          echo "Found untested deployment: $oldest_dir"
          echo "found=true" >> "$GITHUB_OUTPUT"
          # NOTE: oldest_dir keeps its trailing slash; downstream steps rely
          # on that when concatenating filenames (e.g. "...dir }}venv").
          echo "deployment_dir=$oldest_dir" >> "$GITHUB_OUTPUT"
          echo "wheel_file=$oldest_wheel" >> "$GITHUB_OUTPUT"
          # Claim the deployment so other runs skip it from now on.
          touch "${oldest_dir}started.flag"

      - name: Create virtual environment
        if: steps.find_deployment.outputs.found == 'true'
        shell: bash
        run: |
          set -euo pipefail
          python -m venv "${{ steps.find_deployment.outputs.deployment_dir }}venv"

      - name: Install wheel
        if: steps.find_deployment.outputs.found == 'true'
        shell: bash
        run: |
          set -euo pipefail
          VENV_PY="${{ steps.find_deployment.outputs.deployment_dir }}venv/bin/python"
          "$VENV_PY" -m pip install --quiet "${{ steps.find_deployment.outputs.wheel_file }}"

      - name: Install pytest and plugins
        if: steps.find_deployment.outputs.found == 'true'
        shell: bash
        run: |
          set -euo pipefail
          VENV_PY="${{ steps.find_deployment.outputs.deployment_dir }}venv/bin/python"
          "$VENV_PY" -m pip install --quiet pytest pytest-xdist jsonschema

      # Maps each name in the config's systemTests array to
      # $CK_TESTS_DIR/test_<name>_system.py and fails fast on any gap.
      - name: Resolve test files from deployment config
        id: resolve_tests
        if: steps.find_deployment.outputs.found == 'true'
        shell: bash
        run: |
          set -euo pipefail
          CONFIG_FILE="${{ steps.find_deployment.outputs.deployment_dir }}deployment_config.json"
          TESTS_DIR="${CK_TESTS_DIR}"
          if [ ! -f "$CONFIG_FILE" ]; then
            echo "ERROR: deployment_config.json not found at $CONFIG_FILE" >&2
            exit 1
          fi
          test_files=()
          # '.systemTests[]?' yields nothing (instead of a jq error) when the
          # key is missing/null; a jq failure inside process substitution would
          # NOT be caught by set -e, so the empty case is checked explicitly
          # below rather than relied on to fail here.
          while IFS= read -r test_name; do
            test_file="${TESTS_DIR}/test_${test_name}_system.py"
            if [ ! -f "$test_file" ]; then
              echo "ERROR: Test file not found: $test_file" >&2
              exit 1
            fi
            test_files+=("$test_file")
          done < <(jq -r '.systemTests[]?' "$CONFIG_FILE")
          if [ "${#test_files[@]}" -eq 0 ]; then
            echo "ERROR: systemTests is empty or missing in $CONFIG_FILE" >&2
            exit 1
          fi
          # Space-separated list for output (test names contain no spaces).
          echo "test_files=${test_files[*]}" >> "$GITHUB_OUTPUT"

      - name: Run system tests
        id: run_tests
        if: steps.find_deployment.outputs.found == 'true'
        shell: bash
        run: |
          set -euo pipefail
          DEPLOYMENT_DIR="${{ steps.find_deployment.outputs.deployment_dir }}"
          VENV_PY="${DEPLOYMENT_DIR}venv/bin/python"
          CONFIG_FILE="${DEPLOYMENT_DIR}deployment_config.json"
          JUNIT_XML="${DEPLOYMENT_DIR}test_results.xml"
          # Rehydrate the space-separated list from the prior step's output.
          read -ra test_files <<< "${{ steps.resolve_tests.outputs.test_files }}"
          echo "Running system tests: ${test_files[*]}"
          start_ns=$(date +%s%N)
          # Disable -e around pytest so its exit code can be captured and the
          # duration/exit outputs are always written.
          set +e
          "$VENV_PY" -m pytest \
            "${test_files[@]}" \
            -n "${WORKERS_COUNT}" \
            -W error::pytest.PytestUnhandledThreadExceptionWarning \
            --venv-path="${DEPLOYMENT_DIR}venv" \
            --deployment-config="${CONFIG_FILE}" \
            --junit-xml="${JUNIT_XML}"
          pytest_exit=$?
          set -e
          end_ns=$(date +%s%N)
          duration_ms=$(( (end_ns - start_ns) / 1000000 ))
          echo "duration_ms=${duration_ms}" >> "$GITHUB_OUTPUT"
          echo "pytest_exit=${pytest_exit}" >> "$GITHUB_OUTPUT"
          # Propagate the test result so failed tests fail the job; outputs
          # are already written, and the report/upload steps run via always().
          exit "$pytest_exit"

      # Runs even on failure/cancellation so every claimed deployment gets a
      # final_report.json; a cancelled job (e.g. timeout-minutes hit) is
      # recorded as testing_timed_out=true.
      - name: Write final report
        if: always() && steps.find_deployment.outputs.found == 'true'
        shell: bash
        run: |
          set -euo pipefail
          DEPLOYMENT_DIR="${{ steps.find_deployment.outputs.deployment_dir }}"
          FINAL_REPORT="${DEPLOYMENT_DIR}final_report.json"
          # Empty when the run_tests step never reached its output lines.
          duration_ms="${{ steps.run_tests.outputs.duration_ms }}"
          timed_out="false"
          if [ "${{ job.status }}" = "cancelled" ]; then
            timed_out="true"
            duration_ms="${duration_ms:-0}"
          fi
          cat > "$FINAL_REPORT" <<EOF
          {
            "testing_duration_ms": ${duration_ms:-0},
            "testing_timed_out": ${timed_out}
          }
          EOF
          echo "Final report written to $FINAL_REPORT"
          cat "$FINAL_REPORT"

      - name: Upload test artifacts
        if: always() && steps.find_deployment.outputs.found == 'true'
        uses: actions/upload-artifact@v4
        with:
          name: test-results
          if-no-files-found: warn
          path: |
            ${{ steps.find_deployment.outputs.deployment_dir }}test_results.xml
            ${{ steps.find_deployment.outputs.deployment_dir }}final_report.json
            ${{ steps.find_deployment.outputs.deployment_dir }}logs/