Update test scripts to generic-track versions (#213)

This makes the track more consistent with more recent tracks
This commit is contained in:
Erik Schierboom
2025-01-30 11:16:31 +01:00
committed by GitHub
parent 583ad1fbb6
commit 9a638548b7
5 changed files with 166 additions and 147 deletions

View File

@@ -40,4 +40,4 @@ jobs:
build-args: ZIP_PASSWORD=${{secrets.ZIP_PASSWORD}}
- name: Run tests for all exercises
run: bin/test
run: bin/verify-exercises-in-docker

View File

@@ -1,66 +0,0 @@
#!/usr/bin/env bash
# Test if the example/exemplar solution of each
# Practice/Concept Exercise passes the exercise's tests.
# Example:
# ./bin/test
# Exit on the first failing command and fail pipelines if any stage fails.
set -eo pipefail
# Overall script result: flipped to 1 by verify_exercise when any exercise fails.
exit_code=0
# Run the track's test-runner Docker image against one exercise directory.
# The directory is mounted both as /solution (input) and /output, so the
# runner writes results.json next to the exercise files.
function run_test_runner() {
    local exercise_dir="$1"
    local exercise_slug="$2"
    docker run \
        --rm \
        --network none \
        --mount "type=bind,src=${exercise_dir},dst=/solution" \
        --mount "type=bind,src=${exercise_dir},dst=/output" \
        --tmpfs /tmp:rw \
        exercism/8th-test-runner "${exercise_slug}" "/solution" "/output"
}
# Verify a single exercise: swap the reference solution in place of the
# stub, run the test runner, inspect results.json, then restore the stub.
# Sets the global exit_code to 1 when the solution fails its tests.
function verify_exercise() {
    local dir slug implementation_file_key implementation_file
    local stub_file stub_backup_file results_file
    # Declarations are separated from command substitutions so a failing
    # realpath/jq aborts the script under set -e instead of being masked
    # by local's own exit status (ShellCheck SC2155).
    dir=$(realpath "$1")
    slug=$(basename "${dir}")
    implementation_file_key=$2
    implementation_file=$(jq -r --arg d "${dir}" --arg k "${implementation_file_key}" '$d + "/" + .files[$k][0]' "${dir}/.meta/config.json")
    stub_file=$(jq -r --arg d "${dir}" '$d + "/" + .files.solution[0]' "${dir}/.meta/config.json")
    stub_backup_file="${stub_file}.bak"
    results_file="${dir}/results.json"
    cp "${stub_file}" "${stub_backup_file}"
    cp "${implementation_file}" "${stub_file}"
    # NOTE(review): if run_test_runner itself fails, set -e exits here and the
    # stub is left overwritten by the implementation — consider a trap to
    # restore it; confirm the desired behavior before changing.
    run_test_runner "${dir}" "${slug}"
    if [[ $(jq -r '.status' "${results_file}") != "pass" ]]; then
        echo "${slug}: ${implementation_file_key} solution did not pass the tests"
        cat "${results_file}"
        exit_code=1
    fi
    mv "${stub_backup_file}" "${stub_file}"
    rm -f "${results_file}"
}
# Run every Concept Exercise's exemplar solution against its tests.
for exercise_dir in ./exercises/concept/*/; do
    [[ -d $exercise_dir ]] || continue
    echo "Checking $(basename "${exercise_dir}") exercise..."
    verify_exercise "$exercise_dir" "exemplar"
done

# Run every Practice Exercise's example solution against its tests.
for exercise_dir in ./exercises/practice/*/; do
    [[ -d $exercise_dir ]] || continue
    echo "Checking $(basename "${exercise_dir}") exercise..."
    verify_exercise "$exercise_dir" "example"
done

exit "${exit_code}"

View File

@@ -1,80 +0,0 @@
#!/usr/bin/env bash
# Help text printed by -h or on invalid usage; $0 expands to the script path.
usage="Usage: $0 [-r] -a|exercise_slug
Test an example solution.
Use -a to test all exercises.
Use -r to run skipped tests.
Example: $0 luhn"
die() { echo "$*" >&2; exit 1; }
# Populate the shared $test_dir with everything one exercise's tests need:
# optional helper files, the test file(s) from .meta/config.json, and the
# reference solution copied to <slug>.8th.
make_test_dir() {
    local exercise_dir="$1"
    local key="$2"
    local slug="${exercise_dir##*/}"
    (
        cd "$exercise_dir" || die "cannot cd to: $exercise_dir"
        # Optional shared files some exercises provide.
        [[ -f test-words.8th ]] && cp test-words.8th "$test_dir"
        [[ -d libs ]] && cp -r libs "$test_dir"
        local file
        # Copy every declared test file.
        while IFS= read -r file; do
            cp -r "$file" "$test_dir"
        done < <(jq -r '.files.test[]' .meta/config.json)
        # Copy the example/exemplar solution under the name the tests expect.
        while IFS= read -r file; do
            cp -r "$file" "$test_dir/$slug.8th"
        done < <(jq -r --arg key "$key" '.files[$key][]' .meta/config.json)
    )
}
# Stage and run the tests for a single exercise slug; dies when the slug
# matches neither a concept nor a practice exercise.
test_one() {
    local slug="$1"
    if [[ -d "./exercises/concept/$slug" ]]; then
        make_test_dir "./exercises/concept/$slug" exemplar
    elif [[ -d "./exercises/practice/$slug" ]]; then
        make_test_dir "./exercises/practice/$slug" example
    else
        die "no such exercise: $slug"
    fi
    (
        cd "$test_dir" || die "cannot cd to $test_dir"
        8th -f test.8th
    )
}
# Remove the staging directory; registered on EXIT so it runs on any exit path.
cleanup() { rm -rf "$test_dir"; }

test_dir=$(mktemp -d)
trap cleanup EXIT

all=false
while getopts :har opt; do
  case $opt in
    h) die "$usage" ;;
    a) all=true ;;
    r) export RUN_ALL_TESTS=true ;;
    ?) die "unknown option: -$OPTARG" ;;
  esac
done
shift $((OPTIND - 1))

if $all; then
  shopt -s nullglob
  for dir in ./exercises/concept/* ./exercises/practice/*; do
    slug=${dir##*/}
    # Bug fix: previously the same temp dir was reused for every exercise,
    # so stray files (libs/, extra test files) from one exercise could leak
    # into the next exercise's run. Use a fresh dir per exercise; the EXIT
    # trap still cleans up the last one.
    rm -rf "$test_dir"
    test_dir=$(mktemp -d)
    if test_one "$slug"; then
      echo "$slug tests exited with $?"
      echo
    else
      die "$slug tests exited with $?"
    fi
  done
else
  slug=$1
  [[ -n $slug ]] || die "$usage"
  test_one "$slug"
fi

81
bin/verify-exercises Executable file
View File

@@ -0,0 +1,81 @@
#!/usr/bin/env bash
# Synopsis:
# Verify that each exercise's example/exemplar solution passes the tests.
# You can either verify all exercises or a single exercise.
# Example: verify all exercises
# bin/verify-exercises
# Example: verify single exercise
# bin/verify-exercises two-fer
# Abort on the first failing command; propagate failures through pipelines.
set -eo pipefail

# Print a message to stderr and abort the script.
die() {
    printf '%s\n' "$*" >&2
    exit 1
}

# Ensure an external command is on PATH before the script relies on it.
required_tool() {
    local tool="${1}"
    if ! command -v "${tool}" >/dev/null 2>&1; then
        die "${tool} is required but not installed. Please install it and make sure it's in your PATH."
    fi
}
required_tool jq
# Copy each example (practice) or exemplar (concept) file over its matching
# stub solution file. jq pairs them positionally: src is
# .files.exemplar[i] (falling back to .files.example[i]), dst is
# .files.solution[i]. Runs against .meta/config.json in the current directory.
# NOTE(review): "examplar" in the name is a typo for "exemplar"; renaming
# would require a coordinated change at every call site, so it stays as-is.
copy_example_or_examplar_to_solution() {
jq -c '[.files.solution, .files.exemplar // .files.example] | transpose | map({src: .[1], dst: .[0]}) | .[]' .meta/config.json \
| while read -r src_and_dst; do
cp "$(jq -r '.src' <<< "${src_and_dst}")" "$(jq -r '.dst' <<< "${src_and_dst}")"
done
}
# Strip the SKIP-REST-OF-TESTS marker from every declared test file so all
# tests run (the stubs ship with later tests skipped).
# NOTE(review): the original SC2034 (unused variable) directive looked
# misplaced — test_file is clearly used below — so it has been dropped.
# NOTE(review): 'sed -i' without a suffix is GNU-specific; BSD/macOS sed
# would need -i '' — confirm the intended runtime environment.
unskip_tests() {
jq -r '.files.test[]' .meta/config.json | while read -r test_file; do
sed -i 's/SKIP-REST-OF-TESTS//g' "${test_file}"
done
}
# Run the 8th test harness; expects test.8th in the current directory
# (the exercise copy prepared by verify_exercise).
run_tests() {
8th -f test.8th
}
# Copy one exercise into a scratch directory, install its reference
# solution over the stub, enable all tests, and run them there.
verify_exercise() {
    local exercise_path exercise_name scratch_dir
    exercise_path=$(realpath "${1}")
    exercise_name=$(basename "${exercise_path}")
    scratch_dir=$(mktemp -d -t "exercism-verify-${exercise_name}-XXXXX")
    echo "Verifying ${exercise_name} exercise..."
    (
        # Subshell isolates the cd; the trap removes the scratch dir on exit.
        trap 'rm -rf "$scratch_dir"' EXIT
        cp -r "${exercise_path}/." "${scratch_dir}"
        cd "${scratch_dir}"
        copy_example_or_examplar_to_solution
        unskip_tests
        run_tests
    )
}
# Verify one exercise (by slug) or, with the default "*", every Concept and
# Practice Exercise. Dies when the glob matches no exercise directory.
verify_exercises() {
    local exercise_slug
    # Fix: count was previously an implicit global leaking out of the
    # function; declare it local (interface unchanged).
    local count
    exercise_slug="${1}"
    shopt -s nullglob  # unmatched globs expand to nothing, not themselves
    count=0
    for exercise_dir in ./exercises/{concept,practice}/${exercise_slug}/; do
        if [[ -d "${exercise_dir}" ]]; then
            verify_exercise "${exercise_dir}"
            ((++count))
        fi
    done
    ((count > 0)) || die 'no matching exercises found!'
}
# Default to "*" (verify all exercises) when no slug argument is given.
exercise_slug="${1:-*}"
verify_exercises "${exercise_slug}"

84
bin/verify-exercises-in-docker Executable file
View File

@@ -0,0 +1,84 @@
#!/usr/bin/env bash
# Synopsis:
# Verify that each exercise's example/exemplar solution passes the tests
# using the track's test runner Docker image.
# You can either verify all exercises or a single exercise.
# Example: verify all exercises in Docker
# bin/verify-exercises-in-docker
# Example: verify single exercise in Docker
# bin/verify-exercises-in-docker two-fer
# Abort on the first failing command; propagate failures through pipelines.
set -eo pipefail
# Print a message to stderr and abort the script.
die() { echo "$*" >&2; exit 1; }
# Abort with a helpful message when a required external command is missing.
required_tool() {
    command -v "${1}" >/dev/null 2>&1 ||
        die "${1} is required but not installed. Please install it and make sure it's in your PATH."
}

required_tool docker
# Bug fix: run_tests parses results.json with jq, but only docker was
# checked before — fail fast with a clear message when jq is missing too.
required_tool jq
# Copy each example (practice) or exemplar (concept) file over its matching
# stub solution file. jq pairs them positionally: src is
# .files.exemplar[i] (falling back to .files.example[i]), dst is
# .files.solution[i]. Runs against .meta/config.json in the current directory.
# NOTE(review): "examplar" in the name is a typo for "exemplar"; renaming
# would require a coordinated change at every call site, so it stays as-is.
copy_example_or_examplar_to_solution() {
jq -c '[.files.solution, .files.exemplar // .files.example] | transpose | map({src: .[1], dst: .[0]}) | .[]' .meta/config.json \
| while read -r src_and_dst; do
cp "$(jq -r '.src' <<< "${src_and_dst}")" "$(jq -r '.dst' <<< "${src_and_dst}")"
done
}
# Run the track's test-runner image against the solution in $PWD and
# succeed only when results.json reports an overall "pass" status.
run_tests() {
    local exercise="${1}"
    # shellcheck disable=SC1083
    docker run \
        --rm \
        --network none \
        --read-only \
        --mount type=bind,src="${PWD}",dst=/solution \
        --mount type=bind,src="${PWD}",dst=/output \
        --mount type=tmpfs,dst=/tmp \
        exercism/8th-test-runner "${exercise}" /solution /output
    # Exit status of the function is jq's: non-zero unless status == "pass".
    jq -e '.status == "pass"' "${PWD}/results.json" >/dev/null 2>&1
}
# Copy one exercise into a scratch directory, install its reference
# solution over the stub, and run the tests via the Docker test runner.
verify_exercise() {
    local exercise_path exercise_name scratch_dir
    exercise_path=$(realpath "${1}")
    exercise_name=$(basename "${exercise_path}")
    scratch_dir=$(mktemp -d -t "exercism-verify-${exercise_name}-XXXXX")
    echo "Verifying ${exercise_name} exercise..."
    (
        # Subshell isolates the cd; the trap removes the scratch dir on exit.
        trap 'rm -rf "$scratch_dir"' EXIT
        cp -r "${exercise_path}/." "${scratch_dir}"
        cd "${scratch_dir}"
        copy_example_or_examplar_to_solution
        run_tests "${exercise_name}"
    )
}
# Verify one exercise (by slug) or, with the default "*", every Concept and
# Practice Exercise. Dies when the glob matches no exercise directory.
verify_exercises() {
    local exercise_slug
    # Fix: count was previously an implicit global leaking out of the
    # function; declare it local (interface unchanged).
    local count
    exercise_slug="${1}"
    shopt -s nullglob  # unmatched globs expand to nothing, not themselves
    count=0
    for exercise_dir in ./exercises/{concept,practice}/${exercise_slug}/; do
        if [[ -d "${exercise_dir}" ]]; then
            verify_exercise "${exercise_dir}"
            ((++count))
        fi
    done
    ((count > 0)) || die 'no matching exercises found!'
}
# Default to "*" (verify all exercises) when no slug argument is given.
exercise_slug="${1:-*}"
verify_exercises "${exercise_slug}"