merge test_exercises.py and test_exercises_docker.py

This commit is contained in:
Corey McCandless
2021-02-08 13:52:35 -05:00
committed by Corey McCandless
parent 753bcccdad
commit a43e360874
5 changed files with 190 additions and 110 deletions

View File

@@ -35,6 +35,34 @@ class ExerciseStatus(str, Enum):
     Deprecated = 'deprecated'


+@dataclass
+class ExerciseFiles:
+    solution: List[str]
+    test: List[str]
+    exemplar: List[str]
+
+
+@dataclass
+class ExerciseConfig:
+    files: ExerciseFiles
+    authors: List[str] = None
+    forked_from: str = None
+    contributors: List[str] = None
+    language_versions: List[str] = None
+
+    def __post_init__(self):
+        if isinstance(self.files, dict):
+            self.files = ExerciseFiles(**self.files)
+        for attr in ['authors', 'contributors', 'language_versions']:
+            if getattr(self, attr) is None:
+                setattr(self, attr, [])
+
+    @classmethod
+    def load(cls, config_file: Path) -> 'ExerciseConfig':
+        with config_file.open() as f:
+            return cls(**json.load(f))
+
+
 @dataclass
 class ExerciseInfo:
     path: Path
@@ -93,6 +121,9 @@ class ExerciseInfo:
     def config_file(self):
         return self.meta_dir / 'config.json'

+    def load_config(self) -> ExerciseConfig:
+        return ExerciseConfig.load(self.config_file)
+
 @dataclass
 class Exercises:
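
The new ExerciseConfig.load classmethod takes a caller from a .meta/config.json
path to a typed object in one call; __post_init__ then coerces the nested
"files" mapping into an ExerciseFiles instance and normalizes the optional list
fields. A minimal usage sketch (not part of the commit; the paasio path is
illustrative, matching the config.json change below):

    from pathlib import Path
    from data import ExerciseConfig, ExerciseFiles

    config = ExerciseConfig.load(Path('exercises/paasio/.meta/config.json'))
    assert isinstance(config.files, ExerciseFiles)  # "files" dict coerced in __post_init__
    print(config.files.test)  # ['paasio_test.py', 'test_utils.py']
    print(config.authors)     # [] -- None-valued list fields become empty lists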

View File

@@ -1,49 +1,175 @@
 #!/usr/bin/env python3
+"""Meant to be run from inside python-test-runner container,
+where this track repo is mounted at /python
+"""
+import argparse
+from functools import wraps
+from itertools import zip_longest
+import json
+from pathlib import Path
 import shutil
 import subprocess
 import sys
 import tempfile
-from pathlib import Path

-from data import Config, ExerciseInfo
+from data import Config, ExerciseConfig, ExerciseInfo

 # Allow high-performance tests to be skipped
 ALLOW_SKIP = ['alphametics', 'largest-series-product']

-
-def check_assignment(exercise: ExerciseInfo, quiet=False) -> int:
-    # Returns the exit code of the tests
-    workdir = Path(tempfile.mkdtemp(exercise.slug))
-    solution_file = exercise.solution_stub.name
-    try:
-        test_file_out = workdir / exercise.test_file.name
-        if exercise.slug in ALLOW_SKIP:
-            shutil.copyfile(exercise.test_file, test_file_out)
-        else:
-            with exercise.test_file.open('r') as src_file:
-                lines = [line for line in src_file.readlines()
-                         if not line.strip().startswith('@unittest.skip')]
-            with test_file_out.open('w') as dst_file:
-                dst_file.writelines(lines)
-        shutil.copyfile(exercise.exemplar_file, workdir / solution_file)
-        kwargs = {}
-        if quiet:
-            kwargs['stdout'] = subprocess.DEVNULL
-            kwargs['stderr'] = subprocess.DEVNULL
-        return subprocess.run([sys.executable, '-m', 'pytest', test_file_out], **kwargs).returncode
-    finally:
-        shutil.rmtree(workdir)
+TEST_RUNNER_DIR = Path('/opt/test-runner')
+
+RUNNERS = {}
+
+
+def runner(name):
+    def _decorator(runner_func):
+        RUNNERS[name] = runner_func
+
+        @wraps(runner_func)
+        def _wrapper(exercise: ExerciseInfo, workdir: Path, quiet: bool = False):
+            return runner_func(exercise, workdir, quiet=quiet)
+        return _wrapper
+    return _decorator
+
+
+def copy_file(src: Path, dst: Path, strip_skips=False):
+    if strip_skips:
+        with src.open('r') as src_file:
+            lines = [line for line in src_file.readlines()
+                     if not line.strip().startswith('@unittest.skip')]
+        with dst.open('w') as dst_file:
+            dst_file.writelines(lines)
+    else:
+        shutil.copy2(src, dst)
+
+
+def copy_solution_files(exercise: ExerciseInfo, workdir: Path, exercise_config: ExerciseConfig = None):
+    if exercise_config is not None:
+        solution_files = exercise_config.files.solution
+        exemplar_files = exercise_config.files.exemplar
+    else:
+        solution_files = []
+        exemplar_files = []
+    if not solution_files:
+        solution_files.append(exercise.solution_stub.name)
+    solution_files = [exercise.path / s for s in solution_files]
+    if not exemplar_files:
+        exemplar_files.append(exercise.exemplar_file.relative_to(exercise.path))
+    exemplar_files = [exercise.path / e for e in exemplar_files]
+    for solution_file, exemplar_file in zip_longest(solution_files, exemplar_files):
+        if solution_file is None:
+            copy_file(exemplar_file, workdir / exemplar_file.name)
+        elif exemplar_file is None:
+            copy_file(solution_file, workdir / solution_file.name)
+        else:
+            dst = workdir / solution_file.relative_to(exercise.path)
+            copy_file(exemplar_file, dst)
+
+
+def copy_test_files(exercise: ExerciseInfo, workdir: Path, exercise_config: ExerciseConfig = None):
+    if exercise_config is not None:
+        test_files = exercise_config.files.test
+    else:
+        test_files = []
+    if not test_files:
+        test_files.append(exercise.test_file.name)
+    for test_file_name in test_files:
+        test_file = exercise.path / test_file_name
+        test_file_out = workdir / test_file_name
+        copy_file(test_file, test_file_out, strip_skips=(exercise.slug not in ALLOW_SKIP))
+
+
+def copy_exercise_files(exercise: ExerciseInfo, workdir: Path):
+    exercise_config = None
+    if exercise.config_file.is_file():
+        workdir_meta = workdir / '.meta'
+        workdir_meta.mkdir(exist_ok=True)
+        copy_file(exercise.config_file, workdir_meta / exercise.config_file.name)
+        exercise_config = exercise.load_config()
+    copy_solution_files(exercise, workdir, exercise_config)
+    copy_test_files(exercise, workdir, exercise_config)
+
+
+@runner('pytest')
+def run_with_pytest(_exercise, workdir, quiet: bool = False) -> int:
+    kwargs = {'cwd': str(workdir)}
+    if quiet:
+        kwargs['stdout'] = subprocess.DEVNULL
+        kwargs['stderr'] = subprocess.DEVNULL
+    return subprocess.run([sys.executable, '-m', 'pytest'], **kwargs).returncode
+
+
+@runner('test-runner')
+def run_with_test_runner(exercise, workdir, quiet: bool = False) -> int:
+    kwargs = {}
+    if quiet:
+        kwargs['stdout'] = subprocess.DEVNULL
+        kwargs['stderr'] = subprocess.DEVNULL
+    if TEST_RUNNER_DIR.is_dir():
+        kwargs['cwd'] = str(TEST_RUNNER_DIR)
+        args = ['./bin/run.sh', exercise.slug, workdir, workdir]
+    else:
+        args = [
+            'docker-compose',
+            'run',
+            '-w', str(TEST_RUNNER_DIR),
+            '--entrypoint', './bin/run.sh',
+            '-v', f'{workdir}:/{exercise.slug}',
+            'test-runner',
+            exercise.slug,
+            f'/{exercise.slug}',
+            f'/{exercise.slug}',
+        ]
+    subprocess.run(args, **kwargs)
+    results_file = workdir / 'results.json'
+    if results_file.is_file():
+        with results_file.open() as f:
+            results = json.load(f)
+        if results['status'] == 'pass':
+            return 0
+    return 1
+
+
+def check_assignment(exercise: ExerciseInfo, runner: str = 'pytest', quiet: bool = False) -> int:
+    ret = 1
+    with tempfile.TemporaryDirectory(exercise.slug) as workdir:
+        workdir = Path(workdir)
+        copy_exercise_files(exercise, workdir)
+        ret = RUNNERS[runner](exercise, workdir, quiet=quiet)
+    return ret
+
+
+def get_cli() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser()
+    runners = list(RUNNERS.keys())
+    if not runners:
+        print('No runners registered!')
+        raise SystemExit(1)
+    parser.add_argument('-q', '--quiet', action='store_true')
+    parser.add_argument('-r', '--runner', choices=runners, default=runners[0])
+    parser.add_argument('exercises', nargs='*')
+    return parser


 def main():
+    opts = get_cli().parse_args()
     config = Config.load()
     exercises = config.exercises.all()
-    if len(sys.argv) >= 2:
+    if opts.exercises:
         # test specific exercises
         exercises = [
-            e for e in exercises if e.slug in sys.argv[1:]
+            e for e in exercises if e.slug in opts.exercises
         ]
+        not_found = [
+            slug for slug in opts.exercises
+            if not any(e.slug == slug for e in exercises)
+        ]
+        if not_found:
+            for slug in not_found:
+                print(f"unknown exercise '{slug}'")
+            raise SystemExit(1)
+
+    print(f'TestEnvironment: {sys.executable.capitalize()}')
+    print(f'Runner: {opts.runner}\n\n')
+
     failures = []
     for exercise in exercises:
@@ -52,12 +178,10 @@ def main():
             print('FAIL: File with test cases not found')
             failures.append('{} (FileNotFound)'.format(exercise.slug))
         else:
-            if check_assignment(exercise):
+            if check_assignment(exercise, runner=opts.runner, quiet=opts.quiet):
                 failures.append('{} (TestFailed)'.format(exercise.slug))
         print('')
-
-    print('TestEnvironment:', sys.executable.capitalize(), '\n\n')
     if failures:
         print('FAILURES: ', ', '.join(failures))
         raise SystemExit(1)
@@ -65,5 +189,5 @@ def main():
         print('SUCCESS!')


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
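
The RUNNERS registry is what lets a single script replace both of the old ones:
each function decorated with @runner(name) is recorded under its CLI-facing
name, get_cli() turns the registry keys into the --runner choices, and
check_assignment() dispatches on the chosen name. A stripped-down sketch of the
same pattern (the 'echo' runner is hypothetical, not part of the commit):

    RUNNERS = {}

    def runner(name):
        def _decorator(func):
            RUNNERS[name] = func  # register under the CLI-facing name
            return func
        return _decorator

    @runner('echo')
    def run_echo(exercise, workdir, quiet=False):
        return 0  # runners report an exit code

    print(list(RUNNERS))                # ['echo'] -> the --runner choices
    print(RUNNERS['echo'](None, None))  # 0 -> dispatch by name

In practice, `bin/test_exercises.py some-exercise` keeps the old local pytest
behaviour, while `bin/test_exercises.py --runner test-runner some-exercise`
sends the same exercise through the Docker-based test runner (example
invocations; `some-exercise` stands in for any slug).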

View File

@@ -1,77 +0,0 @@
-#!/usr/bin/env python3
-"""Meant to be run from inside python-test-runner container,
-where this track repo is mounted at /python
-"""
-import json
-from pathlib import Path
-import shutil
-import subprocess
-import sys
-import tempfile
-
-from data import Config, ExerciseInfo
-
-# Allow high-performance tests to be skipped
-ALLOW_SKIP = ['alphametics', 'largest-series-product']
-
-
-def check_assignment(exercise: ExerciseInfo) -> int:
-    # Returns the exit code of the tests
-    workdir = Path(tempfile.mkdtemp(exercise.slug))
-    solution_file = exercise.solution_stub.name
-    try:
-        test_file_out = workdir / exercise.test_file.name
-        if exercise.slug in ALLOW_SKIP:
-            shutil.copy2(exercise.test_file, test_file_out)
-        else:
-            with exercise.test_file.open('r') as src_file:
-                lines = [line for line in src_file.readlines()
-                         if not line.strip().startswith('@unittest.skip')]
-            with test_file_out.open('w') as dst_file:
-                dst_file.writelines(lines)
-        shutil.copyfile(exercise.exemplar_file, workdir / solution_file)
-        if exercise.config_file.is_file():
-            tmp_meta = workdir / '.meta'
-            tmp_meta.mkdir(exist_ok=True)
-            shutil.copy2(exercise.config_file, tmp_meta / exercise.config_file.name)
-        args = ['./bin/run.sh', exercise.slug, workdir, workdir]
-        subprocess.run(args, cwd='/opt/test-runner')
-        results_file = workdir / 'results.json'
-        if results_file.is_file():
-            with results_file.open() as f:
-                results = json.load(f)
-            if results['status'] == 'pass':
-                return 0
-        return 1
-    finally:
-        shutil.rmtree(workdir)
-
-
-def main():
-    config = Config.load()
-    exercises = config.exercises.all()
-    if len(sys.argv) >= 2:
-        # test specific exercises
-        exercises = [
-            e for e in exercises if e.slug in sys.argv[1:]
-        ]
-    failures = []
-    for exercise in exercises:
-        print('# ', exercise.slug)
-        if not exercise.test_file:
-            print('FAIL: File with test cases not found')
-            failures.append('{} (FileNotFound)'.format(exercise.slug))
-        else:
-            if check_assignment(exercise):
-                failures.append('{} (TestFailed)'.format(exercise.slug))
-        print('')
-    if failures:
-        print('FAILURES: ', ', '.join(failures))
-        raise SystemExit(1)
-    else:
-        print('SUCCESS!')
-
-
-if __name__ == "__main__":
-    main()

View File

@@ -4,6 +4,6 @@ services:
   test-runner:
     image: exercism/python-test-runner
     working_dir: /python
-    entrypoint: ./bin/test_runner_exercises.py
+    entrypoint: ./bin/test_exercises.py --runner test-runner
     volumes:
       - .:/python
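
With the new entrypoint, `docker-compose run test-runner` lands in the merged
script with the test-runner backend preselected, while the same script run
outside the container detects that /opt/test-runner is absent and shells out to
docker-compose itself (see run_with_test_runner above), so both entry points
converge on one code path.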

View File

@@ -1,5 +1,7 @@
 {
   "files": {
-    "test": ["paasio_test.py", "test_utils.py"]
+    "solution": ["paasio.py"],
+    "test": ["paasio_test.py", "test_utils.py"],
+    "exemplar": [".meta/example.py"]
   }
 }
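
With all three keys present, copy_exercise_files() can stage the paasio workdir
without falling back to filename guessing. Roughly, the staged layout would be
(a sketch, assuming the usual exercises/paasio tree):

    exercises/paasio/.meta/config.json -> workdir/.meta/config.json
    exercises/paasio/.meta/example.py  -> workdir/paasio.py        (exemplar copied under the solution name)
    exercises/paasio/paasio_test.py    -> workdir/paasio_test.py   (@unittest.skip lines stripped)
    exercises/paasio/test_utils.py     -> workdir/test_utils.py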