From cf1e26a347d75814c5bc08ac4c414b967279d857 Mon Sep 17 00:00:00 2001
From: Vishwak Srinivasan
Date: Fri, 7 Jun 2019 13:32:43 +0530
Subject: [PATCH] Better submission status viewing (#69)

Co-authored-by: Prateek Kumar
Co-authored-by: vishwakftw
---
 .flake8                                      |  2 +-
 content/Dockerfile                           |  2 +-
 content/compile_and_test.py                  | 10 ++-
 content/main_tester.sh                       |  5 +-
 judge/default/compilation_script.sh          |  1 +
 judge/handler.py                             | 93 ++++++++++++--------
 judge/migrations/0001_initial.py             | 10 +--
 judge/models.py                              | 22 ++---
 judge/templates/judge/base.html              |  4 +-
 judge/templates/judge/problem_detail.html    |  4 +-
 judge/templates/judge/submission_detail.html | 91 +++++++++++++------
 judge/tests.py                               |  6 +-
 judge/views.py                               |  8 +-
 submission_watcher_saver.py                  | 16 ++--
 14 files changed, 173 insertions(+), 101 deletions(-)

diff --git a/.flake8 b/.flake8
index dda1d31..c1c55f2 100644
--- a/.flake8
+++ b/.flake8
@@ -1,3 +1,3 @@
 [flake8]
 max-line-length = 100
-exclude = judge/migrations
+exclude = judge/migrations,content/**/*
diff --git a/content/Dockerfile b/content/Dockerfile
index 80aba89..7f6d0cb 100644
--- a/content/Dockerfile
+++ b/content/Dockerfile
@@ -57,4 +57,4 @@ ENV SUB_ID=-1
 WORKDIR /app

 # Run the meta script
-CMD python compile_and_test.py --submission_config tmp/sub_run_${SUB_ID}.txt
+CMD python3.6 compile_and_test.py --submission_config tmp/sub_run_${SUB_ID}.txt
diff --git a/content/compile_and_test.py b/content/compile_and_test.py
index 8ec6234..e727512 100644
--- a/content/compile_and_test.py
+++ b/content/compile_and_test.py
@@ -29,13 +29,17 @@
                                        'submission_{}{}'.format(sub_info[1], sub_info[2])], stderr=subprocess.STDOUT)
 except subprocess.CalledProcessError as e:
     # If compilation fails, end this script here
-    error_msg = e.output.decode('utf-8').replace('\n', '\\n')
+    error_msg = str(e.output.decode('utf-8'))
     with open(args.submission_config, "a") as stat_file:
         for testcase_id in sub_info[5:]:
+            log_file_name = 'sub_run_{}_{}.log'.format(sub_info[1], testcase_id)
+
+            with open('tmp/' + log_file_name, "w") as log_file:
+                log_file.write(error_msg)
+
             stat_file.write("{} {} 0 0 {}\n"
                             .format(testcase_id,
-                                    'CE' if e.returncode == 1 else 'NA',
-                                    error_msg))
+                                    'CE' if e.returncode == 1 else 'NA', log_file_name))
 else:
     subprocess.call(['./main_tester.sh'] + sub_info[0:2] + sub_info[3:])  # run tests
     subprocess.call(['rm', 'submissions/submission_{}'.format(sub_info[1])])  # remove executable
diff --git a/content/main_tester.sh b/content/main_tester.sh
index be2da02..5b23c40 100755
--- a/content/main_tester.sh
+++ b/content/main_tester.sh
@@ -80,14 +80,13 @@ run_submission() {

     # This is then checked normally using a diff
     # The status is appended to the verdict_string along with the memory and time consumed
     VERDICT=""
-    ERR_MSG=""
     if [ "$TIMEOUT" = true ] ; then
         VERDICT=$(error_code_to_string $TLE ${TID})
     elif [ "$MEMOUT" = true ] ; then
         VERDICT=$(error_code_to_string $OOM ${TID})
     else
         clean_generated_output ${SID} ${TID} # Delete the generated file to prevent any mismatch
-        ERR_MSG=$({ ${SUB_FDR}/submission_${SID} < ${TEST_FDR}/inputfile_${TID}.txt > ${TMP}/sub_output_${SID}_${TID}.txt; } 2>&1)
+        ${SUB_FDR}/submission_${SID} < ${TEST_FDR}/inputfile_${TID}.txt > ${TMP}/sub_output_${SID}_${TID}.txt 2> ${TMP}/sub_run_${SID}_${TID}.log
         case "$?" in
             "0")
@@ -102,7 +101,7 @@ run_submission() {
             ;;
         esac
     fi
-    VERDICT="${VERDICT} ${WCTIME} ${MAXVM} ${ERR_MSG}"
+    VERDICT="${VERDICT} ${WCTIME} ${MAXVM} sub_run_${SID}_${TID}.log"
     echo ${VERDICT}
 }

diff --git a/judge/default/compilation_script.sh b/judge/default/compilation_script.sh
index cf57725..334ec29 100644
--- a/judge/default/compilation_script.sh
+++ b/judge/default/compilation_script.sh
@@ -15,6 +15,7 @@ compile_c() {
         return $SUCCESS
     else
         return $FAILURE
+    fi
 }

 # This is the function to compile .cpp files using g++
diff --git a/judge/handler.py b/judge/handler.py
index 659d446..18d2960 100644
--- a/judge/handler.py
+++ b/judge/handler.py
@@ -3,17 +3,25 @@
 from re import compile
 from io import StringIO
-from logging import error as log_error
 from traceback import print_exc
 from csv import writer as csvwriter
 from shutil import rmtree, copyfile
+from logging import error as log_error
 from datetime import timedelta, datetime
 from typing import Tuple, Optional, Dict, Any, List, Union

 from django.utils import timezone
+from django.core.files.uploadedfile import InMemoryUploadedFile
+

 from . import models


+def _check_and_remove(*fullpaths):
+    for fullpath in fullpaths:
+        if os.path.exists(fullpath):
+            os.remove(fullpath)
+
+
 def process_contest(contest_name: str, contest_start: datetime, contest_soft_end: datetime,
                     contest_hard_end: datetime, penalty: float, is_public: bool,
                     enable_linter_score: bool, enable_poster_score: bool) -> Tuple[bool, str]:
@@ -74,60 +82,77 @@ def delete_contest(contest_id: int) -> Tuple[bool, Optional[str]]:
         return (False, 'Contest could not be deleted')


-def process_problem(code: str, contest: int, name: str, statement: str, input_format: str,
-                    output_format: str, difficulty: int, time_limit: int, memory_limit: int,
-                    file_format: str, starting_code, max_score: int,
-                    compilation_script, test_script) -> Tuple[bool, Optional[str]]:
+def process_problem(
+        contest: int,
+        **kwargs: Union[str, int, Optional[InMemoryUploadedFile]]) -> Tuple[bool, Optional[str]]:
     """
     Function to process a new :class:`~judge.models.Problem`.

-    :param code: Problem code
     :param contest: Contest ID to which the problem belongs
+
+    :attr:`**kwargs` includes the following keyword arguments, which are directly passed
+    to construct a :class:`~judge.models.Problem` object.
+
+    :param code: Problem code
+    :type code: str
     :param name: Problem name
+    :type name: str
     :param statement: Problem statement
+    :type statement: str
     :param input_format: Problem input format
+    :type input_format: str
     :param output_format: Problem output format
+    :type output_format: str
     :param difficulty: Problem difficulty
+    :type difficulty: int
     :param time_limit: Problem execution time limit
+    :type time_limit: int
     :param memory_limit: Problem virtual memory limit
-    :param file_format: Accepted file format for submissions
+    :type memory_limit: int
+    :param file_exts: Accepted file extensions for submissions
+    :type file_exts: str
     :param starting_code: Starting code for the problem
+    :type starting_code: Optional[InMemoryUploadedFile]
     :param max_score: Maximum judge score per test case for the problem
+    :type max_score: int
     :param compilation_script: Compilation script for the submissions
+    :type compilation_script: Optional[InMemoryUploadedFile]
     :param test_script: Test script for the submissions
+    :type test_script: Optional[InMemoryUploadedFile]

     :returns: A 2-tuple - 1st element indicating whether the processing has succeeded,
               and 2nd element providing an error message if processing is unsuccessful.
     """
     # Check if the Problem Code has already been taken
+    code = kwargs.get('code')
     try:
         models.Problem.objects.get(pk=code)
         return (False, '{} already a used Question code.'.format(code))
     except models.Problem.DoesNotExist:
         pass

-    statement = 'The problem statement is empty.' if statement is None else statement
-    input_format = 'No input format specified.' if input_format is None else input_format
-    output_format = 'No output format specified.' if output_format is None else output_format
+    # Quill replaces empty input with this
+    NO_INPUT_QUILL = '{"ops":[{"insert":"\\n"}]}'
+    if kwargs.get('statement') == NO_INPUT_QUILL:
+        kwargs['statement'] = 'The problem statement is empty.'
+    if kwargs.get('input_format') == NO_INPUT_QUILL:
+        kwargs['input_format'] = 'No input format specified.'
+    if kwargs.get('output_format') == NO_INPUT_QUILL:
+        kwargs['output_format'] = 'No output format specified.'

     # if either one of compilation_script or test_script is None,
     # we create a Problem with the default compilation script and/or test_script
     # and then we copy a compilation script and/or test_script to the right location
     # and update the link after creation
-    no_comp_script, no_test_script = compilation_script is None, test_script is None
+    no_comp_script = kwargs.get('compilation_script') is None
+    no_test_script = kwargs.get('test_script') is None
     if no_comp_script:
-        compilation_script = './default/compilation_script.sh'
+        kwargs['compilation_script'] = './default/compilation_script.sh'
     if no_test_script:
-        test_script = './default/test_script.sh'
+        kwargs['test_script'] = './default/test_script.sh'

     try:
         c = models.Contest.objects.get(pk=contest)
-        p = models.Problem.objects.create(
-            code=code, contest=c, name=name, statement=statement,
-            input_format=input_format, output_format=output_format,
-            difficulty=difficulty, time_limit=time_limit, memory_limit=memory_limit,
-            file_format=file_format, start_code=starting_code, max_score=max_score,
-            compilation_script=compilation_script,
-            test_script=test_script)
+        p = models.Problem.objects.create(contest=c, **kwargs)

         if not os.path.exists(os.path.join('content', 'problems', p.code)):
             # Create the problem directory explictly if not yet created
@@ -210,17 +235,15 @@ def delete_problem(problem_id: str) -> Tuple[bool, Optional[str]]:
                 'content', 'testcase', 'inputfile_{}.txt'.format(testcase.pk))
             outputfile_path = os.path.join(
                 'content', 'testcase', 'outputfile_{}.txt'.format(testcase.pk))
-            if os.path.exists(inputfile_path):
-                os.remove(inputfile_path)
-            if os.path.exists(outputfile_path):
-                os.remove(outputfile_path)
+            _check_and_remove(inputfile_path, outputfile_path)
+
         submissions = models.Submission.objects.filter(problem=problem)
         for submission in submissions:
             submission_path = os.path.join(
                 'content', 'submissions',
                 'submission_{}{}'.format(submission.pk, submission.file_type))
-            if os.path.exists(submission_path):
-                os.remove(submission_path)
+            _check_and_remove(submission_path)
+
         rmtree(os.path.join('content', 'problems', problem_id))

         models.Problem.objects.filter(pk=problem_id).delete()
@@ -254,7 +277,8 @@ def process_person(email: str, rank: int = 0) -> Tuple[bool, Optional[str]]:


 def process_testcase(problem_id: str, test_type: str,
-                     input_file, output_file) -> Tuple[bool, Optional[str]]:
+                     input_file: InMemoryUploadedFile,
+                     output_file: InMemoryUploadedFile) -> Tuple[bool, Optional[str]]:
     """
     Function to process a new :class:`~judge.models.TestCase` for a problem.
@@ -300,10 +324,8 @@ def delete_testcase(testcase_id: str) -> Tuple[bool, Optional[str]]:
             'content', 'testcase', 'inputfile_{}.txt'.format(testcase_id))
         outputfile_path = os.path.join(
             'content', 'testcase', 'outputfile_{}.txt'.format(testcase_id))
-        if os.path.exists(inputfile_path):
-            os.remove(inputfile_path)
-        if os.path.exists(outputfile_path):
-            os.remove(outputfile_path)
+        _check_and_remove(inputfile_path, outputfile_path)
+
         models.TestCase.objects.filter(pk=testcase_id).delete()
         return (True, None)
     except Exception as e:
@@ -312,7 +334,8 @@ def delete_testcase(testcase_id: str) -> Tuple[bool, Optional[str]]:


 def process_submission(problem_id: str, participant: str, file_type: str,
-                       submission_file, timestamp: str) -> Tuple[bool, Optional[str]]:
+                       submission_file: InMemoryUploadedFile,
+                       timestamp: str) -> Tuple[bool, Optional[str]]:
     """
     Function to process a new :class:`~judge.models.Submission` for a problem by a participant.
@@ -326,9 +349,9 @@ def process_submission(problem_id: str, participant: str, file_type: str,
     """
     try:
         problem = models.Problem.objects.get(pk=problem_id)
-        if file_type not in problem.file_format.split(','):
+        if file_type not in problem.file_exts.split(','):
             return (False, 'Accepted file types: \"{}\"'
-                    .format(', '.join(problem.file_format.split(','))))
+                    .format(', '.join(problem.file_exts.split(','))))
         participant = models.Person.objects.get(email=participant)
         s = problem.submission_set.create(participant=participant, file_type=file_type,
                                           submission_file=submission_file, timestamp=timestamp)
@@ -658,7 +681,7 @@ def get_personcontest_score(person: str, contest: int) -> Tuple[bool, Union[floa


 def get_submissions(problem_id: str, person_id: Optional[str]) \
-    -> Tuple[bool, Union[Dict[str, List[Any]], str]]:
+        -> Tuple[bool, Union[Dict[str, List[Any]], str]]:
     """
     Function to retrieve all submissions made by everyone or a specific person
     for this problem.
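With the signature refactored to **kwargs, callers of process_problem now forward the form fields directly into the Problem constructor. A minimal sketch of the new calling convention, with the keyword names taken from the docstring above (the field values themselves are illustrative, not from this patch):

    status, message = process_problem(
        contest=1,
        code='TESTPROB',
        name='Test Problem',
        statement='{"ops":[{"insert":"\\n"}]}',  # empty Quill payload, replaced with default text
        input_format='Two space-separated integers per line',
        output_format='One integer per line',
        difficulty=3,
        time_limit=10,
        memory_limit=200000,
        file_exts='.py,.cpp',
        starting_code=None,
        max_score=10,
        compilation_script=None,  # falls back to ./default/compilation_script.sh
        test_script=None)         # falls back to ./default/test_script.sh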
diff --git a/judge/migrations/0001_initial.py b/judge/migrations/0001_initial.py
index 737053f..8bdf104 100644
--- a/judge/migrations/0001_initial.py
+++ b/judge/migrations/0001_initial.py
@@ -1,4 +1,4 @@
-# Generated by Django 2.2.1 on 2019-06-02 07:16
+# Generated by Django 2.2.1 on 2019-06-05 06:21

 import datetime
 from django.db import migrations, models
@@ -47,10 +47,10 @@ class Migration(migrations.Migration):
                 ('input_format', models.TextField(default='No input format specified.')),
                 ('output_format', models.TextField(default='No output format specified.')),
                 ('difficulty', models.PositiveSmallIntegerField(default=0)),
-                ('time_limit', models.DurationField(default=datetime.timedelta(seconds=10))),
+                ('time_limit', models.DurationField(default=datetime.timedelta(0, 10))),
                 ('memory_limit', models.PositiveIntegerField(default=200000)),
-                ('file_format', models.CharField(default='.py,.cpp', max_length=100)),
-                ('start_code', models.FileField(null=True, upload_to=judge.models.start_code_name)),
+                ('file_exts', models.CharField(default='.py,.cpp', max_length=100)),
+                ('starting_code', models.FileField(null=True, upload_to=judge.models.starting_code_name)),
                 ('max_score', models.PositiveSmallIntegerField(default=0)),
                 ('compilation_script', models.FileField(default='./default/compilation_script.sh', upload_to=functools.partial(judge.models.compilation_test_upload_location, *(), **{'is_compilation': True}))),
                 ('test_script', models.FileField(default='./default/test_script.sh', upload_to=functools.partial(judge.models.compilation_test_upload_location, *(), **{'is_compilation': False}))),
@@ -97,7 +97,7 @@ class Migration(migrations.Migration):
             name='SubmissionTestCase',
             fields=[
                 ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
-                ('verdict', models.CharField(choices=[('F', 'Fail'), ('P', 'Pass'), ('R', 'Running'), ('TE', 'TLE'), ('ME', 'OOM'), ('CE', 'COMPILATION_ERROR'), ('RE', 'RUNTIME_ERROR'), ('NA', 'NOT_AVAILABLE')], default='NA', max_length=2)),
+                ('verdict', models.CharField(choices=[('F', 'Failed'), ('P', 'Passed'), ('R', 'Running'), ('TE', 'Time Limit Exceeded'), ('ME', 'Out Of Memory'), ('CE', 'Compilation Error'), ('RE', 'Runtime Error'), ('NA', 'Internal Failure')], default='NA', max_length=2)),
                 ('memory_taken', models.PositiveIntegerField()),
                 ('time_taken', models.DurationField()),
                 ('message', models.TextField(default='')),
diff --git a/judge/models.py b/judge/models.py
index 976478b..8ea578d 100644
--- a/judge/models.py
+++ b/judge/models.py
@@ -7,7 +7,7 @@ from django.utils import timezone


-def start_code_name(instance, filename):
+def starting_code_name(instance, filename):
     return 'content/problems/{}/start_code{}'.format(instance.code,
                                                      splitext(filename)[1])

@@ -105,10 +105,10 @@ class Problem(models.Model):
     """Problem memory limit"""

     # Support upto 30 file formats
-    file_format = models.CharField(max_length=100, default='.py,.cpp')
-    """Accepted file formats for submissions to problem"""
+    file_exts = models.CharField(max_length=100, default='.py,.cpp')
+    """Accepted file extensions for submissions to problem"""

-    start_code = models.FileField(upload_to=start_code_name, null=True)
+    starting_code = models.FileField(upload_to=starting_code_name, null=True)
     """Problem starting code"""

     max_score = models.PositiveSmallIntegerField(default=0)
@@ -246,14 +246,14 @@ class SubmissionTestCase(models.Model):

     # Possible Verdicts
     VERDICT = (
-        ('F', 'Fail'),
-        ('P', 'Pass'),
+        ('F', 'Failed'),
+        ('P', 'Passed'),
         ('R', 'Running'),
-        ('TE', 'TLE'),
-        ('ME', 'OOM'),
-        ('CE', 'COMPILATION_ERROR'),
-        ('RE', 'RUNTIME_ERROR'),
-        ('NA', 'NOT_AVAILABLE'))
+        ('TE', 'Time Limit Exceeded'),
+        ('ME', 'Out Of Memory'),
+        ('CE', 'Compilation Error'),
+        ('RE', 'Runtime Error'),
+        ('NA', 'Internal Failure'))

     submission = models.ForeignKey(Submission, on_delete=models.CASCADE)
     """Foreign key to submission"""
diff --git a/judge/templates/judge/base.html b/judge/templates/judge/base.html
index a041c51..e63c62f 100644
--- a/judge/templates/judge/base.html
+++ b/judge/templates/judge/base.html
@@ -5,7 +5,7 @@
-    <title>{% block title %}Home{% endblock %} | PDP</title>
+    <title>{% block title %}Home{% endblock %} | AUTOJUDGE</title>
@@ -30,7 +30,7 @@
 {% url 'judge:index' as homepage %}
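After this patch, a compilation or runtime error no longer travels inline in the verdict string; each testcase's status line instead ends with the name of a per-testcase log file, which the submission detail page can read and display on demand. A sketch of how a consumer of the status file might recover the message (the field order is inferred from the writes in compile_and_test.py and main_tester.sh above; the tmp directory path is an assumption):

    import os

    def read_status_line(line, tmp_dir='content/tmp'):
        # Per-line layout: <testcase_id> <verdict> <time> <memory> <log_file_name>
        testcase_id, verdict, time_taken, memory_taken, log_file_name = line.split()
        message = ''
        log_path = os.path.join(tmp_dir, log_file_name)
        if os.path.exists(log_path):  # the log may be absent or empty for passing cases
            with open(log_path) as log_file:
                message = log_file.read()
        return testcase_id, verdict, time_taken, memory_taken, message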