diff --git a/evalai/submissions.py b/evalai/submissions.py index ac8bccf62..b0b3a8da1 100644 --- a/evalai/submissions.py +++ b/evalai/submissions.py @@ -19,6 +19,7 @@ from evalai.utils.submissions import ( display_submission_details, display_submission_result, + display_submission_stderr_file, convert_bytes_to, ) from evalai.utils.urls import URLS @@ -63,6 +64,18 @@ def result(ctx): display_submission_result(ctx.submission_id) +@submission.command() +@click.pass_obj +def stderr(ctx): + """ + Display the submission error + """ + """ + Invoked by `evalai submission SUBMISSION_ID stderr`. + """ + display_submission_stderr_file(ctx.submission_id) + + @click.command() @click.argument("IMAGE", nargs=1) @click.option( diff --git a/evalai/utils/submissions.py b/evalai/utils/submissions.py index 8e7aae044..6c5b03a5d 100644 --- a/evalai/utils/submissions.py +++ b/evalai/utils/submissions.py @@ -292,3 +292,35 @@ def convert_bytes_to(byte, to, bsize=1024): unit = int(unit / bsize) return unit + + +def display_submission_stderr_file(submission_id): + response = submission_details_request(submission_id).json() + echo(requests.get(response['stderr_file']).text) + if response['status'] == "submitted": + echo( + style( + "\nThe Submission is yet to be evaluated.\n", + bold=True, + fg="yellow", + ) + ) + + elif response['status'] == "failed": + echo( + style( + "\nThe Submission failed.\n", + bold=True, + fg="red", + ) + ) + + elif response['status'] == "running": + echo( + style( + "\nThe Submission is still running.\n", + bold=True, + fg="red", + ) + ) + sys.exit(0) diff --git a/tests/data/submission_response.py b/tests/data/submission_response.py index e9ee30474..33a1facdb 100644 --- a/tests/data/submission_response.py +++ b/tests/data/submission_response.py @@ -158,3 +158,78 @@ submission_result_file = """ [{"Total": 60, "Metric1": 61, "Metric2": 62, "Metric3": 63}] """ + + +submission_result_with_stderr_file_with_status_of_submitted = """ + { + "id": 48728, + "participant_team": 3519, + "participant_team_name": "test", + "execution_time": 0.085137, + "challenge_phase": 251, + "created_by": 5672, + "status": "submitted", + "input_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/a93d2f2b-ac19-409d-a97d-7240ea336a0c.txt", + "stdout_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/0b2c4396-e078-4b95-b041-83801a430874.txt", + "stderr_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/39f3b087-8f86-4757-9c93-bf0b26c1a3c2.txt", + "submitted_at": "2019-12-11T05:37:24.259890Z", + "method_name": "null", + "method_description": "null", + "project_url": "null", + "publication_url": "null", + "is_public": false, + "is_flagged": false, + "submission_result_file": null, + "when_made_public": null, + "is_baseline": false + }""" + + +submission_result_with_stderr_file_with_status_of_failed = """ + { + "id": 48928, + "participant_team": 3519, + "participant_team_name": "test", + "execution_time": 0.085137, + "challenge_phase": 251, + "created_by": 5672, + "status": "failed", + "input_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/a93d2f2b-ac19-409d-a97d-7240ea336a0c.txt", + "stdout_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/0b2c4396-e078-4b95-b041-83801a430874.txt", + "stderr_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/39f3b087-8f86-4757-9c93-bf0b26c1a3c2.txt", + "submitted_at": "2019-12-11T05:37:24.259890Z", + "method_name": 
"null", + "method_description": "null", + "project_url": "null", + "publication_url": "null", + "is_public": false, + "is_flagged": false, + "submission_result_file": null, + "when_made_public": null, + "is_baseline": false + }""" + + +submission_result_with_stderr_file_with_status_of_running = """ + { + "id": 49928, + "participant_team": 3519, + "participant_team_name": "test", + "execution_time": 0.085137, + "challenge_phase": 251, + "created_by": 5672, + "status": "running", + "input_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/a93d2f2b-ac19-409d-a97d-7240ea336a0c.txt", + "stdout_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/0b2c4396-e078-4b95-b041-83801a430874.txt", + "stderr_file": "https://evalai.s3.amazonaws.com/media/submission_files/submission_48728/39f3b087-8f86-4757-9c93-bf0b26c1a3c2.txt", + "submitted_at": "2019-12-11T05:37:24.259890Z", + "method_name": "null", + "method_description": "null", + "project_url": "null", + "publication_url": "null", + "is_public": false, + "is_flagged": false, + "submission_result_file": null, + "when_made_public": null, + "is_baseline": false + }""" diff --git a/tests/test_submissions.py b/tests/test_submissions.py index 02f65fc76..851386910 100644 --- a/tests/test_submissions.py +++ b/tests/test_submissions.py @@ -290,3 +290,91 @@ def test_make_submission_for_docker_based_challenge( ], ) assert result.exit_code == 0 + + +class TestDisplayStderrFile(BaseTestClass): + def setup(self): + self.submission_stderr_result_submitted = json.loads(submission_response.submission_result_with_stderr_file_with_status_of_submitted) + self.submission_stderr_result_failed = json.loads(submission_response.submission_result_with_stderr_file_with_status_of_failed) + self.submission_stderr_result_running = json.loads(submission_response.submission_result_with_stderr_file_with_status_of_running) + self.expected_stderr_text = "Testing display contents of stderr file" + + url = "{}{}" + responses.add( + responses.GET, + url.format(API_HOST_URL, URLS.get_submission.value).format( + "48728" + ), + json=self.submission_stderr_result_submitted, + status=200, + ) + + responses.add( + responses.GET, + url.format(API_HOST_URL, URLS.get_submission.value).format( + "48928" + ), + json=self.submission_stderr_result_failed, + status=200, + ) + + responses.add( + responses.GET, + url.format(API_HOST_URL, URLS.get_submission.value).format( + "49928" + ), + json=self.submission_stderr_result_running, + status=200, + ) + + responses.add( + responses.GET, + self.submission_stderr_result_submitted["stderr_file"], + body=self.expected_stderr_text, + status=200, + ) + + @responses.activate + def test_display_stderr_file_success_with_status_of_submitted(self): + expected = "{}\n\n{}" + expected = expected.format( + self.expected_stderr_text, + "The Submission is yet to be evaluated." + ) + runner = CliRunner() + result = runner.invoke( + submission, + ["48728", "stderr"] + ) + assert result.output.strip() == expected + assert result.exit_code == 0 + + @responses.activate + def test_display_stderr_file_success_with_status_of_failed(self): + expected = "{}\n\n{}" + expected = expected.format( + self.expected_stderr_text, + "The Submission failed." 
+ ) + runner = CliRunner() + result = runner.invoke( + submission, + ["48928", "stderr"] + ) + assert result.output.strip() == expected + assert result.exit_code == 0 + + @responses.activate + def test_display_stderr_file_success_with_status_of_running(self): + expected = "{}\n\n{}" + expected = expected.format( + self.expected_stderr_text, + "The Submission is still running." + ) + runner = CliRunner() + result = runner.invoke( + submission, + ["49928", "stderr"] + ) + assert result.output.strip() == expected + assert result.exit_code == 0
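
For reference, the new command is invoked as `evalai submission SUBMISSION_ID stderr`, as noted in its docstring. A minimal sketch of exercising the new helper directly, assuming an authenticated EvalAI host is already configured and that the submission ID exists (48728 is simply the ID used in the fixtures above):

    # Sketch only: fetches the submission details, prints the contents of its
    # stderr_file, then prints a note when the status is submitted/failed/running.
    from evalai.utils.submissions import display_submission_stderr_file

    display_submission_stderr_file(48728)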