From 1c4c5ee415e1ef9a71569633a6b7261dcefc35f0 Mon Sep 17 00:00:00 2001 From: Conor Date: Mon, 24 Oct 2022 12:10:35 -0500 Subject: [PATCH] Add option to write test cases to JSON file (#366) --- .github/workflows/ci-cd.yml | 8 + README.md | 52 ++++- action.yml | 4 + composite/action.yml | 6 +- python/publish/publisher.py | 30 ++- python/publish/unittestresults.py | 28 ++- python/publish_test_results.py | 1 + python/test/test_action_script.py | 5 +- python/test/test_publish.py | 247 ++++++++++++----------- python/test/test_publisher.py | 293 ++++++++++++++++++++++++---- python/test/test_unittestresults.py | 102 +++++----- 11 files changed, 545 insertions(+), 231 deletions(-) diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml index 2080d466..104b631d 100644 --- a/.github/workflows/ci-cd.yml +++ b/.github/workflows/ci-cd.yml @@ -169,6 +169,7 @@ jobs: check_name: Test Results (Dockerfile) junit_files: "artifacts/**/*.xml" json_file: "tests.json" + json_test_case_results: true log_level: DEBUG - name: JSON output @@ -251,6 +252,7 @@ jobs: -e INPUT_SECONDS_BETWEEN_GITHUB_READS \ -e INPUT_SECONDS_BETWEEN_GITHUB_WRITES \ -e INPUT_JSON_THOUSANDS_SEPARATOR \ + -e INPUT_JSON_TEST_CASE_RESULTS \ -e HOME \ -e GITHUB_JOB \ -e GITHUB_REF \ @@ -303,6 +305,8 @@ jobs: INPUT_CHECK_NAME: Test Results (Docker Image) INPUT_JUNIT_FILES: "artifacts/**/*.xml" INPUT_JSON_FILE: "tests.json" + INPUT_JSON_TEST_CASE_RESULTS: true + - name: JSON output uses: ./misc/action/json-output @@ -428,6 +432,7 @@ jobs: check_name: Test Results (${{ matrix.os-label }} python ${{ matrix.python }}) junit_files: "artifacts${{ steps.os.outputs.path-sep }}**${{ steps.os.outputs.path-sep }}*.xml" json_file: "tests.json" + json_test_case_results: true - name: JSON output uses: ./misc/action/json-output @@ -478,6 +483,7 @@ jobs: check_name: Test Results (setup-python) junit_files: "artifacts/**/*.xml" json_file: "tests.json" + json_test_case_results: true - name: JSON output uses: ./misc/action/json-output @@ -521,6 +527,7 @@ jobs: xunit_files: "test-files/xunit/**/*.xml" trx_files: "test-files/trx/**/*.trx" json_file: "tests.json" + json_test_case_results: true log_level: DEBUG - name: JSON output @@ -562,6 +569,7 @@ jobs: fail_on: nothing junit_files: "test-files/pytest/junit.gloo.standalone.xml" json_file: "tests.json" + json_test_case_results: true log_level: DEBUG - name: JSON output diff --git a/README.md b/README.md index ced5d9bf..4f4e6d3b 100644 --- a/README.md +++ b/README.md @@ -268,6 +268,7 @@ The list of most notable options: |`check_run_annotations_branch`|`event.repository.default_branch` or `"main, master"`|Adds check run annotations only on given branches. If not given, this defaults to the default branch of your repository, e.g. `main` or `master`. Comma separated list of branch names allowed, asterisk `"*"` matches all branches. Example: `main, master, branch_one`.| |`json_file`|no file|Results are written to this JSON file.| |`json_thousands_separator`|`" "`|Formatted numbers in JSON use this character to separate groups of thousands. Common values are "," or ".". Defaults to punctuation space (\u2008).| +|`json_test_case_results`|`false`|Write out all individual test case results to the JSON file. Setting this to `true` can greatly increase the size of the output. Defaults to `false`.| |`fail_on`|`"test failures"`|Configures the state of the created test result check run. With `"test failures"` it fails if any test fails or test errors occur. 
It never fails when set to `"nothing"`, and fails only on errors when set to `"errors"`.| Pull request comments highlight removal of tests or tests that the pull request moves into skip state. @@ -360,7 +361,11 @@ via `json_thousands_separator`. Formatted numbers are especially useful when tho is not easily available, e.g. when [creating a badge from test results](#create-a-badge-from-test-results). The optional `json_file` allows to configure a file where extended JSON information are to be written. -Compared to `"Access JSON via step outputs"` above, `errors` and `annotations` contain more information than just the number of errors and annotations, respectively: +Compared to `"Access JSON via step outputs"` above, `errors` and `annotations` contain more information +than just the number of errors and annotations, respectively. + +Additionally, `json_test_case_results` can be enabled to add the `cases` field to the JSON file, which provides +all test results of all tests. Enabling this may greatly increase the output size of the JSON file. ```json { @@ -388,9 +393,54 @@ Compared to `"Access JSON via step outputs"` above, `errors` and `annotations` c "title": "1 out of 3 runs failed: test_events (test.Tests)", "raw_details": "self = \n\n def test_events(self):\n > self.do_test_events(3)\n\n test.py:821:\n _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\n test.py:836: in do_test_events\n self.do_test_rsh(command, 143, events=events)\n test.py:852: in do_test_rsh\n self.assertEqual(expected_result, res)\n E AssertionError: 143 != 0\n " } + ], + "cases": [ + { + "class_name": "class1", + "test_name": "test1", + "states": { + "success": [ + { + "result_file": "result", + "test_file": "test", + "line": 123, + "class_name": "class1", + "test_name": "test1", + "result": "success", + "message": "message1", + "content": "content1", + "stdout": "stdout1", + "stderr": "stderr1", + "time": 1 + } + ] + } + }, + { + "class_name": "class1", + "test_name": "test2", + "states": { + "skipped": [ + { + "result_file": "result", + "test_file": "test", + "line": 123, + "class_name": "class1", + "test_name": "test2", + "result": "skipped", + "message": "message2", + "content": "content2", + "stdout": "stdout2", + "stderr": "stderr2", + "time": 2 + } + ] + } + } ] } ``` + See [Create a badge from test results](#create-a-badge-from-test-results) for an example on how to create a badge from this JSON. diff --git a/action.yml b/action.yml index b9af055c..d54a80ee 100644 --- a/action.yml +++ b/action.yml @@ -101,6 +101,10 @@ inputs: description: 'Formatted numbers in JSON use this character to separate groups of thousands. Common values are "," or ".". Defaults to punctuation space (\u2008).' default: ' ' required: false + json_test_case_results: + description: 'Write out all individual test case results to the JSON file. Setting this to "true" can greatly increase the size of the output. Defaults to "false".' + default: false + required: false outputs: json: diff --git a/composite/action.yml b/composite/action.yml index 9211f972..da2dfaee 100644 --- a/composite/action.yml +++ b/composite/action.yml @@ -101,7 +101,10 @@ inputs: description: 'Formatted numbers in JSON use this character to separate groups of thousands. Common values are "," or ".". Defaults to punctuation space (\u2008).' default: ' ' required: false - + json_test_case_results: + description: 'Write out all individual test case results to the JSON file. Setting this to "true" can greatly increase the size of the output. 
Defaults to "false".' + default: false + required: false outputs: json: description: "Test results as JSON" @@ -177,6 +180,7 @@ runs: SECONDS_BETWEEN_GITHUB_WRITES: ${{ inputs.seconds_between_github_writes }} JSON_FILE: ${{ inputs.json_file }} JSON_THOUSANDS_SEPARATOR: ${{ inputs.json_thousands_separator }} + JSON_TEST_CASE_RESULTS: ${{ inputs.json_test_case_results }} JOB_SUMMARY: ${{ inputs.job_summary }} # not documented ROOT_LOG_LEVEL: ${{ inputs.root_log_level }} diff --git a/python/publish/publisher.py b/python/publish/publisher.py index 08e1c730..3e28df3d 100644 --- a/python/publish/publisher.py +++ b/python/publish/publisher.py @@ -24,7 +24,7 @@ from publish import logger from publish.github_action import GithubAction from publish.unittestresults import UnitTestCaseResults, UnitTestRunResults, UnitTestRunDeltaResults, \ - UnitTestRunResultsOrDeltaResults, get_stats_delta + UnitTestRunResultsOrDeltaResults, get_stats_delta, create_unit_test_case_results @dataclass(frozen=True) @@ -40,6 +40,7 @@ class Settings: commit: str json_file: Optional[str] json_thousands_separator: str + json_test_case_results: bool fail_on_errors: bool fail_on_failures: bool # one of these *_files_glob must be set @@ -72,6 +73,7 @@ class PublishData: stats_with_delta: Optional[UnitTestRunDeltaResults] annotations: List[Annotation] check_url: str + cases: Optional[UnitTestCaseResults] @classmethod def _format_digit(cls, value: Union[int, Mapping[str, int], Any], thousands_separator: str) -> Union[str, Mapping[str, str], Any]: @@ -100,24 +102,41 @@ def _formatted_stats_and_delta(cls, def _as_dict(self) -> Dict[str, Any]: self_without_exceptions = dataclasses.replace( self, + # remove exceptions stats=self.stats.without_exceptions(), - stats_with_delta=self.stats_with_delta.without_exceptions() if self.stats_with_delta else None + stats_with_delta=self.stats_with_delta.without_exceptions() if self.stats_with_delta else None, + # turn defaultdict into simple dict + cases={test: {state: cases for state, cases in states.items()} + for test, states in self.cases.items()} if self.cases else None ) + # the dict_factory removes None values return dataclasses.asdict(self_without_exceptions, dict_factory=lambda x: {k: v for (k, v) in x if v is not None}) def to_dict(self, thousands_separator: str) -> Mapping[str, Any]: d = self._as_dict() + + # beautify cases, turn tuple-key into proper fields + if d.get('cases'): + d['cases'] = [{k: v for k, v in [('file_name', test[0]), + ('class_name', test[1]), + ('test_name', test[2]), + ('states', states)] + if v} + for test, states in d['cases'].items()] + + # provide formatted stats and delta d.update(formatted=self._formatted_stats_and_delta( d.get('stats'), d.get('stats_with_delta'), thousands_separator )) + return d def to_reduced_dict(self, thousands_separator: str) -> Mapping[str, Any]: data = self._as_dict() - # replace some large fields with their lengths + # replace some large fields with their lengths and delete individual test cases if present def reduce(d: Dict[str, Any]) -> Dict[str, Any]: d = deepcopy(d) if d.get('stats', {}).get('errors') is not None: @@ -126,6 +145,8 @@ def reduce(d: Dict[str, Any]) -> Dict[str, Any]: d['stats_with_delta']['errors'] = len(d['stats_with_delta']['errors']) if d.get('annotations') is not None: d['annotations'] = len(d['annotations']) + if d.get('cases') is not None: + del d['cases'] return d data = reduce(data) @@ -347,7 +368,8 @@ def publish_check(self, stats=stats, stats_with_delta=stats_with_delta if before_stats is not None else 
None, annotations=all_annotations, - check_url=check_run.html_url + check_url=check_run.html_url, + cases=cases if self._settings.json_test_case_results else None ) self.publish_json(data) diff --git a/python/publish/unittestresults.py b/python/publish/unittestresults.py index d611fd7b..62ba7796 100644 --- a/python/publish/unittestresults.py +++ b/python/publish/unittestresults.py @@ -1,7 +1,8 @@ import dataclasses from collections import defaultdict +from copy import deepcopy from dataclasses import dataclass -from typing import Optional, List, Mapping, Any, Union, Dict, Callable +from typing import Optional, List, Mapping, Any, Union, Dict, Callable, Tuple, AbstractSet from xml.etree.ElementTree import ParseError as XmlParseError @@ -20,11 +21,18 @@ class UnitTestCase: time: Optional[float] -class UnitTestCaseResults(defaultdict): - def __init__(self, items=None): - if items is None: - items = [] - super(UnitTestCaseResults, self).__init__(lambda: defaultdict(list), items) +UnitTestCaseFileName = str +UnitTestCaseClassName = str +UnitTestCaseTestName = str +UnitTestCaseResultKey = Tuple[Optional[UnitTestCaseFileName], UnitTestCaseClassName, UnitTestCaseTestName] +UnitTestCaseState = str +UnitTestCaseResults = Mapping[UnitTestCaseResultKey, Mapping[UnitTestCaseState, List[UnitTestCase]]] + + +def create_unit_test_case_results(indexed_cases: Optional[UnitTestCaseResults] = None) -> UnitTestCaseResults: + if indexed_cases: + return deepcopy(indexed_cases) + return defaultdict(lambda: defaultdict(list)) @dataclass(frozen=True) @@ -130,7 +138,7 @@ def without_cases(self): cases_failures=self.suite_failures, cases_errors=self.suite_errors, cases_time=self.suite_time, - case_results=UnitTestCaseResults(), + case_results=create_unit_test_case_results(), tests=self.suite_tests, tests_skipped=self.suite_skipped, @@ -390,7 +398,7 @@ def without_exceptions(self) -> 'UnitTestRunDeltaResults': UnitTestRunResultsOrDeltaResults = Union[UnitTestRunResults, UnitTestRunDeltaResults] -def aggregate_states(states: List[str]) -> str: +def aggregate_states(states: AbstractSet[str]) -> str: return 'error' if 'error' in states else \ 'failure' if 'failure' in states else \ 'success' if 'success' in states else \ @@ -419,7 +427,7 @@ def get_test_results(parsed_results: ParsedUnitTestResultsWithCommit, cases_time = sum([case.time or 0 for case in cases]) # index cases by tests and state - cases_results = UnitTestCaseResults() + cases_results = create_unit_test_case_results() for case in cases: # index by test file name (when de-duplicating by file name), class name and test name test = (case.test_file if dedup_classes_by_file_name else None, case.class_name, case.test_name) @@ -432,7 +440,7 @@ def get_test_results(parsed_results: ParsedUnitTestResultsWithCommit, test_results = dict() for test, states in cases_results.items(): - test_results[test] = aggregate_states(states) + test_results[test] = aggregate_states(states.keys()) tests = len(test_results) tests_skipped = len([test for test, state in test_results.items() if state in ['skipped', 'disabled']]) diff --git a/python/publish_test_results.py b/python/publish_test_results.py index 15901d71..7fe79dd3 100644 --- a/python/publish_test_results.py +++ b/python/publish_test_results.py @@ -370,6 +370,7 @@ def get_settings(options: dict, gha: Optional[GithubAction] = None) -> Settings: commit=get_var('COMMIT', options) or get_commit_sha(event, event_name, options), json_file=get_var('JSON_FILE', options), json_thousands_separator=get_var('JSON_THOUSANDS_SEPARATOR', 
options) or punctuation_space, + json_test_case_results=get_bool_var('JSON_TEST_CASE_RESULTS', options, default=False), fail_on_errors=fail_on_errors, fail_on_failures=fail_on_failures, junit_files_glob=get_var('JUNIT_FILES', options) or default_junit_files_glob, diff --git a/python/test/test_action_script.py b/python/test/test_action_script.py index 13eaef96..e33a72ce 100644 --- a/python/test/test_action_script.py +++ b/python/test/test_action_script.py @@ -178,7 +178,8 @@ def get_settings(token='token', seconds_between_github_reads=1.5, seconds_between_github_writes=2.5, json_file=None, - json_thousands_separator=punctuation_space) -> Settings: + json_thousands_separator=punctuation_space, + json_test_case_results=False) -> Settings: return Settings( token=token, api_url=api_url, @@ -191,6 +192,7 @@ def get_settings(token='token', commit=commit, json_file=json_file, json_thousands_separator=json_thousands_separator, + json_test_case_results=json_test_case_results, fail_on_errors=fail_on_errors, fail_on_failures=fail_on_failures, junit_files_glob=junit_files_glob, @@ -849,7 +851,6 @@ def test_parse_files(self): gha = GithubAction(file=string) with mock.patch('publish.github_action.logger') as m: log_parse_errors(actual.errors, gha) - self.maxDiff = None expected = [ "::error::lxml.etree.XMLSyntaxError: Start tag expected, '<' not found, line 1, column 1", "::error file=non-xml.xml::Error processing result file: Start tag expected, '<' not found, line 1, column 1 (non-xml.xml, line 1)", diff --git a/python/test/test_publish.py b/python/test/test_publish.py index 688392a2..874ae5a7 100644 --- a/python/test/test_publish.py +++ b/python/test/test_publish.py @@ -16,8 +16,7 @@ get_case_annotations, get_case_annotation, get_all_tests_list_annotation, \ get_skipped_tests_list_annotation, get_case_messages, chunk_test_list, message_is_contained_in_content from publish.junit import parse_junit_xml_files, process_junit_xml_elems -from publish.unittestresults import get_stats, UnitTestCase, ParseError -from publish.unittestresults import get_test_results +from publish.unittestresults import get_stats, UnitTestCase, ParseError, get_test_results, create_unit_test_case_results from test_utils import temp_locale, d, n test_files_path = pathlib.Path(__file__).resolve().parent / 'files' / 'junit-xml' @@ -1441,40 +1440,40 @@ def test_get_test_changes_summary_md_rename_skip_tests(self): get_test_changes_summary_md(changes, 3)) def test_get_case_messages(self): - results = UnitTestCaseResults([ - ((None, 'class1', 'test1'), dict([ - ('success', list([ + results = create_unit_test_case_results({ + (None, 'class1', 'test1'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1.0), UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1.1), UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='success', message='message2', content='content2', stdout='stdout2', stderr='stderr2', time=1.2), - ])), - ('skipped', list([ + ], + 'skipped': [ UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='skipped', message='message2', content='content2', stdout='stdout2', 
stderr='stderr2', time=None), UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='skipped', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=None), - ])), - ('failure', list([ + ], + 'failure': [ UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='failure', message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=1.23), UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='failure', message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=1.234), - ])), - ('error', list([ + ], + 'error': [ UnitTestCase(result_file='result-file1', test_file='file1', line=1, class_name='class1', test_name='test1', result='error', message='message5', content='content5', stdout='stdout5', stderr='stderr5', time=1.2345), - ])), - ])), - ((None, 'class2', 'test2'), dict([ - ('success', list([ + ], + }, + (None, 'class2', 'test2'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='success', message=None, content=None, stdout=None, stderr=None, time=None) - ])), - ('skipped', list([ + ], + 'skipped': [ UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='skipped', message=None, content=None, stdout=None, stderr=None, time=None) - ])), - ('failure', list([ + ], + 'failure': [ UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='failure', message=None, content=None, stdout=None, stderr=None, time=None) - ])), - ('error', list([ + ], + 'error': [ UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='error', message=None, content=None, stdout=None, stderr=None, time=None) - ])), - ])) - ]) + ], + } + }) expected = CaseMessages([ ((None, 'class1', 'test1'), dict([ @@ -1666,38 +1665,38 @@ def test_get_case_annotation_report_individual_runs(self): self.assertEqual(Annotation(path='file1', start_line=123, end_line=123, start_column=None, end_column=None, annotation_level='failure', message='result-file1', title='1 out of 6 runs with error: test1 (class1)', raw_details='actual message'), get_case_annotation(messages, (None, 'class1', 'test1'), 'error', 'message5', report_individual_runs=True)) def test_get_case_annotations(self): - results = UnitTestCaseResults([ - ((None, 'class1', 'test1'), dict([ - ('success', list([ + results = create_unit_test_case_results({ + (None, 'class1', 'test1'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ('skipped', list([ + ], + 'skipped': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name=None, test_name='test1', result='skipped', message='skip message', content='skip content', stdout='skip stdout', stderr='skip stderr', time=None) - ])), - ('failure', list([ + ], + 'failure': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='fail message 1', content='fail content 1', stdout='fail stdout 1', stderr='fail stderr 1', 
time=1.2), UnitTestCase(result_file='result-file2', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='fail message 2', content='fail content 2', stdout='fail stdout 2', stderr='fail stderr 2', time=1.23), UnitTestCase(result_file='result-file3', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='fail message 3', content='fail content 3', stdout='fail stdout 3', stderr='fail stderr 3', time=1.234) - ])), - ('error', list([ + ], + 'error': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='error', message='error message', content='error content', stdout='error stdout', stderr='error stderr', time=1.2345) - ])), - ])), - ((None, 'class2', 'test2'), dict([ - ('success', list([ + ], + }, + (None, 'class2', 'test2'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='success', message=None, content=None, stdout=None, stderr=None, time=None) - ])), - ('skipped', list([ + ], + 'skipped': [ UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='skipped', message=None, content=None, stdout=None, stderr=None, time=None) - ])), - ('failure', list([ + ], + 'failure': [ UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='failure', message=None, content=None, stdout=None, stderr=None, time=None) - ])), - ('error', list([ + ], + 'error': [ UnitTestCase(result_file='result-file1', test_file=None, line=None, class_name='class2', test_name='test2', result='error', message=None, content=None, stdout=None, stderr=None, time=None) - ])), - ])) - ]) + ], + } + }) expected = [ Annotation( @@ -1748,24 +1747,24 @@ def test_get_case_annotations(self): self.assertEqual(expected, annotations) def test_get_case_annotations_report_individual_runs(self): - results = UnitTestCaseResults([ - ((None, 'class1', 'test1'), dict([ - ('success', list([ + results = create_unit_test_case_results({ + (None, 'class1', 'test1'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ('skipped', list([ + ], + 'skipped': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name=None, test_name='test1', result='skipped', message='skip message', content='skip content', stdout='skip stdout', stderr='skip stderr', time=None) - ])), - ('failure', list([ + ], + 'failure': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='fail message 1', content='fail content 1', stdout='fail stdout 1', stderr='fail stderr 1', time=1.2), UnitTestCase(result_file='result-file2', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='fail message 2', content='fail content 2', stdout='fail stdout 2', stderr='fail stderr 2', time=1.23), UnitTestCase(result_file='result-file3', test_file='file1', line=123, class_name='class1', test_name='test1', result='failure', message='fail message 2', content='fail content 2', stdout='fail stdout 2', stderr='fail stderr 2', time=1.234) - ])), - ('error', list([ + ], + 'error': [ 
UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='error', message='error message', content='error content', stdout='error stdout', stderr='error stderr', time=0.1) - ])), - ])) - ]) + ], + } + }) expected = [ Annotation( @@ -1812,56 +1811,56 @@ def test_get_error_annotation(self): self.assertEqual(Annotation(path='file', start_line=12, end_line=12, start_column=34, end_column=34, annotation_level='failure', message='message', title='Error processing result file', raw_details='file'), get_error_annotation(ParseError('file', 'message', 12, 34, ValueError('invalid value')))) def test_get_all_tests_list_annotation(self): - results = UnitTestCaseResults([ - ((None, 'class1', 'test2'), dict([ - ('success', list([ + results = create_unit_test_case_results({ + (None, 'class1', 'test2'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test2', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ])), - ((None, 'class1', 'test1'), dict([ - ('success', list([ + ], + }, + (None, 'class1', 'test1'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ('skipped', list([ + ], + 'skipped': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name=None, test_name='test1', result='skipped', message='skip message', content='skip content', stdout='skip stdout', stderr='skip stderr', time=None) - ])), - ])), - (('file', 'class1', 'test2'), dict([ - ('success', list([ + ], + }, + ('file', 'class1', 'test2'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test2', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ])) - ]) + ], + } + }) - self.assertEqual([], get_all_tests_list_annotation(UnitTestCaseResults())) + self.assertEqual([], get_all_tests_list_annotation(create_unit_test_case_results())) self.assertEqual([Annotation(path='.github', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='notice', message='There are 3 tests, see "Raw output" for the full list of tests.', title='3 tests found', raw_details='class1 ‑ test1\nclass1 ‑ test2\nfile ‑ class1 ‑ test2')], get_all_tests_list_annotation(results)) del results[(None, 'class1', 'test1')] del results[('file', 'class1', 'test2')] self.assertEqual([Annotation(path='.github', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='notice', message='There is 1 test, see "Raw output" for the name of the test.', title='1 test found', raw_details='class1 ‑ test2')], get_all_tests_list_annotation(results)) def test_get_all_tests_list_annotation_chunked(self): - results = UnitTestCaseResults([ - ((None, 'class1', 'test2'), dict([ - ('success', list([ + results = create_unit_test_case_results({ + (None, 'class1', 'test2'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test2', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ])), - 
((None, 'class1', 'test1'), dict([ - ('success', list([ + ], + }, + (None, 'class1', 'test1'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ('skipped', list([ + ], + 'skipped': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name=None, test_name='test1', result='skipped', message='skip message', content='skip content', stdout='skip stdout', stderr='skip stderr', time=None) - ])), - ])), - (('file', 'class1', 'test2'), dict([ - ('success', list([ + ], + }, + ('file', 'class1', 'test2'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test2', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ])) - ]) + ], + } + }) - self.assertEqual([], get_all_tests_list_annotation(UnitTestCaseResults())) + self.assertEqual([], get_all_tests_list_annotation(create_unit_test_case_results())) self.assertEqual( [ Annotation(path='.github', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='notice', message='There are 3 tests, see "Raw output" for the list of tests 1 to 2.', title='3 tests found (test 1 to 2)', raw_details='class1 ‑ test1\nclass1 ‑ test2'), @@ -1871,52 +1870,52 @@ def test_get_all_tests_list_annotation_chunked(self): ) def test_get_skipped_tests_list_annotation(self): - results = UnitTestCaseResults([ - ((None, 'class1', 'test2'), dict([ - ('skipped', list([ + results = create_unit_test_case_results({ + (None, 'class1', 'test2'): { + 'skipped': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test2', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ])), - ((None, 'class1', 'test1'), dict([ - ('success', list([ + ], + }, + (None, 'class1', 'test1'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ('skipped', list([ + ], + 'skipped': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name=None, test_name='test1', result='skipped', message='skip message', content='skip content', stdout='skip stdout', stderr='skip stderr', time=None) - ])), - ])), - (('file', 'class1', 'test2'), dict([ - ('success', list([ + ], + }, + ('file', 'class1', 'test2'): { + 'success': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test2', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ])) - ]) + ], + } + }) - self.assertEqual([], get_skipped_tests_list_annotation(UnitTestCaseResults())) + self.assertEqual([], get_skipped_tests_list_annotation(create_unit_test_case_results())) self.assertEqual([Annotation(path='.github', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='notice', message='There is 1 skipped test, see "Raw output" for the name of the skipped test.', title='1 skipped test found', raw_details='class1 ‑ 
test2')], get_skipped_tests_list_annotation(results)) del results[(None, 'class1', 'test1')]['success'] self.assertEqual([Annotation(path='.github', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='notice', message='There are 2 skipped tests, see "Raw output" for the full list of skipped tests.', title='2 skipped tests found', raw_details='class1 ‑ test1\nclass1 ‑ test2')], get_skipped_tests_list_annotation(results)) def test_get_skipped_tests_list_annotation_chunked(self): - results = UnitTestCaseResults([ - ((None, 'class1', 'test2'), dict([ - ('skipped', list([ + results = create_unit_test_case_results({ + (None, 'class1', 'test2'): { + 'skipped': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test2', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ])), - ((None, 'class1', 'test1'), dict([ - ('skipped', list([ + ], + }, + (None, 'class1', 'test1'): { + 'skipped': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test1', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ])), - (('file', 'class1', 'test2'), dict([ - ('skipped', list([ + ], + }, + ('file', 'class1', 'test2'): { + 'skipped': [ UnitTestCase(result_file='result-file1', test_file='file1', line=123, class_name='class1', test_name='test2', result='success', message='success message', content='success content', stdout='success stdout', stderr='success stderr', time=1.0) - ])), - ])) - ]) + ], + } + }) - self.assertEqual([], get_skipped_tests_list_annotation(UnitTestCaseResults())) + self.assertEqual([], get_skipped_tests_list_annotation(create_unit_test_case_results())) self.assertEqual( [ Annotation(path='.github', start_line=0, end_line=0, start_column=None, end_column=None, annotation_level='notice', message='There are 3 skipped tests, see "Raw output" for the list of skipped tests 1 to 2.', title='3 skipped tests found (test 1 to 2)', raw_details='class1 ‑ test1\nclass1 ‑ test2'), diff --git a/python/test/test_publisher.py b/python/test/test_publisher.py index b21a5450..cc8c9520 100644 --- a/python/test/test_publisher.py +++ b/python/test/test_publisher.py @@ -24,7 +24,7 @@ from publish.github_action import GithubAction from publish.publisher import Publisher, Settings, PublishData from publish.unittestresults import UnitTestCase, ParseError, UnitTestRunResults, UnitTestRunDeltaResults, \ - UnitTestCaseResults + UnitTestCaseResults, create_unit_test_case_results, get_test_results, get_stats, ParsedUnitTestResultsWithCommit sys.path.append(str(pathlib.Path(__file__).resolve().parent)) @@ -87,6 +87,7 @@ def create_settings(comment_mode=comment_mode_always, event_name: str = 'event name', json_file: Optional[str] = None, json_thousands_separator: str = punctuation_space, + json_test_case_results: Optional[bool] = False, pull_request_build: str = pull_request_build_mode_merge, test_changes_limit: Optional[int] = 5): return Settings( @@ -101,6 +102,7 @@ def create_settings(comment_mode=comment_mode_always, commit='commit', json_file=json_file, json_thousands_separator=json_thousands_separator, + json_test_case_results=json_test_case_results, fail_on_errors=True, fail_on_failures=True, junit_files_glob='*.xml', @@ -180,8 +182,8 @@ def create_check_run_hook(**kwargs) -> Mapping[str, Any]: return gh, gha, 
gh._Github__requester, repo, commit - cases = UnitTestCaseResults([ - ((None, 'class', 'test'), dict( + cases = create_unit_test_case_results({ + (None, 'class', 'test'): dict( success=[ UnitTestCase( result_file='result file', test_file='test file', line=0, @@ -200,8 +202,8 @@ def create_check_run_hook(**kwargs) -> Mapping[str, Any]: time=1.234 ) ] - )), - ((None, 'class', 'test2'), dict( + ), + (None, 'class', 'test2'): dict( skipped=[ UnitTestCase( result_file='result file', test_file='test file', line=0, @@ -220,8 +222,8 @@ def create_check_run_hook(**kwargs) -> Mapping[str, Any]: time=1.2345 ) ] - )), - ((None, 'class', 'test3'), dict( + ), + (None, 'class', 'test3'): dict( skipped=[ UnitTestCase( result_file='result file', test_file='test file', line=0, @@ -231,8 +233,8 @@ def create_check_run_hook(**kwargs) -> Mapping[str, Any]: time=None ) ] - )) - ]) + ) + }) @staticmethod def get_stats(base: str) -> UnitTestRunResults: @@ -269,7 +271,7 @@ def call_mocked_publish(settings: Settings, prs: List[object] = [], cr: object = None): # UnitTestCaseResults is mutable, always copy it - cases = UnitTestCaseResults(cases) + cases = create_unit_test_case_results(cases) # mock Publisher and call publish publisher = mock.MagicMock(Publisher) @@ -285,11 +287,11 @@ def call_mocked_publish(settings: Settings, return mock_calls def test_get_test_list_annotations(self): - cases = UnitTestCaseResults([ - ((None, 'class', 'test abcd'), {'success': [None]}), - ((None, 'class', 'test efgh'), {'skipped': [None]}), - ((None, 'class', 'test ijkl'), {'skipped': [None]}), - ]) + cases = create_unit_test_case_results({ + (None, 'class', 'test abcd'): {'success': [None]}, + (None, 'class', 'test efgh'): {'skipped': [None]}, + (None, 'class', 'test ijkl'): {'skipped': [None]}, + }) settings = self.create_settings(check_run_annotation=[all_tests_list, skipped_tests_list]) gh = mock.MagicMock() @@ -303,11 +305,11 @@ def test_get_test_list_annotations(self): ], annotations) def test_get_test_list_annotations_chunked_and_restricted_unicode(self): - cases = UnitTestCaseResults([ - ((None, 'class', 'test 𝒂'), {'success': [None]}), - ((None, 'class', 'test 𝒃'), {'skipped': [None]}), - ((None, 'class', 'test 𝒄'), {'skipped': [None]}), - ]) + cases = create_unit_test_case_results({ + (None, 'class', 'test 𝒂'): {'success': [None]}, + (None, 'class', 'test 𝒃'): {'skipped': [None]}, + (None, 'class', 'test 𝒄'): {'skipped': [None]}, + }) settings = self.create_settings(check_run_annotation=[all_tests_list, skipped_tests_list]) gh = mock.MagicMock() @@ -493,7 +495,7 @@ def test_publish_comment_compare_earlier(self): bcr = mock.MagicMock() bs = UnitTestRunResults(1, [], 1, 1, 3, 1, 2, 0, 0, 3, 1, 2, 0, 0, 'commit') stats = self.stats - cases = UnitTestCaseResults(self.cases) + cases = create_unit_test_case_results(self.cases) settings = self.create_settings(compare_earlier=True) publisher = mock.MagicMock(Publisher) publisher._settings = settings @@ -551,14 +553,14 @@ def test_publish_comment_compare_earlier_with_restricted_unicode(self): bs = UnitTestRunResults(1, [], 1, 1, 3, 1, 2, 0, 0, 3, 1, 2, 0, 0, 'commit') stats = self.stats # the new test cases with un-restricted unicode, as they come from test result files - cases = UnitTestCaseResults([ + cases = create_unit_test_case_results({ # removed test 𝒂 - ((None, 'class', 'test 𝒃'), {'success': [None]}), # unchanged test 𝒃 + (None, 'class', 'test 𝒃'): {'success': [None]}, # unchanged test 𝒃 # removed skipped 𝒄 - ((None, 'class', 'skipped 𝒅'), {'skipped': [None]}), # 
unchanged skipped 𝒅 - ((None, 'class', 'skipped 𝒆'), {'skipped': [None]}), # added skipped 𝒆 - ((None, 'class', 'test 𝒇'), {'success': [None]}), # added test 𝒇 - ]) + (None, 'class', 'skipped 𝒅'): {'skipped': [None]}, # unchanged skipped 𝒅 + (None, 'class', 'skipped 𝒆'): {'skipped': [None]}, # added skipped 𝒆 + (None, 'class', 'test 𝒇'): {'success': [None]}, # added test 𝒇 + }) settings = self.create_settings(compare_earlier=True) publisher = mock.MagicMock(Publisher) @@ -667,7 +669,7 @@ def test_publish_comment_compare_with_itself(self): pr = mock.MagicMock() cr = mock.MagicMock() stats = self.stats - cases = UnitTestCaseResults(self.cases) + cases = create_unit_test_case_results(self.cases) settings = self.create_settings(compare_earlier=True) publisher = mock.MagicMock(Publisher) publisher._settings = settings @@ -693,7 +695,7 @@ def test_publish_comment_compare_with_None(self): pr = mock.MagicMock(number="1234", create_issue_comment=mock.Mock(return_value=mock.MagicMock())) cr = mock.MagicMock() stats = self.stats - cases = UnitTestCaseResults(self.cases) + cases = create_unit_test_case_results(self.cases) settings = self.create_settings(compare_earlier=True) publisher = mock.MagicMock(Publisher) publisher._settings = settings @@ -742,7 +744,7 @@ def do_test_publish_comment_with_reuse_comment(self, one_exists: bool): cr = mock.MagicMock() lc = mock.MagicMock(body='latest comment') if one_exists else None stats = self.stats - cases = UnitTestCaseResults(self.cases) + cases = create_unit_test_case_results(self.cases) settings = self.create_settings(comment_mode=comment_mode_always, compare_earlier=False) publisher = mock.MagicMock(Publisher) publisher._settings = settings @@ -1369,8 +1371,8 @@ def test_publish_check_with_multiple_annotation_pages(self): publisher = Publisher(settings, gh, gha) # generate a lot cases - cases = UnitTestCaseResults([ - ((None, 'class', f'test{i}'), dict( + cases = create_unit_test_case_results({ + (None, 'class', f'test{i}'): dict( failure=[ UnitTestCase( result_file='result file', test_file='test file', line=i, @@ -1380,9 +1382,9 @@ def test_publish_check_with_multiple_annotation_pages(self): time=1.234 + i / 1000 ) ] - )) + ) for i in range(1, 151) - ]) + }) # makes gzipped digest deterministic with mock.patch('gzip.time.time', return_value=0): @@ -1496,13 +1498,201 @@ def test_publish_check_with_multiple_annotation_pages(self): title=f'Error processing result file', raw_details='file' )], - check_url='http://check-run.url' + check_url='http://check-run.url', + cases=create_unit_test_case_results({ + (None, 'class name', 'test name'): {"success": [ + UnitTestCase( + class_name='test.classpath.classname', + content='content', + line=1, + message='message', + result='success', + result_file='/path/to/test/test.classpath.classname', + stderr='stderr', + stdout='stdout', + test_file='file1', + test_name='casename', + time=0.1 + ) + ]}, + }) ) + def test_publish_check_with_cases(self): + results = get_test_results(ParsedUnitTestResultsWithCommit( + files=1, + errors=errors, + suites=2, suite_tests=3, suite_skipped=4, suite_failures=5, suite_errors=6, suite_time=7, + cases=[ + UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1), + UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test2', result='skipped', message='message2', content='content2', stdout='stdout2', 
stderr='stderr2', time=2), + UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test3', result='failure', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=3), + UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test1', result='error', message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=4), + UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test2', result='skipped', message='message5', content='content5', stdout='stdout5', stderr='stderr5', time=5), + UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test3', result='failure', message='message6', content='content6', stdout='stdout6', stderr='stderr6', time=6), + UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test4', result='failure', message='message7', content='content7', stdout='stdout7', stderr='stderr7', time=7), + ], + commit='commit' + ), False) + stats = get_stats(results) + + with tempfile.TemporaryDirectory() as path: + filepath = os.path.join(path, 'file.json') + settings = self.create_settings(event={}, json_file=filepath, json_test_case_results=True) + gh, gha, req, repo, commit = self.create_mocks(commit=mock.Mock(), digest=None, check_names=[]) + publisher = Publisher(settings, gh, gha) + + # makes gzipped digest deterministic + with mock.patch('gzip.time.time', return_value=0): + check_run, before_check_run = publisher.publish_check(stats, results.case_results, 'conclusion') + + repo.get_commit.assert_not_called() + + create_check_run_kwargs = dict( + name=settings.check_name, + head_sha=settings.commit, + status='completed', + conclusion='conclusion', + output={ + 'title': '1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s', + 'summary': '1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "duration of all tests")\n' + '7 tests\u2003\u205f\u20041 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "passed tests")\u20032 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "skipped / disabled tests")\u20033 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "failed tests")\u20031 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "test errors")\n' + '3 runs\u2006\u2003-12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "passed tests")\u20034 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "skipped / disabled tests")\u20035 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "failed tests")\u20036 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols "test errors")\n' + '\n' + 'Results for commit commit.\n' + '\n' + '[test-results]:data:application/gzip;base64,H4sIAAAAAAAC/02MSwqAMAxEryJd68I/eBmRWiH4qST' + 'tSry7URNxN+8NM4eZYHFkuiRPE0MRwgMFwxhxCOA3xpaRi0D/3FO0VoYiZthl/IppgIVF+QmH6FE2GDeS8o56l+' + 'XFZ96/SlnuamV9a1hYv64QGDSdF7scnZDbAAAA\n', + 'annotations': [ + {'path': 'file', 
'start_line': 1, 'end_line': 1, 'start_column': 2, 'end_column': 2, 'annotation_level': 'failure', 'message': 'error', 'title': 'Error processing result file', 'raw_details': 'file'}, + {'path': 'test', 'start_line': 123, 'end_line': 123, 'annotation_level': 'warning', 'message': 'result', 'title': 'test3 (class1) failed', 'raw_details': 'message3\ncontent3\nstdout3\nstderr3'}, + {'path': 'test', 'start_line': 123, 'end_line': 123, 'annotation_level': 'failure', 'message': 'result', 'title': 'test1 (class2) with error', 'raw_details': 'message4\ncontent4\nstdout4\nstderr4'}, + {'path': 'test', 'start_line': 123, 'end_line': 123, 'annotation_level': 'warning', 'message': 'result', 'title': 'test3 (class2) failed', 'raw_details': 'message6\ncontent6\nstdout6\nstderr6'}, + {'path': 'test', 'start_line': 123, 'end_line': 123, 'annotation_level': 'warning', 'message': 'result', 'title': 'test4 (class2) failed', 'raw_details': 'message7\ncontent7\nstdout7\nstderr7'}, + {'path': '.github', 'start_line': 0, 'end_line': 0, 'annotation_level': 'notice', 'message': 'There are 2 skipped tests, see "Raw output" for the full list of skipped tests.', 'title': '2 skipped tests found', 'raw_details': 'class1 ‑ test2\nclass2 ‑ test2'}, + {'path': '.github', 'start_line': 0, 'end_line': 0, 'annotation_level': 'notice', 'message': 'There are 7 tests, see "Raw output" for the full list of tests.', 'title': '7 tests found', 'raw_details': 'class1 ‑ test1\nclass1 ‑ test2\nclass1 ‑ test3\nclass2 ‑ test1\nclass2 ‑ test2\nclass2 ‑ test3\nclass2 ‑ test4'} + ] + } + ) + repo.create_check_run.assert_called_once_with(**create_check_run_kwargs) + + # this checks that publisher.publish_check returned + # the result of the last call to repo.create_check_run + self.assertIsInstance(check_run, mock.Mock) + self.assertTrue(hasattr(check_run, 'create_check_run_kwargs')) + self.assertEqual(create_check_run_kwargs, check_run.create_check_run_kwargs) + self.assertIsNone(before_check_run) + + # assert the json file + with open(filepath, encoding='utf-8') as r: + actual = r.read() + self.assertEqual( + '{' + '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' + '"summary": "' + '1 files    1 errors  2 suites   7s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"duration of all tests\\")\\n' + '7 tests   1 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"passed tests\\") 2 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"skipped / disabled tests\\") 3 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"failed tests\\") 1 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"test errors\\")\\n' + '3 runs  -12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"passed tests\\") 4 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"skipped / disabled tests\\") 5 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"failed tests\\") 6 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"test errors\\")\\n' + '\\n' + 'Results for commit commit.\\n", ' + '"conclusion": "conclusion", ' + '"stats": {"files": 1, "errors": 
[{"file": "file", "message": "error", "line": 1, "column": 2}], "suites": 2, "duration": 7, "tests": 7, "tests_succ": 1, "tests_skip": 2, "tests_fail": 3, "tests_error": 1, "runs": 3, "runs_succ": -12, "runs_skip": 4, "runs_fail": 5, "runs_error": 6, "commit": "commit"}, ' + '"annotations": [' + '{"path": "file", "start_line": 1, "end_line": 1, "start_column": 2, "end_column": 2, "annotation_level": "failure", "message": "error", "title": "Error processing result file", "raw_details": "file"}, ' + '{"path": "test", "start_line": 123, "end_line": 123, "annotation_level": "warning", "message": "result", "title": "test3 (class1) failed", "raw_details": "message3\\ncontent3\\nstdout3\\nstderr3"}, ' + '{"path": "test", "start_line": 123, "end_line": 123, "annotation_level": "failure", "message": "result", "title": "test1 (class2) with error", "raw_details": "message4\\ncontent4\\nstdout4\\nstderr4"}, ' + '{"path": "test", "start_line": 123, "end_line": 123, "annotation_level": "warning", "message": "result", "title": "test3 (class2) failed", "raw_details": "message6\\ncontent6\\nstdout6\\nstderr6"}, ' + '{"path": "test", "start_line": 123, "end_line": 123, "annotation_level": "warning", "message": "result", "title": "test4 (class2) failed", "raw_details": "message7\\ncontent7\\nstdout7\\nstderr7"}, ' + '{"path": ".github", "start_line": 0, "end_line": 0, "annotation_level": "notice", "message": "There are 2 skipped tests, see \\"Raw output\\" for the full list of skipped tests.", "title": "2 skipped tests found", "raw_details": "class1 ‑ test2\\nclass2 ‑ test2"}, ' + '{"path": ".github", "start_line": 0, "end_line": 0, "annotation_level": "notice", "message": "There are 7 tests, see \\"Raw output\\" for the full list of tests.", "title": "7 tests found", "raw_details": "class1 ‑ test1\\nclass1 ‑ test2\\nclass1 ‑ test3\\nclass2 ‑ test1\\nclass2 ‑ test2\\nclass2 ‑ test3\\nclass2 ‑ test4"}' + '], ' + '"check_url": "mock url", ' + '"cases": [' + '{' + '"class_name": "class1", ' + '"test_name": "test1", ' + '"states": {' + '"success": [' + '{"result_file": "result", "test_file": "test", "line": 123, "class_name": "class1", "test_name": "test1", "result": "success", "message": "message1", "content": "content1", "stdout": "stdout1", "stderr": "stderr1", "time": 1}' + ']' + '}' + '}, {' + '"class_name": "class1", ' + '"test_name": "test2", ' + '"states": {' + '"skipped": [' + '{"result_file": "result", "test_file": "test", "line": 123, "class_name": "class1", "test_name": "test2", "result": "skipped", "message": "message2", "content": "content2", "stdout": "stdout2", "stderr": "stderr2", "time": 2}' + ']' + '}' + '}, {' + '"class_name": "class1", ' + '"test_name": "test3", ' + '"states": {' + '"failure": [' + '{"result_file": "result", "test_file": "test", "line": 123, "class_name": "class1", "test_name": "test3", "result": "failure", "message": "message3", "content": "content3", "stdout": "stdout3", "stderr": "stderr3", "time": 3}' + ']' + '}' + '}, {' + '"class_name": "class2", ' + '"test_name": "test1", ' + '"states": {' + '"error": [' + '{"result_file": "result", "test_file": "test", "line": 123, "class_name": "class2", "test_name": "test1", "result": "error", "message": "message4", "content": "content4", "stdout": "stdout4", "stderr": "stderr4", "time": 4}' + ']' + '}' + '}, {' + '"class_name": "class2", ' + '"test_name": "test2", ' + '"states": {' + '"skipped": [' + '{"result_file": "result", "test_file": "test", "line": 123, "class_name": "class2", "test_name": "test2", "result": "skipped", 
"message": "message5", "content": "content5", "stdout": "stdout5", "stderr": "stderr5", "time": 5}' + ']' + '}' + '}, {' + '"class_name": "class2", ' + '"test_name": "test3", ' + '"states": {' + '"failure": [' + '{"result_file": "result", "test_file": "test", "line": 123, "class_name": "class2", "test_name": "test3", "result": "failure", "message": "message6", "content": "content6", "stdout": "stdout6", "stderr": "stderr6", "time": 6}' + ']' + '}' + '}, {' + '"class_name": "class2", ' + '"test_name": "test4", "states": {' + '"failure": [' + '{"result_file": "result", "test_file": "test", "line": 123, "class_name": "class2", "test_name": "test4", "result": "failure", "message": "message7", "content": "content7", "stdout": "stdout7", "stderr": "stderr7", "time": 7}' + ']' + '}' + '}' + '], ' + '"formatted": {"stats": {"files": "1", "errors": [{"file": "file", "message": "error", "line": 1, "column": 2}], "suites": "2", "duration": "7", "tests": "7", "tests_succ": "1", "tests_skip": "2", "tests_fail": "3", "tests_error": "1", "runs": "3", "runs_succ": "-12", "runs_skip": "4", "runs_fail": "5", "runs_error": "6", "commit": "commit"}}' + '}', + actual + ) + + # check the json output has been provided + gha.add_to_output.assert_called_once_with( + 'json', + '{' + '"title": "1 parse errors, 1 errors, 3 fail, 2 skipped, 1 pass in 7s", ' + '"summary": "' + '1 files\u2004\u2003\u205f\u20041 errors\u2004\u20032 suites\u2004\u2003\u20027s [:stopwatch:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"duration of all tests\\")\\n' + '7 tests\u2003\u205f\u20041 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"passed tests\\")\u20032 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"skipped / disabled tests\\")\u20033 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"failed tests\\")\u20031 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"test errors\\")\\n' + '3 runs\u2006\u2003-12 [:heavy_check_mark:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"passed tests\\")\u20034 [:zzz:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"skipped / disabled tests\\")\u20035 [:x:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"failed tests\\")\u20036 [:fire:](https://github.com/EnricoMi/publish-unit-test-result-action/blob/v1.20/README.md#the-symbols \\"test errors\\")\\n' + '\\n' + 'Results for commit commit.\\n", ' + '"conclusion": "conclusion", ' + '"stats": {"files": 1, "errors": 1, "suites": 2, "duration": 7, "tests": 7, "tests_succ": 1, "tests_skip": 2, "tests_fail": 3, "tests_error": 1, "runs": 3, "runs_succ": -12, "runs_skip": 4, "runs_fail": 5, "runs_error": 6, "commit": "commit"}, ' + '"annotations": 7, ' + '"check_url": "mock url", ' + '"formatted": {"stats": {"files": "1", "errors": "1", "suites": "2", "duration": "7", "tests": "7", "tests_succ": "1", "tests_skip": "2", "tests_fail": "3", "tests_error": "1", "runs": "3", "runs_succ": "-12", "runs_skip": "4", "runs_fail": "5", "runs_error": "6", "commit": "commit"}}' + '}' + ) + def test_publish_data(self): for separator in ['.', ',', ' ', punctuation_space]: with self.subTest(json_thousands_separator=separator): - self.maxDiff = None 
                 self.assertEqual({
                     'title': 'title',
                     'summary': 'summary',
@@ -1600,7 +1790,31 @@ def test_publish_data(self):
                                      'start_column': 3,
                                      'start_line': 1,
                                      'title': 'Error processing result file'}],
-                    'check_url': 'http://check-run.url'},
+                    'check_url': 'http://check-run.url',
+                    'cases': [
+                        {
+                            'class_name': 'class name',
+                            'test_name': 'test name',
+                            'states': {
+                                'success': [
+                                    {
+                                        'class_name': 'test.classpath.classname',
+                                        'content': 'content',
+                                        'line': 1,
+                                        'message': 'message',
+                                        'result': 'success',
+                                        'result_file': '/path/to/test/test.classpath.classname',
+                                        'stderr': 'stderr',
+                                        'stdout': 'stdout',
+                                        'test_file': 'file1',
+                                        'test_name': 'casename',
+                                        'time': 0.1
+                                    }
+                                ]
+                            }
+                        }
+                    ]
+                },
                 self.publish_data.to_dict(separator))
 
                 self.assertEqual({
@@ -1680,7 +1894,7 @@ def test_publish_json(self):
             with self.subTest(json_thousands_separator=separator):
                 with tempfile.TemporaryDirectory() as path:
                     filepath = os.path.join(path, 'file.json')
-                    settings = self.create_settings(json_file=filepath, json_thousands_separator=separator)
+                    settings = self.create_settings(json_file=filepath, json_thousands_separator=separator, json_test_case_results=True)
                     gh, gha, req, repo, commit = self.create_mocks(digest=self.base_digest, check_names=[settings.check_name])
                     publisher = Publisher(settings, gh, gha)
 
@@ -1700,6 +1914,9 @@ def test_publish_json(self):
                         '"stats_with_delta": {"files": {"number": 1234, "delta": -1234}, "errors": [{"file": "file", "message": "message", "line": 1, "column": 2}, {"file": "file2", "message": "message2", "line": 2, "column": 4}], "suites": {"number": 2, "delta": -2}, "duration": {"number": 3456, "delta": -3456}, "tests": {"number": 4, "delta": -4}, "tests_succ": {"number": 5, "delta": -5}, "tests_skip": {"number": 6, "delta": -6}, "tests_fail": {"number": 7, "delta": -7}, "tests_error": {"number": 8, "delta": -8}, "runs": {"number": 9, "delta": -9}, "runs_succ": {"number": 10, "delta": -10}, "runs_skip": {"number": 11, "delta": -11}, "runs_fail": {"number": 12, "delta": -12}, "runs_error": {"number": 1345, "delta": -1345}, "commit": "commit", "reference_type": "type", "reference_commit": "ref"}, '
                         '"annotations": [{"path": "path", "start_line": 1, "end_line": 2, "start_column": 3, "end_column": 4, "annotation_level": "failure", "message": "message", "title": "Error processing result file", "raw_details": "file"}], '
                         '"check_url": "http://check-run.url", '
+                        '"cases": ['
+                        '{"class_name": "class name", "test_name": "test name", "states": {"success": [{"result_file": "/path/to/test/test.classpath.classname", "test_file": "file1", "line": 1, "class_name": "test.classpath.classname", "test_name": "casename", "result": "success", "message": "message", "content": "content", "stdout": "stdout", "stderr": "stderr", "time": 0.1}]}}'
+                        '], '
                         '"formatted": {'
                         '"stats": {"files": "12' + separator + '345", "errors": [{"file": "file", "message": "message", "line": 1, "column": 2}], "suites": "2", "duration": "3' + separator + '456", "tests": "4", "tests_succ": "5", "tests_skip": "6", "tests_fail": "7", "tests_error": "8' + separator + '901", "runs": "9", "runs_succ": "10", "runs_skip": "11", "runs_fail": "12", "runs_error": "1' + separator + '345", "commit": "commit"}, '
                         '"stats_with_delta": {"files": {"number": "1' + separator + '234", "delta": "-1' + separator + '234"}, "errors": [{"file": "file", "message": "message", "line": 1, "column": 2}, {"file": "file2", "message": "message2", "line": 2, "column": 4}], "suites": {"number": "2", "delta": "-2"}, "duration": {"number": "3' + separator + '456", "delta": "-3' + separator + '456"}, "tests": {"number": "4", "delta": "-4"}, "tests_succ": {"number": "5", "delta": "-5"}, "tests_skip": {"number": "6", "delta": "-6"}, "tests_fail": {"number": "7", "delta": "-7"}, "tests_error": {"number": "8", "delta": "-8"}, "runs": {"number": "9", "delta": "-9"}, "runs_succ": {"number": "10", "delta": "-10"}, "runs_skip": {"number": "11", "delta": "-11"}, "runs_fail": {"number": "12", "delta": "-12"}, "runs_error": {"number": "1' + separator + '345", "delta": "-1' + separator + '345"}, "commit": "commit", "reference_type": "type", "reference_commit": "ref"}'
diff --git a/python/test/test_unittestresults.py b/python/test/test_unittestresults.py
index 4f81a0ac..51ded4d0 100644
--- a/python/test/test_unittestresults.py
+++ b/python/test/test_unittestresults.py
@@ -5,7 +5,7 @@
 
 from publish.unittestresults import get_test_results, get_stats, get_stats_delta, \
     ParsedUnitTestResults, ParsedUnitTestResultsWithCommit, \
-    UnitTestCase, UnitTestResults, UnitTestCaseResults, \
+    UnitTestCase, UnitTestResults, create_unit_test_case_results, \
     UnitTestRunResults, UnitTestRunDeltaResults, ParseError
 from test_utils import d, n
 
@@ -186,7 +186,7 @@ def test_get_test_results_with_empty_cases(self):
             files=0,
             errors=[],
             suites=0, suite_tests=0, suite_skipped=0, suite_failures=0, suite_errors=0, suite_time=0,
-            cases=0, cases_skipped=0, cases_failures=0, cases_errors=0, cases_time=0, case_results=UnitTestCaseResults(),
+            cases=0, cases_skipped=0, cases_failures=0, cases_errors=0, cases_time=0, case_results=create_unit_test_case_results(),
             tests=0, tests_skipped=0, tests_failures=0, tests_errors=0,
             commit='commit'
         ))
@@ -211,15 +211,15 @@ def test_get_test_results(self):
             errors=errors,
             suites=2, suite_tests=3, suite_skipped=4, suite_failures=5, suite_errors=6, suite_time=7,
             cases=7, cases_skipped=2, cases_failures=3, cases_errors=1, cases_time=28,
-            case_results=UnitTestCaseResults([
-                ((None, 'class1', 'test1'), dict(success=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1)])),
-                ((None, 'class1', 'test2'), dict(skipped=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test2', result='skipped', message='message2', content='content2', stdout='stdout2', stderr='stderr2', time=2)])),
-                ((None, 'class1', 'test3'), dict(failure=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test3', result='failure', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=3)])),
-                ((None, 'class2', 'test1'), dict(error=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test1', result='error', message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=4)])),
-                ((None, 'class2', 'test2'), dict(skipped=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test2', result='skipped', message='message5', content='content5', stdout='stdout5', stderr='stderr5', time=5)])),
-                ((None, 'class2', 'test3'), dict(failure=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test3', result='failure', message='message6', content='content6', stdout='stdout6', stderr='stderr6', time=6)])),
-                ((None, 'class2', 'test4'), dict(failure=[UnitTestCase(result_file='result', test_file='test',
line=123, class_name='class2', test_name='test4', result='failure', message='message7', content='content7', stdout='stdout7', stderr='stderr7', time=7)])), - ]), + case_results=create_unit_test_case_results({ + (None, 'class1', 'test1'): dict(success=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1)]), + (None, 'class1', 'test2'): dict(skipped=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test2', result='skipped', message='message2', content='content2', stdout='stdout2', stderr='stderr2', time=2)]), + (None, 'class1', 'test3'): dict(failure=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test3', result='failure', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=3)]), + (None, 'class2', 'test1'): dict(error=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test1', result='error', message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=4)]), + (None, 'class2', 'test2'): dict(skipped=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test2', result='skipped', message='message5', content='content5', stdout='stdout5', stderr='stderr5', time=5)]), + (None, 'class2', 'test3'): dict(failure=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test3', result='failure', message='message6', content='content6', stdout='stdout6', stderr='stderr6', time=6)]), + (None, 'class2', 'test4'): dict(failure=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test4', result='failure', message='message7', content='content7', stdout='stdout7', stderr='stderr7', time=7)]), + }), tests=7, tests_skipped=2, tests_failures=3, tests_errors=1, commit='commit' )) @@ -253,13 +253,13 @@ def test_get_test_results_with_multiple_runs(self): errors=[], suites=2, suite_tests=3, suite_skipped=4, suite_failures=5, suite_errors=6, suite_time=7, cases=10, cases_skipped=3, cases_failures=1, cases_errors=1, cases_time=55, - case_results=UnitTestCaseResults([ - ((None, 'class1', 'test1'), dict(success=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1), UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test1', result='success', message='message2', content='content2', stdout='stdout2', stderr='stderr2', time=2)])), - ((None, 'class1', 'test2'), dict(success=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test2', result='success', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=3)], skipped=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test2', result='skipped', message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=4)])), - ((None, 'class1', 'test3'), dict(skipped=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test3', result='skipped', message='message5', content='content5', stdout='stdout5', stderr='stderr5', time=5), UnitTestCase(result_file='result', 
test_file='test', line=123, class_name='class1', test_name='test3', result='skipped', message='message6', content='content6', stdout='stdout6', stderr='stderr6', time=6)])), - ((None, 'class1', 'test4'), dict(success=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test4', result='success', message='message7', content='content7', stdout='stdout7', stderr='stderr7', time=7)], failure=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test4', result='failure', message='message8', content='content8', stdout='stdout8', stderr='stderr8', time=8)])), - ((None, 'class1', 'test5'), dict(success=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test5', result='success', message='message9', content='content9', stdout='stdout9', stderr='stderr9', time=9)], error=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test5', result='error', message='message10', content='content10', stdout='stdout10', stderr='stderr10', time=10)])), - ]), + case_results=create_unit_test_case_results({ + (None, 'class1', 'test1'): dict(success=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1), UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test1', result='success', message='message2', content='content2', stdout='stdout2', stderr='stderr2', time=2)]), + (None, 'class1', 'test2'): dict(success=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test2', result='success', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=3)], skipped=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test2', result='skipped', message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=4)]), + (None, 'class1', 'test3'): dict(skipped=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test3', result='skipped', message='message5', content='content5', stdout='stdout5', stderr='stderr5', time=5), UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test3', result='skipped', message='message6', content='content6', stdout='stdout6', stderr='stderr6', time=6)]), + (None, 'class1', 'test4'): dict(success=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test4', result='success', message='message7', content='content7', stdout='stdout7', stderr='stderr7', time=7)], failure=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test4', result='failure', message='message8', content='content8', stdout='stdout8', stderr='stderr8', time=8)]), + (None, 'class1', 'test5'): dict(success=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test5', result='success', message='message9', content='content9', stdout='stdout9', stderr='stderr9', time=9)], error=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test5', result='error', message='message10', content='content10', stdout='stdout10', stderr='stderr10', time=10)]), + }), tests=5, tests_skipped=1, tests_failures=1, tests_errors=1, 
commit='commit' )) @@ -295,13 +295,13 @@ def test_get_test_results_with_duplicate_class_names(self): errors=[], suites=2, suite_tests=3, suite_skipped=4, suite_failures=5, suite_errors=6, suite_time=7, cases=10, cases_skipped=3, cases_failures=1, cases_errors=1, cases_time=55, - case_results=UnitTestCaseResults([ - ((None, 'class1', 'test1'), dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1), UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test1', result='success', message='message2', content='content2', stdout='stdout2', stderr='stderr2', time=2)])), - ((None, 'class1', 'test2'), dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test2', result='success', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=3)], skipped=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test2', result='skipped', message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=4)])), - ((None, 'class1', 'test3'), dict(skipped=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test3', result='skipped', message='message5', content='content5', stdout='stdout5', stderr='stderr5', time=5), UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test3', result='skipped', message='message6', content='content6', stdout='stdout6', stderr='stderr6', time=6)])), - ((None, 'class1', 'test4'), dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test4', result='success', message='message7', content='content7', stdout='stdout7', stderr='stderr7', time=7)], failure=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test4', result='failure', message='message8', content='content8', stdout='stdout8', stderr='stderr8', time=8)])), - ((None, 'class1', 'test5'), dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test5', result='success', message='message9', content='content9', stdout='stdout9', stderr='stderr9', time=9)], error=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test5', result='error', message='message10', content='content10', stdout='stdout10', stderr='stderr10', time=10)])), - ]), + case_results=create_unit_test_case_results({ + (None, 'class1', 'test1'): dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1), UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test1', result='success', message='message2', content='content2', stdout='stdout2', stderr='stderr2', time=2)]), + (None, 'class1', 'test2'): dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test2', result='success', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=3)], skipped=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test2', result='skipped', 
message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=4)]), + (None, 'class1', 'test3'): dict(skipped=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test3', result='skipped', message='message5', content='content5', stdout='stdout5', stderr='stderr5', time=5), UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test3', result='skipped', message='message6', content='content6', stdout='stdout6', stderr='stderr6', time=6)]), + (None, 'class1', 'test4'): dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test4', result='success', message='message7', content='content7', stdout='stdout7', stderr='stderr7', time=7)], failure=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test4', result='failure', message='message8', content='content8', stdout='stdout8', stderr='stderr8', time=8)]), + (None, 'class1', 'test5'): dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test5', result='success', message='message9', content='content9', stdout='stdout9', stderr='stderr9', time=9)], error=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test5', result='error', message='message10', content='content10', stdout='stdout10', stderr='stderr10', time=10)]), + }), tests=5, tests_skipped=1, tests_failures=1, tests_errors=1, commit='commit' )) @@ -311,18 +311,18 @@ def test_get_test_results_with_duplicate_class_names(self): errors=[], suites=2, suite_tests=3, suite_skipped=4, suite_failures=5, suite_errors=6, suite_time=7, cases=10, cases_skipped=3, cases_failures=1, cases_errors=1, cases_time=55, - case_results=UnitTestCaseResults([ - (('test1', 'class1', 'test1'), dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1)])), - (('test2', 'class1', 'test1'), dict(success=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test1', result='success', message='message2', content='content2', stdout='stdout2', stderr='stderr2', time=2)])), - (('test1', 'class1', 'test2'), dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test2', result='success', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=3)])), - (('test2', 'class1', 'test2'), dict(skipped=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test2', result='skipped', message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=4)])), - (('test1', 'class1', 'test3'), dict(skipped=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test3', result='skipped', message='message5', content='content5', stdout='stdout5', stderr='stderr5', time=5)])), - (('test2', 'class1', 'test3'), dict(skipped=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test3', result='skipped', message='message6', content='content6', stdout='stdout6', stderr='stderr6', time=6)])), - (('test1', 'class1', 'test4'), dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', 
test_name='test4', result='success', message='message7', content='content7', stdout='stdout7', stderr='stderr7', time=7)])), - (('test2', 'class1', 'test4'), dict(failure=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test4', result='failure', message='message8', content='content8', stdout='stdout8', stderr='stderr8', time=8)])), - (('test1', 'class1', 'test5'), dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test5', result='success', message='message9', content='content9', stdout='stdout9', stderr='stderr9', time=9)])), - (('test2', 'class1', 'test5'), dict(error=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test5', result='error', message='message10', content='content10', stdout='stdout10', stderr='stderr10', time=10)])), - ]), + case_results=create_unit_test_case_results({ + ('test1', 'class1', 'test1'): dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1)]), + ('test2', 'class1', 'test1'): dict(success=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test1', result='success', message='message2', content='content2', stdout='stdout2', stderr='stderr2', time=2)]), + ('test1', 'class1', 'test2'): dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test2', result='success', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=3)]), + ('test2', 'class1', 'test2'): dict(skipped=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test2', result='skipped', message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=4)]), + ('test1', 'class1', 'test3'): dict(skipped=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test3', result='skipped', message='message5', content='content5', stdout='stdout5', stderr='stderr5', time=5)]), + ('test2', 'class1', 'test3'): dict(skipped=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test3', result='skipped', message='message6', content='content6', stdout='stdout6', stderr='stderr6', time=6)]), + ('test1', 'class1', 'test4'): dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test4', result='success', message='message7', content='content7', stdout='stdout7', stderr='stderr7', time=7)]), + ('test2', 'class1', 'test4'): dict(failure=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test4', result='failure', message='message8', content='content8', stdout='stdout8', stderr='stderr8', time=8)]), + ('test1', 'class1', 'test5'): dict(success=[UnitTestCase(result_file='result', test_file='test1', line=123, class_name='class1', test_name='test5', result='success', message='message9', content='content9', stdout='stdout9', stderr='stderr9', time=9)]), + ('test2', 'class1', 'test5'): dict(error=[UnitTestCase(result_file='result', test_file='test2', line=123, class_name='class1', test_name='test5', result='error', message='message10', content='content10', stdout='stdout10', stderr='stderr10', time=10)]), + }), tests=10, tests_skipped=3, tests_failures=1, 
tests_errors=1, commit='commit' )) @@ -344,10 +344,10 @@ def test_get_test_results_with_some_nones(self): errors=[], suites=2, suite_tests=3, suite_skipped=4, suite_failures=5, suite_errors=6, suite_time=7, cases=4, cases_skipped=2, cases_failures=1, cases_errors=0, cases_time=3, - case_results=UnitTestCaseResults([ - ((None, 'class', 'test1'), dict(success=[UnitTestCase(result_file='result', test_file=None, line=None, class_name='class', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1)], skipped=[UnitTestCase(result_file='result', test_file=None, line=None, class_name='class', test_name='test1', result='skipped', message='message2', content='content2', stdout='stdout2', stderr='stderr2', time=None)])), - ((None, 'class', 'test2'), dict(failure=[UnitTestCase(result_file='result', test_file=None, line=None, class_name='class', test_name='test2', result='failure', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=2)], skipped=[UnitTestCase(result_file='result', test_file=None, line=None, class_name='class', test_name='test2', result='skipped', message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=None)])), - ]), + case_results=create_unit_test_case_results({ + (None, 'class', 'test1'): dict(success=[UnitTestCase(result_file='result', test_file=None, line=None, class_name='class', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1)], skipped=[UnitTestCase(result_file='result', test_file=None, line=None, class_name='class', test_name='test1', result='skipped', message='message2', content='content2', stdout='stdout2', stderr='stderr2', time=None)]), + (None, 'class', 'test2'): dict(failure=[UnitTestCase(result_file='result', test_file=None, line=None, class_name='class', test_name='test2', result='failure', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=2)], skipped=[UnitTestCase(result_file='result', test_file=None, line=None, class_name='class', test_name='test2', result='skipped', message='message4', content='content4', stdout='stdout4', stderr='stderr4', time=None)]), + }), tests=2, tests_skipped=0, tests_failures=1, tests_errors=0, commit='commit' )) @@ -372,15 +372,15 @@ def test_get_test_results_with_disabled_cases(self): errors=errors, suites=2, suite_tests=3, suite_skipped=4, suite_failures=5, suite_errors=6, suite_time=7, cases=7, cases_skipped=2, cases_failures=3, cases_errors=1, cases_time=28, - case_results=UnitTestCaseResults([ - ((None, 'class1', 'test1'), dict(success=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test1', result='success', message='message1', content='content1', stdout='stdout1', stderr='stderr1', time=1)])), - ((None, 'class1', 'test2'), dict(skipped=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test2', result='skipped', message='message2', content='content2', stdout='stdout2', stderr='stderr2', time=2)])), - ((None, 'class1', 'test3'), dict(failure=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class1', test_name='test3', result='failure', message='message3', content='content3', stdout='stdout3', stderr='stderr3', time=3)])), - ((None, 'class2', 'test1'), dict(error=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test1', result='error', message='message4', 
content='content4', stdout='stdout4', stderr='stderr4', time=4)]),
+                (None, 'class2', 'test2'): dict(skipped=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test2', result='disabled', message='message5', content='content5', stdout='stdout5', stderr='stderr5', time=5)]),
+                (None, 'class2', 'test3'): dict(failure=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test3', result='failure', message='message6', content='content6', stdout='stdout6', stderr='stderr6', time=6)]),
+                (None, 'class2', 'test4'): dict(failure=[UnitTestCase(result_file='result', test_file='test', line=123, class_name='class2', test_name='test4', result='failure', message='message7', content='content7', stdout='stdout7', stderr='stderr7', time=7)]),
+            }),
             tests=7, tests_skipped=2, tests_failures=3, tests_errors=1,
             commit='commit'
         ))
@@ -402,7 +402,7 @@ def test_get_stats(self):
             cases_failures=12,
             cases_errors=13,
             cases_time=4,
-            case_results=UnitTestCaseResults(),
+            case_results=create_unit_test_case_results(),
             tests=30,
             tests_skipped=8,