/
publisher.py
608 lines (514 loc) · 27.8 KB
/
publisher.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
import dataclasses
import json
import logging
import os
import re
from dataclasses import dataclass
from typing import List, Any, Optional, Tuple, Mapping, Dict
from github import Github, GithubException
from github.CheckRun import CheckRun
from github.CheckRunAnnotation import CheckRunAnnotation
from github.PullRequest import PullRequest
from github.IssueComment import IssueComment
from publish import hide_comments_mode_orphaned, hide_comments_mode_all_but_latest, hide_comments_mode_off, \
comment_mode_off, comment_mode_create, comment_mode_update, \
comment_condition_always, comment_condition_changes, comment_condition_failures, comment_condition_errors,\
digest_prefix, get_stats_from_digest, digest_header, get_short_summary, get_long_summary_md, \
get_long_summary_with_digest_md, get_error_annotations, get_case_annotations, \
get_all_tests_list_annotation, get_skipped_tests_list_annotation, get_all_tests_list, \
get_skipped_tests_list, all_tests_list, skipped_tests_list, pull_request_build_mode_merge, \
Annotation, SomeTestChanges
from publish import logger
from publish.github_action import GithubAction
from publish.unittestresults import UnitTestCaseResults, UnitTestRunResults, UnitTestRunDeltaResults, get_stats_delta
@dataclass(frozen=True)
class Settings:
    """Immutable configuration of a publish run.

    Only part of these fields are used by this module; the others configure
    other stages of the action (parsing, throttling, failure handling).
    """
    token: str                 # GitHub API token (the Github client is constructed from it elsewhere)
    api_url: str               # GitHub REST API base URL
    graphql_url: str           # GitHub GraphQL endpoint, used here for comment queries and mutations
    api_retries: int           # not used in this module
    event: dict                # parsed event payload, read for 'before' and 'pull_request.base.sha'
    event_file: Optional[str]  # not used in this module
    event_name: str            # triggering event name, e.g. 'pull_request' or 'workflow_run'
    repo: str                  # repository in 'owner/name' form (GITHUB_REPOSITORY)
    commit: str                # SHA of the commit results are published for
    json_file: Optional[str]   # optional path to write the full results JSON to
    fail_on_errors: bool       # not used in this module
    fail_on_failures: bool     # not used in this module
    files_glob: str            # not used in this module
    time_factor: float         # not used in this module
    check_name: str            # name of the check run to create and to look up on other commits
    comment_title: str         # title (first line) of pull request comments
    comment_mode: str          # one of the comment_mode_* constants
    comment_condition: str     # one of the comment_condition_* constants
    compare_earlier: bool      # whether to compute deltas against earlier commits
    pull_request_build: str    # compared against pull_request_build_mode_merge in get_base_commit_sha
    test_changes_limit: int    # max number of test changes listed in the comment
    hide_comment_mode: str     # one of the hide_comments_mode_* constants
    report_individual_runs: bool      # forwarded to get_case_annotations
    dedup_classes_by_file_name: bool  # not used in this module
    ignore_runs: bool          # not used in this module
    check_run_annotation: List[str]   # which test list annotations to attach (all / skipped tests)
    seconds_between_github_reads: float   # not used in this module
    seconds_between_github_writes: float  # not used in this module
@dataclasses.dataclass(frozen=True)
class PublishData:
    """Everything published for one commit: check run content, stats and annotations."""
    title: str                                           # check run title (short summary)
    summary: str                                         # check run summary markdown
    conclusion: str                                      # check run conclusion string
    stats: UnitTestRunResults                            # stats of this commit
    stats_with_delta: Optional[UnitTestRunDeltaResults]  # stats with delta to an earlier commit, if any
    annotations: List[Annotation]                        # all annotations attached to the check run

    def to_dict(self) -> Dict[str, Any]:
        # the dict_factory drops None values, so optional fields do not appear as nulls
        return dataclasses.asdict(self, dict_factory=lambda x: {k: v for (k, v) in x if v is not None})

    def reduced(self) -> Mapping[str, Any]:
        """Return a size-reduced dict of this data, large list fields replaced by their lengths."""
        data = self.to_dict()

        # replace some large fields with their lengths
        if data.get('stats', {}).get('errors') is not None:
            data['stats']['errors'] = len(data['stats']['errors'])
        if data.get('stats_with_delta', {}).get('errors') is not None:
            data['stats_with_delta']['errors'] = len(data['stats_with_delta']['errors'])
        if data.get('annotations') is not None:
            data['annotations'] = len(data['annotations'])

        return data
class Publisher:
    """Publishes unit test results as a check run and as pull request comments on GitHub."""

    def __init__(self, settings: Settings, gh: Github, gha: GithubAction):
        self._settings = settings
        self._gh = gh
        self._gha = gha
        self._repo = gh.get_repo(self._settings.repo)
        # PyGithub does not expose its requester publicly; the name-mangled private
        # field is accessed here so raw GraphQL requests can be issued
        self._req = gh._Github__requester
def publish(self,
            stats: UnitTestRunResults,
            cases: UnitTestCaseResults,
            conclusion: str):
    """Publish the results: create the check run, then comment on matching pull requests.

    Commenting and the hiding of earlier comments are controlled by the
    comment_mode / hide_comment_mode settings.
    """
    logger.info(f'publishing {conclusion} results for commit {self._settings.commit}')
    check_run = self.publish_check(stats, cases, conclusion)

    if self._settings.comment_mode != comment_mode_off:
        pulls = self.get_pulls(self._settings.commit)
        if pulls:
            for pull in pulls:
                self.publish_comment(self._settings.comment_title, stats, pull, check_run, cases)
                # hide earlier comments according to the configured mode
                if self._settings.hide_comment_mode == hide_comments_mode_orphaned:
                    self.hide_orphaned_commit_comments(pull)
                elif self._settings.hide_comment_mode == hide_comments_mode_all_but_latest:
                    self.hide_all_but_latest_comments(pull)
            if self._settings.hide_comment_mode == hide_comments_mode_off:
                logger.info('hide_comments disabled, not hiding any comments')
        else:
            logger.info(f'there is no pull request for commit {self._settings.commit}')
    else:
        logger.info('comment_on_pr disabled, not commenting on any pull requests')
def get_pulls(self, commit: str) -> List[PullRequest]:
    """Find the open pull requests of this repository that have the given commit
    as their current head or merge commit.
    """
    # totalCount calls the GitHub API just to get the total number
    # we have to retrieve them all anyway so better do this once by materialising the PaginatedList via list()
    issues = list(self._gh.search_issues(f'type:pr repo:"{self._settings.repo}" {commit}'))
    logger.debug(f'found {len(issues)} pull requests in repo {self._settings.repo} containing commit {commit}')

    if logger.isEnabledFor(logging.DEBUG):
        for issue in issues:
            pr = issue.as_pull_request()
            logger.debug(pr)
            logger.debug(pr.raw_data)
            logger.debug(f'PR {pr.html_url}: {pr.head.repo.full_name} -> {pr.base.repo.full_name}')

    # we can only publish the comment to PRs that are in the same repository as this action is executed in
    # so pr.base.repo.full_name must be same as GITHUB_REPOSITORY / self._settings.repo
    # we won't have permission otherwise
    pulls = list([pr
                  for issue in issues
                  for pr in [issue.as_pull_request()]
                  if pr.base.repo.full_name == self._settings.repo])

    if len(pulls) == 0:
        logger.debug(f'found no pull requests in repo {self._settings.repo} for commit {commit}')
        return []

    # we only comment on PRs that have the commit as their current head or merge commit
    pulls = [pull for pull in pulls if commit in [pull.head.sha, pull.merge_commit_sha]]
    if len(pulls) == 0:
        logger.debug(f'found no pull request in repo {self._settings.repo} with '
                     f'commit {commit} as current head or merge commit')
        return []

    # only comment on the open PRs
    pulls = [pull for pull in pulls if pull.state == 'open']
    if len(pulls) == 0:
        # NOTE(review): this message says 'multiple' even when only a single closed PR matched
        logger.debug(f'found multiple pull requests in repo {self._settings.repo} with '
                     f'commit {commit} as current head or merge commit but none is open')

    for pull in pulls:
        logger.debug(f'found open pull request #{pull.number} with commit {commit} as current head or merge commit')
    return pulls
def get_stats_from_commit(self, commit_sha: str) -> Optional[UnitTestRunResults]:
    """Fetch this action's check run for the given commit and extract its stats, if any."""
    run = self.get_check_run(commit_sha)
    if run is None:
        return None
    return self.get_stats_from_check_run(run)
def get_check_run(self, commit_sha: str) -> Optional[CheckRun]:
    """Fetch this action's check run for the given commit, or None when it cannot be found."""
    # the all-zero sha denotes 'no commit', e.g. the 'before' of an initial push
    if commit_sha is None or commit_sha == '0000000000000000000000000000000000000000':
        return None

    commit = None
    try:
        commit = self._repo.get_commit(commit_sha)
    except GithubException as e:
        # 422 means GitHub does not know this commit; anything else is unexpected
        if e.status == 422:
            self._gha.warning(str(e.data))
        else:
            raise e

    if commit is None:
        self._gha.error(f'Could not find commit {commit_sha}')
        return None

    runs = commit.get_check_runs()
    # totalCount calls the GitHub API, so better not do this if we are not logging the result anyway
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(f'found {runs.totalCount} check runs for commit {commit_sha}')

    return self.get_check_run_from_list(list(runs))
def get_check_run_from_list(self, runs: List[CheckRun]) -> Optional[CheckRun]:
    """Pick this action's check run from the given list.

    Filters successively by configured check name, presence of a test result
    summary, and completed status; among remaining candidates the one that
    started latest wins.
    """
    # filter for runs with the same name as configured
    candidates = [run for run in runs if run.name == self._settings.check_name]
    logger.debug(f'there are {len(candidates)} check runs with title {self._settings.check_name}')
    if not candidates:
        return None
    if len(candidates) == 1:
        return candidates[0]

    # filter based on summary
    candidates = [run for run in candidates if run.output.summary and digest_prefix in run.output.summary]
    logger.debug(f'there are {len(candidates)} check runs with a test result summary')
    if not candidates:
        return None
    if len(candidates) == 1:
        return candidates[0]

    # filter for completed runs
    candidates = [run for run in candidates if run.status == 'completed']
    logger.debug(f'there are {len(candidates)} check runs with completed status')
    if not candidates:
        return None
    if len(candidates) == 1:
        return candidates[0]

    # pick the run that started latest; on ties, max keeps the first one,
    # just like sorted(..., reverse=True)[0] did
    return max(candidates, key=lambda run: run.started_at)
@staticmethod
def get_stats_from_check_run(check_run: CheckRun) -> Optional[UnitTestRunResults]:
    """Extract the stats digest embedded in a check run's summary.

    Returns None when the summary is missing or contains no digest.
    """
    summary = check_run.output.summary
    if summary is None:
        return None
    for line in summary.split('\n'):
        logger.debug(f'summary: {line}')

    # str.find returns -1 when absent; the earlier 'pos = index(...) / if pos:'
    # check wrongly skipped a digest located at position 0 of the summary
    pos = summary.find(digest_header)
    if pos >= 0:
        digest = summary[pos + len(digest_header):]
        logger.debug(f'digest: {digest}')
        stats = get_stats_from_digest(digest)
        logger.debug(f'stats: {stats}')
        return stats
@staticmethod
def get_test_list_from_annotation(annotation: CheckRunAnnotation) -> Optional[List[str]]:
if annotation is None or not annotation.raw_details:
return None
return annotation.raw_details.split('\n')
def publish_check(self,
                  stats: UnitTestRunResults,
                  cases: UnitTestCaseResults,
                  conclusion: str) -> CheckRun:
    """Create the check run for these results, including all annotations, and return it.

    When compare_earlier is set, deltas against the 'before' commit of the push
    event are included in the summary.
    """
    # get stats from earlier commits
    before_stats = None
    if self._settings.compare_earlier:
        before_commit_sha = self._settings.event.get('before')
        logger.debug(f'comparing against before={before_commit_sha}')
        before_stats = self.get_stats_from_commit(before_commit_sha)
    stats_with_delta = get_stats_delta(stats, before_stats, 'earlier') if before_stats is not None else stats
    logger.debug(f'stats with delta: {stats_with_delta}')

    error_annotations = get_error_annotations(stats.errors)
    case_annotations = get_case_annotations(cases, self._settings.report_individual_runs)
    file_list_annotations = self.get_test_list_annotations(cases)
    all_annotations = error_annotations + case_annotations + file_list_annotations

    title = get_short_summary(stats)
    summary = get_long_summary_md(stats_with_delta)

    # create full json
    data = PublishData(
        title=title,
        summary=summary,
        conclusion=conclusion,
        stats=stats,
        stats_with_delta=stats_with_delta if before_stats is not None else None,
        annotations=all_annotations
    )
    self.publish_json(data)

    # we can send only 50 annotations at once, so we split them into chunks of 50
    check_run = None
    summary_with_digest = get_long_summary_with_digest_md(stats_with_delta, stats)
    all_annotations = [annotation.to_dict() for annotation in all_annotations]
    all_annotations = [all_annotations[x:x+50] for x in range(0, len(all_annotations), 50)] or [[]]
    for annotations in all_annotations:
        output = dict(
            title=title,
            summary=summary_with_digest,
            annotations=annotations
        )

        if check_run is None:
            # first chunk creates the check run
            logger.debug(f'creating check with {len(annotations)} annotations')
            check_run = self._repo.create_check_run(name=self._settings.check_name,
                                                    head_sha=self._settings.commit,
                                                    status='completed',
                                                    conclusion=conclusion,
                                                    output=output)
            logger.info(f'created check {check_run.html_url}')
        else:
            # remaining chunks are appended by editing the existing check run
            logger.debug(f'updating check with {len(annotations)} more annotations')
            check_run.edit(output=output)
            logger.debug(f'updated check')
    return check_run
def publish_json(self, data: PublishData):
if self._settings.json_file:
try:
with open(self._settings.json_file, 'wt', encoding='utf-8') as w:
json.dump(data.to_dict(), w, ensure_ascii=False)
except Exception as e:
self._gha.error(f'Failed to write JSON file {self._settings.json_file}: {str(e)}')
try:
os.unlink(self._settings.json_file)
except:
pass
# provide a reduced version to Github actions
self._gha.set_output('json', json.dumps(data.reduced(), ensure_ascii=False))
@staticmethod
def get_test_lists_from_check_run(check_run: Optional[CheckRun]) -> Tuple[Optional[List[str]], Optional[List[str]]]:
if check_run is None:
return None, None
all_tests_title_regexp = re.compile(r'^\d+ test(s)? found( \(tests \d+ to \d+\))?$')
skipped_tests_title_regexp = re.compile(r'^\d+ skipped test(s)? found( \(tests \d+ to \d+\))?$')
all_tests_message_regexp = re.compile(
r'^(There is 1 test, see "Raw output" for the name of the test)|'
r'(There are \d+ tests, see "Raw output" for the full list of tests)|'
r'(There are \d+ tests, see "Raw output" for the list of tests \d+ to \d+)\.$')
skipped_tests_message_regexp = re.compile(
r'^(There is 1 skipped test, see "Raw output" for the name of the skipped test)|'
r'(There are \d+ skipped tests, see "Raw output" for the full list of skipped tests)|'
r'(There are \d+ skipped tests, see "Raw output" for the list of skipped tests \d+ to \d+)\.$')
annotations = list(check_run.get_annotations())
all_tests_list = Publisher.get_test_list_from_annotations(annotations, all_tests_title_regexp, all_tests_message_regexp)
skipped_tests_list = Publisher.get_test_list_from_annotations(annotations, skipped_tests_title_regexp, skipped_tests_message_regexp)
return all_tests_list or None, skipped_tests_list or None
@staticmethod
def get_test_list_from_annotations(annotations: List[CheckRunAnnotation],
title_regexp, message_regexp) -> List[str]:
test_annotations: List[CheckRunAnnotation] = []
for annotation in annotations:
if annotation and annotation.title and annotation.message and annotation.raw_details and \
title_regexp.match(annotation.title) and \
message_regexp.match(annotation.message):
test_annotations.append(annotation)
test_lists = [Publisher.get_test_list_from_annotation(test_annotation)
for test_annotation in test_annotations]
test_list = [test
for test_list in test_lists
if test_list
for test in test_list]
return test_list
def get_test_list_annotations(self, cases: UnitTestCaseResults) -> List[Annotation]:
    """Create the configured test list annotations (skipped tests and/or all tests) for these cases."""
    selected = self._settings.check_run_annotation
    skipped_tests = get_skipped_tests_list_annotation(cases) if skipped_tests_list in selected else []
    all_tests = get_all_tests_list_annotation(cases) if all_tests_list in selected else []
    # skipped tests come first, empty annotations are dropped
    return [annotation for annotation in skipped_tests + all_tests if annotation]
def publish_comment(self,
                    title: str,
                    stats: UnitTestRunResults,
                    pull_request: PullRequest,
                    check_run: Optional[CheckRun] = None,
                    cases: Optional[UnitTestCaseResults] = None):
    """Create or update the results comment on the given pull request.

    When compare_earlier is set, stats are compared against the check run of the
    base commit, and test changes are derived from both check runs' annotations.
    """
    # compare them with earlier stats
    base_check_run = None
    if self._settings.compare_earlier:
        base_commit_sha = self.get_base_commit_sha(pull_request)
        if stats.commit == base_commit_sha:
            # we do not publish a comment when we compare the commit to itself
            # that would overwrite earlier comments without change stats
            # NOTE(review): this path returns the pull request while the other early
            # return below returns None — confirm callers ignore the return value
            return pull_request
        logger.debug(f'comparing against base={base_commit_sha}')
        base_check_run = self.get_check_run(base_commit_sha)
    base_stats = self.get_stats_from_check_run(base_check_run) if base_check_run is not None else None
    stats_with_delta = get_stats_delta(stats, base_stats, 'base') if base_stats is not None else stats
    logger.debug(f'stats with delta: {stats_with_delta}')

    # gather test lists from check run and cases
    before_all_tests, before_skipped_tests = self.get_test_lists_from_check_run(base_check_run)
    all_tests, skipped_tests = get_all_tests_list(cases), get_skipped_tests_list(cases)
    test_changes = SomeTestChanges(before_all_tests, all_tests, before_skipped_tests, skipped_tests)

    # we need fetch the latest comment if comment_condition != comment_condition_always
    # or self._settings.comment_mode == comment_mode_update
    latest_comment = None
    if self._settings.comment_condition != comment_condition_always or self._settings.comment_mode == comment_mode_update:
        latest_comment = self.get_latest_comment(pull_request)
    latest_comment_body = latest_comment.body if latest_comment else None

    # are we required to create a comment on this PR?
    if not self.require_comment(stats, stats_with_delta, test_changes, latest_comment_body):
        logger.info(f'No comment required as comment_on is {self._settings.comment_condition}')
        return

    details_url = check_run.html_url if check_run else None
    summary = get_long_summary_md(stats_with_delta, details_url, test_changes, self._settings.test_changes_limit)
    body = f'## {title}\n{summary}'

    # reuse existing comment when comment_mode == comment_mode_update, otherwise create new comment
    if self._settings.comment_mode == comment_mode_update and latest_comment is not None:
        self.reuse_comment(latest_comment, body)
        logger.info(f'edited comment for pull request #{pull_request.number}: {latest_comment.html_url}')
    else:
        comment = pull_request.create_issue_comment(body)
        logger.info(f'created comment for pull request #{pull_request.number}: {comment.html_url}')
@staticmethod
def comment_has_changes(comment_body: Optional[str]) -> bool:
if comment_body is None:
return False
end = comment_body.lower().find('results for commit')
if end < 0:
return False
comment_body = comment_body[:end]
# remove links
comment_body = re.sub(r'\([^)]*\)', '<link>', comment_body)
# replace ' ' with some non-whitespace string, it separates columns in the comment
comment_body = comment_body.replace(' ', '⋯')
m = re.search(r'[-+](\s*[0-9]+)+\s+(([^0-9\s])|\n)', comment_body)
return m is not None
@classmethod
def comment_has_failures(cls, comment_body: Optional[str]) -> bool:
    # True when the comment's stats (before 'results for commit') show a positive count before ':x:'
    return cls._comment_has(comment_body, r'\[:x:]')
@classmethod
def comment_has_errors(cls, comment_body: Optional[str]) -> bool:
    # True when the comment shows a positive count before ':fire:' or the word 'errors'
    return cls._comment_has(comment_body, r'(\[:fire:]|errors)')
@staticmethod
def _comment_has(comment_body: Optional[str], symbol: str) -> bool:
if comment_body is None:
return False
end = comment_body.lower().find('results for commit')
if end < 0:
return False
comment_body = comment_body[:end]
# we assume '00 ...' indicates multiple failures / errors (more than 99)
m = re.search(r'([0-9][0-9]|[1-9])\s' + symbol, comment_body)
return m is not None
def require_comment(self,
                    stats: UnitTestRunResults,
                    stats_with_delta: UnitTestRunDeltaResults,
                    test_changes: SomeTestChanges,
                    comment_body: Optional[str]) -> bool:
    """Decide whether a comment is required under the configured comment condition,
    considering the current stats, their delta, the test changes, and what the
    latest existing comment already shows.
    """
    condition = self._settings.comment_condition
    if condition == comment_condition_always:
        return True
    if condition == comment_condition_changes:
        return (self.comment_has_changes(comment_body)
                or stats_with_delta is None
                or stats_with_delta.has_changes
                or test_changes.has_changes)
    if condition == comment_condition_failures:
        return (self.comment_has_failures(comment_body)
                or self.comment_has_errors(comment_body)
                or stats.has_failures
                or stats.has_errors)
    if condition == comment_condition_errors:
        return self.comment_has_errors(comment_body) or stats.has_errors
    return False
def get_latest_comment(self, pull: PullRequest) -> Optional[IssueComment]:
    """Return the most recently updated visible comment of this action on the pull request, or None."""
    # get comments of this pull request
    comments = self.get_pull_request_comments(pull, order_by_updated=True)

    # get all comments that come from this action and are not hidden
    comments = self.get_action_comments(comments)

    # if there is no such comment, stop here
    if len(comments) == 0:
        return None

    # fetch latest action comment
    comment_id = comments[-1].get("databaseId")
    return pull.get_issue_comment(comment_id)
def reuse_comment(self, comment: IssueComment, body: str):
if ':recycle:' not in body:
body = f'{body}\n:recycle: This comment has been updated with latest results.'
try:
comment.edit(body)
except Exception as e:
self._gha.warning(f'Failed to edit existing comment #{comment.id}')
logger.debug('editing existing comment failed', exc_info=e)
def get_base_commit_sha(self, pull_request: PullRequest) -> Optional[str]:
    """Determine the commit to compare this pull request's results against, or None when unknown."""
    if self._settings.pull_request_build == pull_request_build_mode_merge:
        if self._settings.event:
            # for pull request events we take the other parent of the merge commit (base)
            if self._settings.event_name == 'pull_request':
                return self._settings.event.get('pull_request', {}).get('base', {}).get('sha')
            # for workflow run events we should take the same as for pull request events,
            # but we have no way to figure out the actual merge commit and its parents
            # we do not take the base sha from pull_request as it is not immutable
            if self._settings.event_name == 'workflow_run':
                return None

    try:
        # we always fall back to where the branch merged off base ref
        logger.debug(f'comparing {pull_request.base.ref} with {self._settings.commit}')
        compare = self._repo.compare(pull_request.base.ref, self._settings.commit)
        return compare.merge_base_commit.sha
    except Exception:
        # the earlier bare 'except:' would also have swallowed SystemExit / KeyboardInterrupt
        logger.warning(f'could not find best common ancestor '
                       f'between base {pull_request.base.sha} '
                       f'and commit {self._settings.commit}')

    return None
def get_pull_request_comments(self, pull: PullRequest, order_by_updated: bool) -> List[Mapping[str, Any]]:
    """Fetch the last 100 comments of the pull request via GraphQL.

    Returns the raw comment nodes (id, databaseId, author, body, isMinimized).
    """
    order = ''
    if order_by_updated:
        order = ', orderBy: { direction: ASC, field: UPDATED_AT }'

    query = dict(
        query=r'query ListComments {'
              r' repository(owner:"' + self._repo.owner.login + r'", name:"' + self._repo.name + r'") {'
              r' pullRequest(number: ' + str(pull.number) + r') {'
              f' comments(last: 100{order}) {{'
              r' nodes {'
              r' id, databaseId, author { login }, body, isMinimized'
              r' }'
              r' }'
              r' }'
              r' }'
              r'}'
    )

    headers, data = self._req.requestJsonAndCheck(
        "POST", self._settings.graphql_url, input=query
    )

    return data \
        .get('data', {}) \
        .get('repository', {}) \
        .get('pullRequest', {}) \
        .get('comments', {}) \
        .get('nodes')
def hide_comment(self, comment_node_id) -> bool:
input = dict(
query=r'mutation MinimizeComment {'
r' minimizeComment(input: { subjectId: "' + comment_node_id + r'", classifier: OUTDATED } ) {'
r' minimizedComment { isMinimized, minimizedReason }'
r' }'
r'}'
)
headers, data = self._req.requestJsonAndCheck(
"POST", self._settings.graphql_url, input=input
)
return data \
.get('data', {}) \
.get('minimizeComment', {}) \
.get('minimizedComment', {}) \
.get('isMinimized', {})
def get_action_comments(self, comments: List[Mapping[str, Any]], is_minimized: Optional[bool] = False):
return list([comment for comment in comments
if comment.get('author', {}).get('login') == 'github-actions'
and (is_minimized is None or comment.get('isMinimized') == is_minimized)
and comment.get('body', '').startswith(f'## {self._settings.comment_title}\n')
and ('\nresults for commit ' in comment.get('body') or '\nResults for commit ' in comment.get('body'))])
def hide_orphaned_commit_comments(self, pull: PullRequest) -> None:
    """Hide comments of this action that refer to commits no longer part of the pull request."""
    # rewriting history of branch removes commits
    # we do not want to show test results for those commits anymore

    # get commits of this pull request
    commit_shas = set([commit.sha for commit in pull.get_commits()])

    # get comments of this pull request
    comments = self.get_pull_request_comments(pull, order_by_updated=False)

    # get all comments that come from this action and are not hidden
    comments = self.get_action_comments(comments)

    # get comment node ids and their commit sha (possibly abbreviated)
    matches = [(comment.get('id'), re.search(r'^[Rr]esults for commit ([0-9a-f]{8,40})\.(?:\s.*)?$', comment.get('body'), re.MULTILINE))
               for comment in comments]
    comment_commits = [(node_id, match.group(1))
                       for node_id, match in matches
                       if match is not None]

    # get those comment node ids whose commit is not part of this pull request any more
    # startswith handles abbreviated shas in comment bodies
    comment_ids = [(node_id, comment_commit_sha)
                   for (node_id, comment_commit_sha) in comment_commits
                   if not any([sha
                               for sha in commit_shas
                               if sha.startswith(comment_commit_sha)])]

    # hide all those comments
    for node_id, comment_commit_sha in comment_ids:
        logger.info(f'hiding unit test result comment for commit {comment_commit_sha}')
        self.hide_comment(node_id)
def hide_all_but_latest_comments(self, pull: PullRequest) -> None:
    """Hide all visible comments of this action on the pull request except the last one."""
    # we want to reduce the number of shown comments to a minimum

    # get comments of this pull request
    comments = self.get_pull_request_comments(pull, order_by_updated=False)

    # get all comments that come from this action and are not hidden
    comments = self.get_action_comments(comments)

    # take all but the last comment
    comment_ids = [comment.get('id') for comment in comments[:-1]]

    # hide all those comments
    for node_id in comment_ids:
        logger.info(f'hiding unit test result comment {node_id}')
        self.hide_comment(node_id)