forked from PyCQA/bandit
/
test_screen.py
208 lines (161 loc) · 8.14 KB
/
test_screen.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
# Copyright (c) 2015 VMware, Inc.
# Copyright (c) 2015 Hewlett Packard Enterprise
#
# SPDX-License-Identifier: Apache-2.0
import collections
import os
import tempfile
from unittest import mock

import testtools

import bandit
from bandit.core import config
from bandit.core import docs_utils
from bandit.core import issue
from bandit.core import manager
from bandit.formatters import screen
class ScreenFormatterTests(testtools.TestCase):
    """Tests for the screen (terminal) formatter in bandit.formatters.screen."""

    def setUp(self):
        super(ScreenFormatterTests, self).setUp()

    def _create_manager(self):
        """Build a BanditManager that reports into a fresh temp file.

        Sets ``self.manager`` and ``self.tmp_fname``.  The file descriptor
        returned by ``tempfile.mkstemp`` is closed immediately (it was
        previously leaked, one fd per test), and the temp file itself is
        scheduled for removal when the test finishes.
        """
        conf = config.BanditConfig()
        self.manager = manager.BanditManager(conf, 'file')
        (tmp_fd, self.tmp_fname) = tempfile.mkstemp()
        os.close(tmp_fd)  # mkstemp hands back an open fd; don't leak it
        self.addCleanup(os.remove, self.tmp_fname)
        self.manager.out_file = self.tmp_fname

    @mock.patch('bandit.core.issue.Issue.get_code')
    def test_output_issue(self, get_code):
        """_output_issue_str renders the expected colored text for an issue.

        Checks the default rendering, ``show_code=False`` (code snippet
        omitted) and ``show_lineno=False`` (line/column left blank).
        """
        # NOTE: renamed from `issue` to avoid shadowing the imported
        # `bandit.core.issue` module.
        test_issue = _get_issue_instance()
        get_code.return_value = 'DDDDDDD'
        indent_val = 'CCCCCCC'

        def _template(_issue, _indent_val, _code, _color):
            # Mirror the line-by-line layout produced by
            # screen._output_issue_str so the test states the full contract.
            return_val = ["{}{}>> Issue: [{}:{}] {}".
                          format(_indent_val, _color, _issue.test_id,
                                 _issue.test, _issue.text),
                          "{}   Severity: {}   Confidence: {}".
                          format(_indent_val, _issue.severity.capitalize(),
                                 _issue.confidence.capitalize()),
                          "{}   Location: {}:{}:{}".
                          format(_indent_val, _issue.fname, _issue.lineno,
                                 _issue.col_offset),
                          "{}   More Info: {}{}".format(
                              _indent_val, docs_utils.get_url(_issue.test_id),
                              screen.COLOR['DEFAULT'])]
            if _code:
                return_val.append("{}{}".format(_indent_val, _code))
            return '\n'.join(return_val)

        issue_text = screen._output_issue_str(test_issue, indent_val)
        expected_return = _template(test_issue, indent_val, 'DDDDDDD',
                                    screen.COLOR['MEDIUM'])
        self.assertEqual(expected_return, issue_text)

        issue_text = screen._output_issue_str(test_issue, indent_val,
                                              show_code=False)
        expected_return = _template(test_issue, indent_val, '',
                                    screen.COLOR['MEDIUM'])
        self.assertEqual(expected_return, issue_text)

        test_issue.lineno = ''
        test_issue.col_offset = ''
        issue_text = screen._output_issue_str(test_issue, indent_val,
                                              show_lineno=False)
        expected_return = _template(test_issue, indent_val, 'DDDDDDD',
                                    screen.COLOR['MEDIUM'])
        self.assertEqual(expected_return, issue_text)

    @mock.patch('bandit.core.manager.BanditManager.get_issue_list')
    def test_no_issues(self, get_issue_list):
        """An empty issue list prints the 'No issues identified.' banner."""
        self._create_manager()
        get_issue_list.return_value = collections.OrderedDict()
        with mock.patch('bandit.formatters.screen.do_print') as m:
            with open(self.tmp_fname, 'w') as tmp_file:
                screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                              lines=5)
            self.assertIn('No issues identified.',
                          '\n'.join([str(a) for a in m.call_args]))

    @mock.patch('bandit.core.manager.BanditManager.get_issue_list')
    def test_report_nobaseline(self, get_issue_list):
        """Without a baseline, every issue and all summary fields appear."""
        self._create_manager()
        self.manager.verbose = True
        self.manager.files_list = ['binding.py']
        self.manager.scores = [{'SEVERITY': [0, 0, 0, 1],
                                'CONFIDENCE': [0, 0, 0, 1]}]
        self.manager.skipped = [('abc.py', 'File is bad')]
        self.manager.excluded_files = ['def.py']

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()
        get_issue_list.return_value = [issue_a, issue_b]

        # Seed the metrics totals read back by the report's summary section.
        self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
        for category in ['SEVERITY', 'CONFIDENCE']:
            for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
                self.manager.metrics.data['_totals']['%s.%s' %
                                                     (category, level)] = 1

        # Validate that we're outputting the correct issues
        output_str_fn = 'bandit.formatters.screen._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            with open(self.tmp_fname, 'w') as tmp_file:
                screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                              lines=5)

            calls = [mock.call(issue_a, '', lines=5),
                     mock.call(issue_b, '', lines=5)]

            output_str.assert_has_calls(calls, any_order=True)

        # Validate that we're outputting all of the expected fields and the
        # correct values
        with mock.patch('bandit.formatters.screen.do_print') as m:
            with open(self.tmp_fname, 'w') as tmp_file:
                screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                              lines=5)

            data = '\n'.join([str(a) for a in m.call_args[0][0]])

            expected = 'Run started'
            self.assertIn(expected, data)

            expected_items = [
                screen.header('Files in scope (1):'),
                '\n\tbinding.py (score: {SEVERITY: 1, CONFIDENCE: 1})']

            for item in expected_items:
                self.assertIn(item, data)

            expected = screen.header('Files excluded (1):') + '\n\tdef.py'
            self.assertIn(expected, data)

            expected = ('Total lines of code: 1000\n\tTotal lines skipped '
                        '(#nosec): 50')
            self.assertIn(expected, data)

            expected = ('Total issues (by severity):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = ('Total issues (by confidence):\n\t\tUndefined: 1\n\t\t'
                        'Low: 1\n\t\tMedium: 1\n\t\tHigh: 1')
            self.assertIn(expected, data)

            expected = (screen.header('Files skipped (1):') +
                        '\n\tabc.py (File is bad)')
            self.assertIn(expected, data)

    @mock.patch('bandit.core.manager.BanditManager.get_issue_list')
    def test_report_baseline(self, get_issue_list):
        """With a baseline, candidates are rendered indented under an issue.

        An issue with one candidate (issue_a -> issue_x) is printed normally;
        an issue with multiple candidates (issue_b -> issue_y, issue_z) is
        printed without code/lineno and each candidate indented below it.
        """
        self._create_manager()

        issue_a = _get_issue_instance()
        issue_b = _get_issue_instance()

        issue_x = _get_issue_instance()
        issue_x.fname = 'x'
        issue_y = _get_issue_instance()
        issue_y.fname = 'y'
        issue_z = _get_issue_instance()
        issue_z.fname = 'z'

        get_issue_list.return_value = collections.OrderedDict(
            [(issue_a, [issue_x]), (issue_b, [issue_y, issue_z])])

        # Validate that we're outputting the correct issues
        indent_val = ' ' * 10
        output_str_fn = 'bandit.formatters.screen._output_issue_str'
        with mock.patch(output_str_fn) as output_str:
            output_str.return_value = 'ISSUE_OUTPUT_TEXT'

            with open(self.tmp_fname, 'w') as tmp_file:
                screen.report(self.manager, tmp_file, bandit.LOW, bandit.LOW,
                              lines=5)

            calls = [mock.call(issue_a, '', lines=5),
                     mock.call(issue_b, '', show_code=False,
                               show_lineno=False),
                     mock.call(issue_y, indent_val, lines=5),
                     mock.call(issue_z, indent_val, lines=5)]

            output_str.assert_has_calls(calls, any_order=True)
def _get_issue_instance(severity=bandit.MEDIUM, confidence=bandit.MEDIUM):
    """Return an Issue pre-populated with the fixture values these tests use."""
    fixture = issue.Issue(severity, confidence, 'Test issue')
    for attr, value in (('fname', 'code.py'),
                        ('test', 'bandit_plugin'),
                        ('lineno', 1)):
        setattr(fixture, attr, value)
    return fixture