# tester.py (forked from PyCQA/bandit)
#
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# SPDX-License-Identifier: Apache-2.0
import copy
import logging
import warnings

from bandit.core import constants
from bandit.core import context as b_context
from bandit.core import utils

warnings.formatwarning = utils.warnings_formatter

LOG = logging.getLogger(__name__)


class BanditTester:
    def __init__(self, testset, debug, nosec_lines, metrics):
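        """
        :param testset: test set providing the plugin tests for each
            check type (via get_tests)
        :param debug: if True, re-raise exceptions raised by plugins
            instead of only logging them
        :param nosec_lines: mapping of line number to the set of test
            ids/names excused by a nosec comment on that line (an empty
            set means a blanket nosec)
        :param metrics: metrics collector used to count skipped tests
        """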
        self.results = []
        self.testset = testset
        self.last_result = None
        self.debug = debug
        self.nosec_lines = nosec_lines
        self.metrics = metrics

    def run_tests(self, raw_context, checktype):
        """Run all tests for a given type of check.

        Runs all tests registered for a certain type of check (for
        example 'functions') and stores the results in self.results.

        :param raw_context: Raw context dictionary
        :param checktype: The type of checks to run
        :return: a score based on the number and type of test results,
            with extra metrics about nosec comments
        """
        scores = {
            "SEVERITY": [0] * len(constants.RANKING),
            "CONFIDENCE": [0] * len(constants.RANKING),
        }

        tests = self.testset.get_tests(checktype)
        for test in tests:
            name = test.__name__
            # execute the test with an instance of the context class
            temp_context = copy.copy(raw_context)
            context = b_context.Context(temp_context)
            try:
                if hasattr(test, "_config"):
                    result = test(context, test._config)
                else:
                    result = test(context)

                if result is not None:
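                    # a nosec comment may sit on the reported issue's
                    # line or on the line of the node being visited, so
                    # both lines are consulted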
                    nosec_tests_to_skip = self._get_nosecs_from_contexts(
                        temp_context, test_result=result
                    )
                    if isinstance(temp_context["filename"], bytes):
                        result.fname = temp_context["filename"].decode("utf-8")
                    else:
                        result.fname = temp_context["filename"]
                    result.fdata = temp_context["file_data"]

                    if result.lineno is None:
                        result.lineno = temp_context["lineno"]
                    result.linerange = temp_context["linerange"]
                    result.col_offset = temp_context["col_offset"]
                    result.test = name
                    if result.test_id == "":
                        result.test_id = test._test_id
                    # don't skip the test if there was no nosec comment
                    if nosec_tests_to_skip is not None:
                        # if the set is empty or the test id is in the set
                        # of tests to skip, log and increment the
                        # skip-by-test count
                        if not nosec_tests_to_skip or (
                            result.test_id in nosec_tests_to_skip
                        ):
                            LOG.debug(
                                "skipped, nosec for test %s", result.test_id
                            )
                            self.metrics.note_skipped_test()
                            continue

                    self.results.append(result)

                    LOG.debug("Issue identified by %s: %s", name, result)
                    sev = constants.RANKING.index(result.severity)
                    val = constants.RANKING_VALUES[result.severity]
                    scores["SEVERITY"][sev] += val
                    con = constants.RANKING.index(result.confidence)
                    val = constants.RANKING_VALUES[result.confidence]
                    scores["CONFIDENCE"][con] += val
                else:
                    nosec_tests_to_skip = self._get_nosecs_from_contexts(
                        temp_context
                    )
                    if (
                        nosec_tests_to_skip
                        and test._test_id in nosec_tests_to_skip
                    ):
                        LOG.warning(
                            f"nosec encountered ({test._test_id}), but no "
                            f"failed test on line {temp_context['lineno']}"
                        )
            except Exception as e:
                self.report_error(name, context, e)
                if self.debug:
                    raise

        LOG.debug("Returning scores: %s", scores)
        return scores

    def _get_nosecs_from_contexts(self, context, test_result=None):
        """Use context and optional test result to get the tests to skip.

        :param context: temp context
        :param test_result: optional test result
        :return: set of tests to skip for the line, based on the contexts
        """
        nosec_tests_to_skip = set()
        base_tests = (
            self.nosec_lines.get(test_result.lineno, None)
            if test_result
            else None
        )
        context_tests = self.nosec_lines.get(context["lineno"], None)

        # if both are None, there were no nosec comments at all; this is
        # explicitly different from being empty: an empty set indicates a
        # blanket nosec comment without individual test names or ids
        if base_tests is None and context_tests is None:
            nosec_tests_to_skip = None

        # combine tests from the current line and the context line
        if base_tests is not None:
            nosec_tests_to_skip.update(base_tests)
        if context_tests is not None:
            nosec_tests_to_skip.update(context_tests)

        return nosec_tests_to_skip
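
    # Return-value semantics of _get_nosecs_from_contexts, as consumed by
    # run_tests above:
    #   None      -> no nosec comment at all; nothing is skipped
    #   set()     -> blanket "# nosec"; every test on the line is skipped
    #   {"B602"}  -> targeted nosec; only the listed test ids are skipped
    #                ("B602" here is just an illustrative bandit test id)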

    @staticmethod
    def report_error(test, context, error):
        what = "Bandit internal error running: "
        what += "%s " % test
        what += "on file %s at line %i: " % (
            context._context["filename"],
            context._context["lineno"],
        )
        what += str(error)
        import traceback

        what += traceback.format_exc()
        LOG.error(what)
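

# A minimal usage sketch (not part of the original module). The testset,
# metrics, and raw_context objects are assumed to be built by bandit's own
# manager and node visitor; the shapes shown here are illustrative, not a
# guaranteed API:
#
#     tester = BanditTester(
#         testset, debug=False, nosec_lines={}, metrics=metrics
#     )
#     scores = tester.run_tests(raw_context, "Call")
#     for issue in tester.results:
#         print(issue.test_id, issue.fname, issue.lineno)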