Skip to content

Commit

Permalink
support for disabling individual tests
Browse files Browse the repository at this point in the history
- allow disabling tests by id and by name (e.g. B602,assert_used)
- update nosec_lines to be a dict keyed on line number to a set of tests to ignore
- use an empty set to indicate a blanket nosec (i.e. just #nosec, no individual tests)
- use None to indicate that there was no nosec comment on the line in question
- track and report metrics on the number of tests skipped by specific tests and the number of tests that fail because they were not included in the list of specific tests to ignore

Resolves #211
See also #418
  • Loading branch information
mikespallino committed Jan 7, 2022
1 parent e0a12a9 commit b9d406c
Show file tree
Hide file tree
Showing 9 changed files with 190 additions and 29 deletions.
17 changes: 17 additions & 0 deletions README.rst
Expand Up @@ -176,6 +176,23 @@ security issue, it will not be reported::

self.process = subprocess.Popen('/bin/echo', shell=True) # nosec

Because multiple issues can be reported for the same line, specific tests may
be provided to suppress those reports. This will cause other issues not
included to be reported. This can be useful in preventing situations where a
nosec comment is used, but a separate vulnerability may be added to the line
later causing the new vulnerability to be ignored.

For example, this will suppress the report of B602 and B607::

self.process = subprocess.Popen('/bin/ls *', shell=True) #nosec B602, B607

Full test names rather than the test ID may also be used.

For example, this will suppress the report of B101 and continue to report B506
as an issue::

assert yaml.load("{}") == [] # nosec assert_used


Vulnerability Tests
-------------------
Expand Down
89 changes: 77 additions & 12 deletions bandit/core/manager.py
Expand Up @@ -9,6 +9,7 @@
import json
import logging
import os
import re
import sys
import tokenize
import traceback
Expand All @@ -23,6 +24,8 @@


LOG = logging.getLogger(__name__)
NOSEC_SPECIFIC_IDS_IGNORE = re.compile(r"([bB][\d]+),?[ ]?")
NOSEC_SPECIFIC_NAMES_IGNORE = re.compile(r"([a-z_]+),?[ ]?")


class BanditManager(object):
Expand Down Expand Up @@ -273,18 +276,20 @@ def _parse_file(self, fname, fdata, new_files_list):
lines = data.splitlines()
self.metrics.begin(fname)
self.metrics.count_locs(lines)
if self.ignore_nosec:
nosec_lines = set()
else:
try:
fdata.seek(0)
tokens = tokenize.tokenize(fdata.readline)
nosec_lines = set(
lineno for toktype, tokval, (lineno, _), _, _ in tokens
if toktype == tokenize.COMMENT and
'#nosec' in tokval or '# nosec' in tokval)
except tokenize.TokenError:
nosec_lines = set()
# nosec_lines is a dict of line number -> set of tests to ignore
# for the line
nosec_lines = dict()
try:
fdata.seek(0)
tokens = tokenize.tokenize(fdata.readline)

if not self.ignore_nosec:
for toktype, tokval, (lineno, _), _, _ in tokens:
if toktype == tokenize.COMMENT:
nosec_lines[lineno] = _parse_nosec_comment(tokval)

except tokenize.TokenError:
pass
score = self._execute_ast_visitor(fname, data, nosec_lines)
self.scores.append(score)
self.metrics.count_issues([score, ])
Expand Down Expand Up @@ -409,3 +414,63 @@ def _find_candidate_matches(unmatched_issues, results_list):
unmatched == i])

return issue_candidates


def _get_tests_from_nosec_comment(comment):
nosec_no_space = '#nosec'
nosec_space = '# nosec'
nospace = comment.find(nosec_no_space)
space = comment.find(nosec_space)
if nospace > -1:
start_comment = nospace
nosec_length = len(nosec_no_space)
elif space > -1:
start_comment = space
nosec_length = len(nosec_space)
else:
# None is explicitly different to empty set, None indicates no
# nosec comment on a line
return None

# find the next # separator
end = comment.find('#', start_comment + 1)
# if we don't find one, set end index to the length
if end == -1:
end = len(comment)

# extract text after #[ ]?nosec and the end index, this is just the
# list of potential test ids or names
return comment[start_comment + nosec_length:end]


def _parse_nosec_comment(comment):
    """Translate a comment token into the set of test ids to suppress.

    :param comment: the full comment token text to parse
    :return: None when there is no nosec marker at all, an empty set for
        a blanket nosec (no specific tests listed), or the set of plugin
        test ids named in the comment by id (B602) or by name
        (assert_used)
    """
    nosec_tests = _get_tests_from_nosec_comment(comment)
    if nosec_tests is None:
        return None

    # empty set indicates that there was a nosec comment without specific
    # test ids or names
    test_ids = set()
    if not nosec_tests:
        return test_ids

    extman = extension_loader.MANAGER

    # check for IDS to ignore (e.g. B602)
    for match in NOSEC_SPECIFIC_IDS_IGNORE.finditer(nosec_tests):
        candidate_id = match.group(1)
        if extman.check_id(candidate_id):
            test_ids.add(candidate_id)
        else:
            LOG.warning("Test in comment: %s is not a test id",
                        candidate_id)

    # check for NAMES to ignore (e.g. assert_used)
    for match in NOSEC_SPECIFIC_NAMES_IGNORE.finditer(nosec_tests):
        candidate_name = match.group(1)
        plugin_id = extman.get_plugin_id(candidate_name)
        if plugin_id:
            test_ids.add(plugin_id)
        else:
            LOG.warning(
                "Test in comment: %s is not a test name, ignoring",
                candidate_name)

    return test_ids
26 changes: 21 additions & 5 deletions bandit/core/metrics.py
Expand Up @@ -21,7 +21,8 @@ class Metrics(object):

def __init__(self):
self.data = dict()
self.data['_totals'] = {'loc': 0, 'nosec': 0}
self.data['_totals'] = {'loc': 0, 'nosec': 0, 'nosec_by_test': 0,
'failed_nosec_by_test': 0}

# initialize 0 totals for criteria and rank; this will be reset later
for rank in constants.RANKING:
def begin(self, fname):
    """Begin a new metric block.

    This starts a new metric collection named "fname" and makes it the
    active block.

    :param fname: the metrics unique name, normally the file name.
    """
    fresh = {'loc': 0, 'nosec': 0, 'nosec_by_test': 0,
             'failed_nosec_by_test': 0}
    self.data[fname] = fresh
    self.current = fresh

def note_nosec(self, num=1):
    """Note a "nosec" comment.

    Adds *num* to the currently active metrics nosec count.

    :param num: number of nosecs seen, defaults to 1
    """
    self.current['nosec'] = self.current['nosec'] + num

def note_nosec_by_test(self, num=1):
    """Note a "nosec BXXX, BYYY, ..." comment.

    Adds *num* to the currently active metrics nosec_by_test count.

    :param num: number of nosecs seen, defaults to 1
    """
    self.current['nosec_by_test'] = self.current['nosec_by_test'] + num

def note_failed_nosec_by_test(self, num=1):
    """Note an issue that a test-specific nosec comment did not cover.

    Adds *num* to the currently active metrics failed_nosec_by_test
    count.

    :param num: number of failed nosecs seen, defaults to 1
    """
    self.current['failed_nosec_by_test'] = (
        self.current['failed_nosec_by_test'] + num)

def count_locs(self, lines):
"""Count lines of code.
Expand Down
13 changes: 11 additions & 2 deletions bandit/core/node_visitor.py
Expand Up @@ -201,8 +201,10 @@ def pre_visit(self, node):
if hasattr(node, 'lineno'):
self.context['lineno'] = node.lineno

if node.lineno in self.nosec_lines:
LOG.debug("skipped, nosec")
# explicitly check for empty set to skip all tests for a line
nosec_tests = self.nosec_lines.get(node.lineno)
if nosec_tests is not None and not len(nosec_tests):
LOG.debug("skipped, nosec without test number")
self.metrics.note_nosec()
return False
if hasattr(node, 'col_offset'):
Expand Down Expand Up @@ -272,6 +274,13 @@ def update_scores(self, scores):
severity, this is needed to update the stored list.
:param score: The score list to update our scores with
'''
# scores has extra nosec information about nosecs with specific tests
# so pop those out first and track the metrics for them
self.metrics.note_nosec_by_test(scores.pop('nosecs_by_tests'))
self.metrics.note_failed_nosec_by_test(
scores.pop('failed_nosecs_by_test')
)

# we'll end up with something like:
# SEVERITY: {0, 0, 0, 10} where 10 is weighted by finding and level
for score_type in self.scores:
Expand Down
49 changes: 42 additions & 7 deletions bandit/core/tester.py
Expand Up @@ -32,13 +32,15 @@ def run_tests(self, raw_context, checktype):
:param raw_context: Raw context dictionary
:param checktype: The type of checks to run
:param nosec_lines: Lines which should be skipped because of nosec
:return: a score based on the number and type of test results
:return: a score based on the number and type of test results with
extra metrics about nosec comments
'''

scores = {
'SEVERITY': [0] * len(constants.RANKING),
'CONFIDENCE': [0] * len(constants.RANKING)
'CONFIDENCE': [0] * len(constants.RANKING),
'nosecs_by_tests': 0,
'failed_nosecs_by_test': 0
}

tests = self.testset.get_tests(checktype)
Expand All @@ -53,10 +55,24 @@ def run_tests(self, raw_context, checktype):
else:
result = test(context)

# if we have a result, record it and update scores
if (result is not None and
result.lineno not in self.nosec_lines and
temp_context['lineno'] not in self.nosec_lines):
if result is not None:
nosec_tests_to_skip = set()
base_tests = self.nosec_lines.get(result.lineno, None)
context_tests = self.nosec_lines.get(
temp_context['lineno'], None)

# if both are None then there were no comments
# this is explicitly different than being empty
# empty set indicates blanket nosec comment without
# individual test names or ids
if base_tests is None and context_tests is None:
nosec_tests_to_skip = None

# combine tests from current line and context line
if base_tests is not None:
nosec_tests_to_skip.update(base_tests)
if context_tests is not None:
nosec_tests_to_skip.update(context_tests)

if isinstance(temp_context['filename'], bytes):
result.fname = temp_context['filename'].decode('utf-8')
Expand All @@ -71,6 +87,25 @@ def run_tests(self, raw_context, checktype):
if result.test_id == "":
result.test_id = test._test_id

# don't skip the test if there was no nosec comment
if nosec_tests_to_skip is not None:
# if the set is empty or the test id is in the set of
# tests to skip, log and increment the skip by test
# count
if not nosec_tests_to_skip or \
(result.test_id in nosec_tests_to_skip):
LOG.debug("skipped, nosec for test %s"
% result.test_id)
scores['nosecs_by_tests'] += 1
continue
# otherwise this test was not called out explicitly by
# a nosec BXX type comment and should fail. Log and
# increment the failed test count
else:
LOG.debug("uncaught test %s in nosec comment"
% result.test_id)
scores['failed_nosecs_by_test'] += 1

self.results.append(result)

LOG.debug("Issue identified by %s: %s", name, result)
Expand Down
6 changes: 6 additions & 0 deletions bandit/formatters/text.py
Expand Up @@ -147,6 +147,12 @@ def report(manager, fileobj, sev_level, conf_level, lines=-1):

bits.append('\tTotal lines skipped (#nosec): %i' %
(manager.metrics.data['_totals']['nosec']))
bits.append('\tTotal lines skipped (#nosec BXXX,BYYY,...): %i' %
(manager.metrics.data['_totals']['nosec_by_test']))
bits.append(
'\tTotal other nosec test caught (#nosec BXXX,BYYY but BZZZ): %i' %
(manager.metrics.data['_totals']['failed_nosec_by_test'])
)

skipped = manager.get_skipped()
bits.append(get_metrics(manager))
Expand Down
7 changes: 7 additions & 0 deletions examples/nosec.py
Expand Up @@ -5,3 +5,10 @@
shell=True) #nosec (on the specific kwarg line)
subprocess.Popen('#nosec', shell=True)
subprocess.Popen('/bin/ls *', shell=True) # type: … # nosec # noqa: E501 ; pylint: disable=line-too-long
subprocess.Popen('/bin/ls *', shell=True) #nosec subprocess_popen_with_shell_equals_true (on the line)
subprocess.Popen('#nosec', shell=True) # nosec B607, B602
subprocess.Popen('#nosec', shell=True) # nosec B607 B602
subprocess.Popen('/bin/ls *', shell=True) # nosec subprocess_popen_with_shell_equals_true start_process_with_partial_path
subprocess.Popen('/bin/ls *', shell=True) # type: … # noqa: E501 ; pylint: disable=line-too-long # nosec
subprocess.Popen('#nosec', shell=True) # nosec B607, B101
subprocess.Popen('#nosec', shell=True) # nosec B602, subprocess_popen_with_shell_equals_true
4 changes: 2 additions & 2 deletions tests/functional/test_functional.py
Expand Up @@ -722,8 +722,8 @@ def test_flask_debug_true(self):

def test_nosec(self):
    """Check issue counts for the nosec.py example file."""
    severity = {'UNDEFINED': 0, 'LOW': 4, 'MEDIUM': 0, 'HIGH': 0}
    confidence = {'UNDEFINED': 0, 'LOW': 0, 'MEDIUM': 0, 'HIGH': 4}
    self.check_example(
        'nosec.py',
        {'SEVERITY': severity, 'CONFIDENCE': confidence})

Expand Down
8 changes: 7 additions & 1 deletion tests/unit/formatters/test_text.py
Expand Up @@ -99,7 +99,9 @@ def test_report_nobaseline(self, get_issue_list):

get_issue_list.return_value = [issue_a, issue_b]

self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50}
self.manager.metrics.data['_totals'] = {'loc': 1000, 'nosec': 50,
'nosec_by_test': 0,
'failed_nosec_by_test': 0}
for category in ['SEVERITY', 'CONFIDENCE']:
for level in ['UNDEFINED', 'LOW', 'MEDIUM', 'HIGH']:
self.manager.metrics.data['_totals']['%s.%s' %
Expand Down Expand Up @@ -140,6 +142,10 @@ def test_report_nobaseline(self, get_issue_list):
'High: 1',
'Total lines skipped ',
'(#nosec): 50',
'Total lines skipped ',
'(#nosec BXXX,BYYY,...): 0',
'Total other nosec test caught ',
'(#nosec BXXX,BYYY but BZZZ): 0',
'Total issues (by severity)',
'Total issues (by confidence)',
'Files skipped (1)',
Expand Down

0 comments on commit b9d406c

Please sign in to comment.