diff --git a/reports/20210322_66173dc24d/htmlcov/coverage___init___py.html b/reports/20210322_66173dc24d/htmlcov/coverage___init___py.html
new file mode 100644
index 000000000..26b7c352c
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage___init___py.html
@@ -0,0 +1,102 @@
Coverage for coverage/__init__.py: 100.000%
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Code coverage measurement for Python.

Ned Batchelder
https://nedbatchelder.com/code/coverage

"""

import sys

from coverage.version import __version__, __url__, version_info

from coverage.control import Coverage, process_startup
from coverage.data import CoverageData
from coverage.misc import CoverageException
from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
from coverage.pytracer import PyTracer

# Backward compatibility.
coverage = Coverage

# On Windows, we encode and decode deep enough that something goes wrong and
# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
# Adding a reference here prevents it from being unloaded. Yuk.
import encodings.utf_8  # pylint: disable=wrong-import-position, wrong-import-order

# Because of the "from coverage.control import fooey" lines at the top of the
# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
# This makes some inspection tools (like pydoc) unable to find the class
# coverage.coverage. So remove that entry.
try:
    del sys.modules['coverage.coverage']
except KeyError:
    pass
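The __init__ module above re-exports the public API (Coverage, CoverageData, process_startup) at the package level. A minimal sketch of driving that API directly, assuming a function to measure and a writable working directory for the .coverage data file (the function name is illustrative):

    import coverage

    def work():
        return sum(range(10))

    cov = coverage.Coverage()   # coverage.coverage is the same class, kept for backward compatibility
    cov.start()
    work()                      # code executed between start() and stop() is measured
    cov.stop()
    cov.save()                  # writes the .coverage data file
    cov.report()                # prints a text summary to stdout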
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage___main___py.html b/reports/20210322_66173dc24d/htmlcov/coverage___main___py.html
new file mode 100644
index 000000000..06d858d34
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage___main___py.html
@@ -0,0 +1,74 @@
Coverage for coverage/__main__.py: 100.000%
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Coverage.py's main entry point."""

import sys
from coverage.cmdline import main
sys.exit(main())
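__main__.py simply delegates to coverage.cmdline.main, which is what makes "python -m coverage" behave like the installed coverage script. A small sketch of the same delegation, assuming the argument list is supplied explicitly instead of being taken from sys.argv:

    import sys
    from coverage.cmdline import main

    # Passing an explicit argv list mirrors what "python -m coverage report -m" would do.
    sys.exit(main(["report", "-m"]))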
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_annotate_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_annotate_py.html
new file mode 100644
index 000000000..7ef12b89f
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_annotate_py.html
@@ -0,0 +1,174 @@
Coverage for coverage/annotate.py: 96.552%
1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Source file annotation for coverage.py.""" 

+

5 

+

6import io 

+

7import os 

+

8import re 

+

9 

+

10from coverage.files import flat_rootname 

+

11from coverage.misc import ensure_dir, isolate_module 

+

12from coverage.report import get_analysis_to_report 

+

13 

+

14os = isolate_module(os) 

+

15 

+

16 

+

17class AnnotateReporter(object): 

+

18 """Generate annotated source files showing line coverage. 

+

19 

+

20 This reporter creates annotated copies of the measured source files. Each 

+

21 .py file is copied as a .py,cover file, with a left-hand margin annotating 

+

22 each line:: 

+

23 

+

24 > def h(x): 

+

25 - if 0: #pragma: no cover 

+

26 - pass 

+

27 > if x == 1: 

+

28 ! a = 1 

+

29 > else: 

+

30 > a = 2 

+

31 

+

32 > h(2) 

+

33 

+

34 Executed lines use '>', lines not executed use '!', lines excluded from 

+

35 consideration use '-'. 

+

36 

+

37 """ 

+

38 

+

39 def __init__(self, coverage): 

+

40 self.coverage = coverage 

+

41 self.config = self.coverage.config 

+

42 self.directory = None 

+

43 

+

44 blank_re = re.compile(r"\s*(#|$)") 

+

45 else_re = re.compile(r"\s*else\s*:\s*(#|$)") 

+

46 

+

47 def report(self, morfs, directory=None): 

+

48 """Run the report. 

+

49 

+

50 See `coverage.report()` for arguments. 

+

51 

+

52 """ 

+

53 self.directory = directory 

+

54 self.coverage.get_data() 

+

55 for fr, analysis in get_analysis_to_report(self.coverage, morfs): 

+

56 self.annotate_file(fr, analysis) 

+

57 

+

58 def annotate_file(self, fr, analysis): 

+

59 """Annotate a single file. 

+

60 

+

61 `fr` is the FileReporter for the file to annotate. 

+

62 

+

63 """ 

+

64 statements = sorted(analysis.statements) 

+

65 missing = sorted(analysis.missing) 

+

66 excluded = sorted(analysis.excluded) 

+

67 

+

68 if self.directory: 

+

69 ensure_dir(self.directory) 

+

70 dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename())) 

+

71 if dest_file.endswith("_py"): 71 ↛ 73line 71 didn't jump to line 73, because the condition on line 71 was never false

+

72 dest_file = dest_file[:-3] + ".py" 

+

73 dest_file += ",cover" 

+

74 else: 

+

75 dest_file = fr.filename + ",cover" 

+

76 

+

77 with io.open(dest_file, 'w', encoding='utf8') as dest: 

+

78 i = 0 

+

79 j = 0 

+

80 covered = True 

+

81 source = fr.source() 

+

82 for lineno, line in enumerate(source.splitlines(True), start=1): 

+

83 while i < len(statements) and statements[i] < lineno: 

+

84 i += 1 

+

85 while j < len(missing) and missing[j] < lineno: 

+

86 j += 1 

+

87 if i < len(statements) and statements[i] == lineno: 

+

88 covered = j >= len(missing) or missing[j] > lineno 

+

89 if self.blank_re.match(line): 

+

90 dest.write(u' ') 

+

91 elif self.else_re.match(line): 

+

92 # Special logic for lines containing only 'else:'. 

+

93 if i >= len(statements) and j >= len(missing): 93 ↛ 94line 93 didn't jump to line 94, because the condition on line 93 was never true

+

94 dest.write(u'! ') 

+

95 elif i >= len(statements) or j >= len(missing): 

+

96 dest.write(u'> ') 

+

97 elif statements[i] == missing[j]: 

+

98 dest.write(u'! ') 

+

99 else: 

+

100 dest.write(u'> ') 

+

101 elif lineno in excluded: 

+

102 dest.write(u'- ') 

+

103 elif covered: 

+

104 dest.write(u'> ') 

+

105 else: 

+

106 dest.write(u'! ') 

+

107 

+

108 dest.write(line) 

+
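AnnotateReporter is normally reached through the public Coverage.annotate() method rather than instantiated directly. A minimal sketch, assuming a .coverage data file already exists and that "annotated" is an acceptable output directory (the directory name is illustrative):

    import coverage

    cov = coverage.Coverage()
    cov.load()                            # read the existing .coverage data file
    # Writes one "<name>.py,cover" file per measured source file, using the
    # '>', '!' and '-' markers described in the AnnotateReporter docstring above.
    cov.annotate(directory="annotated")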
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_backward_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_backward_py.html
new file mode 100644
index 000000000..cec5bfddb
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_backward_py.html
@@ -0,0 +1,327 @@
Coverage for coverage/backward.py: 99.338%
1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Add things to old Pythons so I can pretend they are newer.""" 

+

5 

+

6# This file's purpose is to provide modules to be imported from here. 

+

7# pylint: disable=unused-import 

+

8 

+

9import os 

+

10import sys 

+

11 

+

12from datetime import datetime 

+

13 

+

14from coverage import env 

+

15 

+

16 

+

17# Pythons 2 and 3 differ on where to get StringIO. 

+

18try: 

+

19 from cStringIO import StringIO 

+

20except ImportError: 

+

21 from io import StringIO 

+

22 

+

23# In py3, ConfigParser was renamed to the more-standard configparser. 

+

24# But there's a py3 backport that installs "configparser" in py2, and I don't 

+

25# want it because it has annoying deprecation warnings. So try the real py2 

+

26# import first. 

+

27try: 

+

28 import ConfigParser as configparser 

+

29except ImportError: 

+

30 import configparser 

+

31 

+

32# What's a string called? 

+

33try: 

+

34 string_class = basestring 

+

35except NameError: 

+

36 string_class = str 

+

37 

+

38# What's a Unicode string called? 

+

39try: 

+

40 unicode_class = unicode 

+

41except NameError: 

+

42 unicode_class = str 

+

43 

+

44# range or xrange? 

+

45try: 

+

46 range = xrange # pylint: disable=redefined-builtin 

+

47except NameError: 

+

48 range = range 

+

49 

+

50try: 

+

51 from itertools import zip_longest 

+

52except ImportError: 

+

53 from itertools import izip_longest as zip_longest 

+

54 

+

55# Where do we get the thread id from? 

+

56try: 

+

57 from thread import get_ident as get_thread_id 

+

58except ImportError: 

+

59 from threading import get_ident as get_thread_id 

+

60 

+

61try: 

+

62 os.PathLike 

+

63except AttributeError: 

+

64 # This is Python 2 and 3 

+

65 path_types = (bytes, string_class, unicode_class) 

+

66else: 

+

67 # 3.6+ 

+

68 path_types = (bytes, str, os.PathLike) 

+

69 

+

70# shlex.quote is new, but there's an undocumented implementation in "pipes", 

+

71# who knew!? 

+

72try: 

+

73 from shlex import quote as shlex_quote 

+

74except ImportError: 

+

75 # Useful function, available under a different (undocumented) name 

+

76 # in Python versions earlier than 3.3. 

+

77 from pipes import quote as shlex_quote 

+

78 

+

79try: 

+

80 import reprlib 

+

81except ImportError: # pragma: not covered 

+

82 # We need this on Python 2, but in testing environments, a backport is 

+

83 # installed, so this import isn't used. 

+

84 import repr as reprlib 

+

85 

+

86# A function to iterate listlessly over a dict's items, and one to get the 

+

87# items as a list. 

+

88try: 

+

89 {}.iteritems 

+

90except AttributeError: 

+

91 # Python 3 

+

92 def iitems(d): 

+

93 """Produce the items from dict `d`.""" 

+

94 return d.items() 

+

95 

+

96 def litems(d): 

+

97 """Return a list of items from dict `d`.""" 

+

98 return list(d.items()) 

+

99else: 

+

100 # Python 2 

+

101 def iitems(d): 

+

102 """Produce the items from dict `d`.""" 

+

103 return d.iteritems() 

+

104 

+

105 def litems(d): 

+

106 """Return a list of items from dict `d`.""" 

+

107 return d.items() 

+

108 

+

109# Getting the `next` function from an iterator is different in 2 and 3. 

+

110try: 

+

111 iter([]).next 

+

112except AttributeError: 

+

113 def iternext(seq): 

+

114 """Get the `next` function for iterating over `seq`.""" 

+

115 return iter(seq).__next__ 

+

116else: 

+

117 def iternext(seq): 

+

118 """Get the `next` function for iterating over `seq`.""" 

+

119 return iter(seq).next 

+

120 

+

121# Python 3.x is picky about bytes and strings, so provide methods to 

+

122# get them right, and make them no-ops in 2.x 

+

123if env.PY3: 

+

124 def to_bytes(s): 

+

125 """Convert string `s` to bytes.""" 

+

126 return s.encode('utf8') 

+

127 

+

128 def to_string(b): 

+

129 """Convert bytes `b` to string.""" 

+

130 return b.decode('utf8') 

+

131 

+

132 def binary_bytes(byte_values): 

+

133 """Produce a byte string with the ints from `byte_values`.""" 

+

134 return bytes(byte_values) 

+

135 

+

136 def byte_to_int(byte): 

+

137 """Turn a byte indexed from a bytes object into an int.""" 

+

138 return byte 

+

139 

+

140 def bytes_to_ints(bytes_value): 

+

141 """Turn a bytes object into a sequence of ints.""" 

+

142 # In Python 3, iterating bytes gives ints. 

+

143 return bytes_value 

+

144 

+

145else: 

+

146 def to_bytes(s): 

+

147 """Convert string `s` to bytes (no-op in 2.x).""" 

+

148 return s 

+

149 

+

150 def to_string(b): 

+

151 """Convert bytes `b` to string.""" 

+

152 return b 

+

153 

+

154 def binary_bytes(byte_values): 

+

155 """Produce a byte string with the ints from `byte_values`.""" 

+

156 return "".join(chr(b) for b in byte_values) 

+

157 

+

158 def byte_to_int(byte): 

+

159 """Turn a byte indexed from a bytes object into an int.""" 

+

160 return ord(byte) 

+

161 

+

162 def bytes_to_ints(bytes_value): 

+

163 """Turn a bytes object into a sequence of ints.""" 

+

164 for byte in bytes_value: 

+

165 yield ord(byte) 

+

166 

+

167 

+

168try: 

+

169 # In Python 2.x, the builtins were in __builtin__ 

+

170 BUILTINS = sys.modules['__builtin__'] 

+

171except KeyError: 

+

172 # In Python 3.x, they're in builtins 

+

173 BUILTINS = sys.modules['builtins'] 

+

174 

+

175 

+

176# imp was deprecated in Python 3.3 

+

177try: 

+

178 import importlib 

+

179 import importlib.util 

+

180 imp = None 

+

181except ImportError: 

+

182 importlib = None 

+

183 

+

184# We only want to use importlib if it has everything we need. 

+

185try: 

+

186 importlib_util_find_spec = importlib.util.find_spec 

+

187except Exception: 

+

188 import imp 

+

189 importlib_util_find_spec = None 

+

190 

+

191# What is the .pyc magic number for this version of Python? 

+

192try: 

+

193 PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER 

+

194except AttributeError: 

+

195 PYC_MAGIC_NUMBER = imp.get_magic() 

+

196 

+

197 

+

198def code_object(fn): 

+

199 """Get the code object from a function.""" 

+

200 try: 

+

201 return fn.func_code 

+

202 except AttributeError: 

+

203 return fn.__code__ 

+

204 

+

205 

+

206try: 

+

207 from types import SimpleNamespace 

+

208except ImportError: 

+

209 # The code from https://docs.python.org/3/library/types.html#types.SimpleNamespace 

+

210 class SimpleNamespace: 

+

211 """Python implementation of SimpleNamespace, for Python 2.""" 

+

212 def __init__(self, **kwargs): 

+

213 self.__dict__.update(kwargs) 

+

214 

+

215 def __repr__(self): 

+

216 keys = sorted(self.__dict__) 

+

217 items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys) 

+

218 return "{}({})".format(type(self).__name__, ", ".join(items)) 

+

219 

+

220 

+

221def format_local_datetime(dt): 

+

222 """Return a string with local timezone representing the date. 

+

223 If python version is lower than 3.6, the time zone is not included. 

+

224 """ 

+

225 try: 

+

226 return dt.astimezone().strftime('%Y-%m-%d %H:%M %z') 

+

227 except (TypeError, ValueError): 

+

228 # Datetime.astimezone in Python 3.5 can not handle naive datetime 

+

229 return dt.strftime('%Y-%m-%d %H:%M') 

+

230 

+

231 

+

232def import_local_file(modname, modfile=None): 

+

233 """Import a local file as a module. 

+

234 

+

235 Opens a file in the current directory named `modname`.py, imports it 

+

236 as `modname`, and returns the module object. `modfile` is the file to 

+

237 import if it isn't in the current directory. 

+

238 

+

239 """ 

+

240 try: 

+

241 import importlib.util as importlib_util 

+

242 except ImportError: 

+

243 importlib_util = None 

+

244 

+

245 if modfile is None: 245 ↛ 247line 245 didn't jump to line 247, because the condition on line 245 was never false

+

246 modfile = modname + '.py' 

+

247 if importlib_util: 

+

248 spec = importlib_util.spec_from_file_location(modname, modfile) 

+

249 mod = importlib_util.module_from_spec(spec) 

+

250 sys.modules[modname] = mod 

+

251 spec.loader.exec_module(mod) 

+

252 else: 

+

253 for suff in imp.get_suffixes(): # pragma: part covered 

+

254 if suff[0] == '.py': 

+

255 break 

+

256 

+

257 with open(modfile, 'r') as f: 

+

258 # pylint: disable=undefined-loop-variable 

+

259 mod = imp.load_module(modname, f, modfile, suff) 

+

260 

+

261 return mod 

+
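backward.py exists so that the rest of the code base can import one name and get the right object on both Python 2 and Python 3. A short sketch of how those shims are typically consumed, using only names defined in the listing above; the dictionary contents are illustrative:

    from coverage.backward import iitems, string_class, shlex_quote

    d = {"statements": 10, "missing": 2}
    for name, count in iitems(d):               # d.iteritems() on Python 2, d.items() on Python 3
        print(name, count)

    print(isinstance(u"hello", string_class))   # True under both interpreters
    print(shlex_quote("file name.py"))          # quotes the embedded space for shell use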
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_bytecode_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_bytecode_py.html
new file mode 100644
index 000000000..489f7cc29
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_bytecode_py.html
@@ -0,0 +1,85 @@
Coverage for coverage/bytecode.py: 100.000%
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt

"""Bytecode manipulation for coverage.py"""

import types


def code_objects(code):
    """Iterate over all the code objects in `code`."""
    stack = [code]
    while stack:
        # We're going to return the code object on the stack, but first
        # push its children for later returning.
        code = stack.pop()
        for c in code.co_consts:
            if isinstance(c, types.CodeType):
                stack.append(c)
        yield code
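code_objects walks a compiled module and yields every nested code object, which is how coverage.py finds the functions and comprehensions inside a file. A minimal sketch, assuming an in-memory source string rather than a real file:

    from coverage.bytecode import code_objects

    source = (
        "def outer():\n"
        "    def inner():\n"
        "        return 42\n"
        "    return inner\n"
    )
    module_code = compile(source, "<example>", "exec")
    for co in code_objects(module_code):
        print(co.co_name)      # <module>, then outer, then inner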
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_cmdline_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_cmdline_py.html
new file mode 100644
index 000000000..99b74fd6d
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_cmdline_py.html
@@ -0,0 +1,976 @@
Coverage for coverage/cmdline.py: 98.423%
1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Command-line support for coverage.py.""" 

+

5 

+

6from __future__ import print_function 

+

7 

+

8import glob 

+

9import optparse 

+

10import os.path 

+

11import shlex 

+

12import sys 

+

13import textwrap 

+

14import traceback 

+

15 

+

16import coverage 

+

17from coverage import Coverage 

+

18from coverage import env 

+

19from coverage.collector import CTracer 

+

20from coverage.data import line_counts 

+

21from coverage.debug import info_formatter, info_header, short_stack 

+

22from coverage.execfile import PyRunner 

+

23from coverage.misc import BaseCoverageException, ExceptionDuringRun, NoSource, output_encoding 

+

24from coverage.results import should_fail_under 

+

25 

+

26 

+

27class Opts(object): 

+

28 """A namespace class for individual options we'll build parsers from.""" 

+

29 

+

30 append = optparse.make_option( 

+

31 '-a', '--append', action='store_true', 

+

32 help="Append coverage data to .coverage, otherwise it starts clean each time.", 

+

33 ) 

+

34 keep = optparse.make_option( 

+

35 '', '--keep', action='store_true', 

+

36 help="Keep original coverage files, otherwise they are deleted.", 

+

37 ) 

+

38 branch = optparse.make_option( 

+

39 '', '--branch', action='store_true', 

+

40 help="Measure branch coverage in addition to statement coverage.", 

+

41 ) 

+

42 CONCURRENCY_CHOICES = [ 

+

43 "thread", "gevent", "greenlet", "eventlet", "multiprocessing", 

+

44 ] 

+

45 concurrency = optparse.make_option( 

+

46 '', '--concurrency', action='store', metavar="LIB", 

+

47 choices=CONCURRENCY_CHOICES, 

+

48 help=( 

+

49 "Properly measure code using a concurrency library. " 

+

50 "Valid values are: %s." 

+

51 ) % ", ".join(CONCURRENCY_CHOICES), 

+

52 ) 

+

53 context = optparse.make_option( 

+

54 '', '--context', action='store', metavar="LABEL", 

+

55 help="The context label to record for this coverage run.", 

+

56 ) 

+

57 debug = optparse.make_option( 

+

58 '', '--debug', action='store', metavar="OPTS", 

+

59 help="Debug options, separated by commas. [env: COVERAGE_DEBUG]", 

+

60 ) 

+

61 directory = optparse.make_option( 

+

62 '-d', '--directory', action='store', metavar="DIR", 

+

63 help="Write the output files to DIR.", 

+

64 ) 

+

65 fail_under = optparse.make_option( 

+

66 '', '--fail-under', action='store', metavar="MIN", type="float", 

+

67 help="Exit with a status of 2 if the total coverage is less than MIN.", 

+

68 ) 

+

69 help = optparse.make_option( 

+

70 '-h', '--help', action='store_true', 

+

71 help="Get help on this command.", 

+

72 ) 

+

73 ignore_errors = optparse.make_option( 

+

74 '-i', '--ignore-errors', action='store_true', 

+

75 help="Ignore errors while reading source files.", 

+

76 ) 

+

77 include = optparse.make_option( 

+

78 '', '--include', action='store', 

+

79 metavar="PAT1,PAT2,...", 

+

80 help=( 

+

81 "Include only files whose paths match one of these patterns. " 

+

82 "Accepts shell-style wildcards, which must be quoted." 

+

83 ), 

+

84 ) 

+

85 pylib = optparse.make_option( 

+

86 '-L', '--pylib', action='store_true', 

+

87 help=( 

+

88 "Measure coverage even inside the Python installed library, " 

+

89 "which isn't done by default." 

+

90 ), 

+

91 ) 

+

92 sort = optparse.make_option( 

+

93 '--sort', action='store', metavar='COLUMN', 

+

94 help="Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. " 

+

95 "Default is name." 

+

96 ) 

+

97 show_missing = optparse.make_option( 

+

98 '-m', '--show-missing', action='store_true', 

+

99 help="Show line numbers of statements in each module that weren't executed.", 

+

100 ) 

+

101 skip_covered = optparse.make_option( 

+

102 '--skip-covered', action='store_true', 

+

103 help="Skip files with 100% coverage.", 

+

104 ) 

+

105 no_skip_covered = optparse.make_option( 

+

106 '--no-skip-covered', action='store_false', dest='skip_covered', 

+

107 help="Disable --skip-covered.", 

+

108 ) 

+

109 skip_empty = optparse.make_option( 

+

110 '--skip-empty', action='store_true', 

+

111 help="Skip files with no code.", 

+

112 ) 

+

113 show_contexts = optparse.make_option( 

+

114 '--show-contexts', action='store_true', 

+

115 help="Show contexts for covered lines.", 

+

116 ) 

+

117 omit = optparse.make_option( 

+

118 '', '--omit', action='store', 

+

119 metavar="PAT1,PAT2,...", 

+

120 help=( 

+

121 "Omit files whose paths match one of these patterns. " 

+

122 "Accepts shell-style wildcards, which must be quoted." 

+

123 ), 

+

124 ) 

+

125 contexts = optparse.make_option( 

+

126 '', '--contexts', action='store', 

+

127 metavar="REGEX1,REGEX2,...", 

+

128 help=( 

+

129 "Only display data from lines covered in the given contexts. " 

+

130 "Accepts Python regexes, which must be quoted." 

+

131 ), 

+

132 ) 

+

133 output_xml = optparse.make_option( 

+

134 '-o', '', action='store', dest="outfile", 

+

135 metavar="OUTFILE", 

+

136 help="Write the XML report to this file. Defaults to 'coverage.xml'", 

+

137 ) 

+

138 output_json = optparse.make_option( 

+

139 '-o', '', action='store', dest="outfile", 

+

140 metavar="OUTFILE", 

+

141 help="Write the JSON report to this file. Defaults to 'coverage.json'", 

+

142 ) 

+

143 json_pretty_print = optparse.make_option( 

+

144 '', '--pretty-print', action='store_true', 

+

145 help="Format the JSON for human readers.", 

+

146 ) 

+

147 parallel_mode = optparse.make_option( 

+

148 '-p', '--parallel-mode', action='store_true', 

+

149 help=( 

+

150 "Append the machine name, process id and random number to the " 

+

151 ".coverage data file name to simplify collecting data from " 

+

152 "many processes." 

+

153 ), 

+

154 ) 

+

155 module = optparse.make_option( 

+

156 '-m', '--module', action='store_true', 

+

157 help=( 

+

158 "<pyfile> is an importable Python module, not a script path, " 

+

159 "to be run as 'python -m' would run it." 

+

160 ), 

+

161 ) 

+

162 precision = optparse.make_option( 

+

163 '', '--precision', action='store', metavar='N', type=int, 

+

164 help=( 

+

165 "Number of digits after the decimal point to display for " 

+

166 "reported coverage percentages." 

+

167 ), 

+

168 ) 

+

169 rcfile = optparse.make_option( 

+

170 '', '--rcfile', action='store', 

+

171 help=( 

+

172 "Specify configuration file. " 

+

173 "By default '.coveragerc', 'setup.cfg', 'tox.ini', and " 

+

174 "'pyproject.toml' are tried. [env: COVERAGE_RCFILE]" 

+

175 ), 

+

176 ) 

+

177 source = optparse.make_option( 

+

178 '', '--source', action='store', metavar="SRC1,SRC2,...", 

+

179 help="A list of directories or importable names of code to measure.", 

+

180 ) 

+

181 timid = optparse.make_option( 

+

182 '', '--timid', action='store_true', 

+

183 help=( 

+

184 "Use a simpler but slower trace method. Try this if you get " 

+

185 "seemingly impossible results!" 

+

186 ), 

+

187 ) 

+

188 title = optparse.make_option( 

+

189 '', '--title', action='store', metavar="TITLE", 

+

190 help="A text string to use as the title on the HTML.", 

+

191 ) 

+

192 version = optparse.make_option( 

+

193 '', '--version', action='store_true', 

+

194 help="Display version information and exit.", 

+

195 ) 

+

196 

+

197 

+

198class CoverageOptionParser(optparse.OptionParser, object): 

+

199 """Base OptionParser for coverage.py. 

+

200 

+

201 Problems don't exit the program. 

+

202 Defaults are initialized for all options. 

+

203 

+

204 """ 

+

205 

+

206 def __init__(self, *args, **kwargs): 

+

207 super(CoverageOptionParser, self).__init__( 

+

208 add_help_option=False, *args, **kwargs 

+

209 ) 

+

210 self.set_defaults( 

+

211 action=None, 

+

212 append=None, 

+

213 branch=None, 

+

214 concurrency=None, 

+

215 context=None, 

+

216 debug=None, 

+

217 directory=None, 

+

218 fail_under=None, 

+

219 help=None, 

+

220 ignore_errors=None, 

+

221 include=None, 

+

222 keep=None, 

+

223 module=None, 

+

224 omit=None, 

+

225 contexts=None, 

+

226 parallel_mode=None, 

+

227 precision=None, 

+

228 pylib=None, 

+

229 rcfile=True, 

+

230 show_missing=None, 

+

231 skip_covered=None, 

+

232 skip_empty=None, 

+

233 show_contexts=None, 

+

234 sort=None, 

+

235 source=None, 

+

236 timid=None, 

+

237 title=None, 

+

238 version=None, 

+

239 ) 

+

240 

+

241 self.disable_interspersed_args() 

+

242 

+

243 class OptionParserError(Exception): 

+

244 """Used to stop the optparse error handler ending the process.""" 

+

245 pass 

+

246 

+

247 def parse_args_ok(self, args=None, options=None): 

+

248 """Call optparse.parse_args, but return a triple: 

+

249 

+

250 (ok, options, args) 

+

251 

+

252 """ 

+

253 try: 

+

254 options, args = super(CoverageOptionParser, self).parse_args(args, options) 

+

255 except self.OptionParserError: 

+

256 return False, None, None 

+

257 return True, options, args 

+

258 

+

259 def error(self, msg): 

+

260 """Override optparse.error so sys.exit doesn't get called.""" 

+

261 show_help(msg) 

+

262 raise self.OptionParserError 

+

263 

+

264 

+

265class GlobalOptionParser(CoverageOptionParser): 

+

266 """Command-line parser for coverage.py global option arguments.""" 

+

267 

+

268 def __init__(self): 

+

269 super(GlobalOptionParser, self).__init__() 

+

270 

+

271 self.add_options([ 

+

272 Opts.help, 

+

273 Opts.version, 

+

274 ]) 

+

275 

+

276 

+

277class CmdOptionParser(CoverageOptionParser): 

+

278 """Parse one of the new-style commands for coverage.py.""" 

+

279 

+

280 def __init__(self, action, options, defaults=None, usage=None, description=None): 

+

281 """Create an OptionParser for a coverage.py command. 

+

282 

+

283 `action` is the slug to put into `options.action`. 

+

284 `options` is a list of Option's for the command. 

+

285 `defaults` is a dict of default value for options. 

+

286 `usage` is the usage string to display in help. 

+

287 `description` is the description of the command, for the help text. 

+

288 

+

289 """ 

+

290 if usage: 

+

291 usage = "%prog " + usage 

+

292 super(CmdOptionParser, self).__init__( 

+

293 usage=usage, 

+

294 description=description, 

+

295 ) 

+

296 self.set_defaults(action=action, **(defaults or {})) 

+

297 self.add_options(options) 

+

298 self.cmd = action 

+

299 

+

300 def __eq__(self, other): 

+

301 # A convenience equality, so that I can put strings in unit test 

+

302 # results, and they will compare equal to objects. 

+

303 return (other == "<CmdOptionParser:%s>" % self.cmd) 

+

304 

+

305 __hash__ = None # This object doesn't need to be hashed. 

+

306 

+

307 def get_prog_name(self): 

+

308 """Override of an undocumented function in optparse.OptionParser.""" 

+

309 program_name = super(CmdOptionParser, self).get_prog_name() 

+

310 

+

311 # Include the sub-command for this parser as part of the command. 

+

312 return "{command} {subcommand}".format(command=program_name, subcommand=self.cmd) 

+

313 

+

314 

+

315GLOBAL_ARGS = [ 

+

316 Opts.debug, 

+

317 Opts.help, 

+

318 Opts.rcfile, 

+

319 ] 

+

320 

+

321CMDS = { 

+

322 'annotate': CmdOptionParser( 

+

323 "annotate", 

+

324 [ 

+

325 Opts.directory, 

+

326 Opts.ignore_errors, 

+

327 Opts.include, 

+

328 Opts.omit, 

+

329 ] + GLOBAL_ARGS, 

+

330 usage="[options] [modules]", 

+

331 description=( 

+

332 "Make annotated copies of the given files, marking statements that are executed " 

+

333 "with > and statements that are missed with !." 

+

334 ), 

+

335 ), 

+

336 

+

337 'combine': CmdOptionParser( 

+

338 "combine", 

+

339 [ 

+

340 Opts.append, 

+

341 Opts.keep, 

+

342 ] + GLOBAL_ARGS, 

+

343 usage="[options] <path1> <path2> ... <pathN>", 

+

344 description=( 

+

345 "Combine data from multiple coverage files collected " 

+

346 "with 'run -p'. The combined results are written to a single " 

+

347 "file representing the union of the data. The positional " 

+

348 "arguments are data files or directories containing data files. " 

+

349 "If no paths are provided, data files in the default data file's " 

+

350 "directory are combined." 

+

351 ), 

+

352 ), 

+

353 

+

354 'debug': CmdOptionParser( 

+

355 "debug", GLOBAL_ARGS, 

+

356 usage="<topic>", 

+

357 description=( 

+

358 "Display information about the internals of coverage.py, " 

+

359 "for diagnosing problems. " 

+

360 "Topics are: " 

+

361 "'data' to show a summary of the collected data; " 

+

362 "'sys' to show installation information; " 

+

363 "'config' to show the configuration; " 

+

364 "'premain' to show what is calling coverage." 

+

365 ), 

+

366 ), 

+

367 

+

368 'erase': CmdOptionParser( 

+

369 "erase", GLOBAL_ARGS, 

+

370 description="Erase previously collected coverage data.", 

+

371 ), 

+

372 

+

373 'help': CmdOptionParser( 

+

374 "help", GLOBAL_ARGS, 

+

375 usage="[command]", 

+

376 description="Describe how to use coverage.py", 

+

377 ), 

+

378 

+

379 'html': CmdOptionParser( 

+

380 "html", 

+

381 [ 

+

382 Opts.contexts, 

+

383 Opts.directory, 

+

384 Opts.fail_under, 

+

385 Opts.ignore_errors, 

+

386 Opts.include, 

+

387 Opts.omit, 

+

388 Opts.precision, 

+

389 Opts.show_contexts, 

+

390 Opts.skip_covered, 

+

391 Opts.no_skip_covered, 

+

392 Opts.skip_empty, 

+

393 Opts.title, 

+

394 ] + GLOBAL_ARGS, 

+

395 usage="[options] [modules]", 

+

396 description=( 

+

397 "Create an HTML report of the coverage of the files. " 

+

398 "Each file gets its own page, with the source decorated to show " 

+

399 "executed, excluded, and missed lines." 

+

400 ), 

+

401 ), 

+

402 

+

403 'json': CmdOptionParser( 

+

404 "json", 

+

405 [ 

+

406 Opts.contexts, 

+

407 Opts.fail_under, 

+

408 Opts.ignore_errors, 

+

409 Opts.include, 

+

410 Opts.omit, 

+

411 Opts.output_json, 

+

412 Opts.json_pretty_print, 

+

413 Opts.show_contexts, 

+

414 ] + GLOBAL_ARGS, 

+

415 usage="[options] [modules]", 

+

416 description="Generate a JSON report of coverage results." 

+

417 ), 

+

418 

+

419 'report': CmdOptionParser( 

+

420 "report", 

+

421 [ 

+

422 Opts.contexts, 

+

423 Opts.fail_under, 

+

424 Opts.ignore_errors, 

+

425 Opts.include, 

+

426 Opts.omit, 

+

427 Opts.precision, 

+

428 Opts.sort, 

+

429 Opts.show_missing, 

+

430 Opts.skip_covered, 

+

431 Opts.no_skip_covered, 

+

432 Opts.skip_empty, 

+

433 ] + GLOBAL_ARGS, 

+

434 usage="[options] [modules]", 

+

435 description="Report coverage statistics on modules." 

+

436 ), 

+

437 

+

438 'run': CmdOptionParser( 

+

439 "run", 

+

440 [ 

+

441 Opts.append, 

+

442 Opts.branch, 

+

443 Opts.concurrency, 

+

444 Opts.context, 

+

445 Opts.include, 

+

446 Opts.module, 

+

447 Opts.omit, 

+

448 Opts.pylib, 

+

449 Opts.parallel_mode, 

+

450 Opts.source, 

+

451 Opts.timid, 

+

452 ] + GLOBAL_ARGS, 

+

453 usage="[options] <pyfile> [program options]", 

+

454 description="Run a Python program, measuring code execution." 

+

455 ), 

+

456 

+

457 'xml': CmdOptionParser( 

+

458 "xml", 

+

459 [ 

+

460 Opts.fail_under, 

+

461 Opts.ignore_errors, 

+

462 Opts.include, 

+

463 Opts.omit, 

+

464 Opts.output_xml, 

+

465 Opts.skip_empty, 

+

466 ] + GLOBAL_ARGS, 

+

467 usage="[options] [modules]", 

+

468 description="Generate an XML report of coverage results." 

+

469 ), 

+

470} 

+

471 

+

472 

+

473def show_help(error=None, topic=None, parser=None): 

+

474 """Display an error message, or the named topic.""" 

+

475 assert error or topic or parser 

+

476 

+

477 program_path = sys.argv[0] 

+

478 if program_path.endswith(os.path.sep + '__main__.py'): 

+

479 # The path is the main module of a package; get that path instead. 

+

480 program_path = os.path.dirname(program_path) 

+

481 program_name = os.path.basename(program_path) 

+

482 if env.WINDOWS: 

+

483 # entry_points={'console_scripts':...} on Windows makes files 

+

484 # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These 

+

485 # invoke coverage-script.py, coverage3-script.py, and 

+

486 # coverage-3.5-script.py. argv[0] is the .py file, but we want to 

+

487 # get back to the original form. 

+

488 auto_suffix = "-script.py" 

+

489 if program_name.endswith(auto_suffix): 489 ↛ 490line 489 didn't jump to line 490, because the condition on line 489 was never true

+

490 program_name = program_name[:-len(auto_suffix)] 

+

491 

+

492 help_params = dict(coverage.__dict__) 

+

493 help_params['program_name'] = program_name 

+

494 if CTracer is not None: 

+

495 help_params['extension_modifier'] = 'with C extension' 

+

496 else: 

+

497 help_params['extension_modifier'] = 'without C extension' 

+

498 

+

499 if error: 

+

500 print(error, file=sys.stderr) 

+

501 print("Use '%s help' for help." % (program_name,), file=sys.stderr) 

+

502 elif parser: 

+

503 print(parser.format_help().strip()) 

+

504 print() 

+

505 else: 

+

506 help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip() 

+

507 if help_msg: 

+

508 print(help_msg.format(**help_params)) 

+

509 else: 

+

510 print("Don't know topic %r" % topic) 

+

511 print("Full documentation is at {__url__}".format(**help_params)) 

+

512 

+

513 

+

514OK, ERR, FAIL_UNDER = 0, 1, 2 

+

515 

+

516 

+

517class CoverageScript(object): 

+

518 """The command-line interface to coverage.py.""" 

+

519 

+

520 def __init__(self): 

+

521 self.global_option = False 

+

522 self.coverage = None 

+

523 

+

524 def command_line(self, argv): 

+

525 """The bulk of the command line interface to coverage.py. 

+

526 

+

527 `argv` is the argument list to process. 

+

528 

+

529 Returns 0 if all is well, 1 if something went wrong. 

+

530 

+

531 """ 

+

532 # Collect the command-line options. 

+

533 if not argv: 

+

534 show_help(topic='minimum_help') 

+

535 return OK 

+

536 

+

537 # The command syntax we parse depends on the first argument. Global 

+

538 # switch syntax always starts with an option. 

+

539 self.global_option = argv[0].startswith('-') 

+

540 if self.global_option: 

+

541 parser = GlobalOptionParser() 

+

542 else: 

+

543 parser = CMDS.get(argv[0]) 

+

544 if not parser: 

+

545 show_help("Unknown command: '%s'" % argv[0]) 

+

546 return ERR 

+

547 argv = argv[1:] 

+

548 

+

549 ok, options, args = parser.parse_args_ok(argv) 

+

550 if not ok: 

+

551 return ERR 

+

552 

+

553 # Handle help and version. 

+

554 if self.do_help(options, args, parser): 

+

555 return OK 

+

556 

+

557 # Listify the list options. 

+

558 source = unshell_list(options.source) 

+

559 omit = unshell_list(options.omit) 

+

560 include = unshell_list(options.include) 

+

561 debug = unshell_list(options.debug) 

+

562 contexts = unshell_list(options.contexts) 

+

563 

+

564 # Do something. 

+

565 self.coverage = Coverage( 

+

566 data_suffix=options.parallel_mode, 

+

567 cover_pylib=options.pylib, 

+

568 timid=options.timid, 

+

569 branch=options.branch, 

+

570 config_file=options.rcfile, 

+

571 source=source, 

+

572 omit=omit, 

+

573 include=include, 

+

574 debug=debug, 

+

575 concurrency=options.concurrency, 

+

576 check_preimported=True, 

+

577 context=options.context, 

+

578 ) 

+

579 

+

580 if options.action == "debug": 

+

581 return self.do_debug(args) 

+

582 

+

583 elif options.action == "erase": 

+

584 self.coverage.erase() 

+

585 return OK 

+

586 

+

587 elif options.action == "run": 

+

588 return self.do_run(options, args) 

+

589 

+

590 elif options.action == "combine": 

+

591 if options.append: 

+

592 self.coverage.load() 

+

593 data_dirs = args or None 

+

594 self.coverage.combine(data_dirs, strict=True, keep=bool(options.keep)) 

+

595 self.coverage.save() 

+

596 return OK 

+

597 

+

598 # Remaining actions are reporting, with some common options. 

+

599 report_args = dict( 

+

600 morfs=unglob_args(args), 

+

601 ignore_errors=options.ignore_errors, 

+

602 omit=omit, 

+

603 include=include, 

+

604 contexts=contexts, 

+

605 ) 

+

606 

+

607 # We need to be able to import from the current directory, because 

+

608 # plugins may try to, for example, to read Django settings. 

+

609 sys.path.insert(0, '') 

+

610 

+

611 self.coverage.load() 

+

612 

+

613 total = None 

+

614 if options.action == "report": 

+

615 total = self.coverage.report( 

+

616 show_missing=options.show_missing, 

+

617 skip_covered=options.skip_covered, 

+

618 skip_empty=options.skip_empty, 

+

619 precision=options.precision, 

+

620 sort=options.sort, 

+

621 **report_args 

+

622 ) 

+

623 elif options.action == "annotate": 

+

624 self.coverage.annotate(directory=options.directory, **report_args) 

+

625 elif options.action == "html": 

+

626 total = self.coverage.html_report( 

+

627 directory=options.directory, 

+

628 title=options.title, 

+

629 skip_covered=options.skip_covered, 

+

630 skip_empty=options.skip_empty, 

+

631 show_contexts=options.show_contexts, 

+

632 precision=options.precision, 

+

633 **report_args 

+

634 ) 

+

635 elif options.action == "xml": 

+

636 outfile = options.outfile 

+

637 total = self.coverage.xml_report( 

+

638 outfile=outfile, skip_empty=options.skip_empty, 

+

639 **report_args 

+

640 ) 

+

641 elif options.action == "json": 641 ↛ 650line 641 didn't jump to line 650, because the condition on line 641 was never false

+

642 outfile = options.outfile 

+

643 total = self.coverage.json_report( 

+

644 outfile=outfile, 

+

645 pretty_print=options.pretty_print, 

+

646 show_contexts=options.show_contexts, 

+

647 **report_args 

+

648 ) 

+

649 

+

650 if total is not None: 

+

651 # Apply the command line fail-under options, and then use the config 

+

652 # value, so we can get fail_under from the config file. 

+

653 if options.fail_under is not None: 

+

654 self.coverage.set_option("report:fail_under", options.fail_under) 

+

655 

+

656 fail_under = self.coverage.get_option("report:fail_under") 

+

657 precision = self.coverage.get_option("report:precision") 

+

658 if should_fail_under(total, fail_under, precision): 

+

659 msg = "total of {total:.{p}f} is less than fail-under={fail_under:.{p}f}".format( 

+

660 total=total, fail_under=fail_under, p=precision, 

+

661 ) 

+

662 print("Coverage failure:", msg) 

+

663 return FAIL_UNDER 

+

664 

+

665 return OK 

+

666 

+

667 def do_help(self, options, args, parser): 

+

668 """Deal with help requests. 

+

669 

+

670 Return True if it handled the request, False if not. 

+

671 

+

672 """ 

+

673 # Handle help. 

+

674 if options.help: 

+

675 if self.global_option: 

+

676 show_help(topic='help') 

+

677 else: 

+

678 show_help(parser=parser) 

+

679 return True 

+

680 

+

681 if options.action == "help": 

+

682 if args: 

+

683 for a in args: 

+

684 parser = CMDS.get(a) 

+

685 if parser: 

+

686 show_help(parser=parser) 

+

687 else: 

+

688 show_help(topic=a) 

+

689 else: 

+

690 show_help(topic='help') 

+

691 return True 

+

692 

+

693 # Handle version. 

+

694 if options.version: 

+

695 show_help(topic='version') 

+

696 return True 

+

697 

+

698 return False 

+

699 

+

700 def do_run(self, options, args): 

+

701 """Implementation of 'coverage run'.""" 

+

702 

+

703 if not args: 

+

704 if options.module: 

+

705 # Specified -m with nothing else. 

+

706 show_help("No module specified for -m") 

+

707 return ERR 

+

708 command_line = self.coverage.get_option("run:command_line") 

+

709 if command_line is not None: 

+

710 args = shlex.split(command_line) 

+

711 if args and args[0] == "-m": 

+

712 options.module = True 

+

713 args = args[1:] 

+

714 if not args: 

+

715 show_help("Nothing to do.") 

+

716 return ERR 

+

717 

+

718 if options.append and self.coverage.get_option("run:parallel"): 

+

719 show_help("Can't append to data files in parallel mode.") 

+

720 return ERR 

+

721 

+

722 if options.concurrency == "multiprocessing": 

+

723 # Can't set other run-affecting command line options with 

+

724 # multiprocessing. 

+

725 for opt_name in ['branch', 'include', 'omit', 'pylib', 'source', 'timid']: 

+

726 # As it happens, all of these options have no default, meaning 

+

727 # they will be None if they have not been specified. 

+

728 if getattr(options, opt_name) is not None: 

+

729 show_help( 

+

730 "Options affecting multiprocessing must only be specified " 

+

731 "in a configuration file.\n" 

+

732 "Remove --{} from the command line.".format(opt_name) 

+

733 ) 

+

734 return ERR 

+

735 

+

736 runner = PyRunner(args, as_module=bool(options.module)) 

+

737 runner.prepare() 

+

738 

+

739 if options.append: 

+

740 self.coverage.load() 

+

741 

+

742 # Run the script. 

+

743 self.coverage.start() 

+

744 code_ran = True 

+

745 try: 

+

746 runner.run() 

+

747 except NoSource: 

+

748 code_ran = False 

+

749 raise 

+

750 finally: 

+

751 self.coverage.stop() 

+

752 if code_ran: 752 ↛ 755line 752 didn't jump to line 755, because the condition on line 752 was never false

+

753 self.coverage.save() 

+

754 

+

755 return OK 

+

756 

+

757 def do_debug(self, args): 

+

758 """Implementation of 'coverage debug'.""" 

+

759 

+

760 if not args: 

+

761 show_help("What information would you like: config, data, sys, premain?") 

+

762 return ERR 

+

763 

+

764 for info in args: 

+

765 if info == 'sys': 

+

766 sys_info = self.coverage.sys_info() 

+

767 print(info_header("sys")) 

+

768 for line in info_formatter(sys_info): 

+

769 print(" %s" % line) 

+

770 elif info == 'data': 

+

771 self.coverage.load() 

+

772 data = self.coverage.get_data() 

+

773 print(info_header("data")) 

+

774 print("path: %s" % data.data_filename()) 

+

775 if data: 

+

776 print("has_arcs: %r" % data.has_arcs()) 

+

777 summary = line_counts(data, fullpath=True) 

+

778 filenames = sorted(summary.keys()) 

+

779 print("\n%d files:" % len(filenames)) 

+

780 for f in filenames: 

+

781 line = "%s: %d lines" % (f, summary[f]) 

+

782 plugin = data.file_tracer(f) 

+

783 if plugin: 

+

784 line += " [%s]" % plugin 

+

785 print(line) 

+

786 else: 

+

787 print("No data collected") 

+

788 elif info == 'config': 

+

789 print(info_header("config")) 

+

790 config_info = self.coverage.config.__dict__.items() 

+

791 for line in info_formatter(config_info): 

+

792 print(" %s" % line) 

+

793 elif info == "premain": 793 ↛ 794line 793 didn't jump to line 794, because the condition on line 793 was never true

+

794 print(info_header("premain")) 

+

795 print(short_stack()) 

+

796 else: 

+

797 show_help("Don't know what you mean by %r" % info) 

+

798 return ERR 

+

799 

+

800 return OK 

+

801 

+

802 

+

803def unshell_list(s): 

+

804 """Turn a command-line argument into a list.""" 

+

805 if not s: 

+

806 return None 

+

807 if env.WINDOWS: 

+

808 # When running coverage.py as coverage.exe, some of the behavior 

+

809 # of the shell is emulated: wildcards are expanded into a list of 

+

810 # file names. So you have to single-quote patterns on the command 

+

811 # line, but (not) helpfully, the single quotes are included in the 

+

812 # argument, so we have to strip them off here. 

+

813 s = s.strip("'") 

+

814 return s.split(',') 

+

815 

+

816 

+

817def unglob_args(args): 

+

818 """Interpret shell wildcards for platforms that need it.""" 

+

819 if env.WINDOWS: 

+

820 globbed = [] 

+

821 for arg in args: 

+

822 if '?' in arg or '*' in arg: 

+

823 globbed.extend(glob.glob(arg)) 

+

824 else: 

+

825 globbed.append(arg) 

+

826 args = globbed 

+

827 return args 

+

828 

+

829 

+

830HELP_TOPICS = { 

+

831 'help': """\ 

+

832 Coverage.py, version {__version__} {extension_modifier} 

+

833 Measure, collect, and report on code coverage in Python programs. 

+

834 

+

835 usage: {program_name} <command> [options] [args] 

+

836 

+

837 Commands: 

+

838 annotate Annotate source files with execution information. 

+

839 combine Combine a number of data files. 

+

840 debug Display information about the internals of coverage.py 

+

841 erase Erase previously collected coverage data. 

+

842 help Get help on using coverage.py. 

+

843 html Create an HTML report. 

+

844 json Create a JSON report of coverage results. 

+

845 report Report coverage stats on modules. 

+

846 run Run a Python program and measure code execution. 

+

847 xml Create an XML report of coverage results. 

+

848 

+

849 Use "{program_name} help <command>" for detailed help on any command. 

+

850 """, 

+

851 

+

852 'minimum_help': """\ 

+

853 Code coverage for Python, version {__version__} {extension_modifier}. Use '{program_name} help' for help. 

+

854 """, 

+

855 

+

856 'version': """\ 

+

857 Coverage.py, version {__version__} {extension_modifier} 

+

858 """, 

+

859} 

+

860 

+

861 

+

862def main(argv=None): 

+

863 """The main entry point to coverage.py. 

+

864 

+

865 This is installed as the script entry point. 

+

866 

+

867 """ 

+

868 if argv is None: 

+

869 argv = sys.argv[1:] 

+

870 try: 

+

871 status = CoverageScript().command_line(argv) 

+

872 except ExceptionDuringRun as err: 

+

873 # An exception was caught while running the product code. The 

+

874 # sys.exc_info() return tuple is packed into an ExceptionDuringRun 

+

875 # exception. 

+

876 traceback.print_exception(*err.args) # pylint: disable=no-value-for-parameter 

+

877 status = ERR 

+

878 except BaseCoverageException as err: 

+

879 # A controlled error inside coverage.py: print the message to the user. 

+

880 msg = err.args[0] 

+

881 if env.PY2: 

+

882 msg = msg.encode(output_encoding()) 

+

883 print(msg) 

+

884 status = ERR 

+

885 except SystemExit as err: 

+

886 # The user called `sys.exit()`. Exit with their argument, if any. 

+

887 if err.args: 

+

888 status = err.args[0] 

+

889 else: 

+

890 status = None 

+

891 return status 

+

892 

+

893# Profiling using ox_profile. Install it from GitHub: 

+

894# pip install git+https://github.com/emin63/ox_profile.git 

+

895# 

+

896# $set_env.py: COVERAGE_PROFILE - Set to use ox_profile. 

+

897_profile = os.environ.get("COVERAGE_PROFILE", "") 

+

898if _profile: # pragma: debugging 

+

899 from ox_profile.core.launchers import SimpleLauncher # pylint: disable=import-error 

+

900 original_main = main 

+

901 

+

902 def main(argv=None): # pylint: disable=function-redefined 

+

903 """A wrapper around main that profiles.""" 

+

904 profiler = SimpleLauncher.launch() 

+

905 try: 

+

906 return original_main(argv) 

+

907 finally: 

+

908 data, _ = profiler.query(re_filter='coverage', max_records=100) 

+

909 print(profiler.show(query=data, limit=100, sep='', col='')) 

+

910 profiler.cancel() 

+
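The command-line layer can also be driven from Python, which is occasionally convenient in test harnesses. A sketch, assuming coverage data has already been collected; the return value corresponds to the OK, ERR and FAIL_UNDER constants defined above:

    from coverage.cmdline import main

    status = main(["report", "--show-missing"])   # same parsing path as "coverage report -m"
    if status == 2:
        print("total coverage fell below the configured fail-under threshold")
    elif status == 1:
        print("coverage.py reported an error")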
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_collector_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_collector_py.html
new file mode 100644
index 000000000..1f3766dd7
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_collector_py.html
@@ -0,0 +1,517 @@
Coverage for coverage/collector.py: 58.305%
1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Raw data collector for coverage.py.""" 

+

5 

+

6import os 

+

7import sys 

+

8 

+

9from coverage import env 

+

10from coverage.backward import litems, range # pylint: disable=redefined-builtin 

+

11from coverage.debug import short_stack 

+

12from coverage.disposition import FileDisposition 

+

13from coverage.misc import CoverageException, isolate_module 

+

14from coverage.pytracer import PyTracer 

+

15 

+

16os = isolate_module(os) 

+

17 

+

18 

+

19try: 

+

20 # Use the C extension code when we can, for speed. 

+

21 from coverage.tracer import CTracer, CFileDisposition 

+

22except ImportError: 

+

23 # Couldn't import the C extension, maybe it isn't built. 

+

24 if os.getenv('COVERAGE_TEST_TRACER') == 'c': 24 ↛ 31line 24 didn't jump to line 31, because the condition on line 24 was never true

+

25 # During testing, we use the COVERAGE_TEST_TRACER environment variable 

+

26 # to indicate that we've fiddled with the environment to test this 

+

27 # fallback code. If we thought we had a C tracer, but couldn't import 

+

28 # it, then exit quickly and clearly instead of dribbling confusing 

+

29 # errors. I'm using sys.exit here instead of an exception because an 

+

30 # exception here causes all sorts of other noise in unittest. 

+

31 sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n") 

+

32 sys.exit(1) 

+

33 CTracer = None 

+

34 

+

35 

+

36class Collector(object): 

+

37 """Collects trace data. 

+

38 

+

39 Creates a Tracer object for each thread, since they track stack 

+

40 information. Each Tracer points to the same shared data, contributing 

+

41 traced data points. 

+

42 

+

43 When the Collector is started, it creates a Tracer for the current thread, 

+

44 and installs a function to create Tracers for each new thread started. 

+

45 When the Collector is stopped, all active Tracers are stopped. 

+

46 

+

47 Threads started while the Collector is stopped will never have Tracers 

+

48 associated with them. 

+

49 

+

50 """ 

+

51 

+

52 # The stack of active Collectors. Collectors are added here when started, 

+

53 # and popped when stopped. Collectors on the stack are paused when not 

+

54 # the top, and resumed when they become the top again. 

+

55 _collectors = [] 

+

56 

+

57 # The concurrency settings we support here. 

+

58 SUPPORTED_CONCURRENCIES = {"greenlet", "eventlet", "gevent", "thread"} 

+

59 

+

60 def __init__( 

+

61 self, should_trace, check_include, should_start_context, file_mapper, 

+

62 timid, branch, warn, concurrency, 

+

63 ): 

+

64 """Create a collector. 

+

65 

+

66 `should_trace` is a function, taking a file name and a frame, and 

+

67 returning a `coverage.FileDisposition object`. 

+

68 

+

69 `check_include` is a function taking a file name and a frame. It returns 

+

70 a boolean: True if the file should be traced, False if not. 

+

71 

+

72 `should_start_context` is a function taking a frame, and returning a 

+

73 string. If the frame should be the start of a new context, the string 

+

74 is the new context. If the frame should not be the start of a new 

+

75 context, return None. 

+

76 

+

77 `file_mapper` is a function taking a filename, and returning a Unicode 

+

78 filename. The result is the name that will be recorded in the data 

+

79 file. 

+

80 

+

81 If `timid` is true, then a slower simpler trace function will be 

+

82 used. This is important for some environments where manipulation of 

+

83 tracing functions make the faster more sophisticated trace function not 

+

84 operate properly. 

+

85 

+

86 If `branch` is true, then branches will be measured. This involves 

+

87 collecting data on which statements followed each other (arcs). Use 

+

88 `get_arc_data` to get the arc data. 

+

89 

+

90 `warn` is a warning function, taking a single string message argument 

+

91 and an optional slug argument which will be a string or None, to be 

+

92 used if a warning needs to be issued. 

+

93 

+

94 `concurrency` is a list of strings indicating the concurrency libraries 

+

95 in use. Valid values are "greenlet", "eventlet", "gevent", or "thread" 

+

96 (the default). Of these four values, only one can be supplied. Other 

+

97 values are ignored. 

+

98 

+

99 """ 

+

100 self.should_trace = should_trace 

+

101 self.check_include = check_include 

+

102 self.should_start_context = should_start_context 

+

103 self.file_mapper = file_mapper 

+

104 self.warn = warn 

+

105 self.branch = branch 

+

106 self.threading = None 

+

107 self.covdata = None 

+

108 

+

109 self.static_context = None 

+

110 

+

111 self.origin = short_stack() 

+

112 

+

113 self.concur_id_func = None 

+

114 self.mapped_file_cache = {} 

+

115 

+

116 # We can handle a few concurrency options here, but only one at a time. 

+

117 these_concurrencies = self.SUPPORTED_CONCURRENCIES.intersection(concurrency) 

+

118 if len(these_concurrencies) > 1: 118 ↛ 119line 118 didn't jump to line 119, because the condition on line 118 was never true

+

119 raise CoverageException("Conflicting concurrency settings: %s" % concurrency) 

+

120 self.concurrency = these_concurrencies.pop() if these_concurrencies else '' 

+

121 

+

122 try: 

+

123 if self.concurrency == "greenlet": 

+

124 import greenlet 

+

125 self.concur_id_func = greenlet.getcurrent 

+

126 elif self.concurrency == "eventlet": 

+

127 import eventlet.greenthread # pylint: disable=import-error,useless-suppression 

+

128 self.concur_id_func = eventlet.greenthread.getcurrent 

+

129 elif self.concurrency == "gevent": 

+

130 import gevent # pylint: disable=import-error,useless-suppression 

+

131 self.concur_id_func = gevent.getcurrent 

+

132 elif self.concurrency == "thread" or not self.concurrency: 132 ↛ 139line 132 didn't jump to line 139, because the condition on line 132 was never false

+

133 # It's important to import threading only if we need it. If 

+

134 # it's imported early, and the program being measured uses 

+

135 # gevent, then gevent's monkey-patching won't work properly. 

+

136 import threading 

+

137 self.threading = threading 

+

138 else: 

+

139 raise CoverageException("Don't understand concurrency=%s" % concurrency) 

+

140 except ImportError: 

+

141 raise CoverageException( 

+

142 "Couldn't trace with concurrency=%s, the module isn't installed." % ( 

+

143 self.concurrency, 

+

144 ) 

+

145 ) 

+

146 

+

147 self.reset() 

+

148 

+

149 if timid: 

+

150 # Being timid: use the simple Python trace function. 

+

151 self._trace_class = PyTracer 

+

152 else: 

+

153 # Being fast: use the C Tracer if it is available, else the Python 

+

154 # trace function. 

+

155 self._trace_class = CTracer or PyTracer 

+

156 

+

157 if self._trace_class is CTracer: 

+

158 self.file_disposition_class = CFileDisposition 

+

159 self.supports_plugins = True 

+

160 else: 

+

161 self.file_disposition_class = FileDisposition 

+

162 self.supports_plugins = False 

+

163 
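The concurrency handling in the constructor above reduces to a set intersection against the supported names, with more than one match treated as a conflict and unknown names ignored. A minimal standalone sketch of that selection step (the set literal and ValueError are stand-ins for the class's SUPPORTED_CONCURRENCIES attribute and CoverageException):

SUPPORTED = {"greenlet", "eventlet", "gevent", "thread"}

def pick_concurrency(concurrency):
    # Keep only the names we know about; more than one is a conflict.
    chosen = SUPPORTED.intersection(concurrency)
    if len(chosen) > 1:
        raise ValueError("Conflicting concurrency settings: %s" % concurrency)
    return chosen.pop() if chosen else ''

print(pick_concurrency(["thread"]))           # 'thread'
print(pick_concurrency(["multiprocessing"]))  # '' -- names outside the set are ignored here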

+

164 def __repr__(self): 

+

165 return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name()) 

+

166 

+

167 def use_data(self, covdata, context): 

+

168 """Use `covdata` for recording data.""" 

+

169 self.covdata = covdata 

+

170 self.static_context = context 

+

171 self.covdata.set_context(self.static_context) 

+

172 

+

173 def tracer_name(self): 

+

174 """Return the class name of the tracer we're using.""" 

+

175 return self._trace_class.__name__ 

+

176 

+

177 def _clear_data(self): 

+

178 """Clear out existing data, but stay ready for more collection.""" 

+

179 # We used to use self.data.clear(), but that would remove filename 

+

180 # keys and data values that were still in use higher up the stack 

+

181 # when we are called as part of switch_context. 

+

182 for d in self.data.values(): 

+

183 d.clear() 

+

184 

+

185 for tracer in self.tracers: 

+

186 tracer.reset_activity() 

+

187 

+

188 def reset(self): 

+

189 """Clear collected data, and prepare to collect more.""" 

+

190 # A dictionary mapping file names to dicts with line number keys (if not 

+

191 # branch coverage), or mapping file names to dicts with line number 

+

192 # pairs as keys (if branch coverage). 

+

193 self.data = {} 

+

194 

+

195 # A dictionary mapping file names to file tracer plugin names that will 

+

196 # handle them. 

+

197 self.file_tracers = {} 

+

198 

+

199 self.disabled_plugins = set() 

+

200 

+

201 # The .should_trace_cache attribute is a cache from file names to 

+

202 # coverage.FileDisposition objects, or None. When a file is first 

+

203 # considered for tracing, a FileDisposition is obtained from 

+

204 # Coverage.should_trace. Its .trace attribute indicates whether the 

+

205 # file should be traced or not. If it should be, a plugin with dynamic 

+

206 # file names can decide not to trace it based on the dynamic file name 

+

207 # being excluded by the inclusion rules, in which case the 

+

208 # FileDisposition will be replaced by None in the cache. 

+

209 if env.PYPY: 

+

210 import __pypy__ # pylint: disable=import-error 

+

211 # Alex Gaynor said: 

+

212 # should_trace_cache is a strictly growing key: once a key is in 

+

213 # it, it never changes. Further, the keys used to access it are 

+

214 # generally constant, given sufficient context. That is to say, at 

+

215 # any given point _trace() is called, pypy is able to know the key. 

+

216 # This is because the key is determined by the physical source code 

+

217 # line, and that's invariant with the call site. 

+

218 # 

+

219 # This property of a dict with immutable keys, combined with 

+

220 # call-site-constant keys is a match for PyPy's module dict, 

+

221 # which is optimized for such workloads. 

+

222 # 

+

223 # This gives a 20% benefit on the workload described at 

+

224 # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage 

+

225 self.should_trace_cache = __pypy__.newdict("module") 

+

226 else: 

+

227 self.should_trace_cache = {} 

+

228 

+

229 # Our active Tracers. 

+

230 self.tracers = [] 

+

231 

+

232 self._clear_data() 

+

233 

+

234 def _start_tracer(self): 

+

235 """Start a new Tracer object, and store it in self.tracers.""" 

+

236 tracer = self._trace_class() 

+

237 tracer.data = self.data 

+

238 tracer.trace_arcs = self.branch 

+

239 tracer.should_trace = self.should_trace 

+

240 tracer.should_trace_cache = self.should_trace_cache 

+

241 tracer.warn = self.warn 

+

242 

+

243 if hasattr(tracer, 'concur_id_func'): 

+

244 tracer.concur_id_func = self.concur_id_func 

+

245 elif self.concur_id_func: 

+

246 raise CoverageException( 

+

247 "Can't support concurrency=%s with %s, only threads are supported" % ( 

+

248 self.concurrency, self.tracer_name(), 

+

249 ) 

+

250 ) 

+

251 

+

252 if hasattr(tracer, 'file_tracers'): 

+

253 tracer.file_tracers = self.file_tracers 

+

254 if hasattr(tracer, 'threading'): 

+

255 tracer.threading = self.threading 

+

256 if hasattr(tracer, 'check_include'): 

+

257 tracer.check_include = self.check_include 

+

258 if hasattr(tracer, 'should_start_context'): 

+

259 tracer.should_start_context = self.should_start_context 

+

260 tracer.switch_context = self.switch_context 

+

261 if hasattr(tracer, 'disable_plugin'): 

+

262 tracer.disable_plugin = self.disable_plugin 

+

263 

+

264 fn = tracer.start() 

+

265 self.tracers.append(tracer) 

+

266 

+

267 return fn 

+

268 

+

269 # The trace function has to be set individually on each thread before 

+

270 # execution begins. Ironically, the only support the threading module has 

+

271 # for running code before the thread main is the tracing function. So we 

+

272 # install this as a trace function, and the first time it's called, it does 

+

273 # the real trace installation. 

+

274 

+

275 def _installation_trace(self, frame, event, arg): 

+

276 """Called on new threads, installs the real tracer.""" 

+

277 # Remove ourselves as the trace function. 

+

278 sys.settrace(None) 

+

279 # Install the real tracer. 

+

280 fn = self._start_tracer() 

+

281 # Invoke the real trace function with the current event, to be sure 

+

282 # not to lose an event. 

+

283 if fn: 

+

284 fn = fn(frame, event, arg) 

+

285 # Return the new trace function to continue tracing in this scope. 

+

286 return fn 

+

287 
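The comment before _installation_trace() explains the bootstrap: threading.settrace() can only run our code in a new thread through a trace function, so a small installer is registered that replaces itself with the real tracer on its first call. A rough standalone sketch of that self-replacing pattern, where trace_lines is a toy stand-in for the tracer that _start_tracer() would return:

import sys
import threading

def trace_lines(frame, event, arg):
    # Toy tracer: keep tracing this frame, do nothing else.
    return trace_lines

def installation_trace(frame, event, arg):
    # First trace event in a new thread: install the real tracer and
    # replay the current event so it isn't lost.
    sys.settrace(trace_lines)
    return trace_lines(frame, event, arg)

threading.settrace(installation_trace)       # applies to threads started after this call
t = threading.Thread(target=lambda: sum(range(10)))
t.start()
t.join()
threading.settrace(None)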

+

288 def start(self): 

+

289 """Start collecting trace information.""" 

+

290 if self._collectors: 290 ↛ 293: line 290 didn't jump to line 293, because the condition on line 290 was never false

+

291 self._collectors[-1].pause() 

+

292 

+

293 self.tracers = [] 

+

294 

+

295 # Check to see whether we had a fullcoverage tracer installed. If so, 

+

296 # get the stack frames it stashed away for us. 

+

297 traces0 = [] 

+

298 fn0 = sys.gettrace() 

+

299 if fn0: 

+

300 tracer0 = getattr(fn0, '__self__', None) 

+

301 if tracer0: 

+

302 traces0 = getattr(tracer0, 'traces', []) 

+

303 

+

304 try: 

+

305 # Install the tracer on this thread. 

+

306 fn = self._start_tracer() 

+

307 except: 

+

308 if self._collectors: 

+

309 self._collectors[-1].resume() 

+

310 raise 

+

311 

+

312 # If _start_tracer succeeded, then we add ourselves to the global 

+

313 # stack of collectors. 

+

314 self._collectors.append(self) 

+

315 

+

316 # Replay all the events from fullcoverage into the new trace function. 

+

317 for args in traces0: 317 ↛ 318: line 317 didn't jump to line 318, because the loop on line 317 never started

+

318 (frame, event, arg), lineno = args 

+

319 try: 

+

320 fn(frame, event, arg, lineno=lineno) 

+

321 except TypeError: 

+

322 raise Exception("fullcoverage must be run with the C trace function.") 

+

323 

+

324 # Install our installation tracer in threading, to jump-start other 

+

325 # threads. 

+

326 if self.threading: 326 ↛ exit: line 326 didn't return from function 'start', because the condition on line 326 was never false

+

327 self.threading.settrace(self._installation_trace) 

+

328 

+

329 def stop(self): 

+

330 """Stop collecting trace information.""" 

+

331 assert self._collectors 

+

332 if self._collectors[-1] is not self: 332 ↛ 333: line 332 didn't jump to line 333, because the condition on line 332 was never true

+

333 print("self._collectors:") 

+

334 for c in self._collectors: 

+

335 print(" {!r}\n{}".format(c, c.origin)) 

+

336 assert self._collectors[-1] is self, ( 

+

337 "Expected current collector to be %r, but it's %r" % (self, self._collectors[-1]) 

+

338 ) 

+

339 

+

340 self.pause() 

+

341 

+

342 # Remove this Collector from the stack, and resume the one underneath 

+

343 # (if any). 

+

344 self._collectors.pop() 

+

345 if self._collectors: 

+

346 self._collectors[-1].resume() 

+

347 

+

348 def pause(self): 

+

349 """Pause tracing, but be prepared to `resume`.""" 

+

350 for tracer in self.tracers: 350 ↛ 357: line 350 didn't jump to line 357, because the loop on line 350 didn't complete

+

351 tracer.stop() 

+

352 stats = tracer.get_stats() 

+

353 if stats: 353 ↛ 354: line 353 didn't jump to line 354, because the condition on line 353 was never true

+

354 print("\nCoverage.py tracer stats:") 

+

355 for k in sorted(stats.keys()): 

+

356 print("%20s: %s" % (k, stats[k])) 

+

357 if self.threading: 

+

358 self.threading.settrace(None) 

+

359 

+

360 def resume(self): 

+

361 """Resume tracing after a `pause`.""" 

+

362 for tracer in self.tracers: 

+

363 tracer.start() 

+

364 if self.threading: 364 ↛ 367: line 364 didn't jump to line 367, because the condition on line 364 was never false

+

365 self.threading.settrace(self._installation_trace) 

+

366 else: 

+

367 self._start_tracer() 

+

368 

+

369 def _activity(self): 

+

370 """Has any activity been traced? 

+

371 

+

372 Returns a boolean, True if any trace function was invoked. 

+

373 

+

374 """ 

+

375 return any(tracer.activity() for tracer in self.tracers) 

+

376 

+

377 def switch_context(self, new_context): 

+

378 """Switch to a new dynamic context.""" 

+

379 self.flush_data() 

+

380 if self.static_context: 

+

381 context = self.static_context 

+

382 if new_context: 

+

383 context += "|" + new_context 

+

384 else: 

+

385 context = new_context 

+

386 self.covdata.set_context(context) 

+

387 

+

388 def disable_plugin(self, disposition): 

+

389 """Disable the plugin mentioned in `disposition`.""" 

+

390 file_tracer = disposition.file_tracer 

+

391 plugin = file_tracer._coverage_plugin 

+

392 plugin_name = plugin._coverage_plugin_name 

+

393 self.warn("Disabling plug-in {!r} due to previous exception".format(plugin_name)) 

+

394 plugin._coverage_enabled = False 

+

395 disposition.trace = False 

+

396 

+

397 def cached_mapped_file(self, filename): 

+

398 """A locally cached version of file names mapped through file_mapper.""" 

+

399 key = (type(filename), filename) 

+

400 try: 

+

401 return self.mapped_file_cache[key] 

+

402 except KeyError: 

+

403 return self.mapped_file_cache.setdefault(key, self.file_mapper(filename)) 

+

404 

+

405 def mapped_file_dict(self, d): 

+

406 """Return a dict like d, but with keys modified by file_mapper.""" 

+

407 # The call to litems() ensures that the GIL protects the dictionary 

+

408 # iterator against concurrent modifications by tracers running 

+

409 # in other threads. We try three times in case of concurrent 

+

410 # access, hoping to get a clean copy. 

+

411 runtime_err = None 

+

412 for _ in range(3): 412 ↛ 420: line 412 didn't jump to line 420, because the loop on line 412 didn't complete

+

413 try: 

+

414 items = litems(d) 

+

415 except RuntimeError as ex: 

+

416 runtime_err = ex 

+

417 else: 

+

418 break 

+

419 else: 

+

420 raise runtime_err 

+

421 

+

422 return dict((self.cached_mapped_file(k), v) for k, v in items if v) 

+

423 
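The retry loop in mapped_file_dict() exists because tracers in other threads may grow the dict while it is being copied; after three failed snapshots the last RuntimeError is re-raised. A small sketch of the same retry-then-give-up shape, using plain list(d.items()) where the code above goes through the litems() compatibility helper:

def snapshot_items(d, attempts=3):
    # Try a few times to get a stable copy of d's items despite concurrent writers.
    runtime_err = None
    for _ in range(attempts):
        try:
            return list(d.items())   # raises RuntimeError if d changes size mid-iteration
        except RuntimeError as ex:
            runtime_err = ex
    raise runtime_err

print(snapshot_items({"a.py": {1: None, 2: None}}))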

+

424 def plugin_was_disabled(self, plugin): 

+

425 """Record that `plugin` was disabled during the run.""" 

+

426 self.disabled_plugins.add(plugin._coverage_plugin_name) 

+

427 

+

428 def flush_data(self): 

+

429 """Save the collected data to our associated `CoverageData`. 

+

430 

+

431 Data may have also been saved along the way. This forces the 

+

432 last of the data to be saved. 

+

433 

+

434 Returns True if there was data to save, False if not. 

+

435 """ 

+

436 if not self._activity(): 

+

437 return False 

+

438 

+

439 if self.branch: 

+

440 self.covdata.add_arcs(self.mapped_file_dict(self.data)) 

+

441 else: 

+

442 self.covdata.add_lines(self.mapped_file_dict(self.data)) 

+

443 

+

444 file_tracers = { 

+

445 k: v for k, v in self.file_tracers.items() 

+

446 if v not in self.disabled_plugins 

+

447 } 

+

448 self.covdata.add_file_tracers(self.mapped_file_dict(file_tracers)) 

+

449 

+

450 self._clear_data() 

+

451 return True 

+
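As flush_data() shows, the collected dictionary is handed to CoverageData either as line data or as arc data depending on `branch`, matching the shapes described in the reset() comment above (line-number keys, or line-number pairs). A tiny illustration of those two shapes; the file name is hypothetical and the key conventions are simplified:

# Line coverage: {filename: {lineno: None}}
line_data = {"prog.py": {1: None, 2: None, 5: None}}

# Branch coverage: {filename: {(from_lineno, to_lineno): None}}
arc_data = {"prog.py": {(1, 2): None, (2, 5): None}}

def describe(data):
    for fname, keys in data.items():
        kind = "arcs" if keys and isinstance(next(iter(keys)), tuple) else "lines"
        print("%s: %d %s recorded" % (fname, len(keys), kind))

describe(line_data)
describe(arc_data)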
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_config_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_config_py.html new file mode 100644 index 000000000..f5524dfc4 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_config_py.html @@ -0,0 +1,636 @@ + + + + + + Coverage for coverage/config.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Config file for coverage.py""" 

+

5 

+

6import collections 

+

7import copy 

+

8import os 

+

9import os.path 

+

10import re 

+

11 

+

12from coverage import env 

+

13from coverage.backward import configparser, iitems, string_class 

+

14from coverage.misc import contract, CoverageException, isolate_module 

+

15from coverage.misc import substitute_variables 

+

16 

+

17from coverage.tomlconfig import TomlConfigParser, TomlDecodeError 

+

18 

+

19os = isolate_module(os) 

+

20 

+

21 

+

22class HandyConfigParser(configparser.RawConfigParser): 

+

23 """Our specialization of ConfigParser.""" 

+

24 

+

25 def __init__(self, our_file): 

+

26 """Create the HandyConfigParser. 

+

27 

+

28 `our_file` is True if this config file is specifically for coverage, 

+

29 False if we are examining another config file (tox.ini, setup.cfg) 

+

30 for possible settings. 

+

31 """ 

+

32 

+

33 configparser.RawConfigParser.__init__(self) 

+

34 self.section_prefixes = ["coverage:"] 

+

35 if our_file: 

+

36 self.section_prefixes.append("") 

+

37 

+

38 def read(self, filenames, encoding_unused=None): 

+

39 """Read a file name as UTF-8 configuration data.""" 

+

40 kwargs = {} 

+

41 if env.PYVERSION >= (3, 2): 

+

42 kwargs['encoding'] = "utf-8" 

+

43 return configparser.RawConfigParser.read(self, filenames, **kwargs) 

+

44 

+

45 def has_option(self, section, option): 

+

46 for section_prefix in self.section_prefixes: 

+

47 real_section = section_prefix + section 

+

48 has = configparser.RawConfigParser.has_option(self, real_section, option) 

+

49 if has: 

+

50 return has 

+

51 return False 

+

52 

+

53 def has_section(self, section): 

+

54 for section_prefix in self.section_prefixes: 

+

55 real_section = section_prefix + section 

+

56 has = configparser.RawConfigParser.has_section(self, real_section) 

+

57 if has: 

+

58 return real_section 

+

59 return False 

+

60 

+

61 def options(self, section): 

+

62 for section_prefix in self.section_prefixes: 

+

63 real_section = section_prefix + section 

+

64 if configparser.RawConfigParser.has_section(self, real_section): 

+

65 return configparser.RawConfigParser.options(self, real_section) 

+

66 raise configparser.NoSectionError(section) 

+

67 

+

68 def get_section(self, section): 

+

69 """Get the contents of a section, as a dictionary.""" 

+

70 d = {} 

+

71 for opt in self.options(section): 

+

72 d[opt] = self.get(section, opt) 

+

73 return d 

+

74 

+

75 def get(self, section, option, *args, **kwargs): 

+

76 """Get a value, replacing environment variables also. 

+

77 

+

78 The arguments are the same as `RawConfigParser.get`, but in the found 

+

79 value, ``$WORD`` or ``${WORD}`` are replaced by the value of the 

+

80 environment variable ``WORD``. 

+

81 

+

82 Returns the finished value. 

+

83 

+

84 """ 

+

85 for section_prefix in self.section_prefixes: 

+

86 real_section = section_prefix + section 

+

87 if configparser.RawConfigParser.has_option(self, real_section, option): 

+

88 break 

+

89 else: 

+

90 raise configparser.NoOptionError(option, section) 

+

91 

+

92 v = configparser.RawConfigParser.get(self, real_section, option, *args, **kwargs) 

+

93 v = substitute_variables(v, os.environ) 

+

94 return v 

+

95 
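HandyConfigParser.get() above does two things on top of RawConfigParser: it tries each section prefix (so [coverage:run] in a shared file and [run] in coverage's own file both resolve), and it expands environment variables in the value. A rough illustration of the same idea against a plain configparser, using os.path.expandvars as a simple stand-in for coverage's substitute_variables():

import configparser
import os

os.environ.setdefault("TMPDIR", "/tmp")

cp = configparser.RawConfigParser()
cp.read_string("[coverage:run]\ndata_file = ${TMPDIR}/.coverage\n")

for prefix in ("coverage:", ""):            # same order as section_prefixes above
    section = prefix + "run"
    if cp.has_section(section) and cp.has_option(section, "data_file"):
        raw = cp.get(section, "data_file")
        print(os.path.expandvars(raw))      # e.g. /tmp/.coverage
        break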

+

96 def getlist(self, section, option): 

+

97 """Read a list of strings. 

+

98 

+

99 The value of `section` and `option` is treated as a comma- and newline- 

+

100 separated list of strings. Each value is stripped of whitespace. 

+

101 

+

102 Returns the list of strings. 

+

103 

+

104 """ 

+

105 value_list = self.get(section, option) 

+

106 values = [] 

+

107 for value_line in value_list.split('\n'): 

+

108 for value in value_line.split(','): 

+

109 value = value.strip() 

+

110 if value: 

+

111 values.append(value) 

+

112 return values 

+

113 
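getlist() above treats the raw value as a comma- and newline-separated list and drops blanks; that splitting is easy to reproduce on its own. A minimal sketch (the helper name is only for illustration):

def split_config_list(value):
    # Split on newlines and commas, strip whitespace, drop empty entries.
    values = []
    for line in value.split('\n'):
        for item in line.split(','):
            item = item.strip()
            if item:
                values.append(item)
    return values

print(split_config_list("alpha, beta\n  gamma,\n"))   # ['alpha', 'beta', 'gamma']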

+

114 def getregexlist(self, section, option): 

+

115 """Read a list of full-line regexes. 

+

116 

+

117 The value of `section` and `option` is treated as a newline-separated 

+

118 list of regexes. Each value is stripped of whitespace. 

+

119 

+

120 Returns the list of strings. 

+

121 

+

122 """ 

+

123 line_list = self.get(section, option) 

+

124 value_list = [] 

+

125 for value in line_list.splitlines(): 

+

126 value = value.strip() 

+

127 try: 

+

128 re.compile(value) 

+

129 except re.error as e: 

+

130 raise CoverageException( 

+

131 "Invalid [%s].%s value %r: %s" % (section, option, value, e) 

+

132 ) 

+

133 if value: 

+

134 value_list.append(value) 

+

135 return value_list 

+

136 

+

137 

+

138# The default line exclusion regexes. 

+

139DEFAULT_EXCLUDE = [ 

+

140 r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)', 

+

141] 

+

142 

+

143# The default partial branch regexes, to be modified by the user. 

+

144DEFAULT_PARTIAL = [ 

+

145 r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)', 

+

146] 

+

147 

+

148# The default partial branch regexes, based on Python semantics. 

+

149# These are any Python branching constructs that can't actually execute all 

+

150# their branches. 

+

151DEFAULT_PARTIAL_ALWAYS = [ 

+

152 'while (True|1|False|0):', 

+

153 'if (True|1|False|0):', 

+

154] 

+

155 

+

156 

+

157class CoverageConfig(object): 

+

158 """Coverage.py configuration. 

+

159 

+

160 The attributes of this class are the various settings that control the 

+

161 operation of coverage.py. 

+

162 

+

163 """ 

+

164 # pylint: disable=too-many-instance-attributes 

+

165 

+

166 def __init__(self): 

+

167 """Initialize the configuration attributes to their defaults.""" 

+

168 # Metadata about the config. 

+

169 # We tried to read these config files. 

+

170 self.attempted_config_files = [] 

+

171 # We did read these config files, but maybe didn't find any content for us. 

+

172 self.config_files_read = [] 

+

173 # The file that gave us our configuration. 

+

174 self.config_file = None 

+

175 self._config_contents = None 

+

176 

+

177 # Defaults for [run] and [report] 

+

178 self._include = None 

+

179 self._omit = None 

+

180 

+

181 # Defaults for [run] 

+

182 self.branch = False 

+

183 self.command_line = None 

+

184 self.concurrency = None 

+

185 self.context = None 

+

186 self.cover_pylib = False 

+

187 self.data_file = ".coverage" 

+

188 self.debug = [] 

+

189 self.disable_warnings = [] 

+

190 self.dynamic_context = None 

+

191 self.note = None 

+

192 self.parallel = False 

+

193 self.plugins = [] 

+

194 self.relative_files = False 

+

195 self.run_include = None 

+

196 self.run_omit = None 

+

197 self.source = None 

+

198 self.source_pkgs = [] 

+

199 self.timid = False 

+

200 self._crash = None 

+

201 

+

202 # Defaults for [report] 

+

203 self.exclude_list = DEFAULT_EXCLUDE[:] 

+

204 self.fail_under = 0.0 

+

205 self.ignore_errors = False 

+

206 self.report_include = None 

+

207 self.report_omit = None 

+

208 self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:] 

+

209 self.partial_list = DEFAULT_PARTIAL[:] 

+

210 self.precision = 0 

+

211 self.report_contexts = None 

+

212 self.show_missing = False 

+

213 self.skip_covered = False 

+

214 self.skip_empty = False 

+

215 self.sort = None 

+

216 

+

217 # Defaults for [html] 

+

218 self.extra_css = None 

+

219 self.html_dir = "htmlcov" 

+

220 self.html_skip_covered = None 

+

221 self.html_skip_empty = None 

+

222 self.html_title = "Coverage report" 

+

223 self.show_contexts = False 

+

224 

+

225 # Defaults for [xml] 

+

226 self.xml_output = "coverage.xml" 

+

227 self.xml_package_depth = 99 

+

228 

+

229 # Defaults for [json] 

+

230 self.json_output = "coverage.json" 

+

231 self.json_pretty_print = False 

+

232 self.json_show_contexts = False 

+

233 

+

234 # Defaults for [paths] 

+

235 self.paths = collections.OrderedDict() 

+

236 

+

237 # Options for plugins 

+

238 self.plugin_options = {} 

+

239 

+

240 MUST_BE_LIST = [ 

+

241 "debug", "concurrency", "plugins", 

+

242 "report_omit", "report_include", 

+

243 "run_omit", "run_include", 

+

244 ] 

+

245 

+

246 def from_args(self, **kwargs): 

+

247 """Read config values from `kwargs`.""" 

+

248 for k, v in iitems(kwargs): 

+

249 if v is not None: 

+

250 if k in self.MUST_BE_LIST and isinstance(v, string_class): 

+

251 v = [v] 

+

252 setattr(self, k, v) 

+

253 

+

254 @contract(filename=str) 

+

255 def from_file(self, filename, our_file): 

+

256 """Read configuration from a .rc file. 

+

257 

+

258 `filename` is a file name to read. 

+

259 

+

260 `our_file` is True if this config file is specifically for coverage, 

+

261 False if we are examining another config file (tox.ini, setup.cfg) 

+

262 for possible settings. 

+

263 

+

264 Returns True or False: whether the file could be read and had some 

+

265 coverage.py settings in it. 

+

266 

+

267 """ 

+

268 _, ext = os.path.splitext(filename) 

+

269 if ext == '.toml': 

+

270 cp = TomlConfigParser(our_file) 

+

271 else: 

+

272 cp = HandyConfigParser(our_file) 

+

273 

+

274 self.attempted_config_files.append(filename) 

+

275 

+

276 try: 

+

277 files_read = cp.read(filename) 

+

278 except (configparser.Error, TomlDecodeError) as err: 

+

279 raise CoverageException("Couldn't read config file %s: %s" % (filename, err)) 

+

280 if not files_read: 

+

281 return False 

+

282 

+

283 self.config_files_read.extend(map(os.path.abspath, files_read)) 

+

284 

+

285 any_set = False 

+

286 try: 

+

287 for option_spec in self.CONFIG_FILE_OPTIONS: 

+

288 was_set = self._set_attr_from_config_option(cp, *option_spec) 

+

289 if was_set: 

+

290 any_set = True 

+

291 except ValueError as err: 

+

292 raise CoverageException("Couldn't read config file %s: %s" % (filename, err)) 

+

293 

+

294 # Check that there are no unrecognized options. 

+

295 all_options = collections.defaultdict(set) 

+

296 for option_spec in self.CONFIG_FILE_OPTIONS: 

+

297 section, option = option_spec[1].split(":") 

+

298 all_options[section].add(option) 

+

299 

+

300 for section, options in iitems(all_options): 

+

301 real_section = cp.has_section(section) 

+

302 if real_section: 

+

303 for unknown in set(cp.options(section)) - options: 

+

304 raise CoverageException( 

+

305 "Unrecognized option '[%s] %s=' in config file %s" % ( 

+

306 real_section, unknown, filename 

+

307 ) 

+

308 ) 

+

309 

+

310 # [paths] is special 

+

311 if cp.has_section('paths'): 

+

312 for option in cp.options('paths'): 

+

313 self.paths[option] = cp.getlist('paths', option) 

+

314 any_set = True 

+

315 

+

316 # plugins can have options 

+

317 for plugin in self.plugins: 

+

318 if cp.has_section(plugin): 

+

319 self.plugin_options[plugin] = cp.get_section(plugin) 

+

320 any_set = True 

+

321 

+

322 # Was this file used as a config file? If it's specifically our file, 

+

323 # then it was used. If we're piggybacking on someone else's file, 

+

324 # then it was only used if we found some settings in it. 

+

325 if our_file: 

+

326 used = True 

+

327 else: 

+

328 used = any_set 

+

329 

+

330 if used: 

+

331 self.config_file = os.path.abspath(filename) 

+

332 with open(filename, "rb") as f: 

+

333 self._config_contents = f.read() 

+

334 

+

335 return used 

+

336 

+

337 def copy(self): 

+

338 """Return a copy of the configuration.""" 

+

339 return copy.deepcopy(self) 

+

340 

+

341 CONFIG_FILE_OPTIONS = [ 

+

342 # These are *args for _set_attr_from_config_option: 

+

343 # (attr, where, type_="") 

+

344 # 

+

345 # attr is the attribute to set on the CoverageConfig object. 

+

346 # where is the section:name to read from the configuration file. 

+

347 # type_ is the optional type to apply, by using .getTYPE to read the 

+

348 # configuration value from the file. 

+

349 

+

350 # [run] 

+

351 ('branch', 'run:branch', 'boolean'), 

+

352 ('command_line', 'run:command_line'), 

+

353 ('concurrency', 'run:concurrency', 'list'), 

+

354 ('context', 'run:context'), 

+

355 ('cover_pylib', 'run:cover_pylib', 'boolean'), 

+

356 ('data_file', 'run:data_file'), 

+

357 ('debug', 'run:debug', 'list'), 

+

358 ('disable_warnings', 'run:disable_warnings', 'list'), 

+

359 ('dynamic_context', 'run:dynamic_context'), 

+

360 ('note', 'run:note'), 

+

361 ('parallel', 'run:parallel', 'boolean'), 

+

362 ('plugins', 'run:plugins', 'list'), 

+

363 ('relative_files', 'run:relative_files', 'boolean'), 

+

364 ('run_include', 'run:include', 'list'), 

+

365 ('run_omit', 'run:omit', 'list'), 

+

366 ('source', 'run:source', 'list'), 

+

367 ('source_pkgs', 'run:source_pkgs', 'list'), 

+

368 ('timid', 'run:timid', 'boolean'), 

+

369 ('_crash', 'run:_crash'), 

+

370 

+

371 # [report] 

+

372 ('exclude_list', 'report:exclude_lines', 'regexlist'), 

+

373 ('fail_under', 'report:fail_under', 'float'), 

+

374 ('ignore_errors', 'report:ignore_errors', 'boolean'), 

+

375 ('partial_always_list', 'report:partial_branches_always', 'regexlist'), 

+

376 ('partial_list', 'report:partial_branches', 'regexlist'), 

+

377 ('precision', 'report:precision', 'int'), 

+

378 ('report_contexts', 'report:contexts', 'list'), 

+

379 ('report_include', 'report:include', 'list'), 

+

380 ('report_omit', 'report:omit', 'list'), 

+

381 ('show_missing', 'report:show_missing', 'boolean'), 

+

382 ('skip_covered', 'report:skip_covered', 'boolean'), 

+

383 ('skip_empty', 'report:skip_empty', 'boolean'), 

+

384 ('sort', 'report:sort'), 

+

385 

+

386 # [html] 

+

387 ('extra_css', 'html:extra_css'), 

+

388 ('html_dir', 'html:directory'), 

+

389 ('html_skip_covered', 'html:skip_covered', 'boolean'), 

+

390 ('html_skip_empty', 'html:skip_empty', 'boolean'), 

+

391 ('html_title', 'html:title'), 

+

392 ('show_contexts', 'html:show_contexts', 'boolean'), 

+

393 

+

394 # [xml] 

+

395 ('xml_output', 'xml:output'), 

+

396 ('xml_package_depth', 'xml:package_depth', 'int'), 

+

397 

+

398 # [json] 

+

399 ('json_output', 'json:output'), 

+

400 ('json_pretty_print', 'json:pretty_print', 'boolean'), 

+

401 ('json_show_contexts', 'json:show_contexts', 'boolean'), 

+

402 ] 

+

403 

+

404 def _set_attr_from_config_option(self, cp, attr, where, type_=''): 

+

405 """Set an attribute on self if it exists in the ConfigParser. 

+

406 

+

407 Returns True if the attribute was set. 

+

408 

+

409 """ 

+

410 section, option = where.split(":") 

+

411 if cp.has_option(section, option): 

+

412 method = getattr(cp, 'get' + type_) 

+

413 setattr(self, attr, method(section, option)) 

+

414 return True 

+

415 return False 

+

416 
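Each CONFIG_FILE_OPTIONS entry above is applied by splitting 'section:option' and gluing the type onto 'get', so ('branch', 'run:branch', 'boolean') ends up as cp.getboolean('run', 'branch'). A small sketch of that getattr dispatch against a plain configparser (the helper and sample values are illustrative, not coverage internals):

import configparser

cp = configparser.RawConfigParser()
cp.read_string("[run]\nbranch = true\nprecision = 2\n")

def read_option(cp, where, type_=''):
    # Choose the reader by name: get, getboolean, getint, getfloat, ...
    section, option = where.split(':')
    method = getattr(cp, 'get' + type_)
    return method(section, option)

print(read_option(cp, 'run:branch', 'boolean'))   # True
print(read_option(cp, 'run:precision', 'int'))    # 2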

+

417 def get_plugin_options(self, plugin): 

+

418 """Get a dictionary of options for the plugin named `plugin`.""" 

+

419 return self.plugin_options.get(plugin, {}) 

+

420 

+

421 def set_option(self, option_name, value): 

+

422 """Set an option in the configuration. 

+

423 

+

424 `option_name` is a colon-separated string indicating the section and 

+

425 option name. For example, the ``branch`` option in the ``[run]`` 

+

426 section of the config file would be indicated with `"run:branch"`. 

+

427 

+

428 `value` is the new value for the option. 

+

429 

+

430 """ 

+

431 # Special-cased options. 

+

432 if option_name == "paths": 

+

433 self.paths = value 

+

434 return 

+

435 

+

436 # Check all the hard-coded options. 

+

437 for option_spec in self.CONFIG_FILE_OPTIONS: 

+

438 attr, where = option_spec[:2] 

+

439 if where == option_name: 

+

440 setattr(self, attr, value) 

+

441 return 

+

442 

+

443 # See if it's a plugin option. 

+

444 plugin_name, _, key = option_name.partition(":") 

+

445 if key and plugin_name in self.plugins: 

+

446 self.plugin_options.setdefault(plugin_name, {})[key] = value 

+

447 return 

+

448 

+

449 # If we get here, we didn't find the option. 

+

450 raise CoverageException("No such option: %r" % option_name) 

+

451 

+

452 def get_option(self, option_name): 

+

453 """Get an option from the configuration. 

+

454 

+

455 `option_name` is a colon-separated string indicating the section and 

+

456 option name. For example, the ``branch`` option in the ``[run]`` 

+

457 section of the config file would be indicated with `"run:branch"`. 

+

458 

+

459 Returns the value of the option. 

+

460 

+

461 """ 

+

462 # Special-cased options. 

+

463 if option_name == "paths": 

+

464 return self.paths 

+

465 

+

466 # Check all the hard-coded options. 

+

467 for option_spec in self.CONFIG_FILE_OPTIONS: 

+

468 attr, where = option_spec[:2] 

+

469 if where == option_name: 

+

470 return getattr(self, attr) 

+

471 

+

472 # See if it's a plugin option. 

+

473 plugin_name, _, key = option_name.partition(":") 

+

474 if key and plugin_name in self.plugins: 

+

475 return self.plugin_options.get(plugin_name, {}).get(key) 

+

476 

+

477 # If we get here, we didn't find the option. 

+

478 raise CoverageException("No such option: %r" % option_name) 

+

479 
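set_option() and get_option() above address everything with a 'section:option' string, falling back to treating unknown names as 'plugin_name:key' via partition(':'). A simplified stand-in for that lookup order, with a plain set in place of CONFIG_FILE_OPTIONS and without the plugin-registration check:

KNOWN = {"run:branch", "report:fail_under", "html:directory"}

def classify(option_name):
    # Known options first; anything else with a colon is assumed to be a plugin option.
    if option_name in KNOWN:
        return ("config", option_name)
    plugin_name, _, key = option_name.partition(":")
    if key:
        return ("plugin", plugin_name, key)
    raise KeyError("No such option: %r" % option_name)

print(classify("run:branch"))                 # ('config', 'run:branch')
print(classify("my_plugin:template_dir"))     # ('plugin', 'my_plugin', 'template_dir')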

+

480 def post_process_file(self, path): 

+

481 """Make final adjustments to a file path to make it usable.""" 

+

482 return os.path.expanduser(path) 

+

483 

+

484 def post_process(self): 

+

485 """Make final adjustments to settings to make them usable.""" 

+

486 self.data_file = self.post_process_file(self.data_file) 

+

487 self.html_dir = self.post_process_file(self.html_dir) 

+

488 self.xml_output = self.post_process_file(self.xml_output) 

+

489 self.paths = collections.OrderedDict( 

+

490 (k, [self.post_process_file(f) for f in v]) 

+

491 for k, v in self.paths.items() 

+

492 ) 

+

493 

+

494 

+

495def config_files_to_try(config_file): 

+

496 """What config files should we try to read? 

+

497 

+

498 Returns a list of tuples: 

+

499 (filename, is_our_file, was_file_specified) 

+

500 """ 

+

501 

+

502 # Some API users were specifying ".coveragerc" to mean the same as 

+

503 # True, so make it so. 

+

504 if config_file == ".coveragerc": 

+

505 config_file = True 

+

506 specified_file = (config_file is not True) 

+

507 if not specified_file: 

+

508 # No file was specified. Check COVERAGE_RCFILE. 

+

509 config_file = os.environ.get('COVERAGE_RCFILE') 

+

510 if config_file: 

+

511 specified_file = True 

+

512 if not specified_file: 

+

513 # Still no file specified. Default to .coveragerc 

+

514 config_file = ".coveragerc" 

+

515 files_to_try = [ 

+

516 (config_file, True, specified_file), 

+

517 ("setup.cfg", False, False), 

+

518 ("tox.ini", False, False), 

+

519 ("pyproject.toml", False, False), 

+

520 ] 

+

521 return files_to_try 

+

522 

+

523 

+

524def read_coverage_config(config_file, **kwargs): 

+

525 """Read the coverage.py configuration. 

+

526 

+

527 Arguments: 

+

528 config_file: a boolean or string, see the `Coverage` class for the 

+

529 tricky details. 

+

530 all others: keyword arguments from the `Coverage` class, used for 

+

531 setting values in the configuration. 

+

532 

+

533 Returns: 

+

534 config: 

+

535 config is a CoverageConfig object read from the appropriate 

+

536 configuration file. 

+

537 

+

538 """ 

+

539 # Build the configuration from a number of sources: 

+

540 # 1) defaults: 

+

541 config = CoverageConfig() 

+

542 

+

543 # 2) from a file: 

+

544 if config_file: 

+

545 files_to_try = config_files_to_try(config_file) 

+

546 

+

547 for fname, our_file, specified_file in files_to_try: 

+

548 config_read = config.from_file(fname, our_file=our_file) 

+

549 if config_read: 

+

550 break 

+

551 if specified_file: 

+

552 raise CoverageException("Couldn't read '%s' as a config file" % fname) 

+

553 

+

554 # $set_env.py: COVERAGE_DEBUG - Options for --debug. 

+

555 # 3) from environment variables: 

+

556 env_data_file = os.environ.get('COVERAGE_FILE') 

+

557 if env_data_file: 

+

558 config.data_file = env_data_file 

+

559 debugs = os.environ.get('COVERAGE_DEBUG') 

+

560 if debugs: 

+

561 config.debug.extend(d.strip() for d in debugs.split(",")) 

+

562 

+

563 # 4) from constructor arguments: 

+

564 config.from_args(**kwargs) 

+

565 

+

566 # Once all the config has been collected, there's a little post-processing 

+

567 # to do. 

+

568 config.post_process() 

+

569 

+

570 return config 

+
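read_coverage_config() above layers its sources in a fixed order: class defaults, then a config file, then the COVERAGE_FILE / COVERAGE_DEBUG environment variables, then keyword arguments, followed by post_process(). A hedged usage sketch of calling it the way Coverage.__init__ does; this is internal API, shown only to make the layering concrete, and it assumes a coverage checkout matching the code above:

import os
from coverage.config import read_coverage_config

os.environ["COVERAGE_FILE"] = "/tmp/.coverage.demo"   # step 3: environment override

config = read_coverage_config(
    config_file=True,       # step 2: try .coveragerc, setup.cfg, tox.ini, pyproject.toml
    branch=True,            # step 4: keyword arguments are applied last
    timid=None,             # None means "leave whatever the earlier steps decided"
)
print(config.data_file)     # '/tmp/.coverage.demo'
print(config.branch)        # True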
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_context_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_context_py.html new file mode 100644 index 000000000..1f2b6dd4b --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_context_py.html @@ -0,0 +1,157 @@ + + + + + + Coverage for coverage/context.py: 77.419% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Determine contexts for coverage.py""" 

+

5 

+

6 

+

7def combine_context_switchers(context_switchers): 

+

8 """Create a single context switcher from multiple switchers. 

+

9 

+

10 `context_switchers` is a list of functions that take a frame as an 

+

11 argument and return a string to use as the new context label. 

+

12 

+

13 Returns a function that composites `context_switchers` functions, or None 

+

14 if `context_switchers` is an empty list. 

+

15 

+

16 When invoked, the combined switcher calls `context_switchers` one-by-one 

+

17 until a string is returned. The combined switcher returns None if all 

+

18 `context_switchers` return None. 

+

19 """ 

+

20 if not context_switchers: 

+

21 return None 

+

22 

+

23 if len(context_switchers) == 1: 

+

24 return context_switchers[0] 

+

25 

+

26 def should_start_context(frame): 

+

27 """The combiner for multiple context switchers.""" 

+

28 for switcher in context_switchers: 

+

29 new_context = switcher(frame) 

+

30 if new_context is not None: 

+

31 return new_context 

+

32 return None 

+

33 

+

34 return should_start_context 

+

35 
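combine_context_switchers() simply tries each switcher in turn and returns the first non-None label. A small runnable illustration with two toy switchers; the frames are grabbed with sys._getframe purely for the demo, and it assumes the coverage package shown above is importable:

import sys
from coverage.context import combine_context_switchers

def by_test_name(frame):
    name = frame.f_code.co_name
    return name if name.startswith("test_") else None

def everything_else(frame):
    return "other"

switcher = combine_context_switchers([by_test_name, everything_else])

def test_addition():
    return switcher(sys._getframe())

def helper():
    return switcher(sys._getframe())

print(test_addition())   # 'test_addition'
print(helper())          # 'other'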

+

36 

+

37def should_start_context_test_function(frame): 

+

38 """Is this frame calling a test_* function?""" 

+

39 co_name = frame.f_code.co_name 

+

40 if co_name.startswith("test") or co_name == "runTest": 

+

41 return qualname_from_frame(frame) 

+

42 return None 

+

43 

+

44 

+

45def qualname_from_frame(frame): 

+

46 """Get a qualified name for the code running in `frame`.""" 

+

47 co = frame.f_code 

+

48 fname = co.co_name 

+

49 method = None 

+

50 if co.co_argcount and co.co_varnames[0] == "self": 

+

51 self = frame.f_locals["self"] 

+

52 method = getattr(self, fname, None) 

+

53 

+

54 if method is None: 

+

55 func = frame.f_globals.get(fname) 

+

56 if func is None: 

+

57 return None 

+

58 return func.__module__ + '.' + fname 

+

59 

+

60 func = getattr(method, '__func__', None) 

+

61 if func is None: 

+

62 cls = self.__class__ 

+

63 return cls.__module__ + '.' + cls.__name__ + "." + fname 

+

64 

+

65 if hasattr(func, '__qualname__'): 

+

66 qname = func.__module__ + '.' + func.__qualname__ 

+

67 else: 

+

68 for cls in getattr(self.__class__, '__mro__', ()): 

+

69 f = cls.__dict__.get(fname, None) 

+

70 if f is None: 

+

71 continue 

+

72 if f is func: 72 ↛ 68: line 72 didn't jump to line 68, because the condition on line 72 was never false

+

73 qname = cls.__module__ + '.' + cls.__name__ + "." + fname 

+

74 break 

+

75 else: 

+

76 # Support for old-style classes. 

+

77 def mro(bases): 

+

78 for base in bases: 

+

79 f = base.__dict__.get(fname, None) 

+

80 if f is func: 

+

81 return base.__module__ + '.' + base.__name__ + "." + fname 

+

82 for base in bases: 82 ↛ 86: line 82 didn't jump to line 86, because the loop on line 82 didn't complete

+

83 qname = mro(base.__bases__) 

+

84 if qname is not None: 84 ↛ 82: line 84 didn't jump to line 82, because the condition on line 84 was never false

+

85 return qname 

+

86 return None 

+

87 qname = mro([self.__class__]) 

+

88 if qname is None: 88 ↛ 89: line 88 didn't jump to line 89, because the condition on line 88 was never true

+

89 qname = func.__module__ + '.' + fname 

+

90 

+

91 return qname 

+
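should_start_context_test_function() above keys off the code object's name (test* or runTest) and then builds the context label with qualname_from_frame(). A small hedged demo of the labels it produces, again assuming the coverage package shown above is importable:

import sys
from coverage.context import should_start_context_test_function

class SampleTests(object):
    def test_widget(self):
        return should_start_context_test_function(sys._getframe())

    def helper(self):
        return should_start_context_test_function(sys._getframe())

t = SampleTests()
print(t.test_widget())   # '__main__.SampleTests.test_widget' when run as a script
print(t.helper())        # None: not a test* name, so no new context starts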
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_control_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_control_py.html new file mode 100644 index 000000000..0c79f0b0c --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_control_py.html @@ -0,0 +1,1211 @@ + + + + + + Coverage for coverage/control.py: 88.931% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Core control stuff for coverage.py.""" 

+

5 

+

6import atexit 

+

7import collections 

+

8import contextlib 

+

9import os 

+

10import os.path 

+

11import platform 

+

12import sys 

+

13import time 

+

14 

+

15from coverage import env 

+

16from coverage.annotate import AnnotateReporter 

+

17from coverage.backward import string_class, iitems 

+

18from coverage.collector import Collector, CTracer 

+

19from coverage.config import read_coverage_config 

+

20from coverage.context import should_start_context_test_function, combine_context_switchers 

+

21from coverage.data import CoverageData, combine_parallel_data 

+

22from coverage.debug import DebugControl, short_stack, write_formatted_info 

+

23from coverage.disposition import disposition_debug_msg 

+

24from coverage.files import PathAliases, abs_file, relative_filename, set_relative_directory 

+

25from coverage.html import HtmlReporter 

+

26from coverage.inorout import InOrOut 

+

27from coverage.jsonreport import JsonReporter 

+

28from coverage.misc import CoverageException, bool_or_none, join_regex 

+

29from coverage.misc import DefaultValue, ensure_dir_for_file, isolate_module 

+

30from coverage.plugin import FileReporter 

+

31from coverage.plugin_support import Plugins 

+

32from coverage.python import PythonFileReporter 

+

33from coverage.report import render_report 

+

34from coverage.results import Analysis, Numbers 

+

35from coverage.summary import SummaryReporter 

+

36from coverage.xmlreport import XmlReporter 

+

37 

+

38try: 

+

39 from coverage.multiproc import patch_multiprocessing 

+

40except ImportError: # pragma: only jython 

+

41 # Jython has no multiprocessing module. 

+

42 patch_multiprocessing = None 

+

43 

+

44os = isolate_module(os) 

+

45 

+

46@contextlib.contextmanager 

+

47def override_config(cov, **kwargs): 

+

48 """Temporarily tweak the configuration of `cov`. 

+

49 

+

50 The arguments are applied to `cov.config` with the `from_args` method. 

+

51 At the end of the with-statement, the old configuration is restored. 

+

52 """ 

+

53 original_config = cov.config 

+

54 cov.config = cov.config.copy() 

+

55 try: 

+

56 cov.config.from_args(**kwargs) 

+

57 yield 

+

58 finally: 

+

59 cov.config = original_config 

+

60 
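override_config() above is the context manager the reporting methods use to apply keyword arguments temporarily: the config is copied, from_args() is applied to the copy, and the original object is restored in the finally block. A hedged sketch of calling it directly (internal API, and the printed defaults assume no local config file overrides them):

from coverage import Coverage
from coverage.control import override_config

cov = Coverage()
print(cov.config.fail_under)        # 0.0 by default

with override_config(cov, fail_under=90):
    print(cov.config.fail_under)    # 90, set on a temporary copy of the config

print(cov.config.fail_under)        # 0.0 again: the original config object is back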

+

61 

+

62_DEFAULT_DATAFILE = DefaultValue("MISSING") 

+

63 

+

64class Coverage(object): 

+

65 """Programmatic access to coverage.py. 

+

66 

+

67 To use:: 

+

68 

+

69 from coverage import Coverage 

+

70 

+

71 cov = Coverage() 

+

72 cov.start() 

+

73 #.. call your code .. 

+

74 cov.stop() 

+

75 cov.html_report(directory='covhtml') 

+

76 

+

77 Note: in keeping with Python custom, names starting with underscore are 

+

78 not part of the public API. They might stop working at any point. Please 

+

79 limit yourself to documented methods to avoid problems. 

+

80 

+

81 """ 

+

82 

+

83 # The stack of started Coverage instances. 

+

84 _instances = [] 

+

85 

+

86 @classmethod 

+

87 def current(cls): 

+

88 """Get the latest started `Coverage` instance, if any. 

+

89 

+

90 Returns: a `Coverage` instance, or None. 

+

91 

+

92 .. versionadded:: 5.0 

+

93 

+

94 """ 

+

95 if cls._instances: 95 ↛ 98: line 95 didn't jump to line 98, because the condition on line 95 was never false

+

96 return cls._instances[-1] 

+

97 else: 

+

98 return None 

+

99 

+

100 def __init__( 

+

101 self, data_file=_DEFAULT_DATAFILE, data_suffix=None, cover_pylib=None, 

+

102 auto_data=False, timid=None, branch=None, config_file=True, 

+

103 source=None, source_pkgs=None, omit=None, include=None, debug=None, 

+

104 concurrency=None, check_preimported=False, context=None, 

+

105 ): # pylint: disable=too-many-arguments 

+

106 """ 

+

107 Many of these arguments duplicate and override values that can be 

+

108 provided in a configuration file. Parameters that are missing here 

+

109 will use values from the config file. 

+

110 

+

111 `data_file` is the base name of the data file to use. The config value 

+

112 defaults to ".coverage". None can be provided to prevent writing a data 

+

113 file. `data_suffix` is appended (with a dot) to `data_file` to create 

+

114 the final file name. If `data_suffix` is simply True, then a suffix is 

+

115 created with the machine and process identity included. 

+

116 

+

117 `cover_pylib` is a boolean determining whether Python code installed 

+

118 with the Python interpreter is measured. This includes the Python 

+

119 standard library and any packages installed with the interpreter. 

+

120 

+

121 If `auto_data` is true, then any existing data file will be read when 

+

122 coverage measurement starts, and data will be saved automatically when 

+

123 measurement stops. 

+

124 

+

125 If `timid` is true, then a slower and simpler trace function will be 

+

126 used. This is important for some environments where manipulation of 

+

127 tracing functions breaks the faster trace function. 

+

128 

+

129 If `branch` is true, then branch coverage will be measured in addition 

+

130 to the usual statement coverage. 

+

131 

+

132 `config_file` determines what configuration file to read: 

+

133 

+

134 * If it is ".coveragerc", it is interpreted as if it were True, 

+

135 for backward compatibility. 

+

136 

+

137 * If it is a string, it is the name of the file to read. If the 

+

138 file can't be read, it is an error. 

+

139 

+

140 * If it is True, then a few standard file names are tried 

+

141 (".coveragerc", "setup.cfg", "tox.ini"). It is not an error for 

+

142 these files to not be found. 

+

143 

+

144 * If it is False, then no configuration file is read. 

+

145 

+

146 `source` is a list of file paths or package names. Only code located 

+

147 in the trees indicated by the file paths or package names will be 

+

148 measured. 

+

149 

+

150 `source_pkgs` is a list of package names. It works the same as 

+

151 `source`, but can be used to name packages where the name can also be 

+

152 interpreted as a file path. 

+

153 

+

154 `include` and `omit` are lists of file name patterns. Files that match 

+

155 `include` will be measured, files that match `omit` will not. Each 

+

156 will also accept a single string argument. 

+

157 

+

158 `debug` is a list of strings indicating what debugging information is 

+

159 desired. 

+

160 

+

161 `concurrency` is a string indicating the concurrency library being used 

+

162 in the measured code. Without this, coverage.py will get incorrect 

+

163 results if these libraries are in use. Valid strings are "greenlet", 

+

164 "eventlet", "gevent", "multiprocessing", or "thread" (the default). 

+

165 This can also be a list of these strings. 

+

166 

+

167 If `check_preimported` is true, then when coverage is started, the 

+

168 already-imported files will be checked to see if they should be 

+

169 measured by coverage. Importing measured files before coverage is 

+

170 started can mean that code is missed. 

+

171 

+

172 `context` is a string to use as the :ref:`static context 

+

173 <static_contexts>` label for collected data. 

+

174 

+

175 .. versionadded:: 4.0 

+

176 The `concurrency` parameter. 

+

177 

+

178 .. versionadded:: 4.2 

+

179 The `concurrency` parameter can now be a list of strings. 

+

180 

+

181 .. versionadded:: 5.0 

+

182 The `check_preimported` and `context` parameters. 

+

183 

+

184 .. versionadded:: 5.3 

+

185 The `source_pkgs` parameter. 

+

186 

+

187 """ 

+

188 # data_file=None means no disk file at all. data_file missing means 

+

189 # use the value from the config file. 

+

190 self._no_disk = data_file is None 

+

191 if data_file is _DEFAULT_DATAFILE: 

+

192 data_file = None 

+

193 

+

194 # Build our configuration from a number of sources. 

+

195 self.config = read_coverage_config( 

+

196 config_file=config_file, 

+

197 data_file=data_file, cover_pylib=cover_pylib, timid=timid, 

+

198 branch=branch, parallel=bool_or_none(data_suffix), 

+

199 source=source, source_pkgs=source_pkgs, run_omit=omit, run_include=include, debug=debug, 

+

200 report_omit=omit, report_include=include, 

+

201 concurrency=concurrency, context=context, 

+

202 ) 

+

203 

+

204 # This is injectable by tests. 

+

205 self._debug_file = None 

+

206 

+

207 self._auto_load = self._auto_save = auto_data 

+

208 self._data_suffix_specified = data_suffix 

+

209 

+

210 # Is it ok for no data to be collected? 

+

211 self._warn_no_data = True 

+

212 self._warn_unimported_source = True 

+

213 self._warn_preimported_source = check_preimported 

+

214 self._no_warn_slugs = None 

+

215 

+

216 # A record of all the warnings that have been issued. 

+

217 self._warnings = [] 

+

218 

+

219 # Other instance attributes, set later. 

+

220 self._data = self._collector = None 

+

221 self._plugins = None 

+

222 self._inorout = None 

+

223 self._data_suffix = self._run_suffix = None 

+

224 self._exclude_re = None 

+

225 self._debug = None 

+

226 self._file_mapper = None 

+

227 

+

228 # State machine variables: 

+

229 # Have we initialized everything? 

+

230 self._inited = False 

+

231 self._inited_for_start = False 

+

232 # Have we started collecting and not stopped it? 

+

233 self._started = False 

+

234 # Should we write the debug output? 

+

235 self._should_write_debug = True 

+

236 

+

237 # If we have sub-process measurement happening automatically, then we 

+

238 # want any explicit creation of a Coverage object to mean, this process 

+

239 # is already coverage-aware, so don't auto-measure it. By now, the 

+

240 # auto-creation of a Coverage object has already happened. But we can 

+

241 # find it and tell it not to save its data. 

+

242 if not env.METACOV: 

+

243 _prevent_sub_process_measurement() 

+

244 

+

245 def _init(self): 

+

246 """Set all the initial state. 

+

247 

+

248 This is called by the public methods to initialize state. This lets us 

+

249 construct a :class:`Coverage` object, then tweak its state before this 

+

250 function is called. 

+

251 

+

252 """ 

+

253 if self._inited: 

+

254 return 

+

255 

+

256 self._inited = True 

+

257 

+

258 # Create and configure the debugging controller. COVERAGE_DEBUG_FILE 

+

259 # is an environment variable, the name of a file to append debug logs 

+

260 # to. 

+

261 self._debug = DebugControl(self.config.debug, self._debug_file) 

+

262 

+

263 if "multiprocessing" in (self.config.concurrency or ()): 

+

264 # Multi-processing uses parallel for the subprocesses, so also use 

+

265 # it for the main process. 

+

266 self.config.parallel = True 

+

267 

+

268 # _exclude_re is a dict that maps exclusion list names to compiled regexes. 

+

269 self._exclude_re = {} 

+

270 

+

271 set_relative_directory() 

+

272 self._file_mapper = relative_filename if self.config.relative_files else abs_file 

+

273 

+

274 # Load plugins 

+

275 self._plugins = Plugins.load_plugins(self.config.plugins, self.config, self._debug) 

+

276 

+

277 # Run configuring plugins. 

+

278 for plugin in self._plugins.configurers: 

+

279 # We need an object with set_option and get_option. Either self or 

+

280 # self.config will do. Choosing randomly stops people from doing 

+

281 # other things with those objects, against the public API. Yes, 

+

282 # this is a bit childish. :) 

+

283 plugin.configure([self, self.config][int(time.time()) % 2]) 

+

284 

+

285 def _post_init(self): 

+

286 """Stuff to do after everything is initialized.""" 

+

287 if self._should_write_debug: 

+

288 self._should_write_debug = False 

+

289 self._write_startup_debug() 

+

290 

+

291 # '[run] _crash' will raise an exception if the value is close by in 

+

292 # the call stack, for testing error handling. 

+

293 if self.config._crash and self.config._crash in short_stack(limit=4): 

+

294 raise Exception("Crashing because called by {}".format(self.config._crash)) 

+

295 

+

296 def _write_startup_debug(self): 

+

297 """Write out debug info at startup if needed.""" 

+

298 wrote_any = False 

+

299 with self._debug.without_callers(): 

+

300 if self._debug.should('config'): 

+

301 config_info = sorted(self.config.__dict__.items()) 

+

302 config_info = [(k, v) for k, v in config_info if not k.startswith('_')] 

+

303 write_formatted_info(self._debug, "config", config_info) 

+

304 wrote_any = True 

+

305 

+

306 if self._debug.should('sys'): 

+

307 write_formatted_info(self._debug, "sys", self.sys_info()) 

+

308 for plugin in self._plugins: 

+

309 header = "sys: " + plugin._coverage_plugin_name 

+

310 info = plugin.sys_info() 

+

311 write_formatted_info(self._debug, header, info) 

+

312 wrote_any = True 

+

313 

+

314 if wrote_any: 

+

315 write_formatted_info(self._debug, "end", ()) 

+

316 

+

317 def _should_trace(self, filename, frame): 

+

318 """Decide whether to trace execution in `filename`. 

+

319 

+

320 Calls `_should_trace_internal`, and returns the FileDisposition. 

+

321 

+

322 """ 

+

323 disp = self._inorout.should_trace(filename, frame) 

+

324 if self._debug.should('trace'): 

+

325 self._debug.write(disposition_debug_msg(disp)) 

+

326 return disp 

+

327 

+

328 def _check_include_omit_etc(self, filename, frame): 

+

329 Check a file name against the include/omit/etc. rules, verbosely. 

+

330 

+

331 Returns a boolean: True if the file should be traced, False if not. 

+

332 

+

333 """ 

+

334 reason = self._inorout.check_include_omit_etc(filename, frame) 

+

335 if self._debug.should('trace'): 

+

336 if not reason: 

+

337 msg = "Including %r" % (filename,) 

+

338 else: 

+

339 msg = "Not including %r: %s" % (filename, reason) 

+

340 self._debug.write(msg) 

+

341 

+

342 return not reason 

+

343 

+

344 def _warn(self, msg, slug=None, once=False): 

+

345 """Use `msg` as a warning. 

+

346 

+

347 For warning suppression, use `slug` as the shorthand. 

+

348 

+

349 If `once` is true, only show this warning once (determined by the 

+

350 slug). 

+

351 

+

352 """ 

+

353 if self._no_warn_slugs is None: 

+

354 self._no_warn_slugs = list(self.config.disable_warnings) 

+

355 

+

356 if slug in self._no_warn_slugs: 

+

357 # Don't issue the warning 

+

358 return 

+

359 

+

360 self._warnings.append(msg) 

+

361 if slug: 

+

362 msg = "%s (%s)" % (msg, slug) 

+

363 if self._debug.should('pid'): 363 ↛ 364: line 363 didn't jump to line 364, because the condition on line 363 was never true

+

364 msg = "[%d] %s" % (os.getpid(), msg) 

+

365 sys.stderr.write("Coverage.py warning: %s\n" % msg) 

+

366 

+

367 if once: 

+

368 self._no_warn_slugs.append(slug) 

+

369 

+

370 def get_option(self, option_name): 

+

371 """Get an option from the configuration. 

+

372 

+

373 `option_name` is a colon-separated string indicating the section and 

+

374 option name. For example, the ``branch`` option in the ``[run]`` 

+

375 section of the config file would be indicated with `"run:branch"`. 

+

376 

+

377 Returns the value of the option. The type depends on the option 

+

378 selected. 

+

379 

+

380 As a special case, an `option_name` of ``"paths"`` will return an 

+

381 OrderedDict with the entire ``[paths]`` section value. 

+

382 

+

383 .. versionadded:: 4.0 

+

384 

+

385 """ 

+

386 return self.config.get_option(option_name) 

+

387 

+

388 def set_option(self, option_name, value): 

+

389 """Set an option in the configuration. 

+

390 

+

391 `option_name` is a colon-separated string indicating the section and 

+

392 option name. For example, the ``branch`` option in the ``[run]`` 

+

393 section of the config file would be indicated with ``"run:branch"``. 

+

394 

+

395 `value` is the new value for the option. This should be an 

+

396 appropriate Python value. For example, use True for booleans, not the 

+

397 string ``"True"``. 

+

398 

+

399 As an example, calling:: 

+

400 

+

401 cov.set_option("run:branch", True) 

+

402 

+

403 has the same effect as this configuration file:: 

+

404 

+

405 [run] 

+

406 branch = True 

+

407 

+

408 As a special case, an `option_name` of ``"paths"`` will replace the 

+

409 entire ``[paths]`` section. The value should be an OrderedDict. 

+

410 

+

411 .. versionadded:: 4.0 

+

412 

+

413 """ 

+

414 self.config.set_option(option_name, value) 

+

415 

+

416 def load(self): 

+

417 """Load previously-collected coverage data from the data file.""" 

+

418 self._init() 

+

419 if self._collector: 419 ↛ 420: line 419 didn't jump to line 420, because the condition on line 419 was never true

+

420 self._collector.reset() 

+

421 should_skip = self.config.parallel and not os.path.exists(self.config.data_file) 

+

422 if not should_skip: 

+

423 self._init_data(suffix=None) 

+

424 self._post_init() 

+

425 if not should_skip: 

+

426 self._data.read() 

+

427 

+

428 def _init_for_start(self): 

+

429 """Initialization for start()""" 

+

430 # Construct the collector. 

+

431 concurrency = self.config.concurrency or () 

+

432 if "multiprocessing" in concurrency: 

+

433 if not patch_multiprocessing: 

+

434 raise CoverageException( # pragma: only jython 

+

435 "multiprocessing is not supported on this Python" 

+

436 ) 

+

437 patch_multiprocessing(rcfile=self.config.config_file) 

+

438 

+

439 dycon = self.config.dynamic_context 

+

440 if not dycon or dycon == "none": 

+

441 context_switchers = [] 

+

442 elif dycon == "test_function": 442 ↛ 445: line 442 didn't jump to line 445, because the condition on line 442 was never false

+

443 context_switchers = [should_start_context_test_function] 

+

444 else: 

+

445 raise CoverageException( 

+

446 "Don't understand dynamic_context setting: {!r}".format(dycon) 

+

447 ) 

+

448 

+

449 context_switchers.extend( 

+

450 plugin.dynamic_context for plugin in self._plugins.context_switchers 

+

451 ) 

+

452 

+

453 should_start_context = combine_context_switchers(context_switchers) 

+

454 

+

455 self._collector = Collector( 

+

456 should_trace=self._should_trace, 

+

457 check_include=self._check_include_omit_etc, 

+

458 should_start_context=should_start_context, 

+

459 file_mapper=self._file_mapper, 

+

460 timid=self.config.timid, 

+

461 branch=self.config.branch, 

+

462 warn=self._warn, 

+

463 concurrency=concurrency, 

+

464 ) 

+

465 

+

466 suffix = self._data_suffix_specified 

+

467 if suffix or self.config.parallel: 

+

468 if not isinstance(suffix, string_class): 

+

469 # if data_suffix=True, use .machinename.pid.random 

+

470 suffix = True 

+

471 else: 

+

472 suffix = None 

+

473 

+

474 self._init_data(suffix) 

+

475 

+

476 self._collector.use_data(self._data, self.config.context) 

+

477 

+

478 # Early warning if we aren't going to be able to support plugins. 

+

479 if self._plugins.file_tracers and not self._collector.supports_plugins: 

+

480 self._warn( 

+

481 "Plugin file tracers (%s) aren't supported with %s" % ( 

+

482 ", ".join( 

+

483 plugin._coverage_plugin_name 

+

484 for plugin in self._plugins.file_tracers 

+

485 ), 

+

486 self._collector.tracer_name(), 

+

487 ) 

+

488 ) 

+

489 for plugin in self._plugins.file_tracers: 

+

490 plugin._coverage_enabled = False 

+

491 

+

492 # Create the file classifying substructure. 

+

493 self._inorout = InOrOut( 

+

494 warn=self._warn, 

+

495 debug=(self._debug if self._debug.should('trace') else None), 

+

496 ) 

+

497 self._inorout.configure(self.config) 

+

498 self._inorout.plugins = self._plugins 

+

499 self._inorout.disp_class = self._collector.file_disposition_class 

+

500 

+

501 # It's useful to write debug info after initing for start. 

+

502 self._should_write_debug = True 

+

503 

+

504 atexit.register(self._atexit) 

+

505 

+

506 def _init_data(self, suffix): 

+

507 """Create a data file if we don't have one yet.""" 

+

508 if self._data is None: 

+

509 # Create the data file. We do this at construction time so that the 

+

510 # data file will be written into the directory where the process 

+

511 # started rather than wherever the process eventually chdir'd to. 

+

512 ensure_dir_for_file(self.config.data_file) 

+

513 self._data = CoverageData( 

+

514 basename=self.config.data_file, 

+

515 suffix=suffix, 

+

516 warn=self._warn, 

+

517 debug=self._debug, 

+

518 no_disk=self._no_disk, 

+

519 ) 

+

520 

+

521 def start(self): 

+

522 """Start measuring code coverage. 

+

523 

+

524 Coverage measurement only occurs in functions called after 

+

525 :meth:`start` is invoked. Statements in the same scope as 

+

526 :meth:`start` won't be measured. 

+

527 

+

528 Once you invoke :meth:`start`, you must also call :meth:`stop` 

+

529 eventually, or your process might not shut down cleanly. 

+

530 

+

531 """ 

+

532 self._init() 

+

533 if not self._inited_for_start: 

+

534 self._inited_for_start = True 

+

535 self._init_for_start() 

+

536 self._post_init() 

+

537 

+

538 # Issue warnings for possible problems. 

+

539 self._inorout.warn_conflicting_settings() 

+

540 

+

541 # See if we think some code that would eventually be measured has 

+

542 # already been imported. 

+

543 if self._warn_preimported_source: 

+

544 self._inorout.warn_already_imported_files() 

+

545 

+

546 if self._auto_load: 546 ↛ 547: line 546 didn't jump to line 547, because the condition on line 546 was never true

+

547 self.load() 

+

548 

+

549 self._collector.start() 

+

550 self._started = True 

+

551 self._instances.append(self) 

+

552 

+

553 def stop(self): 

+

554 """Stop measuring code coverage.""" 

+

555 if self._instances: 555 ↛ 558: line 555 didn't jump to line 558, because the condition on line 555 was never false

+

556 if self._instances[-1] is self: 

+

557 self._instances.pop() 

+

558 if self._started: 

+

559 self._collector.stop() 

+

560 self._started = False 

+

561 

+
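A short sketch of the start/stop/save cycle described in the docstrings above; `do_work` is a placeholder for whatever code is being measured::

    import coverage

    def do_work():
        return sum(range(10))   # stand-in for the code under measurement

    cov = coverage.Coverage()
    cov.start()
    try:
        do_work()               # only code executed after start() is measured
    finally:
        cov.stop()              # always pair start() with stop()
        cov.save()              # write the collected data to the data file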

562 def _atexit(self): 

+

563 """Clean up on process shutdown.""" 

+

564 if self._debug.should("process"): 564 ↛ 565: line 564 didn't jump to line 565, because the condition on line 564 was never true

+

565 self._debug.write("atexit: pid: {}, instance: {!r}".format(os.getpid(), self)) 

+

566 if self._started: 

+

567 self.stop() 

+

568 if self._auto_save: 568 ↛ 569: line 568 didn't jump to line 569, because the condition on line 568 was never true

+

569 self.save() 

+

570 

+

571 def erase(self): 

+

572 """Erase previously collected coverage data. 

+

573 

+

574 This removes the in-memory data collected in this session as well as 

+

575 discarding the data file. 

+

576 

+

577 """ 

+

578 self._init() 

+

579 self._post_init() 

+

580 if self._collector: 

+

581 self._collector.reset() 

+

582 self._init_data(suffix=None) 

+

583 self._data.erase(parallel=self.config.parallel) 

+

584 self._data = None 

+

585 self._inited_for_start = False 

+

586 

+

587 def switch_context(self, new_context): 

+

588 """Switch to a new dynamic context. 

+

589 

+

590 `new_context` is a string to use as the :ref:`dynamic context 

+

591 <dynamic_contexts>` label for collected data. If a :ref:`static 

+

592 context <static_contexts>` is in use, the static and dynamic context 

+

593 labels will be joined together with a pipe character. 

+

594 

+

595 Coverage collection must be started already. 

+

596 

+

597 .. versionadded:: 5.0 

+

598 

+

599 """ 

+

600 if not self._started: # pragma: part started 

+

601 raise CoverageException( 

+

602 "Cannot switch context, coverage is not started" 

+

603 ) 

+

604 

+

605 if self._collector.should_start_context: 

+

606 self._warn("Conflicting dynamic contexts", slug="dynamic-conflict", once=True) 

+

607 

+

608 self._collector.switch_context(new_context) 

+

609 

+
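As an illustration, a test runner could label collected data per test by switching the dynamic context around each test; this is only a sketch, and `run_one_test` plus the test names are placeholders::

    import coverage

    def run_one_test(name):
        pass                    # placeholder for actually running a test

    cov = coverage.Coverage()
    cov.start()
    for test_name in ["test_alpha", "test_beta"]:
        cov.switch_context(test_name)    # label the data collected next
        run_one_test(test_name)
    cov.stop()
    cov.save()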

610 def clear_exclude(self, which='exclude'): 

+

611 """Clear the exclude list.""" 

+

612 self._init() 

+

613 setattr(self.config, which + "_list", []) 

+

614 self._exclude_regex_stale() 

+

615 

+

616 def exclude(self, regex, which='exclude'): 

+

617 """Exclude source lines from execution consideration. 

+

618 

+

619 A number of lists of regular expressions are maintained. Each list 

+

620 selects lines that are treated differently during reporting. 

+

621 

+

622 `which` determines which list is modified. The "exclude" list selects 

+

623 lines that are not considered executable at all. The "partial" list 

+

624 indicates lines with branches that are not taken. 

+

625 

+

626 `regex` is a regular expression. The regex is added to the specified 

+

627 list. If any of the regexes in the list is found in a line, the line 

+

628 is marked for special treatment during reporting. 

+

629 

+

630 """ 

+

631 self._init() 

+

632 excl_list = getattr(self.config, which + "_list") 

+

633 excl_list.append(regex) 

+

634 self._exclude_regex_stale() 

+

635 

+

636 def _exclude_regex_stale(self): 

+

637 """Drop all the compiled exclusion regexes, a list was modified.""" 

+

638 self._exclude_re.clear() 

+

639 

+

640 def _exclude_regex(self, which): 

+

641 """Return a compiled regex for the given exclusion list.""" 

+

642 if which not in self._exclude_re: 

+

643 excl_list = getattr(self.config, which + "_list") 

+

644 self._exclude_re[which] = join_regex(excl_list) 

+

645 return self._exclude_re[which] 

+

646 

+

647 def get_exclude_list(self, which='exclude'): 

+

648 """Return a list of excluded regex patterns. 

+

649 

+

650 `which` indicates which list is desired. See :meth:`exclude` for the 

+

651 lists that are available, and their meaning. 

+

652 

+

653 """ 

+

654 self._init() 

+

655 return getattr(self.config, which + "_list") 

+

656 

+
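The exclusion lists can also be managed programmatically; the regexes below are examples only::

    import coverage

    cov = coverage.Coverage()
    # Lines matching this regex are not considered executable at all.
    cov.exclude(r"if DEBUG:")
    # The "partial" list marks lines whose branches need not all be taken.
    cov.exclude(r"while True:", which="partial")
    print(cov.get_exclude_list())              # the "exclude" patterns
    print(cov.get_exclude_list("partial"))     # the "partial" patterns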

657 def save(self): 

+

658 """Save the collected coverage data to the data file.""" 

+

659 data = self.get_data() 

+

660 data.write() 

+

661 

+

662 def combine(self, data_paths=None, strict=False, keep=False): 

+

663 """Combine together a number of similarly-named coverage data files. 

+

664 

+

665 All coverage data files whose name starts with `data_file` (from the 

+

666 coverage() constructor) will be read, and combined together into the 

+

667 current measurements. 

+

668 

+

669 `data_paths` is a list of files or directories from which data should 

+

670 be combined. If no list is passed, then the data files from the 

+

671 directory indicated by the current data file (probably the current 

+

672 directory) will be combined. 

+

673 

+

674 If `strict` is true, then it is an error to attempt to combine when 

+

675 there are no data files to combine. 

+

676 

+

677 If `keep` is true, then original input data files won't be deleted. 

+

678 

+

679 .. versionadded:: 4.0 

+

680 The `data_paths` parameter. 

+

681 

+

682 .. versionadded:: 4.3 

+

683 The `strict` parameter. 

+

684 

+

685 .. versionadded:: 5.5

+

686 The `keep` parameter. 

+

687 """ 

+

688 self._init() 

+

689 self._init_data(suffix=None) 

+

690 self._post_init() 

+

691 self.get_data() 

+

692 

+

693 aliases = None 

+

694 if self.config.paths: 

+

695 aliases = PathAliases() 

+

696 for paths in self.config.paths.values(): 

+

697 result = paths[0] 

+

698 for pattern in paths[1:]: 

+

699 aliases.add(pattern, result) 

+

700 

+

701 combine_parallel_data( 

+

702 self._data, 

+

703 aliases=aliases, 

+

704 data_paths=data_paths, 

+

705 strict=strict, 

+

706 keep=keep, 

+

707 ) 

+

708 

+
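A sketch of combining parallel data files, assuming the runs used `parallel = True` so that suffixed `.coverage.*` files exist in the current directory::

    import coverage

    cov = coverage.Coverage()
    # Merge ".coverage.*" files found in the given paths into one data set,
    # raising an error if none are found, and keeping the input files.
    cov.combine(data_paths=["."], strict=True, keep=True)
    cov.save()                  # write the combined data to the main file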

709 def get_data(self): 

+

710 """Get the collected data. 

+

711 

+

712 Also warn about various problems collecting data. 

+

713 

+

714 Returns a :class:`coverage.CoverageData`, the collected coverage data. 

+

715 

+

716 .. versionadded:: 4.0 

+

717 

+

718 """ 

+

719 self._init() 

+

720 self._init_data(suffix=None) 

+

721 self._post_init() 

+

722 

+

723 for plugin in self._plugins: 

+

724 if not plugin._coverage_enabled: 

+

725 self._collector.plugin_was_disabled(plugin) 

+

726 

+

727 if self._collector and self._collector.flush_data(): 

+

728 self._post_save_work() 

+

729 

+

730 return self._data 

+

731 

+

732 def _post_save_work(self): 

+

733 """After saving data, look for warnings, post-work, etc. 

+

734 

+

735 Warn about things that should have happened but didn't. 

+

736 Look for unexecuted files. 

+

737 

+

738 """ 

+

739 # If there are still entries in the source_pkgs_unmatched list, 

+

740 # then we never encountered those packages. 

+

741 if self._warn_unimported_source: 741 ↛ 745: line 741 didn't jump to line 745, because the condition on line 741 was never false

+

742 self._inorout.warn_unimported_source() 

+

743 

+

744 # Find out if we got any data. 

+

745 if not self._data and self._warn_no_data: 

+

746 self._warn("No data was collected.", slug="no-data-collected") 

+

747 

+

748 # Touch all the files that could have executed, so that we can 

+

749 # mark completely unexecuted files as 0% covered. 

+

750 if self._data is not None: 750 ↛ 758: line 750 didn't jump to line 758, because the condition on line 750 was never false

+

751 file_paths = collections.defaultdict(list) 

+

752 for file_path, plugin_name in self._inorout.find_possibly_unexecuted_files(): 

+

753 file_path = self._file_mapper(file_path) 

+

754 file_paths[plugin_name].append(file_path) 

+

755 for plugin_name, paths in file_paths.items(): 

+

756 self._data.touch_files(paths, plugin_name) 

+

757 

+

758 if self.config.note: 

+

759 self._warn("The '[run] note' setting is no longer supported.") 

+

760 

+

761 # Backward compatibility with version 1. 

+

762 def analysis(self, morf): 

+

763 """Like `analysis2` but doesn't return excluded line numbers.""" 

+

764 f, s, _, m, mf = self.analysis2(morf) 

+

765 return f, s, m, mf 

+

766 

+

767 def analysis2(self, morf): 

+

768 """Analyze a module. 

+

769 

+

770 `morf` is a module or a file name. It will be analyzed to determine 

+

771 its coverage statistics. The return value is a 5-tuple: 

+

772 

+

773 * The file name for the module. 

+

774 * A list of line numbers of executable statements. 

+

775 * A list of line numbers of excluded statements. 

+

776 * A list of line numbers of statements not run (missing from 

+

777 execution). 

+

778 * A readable formatted string of the missing line numbers. 

+

779 

+

780 The analysis uses the source file itself and the current measured 

+

781 coverage data. 

+

782 

+

783 """ 

+

784 analysis = self._analyze(morf) 

+

785 return ( 

+

786 analysis.filename, 

+

787 sorted(analysis.statements), 

+

788 sorted(analysis.excluded), 

+

789 sorted(analysis.missing), 

+

790 analysis.missing_formatted(), 

+

791 ) 

+

792 

+
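Unpacking the 5-tuple returned by `analysis2`; the standard-library `json` module is used here only as a convenient module to measure::

    import coverage
    import json

    cov = coverage.Coverage()
    cov.start()
    json.dumps({"answer": 42})      # execute something from the module
    cov.stop()

    filename, statements, excluded, missing, formatted = cov.analysis2(json)
    print("%s: %d statements, %d missing (%s)" % (
        filename, len(statements), len(missing), formatted))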

793 def _analyze(self, it): 

+

794 """Analyze a single morf or code unit. 

+

795 

+

796 Returns an `Analysis` object. 

+

797 

+

798 """ 

+

799 # All reporting comes through here, so do reporting initialization. 

+

800 self._init() 

+

801 Numbers.set_precision(self.config.precision) 

+

802 self._post_init() 

+

803 

+

804 data = self.get_data() 

+

805 if not isinstance(it, FileReporter): 

+

806 it = self._get_file_reporter(it) 

+

807 

+

808 return Analysis(data, it, self._file_mapper) 

+

809 

+

810 def _get_file_reporter(self, morf): 

+

811 """Get a FileReporter for a module or file name.""" 

+

812 plugin = None 

+

813 file_reporter = "python" 

+

814 

+

815 if isinstance(morf, string_class): 

+

816 mapped_morf = self._file_mapper(morf) 

+

817 plugin_name = self._data.file_tracer(mapped_morf) 

+

818 if plugin_name: 

+

819 plugin = self._plugins.get(plugin_name) 

+

820 

+

821 if plugin: 821 ↛ 830: line 821 didn't jump to line 830, because the condition on line 821 was never false

+

822 file_reporter = plugin.file_reporter(mapped_morf) 

+

823 if file_reporter is None: 823 ↛ 824: line 823 didn't jump to line 824, because the condition on line 823 was never true

+

824 raise CoverageException( 

+

825 "Plugin %r did not provide a file reporter for %r." % ( 

+

826 plugin._coverage_plugin_name, morf 

+

827 ) 

+

828 ) 

+

829 

+

830 if file_reporter == "python": 

+

831 file_reporter = PythonFileReporter(morf, self) 

+

832 

+

833 return file_reporter 

+

834 

+

835 def _get_file_reporters(self, morfs=None): 

+

836 """Get a list of FileReporters for a list of modules or file names. 

+

837 

+

838 For each module or file name in `morfs`, find a FileReporter. Return 

+

839 the list of FileReporters. 

+

840 

+

841 If `morfs` is a single module or file name, this returns a list of one 

+

842 FileReporter. If `morfs` is empty or None, then the list of all files 

+

843 measured is used to find the FileReporters. 

+

844 

+

845 """ 

+

846 if not morfs: 

+

847 morfs = self._data.measured_files() 

+

848 

+

849 # Be sure we have a collection. 

+

850 if not isinstance(morfs, (list, tuple, set)): 

+

851 morfs = [morfs] 

+

852 

+

853 file_reporters = [self._get_file_reporter(morf) for morf in morfs] 

+

854 return file_reporters 

+

855 

+

856 def report( 

+

857 self, morfs=None, show_missing=None, ignore_errors=None, 

+

858 file=None, omit=None, include=None, skip_covered=None, 

+

859 contexts=None, skip_empty=None, precision=None, sort=None 

+

860 ): 

+

861 """Write a textual summary report to `file`. 

+

862 

+

863 Each module in `morfs` is listed, with counts of statements, executed 

+

864 statements, missing statements, and a list of lines missed. 

+

865 

+

866 If `show_missing` is true, then details of which lines or branches are 

+

867 missing will be included in the report. If `ignore_errors` is true, 

+

868 then a failure while reporting a single file will not stop the entire 

+

869 report. 

+

870 

+

871 `file` is a file-like object, suitable for writing. 

+

872 

+

873 `include` is a list of file name patterns. Files that match will be 

+

874 included in the report. Files matching `omit` will not be included in 

+

875 the report. 

+

876 

+

877 If `skip_covered` is true, don't report on files with 100% coverage. 

+

878 

+

879 If `skip_empty` is true, don't report on empty files (those that have 

+

880 no statements). 

+

881 

+

882 `contexts` is a list of regular expressions. Only data from 

+

883 :ref:`dynamic contexts <dynamic_contexts>` that match one of those 

+

884 expressions (using :func:`re.search <python:re.search>`) will be 

+

885 included in the report. 

+

886 

+

887 `precision` is the number of digits to display after the decimal 

+

888 point for percentages. 

+

889 

+

890 All of the arguments default to the settings read from the 

+

891 :ref:`configuration file <config>`. 

+

892 

+

893 Returns a float, the total percentage covered. 

+

894 

+

895 .. versionadded:: 4.0 

+

896 The `skip_covered` parameter. 

+

897 

+

898 .. versionadded:: 5.0 

+

899 The `contexts` and `skip_empty` parameters. 

+

900 

+

901 .. versionadded:: 5.2 

+

902 The `precision` parameter. 

+

903 

+

904 """ 

+

905 with override_config( 

+

906 self, 

+

907 ignore_errors=ignore_errors, report_omit=omit, report_include=include, 

+

908 show_missing=show_missing, skip_covered=skip_covered, 

+

909 report_contexts=contexts, skip_empty=skip_empty, precision=precision, 

+

910 sort=sort 

+

911 ): 

+

912 reporter = SummaryReporter(self) 

+

913 return reporter.report(morfs, outfile=file) 

+

914 

+
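A usage sketch for the textual report, assuming a `.coverage` data file already exists; the report is captured in a StringIO instead of stdout::

    import io
    import coverage

    cov = coverage.Coverage()
    cov.load()                      # read previously collected data

    buf = io.StringIO()
    total = cov.report(
        show_missing=True,          # list missing line numbers per file
        skip_covered=True,          # omit files already at 100%
        precision=2,
        file=buf,
    )
    print(buf.getvalue())
    print("total coverage: %.2f%%" % total)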

915 def annotate( 

+

916 self, morfs=None, directory=None, ignore_errors=None, 

+

917 omit=None, include=None, contexts=None, 

+

918 ): 

+

919 """Annotate a list of modules. 

+

920 

+

921 Each module in `morfs` is annotated. The source is written to a new 

+

922 file, named with a ",cover" suffix, with each line prefixed with a 

+

923 marker to indicate the coverage of the line. Covered lines have ">", 

+

924 excluded lines have "-", and missing lines have "!". 

+

925 

+

926 See :meth:`report` for other arguments. 

+

927 

+

928 """ 

+

929 with override_config(self, 

+

930 ignore_errors=ignore_errors, report_omit=omit, 

+

931 report_include=include, report_contexts=contexts, 

+

932 ): 

+

933 reporter = AnnotateReporter(self) 

+

934 reporter.report(morfs, directory=directory) 

+

935 

+

936 def html_report( 

+

937 self, morfs=None, directory=None, ignore_errors=None, 

+

938 omit=None, include=None, extra_css=None, title=None, 

+

939 skip_covered=None, show_contexts=None, contexts=None, 

+

940 skip_empty=None, precision=None, 

+

941 ): 

+

942 """Generate an HTML report. 

+

943 

+

944 The HTML is written to `directory`. The file "index.html" is the 

+

945 overview starting point, with links to more detailed pages for 

+

946 individual modules. 

+

947 

+

948 `extra_css` is a path to a file of other CSS to apply on the page. 

+

949 It will be copied into the HTML directory. 

+

950 

+

951 `title` is a text string (not HTML) to use as the title of the HTML 

+

952 report. 

+

953 

+

954 See :meth:`report` for other arguments. 

+

955 

+

956 Returns a float, the total percentage covered. 

+

957 

+

958 .. note:: 

+

959 The HTML report files are generated incrementally based on the 

+

960 source files and coverage results. If you modify the report files, 

+

961 the changes will not be considered. You should be careful about 

+

962 changing the files in the report folder. 

+

963 

+

964 """ 

+

965 with override_config(self, 

+

966 ignore_errors=ignore_errors, report_omit=omit, report_include=include, 

+

967 html_dir=directory, extra_css=extra_css, html_title=title, 

+

968 html_skip_covered=skip_covered, show_contexts=show_contexts, report_contexts=contexts, 

+

969 html_skip_empty=skip_empty, precision=precision, 

+

970 ): 

+

971 reporter = HtmlReporter(self) 

+

972 return reporter.report(morfs) 

+

973 

+

974 def xml_report( 

+

975 self, morfs=None, outfile=None, ignore_errors=None, 

+

976 omit=None, include=None, contexts=None, skip_empty=None, 

+

977 ): 

+

978 """Generate an XML report of coverage results. 

+

979 

+

980 The report is compatible with Cobertura reports. 

+

981 

+

982 Each module in `morfs` is included in the report. `outfile` is the 

+

983 path to write the file to, "-" will write to stdout. 

+

984 

+

985 See :meth:`report` for other arguments. 

+

986 

+

987 Returns a float, the total percentage covered. 

+

988 

+

989 """ 

+

990 with override_config(self, 

+

991 ignore_errors=ignore_errors, report_omit=omit, report_include=include, 

+

992 xml_output=outfile, report_contexts=contexts, skip_empty=skip_empty, 

+

993 ): 

+

994 return render_report(self.config.xml_output, XmlReporter(self), morfs) 

+

995 

+

996 def json_report( 

+

997 self, morfs=None, outfile=None, ignore_errors=None, 

+

998 omit=None, include=None, contexts=None, pretty_print=None, 

+

999 show_contexts=None 

+

1000 ): 

+

1001 """Generate a JSON report of coverage results. 

+

1002 

+

1003 Each module in `morfs` is included in the report. `outfile` is the 

+

1004 path to write the file to, "-" will write to stdout. 

+

1005 

+

1006 See :meth:`report` for other arguments. 

+

1007 

+

1008 Returns a float, the total percentage covered. 

+

1009 

+

1010 .. versionadded:: 5.0 

+

1011 

+

1012 """ 

+

1013 with override_config(self, 

+

1014 ignore_errors=ignore_errors, report_omit=omit, report_include=include, 

+

1015 json_output=outfile, report_contexts=contexts, json_pretty_print=pretty_print, 

+

1016 json_show_contexts=show_contexts 

+

1017 ): 

+

1018 return render_report(self.config.json_output, JsonReporter(self), morfs) 

+

1019 

+
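The three report writers above share most of their arguments; a sketch producing all three from previously saved data (the output paths are examples)::

    import coverage

    cov = coverage.Coverage()
    cov.load()

    total = cov.html_report(directory="htmlcov", title="Example report")
    cov.xml_report(outfile="coverage.xml")            # Cobertura-compatible
    cov.json_report(outfile="coverage.json", pretty_print=True)
    print("overall: %.1f%%" % total)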

1020 def sys_info(self): 

+

1021 """Return a list of (key, value) pairs showing internal information.""" 

+

1022 

+

1023 import coverage as covmod 

+

1024 

+

1025 self._init() 

+

1026 self._post_init() 

+

1027 

+

1028 def plugin_info(plugins): 

+

1029 """Make an entry for the sys_info from a list of plug-ins.""" 

+

1030 entries = [] 

+

1031 for plugin in plugins: 

+

1032 entry = plugin._coverage_plugin_name 

+

1033 if not plugin._coverage_enabled: 

+

1034 entry += " (disabled)" 

+

1035 entries.append(entry) 

+

1036 return entries 

+

1037 

+

1038 info = [ 

+

1039 ('version', covmod.__version__), 

+

1040 ('coverage', covmod.__file__), 

+

1041 ('tracer', self._collector.tracer_name() if self._collector else "-none-"), 

+

1042 ('CTracer', 'available' if CTracer else "unavailable"), 

+

1043 ('plugins.file_tracers', plugin_info(self._plugins.file_tracers)), 

+

1044 ('plugins.configurers', plugin_info(self._plugins.configurers)), 

+

1045 ('plugins.context_switchers', plugin_info(self._plugins.context_switchers)), 

+

1046 ('configs_attempted', self.config.attempted_config_files), 

+

1047 ('configs_read', self.config.config_files_read), 

+

1048 ('config_file', self.config.config_file), 

+

1049 ('config_contents', 

+

1050 repr(self.config._config_contents) 

+

1051 if self.config._config_contents 

+

1052 else '-none-' 

+

1053 ), 

+

1054 ('data_file', self._data.data_filename() if self._data is not None else "-none-"), 

+

1055 ('python', sys.version.replace('\n', '')), 

+

1056 ('platform', platform.platform()), 

+

1057 ('implementation', platform.python_implementation()), 

+

1058 ('executable', sys.executable), 

+

1059 ('def_encoding', sys.getdefaultencoding()), 

+

1060 ('fs_encoding', sys.getfilesystemencoding()), 

+

1061 ('pid', os.getpid()), 

+

1062 ('cwd', os.getcwd()), 

+

1063 ('path', sys.path), 

+

1064 ('environment', sorted( 

+

1065 ("%s = %s" % (k, v)) 

+

1066 for k, v in iitems(os.environ) 

+

1067 if any(slug in k for slug in ("COV", "PY")) 

+

1068 )), 

+

1069 ('command_line', " ".join(getattr(sys, 'argv', ['-none-']))), 

+

1070 ] 

+

1071 

+

1072 if self._inorout: 

+

1073 info.extend(self._inorout.sys_info()) 

+

1074 

+

1075 info.extend(CoverageData.sys_info()) 

+

1076 

+

1077 return info 

+

1078 

+

1079 

+

1080# Mega debugging... 

+

1081# $set_env.py: COVERAGE_DEBUG_CALLS - Lots and lots of output about calls to Coverage. 

+

1082if int(os.environ.get("COVERAGE_DEBUG_CALLS", 0)): # pragma: debugging 

+

1083 from coverage.debug import decorate_methods, show_calls 

+

1084 

+

1085 Coverage = decorate_methods(show_calls(show_args=True), butnot=['get_data'])(Coverage) 

+

1086 

+

1087 

+

1088def process_startup(): 

+

1089 """Call this at Python start-up to perhaps measure coverage. 

+

1090 

+

1091 If the environment variable COVERAGE_PROCESS_START is defined, coverage 

+

1092 measurement is started. The value of the variable is the config file 

+

1093 to use. 

+

1094 

+

1095 There are two ways to configure your Python installation to invoke this 

+

1096 function when Python starts: 

+

1097 

+

1098 #. Create or append to sitecustomize.py to add these lines:: 

+

1099 

+

1100 import coverage 

+

1101 coverage.process_startup() 

+

1102 

+

1103 #. Create a .pth file in your Python installation containing:: 

+

1104 

+

1105 import coverage; coverage.process_startup() 

+

1106 

+

1107 Returns the :class:`Coverage` instance that was started, or None if it was 

+

1108 not started by this call. 

+

1109 

+

1110 """ 

+

1111 cps = os.environ.get("COVERAGE_PROCESS_START") 

+

1112 if not cps: 1112 ↛ 1114: line 1112 didn't jump to line 1114, because the condition on line 1112 was never true

+

1113 # No request for coverage, nothing to do. 

+

1114 return None 

+

1115 

+

1116 # This function can be called more than once in a process. This happens 

+

1117 # because some virtualenv configurations make the same directory visible 

+

1118 # twice in sys.path. This means that the .pth file will be found twice, 

+

1119 # and executed twice, executing this function twice. We set a global 

+

1120 # flag (an attribute on this function) to indicate that coverage.py has 

+

1121 # already been started, so we can avoid doing it twice. 

+

1122 # 

+

1123 # https://github.com/nedbat/coveragepy/issues/340 has more details. 

+

1124 

+

1125 if hasattr(process_startup, "coverage"): 1125 ↛ 1130: line 1125 didn't jump to line 1130, because the condition on line 1125 was never false

+

1126 # We've annotated this function before, so we must have already 

+

1127 # started coverage.py in this process. Nothing to do. 

+

1128 return None 

+

1129 

+

1130 cov = Coverage(config_file=cps) 

+

1131 process_startup.coverage = cov 

+

1132 cov._warn_no_data = False 

+

1133 cov._warn_unimported_source = False 

+

1134 cov._warn_preimported_source = False 

+

1135 cov._auto_save = True 

+

1136 cov.start() 

+

1137 

+

1138 return cov 

+

1139 

+

1140 

+
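A sketch of the parent-process side of subprocess measurement: the parent points COVERAGE_PROCESS_START at a config file before launching children, and each child's sitecustomize or .pth hook (shown in the docstring above) calls `process_startup()`. The file names here are placeholders::

    import os
    import subprocess

    env = dict(os.environ)
    # Children started with this environment will begin measuring at
    # interpreter start-up, using the named configuration file.
    env["COVERAGE_PROCESS_START"] = ".coveragerc"

    subprocess.check_call(["python", "child_script.py"], env=env)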

1141def _prevent_sub_process_measurement(): 

+

1142 """Stop any subprocess auto-measurement from writing data.""" 

+

1143 auto_created_coverage = getattr(process_startup, "coverage", None) 

+

1144 if auto_created_coverage is not None: 

+

1145 auto_created_coverage._auto_save = False 

+
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_data_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_data_py.html
new file mode 100644
index 000000000..dbb090bc2
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_data_py.html
@@ -0,0 +1,191 @@
+ Coverage for coverage/data.py: 91.860%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Coverage data for coverage.py. 

+

5 

+

6This file had the 4.x JSON data support, which is now gone. This file still 

+

7has storage-agnostic helpers, and is kept to avoid changing too many imports. 

+

8CoverageData is now defined in sqldata.py, and imported here to keep the 

+

9imports working. 

+

10 

+

11""" 

+

12 

+

13import glob 

+

14import os.path 

+

15 

+

16from coverage.misc import CoverageException, file_be_gone 

+

17from coverage.sqldata import CoverageData 

+

18 

+

19 

+

20def line_counts(data, fullpath=False): 

+

21 """Return a dict summarizing the line coverage data. 

+

22 

+

23 Keys are based on the file names, and values are the number of executed 

+

24 lines. If `fullpath` is true, then the keys are the full pathnames of 

+

25 the files, otherwise they are the basenames of the files. 

+

26 

+

27 Returns a dict mapping file names to counts of lines. 

+

28 

+

29 """ 

+

30 summ = {} 

+

31 if fullpath: 

+

32 filename_fn = lambda f: f 

+

33 else: 

+

34 filename_fn = os.path.basename 

+

35 for filename in data.measured_files(): 

+

36 summ[filename_fn(filename)] = len(data.lines(filename)) 

+

37 return summ 

+

38 

+
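A small sketch using `line_counts` to summarize a saved data file (assumes a `.coverage` file exists)::

    import coverage
    from coverage.data import line_counts

    cov = coverage.Coverage()
    cov.load()
    counts = line_counts(cov.get_data(), fullpath=False)
    for name, executed in sorted(counts.items()):
        print("%-30s %d executed lines" % (name, executed))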

39 

+

40def add_data_to_hash(data, filename, hasher): 

+

41 """Contribute `filename`'s data to the `hasher`. 

+

42 

+

43 `hasher` is a `coverage.misc.Hasher` instance to be updated with 

+

44 the file's data. It should only get the results data, not the run 

+

45 data. 

+

46 

+

47 """ 

+

48 if data.has_arcs(): 

+

49 hasher.update(sorted(data.arcs(filename) or [])) 

+

50 else: 

+

51 hasher.update(sorted(data.lines(filename) or [])) 

+

52 hasher.update(data.file_tracer(filename)) 

+

53 

+

54 

+

55def combine_parallel_data(data, aliases=None, data_paths=None, strict=False, keep=False): 

+

56 """Combine a number of data files together. 

+

57 

+

58 Treat `data.filename` as a file prefix, and combine the data from all 

+

59 of the data files starting with that prefix plus a dot. 

+

60 

+

61 If `aliases` is provided, it's a `PathAliases` object that is used to 

+

62 re-map paths to match the local machine's. 

+

63 

+

64 If `data_paths` is provided, it is a list of directories or files to 

+

65 combine. Directories are searched for files that start with 

+

66 `data.filename` plus dot as a prefix, and those files are combined. 

+

67 

+

68 If `data_paths` is not provided, then the directory portion of 

+

69 `data.filename` is used as the directory to search for data files. 

+

70 

+

71 Unless `keep` is True, every data file found and combined is then deleted from disk. If a file

+

72 cannot be read, a warning will be issued, and the file will not be 

+

73 deleted. 

+

74 

+

75 If `strict` is true, and no files are found to combine, an error is 

+

76 raised. 

+

77 

+

78 """ 

+

79 # Because of the os.path.abspath in the constructor, data_dir will 

+

80 # never be an empty string. 

+

81 data_dir, local = os.path.split(data.base_filename()) 

+

82 localdot = local + '.*' 

+

83 

+

84 data_paths = data_paths or [data_dir] 

+

85 files_to_combine = [] 

+

86 for p in data_paths: 

+

87 if os.path.isfile(p): 

+

88 files_to_combine.append(os.path.abspath(p)) 

+

89 elif os.path.isdir(p): 

+

90 pattern = os.path.join(os.path.abspath(p), localdot) 

+

91 files_to_combine.extend(glob.glob(pattern)) 

+

92 else: 

+

93 raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,)) 

+

94 

+

95 if strict and not files_to_combine: 

+

96 raise CoverageException("No data to combine") 

+

97 

+

98 files_combined = 0 

+

99 for f in files_to_combine: 

+

100 if f == data.data_filename(): 

+

101 # Sometimes we are combining into a file which is one of the 

+

102 # parallel files. Skip that file. 

+

103 if data._debug.should('dataio'): 103 ↛ 104: line 103 didn't jump to line 104, because the condition on line 103 was never true

+

104 data._debug.write("Skipping combining ourself: %r" % (f,)) 

+

105 continue 

+

106 if data._debug.should('dataio'): 106 ↛ 107: line 106 didn't jump to line 107, because the condition on line 106 was never true

+

107 data._debug.write("Combining data file %r" % (f,)) 

+

108 try: 

+

109 new_data = CoverageData(f, debug=data._debug) 

+

110 new_data.read() 

+

111 except CoverageException as exc: 

+

112 if data._warn: 112 ↛ 99: line 112 didn't jump to line 99, because the condition on line 112 was never false

+

113 # The CoverageException has the file name in it, so just 

+

114 # use the message as the warning. 

+

115 data._warn(str(exc)) 

+

116 else: 

+

117 data.update(new_data, aliases=aliases) 

+

118 files_combined += 1 

+

119 if not keep: 

+

120 if data._debug.should('dataio'): 120 ↛ 121: line 120 didn't jump to line 121, because the condition on line 120 was never true

+

121 data._debug.write("Deleting combined data file %r" % (f,)) 

+

122 file_be_gone(f) 

+

123 

+

124 if strict and not files_combined: 

+

125 raise CoverageException("No usable data files") 

+
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_debug_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_debug_py.html
new file mode 100644
index 000000000..615089eda
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_debug_py.html
@@ -0,0 +1,472 @@
+ Coverage for coverage/debug.py: 94.406%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Control of and utilities for debugging.""" 

+

5 

+

6import contextlib 

+

7import functools 

+

8import inspect 

+

9import itertools 

+

10import os 

+

11import pprint 

+

12import sys 

+

13try: 

+

14 import _thread 

+

15except ImportError: 

+

16 import thread as _thread 

+

17 

+

18from coverage.backward import reprlib, StringIO 

+

19from coverage.misc import isolate_module 

+

20 

+

21os = isolate_module(os) 

+

22 

+

23 

+

24# When debugging, it can be helpful to force some options, especially when 

+

25# debugging the configuration mechanisms you usually use to control debugging! 

+

26# This is a list of forced debugging options. 

+

27FORCED_DEBUG = [] 

+

28FORCED_DEBUG_FILE = None 

+

29 

+

30 

+

31class DebugControl(object): 

+

32 """Control and output for debugging.""" 

+

33 

+

34 show_repr_attr = False # For SimpleReprMixin 

+

35 

+

36 def __init__(self, options, output): 

+

37 """Configure the options and output file for debugging.""" 

+

38 self.options = list(options) + FORCED_DEBUG 

+

39 self.suppress_callers = False 

+

40 

+

41 filters = [] 

+

42 if self.should('pid'): 

+

43 filters.append(add_pid_and_tid) 

+

44 self.output = DebugOutputFile.get_one( 

+

45 output, 

+

46 show_process=self.should('process'), 

+

47 filters=filters, 

+

48 ) 

+

49 self.raw_output = self.output.outfile 

+

50 

+

51 def __repr__(self): 

+

52 return "<DebugControl options=%r raw_output=%r>" % (self.options, self.raw_output) 

+

53 

+

54 def should(self, option): 

+

55 """Decide whether to output debug information in category `option`.""" 

+

56 if option == "callers" and self.suppress_callers: 

+

57 return False 

+

58 return (option in self.options) 

+

59 

+

60 @contextlib.contextmanager 

+

61 def without_callers(self): 

+

62 """A context manager to prevent call stacks from being logged.""" 

+

63 old = self.suppress_callers 

+

64 self.suppress_callers = True 

+

65 try: 

+

66 yield 

+

67 finally: 

+

68 self.suppress_callers = old 

+

69 

+

70 def write(self, msg): 

+

71 """Write a line of debug output. 

+

72 

+

73 `msg` is the line to write. A newline will be appended. 

+

74 

+

75 """ 

+

76 self.output.write(msg+"\n") 

+

77 if self.should('self'): 77 ↛ 78: line 77 didn't jump to line 78, because the condition on line 77 was never true

+

78 caller_self = inspect.stack()[1][0].f_locals.get('self') 

+

79 if caller_self is not None: 

+

80 self.output.write("self: {!r}\n".format(caller_self)) 

+

81 if self.should('callers'): 

+

82 dump_stack_frames(out=self.output, skip=1) 

+

83 self.output.flush() 

+

84 

+

85 

+

86class DebugControlString(DebugControl): 

+

87 """A `DebugControl` that writes to a StringIO, for testing.""" 

+

88 def __init__(self, options): 

+

89 super(DebugControlString, self).__init__(options, StringIO()) 

+

90 

+

91 def get_output(self): 

+

92 """Get the output text from the `DebugControl`.""" 

+

93 return self.raw_output.getvalue() 

+

94 

+

95 

+

96class NoDebugging(object): 

+

97 """A replacement for DebugControl that will never try to do anything.""" 

+

98 def should(self, option): # pylint: disable=unused-argument 

+

99 """Should we write debug messages? Never.""" 

+

100 return False 

+

101 

+

102 

+

103def info_header(label): 

+

104 """Make a nice header string.""" 

+

105 return "--{:-<60s}".format(" "+label+" ") 

+

106 

+

107 

+

108def info_formatter(info): 

+

109 """Produce a sequence of formatted lines from info. 

+

110 

+

111 `info` is a sequence of pairs (label, data). The produced lines are 

+

112 nicely formatted, ready to print. 

+

113 

+

114 """ 

+

115 info = list(info) 

+

116 if not info: 

+

117 return 

+

118 label_len = 30 

+

119 assert all(len(l) < label_len for l, _ in info) 

+

120 for label, data in info: 

+

121 if data == []: 

+

122 data = "-none-" 

+

123 if isinstance(data, (list, set, tuple)): 

+

124 prefix = "%*s:" % (label_len, label) 

+

125 for e in data: 

+

126 yield "%*s %s" % (label_len+1, prefix, e) 

+

127 prefix = "" 

+

128 else: 

+

129 yield "%*s: %s" % (label_len, label, data) 

+

130 

+

131 

+

132def write_formatted_info(writer, header, info): 

+

133 """Write a sequence of (label,data) pairs nicely.""" 

+

134 writer.write(info_header(header)) 

+

135 for line in info_formatter(info): 

+

136 writer.write(" %s" % line) 

+

137 

+
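A quick sketch of the two formatting helpers above, printing to stdout instead of a debug writer; the labels and values are arbitrary examples::

    from coverage.debug import info_header, info_formatter

    info = [
        ("version", "5.5"),
        ("plugins", ["myplugin", "otherplugin"]),  # lists print one item per line
    ]
    print(info_header("example"))
    for line in info_formatter(info):
        print(" %s" % line)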

138 

+

139def short_stack(limit=None, skip=0): 

+

140 """Return a string summarizing the call stack. 

+

141 

+

142 The string is multi-line, with one line per stack frame. Each line shows 

+

143 the function name, the file name, and the line number: 

+

144 

+

145 ... 

+

146 start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95 

+

147 import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81 

+

148 import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159 

+

149 ... 

+

150 

+

151 `limit` is the number of frames to include, defaulting to all of them. 

+

152 

+

153 `skip` is the number of frames to skip, so that debugging functions can 

+

154 call this and not be included in the result. 

+

155 

+

156 """ 

+

157 stack = inspect.stack()[limit:skip:-1] 

+

158 return "\n".join("%30s : %s:%d" % (t[3], t[1], t[2]) for t in stack) 

+

159 

+

160 

+

161def dump_stack_frames(limit=None, out=None, skip=0): 

+

162 """Print a summary of the stack to stdout, or someplace else.""" 

+

163 out = out or sys.stdout 

+

164 out.write(short_stack(limit=limit, skip=skip+1)) 

+

165 out.write("\n") 

+

166 

+

167 

+

168def clipped_repr(text, numchars=50): 

+

169 """`repr(text)`, but limited to `numchars`.""" 

+

170 r = reprlib.Repr() 

+

171 r.maxstring = numchars 

+

172 return r.repr(text) 

+

173 

+

174 

+

175def short_id(id64): 

+

176 """Given a 64-bit id, make a shorter 16-bit one.""" 

+

177 id16 = 0 

+

178 for offset in range(0, 64, 16): 

+

179 id16 ^= id64 >> offset 

+

180 return id16 & 0xFFFF 

+

181 

+

182 

+

183def add_pid_and_tid(text): 

+

184 """A filter to add pid and tid to debug messages.""" 

+

185 # Thread ids are useful, but too long. Make a shorter one. 

+

186 tid = "{:04x}".format(short_id(_thread.get_ident())) 

+

187 text = "{:5d}.{}: {}".format(os.getpid(), tid, text) 

+

188 return text 

+

189 

+

190 

+

191class SimpleReprMixin(object): 

+

192 """A mixin implementing a simple __repr__.""" 

+

193 simple_repr_ignore = ['simple_repr_ignore', '$coverage.object_id'] 

+

194 

+

195 def __repr__(self): 

+

196 show_attrs = ( 

+

197 (k, v) for k, v in self.__dict__.items() 

+

198 if getattr(v, "show_repr_attr", True) 

+

199 and not callable(v) 

+

200 and k not in self.simple_repr_ignore 

+

201 ) 

+

202 return "<{klass} @0x{id:x} {attrs}>".format( 

+

203 klass=self.__class__.__name__, 

+

204 id=id(self), 

+

205 attrs=" ".join("{}={!r}".format(k, v) for k, v in show_attrs), 

+

206 ) 

+

207 

+

208 

+

209def simplify(v): # pragma: debugging 

+

210 """Turn things which are nearly dict/list/etc into dict/list/etc.""" 

+

211 if isinstance(v, dict): 

+

212 return {k:simplify(vv) for k, vv in v.items()} 

+

213 elif isinstance(v, (list, tuple)): 

+

214 return type(v)(simplify(vv) for vv in v) 

+

215 elif hasattr(v, "__dict__"): 

+

216 return simplify({'.'+k: v for k, v in v.__dict__.items()}) 

+

217 else: 

+

218 return v 

+

219 

+

220 

+

221def pp(v): # pragma: debugging 

+

222 """Debug helper to pretty-print data, including SimpleNamespace objects.""" 

+

223 # Might not be needed in 3.9+ 

+

224 pprint.pprint(simplify(v)) 

+

225 

+

226 

+

227def filter_text(text, filters): 

+

228 """Run `text` through a series of filters. 

+

229 

+

230 `filters` is a list of functions. Each takes a string and returns a 

+

231 string. Each is run in turn. 

+

232 

+

233 Returns: the final string that results after all of the filters have 

+

234 run. 

+

235 

+

236 """ 

+

237 clean_text = text.rstrip() 

+

238 ending = text[len(clean_text):] 

+

239 text = clean_text 

+

240 for fn in filters: 

+

241 lines = [] 

+

242 for line in text.splitlines(): 

+

243 lines.extend(fn(line).splitlines()) 

+

244 text = "\n".join(lines) 

+

245 return text + ending 

+

246 

+
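A sketch pairing `filter_text` with the `add_pid_and_tid` filter defined earlier; each line of the message gets the pid/tid prefix while the trailing newline is preserved::

    from coverage.debug import add_pid_and_tid, filter_text

    message = "tracing started\nfirst file opened\n"
    print(filter_text(message, [add_pid_and_tid]), end="")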

247 

+

248class CwdTracker(object): # pragma: debugging 

+

249 """A class to add cwd info to debug messages.""" 

+

250 def __init__(self): 

+

251 self.cwd = None 

+

252 

+

253 def filter(self, text): 

+

254 """Add a cwd message for each new cwd.""" 

+

255 cwd = os.getcwd() 

+

256 if cwd != self.cwd: 

+

257 text = "cwd is now {!r}\n".format(cwd) + text 

+

258 self.cwd = cwd 

+

259 return text 

+

260 

+

261 

+

262class DebugOutputFile(object): # pragma: debugging 

+

263 """A file-like object that includes pid and cwd information.""" 

+

264 def __init__(self, outfile, show_process, filters): 

+

265 self.outfile = outfile 

+

266 self.show_process = show_process 

+

267 self.filters = list(filters) 

+

268 

+

269 if self.show_process: 

+

270 self.filters.insert(0, CwdTracker().filter) 

+

271 self.write("New process: executable: %r\n" % (sys.executable,)) 

+

272 self.write("New process: cmd: %r\n" % (getattr(sys, 'argv', None),)) 

+

273 if hasattr(os, 'getppid'): 

+

274 self.write("New process: pid: %r, parent pid: %r\n" % (os.getpid(), os.getppid())) 

+

275 

+

276 SYS_MOD_NAME = '$coverage.debug.DebugOutputFile.the_one' 

+

277 

+

278 @classmethod 

+

279 def get_one(cls, fileobj=None, show_process=True, filters=(), interim=False): 

+

280 """Get a DebugOutputFile. 

+

281 

+

282 If `fileobj` is provided, then a new DebugOutputFile is made with it. 

+

283 

+

284 If `fileobj` isn't provided, then a file is chosen 

+

285 (COVERAGE_DEBUG_FILE, or stderr), and a process-wide singleton 

+

286 DebugOutputFile is made. 

+

287 

+

288 `show_process` controls whether the debug file adds process-level 

+

289 information, and filters is a list of other message filters to apply. 

+

290 

+

291 `filters` are the text filters to apply to the stream to annotate with 

+

292 pids, etc. 

+

293 

+

294 If `interim` is true, then a future `get_one` can replace this one. 

+

295 

+

296 """ 

+

297 if fileobj is not None: 

+

298 # Make DebugOutputFile around the fileobj passed. 

+

299 return cls(fileobj, show_process, filters) 

+

300 

+

301 # Because of the way igor.py deletes and re-imports modules, 

+

302 # this class can be defined more than once. But we really want 

+

303 # a process-wide singleton. So stash it in sys.modules instead of 

+

304 # on a class attribute. Yes, this is aggressively gross. 

+

305 the_one, is_interim = sys.modules.get(cls.SYS_MOD_NAME, (None, True)) 

+

306 if the_one is None or is_interim: 

+

307 if fileobj is None: 

+

308 debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE) 

+

309 if debug_file_name: 

+

310 fileobj = open(debug_file_name, "a") 

+

311 else: 

+

312 fileobj = sys.stderr 

+

313 the_one = cls(fileobj, show_process, filters) 

+

314 sys.modules[cls.SYS_MOD_NAME] = (the_one, interim) 

+

315 return the_one 

+

316 

+

317 def write(self, text): 

+

318 """Just like file.write, but filter through all our filters.""" 

+

319 self.outfile.write(filter_text(text, self.filters)) 

+

320 self.outfile.flush() 

+

321 

+

322 def flush(self): 

+

323 """Flush our file.""" 

+

324 self.outfile.flush() 

+

325 

+

326 

+

327def log(msg, stack=False): # pragma: debugging 

+

328 """Write a log message as forcefully as possible.""" 

+

329 out = DebugOutputFile.get_one(interim=True) 

+

330 out.write(msg+"\n") 

+

331 if stack: 

+

332 dump_stack_frames(out=out, skip=1) 

+

333 

+

334 

+

335def decorate_methods(decorator, butnot=(), private=False): # pragma: debugging 

+

336 """A class decorator to apply a decorator to methods.""" 

+

337 def _decorator(cls): 

+

338 for name, meth in inspect.getmembers(cls, inspect.isroutine): 

+

339 if name not in cls.__dict__: 

+

340 continue 

+

341 if name != "__init__": 

+

342 if not private and name.startswith("_"): 

+

343 continue 

+

344 if name in butnot: 

+

345 continue 

+

346 setattr(cls, name, decorator(meth)) 

+

347 return cls 

+

348 return _decorator 

+

349 

+

350 

+

351def break_in_pudb(func): # pragma: debugging 

+

352 """A function decorator to stop in the debugger for each call.""" 

+

353 @functools.wraps(func) 

+

354 def _wrapper(*args, **kwargs): 

+

355 import pudb 

+

356 sys.stdout = sys.__stdout__ 

+

357 pudb.set_trace() 

+

358 return func(*args, **kwargs) 

+

359 return _wrapper 

+

360 

+

361 

+

362OBJ_IDS = itertools.count() 

+

363CALLS = itertools.count() 

+

364OBJ_ID_ATTR = "$coverage.object_id" 

+

365 

+

366def show_calls(show_args=True, show_stack=False, show_return=False): # pragma: debugging 

+

367 """A method decorator to debug-log each call to the function.""" 

+

368 def _decorator(func): 

+

369 @functools.wraps(func) 

+

370 def _wrapper(self, *args, **kwargs): 

+

371 oid = getattr(self, OBJ_ID_ATTR, None) 

+

372 if oid is None: 

+

373 oid = "{:08d} {:04d}".format(os.getpid(), next(OBJ_IDS)) 

+

374 setattr(self, OBJ_ID_ATTR, oid) 

+

375 extra = "" 

+

376 if show_args: 

+

377 eargs = ", ".join(map(repr, args)) 

+

378 ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items()) 

+

379 extra += "(" 

+

380 extra += eargs 

+

381 if eargs and ekwargs: 

+

382 extra += ", " 

+

383 extra += ekwargs 

+

384 extra += ")" 

+

385 if show_stack: 

+

386 extra += " @ " 

+

387 extra += "; ".join(_clean_stack_line(l) for l in short_stack().splitlines()) 

+

388 callid = next(CALLS) 

+

389 msg = "{} {:04d} {}{}\n".format(oid, callid, func.__name__, extra) 

+

390 DebugOutputFile.get_one(interim=True).write(msg) 

+

391 ret = func(self, *args, **kwargs) 

+

392 if show_return: 

+

393 msg = "{} {:04d} {} return {!r}\n".format(oid, callid, func.__name__, ret) 

+

394 DebugOutputFile.get_one(interim=True).write(msg) 

+

395 return ret 

+

396 return _wrapper 

+

397 return _decorator 

+

398 

+
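Together, `decorate_methods` and `show_calls` add call logging to a whole class, which is how the Coverage class is instrumented under COVERAGE_DEBUG_CALLS; a sketch on a throwaway class::

    from coverage.debug import decorate_methods, show_calls

    @decorate_methods(show_calls(show_args=True, show_return=True))
    class Greeter(object):
        """A made-up class used only for this illustration."""
        def greet(self, name):
            return "hello, %s" % name

    Greeter().greet("world")    # the call and its return value are logged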

399 

+

400def _clean_stack_line(s): # pragma: debugging 

+

401 """Simplify some paths in a stack trace, for compactness.""" 

+

402 s = s.strip() 

+

403 s = s.replace(os.path.dirname(__file__) + '/', '') 

+

404 s = s.replace(os.path.dirname(os.__file__) + '/', '') 

+

405 s = s.replace(sys.prefix + '/', '') 

+

406 return s 

+
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_disposition_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_disposition_py.html
new file mode 100644
index 000000000..297f385c0
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_disposition_py.html
@@ -0,0 +1,103 @@
+ Coverage for coverage/disposition.py: 58.333%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Simple value objects for tracking what to do with files.""" 

+

5 

+

6 

+

7class FileDisposition(object): 

+

8 """A simple value type for recording what to do with a file.""" 

+

9 pass 

+

10 

+

11 

+

12# FileDisposition "methods": FileDisposition is a pure value object, so it can 

+

13# be implemented in either C or Python. Acting on them is done with these 

+

14# functions. 

+

15 

+

16def disposition_init(cls, original_filename): 

+

17 """Construct and initialize a new FileDisposition object.""" 

+

18 disp = cls() 

+

19 disp.original_filename = original_filename 

+

20 disp.canonical_filename = original_filename 

+

21 disp.source_filename = None 

+

22 disp.trace = False 

+

23 disp.reason = "" 

+

24 disp.file_tracer = None 

+

25 disp.has_dynamic_filename = False 

+

26 return disp 

+

27 

+

28 

+

29def disposition_debug_msg(disp): 

+

30 """Make a nice debug message of what the FileDisposition is doing.""" 

+

31 if disp.trace: 

+

32 msg = "Tracing %r" % (disp.original_filename,) 

+

33 if disp.file_tracer: 

+

34 msg += ": will be traced by %r" % disp.file_tracer 

+

35 else: 

+

36 msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason) 

+

37 return msg 

+
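A small sketch of how these value objects are used: build a disposition for a file, mark it for tracing, and format the debug message (the file name is an example)::

    from coverage.disposition import (
        FileDisposition, disposition_init, disposition_debug_msg,
    )

    disp = disposition_init(FileDisposition, "example/app.py")
    disp.trace = True                     # pretend we decided to trace it
    print(disposition_debug_msg(disp))    # "Tracing 'example/app.py'"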
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_env_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_env_py.html
new file mode 100644
index 000000000..90baca262
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_env_py.html
@@ -0,0 +1,198 @@
+ Coverage for coverage/env.py: 96.875%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Determine facts about the environment.""" 

+

5 

+

6import os 

+

7import platform 

+

8import sys 

+

9 

+

10# Operating systems. 

+

11WINDOWS = sys.platform == "win32" 

+

12LINUX = sys.platform.startswith("linux") 

+

13 

+

14# Python implementations. 

+

15CPYTHON = (platform.python_implementation() == "CPython") 

+

16PYPY = (platform.python_implementation() == "PyPy") 

+

17JYTHON = (platform.python_implementation() == "Jython") 

+

18IRONPYTHON = (platform.python_implementation() == "IronPython") 

+

19 

+

20# Python versions. We amend version_info with one more value, a zero if an 

+

21# official version, or 1 if built from source beyond an official version. 

+

22PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),) 

+

23PY2 = PYVERSION < (3, 0) 

+

24PY3 = PYVERSION >= (3, 0) 

+

25 

+

26if PYPY: 

+

27 PYPYVERSION = sys.pypy_version_info 

+

28 

+

29PYPY2 = PYPY and PY2 

+

30PYPY3 = PYPY and PY3 

+

31 

+

32# Python behavior. 

+

33class PYBEHAVIOR(object): 

+

34 """Flags indicating this Python's behavior.""" 

+

35 

+

36 # Does Python conform to PEP626, Precise line numbers for debugging and other tools. 

+

37 # https://www.python.org/dev/peps/pep-0626 

+

38 pep626 = CPYTHON and (PYVERSION > (3, 10, 0, 'alpha', 4)) 

+

39 

+

40 # Is "if __debug__" optimized away? 

+

41 if PYPY3: 

+

42 optimize_if_debug = True 

+

43 elif PYPY2: 43 ↛ 44: line 43 didn't jump to line 44, because the condition on line 43 was never true

+

44 optimize_if_debug = False 

+

45 else: 

+

46 optimize_if_debug = not pep626 

+

47 

+

48 # Is "if not __debug__" optimized away? 

+

49 optimize_if_not_debug = (not PYPY) and (PYVERSION >= (3, 7, 0, 'alpha', 4)) 

+

50 if pep626: 

+

51 optimize_if_not_debug = False 

+

52 if PYPY3: 

+

53 optimize_if_not_debug = True 

+

54 

+

55 # Is "if not __debug__" optimized away even better? 

+

56 optimize_if_not_debug2 = (not PYPY) and (PYVERSION >= (3, 8, 0, 'beta', 1)) 

+

57 if pep626: 

+

58 optimize_if_not_debug2 = False 

+

59 

+

60 # Do we have yield-from? 

+

61 yield_from = (PYVERSION >= (3, 3)) 

+

62 

+

63 # Do we have PEP 420 namespace packages? 

+

64 namespaces_pep420 = (PYVERSION >= (3, 3)) 

+

65 

+

66 # Do .pyc files have the source file size recorded in them? 

+

67 size_in_pyc = (PYVERSION >= (3, 3)) 

+

68 

+

69 # Do we have async and await syntax? 

+

70 async_syntax = (PYVERSION >= (3, 5)) 

+

71 

+

72 # PEP 448 defined additional unpacking generalizations 

+

73 unpackings_pep448 = (PYVERSION >= (3, 5)) 

+

74 

+

75 # Can co_lnotab have negative deltas? 

+

76 negative_lnotab = (PYVERSION >= (3, 6)) and not (PYPY and PYPYVERSION < (7, 2)) 

+

77 

+

78 # Do .pyc files conform to PEP 552? Hash-based pyc's. 

+

79 hashed_pyc_pep552 = (PYVERSION >= (3, 7, 0, 'alpha', 4)) 

+

80 

+

81 # Python 3.7.0b3 changed the behavior of the sys.path[0] entry for -m. It 

+

82 # used to be an empty string (meaning the current directory). It changed 

+

83 # to be the actual path to the current directory, so that os.chdir wouldn't 

+

84 # affect the outcome. 

+

85 actual_syspath0_dash_m = CPYTHON and (PYVERSION >= (3, 7, 0, 'beta', 3)) 

+

86 

+

87 # 3.7 changed how functions with only docstrings are numbered. 

+

88 docstring_only_function = (not PYPY) and ((3, 7, 0, 'beta', 5) <= PYVERSION <= (3, 10)) 

+

89 

+

90 # When a break/continue/return statement in a try block jumps to a finally 

+

91 # block, does the finally block do the break/continue/return (pre-3.8), or 

+

92 # does the finally jump back to the break/continue/return (3.8) to do the 

+

93 # work? 

+

94 finally_jumps_back = ((3, 8) <= PYVERSION < (3, 10)) 

+

95 

+

96 # When a function is decorated, does the trace function get called for the 

+

97 # @-line and also the def-line (new behavior in 3.8)? Or just the @-line 

+

98 # (old behavior)? 

+

99 trace_decorated_def = (PYVERSION >= (3, 8)) 

+

100 

+

101 # Are while-true loops optimized into absolute jumps with no loop setup? 

+

102 nix_while_true = (PYVERSION >= (3, 8)) 

+

103 

+

104 # Python 3.9a1 made sys.argv[0] and other reported files absolute paths. 

+

105 report_absolute_files = (PYVERSION >= (3, 9)) 

+

106 

+

107 # Lines after break/continue/return/raise are no longer compiled into the 

+

108 # bytecode. They used to be marked as missing, now they aren't executable. 

+

109 omit_after_jump = pep626 

+

110 

+

111 # PyPy has always omitted statements after return. 

+

112 omit_after_return = omit_after_jump or PYPY 

+

113 

+

114 # Modules used to have firstlineno equal to the line number of the first 

+

115 # real line of code. Now they always start at 1. 

+

116 module_firstline_1 = pep626 

+

117 

+

118 # Are "if 0:" lines (and similar) kept in the compiled code? 

+

119 keep_constant_test = pep626 

+

120 

+

121# Coverage.py specifics. 

+

122 

+

123# Are we using the C-implemented trace function? 

+

124C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c' 

+

125 

+

126# Are we coverage-measuring ourselves? 

+

127METACOV = os.getenv('COVERAGE_COVERAGE', '') != '' 

+

128 

+

129# Are we running our test suite? 

+

130# Even when running tests, you can use COVERAGE_TESTING=0 to disable the 

+

131# test-specific behavior like contracts. 

+

132TESTING = os.getenv('COVERAGE_TESTING', '') == 'True' 
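
The flags above all follow one pattern: compare the interpreter's version tuple (and implementation) against the release that changed the behavior, then let the rest of the code branch on the resulting booleans. A minimal sketch of that pattern using only the standard library — an illustration of the technique, not the real layout of coverage/env.py:

    import os
    import platform
    import sys

    CPYTHON = platform.python_implementation() == "CPython"
    PYVERSION = sys.version_info  # e.g. (3, 9, 1, 'final', 0)

    # PEP 626 behavior arrived in CPython 3.10.0a5, so gate on anything newer.
    pep626 = CPYTHON and (PYVERSION > (3, 10, 0, "alpha", 4))

    # Hash-based .pyc files (PEP 552) appeared in 3.7.0a4.
    hashed_pyc_pep552 = PYVERSION >= (3, 7, 0, "alpha", 4)

    # Environment-driven switches use the same shape.
    C_TRACER = os.getenv("COVERAGE_TEST_TRACER", "c") == "c"

    if __name__ == "__main__":
        print({"pep626": pep626, "hashed_pyc_pep552": hashed_pyc_pep552, "C_TRACER": C_TRACER})

Because sys.version_info is a named tuple, comparisons against plain tuples such as (3, 10, 0, "alpha", 4) work element by element, which is what keeps this gating style so compact.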

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_execfile_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_execfile_py.html new file mode 100644 index 000000000..ec0381b00 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_execfile_py.html @@ -0,0 +1,428 @@ + + + + + + Coverage for coverage/execfile.py: 86.691% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Execute files of Python code.""" 

+

5 

+

6import inspect 

+

7import marshal 

+

8import os 

+

9import struct 

+

10import sys 

+

11import types 

+

12 

+

13from coverage import env 

+

14from coverage.backward import BUILTINS 

+

15from coverage.backward import PYC_MAGIC_NUMBER, imp, importlib_util_find_spec 

+

16from coverage.files import canonical_filename, python_reported_file 

+

17from coverage.misc import CoverageException, ExceptionDuringRun, NoCode, NoSource, isolate_module 

+

18from coverage.phystokens import compile_unicode 

+

19from coverage.python import get_python_source 

+

20 

+

21os = isolate_module(os) 

+

22 

+

23 

+

24class DummyLoader(object): 

+

25 """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader. 

+

26 

+

27 Currently only implements the .fullname attribute 

+

28 """ 

+

29 def __init__(self, fullname, *_args): 

+

30 self.fullname = fullname 

+

31 

+

32 

+

33if importlib_util_find_spec: 

+

34 def find_module(modulename): 

+

35 """Find the module named `modulename`. 

+

36 

+

37 Returns the file path of the module, the name of the enclosing 

+

38 package, and the spec. 

+

39 """ 

+

40 try: 

+

41 spec = importlib_util_find_spec(modulename) 

+

42 except ImportError as err: 

+

43 raise NoSource(str(err)) 

+

44 if not spec: 

+

45 raise NoSource("No module named %r" % (modulename,)) 

+

46 pathname = spec.origin 

+

47 packagename = spec.name 

+

48 if spec.submodule_search_locations: 

+

49 mod_main = modulename + ".__main__" 

+

50 spec = importlib_util_find_spec(mod_main) 

+

51 if not spec: 

+

52 raise NoSource( 

+

53 "No module named %s; " 

+

54 "%r is a package and cannot be directly executed" 

+

55 % (mod_main, modulename) 

+

56 ) 

+

57 pathname = spec.origin 

+

58 packagename = spec.name 

+

59 packagename = packagename.rpartition(".")[0] 

+

60 return pathname, packagename, spec 

+

61else: 

+

62 def find_module(modulename): 

+

63 """Find the module named `modulename`. 

+

64 

+

65 Returns the file path of the module, the name of the enclosing 

+

66 package, and None (where a spec would have been). 

+

67 """ 

+

68 openfile = None 

+

69 glo, loc = globals(), locals() 

+

70 try: 

+

71 # Search for the module - inside its parent package, if any - using 

+

72 # standard import mechanics. 

+

73 if '.' in modulename: 

+

74 packagename, name = modulename.rsplit('.', 1) 

+

75 package = __import__(packagename, glo, loc, ['__path__']) 

+

76 searchpath = package.__path__ 

+

77 else: 

+

78 packagename, name = None, modulename 

+

79 searchpath = None # "top-level search" in imp.find_module() 

+

80 openfile, pathname, _ = imp.find_module(name, searchpath) 

+

81 

+

82 # Complain if this is a magic non-file module. 

+

83 if openfile is None and pathname is None: 83 ↛ 84 (line 83 didn't jump to line 84, because the condition on line 83 was never true)

+

84 raise NoSource( 

+

85 "module does not live in a file: %r" % modulename 

+

86 ) 

+

87 

+

88 # If `modulename` is actually a package, not a mere module, then we 

+

89 # pretend to be Python 2.7 and try running its __main__.py script. 

+

90 if openfile is None: 

+

91 packagename = modulename 

+

92 name = '__main__' 

+

93 package = __import__(packagename, glo, loc, ['__path__']) 

+

94 searchpath = package.__path__ 

+

95 openfile, pathname, _ = imp.find_module(name, searchpath) 

+

96 except ImportError as err: 

+

97 raise NoSource(str(err)) 

+

98 finally: 

+

99 if openfile: 99 ↛ 102 (line 99 didn't jump to line 102, because the condition on line 99 was never false)

+

100 openfile.close() 100 ↛ exit (line 100 didn't except from function 'find_module', because the raise on line 97 wasn't executed)

+

101 

+

102 return pathname, packagename, None 

+

103 

+

104 

+

105class PyRunner(object): 

+

106 """Multi-stage execution of Python code. 

+

107 

+

108 This is meant to emulate real Python execution as closely as possible. 

+

109 

+

110 """ 

+

111 def __init__(self, args, as_module=False): 

+

112 self.args = args 

+

113 self.as_module = as_module 

+

114 

+

115 self.arg0 = args[0] 

+

116 self.package = self.modulename = self.pathname = self.loader = self.spec = None 

+

117 

+

118 def prepare(self): 

+

119 """Set sys.path properly. 

+

120 

+

121 This needs to happen before any importing, and without importing anything. 

+

122 """ 

+

123 if self.as_module: 

+

124 if env.PYBEHAVIOR.actual_syspath0_dash_m: 

+

125 path0 = os.getcwd() 

+

126 else: 

+

127 path0 = "" 

+

128 elif os.path.isdir(self.arg0): 

+

129 # Running a directory means running the __main__.py file in that 

+

130 # directory. 

+

131 path0 = self.arg0 

+

132 else: 

+

133 path0 = os.path.abspath(os.path.dirname(self.arg0)) 

+

134 

+

135 if os.path.isdir(sys.path[0]): 

+

136 # sys.path fakery. If we are being run as a command, then sys.path[0] 

+

137 # is the directory of the "coverage" script. If this is so, replace 

+

138 # sys.path[0] with the directory of the file we're running, or the 

+

139 # current directory when running modules. If it isn't so, then we 

+

140 # don't know what's going on, and just leave it alone. 

+

141 top_file = inspect.stack()[-1][0].f_code.co_filename 

+

142 sys_path_0_abs = os.path.abspath(sys.path[0]) 

+

143 top_file_dir_abs = os.path.abspath(os.path.dirname(top_file)) 

+

144 sys_path_0_abs = canonical_filename(sys_path_0_abs) 

+

145 top_file_dir_abs = canonical_filename(top_file_dir_abs) 

+

146 if sys_path_0_abs != top_file_dir_abs: 

+

147 path0 = None 

+

148 

+

149 else: 

+

150 # sys.path[0] is a file. Is the next entry the directory containing 

+

151 # that file? 

+

152 if sys.path[1] == os.path.dirname(sys.path[0]): 

+

153 # Can it be right to always remove that? 

+

154 del sys.path[1] 

+

155 

+

156 if path0 is not None: 

+

157 sys.path[0] = python_reported_file(path0) 

+

158 

+

159 def _prepare2(self): 

+

160 """Do more preparation to run Python code. 

+

161 

+

162 Includes finding the module to run and adjusting sys.argv[0]. 

+

163 This method is allowed to import code. 

+

164 

+

165 """ 

+

166 if self.as_module: 

+

167 self.modulename = self.arg0 

+

168 pathname, self.package, self.spec = find_module(self.modulename) 

+

169 if self.spec is not None: 

+

170 self.modulename = self.spec.name 

+

171 self.loader = DummyLoader(self.modulename) 

+

172 self.pathname = os.path.abspath(pathname) 

+

173 self.args[0] = self.arg0 = self.pathname 

+

174 elif os.path.isdir(self.arg0): 

+

175 # Running a directory means running the __main__.py file in that 

+

176 # directory. 

+

177 for ext in [".py", ".pyc", ".pyo"]: 

+

178 try_filename = os.path.join(self.arg0, "__main__" + ext) 

+

179 if os.path.exists(try_filename): 

+

180 self.arg0 = try_filename 

+

181 break 

+

182 else: 

+

183 raise NoSource("Can't find '__main__' module in '%s'" % self.arg0) 

+

184 

+

185 if env.PY2: 

+

186 self.arg0 = os.path.abspath(self.arg0) 

+

187 

+

188 # Make a spec. I don't know if this is the right way to do it. 

+

189 try: 

+

190 import importlib.machinery 

+

191 except ImportError: 

+

192 pass 

+

193 else: 

+

194 try_filename = python_reported_file(try_filename) 

+

195 self.spec = importlib.machinery.ModuleSpec("__main__", None, origin=try_filename) 

+

196 self.spec.has_location = True 

+

197 self.package = "" 

+

198 self.loader = DummyLoader("__main__") 

+

199 else: 

+

200 if env.PY3: 

+

201 self.loader = DummyLoader("__main__") 

+

202 

+

203 self.arg0 = python_reported_file(self.arg0) 

+

204 

+

205 def run(self): 

+

206 """Run the Python code!""" 

+

207 

+

208 self._prepare2() 

+

209 

+

210 # Create a module to serve as __main__ 

+

211 main_mod = types.ModuleType('__main__') 

+

212 

+

213 from_pyc = self.arg0.endswith((".pyc", ".pyo")) 

+

214 main_mod.__file__ = self.arg0 

+

215 if from_pyc: 

+

216 main_mod.__file__ = main_mod.__file__[:-1] 

+

217 if self.package is not None: 

+

218 main_mod.__package__ = self.package 

+

219 main_mod.__loader__ = self.loader 

+

220 if self.spec is not None: 

+

221 main_mod.__spec__ = self.spec 

+

222 

+

223 main_mod.__builtins__ = BUILTINS 

+

224 

+

225 sys.modules['__main__'] = main_mod 

+

226 

+

227 # Set sys.argv properly. 

+

228 sys.argv = self.args 

+

229 

+

230 try: 

+

231 # Make a code object somehow. 

+

232 if from_pyc: 

+

233 code = make_code_from_pyc(self.arg0) 

+

234 else: 

+

235 code = make_code_from_py(self.arg0) 

+

236 except CoverageException: 

+

237 raise 

+

238 except Exception as exc: 

+

239 msg = "Couldn't run '{filename}' as Python code: {exc.__class__.__name__}: {exc}" 

+

240 raise CoverageException(msg.format(filename=self.arg0, exc=exc)) 

+

241 

+

242 # Execute the code object. 

+

243 # Return to the original directory in case the test code exits in 

+

244 # a non-existent directory. 

+

245 cwd = os.getcwd() 

+

246 try: 

+

247 exec(code, main_mod.__dict__) 

+

248 except SystemExit: # pylint: disable=try-except-raise 

+

249 # The user called sys.exit(). Just pass it along to the upper 

+

250 # layers, where it will be handled. 

+

251 raise 

+

252 except Exception: 

+

253 # Something went wrong while executing the user code. 

+

254 # Get the exc_info, and pack them into an exception that we can 

+

255 # throw up to the outer loop. We peel one layer off the traceback 

+

256 # so that the coverage.py code doesn't appear in the final printed 

+

257 # traceback. 

+

258 typ, err, tb = sys.exc_info() 

+

259 

+

260 # PyPy3 weirdness. If I don't access __context__, then somehow it 

+

261 # is non-None when the exception is reported at the upper layer, 

+

262 # and a nested exception is shown to the user. This getattr fixes 

+

263 # it somehow? https://bitbucket.org/pypy/pypy/issue/1903 

+

264 getattr(err, '__context__', None) 

+

265 

+

266 # Call the excepthook. 

+

267 try: 

+

268 if hasattr(err, "__traceback__"): 

+

269 err.__traceback__ = err.__traceback__.tb_next 

+

270 sys.excepthook(typ, err, tb.tb_next) 

+

271 except SystemExit: # pylint: disable=try-except-raise 

+

272 raise 

+

273 except Exception: 

+

274 # Getting the output right in the case of excepthook 

+

275 # shenanigans is kind of involved. 

+

276 sys.stderr.write("Error in sys.excepthook:\n") 

+

277 typ2, err2, tb2 = sys.exc_info() 

+

278 err2.__suppress_context__ = True 

+

279 if hasattr(err2, "__traceback__"): 

+

280 err2.__traceback__ = err2.__traceback__.tb_next 

+

281 sys.__excepthook__(typ2, err2, tb2.tb_next) 

+

282 sys.stderr.write("\nOriginal exception was:\n") 

+

283 raise ExceptionDuringRun(typ, err, tb.tb_next) 

+

284 else: 

+

285 sys.exit(1) 

+

286 finally: 

+

287 os.chdir(cwd) 

+

288 

+

289 

+

290def run_python_module(args): 

+

291 """Run a Python module, as though with ``python -m name args...``. 

+

292 

+

293 `args` is the argument array to present as sys.argv, including the first 

+

294 element naming the module being executed. 

+

295 

+

296 This is a helper for tests, to encapsulate how to use PyRunner. 

+

297 

+

298 """ 

+

299 runner = PyRunner(args, as_module=True) 

+

300 runner.prepare() 

+

301 runner.run() 

+

302 

+

303 

+

304def run_python_file(args): 

+

305 """Run a Python file as if it were the main program on the command line. 

+

306 

+

307 `args` is the argument array to present as sys.argv, including the first 

+

308 element naming the file being executed. `package` is the name of the 

+

309 enclosing package, if any. 

+

310 

+

311 This is a helper for tests, to encapsulate how to use PyRunner. 

+

312 

+

313 """ 

+

314 runner = PyRunner(args, as_module=False) 

+

315 runner.prepare() 

+

316 runner.run() 

+

317 

+

318 

+

319def make_code_from_py(filename): 

+

320 """Get source from `filename` and make a code object of it.""" 

+

321 # Open the source file. 

+

322 try: 

+

323 source = get_python_source(filename) 

+

324 except (IOError, NoSource): 

+

325 raise NoSource("No file to run: '%s'" % filename) 

+

326 

+

327 code = compile_unicode(source, filename, "exec") 

+

328 return code 

+

329 

+

330 

+

331def make_code_from_pyc(filename): 

+

332 """Get a code object from a .pyc file.""" 

+

333 try: 

+

334 fpyc = open(filename, "rb") 

+

335 except IOError: 

+

336 raise NoCode("No file to run: '%s'" % filename) 

+

337 

+

338 with fpyc: 

+

339 # First four bytes are a version-specific magic number. It has to 

+

340 # match or we won't run the file. 

+

341 magic = fpyc.read(4) 

+

342 if magic != PYC_MAGIC_NUMBER: 

+

343 raise NoCode("Bad magic number in .pyc file: {} != {}".format(magic, PYC_MAGIC_NUMBER)) 

+

344 

+

345 date_based = True 

+

346 if env.PYBEHAVIOR.hashed_pyc_pep552: 

+

347 flags = struct.unpack('<L', fpyc.read(4))[0] 

+

348 hash_based = flags & 0x01 

+

349 if hash_based: 349 ↛ 350 (line 349 didn't jump to line 350, because the condition on line 349 was never true)

+

350 fpyc.read(8) # Skip the hash. 

+

351 date_based = False 

+

352 if date_based: 352 ↛ 360 (line 352 didn't jump to line 360, because the condition on line 352 was never false)

+

353 # Skip the junk in the header that we don't need. 

+

354 fpyc.read(4) # Skip the moddate. 

+

355 if env.PYBEHAVIOR.size_in_pyc: 

+

356 # 3.3 added another long to the header (size), skip it. 

+

357 fpyc.read(4) 

+

358 

+

359 # The rest of the file is the code object we want. 

+

360 code = marshal.load(fpyc) 

+

361 

+

362 return code 
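
The .pyc branch above boils down to three steps: verify the magic number, skip the PEP 552 header fields, then marshal.load the code object. A minimal sketch of that sequence, assuming Python 3.7+ and using the stdlib importlib.util.MAGIC_NUMBER in place of the PYC_MAGIC_NUMBER backward-compatibility shim (the path in the usage comment is made up):

    import importlib.util
    import marshal
    import struct

    def load_pyc_code(filename):
        """Return the code object stored in a CPython 3.7+ .pyc file."""
        with open(filename, "rb") as fpyc:
            magic = fpyc.read(4)
            if magic != importlib.util.MAGIC_NUMBER:
                raise ValueError("Bad magic number in %r" % filename)
            struct.unpack("<L", fpyc.read(4))  # flags word (bit 0 = hash-based)
            fpyc.read(8)  # either the 8-byte source hash, or 4-byte mtime + 4-byte size
            return marshal.load(fpyc)

    # Usage sketch (hypothetical path):
    # exec(load_pyc_code("__pycache__/mod.cpython-39.pyc"), {"__name__": "__main__"})

Whether the file is hash-based or timestamp-based, the header after the flags word is eight bytes on 3.7+, which is why the sketch can skip it unconditionally while the code above also distinguishes the older header layouts.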

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_files_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_files_py.html new file mode 100644 index 000000000..867431c3e --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_files_py.html @@ -0,0 +1,500 @@ + + + + + + Coverage for coverage/files.py: 97.627% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""File wrangling.""" 

+

5 

+

6import hashlib 

+

7import fnmatch 

+

8import ntpath 

+

9import os 

+

10import os.path 

+

11import posixpath 

+

12import re 

+

13import sys 

+

14 

+

15from coverage import env 

+

16from coverage.backward import unicode_class 

+

17from coverage.misc import contract, CoverageException, join_regex, isolate_module 

+

18 

+

19 

+

20os = isolate_module(os) 

+

21 

+

22 

+

23def set_relative_directory(): 

+

24 """Set the directory that `relative_filename` will be relative to.""" 

+

25 global RELATIVE_DIR, CANONICAL_FILENAME_CACHE 

+

26 

+

27 # The absolute path to our current directory. 

+

28 RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep) 

+

29 

+

30 # Cache of results of calling the canonical_filename() method, to 

+

31 # avoid duplicating work. 

+

32 CANONICAL_FILENAME_CACHE = {} 

+

33 

+

34 

+

35def relative_directory(): 

+

36 """Return the directory that `relative_filename` is relative to.""" 

+

37 return RELATIVE_DIR 

+

38 

+

39 

+

40@contract(returns='unicode') 

+

41def relative_filename(filename): 

+

42 """Return the relative form of `filename`. 

+

43 

+

44 The file name will be relative to the current directory when the 

+

45 `set_relative_directory` was called. 

+

46 

+

47 """ 

+

48 fnorm = os.path.normcase(filename) 

+

49 if fnorm.startswith(RELATIVE_DIR): 

+

50 filename = filename[len(RELATIVE_DIR):] 

+

51 return unicode_filename(filename) 

+

52 

+

53 

+

54@contract(returns='unicode') 

+

55def canonical_filename(filename): 

+

56 """Return a canonical file name for `filename`. 

+

57 

+

58 An absolute path with no redundant components and normalized case. 

+

59 

+

60 """ 

+

61 if filename not in CANONICAL_FILENAME_CACHE: 

+

62 cf = filename 

+

63 if not os.path.isabs(filename): 

+

64 for path in [os.curdir] + sys.path: 

+

65 if path is None: 65 ↛ 66 (line 65 didn't jump to line 66, because the condition on line 65 was never true)

+

66 continue 

+

67 f = os.path.join(path, filename) 

+

68 try: 

+

69 exists = os.path.exists(f) 

+

70 except UnicodeError: 

+

71 exists = False 

+

72 if exists: 

+

73 cf = f 

+

74 break 

+

75 cf = abs_file(cf) 

+

76 CANONICAL_FILENAME_CACHE[filename] = cf 

+

77 return CANONICAL_FILENAME_CACHE[filename] 

+

78 

+

79 

+

80MAX_FLAT = 200 

+

81 

+

82@contract(filename='unicode', returns='unicode') 

+

83def flat_rootname(filename): 

+

84 """A base for a flat file name to correspond to this file. 

+

85 

+

86 Useful for writing files about the code where you want all the files in 

+

87 the same directory, but need to differentiate same-named files from 

+

88 different directories. 

+

89 

+

90 For example, the file a/b/c.py will return 'a_b_c_py' 

+

91 

+

92 """ 

+

93 name = ntpath.splitdrive(filename)[1] 

+

94 name = re.sub(r"[\\/.:]", "_", name) 

+

95 if len(name) > MAX_FLAT: 

+

96 h = hashlib.sha1(name.encode('UTF-8')).hexdigest() 

+

97 name = name[-(MAX_FLAT-len(h)-1):] + '_' + h 

+

98 return name 

+

99 

+

100 

+

101if env.WINDOWS: 

+

102 

+

103 _ACTUAL_PATH_CACHE = {} 

+

104 _ACTUAL_PATH_LIST_CACHE = {} 

+

105 

+

106 def actual_path(path): 

+

107 """Get the actual path of `path`, including the correct case.""" 

+

108 if env.PY2 and isinstance(path, unicode_class): 

+

109 path = path.encode(sys.getfilesystemencoding()) 

+

110 if path in _ACTUAL_PATH_CACHE: 

+

111 return _ACTUAL_PATH_CACHE[path] 

+

112 

+

113 head, tail = os.path.split(path) 

+

114 if not tail: 

+

115 # This means head is the drive spec: normalize it. 

+

116 actpath = head.upper() 

+

117 elif not head: 117 ↛ 118 (line 117 didn't jump to line 118, because the condition on line 117 was never true)

+

118 actpath = tail 

+

119 else: 

+

120 head = actual_path(head) 

+

121 if head in _ACTUAL_PATH_LIST_CACHE: 

+

122 files = _ACTUAL_PATH_LIST_CACHE[head] 

+

123 else: 

+

124 try: 

+

125 files = os.listdir(head) 

+

126 except Exception: 

+

127 # This will raise OSError, or this bizarre TypeError: 

+

128 # https://bugs.python.org/issue1776160 

+

129 files = [] 

+

130 _ACTUAL_PATH_LIST_CACHE[head] = files 

+

131 normtail = os.path.normcase(tail) 

+

132 for f in files: 

+

133 if os.path.normcase(f) == normtail: 

+

134 tail = f 

+

135 break 

+

136 actpath = os.path.join(head, tail) 

+

137 _ACTUAL_PATH_CACHE[path] = actpath 

+

138 return actpath 

+

139 

+

140else: 

+

141 def actual_path(filename): 

+

142 """The actual path for non-Windows platforms.""" 

+

143 return filename 

+

144 

+

145 

+

146if env.PY2: 

+

147 @contract(returns='unicode') 

+

148 def unicode_filename(filename): 

+

149 """Return a Unicode version of `filename`.""" 

+

150 if isinstance(filename, str): 

+

151 encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() 

+

152 filename = filename.decode(encoding, "replace") 

+

153 return filename 

+

154else: 

+

155 @contract(filename='unicode', returns='unicode') 

+

156 def unicode_filename(filename): 

+

157 """Return a Unicode version of `filename`.""" 

+

158 return filename 

+

159 

+

160 

+

161@contract(returns='unicode') 

+

162def abs_file(path): 

+

163 """Return the absolute normalized form of `path`.""" 

+

164 try: 

+

165 path = os.path.realpath(path) 

+

166 except UnicodeError: 

+

167 pass 

+

168 path = os.path.abspath(path) 

+

169 path = actual_path(path) 

+

170 path = unicode_filename(path) 

+

171 return path 

+

172 

+

173 

+

174def python_reported_file(filename): 

+

175 """Return the string as Python would describe this file name.""" 

+

176 if env.PYBEHAVIOR.report_absolute_files: 

+

177 filename = os.path.abspath(filename) 

+

178 return filename 

+

179 

+

180 

+

181RELATIVE_DIR = None 

+

182CANONICAL_FILENAME_CACHE = None 

+

183set_relative_directory() 

+

184 

+

185 

+

186def isabs_anywhere(filename): 

+

187 """Is `filename` an absolute path on any OS?""" 

+

188 return ntpath.isabs(filename) or posixpath.isabs(filename) 

+

189 

+

190 

+

191def prep_patterns(patterns): 

+

192 """Prepare the file patterns for use in a `FnmatchMatcher`. 

+

193 

+

194 If a pattern starts with a wildcard, it is used as a pattern 

+

195 as-is. If it does not start with a wildcard, then it is made 

+

196 absolute with the current directory. 

+

197 

+

198 If `patterns` is None, an empty list is returned. 

+

199 

+

200 """ 

+

201 prepped = [] 

+

202 for p in patterns or []: 

+

203 if p.startswith(("*", "?")): 

+

204 prepped.append(p) 

+

205 else: 

+

206 prepped.append(abs_file(p)) 

+

207 return prepped 

+

208 

+

209 

+

210class TreeMatcher(object): 

+

211 """A matcher for files in a tree. 

+

212 

+

213 Construct with a list of paths, either files or directories. Paths match 

+

214 with the `match` method if they are one of the files, or if they are 

+

215 somewhere in a subtree rooted at one of the directories. 

+

216 

+

217 """ 

+

218 def __init__(self, paths): 

+

219 self.paths = list(paths) 

+

220 

+

221 def __repr__(self): 

+

222 return "<TreeMatcher %r>" % self.paths 

+

223 

+

224 def info(self): 

+

225 """A list of strings for displaying when dumping state.""" 

+

226 return self.paths 

+

227 

+

228 def match(self, fpath): 

+

229 """Does `fpath` indicate a file in one of our trees?""" 

+

230 for p in self.paths: 

+

231 if fpath.startswith(p): 

+

232 if fpath == p: 

+

233 # This is the same file! 

+

234 return True 

+

235 if fpath[len(p)] == os.sep: 

+

236 # This is a file in the directory 

+

237 return True 

+

238 return False 

+

239 

+

240 

+

241class ModuleMatcher(object): 

+

242 """A matcher for modules in a tree.""" 

+

243 def __init__(self, module_names): 

+

244 self.modules = list(module_names) 

+

245 

+

246 def __repr__(self): 

+

247 return "<ModuleMatcher %r>" % (self.modules) 

+

248 

+

249 def info(self): 

+

250 """A list of strings for displaying when dumping state.""" 

+

251 return self.modules 

+

252 

+

253 def match(self, module_name): 

+

254 """Does `module_name` indicate a module in one of our packages?""" 

+

255 if not module_name: 

+

256 return False 

+

257 

+

258 for m in self.modules: 

+

259 if module_name.startswith(m): 

+

260 if module_name == m: 

+

261 return True 

+

262 if module_name[len(m)] == '.': 

+

263 # This is a module in the package 

+

264 return True 

+

265 

+

266 return False 

+

267 

+

268 

+

269class FnmatchMatcher(object): 

+

270 """A matcher for files by file name pattern.""" 

+

271 def __init__(self, pats): 

+

272 self.pats = list(pats) 

+

273 self.re = fnmatches_to_regex(self.pats, case_insensitive=env.WINDOWS) 

+

274 

+

275 def __repr__(self): 

+

276 return "<FnmatchMatcher %r>" % self.pats 

+

277 

+

278 def info(self): 

+

279 """A list of strings for displaying when dumping state.""" 

+

280 return self.pats 

+

281 

+

282 def match(self, fpath): 

+

283 """Does `fpath` match one of our file name patterns?""" 

+

284 return self.re.match(fpath) is not None 

+

285 

+

286 

+

287def sep(s): 

+

288 """Find the path separator used in this string, or os.sep if none.""" 

+

289 sep_match = re.search(r"[\\/]", s) 

+

290 if sep_match: 

+

291 the_sep = sep_match.group(0) 

+

292 else: 

+

293 the_sep = os.sep 

+

294 return the_sep 

+

295 

+

296 

+

297def fnmatches_to_regex(patterns, case_insensitive=False, partial=False): 

+

298 """Convert fnmatch patterns to a compiled regex that matches any of them. 

+

299 

+

300 Slashes are always converted to match either slash or backslash, for 

+

301 Windows support, even when running elsewhere. 

+

302 

+

303 If `partial` is true, then the pattern will match if the target string 

+

304 starts with the pattern. Otherwise, it must match the entire string. 

+

305 

+

306 Returns: a compiled regex object. Use the .match method to compare target 

+

307 strings. 

+

308 

+

309 """ 

+

310 regexes = (fnmatch.translate(pattern) for pattern in patterns) 

+

311 # Python3.7 fnmatch translates "/" as "/". Before that, it translates as "\/", 

+

312 # so we have to deal with maybe a backslash. 

+

313 regexes = (re.sub(r"\\?/", r"[\\\\/]", regex) for regex in regexes) 

+

314 

+

315 if partial: 

+

316 # fnmatch always adds a \Z to match the whole string, which we don't 

+

317 # want, so we remove the \Z. While removing it, we only replace \Z if 

+

318 # followed by paren (introducing flags), or at end, to keep from 

+

319 # destroying a literal \Z in the pattern. 

+

320 regexes = (re.sub(r'\\Z(\(\?|$)', r'\1', regex) for regex in regexes) 

+

321 

+

322 flags = 0 

+

323 if case_insensitive: 

+

324 flags |= re.IGNORECASE 

+

325 compiled = re.compile(join_regex(regexes), flags=flags) 

+

326 

+

327 return compiled 

+

328 

+

329 

+

330class PathAliases(object): 

+

331 """A collection of aliases for paths. 

+

332 

+

333 When combining data files from remote machines, often the paths to source 

+

334 code are different, for example, due to OS differences, or because of 

+

335 serialized checkouts on continuous integration machines. 

+

336 

+

337 A `PathAliases` object tracks a list of pattern/result pairs, and can 

+

338 map a path through those aliases to produce a unified path. 

+

339 

+

340 """ 

+

341 def __init__(self): 

+

342 self.aliases = [] 

+

343 

+

344 def pprint(self): # pragma: debugging 

+

345 """Dump the important parts of the PathAliases, for debugging.""" 

+

346 for regex, result in self.aliases: 

+

347 print("{!r} --> {!r}".format(regex.pattern, result)) 

+

348 

+

349 def add(self, pattern, result): 

+

350 """Add the `pattern`/`result` pair to the list of aliases. 

+

351 

+

352 `pattern` is an `fnmatch`-style pattern. `result` is a simple 

+

353 string. When mapping paths, if a path starts with a match against 

+

354 `pattern`, then that match is replaced with `result`. This models 

+

355 isomorphic source trees being rooted at different places on two 

+

356 different machines. 

+

357 

+

358 `pattern` can't end with a wildcard component, since that would 

+

359 match an entire tree, and not just its root. 

+

360 

+

361 """ 

+

362 pattern_sep = sep(pattern) 

+

363 

+

364 if len(pattern) > 1: 

+

365 pattern = pattern.rstrip(r"\/") 

+

366 

+

367 # The pattern can't end with a wildcard component. 

+

368 if pattern.endswith("*"): 

+

369 raise CoverageException("Pattern must not end with wildcards.") 

+

370 

+

371 # The pattern is meant to match a filepath. Let's make it absolute 

+

372 # unless it already is, or is meant to match any prefix. 

+

373 if not pattern.startswith('*') and not isabs_anywhere(pattern + 

+

374 pattern_sep): 

+

375 pattern = abs_file(pattern) 

+

376 if not pattern.endswith(pattern_sep): 

+

377 pattern += pattern_sep 

+

378 

+

379 # Make a regex from the pattern. 

+

380 regex = fnmatches_to_regex([pattern], case_insensitive=True, partial=True) 

+

381 

+

382 # Normalize the result: it must end with a path separator. 

+

383 result_sep = sep(result) 

+

384 result = result.rstrip(r"\/") + result_sep 

+

385 self.aliases.append((regex, result)) 

+

386 

+

387 def map(self, path): 

+

388 """Map `path` through the aliases. 

+

389 

+

390 `path` is checked against all of the patterns. The first pattern to 

+

391 match is used to replace the root of the path with the result root. 

+

392 Only one pattern is ever used. If no patterns match, `path` is 

+

393 returned unchanged. 

+

394 

+

395 The separator style in the result is made to match that of the result 

+

396 in the alias. 

+

397 

+

398 Returns the mapped path. If a mapping has happened, this is a 

+

399 canonical path. If no mapping has happened, it is the original value 

+

400 of `path` unchanged. 

+

401 

+

402 """ 

+

403 for regex, result in self.aliases: 

+

404 m = regex.match(path) 

+

405 if m: 

+

406 new = path.replace(m.group(0), result) 

+

407 new = new.replace(sep(path), sep(result)) 

+

408 new = canonical_filename(new) 

+

409 return new 

+

410 return path 

+

411 

+

412 

+

413def find_python_files(dirname): 

+

414 """Yield all of the importable Python files in `dirname`, recursively. 

+

415 

+

416 To be importable, the files have to be in a directory with a __init__.py, 

+

417 except for `dirname` itself, which isn't required to have one. The 

+

418 assumption is that `dirname` was specified directly, so the user knows 

+

419 best, but sub-directories are checked for a __init__.py to be sure we only 

+

420 find the importable files. 

+

421 

+

422 """ 

+

423 for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)): 

+

424 if i > 0 and '__init__.py' not in filenames: 

+

425 # If a directory doesn't have __init__.py, then it isn't 

+

426 # importable and neither are its files 

+

427 del dirnames[:] 

+

428 continue 

+

429 for filename in filenames: 

+

430 # We're only interested in files that look like reasonable Python 

+

431 # files: Must end with .py or .pyw, and must not have certain funny 

+

432 # characters that probably mean they are editor junk. 

+

433 if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename): 

+

434 yield os.path.join(dirpath, filename) 
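
Every matcher in files.py ultimately leans on fnmatches_to_regex, whose core move is: translate each fnmatch pattern with the standard library, rewrite "/" so it matches either kind of slash, and join the results into one alternation. A minimal sketch of that technique (the sample patterns and paths are invented, and the real function above also supports partial matches for PathAliases):

    import fnmatch
    import re

    def patterns_to_regex(patterns, case_insensitive=False):
        regexes = [fnmatch.translate(p) for p in patterns]
        # Depending on the Python version, translate() emits "/" or "\/",
        # so allow an optional backslash before the slash when rewriting.
        regexes = [re.sub(r"\\?/", r"[\\\\/]", rx) for rx in regexes]
        flags = re.IGNORECASE if case_insensitive else 0
        return re.compile("|".join("(?:%s)" % rx for rx in regexes), flags)

    matcher = patterns_to_regex(["*/tests/*", "src/*.py"], case_insensitive=True)
    print(bool(matcher.match(r"C:\proj\tests\test_x.py")))  # True: backslashes accepted
    print(bool(matcher.match("src/files.py")))              # True: plain POSIX path

The slash rewrite is what lets one compiled regex serve paths recorded on both Windows and POSIX machines, which is exactly the situation PathAliases.map is built for.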

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_html.js b/reports/20210322_66173dc24d/htmlcov/coverage_html.js new file mode 100644 index 000000000..27b49b36f --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_html.js @@ -0,0 +1,616 @@ +// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 +// For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt + +// Coverage.py HTML report browser code. +/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */ +/*global coverage: true, document, window, $ */ + +coverage = {}; + +// Find all the elements with shortkey_* class, and use them to assign a shortcut key. +coverage.assign_shortkeys = function () { + $("*[class*='shortkey_']").each(function (i, e) { + $.each($(e).attr("class").split(" "), function (i, c) { + if (/^shortkey_/.test(c)) { + $(document).bind('keydown', c.substr(9), function () { + $(e).click(); + }); + } + }); + }); +}; + +// Create the events for the help panel. +coverage.wire_up_help_panel = function () { + $("#keyboard_icon").click(function () { + // Show the help panel, and position it so the keyboard icon in the + // panel is in the same place as the keyboard icon in the header. + $(".help_panel").show(); + var koff = $("#keyboard_icon").offset(); + var poff = $("#panel_icon").position(); + $(".help_panel").offset({ + top: koff.top-poff.top, + left: koff.left-poff.left + }); + }); + $("#panel_icon").click(function () { + $(".help_panel").hide(); + }); +}; + +// Create the events for the filter box. +coverage.wire_up_filter = function () { + // Cache elements. + var table = $("table.index"); + var table_rows = table.find("tbody tr"); + var table_row_names = table_rows.find("td.name a"); + var no_rows = $("#no_rows"); + + // Create a duplicate table footer that we can modify with dynamic summed values. + var table_footer = $("table.index tfoot tr"); + var table_dynamic_footer = table_footer.clone(); + table_dynamic_footer.attr('class', 'total_dynamic hidden'); + table_footer.after(table_dynamic_footer); + + // Observe filter keyevents. + $("#filter").on("keyup change", $.debounce(150, function (event) { + var filter_value = $(this).val(); + + if (filter_value === "") { + // Filter box is empty, remove all filtering. + table_rows.removeClass("hidden"); + + // Show standard footer, hide dynamic footer. + table_footer.removeClass("hidden"); + table_dynamic_footer.addClass("hidden"); + + // Hide placeholder, show table. + if (no_rows.length > 0) { + no_rows.hide(); + } + table.show(); + + } + else { + // Filter table items by value. + var hidden = 0; + var shown = 0; + + // Hide / show elements. + $.each(table_row_names, function () { + var element = $(this).parents("tr"); + + if ($(this).text().indexOf(filter_value) === -1) { + // hide + element.addClass("hidden"); + hidden++; + } + else { + // show + element.removeClass("hidden"); + shown++; + } + }); + + // Show placeholder if no rows will be displayed. + if (no_rows.length > 0) { + if (shown === 0) { + // Show placeholder, hide table. + no_rows.show(); + table.hide(); + } + else { + // Hide placeholder, show table. + no_rows.hide(); + table.show(); + } + } + + // Manage dynamic header: + if (hidden > 0) { + // Calculate new dynamic sum values based on visible rows. + for (var column = 2; column < 20; column++) { + // Calculate summed value. + var cells = table_rows.find('td:nth-child(' + column + ')'); + if (!cells.length) { + // No more columns...! 
+ break; + } + + var sum = 0, numer = 0, denom = 0; + $.each(cells.filter(':visible'), function () { + var ratio = $(this).data("ratio"); + if (ratio) { + var splitted = ratio.split(" "); + numer += parseInt(splitted[0], 10); + denom += parseInt(splitted[1], 10); + } + else { + sum += parseInt(this.innerHTML, 10); + } + }); + + // Get footer cell element. + var footer_cell = table_dynamic_footer.find('td:nth-child(' + column + ')'); + + // Set value into dynamic footer cell element. + if (cells[0].innerHTML.indexOf('%') > -1) { + // Percentage columns use the numerator and denominator, + // and adapt to the number of decimal places. + var match = /\.([0-9]+)/.exec(cells[0].innerHTML); + var places = 0; + if (match) { + places = match[1].length; + } + var pct = numer * 100 / denom; + footer_cell.text(pct.toFixed(places) + '%'); + } + else { + footer_cell.text(sum); + } + } + + // Hide standard footer, show dynamic footer. + table_footer.addClass("hidden"); + table_dynamic_footer.removeClass("hidden"); + } + else { + // Show standard footer, hide dynamic footer. + table_footer.removeClass("hidden"); + table_dynamic_footer.addClass("hidden"); + } + } + })); + + // Trigger change event on setup, to force filter on page refresh + // (filter value may still be present). + $("#filter").trigger("change"); +}; + +// Loaded on index.html +coverage.index_ready = function ($) { + // Look for a localStorage item containing previous sort settings: + var sort_list = []; + var storage_name = "COVERAGE_INDEX_SORT"; + var stored_list = undefined; + try { + stored_list = localStorage.getItem(storage_name); + } catch(err) {} + + if (stored_list) { + sort_list = JSON.parse('[[' + stored_list + ']]'); + } + + // Create a new widget which exists only to save and restore + // the sort order: + $.tablesorter.addWidget({ + id: "persistentSort", + + // Format is called by the widget before displaying: + format: function (table) { + if (table.config.sortList.length === 0 && sort_list.length > 0) { + // This table hasn't been sorted before - we'll use + // our stored settings: + $(table).trigger('sorton', [sort_list]); + } + else { + // This is not the first load - something has + // already defined sorting so we'll just update + // our stored value to match: + sort_list = table.config.sortList; + } + } + }); + + // Configure our tablesorter to handle the variable number of + // columns produced depending on report options: + var headers = []; + var col_count = $("table.index > thead > tr > th").length; + + headers[0] = { sorter: 'text' }; + for (i = 1; i < col_count-1; i++) { + headers[i] = { sorter: 'digit' }; + } + headers[col_count-1] = { sorter: 'percent' }; + + // Enable the table sorter: + $("table.index").tablesorter({ + widgets: ['persistentSort'], + headers: headers + }); + + coverage.assign_shortkeys(); + coverage.wire_up_help_panel(); + coverage.wire_up_filter(); + + // Watch for page unload events so we can save the final sort settings: + $(window).on("unload", function () { + try { + localStorage.setItem(storage_name, sort_list.toString()) + } catch(err) {} + }); +}; + +// -- pyfile stuff -- + +coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS"; + +coverage.pyfile_ready = function ($) { + // If we're directed to a particular line number, highlight the line. 
+ var frag = location.hash; + if (frag.length > 2 && frag[1] === 't') { + $(frag).addClass('highlight'); + coverage.set_sel(parseInt(frag.substr(2), 10)); + } + else { + coverage.set_sel(0); + } + + $(document) + .bind('keydown', 'j', coverage.to_next_chunk_nicely) + .bind('keydown', 'k', coverage.to_prev_chunk_nicely) + .bind('keydown', '0', coverage.to_top) + .bind('keydown', '1', coverage.to_first_chunk) + ; + + $(".button_toggle_run").click(function (evt) {coverage.toggle_lines(evt.target, "run");}); + $(".button_toggle_exc").click(function (evt) {coverage.toggle_lines(evt.target, "exc");}); + $(".button_toggle_mis").click(function (evt) {coverage.toggle_lines(evt.target, "mis");}); + $(".button_toggle_par").click(function (evt) {coverage.toggle_lines(evt.target, "par");}); + + coverage.filters = undefined; + try { + coverage.filters = localStorage.getItem(coverage.LINE_FILTERS_STORAGE); + } catch(err) {} + + if (coverage.filters) { + coverage.filters = JSON.parse(coverage.filters); + } + else { + coverage.filters = {run: false, exc: true, mis: true, par: true}; + } + + for (cls in coverage.filters) { + coverage.set_line_visibilty(cls, coverage.filters[cls]); + } + + coverage.assign_shortkeys(); + coverage.wire_up_help_panel(); + + coverage.init_scroll_markers(); + + // Rebuild scroll markers when the window height changes. + $(window).resize(coverage.build_scroll_markers); +}; + +coverage.toggle_lines = function (btn, cls) { + var onoff = !$(btn).hasClass("show_" + cls); + coverage.set_line_visibilty(cls, onoff); + coverage.build_scroll_markers(); + coverage.filters[cls] = onoff; + try { + localStorage.setItem(coverage.LINE_FILTERS_STORAGE, JSON.stringify(coverage.filters)); + } catch(err) {} +}; + +coverage.set_line_visibilty = function (cls, onoff) { + var show = "show_" + cls; + var btn = $(".button_toggle_" + cls); + if (onoff) { + $("#source ." + cls).addClass(show); + btn.addClass(show); + } + else { + $("#source ." + cls).removeClass(show); + btn.removeClass(show); + } +}; + +// Return the nth line div. +coverage.line_elt = function (n) { + return $("#t" + n); +}; + +// Return the nth line number div. +coverage.num_elt = function (n) { + return $("#n" + n); +}; + +// Set the selection. b and e are line numbers. +coverage.set_sel = function (b, e) { + // The first line selected. + coverage.sel_begin = b; + // The next line not selected. + coverage.sel_end = (e === undefined) ? b+1 : e; +}; + +coverage.to_top = function () { + coverage.set_sel(0, 1); + coverage.scroll_window(0); +}; + +coverage.to_first_chunk = function () { + coverage.set_sel(0, 1); + coverage.to_next_chunk(); +}; + +// Return a string indicating what kind of chunk this line belongs to, +// or null if not a chunk. +coverage.chunk_indicator = function (line_elt) { + var klass = line_elt.attr('class'); + if (klass) { + var m = klass.match(/\bshow_\w+\b/); + if (m) { + return m[0]; + } + } + return null; +}; + +coverage.to_next_chunk = function () { + var c = coverage; + + // Find the start of the next colored chunk. + var probe = c.sel_end; + var chunk_indicator, probe_line; + while (true) { + probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + return; + } + chunk_indicator = c.chunk_indicator(probe_line); + if (chunk_indicator) { + break; + } + probe++; + } + + // There's a next chunk, `probe` points to it. + var begin = probe; + + // Find the end of this chunk. 
+ var next_indicator = chunk_indicator; + while (next_indicator === chunk_indicator) { + probe++; + probe_line = c.line_elt(probe); + next_indicator = c.chunk_indicator(probe_line); + } + c.set_sel(begin, probe); + c.show_selection(); +}; + +coverage.to_prev_chunk = function () { + var c = coverage; + + // Find the end of the prev colored chunk. + var probe = c.sel_begin-1; + var probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + return; + } + var chunk_indicator = c.chunk_indicator(probe_line); + while (probe > 0 && !chunk_indicator) { + probe--; + probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + return; + } + chunk_indicator = c.chunk_indicator(probe_line); + } + + // There's a prev chunk, `probe` points to its last line. + var end = probe+1; + + // Find the beginning of this chunk. + var prev_indicator = chunk_indicator; + while (prev_indicator === chunk_indicator) { + probe--; + probe_line = c.line_elt(probe); + prev_indicator = c.chunk_indicator(probe_line); + } + c.set_sel(probe+1, end); + c.show_selection(); +}; + +// Return the line number of the line nearest pixel position pos +coverage.line_at_pos = function (pos) { + var l1 = coverage.line_elt(1), + l2 = coverage.line_elt(2), + result; + if (l1.length && l2.length) { + var l1_top = l1.offset().top, + line_height = l2.offset().top - l1_top, + nlines = (pos - l1_top) / line_height; + if (nlines < 1) { + result = 1; + } + else { + result = Math.ceil(nlines); + } + } + else { + result = 1; + } + return result; +}; + +// Returns 0, 1, or 2: how many of the two ends of the selection are on +// the screen right now? +coverage.selection_ends_on_screen = function () { + if (coverage.sel_begin === 0) { + return 0; + } + + var top = coverage.line_elt(coverage.sel_begin); + var next = coverage.line_elt(coverage.sel_end-1); + + return ( + (top.isOnScreen() ? 1 : 0) + + (next.isOnScreen() ? 1 : 0) + ); +}; + +coverage.to_next_chunk_nicely = function () { + coverage.finish_scrolling(); + if (coverage.selection_ends_on_screen() === 0) { + // The selection is entirely off the screen: select the top line on + // the screen. + var win = $(window); + coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop())); + } + coverage.to_next_chunk(); +}; + +coverage.to_prev_chunk_nicely = function () { + coverage.finish_scrolling(); + if (coverage.selection_ends_on_screen() === 0) { + var win = $(window); + coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop() + win.height())); + } + coverage.to_prev_chunk(); +}; + +// Select line number lineno, or if it is in a colored chunk, select the +// entire chunk +coverage.select_line_or_chunk = function (lineno) { + var c = coverage; + var probe_line = c.line_elt(lineno); + if (probe_line.length === 0) { + return; + } + var the_indicator = c.chunk_indicator(probe_line); + if (the_indicator) { + // The line is in a highlighted chunk. + // Search backward for the first line. + var probe = lineno; + var indicator = the_indicator; + while (probe > 0 && indicator === the_indicator) { + probe--; + probe_line = c.line_elt(probe); + if (probe_line.length === 0) { + break; + } + indicator = c.chunk_indicator(probe_line); + } + var begin = probe + 1; + + // Search forward for the last line. 
+ probe = lineno; + indicator = the_indicator; + while (indicator === the_indicator) { + probe++; + probe_line = c.line_elt(probe); + indicator = c.chunk_indicator(probe_line); + } + + coverage.set_sel(begin, probe); + } + else { + coverage.set_sel(lineno); + } +}; + +coverage.show_selection = function () { + var c = coverage; + + // Highlight the lines in the chunk + $(".linenos .highlight").removeClass("highlight"); + for (var probe = c.sel_begin; probe > 0 && probe < c.sel_end; probe++) { + c.num_elt(probe).addClass("highlight"); + } + + c.scroll_to_selection(); +}; + +coverage.scroll_to_selection = function () { + // Scroll the page if the chunk isn't fully visible. + if (coverage.selection_ends_on_screen() < 2) { + // Need to move the page. The html,body trick makes it scroll in all + // browsers, got it from http://stackoverflow.com/questions/3042651 + var top = coverage.line_elt(coverage.sel_begin); + var top_pos = parseInt(top.offset().top, 10); + coverage.scroll_window(top_pos - 30); + } +}; + +coverage.scroll_window = function (to_pos) { + $("html,body").animate({scrollTop: to_pos}, 200); +}; + +coverage.finish_scrolling = function () { + $("html,body").stop(true, true); +}; + +coverage.init_scroll_markers = function () { + var c = coverage; + // Init some variables + c.lines_len = $('#source p').length; + c.body_h = $('body').height(); + c.header_h = $('div#header').height(); + + // Build html + c.build_scroll_markers(); +}; + +coverage.build_scroll_markers = function () { + var c = coverage, + min_line_height = 3, + max_line_height = 10, + visible_window_h = $(window).height(); + + c.lines_to_mark = $('#source').find('p.show_run, p.show_mis, p.show_exc, p.show_exc, p.show_par'); + $('#scroll_marker').remove(); + // Don't build markers if the window has no scroll bar. + if (c.body_h <= visible_window_h) { + return; + } + + $("body").append("
 
"); + var scroll_marker = $('#scroll_marker'), + marker_scale = scroll_marker.height() / c.body_h, + line_height = scroll_marker.height() / c.lines_len; + + // Line height must be between the extremes. + if (line_height > min_line_height) { + if (line_height > max_line_height) { + line_height = max_line_height; + } + } + else { + line_height = min_line_height; + } + + var previous_line = -99, + last_mark, + last_top, + offsets = {}; + + // Calculate line offsets outside loop to prevent relayouts + c.lines_to_mark.each(function() { + offsets[this.id] = $(this).offset().top; + }); + c.lines_to_mark.each(function () { + var id_name = $(this).attr('id'), + line_top = Math.round(offsets[id_name] * marker_scale), + line_number = parseInt(id_name.substring(1, id_name.length)); + + if (line_number === previous_line + 1) { + // If this solid missed block just make previous mark higher. + last_mark.css({ + 'height': line_top + line_height - last_top + }); + } + else { + // Add colored line in scroll_marker block. + scroll_marker.append('
'); + last_mark = $('#m' + line_number); + last_mark.css({ + 'height': line_height, + 'top': line_top + }); + last_top = line_top; + } + + previous_line = line_number; + }); +}; diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_html_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_html_py.html new file mode 100644 index 000000000..56c1c126b --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_html_py.html @@ -0,0 +1,586 @@ + + + + + + Coverage for coverage/html.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""HTML reporting for coverage.py.""" 

+

5 

+

6import datetime 

+

7import json 

+

8import os 

+

9import re 

+

10import shutil 

+

11 

+

12import coverage 

+

13from coverage import env 

+

14from coverage.backward import iitems, SimpleNamespace, format_local_datetime 

+

15from coverage.data import add_data_to_hash 

+

16from coverage.files import flat_rootname 

+

17from coverage.misc import CoverageException, ensure_dir, file_be_gone, Hasher, isolate_module 

+

18from coverage.report import get_analysis_to_report 

+

19from coverage.results import Numbers 

+

20from coverage.templite import Templite 

+

21 

+

22os = isolate_module(os) 

+

23 

+

24 

+

25# Static files are looked for in a list of places. 

+

26STATIC_PATH = [ 

+

27 # The place Debian puts system Javascript libraries. 

+

28 "/usr/share/javascript", 

+

29 

+

30 # Our htmlfiles directory. 

+

31 os.path.join(os.path.dirname(__file__), "htmlfiles"), 

+

32] 

+

33 

+

34 

+

35def data_filename(fname, pkgdir=""): 

+

36 """Return the path to a data file of ours. 

+

37 

+

38 The file is searched for on `STATIC_PATH`, and the first place it's found, 

+

39 is returned. 

+

40 

+

41 Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir` 

+

42 is provided, at that sub-directory. 

+

43 

+

44 """ 

+

45 tried = [] 

+

46 for static_dir in STATIC_PATH: 

+

47 static_filename = os.path.join(static_dir, fname) 

+

48 if os.path.exists(static_filename): 

+

49 return static_filename 

+

50 else: 

+

51 tried.append(static_filename) 

+

52 if pkgdir: 

+

53 static_filename = os.path.join(static_dir, pkgdir, fname) 

+

54 if os.path.exists(static_filename): 

+

55 return static_filename 

+

56 else: 

+

57 tried.append(static_filename) 

+

58 raise CoverageException( 

+

59 "Couldn't find static file %r from %r, tried: %r" % (fname, os.getcwd(), tried) 

+

60 ) 

+

61 

+

62 

+

63def read_data(fname): 

+

64 """Return the contents of a data file of ours.""" 

+

65 with open(data_filename(fname)) as data_file: 

+

66 return data_file.read() 

+

67 

+

68 

+

69def write_html(fname, html): 

+

70 """Write `html` to `fname`, properly encoded.""" 

+

71 html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n" 

+

72 with open(fname, "wb") as fout: 

+

73 fout.write(html.encode('ascii', 'xmlcharrefreplace')) 

+

74 

+

75 

+

76class HtmlDataGeneration(object): 

+

77 """Generate structured data to be turned into HTML reports.""" 

+

78 

+

79 EMPTY = "(empty)" 

+

80 

+

81 def __init__(self, cov): 

+

82 self.coverage = cov 

+

83 self.config = self.coverage.config 

+

84 data = self.coverage.get_data() 

+

85 self.has_arcs = data.has_arcs() 

+

86 if self.config.show_contexts: 

+

87 if data.measured_contexts() == {""}: 

+

88 self.coverage._warn("No contexts were measured") 

+

89 data.set_query_contexts(self.config.report_contexts) 

+

90 

+

91 def data_for_file(self, fr, analysis): 

+

92 """Produce the data needed for one file's report.""" 

+

93 if self.has_arcs: 

+

94 missing_branch_arcs = analysis.missing_branch_arcs() 

+

95 arcs_executed = analysis.arcs_executed() 

+

96 

+

97 if self.config.show_contexts: 

+

98 contexts_by_lineno = analysis.data.contexts_by_lineno(analysis.filename) 

+

99 

+

100 lines = [] 

+

101 

+

102 for lineno, tokens in enumerate(fr.source_token_lines(), start=1): 

+

103 # Figure out how to mark this line. 

+

104 category = None 

+

105 short_annotations = [] 

+

106 long_annotations = [] 

+

107 

+

108 if lineno in analysis.excluded: 

+

109 category = 'exc' 

+

110 elif lineno in analysis.missing: 

+

111 category = 'mis' 

+

112 elif self.has_arcs and lineno in missing_branch_arcs: 

+

113 category = 'par' 

+

114 for b in missing_branch_arcs[lineno]: 

+

115 if b < 0: 

+

116 short_annotations.append("exit") 

+

117 else: 

+

118 short_annotations.append(b) 

+

119 long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed)) 

+

120 elif lineno in analysis.statements: 

+

121 category = 'run' 

+

122 

+

123 contexts = contexts_label = None 

+

124 context_list = None 

+

125 if category and self.config.show_contexts: 

+

126 contexts = sorted(c or self.EMPTY for c in contexts_by_lineno[lineno]) 

+

127 if contexts == [self.EMPTY]: 

+

128 contexts_label = self.EMPTY 

+

129 else: 

+

130 contexts_label = "{} ctx".format(len(contexts)) 

+

131 context_list = contexts 

+

132 

+

133 lines.append(SimpleNamespace( 

+

134 tokens=tokens, 

+

135 number=lineno, 

+

136 category=category, 

+

137 statement=(lineno in analysis.statements), 

+

138 contexts=contexts, 

+

139 contexts_label=contexts_label, 

+

140 context_list=context_list, 

+

141 short_annotations=short_annotations, 

+

142 long_annotations=long_annotations, 

+

143 )) 

+

144 

+

145 file_data = SimpleNamespace( 

+

146 relative_filename=fr.relative_filename(), 

+

147 nums=analysis.numbers, 

+

148 lines=lines, 

+

149 ) 

+

150 

+

151 return file_data 

+

152 

+

153 

+

154class HtmlReporter(object): 

+

155 """HTML reporting.""" 

+

156 

+

157 # These files will be copied from the htmlfiles directory to the output 

+

158 # directory. 

+

159 STATIC_FILES = [ 

+

160 ("style.css", ""), 

+

161 ("jquery.min.js", "jquery"), 

+

162 ("jquery.ba-throttle-debounce.min.js", "jquery-throttle-debounce"), 

+

163 ("jquery.hotkeys.js", "jquery-hotkeys"), 

+

164 ("jquery.isonscreen.js", "jquery-isonscreen"), 

+

165 ("jquery.tablesorter.min.js", "jquery-tablesorter"), 

+

166 ("coverage_html.js", ""), 

+

167 ("keybd_closed.png", ""), 

+

168 ("keybd_open.png", ""), 

+

169 ("favicon_32.png", ""), 

+

170 ] 

+

171 

+

172 def __init__(self, cov): 

+

173 self.coverage = cov 

+

174 self.config = self.coverage.config 

+

175 self.directory = self.config.html_dir 

+

176 

+

177 self.skip_covered = self.config.html_skip_covered 

+

178 if self.skip_covered is None: 

+

179 self.skip_covered = self.config.skip_covered 

+

180 self.skip_empty = self.config.html_skip_empty 

+

181 if self.skip_empty is None: 

+

182 self.skip_empty= self.config.skip_empty 

+

183 

+

184 title = self.config.html_title 

+

185 if env.PY2: 

+

186 title = title.decode("utf8") 

+

187 

+

188 if self.config.extra_css: 

+

189 self.extra_css = os.path.basename(self.config.extra_css) 

+

190 else: 

+

191 self.extra_css = None 

+

192 

+

193 self.data = self.coverage.get_data() 

+

194 self.has_arcs = self.data.has_arcs() 

+

195 

+

196 self.file_summaries = [] 

+

197 self.all_files_nums = [] 

+

198 self.incr = IncrementalChecker(self.directory) 

+

199 self.datagen = HtmlDataGeneration(self.coverage) 

+

200 self.totals = Numbers() 

+

201 

+

202 self.template_globals = { 

+

203 # Functions available in the templates. 

+

204 'escape': escape, 

+

205 'pair': pair, 

+

206 'len': len, 

+

207 

+

208 # Constants for this report. 

+

209 '__url__': coverage.__url__, 

+

210 '__version__': coverage.__version__, 

+

211 'title': title, 

+

212 'time_stamp': format_local_datetime(datetime.datetime.now()), 

+

213 'extra_css': self.extra_css, 

+

214 'has_arcs': self.has_arcs, 

+

215 'show_contexts': self.config.show_contexts, 

+

216 

+

217 # Constants for all reports. 

+

218 # These css classes determine which lines are highlighted by default. 

+

219 'category': { 

+

220 'exc': 'exc show_exc', 

+

221 'mis': 'mis show_mis', 

+

222 'par': 'par run show_par', 

+

223 'run': 'run', 

+

224 } 

+

225 } 

+

226 self.pyfile_html_source = read_data("pyfile.html") 

+

227 self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals) 

+

228 

+

229 def report(self, morfs): 

+

230 """Generate an HTML report for `morfs`. 

+

231 

+

232 `morfs` is a list of modules or file names. 

+

233 

+

234 """ 

+

235 # Read the status data and check that this run used the same 

+

236 # global data as the last run. 

+

237 self.incr.read() 

+

238 self.incr.check_global_data(self.config, self.pyfile_html_source) 

+

239 

+

240 # Process all the files. 

+

241 for fr, analysis in get_analysis_to_report(self.coverage, morfs): 

+

242 self.html_file(fr, analysis) 

+

243 

+

244 if not self.all_files_nums: 

+

245 raise CoverageException("No data to report.") 

+

246 

+

247 self.totals = sum(self.all_files_nums) 

+

248 

+

249 # Write the index file. 

+

250 self.index_file() 

+

251 

+

252 self.make_local_static_report_files() 

+

253 return self.totals.n_statements and self.totals.pc_covered 

+

254 

+

255 def make_local_static_report_files(self): 

+

256 """Make local instances of static files for HTML report.""" 

+

257 # The files we provide must always be copied. 

+

258 for static, pkgdir in self.STATIC_FILES: 

+

259 shutil.copyfile( 

+

260 data_filename(static, pkgdir), 

+

261 os.path.join(self.directory, static) 

+

262 ) 

+

263 

+

264 # The user may have extra CSS they want copied. 

+

265 if self.extra_css: 

+

266 shutil.copyfile( 

+

267 self.config.extra_css, 

+

268 os.path.join(self.directory, self.extra_css) 

+

269 ) 

+

270 

+

271 def html_file(self, fr, analysis): 

+

272 """Generate an HTML file for one source file.""" 

+

273 rootname = flat_rootname(fr.relative_filename()) 

+

274 html_filename = rootname + ".html" 

+

275 ensure_dir(self.directory) 

+

276 html_path = os.path.join(self.directory, html_filename) 

+

277 

+

278 # Get the numbers for this file. 

+

279 nums = analysis.numbers 

+

280 self.all_files_nums.append(nums) 

+

281 

+

282 if self.skip_covered: 

+

283 # Don't report on 100% files. 

+

284 no_missing_lines = (nums.n_missing == 0) 

+

285 no_missing_branches = (nums.n_partial_branches == 0) 

+

286 if no_missing_lines and no_missing_branches: 

+

287 # If there's an existing file, remove it. 

+

288 file_be_gone(html_path) 

+

289 return 

+

290 

+

291 if self.skip_empty: 

+

292 # Don't report on empty files. 

+

293 if nums.n_statements == 0: 

+

294 file_be_gone(html_path) 

+

295 return 

+

296 

+

297 # Find out if the file on disk is already correct. 

+

298 if self.incr.can_skip_file(self.data, fr, rootname): 

+

299 self.file_summaries.append(self.incr.index_info(rootname)) 

+

300 return 

+

301 

+

302 # Write the HTML page for this file. 

+

303 file_data = self.datagen.data_for_file(fr, analysis) 

+

304 for ldata in file_data.lines: 

+

305 # Build the HTML for the line. 

+

306 html = [] 

+

307 for tok_type, tok_text in ldata.tokens: 

+

308 if tok_type == "ws": 

+

309 html.append(escape(tok_text)) 

+

310 else: 

+

311 tok_html = escape(tok_text) or '&nbsp;' 

+

312 html.append( 

+

313 u'<span class="{}">{}</span>'.format(tok_type, tok_html) 

+

314 ) 

+

315 ldata.html = ''.join(html) 

+

316 

+

317 if ldata.short_annotations: 

+

318 # 202F is NARROW NO-BREAK SPACE. 

+

319 # 219B is RIGHTWARDS ARROW WITH STROKE. 

+

320 ldata.annotate = u",&nbsp;&nbsp; ".join( 

+

321 u"{}&#x202F;&#x219B;&#x202F;{}".format(ldata.number, d) 

+

322 for d in ldata.short_annotations 

+

323 ) 

+

324 else: 

+

325 ldata.annotate = None 

+

326 

+

327 if ldata.long_annotations: 

+

328 longs = ldata.long_annotations 

+

329 if len(longs) == 1: 

+

330 ldata.annotate_long = longs[0] 

+

331 else: 

+

332 ldata.annotate_long = u"{:d} missed branches: {}".format( 

+

333 len(longs), 

+

334 u", ".join( 

+

335 u"{:d}) {}".format(num, ann_long) 

+

336 for num, ann_long in enumerate(longs, start=1) 

+

337 ), 

+

338 ) 

+

339 else: 

+

340 ldata.annotate_long = None 

+

341 

+

342 css_classes = [] 

+

343 if ldata.category: 

+

344 css_classes.append(self.template_globals['category'][ldata.category]) 

+

345 ldata.css_class = ' '.join(css_classes) or "pln" 

+

346 

+

347 html = self.source_tmpl.render(file_data.__dict__) 

+

348 write_html(html_path, html) 

+

349 

+

350 # Save this file's information for the index file. 

+

351 index_info = { 

+

352 'nums': nums, 

+

353 'html_filename': html_filename, 

+

354 'relative_filename': fr.relative_filename(), 

+

355 } 

+

356 self.file_summaries.append(index_info) 

+

357 self.incr.set_index_info(rootname, index_info) 

+

358 

+

359 def index_file(self): 

+

360 """Write the index.html file for this report.""" 

+

361 index_tmpl = Templite(read_data("index.html"), self.template_globals) 

+

362 

+

363 html = index_tmpl.render({ 

+

364 'files': self.file_summaries, 

+

365 'totals': self.totals, 

+

366 }) 

+

367 

+

368 write_html(os.path.join(self.directory, "index.html"), html) 

+

369 

+

370 # Write the latest hashes for next time. 

+

371 self.incr.write() 

+

372 

+

373 
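Note: HtmlReporter is normally driven through the public Coverage API (or the coverage html command) rather than constructed directly. A minimal sketch, assuming coverage.py is installed and a .coverage data file already exists in the working directory:

import coverage

cov = coverage.Coverage()
cov.load()                                   # read the existing .coverage data file
pct = cov.html_report(directory="htmlcov")   # writes index.html plus one page per measured file
print("overall coverage: %.3f%%" % pct)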

+

374class IncrementalChecker(object): 

+

375 """Logic and data to support incremental reporting.""" 

+

376 

+

377 STATUS_FILE = "status.json" 

+

378 STATUS_FORMAT = 2 

+

379 

+

380 # pylint: disable=wrong-spelling-in-comment,useless-suppression 

+

381 # The data looks like: 

+

382 # 

+

383 # { 

+

384 # "format": 2, 

+

385 # "globals": "540ee119c15d52a68a53fe6f0897346d", 

+

386 # "version": "4.0a1", 

+

387 # "files": { 

+

388 # "cogapp___init__": { 

+

389 # "hash": "e45581a5b48f879f301c0f30bf77a50c", 

+

390 # "index": { 

+

391 # "html_filename": "cogapp___init__.html", 

+

392 # "relative_filename": "cogapp/__init__", 

+

393 # "nums": [ 1, 14, 0, 0, 0, 0, 0 ] 

+

394 # } 

+

395 # }, 

+

396 # ... 

+

397 # "cogapp_whiteutils": { 

+

398 # "hash": "8504bb427fc488c4176809ded0277d51", 

+

399 # "index": { 

+

400 # "html_filename": "cogapp_whiteutils.html", 

+

401 # "relative_filename": "cogapp/whiteutils", 

+

402 # "nums": [ 1, 59, 0, 1, 28, 2, 2 ] 

+

403 # } 

+

404 # } 

+

405 # } 

+

406 # } 

+

407 

+

408 def __init__(self, directory): 

+

409 self.directory = directory 

+

410 self.reset() 

+

411 

+

412 def reset(self): 

+

413 """Initialize to empty. Causes all files to be reported.""" 

+

414 self.globals = '' 

+

415 self.files = {} 

+

416 

+

417 def read(self): 

+

418 """Read the information we stored last time.""" 

+

419 usable = False 

+

420 try: 

+

421 status_file = os.path.join(self.directory, self.STATUS_FILE) 

+

422 with open(status_file) as fstatus: 

+

423 status = json.load(fstatus) 

+

424 except (IOError, ValueError): 

+

425 usable = False 

+

426 else: 

+

427 usable = True 

+

428 if status['format'] != self.STATUS_FORMAT: 

+

429 usable = False 

+

430 elif status['version'] != coverage.__version__: 

+

431 usable = False 

+

432 

+

433 if usable: 

+

434 self.files = {} 

+

435 for filename, fileinfo in iitems(status['files']): 

+

436 fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums']) 

+

437 self.files[filename] = fileinfo 

+

438 self.globals = status['globals'] 

+

439 else: 

+

440 self.reset() 

+

441 

+

442 def write(self): 

+

443 """Write the current status.""" 

+

444 status_file = os.path.join(self.directory, self.STATUS_FILE) 

+

445 files = {} 

+

446 for filename, fileinfo in iitems(self.files): 

+

447 fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args() 

+

448 files[filename] = fileinfo 

+

449 

+

450 status = { 

+

451 'format': self.STATUS_FORMAT, 

+

452 'version': coverage.__version__, 

+

453 'globals': self.globals, 

+

454 'files': files, 

+

455 } 

+

456 with open(status_file, "w") as fout: 

+

457 json.dump(status, fout, separators=(',', ':')) 

+

458 
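The status file written above is plain JSON, so it can be inspected directly. A small sketch, assuming an htmlcov/ directory left behind by a previous HTML report:

import json

with open("htmlcov/status.json") as fstatus:
    status = json.load(fstatus)

print(status["format"])           # 2, the STATUS_FORMAT this class writes
print(status["version"])          # the coverage.py version that produced the report
print(sorted(status["files"]))    # one flat root name per reported source file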

+

459 def check_global_data(self, *data): 

+

460 """Check the global data that can affect incremental reporting.""" 

+

461 m = Hasher() 

+

462 for d in data: 

+

463 m.update(d) 

+

464 these_globals = m.hexdigest() 

+

465 if self.globals != these_globals: 

+

466 self.reset() 

+

467 self.globals = these_globals 

+

468 

+

469 def can_skip_file(self, data, fr, rootname): 

+

470 """Can we skip reporting this file? 

+

471 

+

472 `data` is a CoverageData object, `fr` is a `FileReporter`, and 

+

473 `rootname` is the name being used for the file. 

+

474 """ 

+

475 m = Hasher() 

+

476 m.update(fr.source().encode('utf-8')) 

+

477 add_data_to_hash(data, fr.filename, m) 

+

478 this_hash = m.hexdigest() 

+

479 

+

480 that_hash = self.file_hash(rootname) 

+

481 

+

482 if this_hash == that_hash: 

+

483 # Nothing has changed to require the file to be reported again. 

+

484 return True 

+

485 else: 

+

486 self.set_file_hash(rootname, this_hash) 

+

487 return False 

+

488 

+

489 def file_hash(self, fname): 

+

490 """Get the hash of `fname`'s contents.""" 

+

491 return self.files.get(fname, {}).get('hash', '') 

+

492 

+

493 def set_file_hash(self, fname, val): 

+

494 """Set the hash of `fname`'s contents.""" 

+

495 self.files.setdefault(fname, {})['hash'] = val 

+

496 

+

497 def index_info(self, fname): 

+

498 """Get the information for index.html for `fname`.""" 

+

499 return self.files.get(fname, {}).get('index', {}) 

+

500 

+

501 def set_index_info(self, fname, info): 

+

502 """Set the information for index.html for `fname`.""" 

+

503 self.files.setdefault(fname, {})['index'] = info 

+

504 

+

505 

+

506# Helpers for templates and generating HTML 

+

507 

+

508def escape(t): 

+

509 """HTML-escape the text in `t`. 

+

510 

+

511 This is only suitable for HTML text, not attributes. 

+

512 

+

513 """ 

+

514 # Convert HTML special chars into HTML entities. 

+

515 return t.replace("&", "&amp;").replace("<", "&lt;") 

+

516 

+

517 

+

518def pair(ratio): 

+

519 """Format a pair of numbers so JavaScript can read them in an attribute.""" 

+

520 return "%s %s" % ratio 

+
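The two template helpers above are small enough to exercise directly. A quick sketch of their behavior, assuming they are imported from this module (coverage.html):

from coverage.html import escape, pair

print(escape("if a < b & c:"))   # 'if a &lt; b &amp; c:' (ampersands first, then <)
print(pair((3, 7)))              # '3 7', ready to drop into an HTML data attribute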
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_inorout_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_inorout_py.html
new file mode 100644
index 000000000..2f7592612
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_inorout_py.html
@@ -0,0 +1,560 @@
Coverage for coverage/inorout.py: 78.520%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Determining whether files are being measured/reported or not.""" 

+

5 

+

6# For finding the stdlib 

+

7import atexit 

+

8import inspect 

+

9import itertools 

+

10import os 

+

11import platform 

+

12import re 

+

13import sys 

+

14import traceback 

+

15 

+

16from coverage import env 

+

17from coverage.backward import code_object 

+

18from coverage.disposition import FileDisposition, disposition_init 

+

19from coverage.files import TreeMatcher, FnmatchMatcher, ModuleMatcher 

+

20from coverage.files import prep_patterns, find_python_files, canonical_filename 

+

21from coverage.misc import CoverageException 

+

22from coverage.python import source_for_file, source_for_morf 

+

23 

+

24 

+

25# Pypy has some unusual stuff in the "stdlib". Consider those locations 

+

26# when deciding where the stdlib is. These modules are not used for anything, 

+

27# they are modules importable from the pypy lib directories, so that we can 

+

28# find those directories. 

+

29_structseq = _pypy_irc_topic = None 

+

30if env.PYPY: 

+

31 try: 

+

32 import _structseq 

+

33 except ImportError: 

+

34 pass 

+

35 

+

36 try: 

+

37 import _pypy_irc_topic 

+

38 except ImportError: 

+

39 pass 

+

40 

+

41 

+

42def canonical_path(morf, directory=False): 

+

43 """Return the canonical path of the module or file `morf`. 

+

44 

+

45 If the module is a package, then return its directory. If it is a 

+

46 module, then return its file, unless `directory` is True, in which 

+

47 case return its enclosing directory. 

+

48 

+

49 """ 

+

50 morf_path = canonical_filename(source_for_morf(morf)) 

+

51 if morf_path.endswith("__init__.py") or directory: 

+

52 morf_path = os.path.split(morf_path)[0] 

+

53 return morf_path 

+

54 

+

55 

+

56def name_for_module(filename, frame): 

+

57 """Get the name of the module for a filename and frame. 

+

58 

+

59 For configurability's sake, we allow __main__ modules to be matched by 

+

60 their importable name. 

+

61 

+

62 If loaded via runpy (aka -m), we can usually recover the "original" 

+

63 full dotted module name, otherwise, we resort to interpreting the 

+

64 file name to get the module's name. In the case that the module name 

+

65 can't be determined, None is returned. 

+

66 

+

67 """ 

+

68 module_globals = frame.f_globals if frame is not None else {} 

+

69 if module_globals is None: # pragma: only ironpython 

+

70 # IronPython doesn't provide globals: https://github.com/IronLanguages/main/issues/1296 

+

71 module_globals = {} 

+

72 

+

73 dunder_name = module_globals.get('__name__', None) 

+

74 

+

75 if isinstance(dunder_name, str) and dunder_name != '__main__': 75 ↛ 77line 75 didn't jump to line 77, because the condition on line 75 was never true

+

76 # This is the usual case: an imported module. 

+

77 return dunder_name 

+

78 

+

79 loader = module_globals.get('__loader__', None) 

+

80 for attrname in ('fullname', 'name'): # attribute renamed in py3.2 

+

81 if hasattr(loader, attrname): 81 ↛ 82line 81 didn't jump to line 82, because the condition on line 81 was never true

+

82 fullname = getattr(loader, attrname) 

+

83 else: 

+

84 continue 

+

85 

+

86 if isinstance(fullname, str) and fullname != '__main__': 

+

87 # Module loaded via: runpy -m 

+

88 return fullname 

+

89 

+

90 # Script as first argument to Python command line. 

+

91 inspectedname = inspect.getmodulename(filename) 

+

92 if inspectedname is not None: 

+

93 return inspectedname 

+

94 else: 

+

95 return dunder_name 

+

96 

+

97 

+

98def module_is_namespace(mod): 

+

99 """Is the module object `mod` a PEP420 namespace module?""" 

+

100 return hasattr(mod, '__path__') and getattr(mod, '__file__', None) is None 

+

101 

+

102 

+

103def module_has_file(mod): 

+

104 """Does the module object `mod` have an existing __file__ ?""" 

+

105 mod__file__ = getattr(mod, '__file__', None) 

+

106 if mod__file__ is None: 

+

107 return False 

+

108 return os.path.exists(mod__file__) 

+

109 

+

110 
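The two predicates above are easy to try interactively. A hedged sketch, assuming coverage.py is importable and using the stdlib email package as an ordinary (non-namespace) module:

import email                       # a regular package with a real __init__.py

from coverage.inorout import module_has_file, module_is_namespace

print(module_is_namespace(email))  # False: email defines __file__
print(module_has_file(email))      # True: that __file__ exists on disk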

+

111class InOrOut(object): 

+

112 """Machinery for determining what files to measure.""" 

+

113 

+

114 def __init__(self, warn, debug): 

+

115 self.warn = warn 

+

116 self.debug = debug 

+

117 

+

118 # The matchers for should_trace. 

+

119 self.source_match = None 

+

120 self.source_pkgs_match = None 

+

121 self.pylib_paths = self.cover_paths = None 

+

122 self.pylib_match = self.cover_match = None 

+

123 self.include_match = self.omit_match = None 

+

124 self.plugins = [] 

+

125 self.disp_class = FileDisposition 

+

126 

+

127 # The source argument can be directories or package names. 

+

128 self.source = [] 

+

129 self.source_pkgs = [] 

+

130 self.source_pkgs_unmatched = [] 

+

131 self.omit = self.include = None 

+

132 

+

133 def configure(self, config): 

+

134 """Apply the configuration to get ready for decision-time.""" 

+

135 self.source_pkgs.extend(config.source_pkgs) 

+

136 for src in config.source or []: 

+

137 if os.path.isdir(src): 

+

138 self.source.append(canonical_filename(src)) 

+

139 else: 

+

140 self.source_pkgs.append(src) 

+

141 self.source_pkgs_unmatched = self.source_pkgs[:] 

+

142 

+

143 self.omit = prep_patterns(config.run_omit) 

+

144 self.include = prep_patterns(config.run_include) 

+

145 

+

146 # The directories for files considered "installed with the interpreter". 

+

147 self.pylib_paths = set() 

+

148 if not config.cover_pylib: 

+

149 # Look at where some standard modules are located. That's the 

+

150 # indication for "installed with the interpreter". In some 

+

151 # environments (virtualenv, for example), these modules may be 

+

152 # spread across a few locations. Look at all the candidate modules 

+

153 # we've imported, and take all the different ones. 

+

154 for m in (atexit, inspect, os, platform, _pypy_irc_topic, re, _structseq, traceback): 

+

155 if m is not None and hasattr(m, "__file__"): 

+

156 self.pylib_paths.add(canonical_path(m, directory=True)) 

+

157 

+

158 if _structseq and not hasattr(_structseq, '__file__'): 

+

159 # PyPy 2.4 has no __file__ in the builtin modules, but the code 

+

160 # objects still have the file names. So dig into one to find 

+

161 # the path to exclude. The "filename" might be synthetic, 

+

162 # don't be fooled by those. 

+

163 structseq_file = code_object(_structseq.structseq_new).co_filename 

+

164 if not structseq_file.startswith("<"): 164 ↛ 165line 164 didn't jump to line 165, because the condition on line 164 was never true

+

165 self.pylib_paths.add(canonical_path(structseq_file)) 

+

166 

+

167 # To avoid tracing the coverage.py code itself, we skip anything 

+

168 # located where we are. 

+

169 self.cover_paths = [canonical_path(__file__, directory=True)] 

+

170 if env.TESTING: 

+

171 # Don't include our own test code. 

+

172 self.cover_paths.append(os.path.join(self.cover_paths[0], "tests")) 

+

173 

+

174 # When testing, we use PyContracts, which should be considered 

+

175 # part of coverage.py, and it uses six. Exclude those directories 

+

176 # just as we exclude ourselves. 

+

177 import contracts 

+

178 import six 

+

179 for mod in [contracts, six]: 

+

180 self.cover_paths.append(canonical_path(mod)) 

+

181 

+

182 def debug(msg): 

+

183 if self.debug: 

+

184 self.debug.write(msg) 

+

185 

+

186 # Create the matchers we need for should_trace 

+

187 if self.source or self.source_pkgs: 

+

188 against = [] 

+

189 if self.source: 

+

190 self.source_match = TreeMatcher(self.source) 

+

191 against.append("trees {!r}".format(self.source_match)) 

+

192 if self.source_pkgs: 

+

193 self.source_pkgs_match = ModuleMatcher(self.source_pkgs) 

+

194 against.append("modules {!r}".format(self.source_pkgs_match)) 

+

195 debug("Source matching against " + " and ".join(against)) 

+

196 else: 

+

197 if self.cover_paths: 197 ↛ 200line 197 didn't jump to line 200, because the condition on line 197 was never false

+

198 self.cover_match = TreeMatcher(self.cover_paths) 

+

199 debug("Coverage code matching: {!r}".format(self.cover_match)) 

+

200 if self.pylib_paths: 

+

201 self.pylib_match = TreeMatcher(self.pylib_paths) 

+

202 debug("Python stdlib matching: {!r}".format(self.pylib_match)) 

+

203 if self.include: 

+

204 self.include_match = FnmatchMatcher(self.include) 

+

205 debug("Include matching: {!r}".format(self.include_match)) 

+

206 if self.omit: 

+

207 self.omit_match = FnmatchMatcher(self.omit) 

+

208 debug("Omit matching: {!r}".format(self.omit_match)) 

+

209 

+

210 def should_trace(self, filename, frame=None): 

+

211 """Decide whether to trace execution in `filename`, with a reason. 

+

212 

+

213 This function is called from the trace function. As each new file name 

+

214 is encountered, this function determines whether it is traced or not. 

+

215 

+

216 Returns a FileDisposition object. 

+

217 

+

218 """ 

+

219 original_filename = filename 

+

220 disp = disposition_init(self.disp_class, filename) 

+

221 

+

222 def nope(disp, reason): 

+

223 """Simple helper to make it easy to return NO.""" 

+

224 disp.trace = False 

+

225 disp.reason = reason 

+

226 return disp 

+

227 

+

228 if frame is not None: 228 ↛ 235line 228 didn't jump to line 235, because the condition on line 228 was never true

+

229 # Compiled Python files have two file names: frame.f_code.co_filename is 

+

230 # the file name at the time the .pyc was compiled. The second name is 

+

231 # __file__, which is where the .pyc was actually loaded from. Since 

+

232 # .pyc files can be moved after compilation (for example, by being 

+

233 # installed), we look for __file__ in the frame and prefer it to the 

+

234 # co_filename value. 

+

235 dunder_file = frame.f_globals and frame.f_globals.get('__file__') 

+

236 if dunder_file: 

+

237 filename = source_for_file(dunder_file) 

+

238 if original_filename and not original_filename.startswith('<'): 

+

239 orig = os.path.basename(original_filename) 

+

240 if orig != os.path.basename(filename): 

+

241 # Files shouldn't be renamed when moved. This happens when 

+

242 # exec'ing code. If it seems like something is wrong with 

+

243 # the frame's file name, then just use the original. 

+

244 filename = original_filename 

+

245 

+

246 if not filename: 246 ↛ 248line 246 didn't jump to line 248, because the condition on line 246 was never true

+

247 # Empty string is pretty useless. 

+

248 return nope(disp, "empty string isn't a file name") 

+

249 

+

250 if filename.startswith('memory:'): 250 ↛ 251line 250 didn't jump to line 251, because the condition on line 250 was never true

+

251 return nope(disp, "memory isn't traceable") 

+

252 

+

253 if filename.startswith('<'): 253 ↛ 258line 253 didn't jump to line 258, because the condition on line 253 was never true

+

254 # Lots of non-file execution is represented with artificial 

+

255 # file names like "<string>", "<doctest readme.txt[0]>", or 

+

256 # "<exec_function>". Don't ever trace these executions, since we 

+

257 # can't do anything with the data later anyway. 

+

258 return nope(disp, "not a real file name") 

+

259 

+

260 # pyexpat does a dumb thing, calling the trace function explicitly from 

+

261 # C code with a C file name. 

+

262 if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename): 262 ↛ 263line 262 didn't jump to line 263, because the condition on line 262 was never true

+

263 return nope(disp, "pyexpat lies about itself") 

+

264 

+

265 # Jython reports the .class file to the tracer, use the source file. 

+

266 if filename.endswith("$py.class"): 266 ↛ 267line 266 didn't jump to line 267, because the condition on line 266 was never true

+

267 filename = filename[:-9] + ".py" 

+

268 

+

269 canonical = canonical_filename(filename) 

+

270 disp.canonical_filename = canonical 

+

271 

+

272 # Try the plugins, see if they have an opinion about the file. 

+

273 plugin = None 

+

274 for plugin in self.plugins.file_tracers: 274 ↛ 275line 274 didn't jump to line 275, because the loop on line 274 never started

+

275 if not plugin._coverage_enabled: 

+

276 continue 

+

277 

+

278 try: 

+

279 file_tracer = plugin.file_tracer(canonical) 

+

280 if file_tracer is not None: 

+

281 file_tracer._coverage_plugin = plugin 

+

282 disp.trace = True 

+

283 disp.file_tracer = file_tracer 

+

284 if file_tracer.has_dynamic_source_filename(): 

+

285 disp.has_dynamic_filename = True 

+

286 else: 

+

287 disp.source_filename = canonical_filename( 

+

288 file_tracer.source_filename() 

+

289 ) 

+

290 break 

+

291 except Exception: 

+

292 self.warn( 

+

293 "Disabling plug-in %r due to an exception:" % (plugin._coverage_plugin_name) 

+

294 ) 

+

295 traceback.print_exc() 

+

296 plugin._coverage_enabled = False 

+

297 continue 

+

298 else: 

+

299 # No plugin wanted it: it's Python. 

+

300 disp.trace = True 

+

301 disp.source_filename = canonical 

+

302 

+

303 if not disp.has_dynamic_filename: 303 ↛ 313line 303 didn't jump to line 313, because the condition on line 303 was never false

+

304 if not disp.source_filename: 304 ↛ 305line 304 didn't jump to line 305, because the condition on line 304 was never true

+

305 raise CoverageException( 

+

306 "Plugin %r didn't set source_filename for %r" % 

+

307 (plugin, disp.original_filename) 

+

308 ) 

+

309 reason = self.check_include_omit_etc(disp.source_filename, frame) 

+

310 if reason: 

+

311 nope(disp, reason) 

+

312 

+

313 return disp 

+

314 

+

315 def check_include_omit_etc(self, filename, frame): 

+

316 """Check a file name against the include, omit, etc, rules. 

+

317 

+

318 Returns a string or None. String means, don't trace, and is the reason 

+

319 why. None means no reason found to not trace. 

+

320 

+

321 """ 

+

322 modulename = name_for_module(filename, frame) 

+

323 

+

324 # If the user specified source or include, then that's authoritative 

+

325 # about the outer bound of what to measure and we don't have to apply 

+

326 # any canned exclusions. If they didn't, then we have to exclude the 

+

327 # stdlib and coverage.py directories. 

+

328 if self.source_match or self.source_pkgs_match: 

+

329 extra = "" 

+

330 ok = False 

+

331 if self.source_pkgs_match: 

+

332 if self.source_pkgs_match.match(modulename): 332 ↛ 333line 332 didn't jump to line 333, because the condition on line 332 was never true

+

333 ok = True 

+

334 if modulename in self.source_pkgs_unmatched: 

+

335 self.source_pkgs_unmatched.remove(modulename) 

+

336 else: 

+

337 extra = "module {!r} ".format(modulename) 

+

338 if not ok and self.source_match: 

+

339 if self.source_match.match(filename): 339 ↛ 340line 339 didn't jump to line 340, because the condition on line 339 was never true

+

340 ok = True 

+

341 if not ok: 341 ↛ 358line 341 didn't jump to line 358, because the condition on line 341 was never false

+

342 return extra + "falls outside the --source spec" 

+

343 elif self.include_match: 343 ↛ 349line 343 didn't jump to line 349, because the condition on line 343 was never false

+

344 if not self.include_match.match(filename): 

+

345 return "falls outside the --include trees" 

+

346 else: 

+

347 # If we aren't supposed to trace installed code, then check if this 

+

348 # is near the Python standard library and skip it if so. 

+

349 if self.pylib_match and self.pylib_match.match(filename): 

+

350 return "is in the stdlib" 

+

351 

+

352 # We exclude the coverage.py code itself, since a little of it 

+

353 # will be measured otherwise. 

+

354 if self.cover_match and self.cover_match.match(filename): 

+

355 return "is part of coverage.py" 

+

356 

+

357 # Check the file against the omit pattern. 

+

358 if self.omit_match and self.omit_match.match(filename): 358 ↛ 359line 358 didn't jump to line 359, because the condition on line 358 was never true

+

359 return "is inside an --omit pattern" 

+

360 

+

361 # No point tracing a file we can't later write to SQLite. 

+

362 try: 

+

363 filename.encode("utf8") 

+

364 except UnicodeEncodeError: 

+

365 return "non-encodable filename" 

+

366 

+

367 # No reason found to skip this file. 

+

368 return None 

+

369 

+

370 def warn_conflicting_settings(self): 

+

371 """Warn if there are settings that conflict.""" 

+

372 if self.include: 

+

373 if self.source or self.source_pkgs: 

+

374 self.warn("--include is ignored because --source is set", slug="include-ignored") 

+

375 

+

376 def warn_already_imported_files(self): 

+

377 """Warn if files have already been imported that we will be measuring.""" 

+

378 if self.include or self.source or self.source_pkgs: 

+

379 warned = set() 

+

380 for mod in list(sys.modules.values()): 

+

381 filename = getattr(mod, "__file__", None) 

+

382 if filename is None: 

+

383 continue 

+

384 if filename in warned: 384 ↛ 385line 384 didn't jump to line 385, because the condition on line 384 was never true

+

385 continue 

+

386 

+

387 disp = self.should_trace(filename) 

+

388 if disp.trace: 

+

389 msg = "Already imported a file that will be measured: {}".format(filename) 

+

390 self.warn(msg, slug="already-imported") 

+

391 warned.add(filename) 

+

392 

+

393 def warn_unimported_source(self): 

+

394 """Warn about source packages that were of interest, but never traced.""" 

+

395 for pkg in self.source_pkgs_unmatched: 

+

396 self._warn_about_unmeasured_code(pkg) 

+

397 

+

398 def _warn_about_unmeasured_code(self, pkg): 

+

399 """Warn about a package or module that we never traced. 

+

400 

+

401 `pkg` is a string, the name of the package or module. 

+

402 

+

403 """ 

+

404 mod = sys.modules.get(pkg) 

+

405 if mod is None: 

+

406 self.warn("Module %s was never imported." % pkg, slug="module-not-imported") 

+

407 return 

+

408 

+

409 if module_is_namespace(mod): 

+

410 # A namespace package. It's OK for this not to have been traced, 

+

411 # since there is no code directly in it. 

+

412 return 

+

413 

+

414 if not module_has_file(mod): 

+

415 self.warn("Module %s has no Python source." % pkg, slug="module-not-python") 

+

416 return 

+

417 

+

418 # The module was in sys.modules, and seems like a module with code, but 

+

419 # we never measured it. I guess that means it was imported before 

+

420 # coverage even started. 

+

421 self.warn( 

+

422 "Module %s was previously imported, but not measured" % pkg, 

+

423 slug="module-not-measured", 

+

424 ) 

+

425 

+

426 def find_possibly_unexecuted_files(self): 

+

427 """Find files in the areas of interest that might be untraced. 

+

428 

+

429 Yields pairs: file path, and responsible plug-in name. 

+

430 """ 

+

431 for pkg in self.source_pkgs: 

+

432 if (not pkg in sys.modules or 

+

433 not module_has_file(sys.modules[pkg])): 

+

434 continue 

+

435 pkg_file = source_for_file(sys.modules[pkg].__file__) 

+

436 for ret in self._find_executable_files(canonical_path(pkg_file)): 

+

437 yield ret 

+

438 

+

439 for src in self.source: 

+

440 for ret in self._find_executable_files(src): 

+

441 yield ret 

+

442 

+

443 def _find_plugin_files(self, src_dir): 

+

444 """Get executable files from the plugins.""" 

+

445 for plugin in self.plugins.file_tracers: 

+

446 for x_file in plugin.find_executable_files(src_dir): 

+

447 yield x_file, plugin._coverage_plugin_name 

+

448 

+

449 def _find_executable_files(self, src_dir): 

+

450 """Find executable files in `src_dir`. 

+

451 

+

452 Search for files in `src_dir` that can be executed because they 

+

453 are probably importable. Don't include ones that have been omitted 

+

454 by the configuration. 

+

455 

+

456 Yield the file path, and the plugin name that handles the file. 

+

457 

+

458 """ 

+

459 py_files = ((py_file, None) for py_file in find_python_files(src_dir)) 

+

460 plugin_files = self._find_plugin_files(src_dir) 

+

461 

+

462 for file_path, plugin_name in itertools.chain(py_files, plugin_files): 

+

463 file_path = canonical_filename(file_path) 

+

464 if self.omit_match and self.omit_match.match(file_path): 

+

465 # Turns out this file was omitted, so don't pull it back 

+

466 # in as unexecuted. 

+

467 continue 

+

468 yield file_path, plugin_name 

+

469 

+

470 def sys_info(self): 

+

471 """Our information for Coverage.sys_info. 

+

472 

+

473 Returns a list of (key, value) pairs. 

+

474 """ 

+

475 info = [ 

+

476 ('cover_paths', self.cover_paths), 

+

477 ('pylib_paths', self.pylib_paths), 

+

478 ] 

+

479 

+

480 matcher_names = [ 

+

481 'source_match', 'source_pkgs_match', 

+

482 'include_match', 'omit_match', 

+

483 'cover_match', 'pylib_match', 

+

484 ] 

+

485 

+

486 for matcher_name in matcher_names: 

+

487 matcher = getattr(self, matcher_name) 

+

488 if matcher: 

+

489 matcher_info = matcher.info() 

+

490 else: 

+

491 matcher_info = '-none-' 

+

492 info.append((matcher_name, matcher_info)) 

+

493 

+

494 return info 

+
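The matcher and path entries assembled by sys_info() above surface through the public debugging interface; the coverage debug sys command prints much the same data. A sketch, assuming coverage.py is installed:

import coverage

cov = coverage.Coverage()
cov.start()
cov.stop()

for key, value in cov.sys_info():   # should include the matcher and path entries built above
    print(key, value)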
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_jsonreport_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_jsonreport_py.html
new file mode 100644
index 000000000..f5e6a8663
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_jsonreport_py.html
@@ -0,0 +1,171 @@
Coverage for coverage/jsonreport.py: 100.000%

1# coding: utf-8 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Json reporting for coverage.py""" 

+

6import datetime 

+

7import json 

+

8import sys 

+

9 

+

10from coverage import __version__ 

+

11from coverage.report import get_analysis_to_report 

+

12from coverage.results import Numbers 

+

13 

+

14 

+

15class JsonReporter(object): 

+

16 """A reporter for writing JSON coverage results.""" 

+

17 

+

18 def __init__(self, coverage): 

+

19 self.coverage = coverage 

+

20 self.config = self.coverage.config 

+

21 self.total = Numbers() 

+

22 self.report_data = {} 

+

23 

+

24 def report(self, morfs, outfile=None): 

+

25 """Generate a json report for `morfs`. 

+

26 

+

27 `morfs` is a list of modules or file names. 

+

28 

+

29 `outfile` is a file object to write the json to 

+

30 

+

31 """ 

+

32 outfile = outfile or sys.stdout 

+

33 coverage_data = self.coverage.get_data() 

+

34 coverage_data.set_query_contexts(self.config.report_contexts) 

+

35 self.report_data["meta"] = { 

+

36 "version": __version__, 

+

37 "timestamp": datetime.datetime.now().isoformat(), 

+

38 "branch_coverage": coverage_data.has_arcs(), 

+

39 "show_contexts": self.config.json_show_contexts, 

+

40 } 

+

41 

+

42 measured_files = {} 

+

43 for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs): 

+

44 measured_files[file_reporter.relative_filename()] = self.report_one_file( 

+

45 coverage_data, 

+

46 analysis 

+

47 ) 

+

48 

+

49 self.report_data["files"] = measured_files 

+

50 

+

51 self.report_data["totals"] = { 

+

52 'covered_lines': self.total.n_executed, 

+

53 'num_statements': self.total.n_statements, 

+

54 'percent_covered': self.total.pc_covered, 

+

55 'percent_covered_display': self.total.pc_covered_str, 

+

56 'missing_lines': self.total.n_missing, 

+

57 'excluded_lines': self.total.n_excluded, 

+

58 } 

+

59 

+

60 if coverage_data.has_arcs(): 

+

61 self.report_data["totals"].update({ 

+

62 'num_branches': self.total.n_branches, 

+

63 'num_partial_branches': self.total.n_partial_branches, 

+

64 'covered_branches': self.total.n_executed_branches, 

+

65 'missing_branches': self.total.n_missing_branches, 

+

66 }) 

+

67 

+

68 json.dump( 

+

69 self.report_data, 

+

70 outfile, 

+

71 indent=4 if self.config.json_pretty_print else None 

+

72 ) 

+

73 

+

74 return self.total.n_statements and self.total.pc_covered 

+

75 

+

76 def report_one_file(self, coverage_data, analysis): 

+

77 """Extract the relevant report data for a single file""" 

+

78 nums = analysis.numbers 

+

79 self.total += nums 

+

80 summary = { 

+

81 'covered_lines': nums.n_executed, 

+

82 'num_statements': nums.n_statements, 

+

83 'percent_covered': nums.pc_covered, 

+

84 'percent_covered_display': nums.pc_covered_str, 

+

85 'missing_lines': nums.n_missing, 

+

86 'excluded_lines': nums.n_excluded, 

+

87 } 

+

88 reported_file = { 

+

89 'executed_lines': sorted(analysis.executed), 

+

90 'summary': summary, 

+

91 'missing_lines': sorted(analysis.missing), 

+

92 'excluded_lines': sorted(analysis.excluded) 

+

93 } 

+

94 if self.config.json_show_contexts: 

+

95 reported_file['contexts'] = analysis.data.contexts_by_lineno( 

+

96 analysis.filename, 

+

97 ) 

+

98 if coverage_data.has_arcs(): 

+

99 reported_file['summary'].update({ 

+

100 'num_branches': nums.n_branches, 

+

101 'num_partial_branches': nums.n_partial_branches, 

+

102 'covered_branches': nums.n_executed_branches, 

+

103 'missing_branches': nums.n_missing_branches, 

+

104 }) 

+

105 return reported_file 

+
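As with the HTML reporter, JsonReporter is normally reached through the public API. A minimal sketch, assuming coverage.py is installed and a .coverage data file already exists:

import coverage

cov = coverage.Coverage()
cov.load()                                       # read the existing .coverage data file
pct = cov.json_report(outfile="coverage.json")   # writes the structure built by report() above
print("overall coverage: %.3f%%" % pct)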
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_misc_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_misc_py.html
new file mode 100644
index 000000000..694f49686
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_misc_py.html
@@ -0,0 +1,427 @@
Coverage for coverage/misc.py: 96.460%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Miscellaneous stuff for coverage.py.""" 

+

5 

+

6import errno 

+

7import hashlib 

+

8import inspect 

+

9import locale 

+

10import os 

+

11import os.path 

+

12import random 

+

13import re 

+

14import socket 

+

15import sys 

+

16import types 

+

17 

+

18from coverage import env 

+

19from coverage.backward import to_bytes, unicode_class 

+

20 

+

21ISOLATED_MODULES = {} 

+

22 

+

23 

+

24def isolate_module(mod): 

+

25 """Copy a module so that we are isolated from aggressive mocking. 

+

26 

+

27 If a test suite mocks os.path.exists (for example), and then we need to use 

+

28 it during the test, everything will get tangled up if we use their mock. 

+

29 Making a copy of the module when we import it will isolate coverage.py from 

+

30 those complications. 

+

31 """ 

+

32 if mod not in ISOLATED_MODULES: 

+

33 new_mod = types.ModuleType(mod.__name__) 

+

34 ISOLATED_MODULES[mod] = new_mod 

+

35 for name in dir(mod): 

+

36 value = getattr(mod, name) 

+

37 if isinstance(value, types.ModuleType): 

+

38 value = isolate_module(value) 

+

39 setattr(new_mod, name, value) 

+

40 return ISOLATED_MODULES[mod] 

+

41 

+

42os = isolate_module(os) 

+

43 

+

44 
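The isolation trick above can be seen with unittest.mock: patching the real os.path does not affect the copied module. A sketch, assuming coverage.py is importable:

import os
from unittest import mock

from coverage.misc import isolate_module

iso_os = isolate_module(os)                  # returns the cached isolated copy

with mock.patch("os.path.exists", return_value=False):
    print(os.path.exists("/"))               # False: the mock is in effect
    print(iso_os.path.exists("/"))           # True: the copy kept the original function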

+

45def dummy_decorator_with_args(*args_unused, **kwargs_unused): 

+

46 """Dummy no-op implementation of a decorator with arguments.""" 

+

47 def _decorator(func): 

+

48 return func 

+

49 return _decorator 

+

50 

+

51 

+

52# Environment COVERAGE_NO_CONTRACTS=1 can turn off contracts while debugging 

+

53# tests to remove noise from stack traces. 

+

54# $set_env.py: COVERAGE_NO_CONTRACTS - Disable PyContracts to simplify stack traces. 

+

55USE_CONTRACTS = env.TESTING and not bool(int(os.environ.get("COVERAGE_NO_CONTRACTS", 0))) 

+

56 

+

57# Use PyContracts for assertion testing on parameters and returns, but only if 

+

58# we are running our own test suite. 

+

59if USE_CONTRACTS: 

+

60 from contracts import contract # pylint: disable=unused-import 

+

61 from contracts import new_contract as raw_new_contract 

+

62 

+

63 def new_contract(*args, **kwargs): 

+

64 """A proxy for contracts.new_contract that doesn't mind happening twice.""" 

+

65 try: 

+

66 raw_new_contract(*args, **kwargs) 

+

67 except ValueError: 

+

68 # During meta-coverage, this module is imported twice, and 

+

69 # PyContracts doesn't like redefining contracts. It's OK. 

+

70 pass 

+

71 

+

72 # Define contract words that PyContract doesn't have. 

+

73 new_contract('bytes', lambda v: isinstance(v, bytes)) 73 ↛ exitline 73 didn't run the lambda on line 73

+

74 if env.PY3: 

+

75 new_contract('unicode', lambda v: isinstance(v, unicode_class)) 75 ↛ exitline 75 didn't run the lambda on line 75

+

76 

+

77 def one_of(argnames): 

+

78 """Ensure that only one of the argnames is non-None.""" 

+

79 def _decorator(func): 

+

80 argnameset = {name.strip() for name in argnames.split(",")} 

+

81 def _wrapper(*args, **kwargs): 

+

82 vals = [kwargs.get(name) for name in argnameset] 

+

83 assert sum(val is not None for val in vals) == 1 

+

84 return func(*args, **kwargs) 

+

85 return _wrapper 

+

86 return _decorator 

+

87else: # pragma: not testing 

+

88 # We aren't using real PyContracts, so just define our decorators as 

+

89 # stunt-double no-ops. 

+

90 contract = dummy_decorator_with_args 

+

91 one_of = dummy_decorator_with_args 

+

92 

+

93 def new_contract(*args_unused, **kwargs_unused): 

+

94 """Dummy no-op implementation of `new_contract`.""" 

+

95 pass 

+

96 

+

97 

+

98def nice_pair(pair): 

+

99 """Make a nice string representation of a pair of numbers. 

+

100 

+

101 If the numbers are equal, just return the number, otherwise return the pair 

+

102 with a dash between them, indicating the range. 

+

103 

+

104 """ 

+

105 start, end = pair 

+

106 if start == end: 

+

107 return "%d" % start 

+

108 else: 

+

109 return "%d-%d" % (start, end) 

+

110 

+

111 

+

112def expensive(fn): 

+

113 """A decorator to indicate that a method shouldn't be called more than once. 

+

114 

+

115 Normally, this does nothing. During testing, this raises an exception if 

+

116 called more than once. 

+

117 

+

118 """ 

+

119 if env.TESTING: 

+

120 attr = "_once_" + fn.__name__ 

+

121 

+

122 def _wrapper(self): 

+

123 if hasattr(self, attr): 

+

124 raise AssertionError("Shouldn't have called %s more than once" % fn.__name__) 

+

125 setattr(self, attr, True) 

+

126 return fn(self) 

+

127 return _wrapper 

+

128 else: 

+

129 return fn # pragma: not testing 

+

130 

+

131 

+

132def bool_or_none(b): 

+

133 """Return bool(b), but preserve None.""" 

+

134 if b is None: 

+

135 return None 

+

136 else: 

+

137 return bool(b) 

+

138 

+

139 

+

140def join_regex(regexes): 

+

141 """Combine a list of regexes into one that matches any of them.""" 

+

142 return "|".join("(?:%s)" % r for r in regexes) 

+

143 

+

144 

+

145def file_be_gone(path): 

+

146 """Remove a file, and don't get annoyed if it doesn't exist.""" 

+

147 try: 

+

148 os.remove(path) 

+

149 except OSError as e: 

+

150 if e.errno != errno.ENOENT: 

+

151 raise 

+

152 

+

153 

+

154def ensure_dir(directory): 

+

155 """Make sure the directory exists. 

+

156 

+

157 If `directory` is None or empty, do nothing. 

+

158 """ 

+

159 if directory and not os.path.isdir(directory): 

+

160 os.makedirs(directory) 

+

161 

+

162 

+

163def ensure_dir_for_file(path): 

+

164 """Make sure the directory for the path exists.""" 

+

165 ensure_dir(os.path.dirname(path)) 

+

166 

+

167 

+

168def output_encoding(outfile=None): 

+

169 """Determine the encoding to use for output written to `outfile` or stdout.""" 

+

170 if outfile is None: 170 ↛ 172line 170 didn't jump to line 172

+

171 outfile = sys.stdout 

+

172 encoding = ( 

+

173 getattr(outfile, "encoding", None) or 

+

174 getattr(sys.__stdout__, "encoding", None) or 

+

175 locale.getpreferredencoding() 

+

176 ) 

+

177 return encoding 

+

178 

+

179 

+

180def filename_suffix(suffix): 

+

181 """Compute a filename suffix for a data file. 

+

182 

+

183 If `suffix` is a string or None, simply return it. If `suffix` is True, 

+

184 then build a suffix incorporating the hostname, process id, and a random 

+

185 number. 

+

186 

+

187 Returns a string or None. 

+

188 

+

189 """ 

+

190 if suffix is True: 

+

191 # If data_suffix was a simple true value, then make a suffix with 

+

192 # plenty of distinguishing information. We do this here in 

+

193 # `save()` at the last minute so that the pid will be correct even 

+

194 # if the process forks. 

+

195 dice = random.Random(os.urandom(8)).randint(0, 999999) 

+

196 suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), dice) 

+

197 return suffix 

+

198 

+

199 

+

200class Hasher(object): 

+

201 """Hashes Python data into md5.""" 

+

202 def __init__(self): 

+

203 self.md5 = hashlib.md5() 

+

204 

+

205 def update(self, v): 

+

206 """Add `v` to the hash, recursively if needed.""" 

+

207 self.md5.update(to_bytes(str(type(v)))) 

+

208 if isinstance(v, unicode_class): 

+

209 self.md5.update(v.encode('utf8')) 

+

210 elif isinstance(v, bytes): 

+

211 self.md5.update(v) 

+

212 elif v is None: 

+

213 pass 

+

214 elif isinstance(v, (int, float)): 

+

215 self.md5.update(to_bytes(str(v))) 

+

216 elif isinstance(v, (tuple, list)): 

+

217 for e in v: 

+

218 self.update(e) 

+

219 elif isinstance(v, dict): 

+

220 keys = v.keys() 

+

221 for k in sorted(keys): 

+

222 self.update(k) 

+

223 self.update(v[k]) 

+

224 else: 

+

225 for k in dir(v): 

+

226 if k.startswith('__'): 

+

227 continue 

+

228 a = getattr(v, k) 

+

229 if inspect.isroutine(a): 

+

230 continue 

+

231 self.update(k) 

+

232 self.update(a) 

+

233 self.md5.update(b'.') 

+

234 

+

235 def hexdigest(self): 

+

236 """Retrieve the hex digest of the hash.""" 

+

237 return self.md5.hexdigest() 

+

238 

+

239 
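Hasher reduces arbitrary nested Python data to a stable md5 digest, which is what the incremental HTML checker above uses to fingerprint its inputs. A small sketch, assuming coverage.py is importable:

from coverage.misc import Hasher

h = Hasher()
h.update({"files": ["a.py", "b.py"], "branch": True})   # dicts, lists and scalars all recurse
print(h.hexdigest())                                    # a 32-character hex digest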

+

240def _needs_to_implement(that, func_name): 

+

241 """Helper to raise NotImplementedError in interface stubs.""" 

+

242 if hasattr(that, "_coverage_plugin_name"): 242 ↛ 246line 242 didn't jump to line 246, because the condition on line 242 was never false

+

243 thing = "Plugin" 

+

244 name = that._coverage_plugin_name 

+

245 else: 

+

246 thing = "Class" 

+

247 klass = that.__class__ 

+

248 name = "{klass.__module__}.{klass.__name__}".format(klass=klass) 

+

249 

+

250 raise NotImplementedError( 

+

251 "{thing} {name!r} needs to implement {func_name}()".format( 

+

252 thing=thing, name=name, func_name=func_name 

+

253 ) 

+

254 ) 

+

255 

+

256 

+

257class DefaultValue(object): 

+

258 """A sentinel object to use for unusual default-value needs. 

+

259 

+

260 Construct with a string that will be used as the repr, for display in help 

+

261 and Sphinx output. 

+

262 

+

263 """ 

+

264 def __init__(self, display_as): 

+

265 self.display_as = display_as 

+

266 

+

267 def __repr__(self): 

+

268 return self.display_as 

+

269 

+

270 

+

271def substitute_variables(text, variables): 

+

272 """Substitute ``${VAR}`` variables in `text` with their values. 

+

273 

+

274 Variables in the text can take a number of shell-inspired forms:: 

+

275 

+

276 $VAR 

+

277 ${VAR} 

+

278 ${VAR?} strict: an error if VAR isn't defined. 

+

279 ${VAR-missing} defaulted: "missing" if VAR isn't defined. 

+

280 $$ just a dollar sign. 

+

281 

+

282 `variables` is a dictionary of variable values. 

+

283 

+

284 Returns the resulting text with values substituted. 

+

285 

+

286 """ 

+

287 dollar_pattern = r"""(?x) # Use extended regex syntax 

+

288 \$ # A dollar sign, 

+

289 (?: # then 

+

290 (?P<dollar>\$) | # a dollar sign, or 

+

291 (?P<word1>\w+) | # a plain word, or 

+

292 { # a {-wrapped 

+

293 (?P<word2>\w+) # word, 

+

294 (?: 

+

295 (?P<strict>\?) | # with a strict marker 

+

296 -(?P<defval>[^}]*) # or a default value 

+

297 )? # maybe. 

+

298 } 

+

299 ) 

+

300 """ 

+

301 

+

302 def dollar_replace(match): 

+

303 """Called for each $replacement.""" 

+

304 # Only one of the groups will have matched, just get its text. 

+

305 word = next(g for g in match.group('dollar', 'word1', 'word2') if g) 305 ↛ exitline 305 didn't finish the generator expression on line 305

+

306 if word == "$": 

+

307 return "$" 

+

308 elif word in variables: 

+

309 return variables[word] 

+

310 elif match.group('strict'): 

+

311 msg = "Variable {} is undefined: {!r}".format(word, text) 

+

312 raise CoverageException(msg) 

+

313 else: 

+

314 return match.group('defval') 

+

315 

+

316 text = re.sub(dollar_pattern, dollar_replace, text) 

+

317 return text 

+

318 

+

319 
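The substitution rules above are easiest to see with a concrete call. A sketch, assuming the function is imported from this module (coverage.misc):

from coverage.misc import substitute_variables

text = "data_file = ${COVERAGE_HOME-/tmp}/cov.db on pid $PID"
print(substitute_variables(text, {"PID": "1234"}))
# -> 'data_file = /tmp/cov.db on pid 1234'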

+

320class BaseCoverageException(Exception): 

+

321 """The base of all Coverage exceptions.""" 

+

322 pass 

+

323 

+

324 

+

325class CoverageException(BaseCoverageException): 

+

326 """An exception raised by a coverage.py function.""" 

+

327 pass 

+

328 

+

329 

+

330class NoSource(CoverageException): 

+

331 """We couldn't find the source for a module.""" 

+

332 pass 

+

333 

+

334 

+

335class NoCode(NoSource): 

+

336 """We couldn't find any code at all.""" 

+

337 pass 

+

338 

+

339 

+

340class NotPython(CoverageException): 

+

341 """A source file turned out not to be parsable Python.""" 

+

342 pass 

+

343 

+

344 

+

345class ExceptionDuringRun(CoverageException): 

+

346 """An exception happened while running customer code. 

+

347 

+

348 Construct it with three arguments, the values from `sys.exc_info`. 

+

349 

+

350 """ 

+

351 pass 

+

352 

+

353 

+

354class StopEverything(BaseCoverageException): 

+

355 """An exception that means everything should stop. 

+

356 

+

357 The CoverageTest class converts these to SkipTest, so that when running 

+

358 tests, raising this exception will automatically skip the test. 

+

359 

+

360 """ 

+

361 pass 

+
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_multiproc_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_multiproc_py.html
new file mode 100644
index 000000000..e82489bda
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_multiproc_py.html
@@ -0,0 +1,177 @@
Coverage for coverage/multiproc.py: 85.714%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Monkey-patching to add multiprocessing support for coverage.py""" 

+

5 

+

6import multiprocessing 

+

7import multiprocessing.process 

+

8import os 

+

9import os.path 

+

10import sys 

+

11import traceback 

+

12 

+

13from coverage import env 

+

14from coverage.misc import contract 

+

15 

+

16# An attribute that will be set on the module to indicate that it has been 

+

17# monkey-patched. 

+

18PATCHED_MARKER = "_coverage$patched" 

+

19 

+

20 

+

21if env.PYVERSION >= (3, 4): 

+

22 OriginalProcess = multiprocessing.process.BaseProcess 

+

23else: 

+

24 OriginalProcess = multiprocessing.Process 

+

25 

+

26original_bootstrap = OriginalProcess._bootstrap 

+

27 

+

28class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-method 

+

29 """A replacement for multiprocess.Process that starts coverage.""" 

+

30 

+

31 def _bootstrap(self, *args, **kwargs): 

+

32 """Wrapper around _bootstrap to start coverage.""" 

+

33 try: 

+

34 from coverage import Coverage # avoid circular import 

+

35 cov = Coverage(data_suffix=True) 

+

36 cov._warn_preimported_source = False 

+

37 cov.start() 

+

38 debug = cov._debug 

+

39 if debug.should("multiproc"): 39 ↛ 40line 39 didn't jump to line 40, because the condition on line 39 was never true

+

40 debug.write("Calling multiprocessing bootstrap") 

+

41 except Exception: 

+

42 print("Exception during multiprocessing bootstrap init:") 

+

43 traceback.print_exc(file=sys.stdout) 

+

44 sys.stdout.flush() 

+

45 raise 

+

46 try: 

+

47 return original_bootstrap(self, *args, **kwargs) 

+

48 finally: 

+

49 if debug.should("multiproc"): 49 ↛ 50line 49 didn't jump to line 50, because the condition on line 49 was never true

+

50 debug.write("Finished multiprocessing bootstrap") 

+

51 cov.stop() 

+

52 cov.save() 

+

53 if debug.should("multiproc"): 53 ↛ 54line 53 didn't jump to line 54, because the condition on line 53 was never true

+

54 debug.write("Saved multiprocessing data") 

+

55 

+

56class Stowaway(object): 

+

57 """An object to pickle, so when it is unpickled, it can apply the monkey-patch.""" 

+

58 def __init__(self, rcfile): 

+

59 self.rcfile = rcfile 

+

60 

+

61 def __getstate__(self): 

+

62 return {'rcfile': self.rcfile} 

+

63 

+

64 def __setstate__(self, state): 

+

65 patch_multiprocessing(state['rcfile']) 

+

66 

+

67 

+

68@contract(rcfile=str) 

+

69def patch_multiprocessing(rcfile): 

+

70 """Monkey-patch the multiprocessing module. 

+

71 

+

72 This enables coverage measurement of processes started by multiprocessing. 

+

73 This involves aggressive monkey-patching. 

+

74 

+

75 `rcfile` is the path to the rcfile being used. 

+

76 

+

77 """ 

+

78 

+

79 if hasattr(multiprocessing, PATCHED_MARKER): 

+

80 return 

+

81 

+

82 if env.PYVERSION >= (3, 4): 

+

83 OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap 

+

84 else: 

+

85 multiprocessing.Process = ProcessWithCoverage 

+

86 

+

87 # Set the value in ProcessWithCoverage that will be pickled into the child 

+

88 # process. 

+

89 os.environ["COVERAGE_RCFILE"] = os.path.abspath(rcfile) 

+

90 

+

91 # When spawning processes rather than forking them, we have no state in the 

+

92 # new process. We sneak in there with a Stowaway: we stuff one of our own 

+

93 # objects into the data that gets pickled and sent to the sub-process. When 

+

94 # the Stowaway is unpickled, its __setstate__ method is called, which 

+

95 # re-applies the monkey-patch. 

+

96 # Windows only spawns, so this is needed to keep Windows working. 

+

97 try: 

+

98 from multiprocessing import spawn 

+

99 original_get_preparation_data = spawn.get_preparation_data 

+

100 except (ImportError, AttributeError): 

+

101 pass 

+

102 else: 

+

103 def get_preparation_data_with_stowaway(name): 

+

104 """Get the original preparation data, and also insert our stowaway.""" 

+

105 d = original_get_preparation_data(name) 

+

106 d['stowaway'] = Stowaway(rcfile) 

+

107 return d 

+

108 

+

109 spawn.get_preparation_data = get_preparation_data_with_stowaway 

+

110 

+

111 setattr(multiprocessing, PATCHED_MARKER, True) 

+
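In normal use this patch is applied automatically when the run configuration enables multiprocessing concurrency; calling it by hand looks roughly like the sketch below, where the .coveragerc path is only an illustrative assumption:

from coverage.multiproc import patch_multiprocessing

# After this call, processes started via multiprocessing will begin their own
# Coverage measurement in the child (see ProcessWithCoverage above).
patch_multiprocessing(rcfile=".coveragerc")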
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_numbits_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_numbits_py.html
new file mode 100644
index 000000000..79e745641
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_numbits_py.html
@@ -0,0 +1,229 @@
Coverage for coverage/numbits.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4""" 

+

5Functions to manipulate packed binary representations of number sets. 

+

6 

+

7To save space, coverage stores sets of line numbers in SQLite using a packed 

+

8binary representation called a numbits. A numbits is a set of positive 

+

9integers. 

+

10 

+

11A numbits is stored as a blob in the database. The exact meaning of the bytes 

+

12in the blobs should be considered an implementation detail that might change in 

+

13the future. Use these functions to work with those binary blobs of data. 

+

14 

+

15""" 

+

16import json 

+

17 

+

18from coverage import env 

+

19from coverage.backward import byte_to_int, bytes_to_ints, binary_bytes, zip_longest 

+

20from coverage.misc import contract, new_contract 

+

21 

+

22if env.PY3: 

+

23 def _to_blob(b): 

+

24 """Convert a bytestring into a type SQLite will accept for a blob.""" 

+

25 return b 

+

26 

+

27 new_contract('blob', lambda v: isinstance(v, bytes)) 

+

28else: 

+

29 def _to_blob(b): 

+

30 """Convert a bytestring into a type SQLite will accept for a blob.""" 

+

31 return buffer(b) # pylint: disable=undefined-variable 

+

32 

+

33 new_contract('blob', lambda v: isinstance(v, buffer)) # pylint: disable=undefined-variable 

+

34 

+

35 

+

36@contract(nums='Iterable', returns='blob') 

+

37def nums_to_numbits(nums): 

+

38 """Convert `nums` into a numbits. 

+

39 

+

40 Arguments: 

+

41 nums: a reusable iterable of integers, the line numbers to store. 

+

42 

+

43 Returns: 

+

44 A binary blob. 

+

45 """ 

+

46 try: 

+

47 nbytes = max(nums) // 8 + 1 

+

48 except ValueError: 

+

49 # nums was empty. 

+

50 return _to_blob(b'') 

+

51 b = bytearray(nbytes) 

+

52 for num in nums: 

+

53 b[num//8] |= 1 << num % 8 

+

54 return _to_blob(bytes(b)) 

+

55 

+

56 

+

57@contract(numbits='blob', returns='list[int]') 

+

58def numbits_to_nums(numbits): 

+

59 """Convert a numbits into a list of numbers. 

+

60 

+

61 Arguments: 

+

62 numbits: a binary blob, the packed number set. 

+

63 

+

64 Returns: 

+

65 A list of ints. 

+

66 

+

67 When registered as a SQLite function by :func:`register_sqlite_functions`, 

+

68 this returns a string, a JSON-encoded list of ints. 

+

69 

+

70 """ 

+

71 nums = [] 

+

72 for byte_i, byte in enumerate(bytes_to_ints(numbits)): 

+

73 for bit_i in range(8): 

+

74 if (byte & (1 << bit_i)): 

+

75 nums.append(byte_i * 8 + bit_i) 

+

76 return nums 

+
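
As a standalone illustration of the format described above (a re-derivation of the scheme, not the library code, written for Python 3): line number n is stored as bit (n % 8) of byte (n // 8), so packing and unpacking round-trip a sorted list of line numbers.

    def pack(nums):
        """Pack positive ints into a blob: bit (n % 8) of byte (n // 8)."""
        if not nums:
            return b''
        b = bytearray(max(nums) // 8 + 1)
        for n in nums:
            b[n // 8] |= 1 << (n % 8)
        return bytes(b)

    def unpack(blob):
        """Recover the sorted list of ints from a packed blob."""
        return [
            byte_i * 8 + bit_i
            for byte_i, byte in enumerate(blob)   # bytes iterate as ints on Python 3
            for bit_i in range(8)
            if byte & (1 << bit_i)
        ]

    lines = [1, 2, 3, 17, 23]
    assert unpack(pack(lines)) == lines
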

77 

+

78 

+

79@contract(numbits1='blob', numbits2='blob', returns='blob') 

+

80def numbits_union(numbits1, numbits2): 

+

81 """Compute the union of two numbits. 

+

82 

+

83 Returns: 

+

84 A new numbits, the union of `numbits1` and `numbits2`. 

+

85 """ 

+

86 byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0) 

+

87 return _to_blob(binary_bytes(b1 | b2 for b1, b2 in byte_pairs)) 

+

88 

+

89 

+

90@contract(numbits1='blob', numbits2='blob', returns='blob') 

+

91def numbits_intersection(numbits1, numbits2): 

+

92 """Compute the intersection of two numbits. 

+

93 

+

94 Returns: 

+

95 A new numbits, the intersection of `numbits1` and `numbits2`. 

+

96 """ 

+

97 byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0) 

+

98 intersection_bytes = binary_bytes(b1 & b2 for b1, b2 in byte_pairs) 

+

99 return _to_blob(intersection_bytes.rstrip(b'\0')) 

+

100 

+

101 

+

102@contract(numbits1='blob', numbits2='blob', returns='bool') 

+

103def numbits_any_intersection(numbits1, numbits2): 

+

104 """Is there any number that appears in both numbits? 

+

105 

+

106 Determine whether two number sets have a non-empty intersection. This is 

+

107 faster than computing the intersection. 

+

108 

+

109 Returns: 

+

110 A bool, True if there is any number in both `numbits1` and `numbits2`. 

+

111 """ 

+

112 byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0) 

+

113 return any(b1 & b2 for b1, b2 in byte_pairs) 

+

114 

+

115 

+

116@contract(num='int', numbits='blob', returns='bool') 

+

117def num_in_numbits(num, numbits): 

+

118 """Does the integer `num` appear in `numbits`? 

+

119 

+

120 Returns: 

+

121 A bool, True if `num` is a member of `numbits`. 

+

122 """ 

+

123 nbyte, nbit = divmod(num, 8) 

+

124 if nbyte >= len(numbits): 

+

125 return False 

+

126 return bool(byte_to_int(numbits[nbyte]) & (1 << nbit)) 

+
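
A quick demonstration of the functions above, assuming a coverage 5.x install is importable; the values in the comments follow from the packing scheme.

    from coverage.numbits import (
        nums_to_numbits, numbits_to_nums, numbits_union,
        numbits_intersection, num_in_numbits,
    )

    a = nums_to_numbits([1, 2, 3, 40])
    b = nums_to_numbits([3, 40, 41])

    print(numbits_to_nums(numbits_union(a, b)))         # [1, 2, 3, 40, 41]
    print(numbits_to_nums(numbits_intersection(a, b)))  # [3, 40]
    print(num_in_numbits(2, a), num_in_numbits(2, b))   # True False
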

127 

+

128 

+

129def register_sqlite_functions(connection): 

+

130 """ 

+

131 Define numbits functions in a SQLite connection. 

+

132 

+

133 This defines these functions for use in SQLite statements: 

+

134 

+

135 * :func:`numbits_union` 

+

136 * :func:`numbits_intersection` 

+

137 * :func:`numbits_any_intersection` 

+

138 * :func:`num_in_numbits` 

+

139 * :func:`numbits_to_nums` 

+

140 

+

141 `connection` is a :class:`sqlite3.Connection <python:sqlite3.Connection>` 

+

142 object. After creating the connection, pass it to this function to 

+

143 register the numbits functions. Then you can use numbits functions in your 

+

144 queries:: 

+

145 

+

146 import sqlite3 

+

147 from coverage.numbits import register_sqlite_functions 

+

148 

+

149 conn = sqlite3.connect('example.db') 

+

150 register_sqlite_functions(conn) 

+

151 c = conn.cursor() 

+

152 # Kind of a nonsense query: find all the files and contexts that 

+

153 # executed line 47 in any file: 

+

154 c.execute( 

+

155 "select file_id, context_id from line_bits where num_in_numbits(?, numbits)", 

+

156 (47,) 

+

157 ) 

+

158 """ 

+

159 connection.create_function("numbits_union", 2, numbits_union) 

+

160 connection.create_function("numbits_intersection", 2, numbits_intersection) 

+

161 connection.create_function("numbits_any_intersection", 2, numbits_any_intersection) 

+

162 connection.create_function("num_in_numbits", 2, num_in_numbits) 

+

163 connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b))) 

+
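
Building on the docstring's fragment, a complete in-memory example (again assuming coverage 5.x; the table layout here is a toy stand-in, not the real coverage.py schema):

    import sqlite3
    from coverage.numbits import nums_to_numbits, register_sqlite_functions

    conn = sqlite3.connect(":memory:")
    register_sqlite_functions(conn)
    conn.execute("CREATE TABLE line_bits (file_id INT, context_id INT, numbits BLOB)")
    conn.execute("INSERT INTO line_bits VALUES (1, 1, ?)", (nums_to_numbits([10, 47, 99]),))
    rows = conn.execute(
        "SELECT file_id, numbits_to_nums(numbits) FROM line_bits"
        " WHERE num_in_numbits(?, numbits)",
        (47,),
    ).fetchall()
    print(rows)   # e.g. [(1, '[10, 47, 99]')]
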
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_parser_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_parser_py.html
new file mode 100644
index 000000000..73aac52cc
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_parser_py.html
@@ -0,0 +1,1334 @@
Coverage for coverage/parser.py: 98.558%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Code parsing for coverage.py.""" 

+

5 

+

6import ast 

+

7import collections 

+

8import os 

+

9import re 

+

10import token 

+

11import tokenize 

+

12 

+

13from coverage import env 

+

14from coverage.backward import range # pylint: disable=redefined-builtin 

+

15from coverage.backward import bytes_to_ints, string_class 

+

16from coverage.bytecode import code_objects 

+

17from coverage.debug import short_stack 

+

18from coverage.misc import contract, join_regex, new_contract, nice_pair, one_of 

+

19from coverage.misc import NoSource, NotPython, StopEverything 

+

20from coverage.phystokens import compile_unicode, generate_tokens, neuter_encoding_declaration 

+

21 

+

22 

+

23class PythonParser(object): 

+

24 """Parse code to find executable lines, excluded lines, etc. 

+

25 

+

26 This information is all based on static analysis: no code execution is 

+

27 involved. 

+

28 

+

29 """ 

+

30 @contract(text='unicode|None') 

+

31 def __init__(self, text=None, filename=None, exclude=None): 

+

32 """ 

+

33 Source can be provided as `text`, the text itself, or `filename`, from 

+

34 which the text will be read. Excluded lines are those that match 

+

35 `exclude`, a regex. 

+

36 

+

37 """ 

+

38 assert text or filename, "PythonParser needs either text or filename" 

+

39 self.filename = filename or "<code>" 

+

40 self.text = text 

+

41 if not self.text: 

+

42 from coverage.python import get_python_source 

+

43 try: 

+

44 self.text = get_python_source(self.filename) 

+

45 except IOError as err: 

+

46 raise NoSource( 

+

47 "No source for code: '%s': %s" % (self.filename, err) 

+

48 ) 

+

49 

+

50 self.exclude = exclude 

+

51 

+

52 # The text lines of the parsed code. 

+

53 self.lines = self.text.split('\n') 

+

54 

+

55 # The normalized line numbers of the statements in the code. Exclusions 

+

56 # are taken into account, and statements are adjusted to their first 

+

57 # lines. 

+

58 self.statements = set() 

+

59 

+

60 # The normalized line numbers of the excluded lines in the code, 

+

61 # adjusted to their first lines. 

+

62 self.excluded = set() 

+

63 

+

64 # The raw_* attributes are only used in this class, and in 

+

65 # lab/parser.py to show how this class is working. 

+

66 

+

67 # The line numbers that start statements, as reported by the line 

+

68 # number table in the bytecode. 

+

69 self.raw_statements = set() 

+

70 

+

71 # The raw line numbers of excluded lines of code, as marked by pragmas. 

+

72 self.raw_excluded = set() 

+

73 

+

74 # The line numbers of class and function definitions. 

+

75 self.raw_classdefs = set() 

+

76 

+

77 # The line numbers of docstring lines. 

+

78 self.raw_docstrings = set() 

+

79 

+

80 # Internal detail, used by lab/parser.py. 

+

81 self.show_tokens = False 

+

82 

+

83 # A dict mapping line numbers to lexical statement starts for 

+

84 # multi-line statements. 

+

85 self._multiline = {} 

+

86 

+

87 # Lazily-created ByteParser, arc data, and missing arc descriptions. 

+

88 self._byte_parser = None 

+

89 self._all_arcs = None 

+

90 self._missing_arc_fragments = None 

+

91 

+

92 @property 

+

93 def byte_parser(self): 

+

94 """Create a ByteParser on demand.""" 

+

95 if not self._byte_parser: 95 ↛ 97: line 95 didn't jump to line 97, because the condition on line 95 was never false

+

96 self._byte_parser = ByteParser(self.text, filename=self.filename) 

+

97 return self._byte_parser 

+

98 

+

99 def lines_matching(self, *regexes): 

+

100 """Find the lines matching one of a list of regexes. 

+

101 

+

102 Returns a set of line numbers, the lines that contain a match for one 

+

103 of the regexes in `regexes`. The entire line needn't match, just a 

+

104 part of it. 

+

105 

+

106 """ 

+

107 combined = join_regex(regexes) 

+

108 if env.PY2: 

+

109 combined = combined.decode("utf8") 

+

110 regex_c = re.compile(combined) 

+

111 matches = set() 

+

112 for i, ltext in enumerate(self.lines, start=1): 

+

113 if regex_c.search(ltext): 

+

114 matches.add(i) 

+

115 return matches 

+

116 

+

117 def _raw_parse(self): 

+

118 """Parse the source to find the interesting facts about its lines. 

+

119 

+

120 A handful of attributes are updated. 

+

121 

+

122 """ 

+

123 # Find lines which match an exclusion pattern. 

+

124 if self.exclude: 

+

125 self.raw_excluded = self.lines_matching(self.exclude) 

+

126 

+

127 # Tokenize, to find excluded suites, to find docstrings, and to find 

+

128 # multi-line statements. 

+

129 indent = 0 

+

130 exclude_indent = 0 

+

131 excluding = False 

+

132 excluding_decorators = False 

+

133 prev_toktype = token.INDENT 

+

134 first_line = None 

+

135 empty = True 

+

136 first_on_line = True 

+

137 

+

138 tokgen = generate_tokens(self.text) 

+

139 for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen: 

+

140 if self.show_tokens: # pragma: debugging 

+

141 print("%10s %5s %-20r %r" % ( 

+

142 tokenize.tok_name.get(toktype, toktype), 

+

143 nice_pair((slineno, elineno)), ttext, ltext 

+

144 )) 

+

145 if toktype == token.INDENT: 

+

146 indent += 1 

+

147 elif toktype == token.DEDENT: 

+

148 indent -= 1 

+

149 elif toktype == token.NAME: 

+

150 if ttext == 'class': 

+

151 # Class definitions look like branches in the bytecode, so 

+

152 # we need to exclude them. The simplest way is to note the 

+

153 # lines with the 'class' keyword. 

+

154 self.raw_classdefs.add(slineno) 

+

155 elif toktype == token.OP: 

+

156 if ttext == ':': 

+

157 should_exclude = (elineno in self.raw_excluded) or excluding_decorators 

+

158 if not excluding and should_exclude: 

+

159 # Start excluding a suite. We trigger off of the colon 

+

160 # token so that the #pragma comment will be recognized on 

+

161 # the same line as the colon. 

+

162 self.raw_excluded.add(elineno) 

+

163 exclude_indent = indent 

+

164 excluding = True 

+

165 excluding_decorators = False 

+

166 elif ttext == '@' and first_on_line: 

+

167 # A decorator. 

+

168 if elineno in self.raw_excluded: 

+

169 excluding_decorators = True 

+

170 if excluding_decorators: 

+

171 self.raw_excluded.add(elineno) 

+

172 elif toktype == token.STRING and prev_toktype == token.INDENT: 

+

173 # Strings that are first on an indented line are docstrings. 

+

174 # (a trick from trace.py in the stdlib.) This works for 

+

175 # 99.9999% of cases. For the rest (!) see: 

+

176 # http://stackoverflow.com/questions/1769332/x/1769794#1769794 

+

177 self.raw_docstrings.update(range(slineno, elineno+1)) 

+

178 elif toktype == token.NEWLINE: 

+

179 if first_line is not None and elineno != first_line: 

+

180 # We're at the end of a line, and we've ended on a 

+

181 # different line than the first line of the statement, 

+

182 # so record a multi-line range. 

+

183 for l in range(first_line, elineno+1): 

+

184 self._multiline[l] = first_line 

+

185 first_line = None 

+

186 first_on_line = True 

+

187 

+

188 if ttext.strip() and toktype != tokenize.COMMENT: 

+

189 # A non-whitespace token. 

+

190 empty = False 

+

191 if first_line is None: 

+

192 # The token is not whitespace, and is the first in a 

+

193 # statement. 

+

194 first_line = slineno 

+

195 # Check whether to end an excluded suite. 

+

196 if excluding and indent <= exclude_indent: 

+

197 excluding = False 

+

198 if excluding: 

+

199 self.raw_excluded.add(elineno) 

+

200 first_on_line = False 

+

201 

+

202 prev_toktype = toktype 

+

203 

+

204 # Find the starts of the executable statements. 

+

205 if not empty: 

+

206 self.raw_statements.update(self.byte_parser._find_statements()) 

+

207 

+

208 # The first line of modules can lie and say 1 always, even if the first 

+

209 # line of code is later. If so, map 1 to the actual first line of the 

+

210 # module. 

+

211 if env.PYBEHAVIOR.module_firstline_1 and self._multiline: 

+

212 self._multiline[1] = min(self.raw_statements) 

+

213 

+

214 def first_line(self, line): 

+

215 """Return the first line number of the statement including `line`.""" 

+

216 if line < 0: 

+

217 line = -self._multiline.get(-line, -line) 

+

218 else: 

+

219 line = self._multiline.get(line, line) 

+

220 return line 

+

221 

+

222 def first_lines(self, lines): 

+

223 """Map the line numbers in `lines` to the correct first line of the 

+

224 statement. 

+

225 

+

226 Returns a set of the first lines. 

+

227 

+

228 """ 

+

229 return {self.first_line(l) for l in lines} 

+

230 

+

231 def translate_lines(self, lines): 

+

232 """Implement `FileReporter.translate_lines`.""" 

+

233 return self.first_lines(lines) 

+

234 

+

235 def translate_arcs(self, arcs): 

+

236 """Implement `FileReporter.translate_arcs`.""" 

+

237 return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs] 

+

238 

+

239 def parse_source(self): 

+

240 """Parse source text to find executable lines, excluded lines, etc. 

+

241 

+

242 Sets the .excluded and .statements attributes, normalized to the first 

+

243 line of multi-line statements. 

+

244 

+

245 """ 

+

246 try: 

+

247 self._raw_parse() 

+

248 except (tokenize.TokenError, IndentationError) as err: 

+

249 if hasattr(err, "lineno"): 

+

250 lineno = err.lineno # IndentationError 

+

251 else: 

+

252 lineno = err.args[1][0] # TokenError 

+

253 raise NotPython( 

+

254 u"Couldn't parse '%s' as Python source: '%s' at line %d" % ( 

+

255 self.filename, err.args[0], lineno 

+

256 ) 

+

257 ) 

+

258 

+

259 self.excluded = self.first_lines(self.raw_excluded) 

+

260 

+

261 ignore = self.excluded | self.raw_docstrings 

+

262 starts = self.raw_statements - ignore 

+

263 self.statements = self.first_lines(starts) - ignore 

+

264 

+

265 def arcs(self): 

+

266 """Get information about the arcs available in the code. 

+

267 

+

268 Returns a set of line number pairs. Line numbers have been normalized 

+

269 to the first line of multi-line statements. 

+

270 

+

271 """ 

+

272 if self._all_arcs is None: 

+

273 self._analyze_ast() 

+

274 return self._all_arcs 

+

275 

+

276 def _analyze_ast(self): 

+

277 """Run the AstArcAnalyzer and save its results. 

+

278 

+

279 `_all_arcs` is the set of arcs in the code. 

+

280 

+

281 """ 

+

282 aaa = AstArcAnalyzer(self.text, self.raw_statements, self._multiline) 

+

283 aaa.analyze() 

+

284 

+

285 self._all_arcs = set() 

+

286 for l1, l2 in aaa.arcs: 

+

287 fl1 = self.first_line(l1) 

+

288 fl2 = self.first_line(l2) 

+

289 if fl1 != fl2: 

+

290 self._all_arcs.add((fl1, fl2)) 

+

291 

+

292 self._missing_arc_fragments = aaa.missing_arc_fragments 

+

293 

+

294 def exit_counts(self): 

+

295 """Get a count of exits from that each line. 

+

296 

+

297 Excluded lines are excluded. 

+

298 

+

299 """ 

+

300 exit_counts = collections.defaultdict(int) 

+

301 for l1, l2 in self.arcs(): 

+

302 if l1 < 0: 

+

303 # Don't ever report -1 as a line number 

+

304 continue 

+

305 if l1 in self.excluded: 

+

306 # Don't report excluded lines as line numbers. 

+

307 continue 

+

308 if l2 in self.excluded: 

+

309 # Arcs to excluded lines shouldn't count. 

+

310 continue 

+

311 exit_counts[l1] += 1 

+

312 

+

313 # Class definitions have one extra exit, so remove one for each: 

+

314 for l in self.raw_classdefs: 

+

315 # Ensure key is there: class definitions can include excluded lines. 

+

316 if l in exit_counts: 

+

317 exit_counts[l] -= 1 

+

318 

+

319 return exit_counts 

+

320 

+

321 def missing_arc_description(self, start, end, executed_arcs=None): 

+

322 """Provide an English sentence describing a missing arc.""" 

+

323 if self._missing_arc_fragments is None: 

+

324 self._analyze_ast() 

+

325 

+

326 actual_start = start 

+

327 

+

328 if ( 

+

329 executed_arcs and 

+

330 end < 0 and end == -start and 

+

331 (end, start) not in executed_arcs and 

+

332 (end, start) in self._missing_arc_fragments 

+

333 ): 

+

334 # It's a one-line callable, and we never even started it, 

+

335 # and we have a message about not starting it. 

+

336 start, end = end, start 

+

337 

+

338 fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)]) 

+

339 

+

340 msgs = [] 

+

341 for smsg, emsg in fragment_pairs: 

+

342 if emsg is None: 

+

343 if end < 0: 

+

344 # Hmm, maybe we have a one-line callable, let's check. 

+

345 if (-end, end) in self._missing_arc_fragments: 345 ↛ 347: line 345 didn't jump to line 347, because the condition on line 345 was never false

+

346 return self.missing_arc_description(-end, end) 

+

347 emsg = "didn't jump to the function exit" 

+

348 else: 

+

349 emsg = "didn't jump to line {lineno}" 

+

350 emsg = emsg.format(lineno=end) 

+

351 

+

352 msg = "line {start} {emsg}".format(start=actual_start, emsg=emsg) 

+

353 if smsg is not None: 

+

354 msg += ", because {smsg}".format(smsg=smsg.format(lineno=actual_start)) 

+

355 

+

356 msgs.append(msg) 

+

357 

+

358 return " or ".join(msgs) 

+
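
For orientation, this is roughly how the class above is driven; a hedged sketch based only on the methods shown here, assuming a coverage 5.x install is importable.

    from coverage.parser import PythonParser

    source = (
        u"def f(x):\n"
        u"    if x:  # pragma: no cover\n"
        u"        return 1\n"
        u"    return 2\n"
    )
    parser = PythonParser(text=source, exclude=r"pragma: no cover")
    parser.parse_source()
    print(sorted(parser.statements))   # executable first lines, e.g. [1, 4]
    print(sorted(parser.excluded))     # excluded lines, e.g. [2, 3]
    print(sorted(parser.arcs()))       # (from, to) line-number pairs from the AST analysis
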

359 

+

360 

+

361class ByteParser(object): 

+

362 """Parse bytecode to understand the structure of code.""" 

+

363 

+

364 @contract(text='unicode') 

+

365 def __init__(self, text, code=None, filename=None): 

+

366 self.text = text 

+

367 if code: 

+

368 self.code = code 

+

369 else: 

+

370 try: 

+

371 self.code = compile_unicode(text, filename, "exec") 

+

372 except SyntaxError as synerr: 

+

373 raise NotPython( 

+

374 u"Couldn't parse '%s' as Python source: '%s' at line %d" % ( 

+

375 filename, synerr.msg, synerr.lineno 

+

376 ) 

+

377 ) 

+

378 

+

379 # Alternative Python implementations don't always provide all the 

+

380 # attributes on code objects that we need to do the analysis. 

+

381 for attr in ['co_lnotab', 'co_firstlineno']: 

+

382 if not hasattr(self.code, attr): 

+

383 raise StopEverything( # pragma: only jython 

+

384 "This implementation of Python doesn't support code analysis.\n" 

+

385 "Run coverage.py under another Python for this command." 

+

386 ) 

+

387 

+

388 def child_parsers(self): 

+

389 """Iterate over all the code objects nested within this one. 

+

390 

+

391 The iteration includes `self` as its first value. 

+

392 

+

393 """ 

+

394 return (ByteParser(self.text, code=c) for c in code_objects(self.code)) 

+

395 

+

396 def _line_numbers(self): 

+

397 """Yield the line numbers possible in this code object. 

+

398 

+

399 Uses co_lnotab described in Python/compile.c to find the 

+

400 line numbers. Produces a sequence: l0, l1, ... 

+

401 """ 

+

402 if hasattr(self.code, "co_lines"): 

+

403 for _, _, line in self.code.co_lines(): 

+

404 if line is not None: 

+

405 yield line 

+

406 else: 

+

407 # Adapted from dis.py in the standard library. 

+

408 byte_increments = bytes_to_ints(self.code.co_lnotab[0::2]) 

+

409 line_increments = bytes_to_ints(self.code.co_lnotab[1::2]) 

+

410 

+

411 last_line_num = None 

+

412 line_num = self.code.co_firstlineno 

+

413 byte_num = 0 

+

414 for byte_incr, line_incr in zip(byte_increments, line_increments): 

+

415 if byte_incr: 

+

416 if line_num != last_line_num: 

+

417 yield line_num 

+

418 last_line_num = line_num 

+

419 byte_num += byte_incr 

+

420 if env.PYBEHAVIOR.negative_lnotab and line_incr >= 0x80: 

+

421 line_incr -= 0x100 

+

422 line_num += line_incr 

+

423 if line_num != last_line_num: 

+

424 yield line_num 

+

425 

+

426 def _find_statements(self): 

+

427 """Find the statements in `self.code`. 

+

428 

+

429 Produce a sequence of line numbers that start statements. Recurses 

+

430 into all code objects reachable from `self.code`. 

+

431 

+

432 """ 

+

433 for bp in self.child_parsers(): 

+

434 # Get all of the lineno information from this code. 

+

435 for l in bp._line_numbers(): 

+

436 yield l 

+
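
The same line-number information can be inspected with the standard library, which decodes co_lnotab / co_lines() for you; a small illustrative snippet, unrelated to the measured source above.

    import dis

    def sample(x):
        y = x + 1
        return y * 2

    # dis.findlinestarts() yields (bytecode offset, source line number) pairs.
    starts = [line for _, line in dis.findlinestarts(sample.__code__) if line is not None]
    print(sorted(set(starts)))
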

437 

+

438 

+

439# 

+

440# AST analysis 

+

441# 

+

442 

+

443class LoopBlock(object): 

+

444 """A block on the block stack representing a `for` or `while` loop.""" 

+

445 @contract(start=int) 

+

446 def __init__(self, start): 

+

447 # The line number where the loop starts. 

+

448 self.start = start 

+

449 # A set of ArcStarts, the arcs from break statements exiting this loop. 

+

450 self.break_exits = set() 

+

451 

+

452 

+

453class FunctionBlock(object): 

+

454 """A block on the block stack representing a function definition.""" 

+

455 @contract(start=int, name=str) 

+

456 def __init__(self, start, name): 

+

457 # The line number where the function starts. 

+

458 self.start = start 

+

459 # The name of the function. 

+

460 self.name = name 

+

461 

+

462 

+

463class TryBlock(object): 

+

464 """A block on the block stack representing a `try` block.""" 

+

465 @contract(handler_start='int|None', final_start='int|None') 

+

466 def __init__(self, handler_start, final_start): 

+

467 # The line number of the first "except" handler, if any. 

+

468 self.handler_start = handler_start 

+

469 # The line number of the "finally:" clause, if any. 

+

470 self.final_start = final_start 

+

471 

+

472 # The ArcStarts for breaks/continues/returns/raises inside the "try:" 

+

473 # that need to route through the "finally:" clause. 

+

474 self.break_from = set() 

+

475 self.continue_from = set() 

+

476 self.return_from = set() 

+

477 self.raise_from = set() 

+

478 

+

479 

+

480class ArcStart(collections.namedtuple("Arc", "lineno, cause")): 

+

481 """The information needed to start an arc. 

+

482 

+

483 `lineno` is the line number the arc starts from. 

+

484 

+

485 `cause` is an English text fragment used as the `startmsg` for 

+

486 AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an 

+

487 arc wasn't executed, so should fit well into a sentence of the form, 

+

488 "Line 17 didn't run because {cause}." The fragment can include "{lineno}" 

+

489 to have `lineno` interpolated into it. 

+

490 

+

491 """ 

+

492 def __new__(cls, lineno, cause=None): 

+

493 return super(ArcStart, cls).__new__(cls, lineno, cause) 

+

494 

+

495 

+

496# Define contract words that PyContract doesn't have. 

+

497# ArcStarts is for a list or set of ArcStart's. 

+

498new_contract('ArcStarts', lambda seq: all(isinstance(x, ArcStart) for x in seq)) 

+

499 

+

500 

+

501# Turn on AST dumps with an environment variable. 

+

502# $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code. 

+

503AST_DUMP = bool(int(os.environ.get("COVERAGE_AST_DUMP", 0))) 

+

504 

+

505class NodeList(object): 

+

506 """A synthetic fictitious node, containing a sequence of nodes. 

+

507 

+

508 This is used when collapsing optimized if-statements, to represent the 

+

509 unconditional execution of one of the clauses. 

+

510 

+

511 """ 

+

512 def __init__(self, body): 

+

513 self.body = body 

+

514 self.lineno = body[0].lineno 

+

515 

+

516 

+

517# TODO: some add_arcs methods here don't add arcs, they return them. Rename them. 

+

518# TODO: the cause messages have too many commas. 

+

519# TODO: Shouldn't the cause messages join with "and" instead of "or"? 

+

520 

+

521class AstArcAnalyzer(object): 

+

522 """Analyze source text with an AST to find executable code paths.""" 

+

523 

+

524 @contract(text='unicode', statements=set) 

+

525 def __init__(self, text, statements, multiline): 

+

526 self.root_node = ast.parse(neuter_encoding_declaration(text)) 

+

527 # TODO: I think this is happening in too many places. 

+

528 self.statements = {multiline.get(l, l) for l in statements} 

+

529 self.multiline = multiline 

+

530 

+

531 if AST_DUMP: # pragma: debugging 

+

532 # Dump the AST so that failing tests have helpful output. 

+

533 print("Statements: {}".format(self.statements)) 

+

534 print("Multiline map: {}".format(self.multiline)) 

+

535 ast_dump(self.root_node) 

+

536 

+

537 self.arcs = set() 

+

538 

+

539 # A map from arc pairs to a list of pairs of sentence fragments: 

+

540 # { (start, end): [(startmsg, endmsg), ...], } 

+

541 # 

+

542 # For an arc from line 17, they should be usable like: 

+

543 # "Line 17 {endmsg}, because {startmsg}" 

+

544 self.missing_arc_fragments = collections.defaultdict(list) 

+

545 self.block_stack = [] 

+

546 

+

547 # $set_env.py: COVERAGE_TRACK_ARCS - Trace every arc added while parsing code. 

+

548 self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0))) 

+

549 

+

550 def analyze(self): 

+

551 """Examine the AST tree from `root_node` to determine possible arcs. 

+

552 

+

553 This sets the `arcs` attribute to be a set of (from, to) line number 

+

554 pairs. 

+

555 

+

556 """ 

+

557 for node in ast.walk(self.root_node): 

+

558 node_name = node.__class__.__name__ 

+

559 code_object_handler = getattr(self, "_code_object__" + node_name, None) 

+

560 if code_object_handler is not None: 

+

561 code_object_handler(node) 

+

562 

+

563 @contract(start=int, end=int) 

+

564 def add_arc(self, start, end, smsg=None, emsg=None): 

+

565 """Add an arc, including message fragments to use if it is missing.""" 

+

566 if self.debug: # pragma: debugging 

+

567 print("\nAdding arc: ({}, {}): {!r}, {!r}".format(start, end, smsg, emsg)) 

+

568 print(short_stack(limit=6)) 

+

569 self.arcs.add((start, end)) 

+

570 

+

571 if smsg is not None or emsg is not None: 

+

572 self.missing_arc_fragments[(start, end)].append((smsg, emsg)) 

+

573 

+

574 def nearest_blocks(self): 

+

575 """Yield the blocks in nearest-to-farthest order.""" 

+

576 return reversed(self.block_stack) 

+

577 

+

578 @contract(returns=int) 

+

579 def line_for_node(self, node): 

+

580 """What is the right line number to use for this node? 

+

581 

+

582 This dispatches to _line__Node functions where needed. 

+

583 

+

584 """ 

+

585 node_name = node.__class__.__name__ 

+

586 handler = getattr(self, "_line__" + node_name, None) 

+

587 if handler is not None: 

+

588 return handler(node) 

+

589 else: 

+

590 return node.lineno 

+

591 

+

592 def _line_decorated(self, node): 

+

593 """Compute first line number for things that can be decorated (classes and functions).""" 

+

594 lineno = node.lineno 

+

595 if env.PYBEHAVIOR.trace_decorated_def: 

+

596 if node.decorator_list: 

+

597 lineno = node.decorator_list[0].lineno 

+

598 return lineno 

+

599 

+

600 def _line__Assign(self, node): 

+

601 return self.line_for_node(node.value) 

+

602 

+

603 _line__ClassDef = _line_decorated 

+

604 

+

605 def _line__Dict(self, node): 

+

606 # Python 3.5 changed how dict literals are made. 

+

607 if env.PYVERSION >= (3, 5) and node.keys: 

+

608 if node.keys[0] is not None: 

+

609 return node.keys[0].lineno 

+

610 else: 

+

611 # Unpacked dict literals `{**{'a':1}}` have None as the key, 

+

612 # use the value in that case. 

+

613 return node.values[0].lineno 

+

614 else: 

+

615 return node.lineno 

+

616 

+

617 _line__FunctionDef = _line_decorated 

+

618 _line__AsyncFunctionDef = _line_decorated 

+

619 

+

620 def _line__List(self, node): 

+

621 if node.elts: 

+

622 return self.line_for_node(node.elts[0]) 

+

623 else: 

+

624 return node.lineno 

+

625 

+

626 def _line__Module(self, node): 

+

627 if env.PYBEHAVIOR.module_firstline_1: 

+

628 return 1 

+

629 elif node.body: 

+

630 return self.line_for_node(node.body[0]) 

+

631 else: 

+

632 # Empty modules have no line number, they always start at 1. 

+

633 return 1 

+

634 

+

635 # The node types that just flow to the next node with no complications. 

+

636 OK_TO_DEFAULT = { 

+

637 "Assign", "Assert", "AugAssign", "Delete", "Exec", "Expr", "Global", 

+

638 "Import", "ImportFrom", "Nonlocal", "Pass", "Print", 

+

639 } 

+

640 

+

641 @contract(returns='ArcStarts') 

+

642 def add_arcs(self, node): 

+

643 """Add the arcs for `node`. 

+

644 

+

645 Return a set of ArcStarts, exits from this node to the next. Because a 

+

646 node represents an entire sub-tree (including its children), the exits 

+

647 from a node can be arbitrarily complex:: 

+

648 

+

649 if something(1): 

+

650 if other(2): 

+

651 doit(3) 

+

652 else: 

+

653 doit(5) 

+

654 

+

655 There are two exits from line 1: they start at line 3 and line 5. 

+

656 

+

657 """ 

+

658 node_name = node.__class__.__name__ 

+

659 handler = getattr(self, "_handle__" + node_name, None) 

+

660 if handler is not None: 

+

661 return handler(node) 

+

662 else: 

+

663 # No handler: either it's something that's ok to default (a simple 

+

664 # statement), or it's something we overlooked. Change this 0 to 1 

+

665 # to see if it's overlooked. 

+

666 if 0: 

+

667 if node_name not in self.OK_TO_DEFAULT: 

+

668 print("*** Unhandled: {}".format(node)) 

+

669 

+

670 # Default for simple statements: one exit from this node. 

+

671 return {ArcStart(self.line_for_node(node))} 

+

672 

+

673 @one_of("from_start, prev_starts") 

+

674 @contract(returns='ArcStarts') 

+

675 def add_body_arcs(self, body, from_start=None, prev_starts=None): 

+

676 """Add arcs for the body of a compound statement. 

+

677 

+

678 `body` is the body node. `from_start` is a single `ArcStart` that can 

+

679 be the previous line in flow before this body. `prev_starts` is a set 

+

680 of ArcStarts that can be the previous line. Only one of them should be 

+

681 given. 

+

682 

+

683 Returns a set of ArcStarts, the exits from this body. 

+

684 

+

685 """ 

+

686 if prev_starts is None: 

+

687 prev_starts = {from_start} 

+

688 for body_node in body: 

+

689 lineno = self.line_for_node(body_node) 

+

690 first_line = self.multiline.get(lineno, lineno) 

+

691 if first_line not in self.statements: 

+

692 body_node = self.find_non_missing_node(body_node) 

+

693 if body_node is None: 

+

694 continue 

+

695 lineno = self.line_for_node(body_node) 

+

696 for prev_start in prev_starts: 

+

697 self.add_arc(prev_start.lineno, lineno, prev_start.cause) 

+

698 prev_starts = self.add_arcs(body_node) 

+

699 return prev_starts 

+

700 

+

701 def find_non_missing_node(self, node): 

+

702 """Search `node` looking for a child that has not been optimized away. 

+

703 

+

704 This might return the node you started with, or it will work recursively 

+

705 to find a child node in self.statements. 

+

706 

+

707 Returns a node, or None if none of the node remains. 

+

708 

+

709 """ 

+

710 # This repeats work just done in add_body_arcs, but this duplication 

+

711 # means we can avoid a function call in the 99.9999% case of not 

+

712 # optimizing away statements. 

+

713 lineno = self.line_for_node(node) 

+

714 first_line = self.multiline.get(lineno, lineno) 

+

715 if first_line in self.statements: 

+

716 return node 

+

717 

+

718 missing_fn = getattr(self, "_missing__" + node.__class__.__name__, None) 

+

719 if missing_fn: 

+

720 node = missing_fn(node) 

+

721 else: 

+

722 node = None 

+

723 return node 

+

724 

+

725 # Missing nodes: _missing__* 

+

726 # 

+

727 # Entire statements can be optimized away by Python. They will appear in 

+

728 # the AST, but not the bytecode. These functions are called (by 

+

729 # find_non_missing_node) to find a node to use instead of the missing 

+

730 # node. They can return None if the node should truly be gone. 

+

731 

+

732 def _missing__If(self, node): 

+

733 # If the if-node is missing, then one of its children might still be 

+

734 # here, but not both. So return the first of the two that isn't missing. 

+

735 # Use a NodeList to hold the clauses as a single node. 

+

736 non_missing = self.find_non_missing_node(NodeList(node.body)) 

+

737 if non_missing: 

+

738 return non_missing 

+

739 if node.orelse: 

+

740 return self.find_non_missing_node(NodeList(node.orelse)) 

+

741 return None 

+

742 

+

743 def _missing__NodeList(self, node): 

+

744 # A NodeList might be a mixture of missing and present nodes. Find the 

+

745 # ones that are present. 

+

746 non_missing_children = [] 

+

747 for child in node.body: 

+

748 child = self.find_non_missing_node(child) 

+

749 if child is not None: 

+

750 non_missing_children.append(child) 

+

751 

+

752 # Return the simplest representation of the present children. 

+

753 if not non_missing_children: 

+

754 return None 

+

755 if len(non_missing_children) == 1: 

+

756 return non_missing_children[0] 

+

757 return NodeList(non_missing_children) 

+

758 

+

759 def _missing__While(self, node): 

+

760 body_nodes = self.find_non_missing_node(NodeList(node.body)) 

+

761 if not body_nodes: 761 ↛ 762: line 761 didn't jump to line 762, because the condition on line 761 was never true

+

762 return None 

+

763 # Make a synthetic While-true node. 

+

764 new_while = ast.While() 

+

765 new_while.lineno = body_nodes.lineno 

+

766 new_while.test = ast.Name() 

+

767 new_while.test.lineno = body_nodes.lineno 

+

768 new_while.test.id = "True" 

+

769 new_while.body = body_nodes.body 

+

770 new_while.orelse = None 

+

771 return new_while 

+

772 

+

773 def is_constant_expr(self, node): 

+

774 """Is this a compile-time constant?""" 

+

775 node_name = node.__class__.__name__ 

+

776 if node_name in ["Constant", "NameConstant", "Num"]: 

+

777 return "Num" 

+

778 elif node_name == "Name": 

+

779 if node.id in ["True", "False", "None", "__debug__"]: 

+

780 return "Name" 

+

781 return None 

+

782 

+

783 # In the fullness of time, these might be good tests to write: 

+

784 # while EXPR: 

+

785 # while False: 

+

786 # listcomps hidden deep in other expressions 

+

787 # listcomps hidden in lists: x = [[i for i in range(10)]] 

+

788 # nested function definitions 

+

789 

+

790 

+

791 # Exit processing: process_*_exits 

+

792 # 

+

793 # These functions process the four kinds of jump exits: break, continue, 

+

794 # raise, and return. To figure out where an exit goes, we have to look at 

+

795 # the block stack context. For example, a break will jump to the nearest 

+

796 # enclosing loop block, or the nearest enclosing finally block, whichever 

+

797 # is nearer. 

+

798 

+

799 @contract(exits='ArcStarts') 

+

800 def process_break_exits(self, exits): 

+

801 """Add arcs due to jumps from `exits` being breaks.""" 

+

802 for block in self.nearest_blocks(): 802 ↛ exit: line 802 didn't return from function 'process_break_exits', because the loop on line 802 didn't complete

+

803 if isinstance(block, LoopBlock): 

+

804 block.break_exits.update(exits) 

+

805 break 

+

806 elif isinstance(block, TryBlock) and block.final_start is not None: 806 ↛ 802: line 806 didn't jump to line 802, because the condition on line 806 was never false

+

807 block.break_from.update(exits) 

+

808 break 

+

809 

+

810 @contract(exits='ArcStarts') 

+

811 def process_continue_exits(self, exits): 

+

812 """Add arcs due to jumps from `exits` being continues.""" 

+

813 for block in self.nearest_blocks(): 813 ↛ exit: line 813 didn't return from function 'process_continue_exits', because the loop on line 813 didn't complete

+

814 if isinstance(block, LoopBlock): 

+

815 for xit in exits: 

+

816 self.add_arc(xit.lineno, block.start, xit.cause) 

+

817 break 

+

818 elif isinstance(block, TryBlock) and block.final_start is not None: 818 ↛ 813: line 818 didn't jump to line 813, because the condition on line 818 was never false

+

819 block.continue_from.update(exits) 

+

820 break 

+

821 

+

822 @contract(exits='ArcStarts') 

+

823 def process_raise_exits(self, exits): 

+

824 """Add arcs due to jumps from `exits` being raises.""" 

+

825 for block in self.nearest_blocks(): 

+

826 if isinstance(block, TryBlock): 

+

827 if block.handler_start is not None: 

+

828 for xit in exits: 

+

829 self.add_arc(xit.lineno, block.handler_start, xit.cause) 

+

830 break 

+

831 elif block.final_start is not None: 831 ↛ 825: line 831 didn't jump to line 825, because the condition on line 831 was never false

+

832 block.raise_from.update(exits) 

+

833 break 

+

834 elif isinstance(block, FunctionBlock): 

+

835 for xit in exits: 

+

836 self.add_arc( 

+

837 xit.lineno, -block.start, xit.cause, 

+

838 "didn't except from function {!r}".format(block.name), 

+

839 ) 

+

840 break 

+

841 

+

842 @contract(exits='ArcStarts') 

+

843 def process_return_exits(self, exits): 

+

844 """Add arcs due to jumps from `exits` being returns.""" 

+

845 for block in self.nearest_blocks(): 845 ↛ exit: line 845 didn't return from function 'process_return_exits', because the loop on line 845 didn't complete

+

846 if isinstance(block, TryBlock) and block.final_start is not None: 

+

847 block.return_from.update(exits) 

+

848 break 

+

849 elif isinstance(block, FunctionBlock): 

+

850 for xit in exits: 

+

851 self.add_arc( 

+

852 xit.lineno, -block.start, xit.cause, 

+

853 "didn't return from function {!r}".format(block.name), 

+

854 ) 

+

855 break 

+

856 

+

857 

+

858 # Handlers: _handle__* 

+

859 # 

+

860 # Each handler deals with a specific AST node type, dispatched from 

+

861 # add_arcs. Handlers return the set of exits from that node, and can 

+

862 # also call self.add_arc to record arcs they find. These functions mirror 

+

863 # the Python semantics of each syntactic construct. See the docstring 

+

864 # for add_arcs to understand the concept of exits from a node. 

+

865 

+

866 @contract(returns='ArcStarts') 

+

867 def _handle__Break(self, node): 

+

868 here = self.line_for_node(node) 

+

869 break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed") 

+

870 self.process_break_exits([break_start]) 

+

871 return set() 

+

872 

+

873 @contract(returns='ArcStarts') 

+

874 def _handle_decorated(self, node): 

+

875 """Add arcs for things that can be decorated (classes and functions).""" 

+

876 main_line = last = node.lineno 

+

877 if node.decorator_list: 

+

878 if env.PYBEHAVIOR.trace_decorated_def: 

+

879 last = None 

+

880 for dec_node in node.decorator_list: 

+

881 dec_start = self.line_for_node(dec_node) 

+

882 if last is not None and dec_start != last: 

+

883 self.add_arc(last, dec_start) 

+

884 last = dec_start 

+

885 if env.PYBEHAVIOR.trace_decorated_def: 

+

886 self.add_arc(last, main_line) 

+

887 last = main_line 

+

888 # The definition line may have been missed, but we should have it 

+

889 # in `self.statements`. For some constructs, `line_for_node` is 

+

890 # not what we'd think of as the first line in the statement, so map 

+

891 # it to the first one. 

+

892 if node.body: 892 ↛ 900: line 892 didn't jump to line 900, because the condition on line 892 was never false

+

893 body_start = self.line_for_node(node.body[0]) 

+

894 body_start = self.multiline.get(body_start, body_start) 

+

895 for lineno in range(last+1, body_start): 

+

896 if lineno in self.statements: 

+

897 self.add_arc(last, lineno) 

+

898 last = lineno 

+

899 # The body is handled in collect_arcs. 

+

900 return {ArcStart(last)} 

+

901 

+

902 _handle__ClassDef = _handle_decorated 

+

903 

+

904 @contract(returns='ArcStarts') 

+

905 def _handle__Continue(self, node): 

+

906 here = self.line_for_node(node) 

+

907 continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed") 

+

908 self.process_continue_exits([continue_start]) 

+

909 return set() 

+

910 

+

911 @contract(returns='ArcStarts') 

+

912 def _handle__For(self, node): 

+

913 start = self.line_for_node(node.iter) 

+

914 self.block_stack.append(LoopBlock(start=start)) 

+

915 from_start = ArcStart(start, cause="the loop on line {lineno} never started") 

+

916 exits = self.add_body_arcs(node.body, from_start=from_start) 

+

917 # Any exit from the body will go back to the top of the loop. 

+

918 for xit in exits: 

+

919 self.add_arc(xit.lineno, start, xit.cause) 

+

920 my_block = self.block_stack.pop() 

+

921 exits = my_block.break_exits 

+

922 from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete") 

+

923 if node.orelse: 

+

924 else_exits = self.add_body_arcs(node.orelse, from_start=from_start) 

+

925 exits |= else_exits 

+

926 else: 

+

927 # No else clause: exit from the for line. 

+

928 exits.add(from_start) 

+

929 return exits 

+

930 

+

931 _handle__AsyncFor = _handle__For 

+

932 

+

933 _handle__FunctionDef = _handle_decorated 

+

934 _handle__AsyncFunctionDef = _handle_decorated 

+

935 

+

936 @contract(returns='ArcStarts') 

+

937 def _handle__If(self, node): 

+

938 start = self.line_for_node(node.test) 

+

939 from_start = ArcStart(start, cause="the condition on line {lineno} was never true") 

+

940 exits = self.add_body_arcs(node.body, from_start=from_start) 

+

941 from_start = ArcStart(start, cause="the condition on line {lineno} was never false") 

+

942 exits |= self.add_body_arcs(node.orelse, from_start=from_start) 

+

943 return exits 

+

944 

+

945 @contract(returns='ArcStarts') 

+

946 def _handle__NodeList(self, node): 

+

947 start = self.line_for_node(node) 

+

948 exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) 

+

949 return exits 

+

950 

+

951 @contract(returns='ArcStarts') 

+

952 def _handle__Raise(self, node): 

+

953 here = self.line_for_node(node) 

+

954 raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed") 

+

955 self.process_raise_exits([raise_start]) 

+

956 # `raise` statement jumps away, no exits from here. 

+

957 return set() 

+

958 

+

959 @contract(returns='ArcStarts') 

+

960 def _handle__Return(self, node): 

+

961 here = self.line_for_node(node) 

+

962 return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed") 

+

963 self.process_return_exits([return_start]) 

+

964 # `return` statement jumps away, no exits from here. 

+

965 return set() 

+

966 

+

967 @contract(returns='ArcStarts') 

+

968 def _handle__Try(self, node): 

+

969 if node.handlers: 

+

970 handler_start = self.line_for_node(node.handlers[0]) 

+

971 else: 

+

972 handler_start = None 

+

973 

+

974 if node.finalbody: 

+

975 final_start = self.line_for_node(node.finalbody[0]) 

+

976 else: 

+

977 final_start = None 

+

978 

+

979 try_block = TryBlock(handler_start, final_start) 

+

980 self.block_stack.append(try_block) 

+

981 

+

982 start = self.line_for_node(node) 

+

983 exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) 

+

984 

+

985 # We're done with the `try` body, so this block no longer handles 

+

986 # exceptions. We keep the block so the `finally` clause can pick up 

+

987 # flows from the handlers and `else` clause. 

+

988 if node.finalbody: 

+

989 try_block.handler_start = None 

+

990 if node.handlers: 

+

991 # If there are `except` clauses, then raises in the try body 

+

992 # will already jump to them. Start this set over for raises in 

+

993 # `except` and `else`. 

+

994 try_block.raise_from = set() 

+

995 else: 

+

996 self.block_stack.pop() 

+

997 

+

998 handler_exits = set() 

+

999 

+

1000 if node.handlers: 

+

1001 last_handler_start = None 

+

1002 for handler_node in node.handlers: 

+

1003 handler_start = self.line_for_node(handler_node) 

+

1004 if last_handler_start is not None: 

+

1005 self.add_arc(last_handler_start, handler_start) 

+

1006 last_handler_start = handler_start 

+

1007 from_cause = "the exception caught by line {lineno} didn't happen" 

+

1008 from_start = ArcStart(handler_start, cause=from_cause) 

+

1009 handler_exits |= self.add_body_arcs(handler_node.body, from_start=from_start) 

+

1010 

+

1011 if node.orelse: 

+

1012 exits = self.add_body_arcs(node.orelse, prev_starts=exits) 

+

1013 

+

1014 exits |= handler_exits 

+

1015 

+

1016 if node.finalbody: 

+

1017 self.block_stack.pop() 

+

1018 final_from = ( # You can get to the `finally` clause from: 

+

1019 exits | # the exits of the body or `else` clause, 

+

1020 try_block.break_from | # or a `break`, 

+

1021 try_block.continue_from | # or a `continue`, 

+

1022 try_block.raise_from | # or a `raise`, 

+

1023 try_block.return_from # or a `return`. 

+

1024 ) 

+

1025 

+

1026 final_exits = self.add_body_arcs(node.finalbody, prev_starts=final_from) 

+

1027 

+

1028 if try_block.break_from: 

+

1029 if env.PYBEHAVIOR.finally_jumps_back: 

+

1030 for break_line in try_block.break_from: 

+

1031 lineno = break_line.lineno 

+

1032 cause = break_line.cause.format(lineno=lineno) 

+

1033 for final_exit in final_exits: 

+

1034 self.add_arc(final_exit.lineno, lineno, cause) 

+

1035 breaks = try_block.break_from 

+

1036 else: 

+

1037 breaks = self._combine_finally_starts(try_block.break_from, final_exits) 

+

1038 self.process_break_exits(breaks) 

+

1039 

+

1040 if try_block.continue_from: 

+

1041 if env.PYBEHAVIOR.finally_jumps_back: 

+

1042 for continue_line in try_block.continue_from: 

+

1043 lineno = continue_line.lineno 

+

1044 cause = continue_line.cause.format(lineno=lineno) 

+

1045 for final_exit in final_exits: 

+

1046 self.add_arc(final_exit.lineno, lineno, cause) 

+

1047 continues = try_block.continue_from 

+

1048 else: 

+

1049 continues = self._combine_finally_starts(try_block.continue_from, final_exits) 

+

1050 self.process_continue_exits(continues) 

+

1051 

+

1052 if try_block.raise_from: 

+

1053 self.process_raise_exits( 

+

1054 self._combine_finally_starts(try_block.raise_from, final_exits) 

+

1055 ) 

+

1056 

+

1057 if try_block.return_from: 

+

1058 if env.PYBEHAVIOR.finally_jumps_back: 

+

1059 for return_line in try_block.return_from: 

+

1060 lineno = return_line.lineno 

+

1061 cause = return_line.cause.format(lineno=lineno) 

+

1062 for final_exit in final_exits: 

+

1063 self.add_arc(final_exit.lineno, lineno, cause) 

+

1064 returns = try_block.return_from 

+

1065 else: 

+

1066 returns = self._combine_finally_starts(try_block.return_from, final_exits) 

+

1067 self.process_return_exits(returns) 

+

1068 

+

1069 if exits: 

+

1070 # The finally clause's exits are only exits for the try block 

+

1071 # as a whole if the try block had some exits to begin with. 

+

1072 exits = final_exits 

+

1073 

+

1074 return exits 

+

1075 

+

1076 @contract(starts='ArcStarts', exits='ArcStarts', returns='ArcStarts') 

+

1077 def _combine_finally_starts(self, starts, exits): 

+

1078 """Helper for building the cause of `finally` branches. 

+

1079 

+

1080 "finally" clauses might not execute their exits, and the causes could 

+

1081 be due to a failure to execute any of the exits in the try block. So 

+

1082 we use the causes from `starts` as the causes for `exits`. 

+

1083 """ 

+

1084 causes = [] 

+

1085 for start in sorted(starts): 

+

1086 if start.cause is not None: 1086 ↛ 1085: line 1086 didn't jump to line 1085, because the condition on line 1086 was never false

+

1087 causes.append(start.cause.format(lineno=start.lineno)) 

+

1088 cause = " or ".join(causes) 

+

1089 exits = {ArcStart(xit.lineno, cause) for xit in exits} 

+

1090 return exits 

+

1091 

+

1092 @contract(returns='ArcStarts') 

+

1093 def _handle__TryExcept(self, node): 

+

1094 # Python 2.7 uses separate TryExcept and TryFinally nodes. If we get 

+

1095 # TryExcept, it means there was no finally, so fake it, and treat as 

+

1096 # a general Try node. 

+

1097 node.finalbody = [] 

+

1098 return self._handle__Try(node) 

+

1099 

+

1100 @contract(returns='ArcStarts') 

+

1101 def _handle__TryFinally(self, node): 

+

1102 # Python 2.7 uses separate TryExcept and TryFinally nodes. If we get 

+

1103 # TryFinally, see if there's a TryExcept nested inside. If so, merge 

+

1104 # them. Otherwise, fake fields to complete a Try node. 

+

1105 node.handlers = [] 

+

1106 node.orelse = [] 

+

1107 

+

1108 first = node.body[0] 

+

1109 if first.__class__.__name__ == "TryExcept" and node.lineno == first.lineno: 

+

1110 assert len(node.body) == 1 

+

1111 node.body = first.body 

+

1112 node.handlers = first.handlers 

+

1113 node.orelse = first.orelse 

+

1114 

+

1115 return self._handle__Try(node) 

+

1116 

+

1117 @contract(returns='ArcStarts') 

+

1118 def _handle__While(self, node): 

+

1119 start = to_top = self.line_for_node(node.test) 

+

1120 constant_test = self.is_constant_expr(node.test) 

+

1121 top_is_body0 = False 

+

1122 if constant_test and (env.PY3 or constant_test == "Num"): 

+

1123 top_is_body0 = True 

+

1124 if env.PYBEHAVIOR.keep_constant_test: 

+

1125 top_is_body0 = False 

+

1126 if top_is_body0: 

+

1127 to_top = self.line_for_node(node.body[0]) 

+

1128 self.block_stack.append(LoopBlock(start=to_top)) 

+

1129 from_start = ArcStart(start, cause="the condition on line {lineno} was never true") 

+

1130 exits = self.add_body_arcs(node.body, from_start=from_start) 

+

1131 for xit in exits: 

+

1132 self.add_arc(xit.lineno, to_top, xit.cause) 

+

1133 exits = set() 

+

1134 my_block = self.block_stack.pop() 

+

1135 exits.update(my_block.break_exits) 

+

1136 from_start = ArcStart(start, cause="the condition on line {lineno} was never false") 

+

1137 if node.orelse: 

+

1138 else_exits = self.add_body_arcs(node.orelse, from_start=from_start) 

+

1139 exits |= else_exits 

+

1140 else: 

+

1141 # No `else` clause: you can exit from the start. 

+

1142 if not constant_test: 

+

1143 exits.add(from_start) 

+

1144 return exits 

+

1145 

+

1146 @contract(returns='ArcStarts') 

+

1147 def _handle__With(self, node): 

+

1148 start = self.line_for_node(node) 

+

1149 exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) 

+

1150 return exits 

+

1151 

+

1152 _handle__AsyncWith = _handle__With 

+

1153 

+

1154 def _code_object__Module(self, node): 

+

1155 start = self.line_for_node(node) 

+

1156 if node.body: 

+

1157 exits = self.add_body_arcs(node.body, from_start=ArcStart(-start)) 

+

1158 for xit in exits: 

+

1159 self.add_arc(xit.lineno, -start, xit.cause, "didn't exit the module") 

+

1160 else: 

+

1161 # Empty module. 

+

1162 self.add_arc(-start, start) 

+

1163 self.add_arc(start, -start) 

+

1164 

+

1165 def _code_object__FunctionDef(self, node): 

+

1166 start = self.line_for_node(node) 

+

1167 self.block_stack.append(FunctionBlock(start=start, name=node.name)) 

+

1168 exits = self.add_body_arcs(node.body, from_start=ArcStart(-start)) 

+

1169 self.process_return_exits(exits) 

+

1170 self.block_stack.pop() 

+

1171 

+

1172 _code_object__AsyncFunctionDef = _code_object__FunctionDef 

+

1173 

+

1174 def _code_object__ClassDef(self, node): 

+

1175 start = self.line_for_node(node) 

+

1176 self.add_arc(-start, start) 

+

1177 exits = self.add_body_arcs(node.body, from_start=ArcStart(start)) 

+

1178 for xit in exits: 

+

1179 self.add_arc( 

+

1180 xit.lineno, -start, xit.cause, 

+

1181 "didn't exit the body of class {!r}".format(node.name), 

+

1182 ) 

+

1183 

+

1184 def _make_oneline_code_method(noun): # pylint: disable=no-self-argument 

+

1185 """A function to make methods for online callable _code_object__ methods.""" 

+

1186 def _code_object__oneline_callable(self, node): 

+

1187 start = self.line_for_node(node) 

+

1188 self.add_arc(-start, start, None, "didn't run the {} on line {}".format(noun, start)) 

+

1189 self.add_arc( 

+

1190 start, -start, None, 

+

1191 "didn't finish the {} on line {}".format(noun, start), 

+

1192 ) 

+

1193 return _code_object__oneline_callable 

+

1194 

+

1195 _code_object__Lambda = _make_oneline_code_method("lambda") 

+

1196 _code_object__GeneratorExp = _make_oneline_code_method("generator expression") 

+

1197 _code_object__DictComp = _make_oneline_code_method("dictionary comprehension") 

+

1198 _code_object__SetComp = _make_oneline_code_method("set comprehension") 

+

1199 if env.PY3: 

+

1200 _code_object__ListComp = _make_oneline_code_method("list comprehension") 

+

1201 

+

1202 

+

1203if AST_DUMP: # pragma: debugging 

+

1204 # Code only used when dumping the AST for debugging. 

+

1205 

+

1206 SKIP_DUMP_FIELDS = ["ctx"] 

+

1207 

+

1208 def _is_simple_value(value): 

+

1209 """Is `value` simple enough to be displayed on a single line?""" 

+

1210 return ( 

+

1211 value in [None, [], (), {}, set()] or 

+

1212 isinstance(value, (string_class, int, float)) 

+

1213 ) 

+

1214 

+

1215 def ast_dump(node, depth=0): 

+

1216 """Dump the AST for `node`. 

+

1217 

+

1218 This recursively walks the AST, printing a readable version. 

+

1219 

+

1220 """ 

+

1221 indent = " " * depth 

+

1222 if not isinstance(node, ast.AST): 

+

1223 print("{}<{} {!r}>".format(indent, node.__class__.__name__, node)) 

+

1224 return 

+

1225 

+

1226 lineno = getattr(node, "lineno", None) 

+

1227 if lineno is not None: 

+

1228 linemark = " @ {},{}".format(node.lineno, node.col_offset) 

+

1229 if hasattr(node, "end_lineno"): 

+

1230 linemark += ":" 

+

1231 if node.end_lineno != node.lineno: 

+

1232 linemark += "{},".format(node.end_lineno) 

+

1233 linemark += "{}".format(node.end_col_offset) 

+

1234 else: 

+

1235 linemark = "" 

+

1236 head = "{}<{}{}".format(indent, node.__class__.__name__, linemark) 

+

1237 

+

1238 named_fields = [ 

+

1239 (name, value) 

+

1240 for name, value in ast.iter_fields(node) 

+

1241 if name not in SKIP_DUMP_FIELDS 

+

1242 ] 

+

1243 if not named_fields: 

+

1244 print("{}>".format(head)) 

+

1245 elif len(named_fields) == 1 and _is_simple_value(named_fields[0][1]): 

+

1246 field_name, value = named_fields[0] 

+

1247 print("{} {}: {!r}>".format(head, field_name, value)) 

+

1248 else: 

+

1249 print(head) 

+

1250 if 0: 

+

1251 print("{}# mro: {}".format( 

+

1252 indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]), 

+

1253 )) 

+

1254 next_indent = indent + " " 

+

1255 for field_name, value in named_fields: 

+

1256 prefix = "{}{}:".format(next_indent, field_name) 

+

1257 if _is_simple_value(value): 

+

1258 print("{} {!r}".format(prefix, value)) 

+

1259 elif isinstance(value, list): 

+

1260 print("{} [".format(prefix)) 

+

1261 for n in value: 

+

1262 ast_dump(n, depth + 8) 

+

1263 print("{}]".format(next_indent)) 

+

1264 else: 

+

1265 print(prefix) 

+

1266 ast_dump(value, depth + 8) 

+

1267 

+

1268 print("{}>".format(indent)) 

+
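
For comparison, the standard library can produce a similar, less tailored dump; on Python 3.9+ ast.dump accepts an indent argument.

    import ast

    tree = ast.parse("if x:\n    y = 1\nelse:\n    y = 2\n")
    print(ast.dump(tree, indent=4))
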
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_phystokens_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_phystokens_py.html
new file mode 100644
index 000000000..2daef2364
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_phystokens_py.html
@@ -0,0 +1,363 @@
Coverage for coverage/phystokens.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Better tokenizing for coverage.py.""" 

+

5 

+

6import codecs 

+

7import keyword 

+

8import re 

+

9import sys 

+

10import token 

+

11import tokenize 

+

12 

+

13from coverage import env 

+

14from coverage.backward import iternext, unicode_class 

+

15from coverage.misc import contract 

+

16 

+

17 

+

18def phys_tokens(toks): 

+

19 """Return all physical tokens, even line continuations. 

+

20 

+

21 tokenize.generate_tokens() doesn't return a token for the backslash that 

+

22 continues lines. This wrapper provides those tokens so that we can 

+

23 re-create a faithful representation of the original source. 

+

24 

+

25 Returns the same values as generate_tokens() 

+

26 

+

27 """ 

+

28 last_line = None 

+

29 last_lineno = -1 

+

30 last_ttext = None 

+

31 for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks: 

+

32 if last_lineno != elineno: 

+

33 if last_line and last_line.endswith("\\\n"): 

+

34 # We are at the beginning of a new line, and the last line 

+

35 # ended with a backslash. We probably have to inject a 

+

36 # backslash token into the stream. Unfortunately, there's more 

+

37 # to figure out. This code:: 

+

38 # 

+

39 # usage = """\ 

+

40 # HEY THERE 

+

41 # """ 

+

42 # 

+

43 # triggers this condition, but the token text is:: 

+

44 # 

+

45 # '"""\\\nHEY THERE\n"""' 

+

46 # 

+

47 # so we need to figure out if the backslash is already in the 

+

48 # string token or not. 

+

49 inject_backslash = True 

+

50 if last_ttext.endswith("\\"): 

+

51 inject_backslash = False 

+

52 elif ttype == token.STRING: 

+

53 if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\': 

+

54 # It's a multi-line string and the first line ends with 

+

55 # a backslash, so we don't need to inject another. 

+

56 inject_backslash = False 

+

57 if inject_backslash: 

+

58 # Figure out what column the backslash is in. 

+

59 ccol = len(last_line.split("\n")[-2]) - 1 

+

60 # Yield the token, with a fake token type. 

+

61 yield ( 

+

62 99999, "\\\n", 

+

63 (slineno, ccol), (slineno, ccol+2), 

+

64 last_line 

+

65 ) 

+

66 last_line = ltext 

+

67 if ttype not in (tokenize.NEWLINE, tokenize.NL): 

+

68 last_ttext = ttext 

+

69 yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext 

+

70 last_lineno = elineno 

+

71 

+

72 

+

73@contract(source='unicode') 

+

74def source_token_lines(source): 

+

75 """Generate a series of lines, one for each line in `source`. 

+

76 

+

77 Each line is a list of pairs, each pair is a token:: 

+

78 

+

79 [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ] 

+

80 

+

81 Each pair has a token class, and the token text. 

+

82 

+

83 If you concatenate all the token texts, and then join them with newlines, 

+

84 you should have your original `source` back, with two differences: 

+

85 trailing whitespace is not preserved, and a final line with no newline 

+

86 is indistinguishable from a final line with a newline. 

+

87 

+

88 """ 

+

89 

+

90 ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL} 

+

91 line = [] 

+

92 col = 0 

+

93 

+

94 source = source.expandtabs(8).replace('\r\n', '\n') 

+

95 tokgen = generate_tokens(source) 

+

96 

+

97 for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen): 

+

98 mark_start = True 

+

99 for part in re.split('(\n)', ttext): 

+

100 if part == '\n': 

+

101 yield line 

+

102 line = [] 

+

103 col = 0 

+

104 mark_end = False 

+

105 elif part == '': 

+

106 mark_end = False 

+

107 elif ttype in ws_tokens: 

+

108 mark_end = False 

+

109 else: 

+

110 if mark_start and scol > col: 

+

111 line.append(("ws", u" " * (scol - col))) 

+

112 mark_start = False 

+

113 tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3] 

+

114 if ttype == token.NAME and keyword.iskeyword(ttext): 

+

115 tok_class = "key" 

+

116 line.append((tok_class, part)) 

+

117 mark_end = True 

+

118 scol = 0 

+

119 if mark_end: 

+

120 col = ecol 

+

121 

+

122 if line: 

+

123 yield line 

+

124 

+

125 

+

126class CachedTokenizer(object): 

+

127 """A one-element cache around tokenize.generate_tokens. 

+

128 

+

129 When reporting, coverage.py tokenizes files twice, once to find the 

+

130 structure of the file, and once to syntax-color it. Tokenizing is 

+

131 expensive, and easily cached. 

+

132 

+

133 This is a one-element cache so that our twice-in-a-row tokenizing doesn't 

+

134 actually tokenize twice. 

+

135 

+

136 """ 

+

137 def __init__(self): 

+

138 self.last_text = None 

+

139 self.last_tokens = None 

+

140 

+

141 @contract(text='unicode') 

+

142 def generate_tokens(self, text): 

+

143 """A stand-in for `tokenize.generate_tokens`.""" 

+

144 if text != self.last_text: 

+

145 self.last_text = text 

+

146 readline = iternext(text.splitlines(True)) 

+

147 self.last_tokens = list(tokenize.generate_tokens(readline)) 

+

148 return self.last_tokens 

+

149 

+

150# Create our generate_tokens cache as a callable replacement function. 

+

151generate_tokens = CachedTokenizer().generate_tokens 

+

152 

+

153 

+

154COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE) 

+

155 

+

156@contract(source='bytes') 

+

157def _source_encoding_py2(source): 

+

158 """Determine the encoding for `source`, according to PEP 263. 

+

159 

+

160 `source` is a byte string, the text of the program. 

+

161 

+

162 Returns a string, the name of the encoding. 

+

163 

+

164 """ 

+

165 assert isinstance(source, bytes) 

+

166 

+

167 # Do this so the detect_encode code we copied will work. 

+

168 readline = iternext(source.splitlines(True)) 

+

169 

+

170 # This is mostly code adapted from Py3.2's tokenize module. 

+

171 

+

172 def _get_normal_name(orig_enc): 

+

173 """Imitates get_normal_name in tokenizer.c.""" 

+

174 # Only care about the first 12 characters. 

+

175 enc = orig_enc[:12].lower().replace("_", "-") 

+

176 if re.match(r"^utf-8($|-)", enc): 

+

177 return "utf-8" 

+

178 if re.match(r"^(latin-1|iso-8859-1|iso-latin-1)($|-)", enc): 

+

179 return "iso-8859-1" 

+

180 return orig_enc 

+

181 

+

182 # From detect_encode(): 

+

183 # It detects the encoding from the presence of a UTF-8 BOM or an encoding 

+

184 # cookie as specified in PEP-0263. If both a BOM and a cookie are present, 

+

185 # but disagree, a SyntaxError will be raised. If the encoding cookie is an 

+

186 # invalid charset, raise a SyntaxError. Note that if a UTF-8 BOM is found, 

+

187 # 'utf-8-sig' is returned. 

+

188 

+

189 # If no encoding is specified, then the default will be returned. 

+

190 default = 'ascii' 

+

191 

+

192 bom_found = False 

+

193 encoding = None 

+

194 

+

195 def read_or_stop(): 

+

196 """Get the next source line, or ''.""" 

+

197 try: 

+

198 return readline() 

+

199 except StopIteration: 

+

200 return '' 

+

201 

+

202 def find_cookie(line): 

+

203 """Find an encoding cookie in `line`.""" 

+

204 try: 

+

205 line_string = line.decode('ascii') 

+

206 except UnicodeDecodeError: 

+

207 return None 

+

208 

+

209 matches = COOKIE_RE.findall(line_string) 

+

210 if not matches: 

+

211 return None 

+

212 encoding = _get_normal_name(matches[0]) 

+

213 try: 

+

214 codec = codecs.lookup(encoding) 

+

215 except LookupError: 

+

216 # This behavior mimics the Python interpreter 

+

217 raise SyntaxError("unknown encoding: " + encoding) 

+

218 

+

219 if bom_found: 

+

220 # codecs in 2.3 were raw tuples of functions, assume the best. 

+

221 codec_name = getattr(codec, 'name', encoding) 

+

222 if codec_name != 'utf-8': 

+

223 # This behavior mimics the Python interpreter 

+

224 raise SyntaxError('encoding problem: utf-8') 

+

225 encoding += '-sig' 

+

226 return encoding 

+

227 

+

228 first = read_or_stop() 

+

229 if first.startswith(codecs.BOM_UTF8): 

+

230 bom_found = True 

+

231 first = first[3:] 

+

232 default = 'utf-8-sig' 

+

233 if not first: 

+

234 return default 

+

235 

+

236 encoding = find_cookie(first) 

+

237 if encoding: 

+

238 return encoding 

+

239 

+

240 second = read_or_stop() 

+

241 if not second: 

+

242 return default 

+

243 

+

244 encoding = find_cookie(second) 

+

245 if encoding: 

+

246 return encoding 

+

247 

+

248 return default 

+

249 

+

250 

+

251@contract(source='bytes') 

+

252def _source_encoding_py3(source): 

+

253 """Determine the encoding for `source`, according to PEP 263. 

+

254 

+

255 `source` is a byte string: the text of the program. 

+

256 

+

257 Returns a string, the name of the encoding. 

+

258 

+

259 """ 

+

260 readline = iternext(source.splitlines(True)) 

+

261 return tokenize.detect_encoding(readline)[0] 

+

262 

+

263 

+

264if env.PY3: 

+

265 source_encoding = _source_encoding_py3 

+

266else: 

+

267 source_encoding = _source_encoding_py2 

+

268 

+

269 

+

270@contract(source='unicode') 

+

271def compile_unicode(source, filename, mode): 

+

272 """Just like the `compile` builtin, but works on any Unicode string. 

+

273 

+

274 Python 2's compile() builtin has a stupid restriction: if the source string 

+

275 is Unicode, then it may not have an encoding declaration in it. Why not? 

+

276 Who knows! It also decodes to utf8, and then tries to interpret those utf8 

+

277 bytes according to the encoding declaration. Why? Who knows! 

+

278 

+

279 This function neuters the coding declaration, and compiles it. 

+

280 

+

281 """ 

+

282 source = neuter_encoding_declaration(source) 

+

283 if env.PY2 and isinstance(filename, unicode_class): 

+

284 filename = filename.encode(sys.getfilesystemencoding(), "replace") 

+

285 code = compile(source, filename, mode) 

+

286 return code 

+

287 

+

288 

+

289@contract(source='unicode', returns='unicode') 

+

290def neuter_encoding_declaration(source): 

+

291 """Return `source`, with any encoding declaration neutered.""" 

+

292 if COOKIE_RE.search(source): 

+

293 source_lines = source.splitlines(True) 

+

294 for lineno in range(min(2, len(source_lines))): 

+

295 source_lines[lineno] = COOKIE_RE.sub("# (deleted declaration)", source_lines[lineno]) 

+

296 source = "".join(source_lines) 

+

297 return source 

+
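The encoding handling above (source_encoding and neuter_encoding_declaration) is PEP 263 in miniature: detect the declared encoding, then blank out the cookie so the decoded text can be compiled without complaint. A small self-contained sketch of that flow, in the spirit of _source_encoding_py3 and reusing the COOKIE_RE pattern shown above; the sample source string is illustrative only:

import io
import re
import tokenize

COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE)

source_bytes = b"# -*- coding: utf-8 -*-\nmessage = 'hi'\n"

# Detect the declared encoding from the byte string.
encoding, _ = tokenize.detect_encoding(io.BytesIO(source_bytes).readline)
text = source_bytes.decode(encoding)

# Neuter the declaration on the first two lines only, where PEP 263 allows it.
lines = text.splitlines(True)
for i in range(min(2, len(lines))):
    lines[i] = COOKIE_RE.sub("# (deleted declaration)", lines[i])

print(encoding)        # 'utf-8'
print("".join(lines))  # cookie neutered, the rest of the source untouched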
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_plugin_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_plugin_py.html new file mode 100644 index 000000000..449783398 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_plugin_py.html @@ -0,0 +1,599 @@
Coverage for coverage/plugin.py: 85.507%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4""" 

+

5.. versionadded:: 4.0 

+

6 

+

7Plug-in interfaces for coverage.py. 

+

8 

+

9Coverage.py supports a few different kinds of plug-ins that change its 

+

10behavior: 

+

11 

+

12* File tracers implement tracing of non-Python file types. 

+

13 

+

14* Configurers add custom configuration, using Python code to change the 

+

15 configuration. 

+

16 

+

17* Dynamic context switchers decide when the dynamic context has changed, for 

+

18 example, to record what test function produced the coverage. 

+

19 

+

20To write a coverage.py plug-in, create a module with a subclass of 

+

21:class:`~coverage.CoveragePlugin`. You will override methods in your class to 

+

22participate in various aspects of coverage.py's processing. 

+

23Different types of plug-ins have to override different methods. 

+

24 

+

25Any plug-in can optionally implement :meth:`~coverage.CoveragePlugin.sys_info` 

+

26to provide debugging information about their operation. 

+

27 

+

28Your module must also contain a ``coverage_init`` function that registers an 

+

29instance of your plug-in class:: 

+

30 

+

31 import coverage 

+

32 

+

33 class MyPlugin(coverage.CoveragePlugin): 

+

34 ... 

+

35 

+

36 def coverage_init(reg, options): 

+

37 reg.add_file_tracer(MyPlugin()) 

+

38 

+

39You use the `reg` parameter passed to your ``coverage_init`` function to 

+

40register your plug-in object. The registration method you call depends on 

+

41what kind of plug-in it is. 

+

42 

+

43If your plug-in takes options, the `options` parameter is a dictionary of your 

+

44plug-in's options from the coverage.py configuration file. Use them however 

+

45you want to configure your object before registering it. 

+

46 

+

47Coverage.py will store its own information on your plug-in object, using 

+

48attributes whose names start with ``_coverage_``. Don't be startled. 

+

49 

+

50.. warning:: 

+

51 Plug-ins are imported by coverage.py before it begins measuring code. 

+

52 If you write a plugin in your own project, it might import your product 

+

53 code before coverage.py can start measuring. This can result in your 

+

54 own code being reported as missing. 

+

55 

+

56 One solution is to put your plugins in your project tree, but not in 

+

57 your importable Python package. 

+

58 

+

59 

+

60.. _file_tracer_plugins: 

+

61 

+

62File Tracers 

+

63============ 

+

64 

+

65File tracers implement measurement support for non-Python files. File tracers 

+

66implement the :meth:`~coverage.CoveragePlugin.file_tracer` method to claim 

+

67files and the :meth:`~coverage.CoveragePlugin.file_reporter` method to report 

+

68on those files. 

+

69 

+

70In your ``coverage_init`` function, use the ``add_file_tracer`` method to 

+

71register your file tracer. 

+

72 

+

73 

+

74.. _configurer_plugins: 

+

75 

+

76Configurers 

+

77=========== 

+

78 

+

79.. versionadded:: 4.5 

+

80 

+

81Configurers modify the configuration of coverage.py during start-up. 

+

82Configurers implement the :meth:`~coverage.CoveragePlugin.configure` method to 

+

83change the configuration. 

+

84 

+

85In your ``coverage_init`` function, use the ``add_configurer`` method to 

+

86register your configurer. 

+

87 

+

88 

+

89.. _dynamic_context_plugins: 

+

90 

+

91Dynamic Context Switchers 

+

92========================= 

+

93 

+

94.. versionadded:: 5.0 

+

95 

+

96Dynamic context switcher plugins implement the 

+

97:meth:`~coverage.CoveragePlugin.dynamic_context` method to dynamically compute 

+

98the context label for each measured frame. 

+

99 

+

100Computed context labels are useful when you want to group measured data without 

+

101modifying the source code. 

+

102 

+

103For example, you could write a plugin that checks `frame.f_code` to inspect 

+

104the currently executed method, and set the context label to a fully qualified 

+

105method name if it's an instance method of `unittest.TestCase` and the method 

+

106name starts with 'test'. Such a plugin would provide basic coverage grouping 

+

107by test and could be used with test runners that have no built-in coveragepy 

+

108support. 

+

109 

+

110In your ``coverage_init`` function, use the ``add_dynamic_context`` method to 

+

111register your dynamic context switcher. 

+

112 

+

113""" 

+

114 

+

115from coverage import files 

+

116from coverage.misc import contract, _needs_to_implement 

+

117 

+

118 

+

119class CoveragePlugin(object): 

+

120 """Base class for coverage.py plug-ins.""" 

+

121 

+

122 def file_tracer(self, filename): # pylint: disable=unused-argument 

+

123 """Get a :class:`FileTracer` object for a file. 

+

124 

+

125 Plug-in type: file tracer. 

+

126 

+

127 Every Python source file is offered to your plug-in to give it a chance 

+

128 to take responsibility for tracing the file. If your plug-in can 

+

129 handle the file, it should return a :class:`FileTracer` object. 

+

130 Otherwise return None. 

+

131 

+

132 There is no way to register your plug-in for particular files. 

+

133 Instead, this method is invoked for all files as they are executed, 

+

134 and the plug-in decides whether it can trace the file or not. 

+

135 Be prepared for `filename` to refer to all kinds of files that have 

+

136 nothing to do with your plug-in. 

+

137 

+

138 The file name will be a Python file being executed. There are two 

+

139 broad categories of behavior for a plug-in, depending on the kind of 

+

140 files your plug-in supports: 

+

141 

+

142 * Static file names: each of your original source files has been 

+

143 converted into a distinct Python file. Your plug-in is invoked with 

+

144 the Python file name, and it maps it back to its original source 

+

145 file. 

+

146 

+

147 * Dynamic file names: all of your source files are executed by the same 

+

148 Python file. In this case, your plug-in implements 

+

149 :meth:`FileTracer.dynamic_source_filename` to provide the actual 

+

150 source file for each execution frame. 

+

151 

+

152 `filename` is a string, the path to the file being considered. This is 

+

153 the absolute real path to the file. If you are comparing to other 

+

154 paths, be sure to take this into account. 

+

155 

+

156 Returns a :class:`FileTracer` object to use to trace `filename`, or 

+

157 None if this plug-in cannot trace this file. 

+

158 

+

159 """ 

+

160 return None 

+

161 

+

162 def file_reporter(self, filename): # pylint: disable=unused-argument 

+

163 """Get the :class:`FileReporter` class to use for a file. 

+

164 

+

165 Plug-in type: file tracer. 

+

166 

+

167 This will only be invoked if `filename` returns non-None from 

+

168 :meth:`file_tracer`. It's an error to return None from this method. 

+

169 

+

170 Returns a :class:`FileReporter` object to use to report on `filename`, 

+

171 or the string `"python"` to have coverage.py treat the file as Python. 

+

172 

+

173 """ 

+

174 _needs_to_implement(self, "file_reporter") 

+

175 

+

176 def dynamic_context(self, frame): # pylint: disable=unused-argument 

+

177 """Get the dynamically computed context label for `frame`. 

+

178 

+

179 Plug-in type: dynamic context. 

+

180 

+

181 This method is invoked for each frame when outside of a dynamic 

+

182 context, to see if a new dynamic context should be started. If it 

+

183 returns a string, a new context label is set for this and deeper 

+

184 frames. The dynamic context ends when this frame returns. 

+

185 

+

186 Returns a string to start a new dynamic context, or None if no new 

+

187 context should be started. 

+

188 

+

189 """ 

+

190 return None 

+

191 

+

192 def find_executable_files(self, src_dir): # pylint: disable=unused-argument 

+

193 """Yield all of the executable files in `src_dir`, recursively. 

+

194 

+

195 Plug-in type: file tracer. 

+

196 

+

197 Executability is a plug-in-specific property, but generally means files 

+

198 which would have been considered for coverage analysis, had they been 

+

199 included automatically. 

+

200 

+

201 Returns or yields a sequence of strings, the paths to files that could 

+

202 have been executed, including files that had been executed. 

+

203 

+

204 """ 

+

205 return [] 

+

206 

+

207 def configure(self, config): 

+

208 """Modify the configuration of coverage.py. 

+

209 

+

210 Plug-in type: configurer. 

+

211 

+

212 This method is called during coverage.py start-up, to give your plug-in 

+

213 a chance to change the configuration. The `config` parameter is an 

+

214 object with :meth:`~coverage.Coverage.get_option` and 

+

215 :meth:`~coverage.Coverage.set_option` methods. Do not call any other 

+

216 methods on the `config` object. 

+

217 

+

218 """ 

+

219 pass 

+

220 

+

221 def sys_info(self): 

+

222 """Get a list of information useful for debugging. 

+

223 

+

224 Plug-in type: any. 

+

225 

+

226 This method will be invoked for ``--debug=sys``. Your 

+

227 plug-in can return any information it wants to be displayed. 

+

228 

+

229 Returns a list of pairs: `[(name, value), ...]`. 

+

230 

+

231 """ 

+

232 return [] 

+

233 

+

234 

+

235class FileTracer(object): 

+

236 """Support needed for files during the execution phase. 

+

237 

+

238 File tracer plug-ins implement subclasses of FileTracer to return from 

+

239 their :meth:`~CoveragePlugin.file_tracer` method. 

+

240 

+

241 You may construct this object from :meth:`CoveragePlugin.file_tracer` any 

+

242 way you like. A natural choice would be to pass the file name given to 

+

243 `file_tracer`. 

+

244 

+

245 `FileTracer` objects should only be created in the 

+

246 :meth:`CoveragePlugin.file_tracer` method. 

+

247 

+

248 See :ref:`howitworks` for details of the different coverage.py phases. 

+

249 

+

250 """ 

+

251 

+

252 def source_filename(self): 

+

253 """The source file name for this file. 

+

254 

+

255 This may be any file name you like. A key responsibility of a plug-in 

+

256 is to own the mapping from Python execution back to whatever source 

+

257 file name was originally the source of the code. 

+

258 

+

259 See :meth:`CoveragePlugin.file_tracer` for details about static and 

+

260 dynamic file names. 

+

261 

+

262 Returns the file name to credit with this execution. 

+

263 

+

264 """ 

+

265 _needs_to_implement(self, "source_filename") 

+

266 

+

267 def has_dynamic_source_filename(self): 

+

268 """Does this FileTracer have dynamic source file names? 

+

269 

+

270 FileTracers can provide dynamically determined file names by 

+

271 implementing :meth:`dynamic_source_filename`. Invoking that function 

+

272 is expensive. To determine whether to invoke it, coverage.py uses the 

+

273 result of this function to know if it needs to bother invoking 

+

274 :meth:`dynamic_source_filename`. 

+

275 

+

276 See :meth:`CoveragePlugin.file_tracer` for details about static and 

+

277 dynamic file names. 

+

278 

+

279 Returns True if :meth:`dynamic_source_filename` should be called to get 

+

280 dynamic source file names. 

+

281 

+

282 """ 

+

283 return False 

+

284 

+

285 def dynamic_source_filename(self, filename, frame): # pylint: disable=unused-argument 

+

286 """Get a dynamically computed source file name. 

+

287 

+

288 Some plug-ins need to compute the source file name dynamically for each 

+

289 frame. 

+

290 

+

291 This function will not be invoked if 

+

292 :meth:`has_dynamic_source_filename` returns False. 

+

293 

+

294 Returns the source file name for this frame, or None if this frame 

+

295 shouldn't be measured. 

+

296 

+

297 """ 

+

298 return None 

+

299 

+

300 def line_number_range(self, frame): 

+

301 """Get the range of source line numbers for a given a call frame. 

+

302 

+

303 The call frame is examined, and the source line number in the original 

+

304 file is returned. The return value is a pair of numbers, the starting 

+

305 line number and the ending line number, both inclusive. For example, 

+

306 returning (5, 7) means that lines 5, 6, and 7 should be considered 

+

307 executed. 

+

308 

+

309 This function might decide that the frame doesn't indicate any lines 

+

310 from the source file were executed. Return (-1, -1) in this case to 

+

311 tell coverage.py that no lines should be recorded for this frame. 

+

312 

+

313 """ 

+

314 lineno = frame.f_lineno 

+

315 return lineno, lineno 

+

316 

+

317 

+

318class FileReporter(object): 

+

319 """Support needed for files during the analysis and reporting phases. 

+

320 

+

321 File tracer plug-ins implement a subclass of `FileReporter`, and return 

+

322 instances from their :meth:`CoveragePlugin.file_reporter` method. 

+

323 

+

324 There are many methods here, but only :meth:`lines` is required, to provide 

+

325 the set of executable lines in the file. 

+

326 

+

327 See :ref:`howitworks` for details of the different coverage.py phases. 

+

328 

+

329 """ 

+

330 

+

331 def __init__(self, filename): 

+

332 """Simple initialization of a `FileReporter`. 

+

333 

+

334 The `filename` argument is the path to the file being reported. This 

+

335 will be available as the `.filename` attribute on the object. Other 

+

336 method implementations on this base class rely on this attribute. 

+

337 

+

338 """ 

+

339 self.filename = filename 

+

340 

+

341 def __repr__(self): 

+

342 return "<{0.__class__.__name__} filename={0.filename!r}>".format(self) 

+

343 

+

344 def relative_filename(self): 

+

345 """Get the relative file name for this file. 

+

346 

+

347 This file path will be displayed in reports. The default 

+

348 implementation will supply the actual project-relative file path. You 

+

349 only need to supply this method if you have an unusual syntax for file 

+

350 paths. 

+

351 

+

352 """ 

+

353 return files.relative_filename(self.filename) 

+

354 

+

355 @contract(returns='unicode') 

+

356 def source(self): 

+

357 """Get the source for the file. 

+

358 

+

359 Returns a Unicode string. 

+

360 

+

361 The base implementation simply reads the `self.filename` file and 

+

362 decodes it as UTF8. Override this method if your file isn't readable 

+

363 as a text file, or if you need other encoding support. 

+

364 

+

365 """ 

+

366 with open(self.filename, "rb") as f: 

+

367 return f.read().decode("utf8") 

+

368 

+

369 def lines(self): 

+

370 """Get the executable lines in this file. 

+

371 

+

372 Your plug-in must determine which lines in the file were possibly 

+

373 executable. This method returns a set of those line numbers. 

+

374 

+

375 Returns a set of line numbers. 

+

376 

+

377 """ 

+

378 _needs_to_implement(self, "lines") 

+

379 

+

380 def excluded_lines(self): 

+

381 """Get the excluded executable lines in this file. 

+

382 

+

383 Your plug-in can use any method it likes to allow the user to exclude 

+

384 executable lines from consideration. 

+

385 

+

386 Returns a set of line numbers. 

+

387 

+

388 The base implementation returns the empty set. 

+

389 

+

390 """ 

+

391 return set() 

+

392 

+

393 def translate_lines(self, lines): 

+

394 """Translate recorded lines into reported lines. 

+

395 

+

396 Some file formats will want to report lines slightly differently than 

+

397 they are recorded. For example, Python records the last line of a 

+

398 multi-line statement, but reports are nicer if they mention the first 

+

399 line. 

+

400 

+

401 Your plug-in can optionally define this method to perform these kinds 

+

402 of adjustment. 

+

403 

+

404 `lines` is a sequence of integers, the recorded line numbers. 

+

405 

+

406 Returns a set of integers, the adjusted line numbers. 

+

407 

+

408 The base implementation returns the numbers unchanged. 

+

409 

+

410 """ 

+

411 return set(lines) 

+

412 

+

413 def arcs(self): 

+

414 """Get the executable arcs in this file. 

+

415 

+

416 To support branch coverage, your plug-in needs to be able to indicate 

+

417 possible execution paths, as a set of line number pairs. Each pair is 

+

418 a `(prev, next)` pair indicating that execution can transition from the 

+

419 `prev` line number to the `next` line number. 

+

420 

+

421 Returns a set of pairs of line numbers. The default implementation 

+

422 returns an empty set. 

+

423 

+

424 """ 

+

425 return set() 

+

426 

+

427 def no_branch_lines(self): 

+

428 """Get the lines excused from branch coverage in this file. 

+

429 

+

430 Your plug-in can use any method it likes to allow the user to exclude 

+

431 lines from consideration of branch coverage. 

+

432 

+

433 Returns a set of line numbers. 

+

434 

+

435 The base implementation returns the empty set. 

+

436 

+

437 """ 

+

438 return set() 

+

439 

+

440 def translate_arcs(self, arcs): 

+

441 """Translate recorded arcs into reported arcs. 

+

442 

+

443 Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of 

+

444 line number pairs. 

+

445 

+

446 Returns a set of line number pairs. 

+

447 

+

448 The default implementation returns `arcs` unchanged. 

+

449 

+

450 """ 

+

451 return arcs 

+

452 

+

453 def exit_counts(self): 

+

454 """Get a count of exits from that each line. 

+

455 

+

456 To determine which lines are branches, coverage.py looks for lines that 

+

457 have more than one exit. This function creates a dict mapping each 

+

458 executable line number to a count of how many exits it has. 

+

459 

+

460 To be honest, this feels wrong, and should be refactored. Let me know 

+

461 if you attempt to implement this method in your plug-in... 

+

462 

+

463 """ 

+

464 return {} 

+

465 

+

466 def missing_arc_description(self, start, end, executed_arcs=None): # pylint: disable=unused-argument 

+

467 """Provide an English sentence describing a missing arc. 

+

468 

+

469 The `start` and `end` arguments are the line numbers of the missing 

+

470 arc. Negative numbers indicate entering or exiting code objects. 

+

471 

+

472 The `executed_arcs` argument is a set of line number pairs, the arcs 

+

473 that were executed in this file. 

+

474 

+

475 By default, this simply returns the string "Line {start} didn't jump 

+

476 to line {end}". 

+

477 

+

478 """ 

+

479 return "Line {start} didn't jump to line {end}".format(start=start, end=end) 

+

480 

+

481 def source_token_lines(self): 

+

482 """Generate a series of tokenized lines, one for each line in `source`. 

+

483 

+

484 These tokens are used for syntax-colored reports. 

+

485 

+

486 Each line is a list of pairs, each pair is a token:: 

+

487 

+

488 [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ] 

+

489 

+

490 Each pair has a token class, and the token text. The token classes 

+

491 are: 

+

492 

+

493 * ``'com'``: a comment 

+

494 * ``'key'``: a keyword 

+

495 * ``'nam'``: a name, or identifier 

+

496 * ``'num'``: a number 

+

497 * ``'op'``: an operator 

+

498 * ``'str'``: a string literal 

+

499 * ``'ws'``: some white space 

+

500 * ``'txt'``: some other kind of text 

+

501 

+

502 If you concatenate all the token texts, and then join them with 

+

503 newlines, you should have your original source back. 

+

504 

+

505 The default implementation simply returns each line tagged as 

+

506 ``'txt'``. 

+

507 

+

508 """ 

+

509 for line in self.source().splitlines(): 

+

510 yield [('txt', line)] 

+

511 

+

512 # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all 

+

513 # of them defined. 

+

514 

+

515 def __eq__(self, other): 

+

516 return isinstance(other, FileReporter) and self.filename == other.filename 

+

517 

+

518 def __ne__(self, other): 

+

519 return not (self == other) 

+

520 

+

521 def __lt__(self, other): 

+

522 return self.filename < other.filename 

+

523 

+

524 def __le__(self, other): 

+

525 return self.filename <= other.filename 

+

526 

+

527 def __gt__(self, other): 

+

528 return self.filename > other.filename 

+

529 

+

530 def __ge__(self, other): 

+

531 return self.filename >= other.filename 

+

532 

+

533 __hash__ = None # This object doesn't need to be hashed. 

+
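The module docstring above lays out the registration pattern: a plug-in module defines a coverage_init(reg, options) function and calls one of the add_* methods on `reg`. A minimal configurer plug-in following that pattern is sketched below; the class name and the "run:branch" option are illustrative choices, not something this file prescribes.

import coverage

class BranchOnPlugin(coverage.CoveragePlugin):
    """Force branch measurement on, whatever the config file says."""

    def configure(self, config):
        # Only get_option()/set_option() should be called on `config`.
        if not config.get_option("run:branch"):
            config.set_option("run:branch", True)

def coverage_init(reg, options):
    # `options` holds this plug-in's own settings from the coverage config file.
    reg.add_configurer(BranchOnPlugin())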
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_plugin_support_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_plugin_support_py.html new file mode 100644 index 000000000..336a650f1 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_plugin_support_py.html @@ -0,0 +1,347 @@
Coverage for coverage/plugin_support.py: 51.648%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Support for plugins.""" 

+

5 

+

6import os 

+

7import os.path 

+

8import sys 

+

9 

+

10from coverage.misc import CoverageException, isolate_module 

+

11from coverage.plugin import CoveragePlugin, FileTracer, FileReporter 

+

12 

+

13os = isolate_module(os) 

+

14 

+

15 

+

16class Plugins(object): 

+

17 """The currently loaded collection of coverage.py plugins.""" 

+

18 

+

19 def __init__(self): 

+

20 self.order = [] 

+

21 self.names = {} 

+

22 self.file_tracers = [] 

+

23 self.configurers = [] 

+

24 self.context_switchers = [] 

+

25 

+

26 self.current_module = None 

+

27 self.debug = None 

+

28 

+

29 @classmethod 

+

30 def load_plugins(cls, modules, config, debug=None): 

+

31 """Load plugins from `modules`. 

+

32 

+

33 Returns a Plugins object with the loaded and configured plugins. 

+

34 

+

35 """ 

+

36 plugins = cls() 

+

37 plugins.debug = debug 

+

38 

+

39 for module in modules: 

+

40 plugins.current_module = module 

+

41 __import__(module) 

+

42 mod = sys.modules[module] 

+

43 

+

44 coverage_init = getattr(mod, "coverage_init", None) 

+

45 if not coverage_init: 

+

46 raise CoverageException( 

+

47 "Plugin module %r didn't define a coverage_init function" % module 

+

48 ) 

+

49 

+

50 options = config.get_plugin_options(module) 

+

51 coverage_init(plugins, options) 

+

52 

+

53 plugins.current_module = None 

+

54 return plugins 

+

55 

+

56 def add_file_tracer(self, plugin): 

+

57 """Add a file tracer plugin. 

+

58 

+

59 `plugin` is an instance of a third-party plugin class. It must 

+

60 implement the :meth:`CoveragePlugin.file_tracer` method. 

+

61 

+

62 """ 

+

63 self._add_plugin(plugin, self.file_tracers) 

+

64 

+

65 def add_configurer(self, plugin): 

+

66 """Add a configuring plugin. 

+

67 

+

68 `plugin` is an instance of a third-party plugin class. It must 

+

69 implement the :meth:`CoveragePlugin.configure` method. 

+

70 

+

71 """ 

+

72 self._add_plugin(plugin, self.configurers) 

+

73 

+

74 def add_dynamic_context(self, plugin): 

+

75 """Add a dynamic context plugin. 

+

76 

+

77 `plugin` is an instance of a third-party plugin class. It must 

+

78 implement the :meth:`CoveragePlugin.dynamic_context` method. 

+

79 

+

80 """ 

+

81 self._add_plugin(plugin, self.context_switchers) 

+

82 

+

83 def add_noop(self, plugin): 

+

84 """Add a plugin that does nothing. 

+

85 

+

86 This is only useful for testing the plugin support. 

+

87 

+

88 """ 

+

89 self._add_plugin(plugin, None) 

+

90 

+

91 def _add_plugin(self, plugin, specialized): 

+

92 """Add a plugin object. 

+

93 

+

94 `plugin` is a :class:`CoveragePlugin` instance to add. `specialized` 

+

95 is a list to append the plugin to. 

+

96 

+

97 """ 

+

98 plugin_name = "%s.%s" % (self.current_module, plugin.__class__.__name__) 

+

99 if self.debug and self.debug.should('plugin'):    [99 ↛ 100: line 99 didn't jump to line 100, because the condition on line 99 was never true]

+

100 self.debug.write("Loaded plugin %r: %r" % (self.current_module, plugin)) 

+

101 labelled = LabelledDebug("plugin %r" % (self.current_module,), self.debug) 

+

102 plugin = DebugPluginWrapper(plugin, labelled) 

+

103 

+

104 # pylint: disable=attribute-defined-outside-init 

+

105 plugin._coverage_plugin_name = plugin_name 

+

106 plugin._coverage_enabled = True 

+

107 self.order.append(plugin) 

+

108 self.names[plugin_name] = plugin 

+

109 if specialized is not None: 

+

110 specialized.append(plugin) 

+

111 

+

112 def __nonzero__(self): 

+

113 return bool(self.order) 

+

114 

+

115 __bool__ = __nonzero__ 

+

116 

+

117 def __iter__(self): 

+

118 return iter(self.order) 

+

119 

+

120 def get(self, plugin_name): 

+

121 """Return a plugin by name.""" 

+

122 return self.names[plugin_name] 

+

123 

+

124 

+

125class LabelledDebug(object): 

+

126 """A Debug writer, but with labels for prepending to the messages.""" 

+

127 

+

128 def __init__(self, label, debug, prev_labels=()): 

+

129 self.labels = list(prev_labels) + [label] 

+

130 self.debug = debug 

+

131 

+

132 def add_label(self, label): 

+

133 """Add a label to the writer, and return a new `LabelledDebug`.""" 

+

134 return LabelledDebug(label, self.debug, self.labels) 

+

135 

+

136 def message_prefix(self): 

+

137 """The prefix to use on messages, combining the labels.""" 

+

138 prefixes = self.labels + [''] 

+

139 return ":\n".join(" "*i+label for i, label in enumerate(prefixes)) 

+

140 

+

141 def write(self, message): 

+

142 """Write `message`, but with the labels prepended.""" 

+

143 self.debug.write("%s%s" % (self.message_prefix(), message)) 

+

144 

+

145 

+

146class DebugPluginWrapper(CoveragePlugin): 

+

147 """Wrap a plugin, and use debug to report on what it's doing.""" 

+

148 

+

149 def __init__(self, plugin, debug): 

+

150 super(DebugPluginWrapper, self).__init__() 

+

151 self.plugin = plugin 

+

152 self.debug = debug 

+

153 

+

154 def file_tracer(self, filename): 

+

155 tracer = self.plugin.file_tracer(filename) 

+

156 self.debug.write("file_tracer(%r) --> %r" % (filename, tracer)) 

+

157 if tracer: 

+

158 debug = self.debug.add_label("file %r" % (filename,)) 

+

159 tracer = DebugFileTracerWrapper(tracer, debug) 

+

160 return tracer 

+

161 

+

162 def file_reporter(self, filename): 

+

163 reporter = self.plugin.file_reporter(filename) 

+

164 self.debug.write("file_reporter(%r) --> %r" % (filename, reporter)) 

+

165 if reporter: 

+

166 debug = self.debug.add_label("file %r" % (filename,)) 

+

167 reporter = DebugFileReporterWrapper(filename, reporter, debug) 

+

168 return reporter 

+

169 

+

170 def dynamic_context(self, frame): 

+

171 context = self.plugin.dynamic_context(frame) 

+

172 self.debug.write("dynamic_context(%r) --> %r" % (frame, context)) 

+

173 return context 

+

174 

+

175 def find_executable_files(self, src_dir): 

+

176 executable_files = self.plugin.find_executable_files(src_dir) 

+

177 self.debug.write("find_executable_files(%r) --> %r" % (src_dir, executable_files)) 

+

178 return executable_files 

+

179 

+

180 def configure(self, config): 

+

181 self.debug.write("configure(%r)" % (config,)) 

+

182 self.plugin.configure(config) 

+

183 

+

184 def sys_info(self): 

+

185 return self.plugin.sys_info() 

+

186 

+

187 

+

188class DebugFileTracerWrapper(FileTracer): 

+

189 """A debugging `FileTracer`.""" 

+

190 

+

191 def __init__(self, tracer, debug): 

+

192 self.tracer = tracer 

+

193 self.debug = debug 

+

194 

+

195 def _show_frame(self, frame): 

+

196 """A short string identifying a frame, for debug messages.""" 

+

197 return "%s@%d" % ( 

+

198 os.path.basename(frame.f_code.co_filename), 

+

199 frame.f_lineno, 

+

200 ) 

+

201 

+

202 def source_filename(self): 

+

203 sfilename = self.tracer.source_filename() 

+

204 self.debug.write("source_filename() --> %r" % (sfilename,)) 

+

205 return sfilename 

+

206 

+

207 def has_dynamic_source_filename(self): 

+

208 has = self.tracer.has_dynamic_source_filename() 

+

209 self.debug.write("has_dynamic_source_filename() --> %r" % (has,)) 

+

210 return has 

+

211 

+

212 def dynamic_source_filename(self, filename, frame): 

+

213 dyn = self.tracer.dynamic_source_filename(filename, frame) 

+

214 self.debug.write("dynamic_source_filename(%r, %s) --> %r" % ( 

+

215 filename, self._show_frame(frame), dyn, 

+

216 )) 

+

217 return dyn 

+

218 

+

219 def line_number_range(self, frame): 

+

220 pair = self.tracer.line_number_range(frame) 

+

221 self.debug.write("line_number_range(%s) --> %r" % (self._show_frame(frame), pair)) 

+

222 return pair 

+

223 

+

224 

+

225class DebugFileReporterWrapper(FileReporter): 

+

226 """A debugging `FileReporter`.""" 

+

227 

+

228 def __init__(self, filename, reporter, debug): 

+

229 super(DebugFileReporterWrapper, self).__init__(filename) 

+

230 self.reporter = reporter 

+

231 self.debug = debug 

+

232 

+

233 def relative_filename(self): 

+

234 ret = self.reporter.relative_filename() 

+

235 self.debug.write("relative_filename() --> %r" % (ret,)) 

+

236 return ret 

+

237 

+

238 def lines(self): 

+

239 ret = self.reporter.lines() 

+

240 self.debug.write("lines() --> %r" % (ret,)) 

+

241 return ret 

+

242 

+

243 def excluded_lines(self): 

+

244 ret = self.reporter.excluded_lines() 

+

245 self.debug.write("excluded_lines() --> %r" % (ret,)) 

+

246 return ret 

+

247 

+

248 def translate_lines(self, lines): 

+

249 ret = self.reporter.translate_lines(lines) 

+

250 self.debug.write("translate_lines(%r) --> %r" % (lines, ret)) 

+

251 return ret 

+

252 

+

253 def translate_arcs(self, arcs): 

+

254 ret = self.reporter.translate_arcs(arcs) 

+

255 self.debug.write("translate_arcs(%r) --> %r" % (arcs, ret)) 

+

256 return ret 

+

257 

+

258 def no_branch_lines(self): 

+

259 ret = self.reporter.no_branch_lines() 

+

260 self.debug.write("no_branch_lines() --> %r" % (ret,)) 

+

261 return ret 

+

262 

+

263 def exit_counts(self): 

+

264 ret = self.reporter.exit_counts() 

+

265 self.debug.write("exit_counts() --> %r" % (ret,)) 

+

266 return ret 

+

267 

+

268 def arcs(self): 

+

269 ret = self.reporter.arcs() 

+

270 self.debug.write("arcs() --> %r" % (ret,)) 

+

271 return ret 

+

272 

+

273 def source(self): 

+

274 ret = self.reporter.source() 

+

275 self.debug.write("source() --> %d chars" % (len(ret),)) 

+

276 return ret 

+

277 

+

278 def source_token_lines(self): 

+

279 ret = list(self.reporter.source_token_lines()) 

+

280 self.debug.write("source_token_lines() --> %d tokens" % (len(ret),)) 

+

281 return ret 

+
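Plugins.add_dynamic_context above is the registration hook for the dynamic context switchers described in plugin.py. A sketch of one such plug-in, which labels frames that look like unittest test methods; the class name and the labelling scheme are illustrative, not part of coverage.py:

import unittest
import coverage

class TestMethodContexts(coverage.CoveragePlugin):
    """Start a new dynamic context for every unittest test method."""

    def dynamic_context(self, frame):
        if frame.f_code.co_name.startswith("test"):
            candidate = frame.f_locals.get("self")
            if isinstance(candidate, unittest.TestCase):
                return "{}.{}".format(type(candidate).__name__, frame.f_code.co_name)
        return None  # No new context for this frame.

def coverage_init(reg, options):
    reg.add_dynamic_context(TestMethodContexts())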
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_python_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_python_py.html new file mode 100644 index 000000000..6387b736b --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_python_py.html @@ -0,0 +1,315 @@
Coverage for coverage/python.py: 96.629%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Python source expertise for coverage.py""" 

+

5 

+

6import os.path 

+

7import types 

+

8import zipimport 

+

9 

+

10from coverage import env, files 

+

11from coverage.misc import contract, expensive, isolate_module, join_regex 

+

12from coverage.misc import CoverageException, NoSource 

+

13from coverage.parser import PythonParser 

+

14from coverage.phystokens import source_token_lines, source_encoding 

+

15from coverage.plugin import FileReporter 

+

16 

+

17os = isolate_module(os) 

+

18 

+

19 

+

20@contract(returns='bytes') 

+

21def read_python_source(filename): 

+

22 """Read the Python source text from `filename`. 

+

23 

+

24 Returns bytes. 

+

25 

+

26 """ 

+

27 with open(filename, "rb") as f: 

+

28 source = f.read() 

+

29 

+

30 if env.IRONPYTHON: 

+

31 # IronPython reads Unicode strings even for "rb" files. 

+

32 source = bytes(source) 

+

33 

+

34 return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n") 

+

35 

+

36 

+

37@contract(returns='unicode') 

+

38def get_python_source(filename): 

+

39 """Return the source code, as unicode.""" 

+

40 base, ext = os.path.splitext(filename) 

+

41 if ext == ".py" and env.WINDOWS: 

+

42 exts = [".py", ".pyw"] 

+

43 else: 

+

44 exts = [ext] 

+

45 

+

46 for ext in exts: 

+

47 try_filename = base + ext 

+

48 if os.path.exists(try_filename): 

+

49 # A regular text file: open it. 

+

50 source = read_python_source(try_filename) 

+

51 break 

+

52 

+

53 # Maybe it's in a zip file? 

+

54 source = get_zip_bytes(try_filename) 

+

55 if source is not None: 

+

56 break 

+

57 else: 

+

58 # Couldn't find source. 

+

59 exc_msg = "No source for code: '%s'.\n" % (filename,) 

+

60 exc_msg += "Aborting report output, consider using -i." 

+

61 raise NoSource(exc_msg) 

+

62 

+

63 # Replace \f because of http://bugs.python.org/issue19035 

+

64 source = source.replace(b'\f', b' ') 

+

65 source = source.decode(source_encoding(source), "replace") 

+

66 

+

67 # Python code should always end with a line with a newline. 

+

68 if source and source[-1] != '\n': 

+

69 source += '\n' 

+

70 

+

71 return source 

+

72 

+

73 

+

74@contract(returns='bytes|None') 

+

75def get_zip_bytes(filename): 

+

76 """Get data from `filename` if it is a zip file path. 

+

77 

+

78 Returns the bytestring data read from the zip file, or None if no zip file 

+

79 could be found or `filename` isn't in it. The data returned will be 

+

80 an empty string if the file is empty. 

+

81 

+

82 """ 

+

83 markers = ['.zip'+os.sep, '.egg'+os.sep, '.pex'+os.sep] 

+

84 for marker in markers: 

+

85 if marker in filename: 

+

86 parts = filename.split(marker) 

+

87 try: 

+

88 zi = zipimport.zipimporter(parts[0]+marker[:-1]) 

+

89 except zipimport.ZipImportError: 

+

90 continue 

+

91 try: 

+

92 data = zi.get_data(parts[1]) 

+

93 except IOError: 

+

94 continue 

+

95 return data 

+

96 return None 

+

97 

+

98 

+

99def source_for_file(filename): 

+

100 """Return the source filename for `filename`. 

+

101 

+

102 Given a file name being traced, return the best guess as to the source 

+

103 file to attribute it to. 

+

104 

+

105 """ 

+

106 if filename.endswith(".py"): 

+

107 # .py files are themselves source files. 

+

108 return filename 

+

109 

+

110 elif filename.endswith((".pyc", ".pyo")): 

+

111 # Bytecode files probably have source files near them. 

+

112 py_filename = filename[:-1] 

+

113 if os.path.exists(py_filename): 

+

114 # Found a .py file, use that. 

+

115 return py_filename 

+

116 if env.WINDOWS: 

+

117 # On Windows, it could be a .pyw file. 

+

118 pyw_filename = py_filename + "w" 

+

119 if os.path.exists(pyw_filename): 

+

120 return pyw_filename 

+

121 # Didn't find source, but it's probably the .py file we want. 

+

122 return py_filename 

+

123 

+

124 elif filename.endswith("$py.class"): 

+

125 # Jython is easy to guess. 

+

126 return filename[:-9] + ".py" 

+

127 

+

128 # No idea, just use the file name as-is. 

+

129 return filename 

+

130 

+

131 

+

132def source_for_morf(morf): 

+

133 """Get the source filename for the module-or-file `morf`.""" 

+

134 if hasattr(morf, '__file__') and morf.__file__: 

+

135 filename = morf.__file__ 

+

136 elif isinstance(morf, types.ModuleType): 

+

137 # A module should have had .__file__, otherwise we can't use it. 

+

138 # This could be a PEP-420 namespace package. 

+

139 raise CoverageException("Module {} has no file".format(morf)) 

+

140 else: 

+

141 filename = morf 

+

142 

+

143 filename = source_for_file(files.unicode_filename(filename)) 

+

144 return filename 

+

145 

+

146 

+

147class PythonFileReporter(FileReporter): 

+

148 """Report support for a Python file.""" 

+

149 

+

150 def __init__(self, morf, coverage=None): 

+

151 self.coverage = coverage 

+

152 

+

153 filename = source_for_morf(morf) 

+

154 

+

155 super(PythonFileReporter, self).__init__(files.canonical_filename(filename)) 

+

156 

+

157 if hasattr(morf, '__name__'): 

+

158 name = morf.__name__.replace(".", os.sep) 

+

159 if os.path.basename(filename).startswith('__init__.'): 

+

160 name += os.sep + "__init__" 

+

161 name += ".py" 

+

162 name = files.unicode_filename(name) 

+

163 else: 

+

164 name = files.relative_filename(filename) 

+

165 self.relname = name 

+

166 

+

167 self._source = None 

+

168 self._parser = None 

+

169 self._excluded = None 

+

170 

+

171 def __repr__(self): 

+

172 return "<PythonFileReporter {!r}>".format(self.filename) 

+

173 

+

174 @contract(returns='unicode') 

+

175 def relative_filename(self): 

+

176 return self.relname 

+

177 

+

178 @property 

+

179 def parser(self): 

+

180 """Lazily create a :class:`PythonParser`.""" 

+

181 if self._parser is None: 

+

182 self._parser = PythonParser( 

+

183 filename=self.filename, 

+

184 exclude=self.coverage._exclude_regex('exclude'), 

+

185 ) 

+

186 self._parser.parse_source() 

+

187 return self._parser 

+

188 

+

189 def lines(self): 

+

190 """Return the line numbers of statements in the file.""" 

+

191 return self.parser.statements 

+

192 

+

193 def excluded_lines(self): 

+

194 """Return the line numbers of statements in the file.""" 

+

195 return self.parser.excluded 

+

196 

+

197 def translate_lines(self, lines): 

+

198 return self.parser.translate_lines(lines) 

+

199 

+

200 def translate_arcs(self, arcs): 

+

201 return self.parser.translate_arcs(arcs) 

+

202 

+

203 @expensive 

+

204 def no_branch_lines(self): 

+

205 no_branch = self.parser.lines_matching( 

+

206 join_regex(self.coverage.config.partial_list), 

+

207 join_regex(self.coverage.config.partial_always_list) 

+

208 ) 

+

209 return no_branch 

+

210 

+

211 @expensive 

+

212 def arcs(self): 

+

213 return self.parser.arcs() 

+

214 

+

215 @expensive 

+

216 def exit_counts(self): 

+

217 return self.parser.exit_counts() 

+

218 

+

219 def missing_arc_description(self, start, end, executed_arcs=None): 

+

220 return self.parser.missing_arc_description(start, end, executed_arcs) 

+

221 

+

222 @contract(returns='unicode') 

+

223 def source(self): 

+

224 if self._source is None: 

+

225 self._source = get_python_source(self.filename) 

+

226 return self._source 

+

227 

+

228 def should_be_python(self): 

+

229 """Does it seem like this file should contain Python? 

+

230 

+

231 This is used to decide if a file reported as part of the execution of 

+

232 a program was really likely to have contained Python in the first 

+

233 place. 

+

234 

+

235 """ 

+

236 # Get the file extension. 

+

237 _, ext = os.path.splitext(self.filename) 

+

238 

+

239 # Anything named *.py* should be Python. 

+

240 if ext.startswith('.py'): 

+

241 return True 

+

242 # A file with no extension should be Python. 

+

243 if not ext:    [243 ↛ 244: line 243 didn't jump to line 244, because the condition on line 243 was never true]

+

244 return True 

+

245 # Everything else is probably not Python. 

+

246 return False 

+

247 

+

248 def source_token_lines(self): 

+

249 return source_token_lines(self.source()) 

+
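source_for_file above maps a traced file name back to the source file it should be credited to: .pyc/.pyo to the neighbouring .py, Jython's $py.class to .py, anything else unchanged. A stripped-down sketch of that mapping (it omits the os.path.exists and Windows .pyw checks the real function does); the guess_source name and the sample paths are illustrative:

def guess_source(filename):
    """Best-effort guess at the source file for a traced `filename`."""
    if filename.endswith(".py"):
        return filename                      # already a source file
    if filename.endswith((".pyc", ".pyo")):
        return filename[:-1]                 # bytecode usually sits next to its .py
    if filename.endswith("$py.class"):
        return filename[:-9] + ".py"         # Jython bytecode
    return filename                          # no better idea: use it as-is

print(guess_source("pkg/mod.pyc"))       # pkg/mod.py
print(guess_source("pkg/mod$py.class"))  # pkg/mod.py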
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_pytracer_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_pytracer_py.html new file mode 100644 index 000000000..4abe2ab86 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_pytracer_py.html @@ -0,0 +1,340 @@
Coverage for coverage/pytracer.py: 12.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Raw data collector for coverage.py.""" 

+

5 

+

6import atexit 

+

7import dis 

+

8import sys 

+

9 

+

10from coverage import env 

+

11 

+

12# We need the YIELD_VALUE opcode below, in a comparison-friendly form. 

+

13YIELD_VALUE = dis.opmap['YIELD_VALUE'] 

+

14if env.PY2: 

+

15 YIELD_VALUE = chr(YIELD_VALUE) 

+

16 

+

17# When running meta-coverage, this file can try to trace itself, which confuses 

+

18# everything. Don't trace ourselves. 

+

19 

+

20THIS_FILE = __file__.rstrip("co") 

+

21 

+

22 

+

23class PyTracer(object): 

+

24 """Python implementation of the raw data tracer.""" 

+

25 

+

26 # Because of poor implementations of trace-function-manipulating tools, 

+

27 # the Python trace function must be kept very simple. In particular, there 

+

28 # must be only one function ever set as the trace function, both through 

+

29 # sys.settrace, and as the return value from the trace function. Put 

+

30 # another way, the trace function must always return itself. It cannot 

+

31 # swap in other functions, or return None to avoid tracing a particular 

+

32 # frame. 

+

33 # 

+

34 # The trace manipulator that introduced this restriction is DecoratorTools, 

+

35 # which sets a trace function, and then later restores the pre-existing one 

+

36 # by calling sys.settrace with a function it found in the current frame. 

+

37 # 

+

38 # Systems that use DecoratorTools (or similar trace manipulations) must use 

+

39 # PyTracer to get accurate results. The command-line --timid argument is 

+

40 # used to force the use of this tracer. 

+

41 

+

42 def __init__(self): 

+

43 # Attributes set from the collector: 

+

44 self.data = None 

+

45 self.trace_arcs = False 

+

46 self.should_trace = None 

+

47 self.should_trace_cache = None 

+

48 self.should_start_context = None 

+

49 self.warn = None 

+

50 # The threading module to use, if any. 

+

51 self.threading = None 

+

52 

+

53 self.cur_file_dict = None 

+

54 self.last_line = 0 # int, but uninitialized. 

+

55 self.cur_file_name = None 

+

56 self.context = None 

+

57 self.started_context = False 

+

58 

+

59 self.data_stack = [] 

+

60 self.last_exc_back = None 

+

61 self.last_exc_firstlineno = 0 

+

62 self.thread = None 

+

63 self.stopped = False 

+

64 self._activity = False 

+

65 

+

66 self.in_atexit = False 

+

67 # On exit, self.in_atexit = True 

+

68 atexit.register(setattr, self, 'in_atexit', True) 

+

69 

+

70 def __repr__(self): 

+

71 return "<PyTracer at {}: {} lines in {} files>".format( 

+

72 id(self), 

+

73 sum(len(v) for v in self.data.values()), 

+

74 len(self.data), 

+

75 ) 

+

76 

+

77 def log(self, marker, *args): 

+

78 """For hard-core logging of what this tracer is doing.""" 

+

79 with open("/tmp/debug_trace.txt", "a") as f: 

+

80 f.write("{} {}[{}]".format( 

+

81 marker, 

+

82 id(self), 

+

83 len(self.data_stack), 

+

84 )) 

+

85 if 0: 

+

86 f.write(".{:x}.{:x}".format( 

+

87 self.thread.ident, 

+

88 self.threading.currentThread().ident, 

+

89 )) 

+

90 f.write(" {}".format(" ".join(map(str, args)))) 

+

91 if 0: 

+

92 f.write(" | ") 

+

93 stack = " / ".join( 

+

94 (fname or "???").rpartition("/")[-1] 

+

95 for _, fname, _, _ in self.data_stack 

+

96 ) 

+

97 f.write(stack) 

+

98 f.write("\n") 

+

99 

+

100 def _trace(self, frame, event, arg_unused): 

+

101 """The trace function passed to sys.settrace.""" 

+

102 

+

103 if THIS_FILE in frame.f_code.co_filename: 

+

104 return None 

+

105 

+

106 #self.log(":", frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + "()", event) 

+

107 

+

108 if (self.stopped and sys.gettrace() == self._trace): # pylint: disable=comparison-with-callable 

+

109 # The PyTrace.stop() method has been called, possibly by another 

+

110 # thread, let's deactivate ourselves now. 

+

111 if 0: 

+

112 self.log("---\nX", frame.f_code.co_filename, frame.f_lineno) 

+

113 f = frame 

+

114 while f: 

+

115 self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace) 

+

116 f = f.f_back 

+

117 sys.settrace(None) 

+

118 self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = ( 

+

119 self.data_stack.pop() 

+

120 ) 

+

121 return None 

+

122 

+

123 if self.last_exc_back: 

+

124 if frame == self.last_exc_back: 

+

125 # Someone forgot a return event. 

+

126 if self.trace_arcs and self.cur_file_dict: 

+

127 pair = (self.last_line, -self.last_exc_firstlineno) 

+

128 self.cur_file_dict[pair] = None 

+

129 self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = ( 

+

130 self.data_stack.pop() 

+

131 ) 

+

132 self.last_exc_back = None 

+

133 

+

134 # if event != 'call' and frame.f_code.co_filename != self.cur_file_name: 

+

135 # self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno) 

+

136 

+

137 if event == 'call': 

+

138 # Should we start a new context? 

+

139 if self.should_start_context and self.context is None: 

+

140 context_maybe = self.should_start_context(frame) 

+

141 if context_maybe is not None: 

+

142 self.context = context_maybe 

+

143 self.started_context = True 

+

144 self.switch_context(self.context) 

+

145 else: 

+

146 self.started_context = False 

+

147 else: 

+

148 self.started_context = False 

+

149 

+

150 # Entering a new frame. Decide if we should trace 

+

151 # in this file. 

+

152 self._activity = True 

+

153 self.data_stack.append( 

+

154 ( 

+

155 self.cur_file_dict, 

+

156 self.cur_file_name, 

+

157 self.last_line, 

+

158 self.started_context, 

+

159 ) 

+

160 ) 

+

161 filename = frame.f_code.co_filename 

+

162 self.cur_file_name = filename 

+

163 disp = self.should_trace_cache.get(filename) 

+

164 if disp is None: 

+

165 disp = self.should_trace(filename, frame) 

+

166 self.should_trace_cache[filename] = disp 

+

167 

+

168 self.cur_file_dict = None 

+

169 if disp.trace: 

+

170 tracename = disp.source_filename 

+

171 if tracename not in self.data: 

+

172 self.data[tracename] = {} 

+

173 self.cur_file_dict = self.data[tracename] 

+

174 # The call event is really a "start frame" event, and happens for 

+

175 # function calls and re-entering generators. The f_lasti field is 

+

176 # -1 for calls, and a real offset for generators. Use <0 as the 

+

177 # line number for calls, and the real line number for generators. 

+

178 if getattr(frame, 'f_lasti', -1) < 0: 

+

179 self.last_line = -frame.f_code.co_firstlineno 

+

180 else: 

+

181 self.last_line = frame.f_lineno 

+

182 elif event == 'line': 

+

183 # Record an executed line. 

+

184 if self.cur_file_dict is not None: 

+

185 lineno = frame.f_lineno 

+

186 

+

187 if self.trace_arcs: 

+

188 self.cur_file_dict[(self.last_line, lineno)] = None 

+

189 else: 

+

190 self.cur_file_dict[lineno] = None 

+

191 self.last_line = lineno 

+

192 elif event == 'return': 

+

193 if self.trace_arcs and self.cur_file_dict: 

+

194 # Record an arc leaving the function, but beware that a 

+

195 # "return" event might just mean yielding from a generator. 

+

196 # Jython seems to have an empty co_code, so just assume return. 

+

197 code = frame.f_code.co_code 

+

198 if (not code) or code[frame.f_lasti] != YIELD_VALUE: 

+

199 first = frame.f_code.co_firstlineno 

+

200 self.cur_file_dict[(self.last_line, -first)] = None 

+

201 # Leaving this function, pop the filename stack. 

+

202 self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = ( 

+

203 self.data_stack.pop() 

+

204 ) 

+

205 # Leaving a context? 

+

206 if self.started_context: 

+

207 self.context = None 

+

208 self.switch_context(None) 

+

209 elif event == 'exception': 

+

210 self.last_exc_back = frame.f_back 

+

211 self.last_exc_firstlineno = frame.f_code.co_firstlineno 

+

212 return self._trace 

+

213 

+

214 def start(self): 

+

215 """Start this Tracer. 

+

216 

+

217 Return a Python function suitable for use with sys.settrace(). 

+

218 

+

219 """ 

+

220 self.stopped = False 

+

221 if self.threading: 

+

222 if self.thread is None: 

+

223 self.thread = self.threading.currentThread() 

+

224 else: 

+

225 if self.thread.ident != self.threading.currentThread().ident: 

+

226 # Re-starting from a different thread!? Don't set the trace 

+

227 # function, but we are marked as running again, so maybe it 

+

228 # will be ok? 

+

229 #self.log("~", "starting on different threads") 

+

230 return self._trace 

+

231 

+

232 sys.settrace(self._trace) 

+

233 return self._trace 

+

234 

+

235 def stop(self): 

+

236 """Stop this Tracer.""" 

+

237 # Get the active tracer callback before setting the stop flag to be 

+

238 # able to detect if the tracer was changed prior to stopping it. 

+

239 tf = sys.gettrace() 

+

240 

+

241 # Set the stop flag. The actual call to sys.settrace(None) will happen 

+

242 # in the self._trace callback itself to make sure to call it from the 

+

243 # right thread. 

+

244 self.stopped = True 

+

245 

+

246 if self.threading and self.thread.ident != self.threading.currentThread().ident: 

+

247 # Called on a different thread than started us: we can't unhook 

+

248 # ourselves, but we've set the flag that we should stop, so we 

+

249 # won't do any more tracing. 

+

250 #self.log("~", "stopping on different threads") 

+

251 return 

+

252 

+

253 if self.warn: 

+

254 # PyPy clears the trace function before running atexit functions, 

+

255 # so don't warn if we are in atexit on PyPy and the trace function 

+

256 # has changed to None. 

+

257 dont_warn = (env.PYPY and env.PYPYVERSION >= (5, 4) and self.in_atexit and tf is None) 

+

258 if (not dont_warn) and tf != self._trace: # pylint: disable=comparison-with-callable 

+

259 self.warn( 

+

260 "Trace function changed, measurement is likely wrong: %r" % (tf,), 

+

261 slug="trace-changed", 

+

262 ) 

+

263 

+

264 def activity(self): 

+

265 """Has there been any activity?""" 

+

266 return self._activity 

+

267 

+

268 def reset_activity(self): 

+

269 """Reset the activity() flag.""" 

+

270 self._activity = False 

+

271 

+

272 def get_stats(self): 

+

273 """Return a dictionary of statistics, or None.""" 

+

274 return None 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_report_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_report_py.html new file mode 100644 index 000000000..efbc5fec1 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_report_py.html @@ -0,0 +1,154 @@ + + + + + + Coverage for coverage/report.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Reporter foundation for coverage.py.""" 

+

5import sys 

+

6 

+

7from coverage import env 

+

8from coverage.files import prep_patterns, FnmatchMatcher 

+

9from coverage.misc import CoverageException, NoSource, NotPython, ensure_dir_for_file, file_be_gone 

+

10 

+

11 

+

12def render_report(output_path, reporter, morfs): 

+

13 """Run a report generator, managing the output file. 

+

14 

+

15 This function ensures the output file is ready to be written, writes the 

+

16 report to it, then closes the file and cleans up. 

+

17 

+

18 """ 

+

19 file_to_close = None 

+

20 delete_file = False 

+

21 

+

22 if output_path == "-": 

+

23 outfile = sys.stdout 

+

24 else: 

+

25 # Ensure that the output directory is created; done here 

+

26 # because this report pre-opens the output file. 

+

27 # HTMLReport does this using the Report plumbing because 

+

28 # its task is more complex, being multiple files. 

+

29 ensure_dir_for_file(output_path) 

+

30 open_kwargs = {} 

+

31 if env.PY3: 

+

32 open_kwargs["encoding"] = "utf8" 

+

33 outfile = open(output_path, "w", **open_kwargs) 

+

34 file_to_close = outfile 

+

35 

+

36 try: 

+

37 return reporter.report(morfs, outfile=outfile) 

+

38 except CoverageException: 

+

39 delete_file = True 

+

40 raise 

+

41 finally: 

+

42 if file_to_close: 

+

43 file_to_close.close() 

+

44 if delete_file: 

+

45 file_be_gone(output_path) # pragma: part covered (doesn't return) 

+
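To make the reporter contract concrete, here is a minimal sketch; DummyReporter is a hypothetical stand-in, not a coverage.py class. Any object with a report(morfs, outfile=...) method works, and passing "-" as the output path sends the report to stdout.

from coverage.report import render_report

class DummyReporter(object):
    """Hypothetical reporter: only needs a report(morfs, outfile=...) method."""
    def report(self, morfs, outfile=None):
        outfile.write("hello from a reporter\n")
        return 100.0   # reporters return the total coverage percentage

total = render_report("-", DummyReporter(), None)   # "-" writes to stdout
print(total)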

46 

+

47 

+

48def get_analysis_to_report(coverage, morfs): 

+

49 """Get the files to report on. 

+

50 

+

51 For each morf in `morfs`, if it should be reported on (based on the omit 

+

52 and include configuration options), yield a pair, the `FileReporter` and 

+

53 `Analysis` for the morf. 

+

54 

+

55 """ 

+

56 file_reporters = coverage._get_file_reporters(morfs) 

+

57 config = coverage.config 

+

58 

+

59 if config.report_include: 

+

60 matcher = FnmatchMatcher(prep_patterns(config.report_include)) 

+

61 file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)] 

+

62 

+

63 if config.report_omit: 

+

64 matcher = FnmatchMatcher(prep_patterns(config.report_omit)) 

+

65 file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)] 

+

66 

+

67 if not file_reporters: 

+

68 raise CoverageException("No data to report.") 

+

69 

+

70 for fr in sorted(file_reporters): 

+

71 try: 

+

72 analysis = coverage._analyze(fr) 

+

73 except NoSource: 

+

74 if not config.ignore_errors: 

+

75 raise 

+

76 except NotPython: 

+

77 # Only report errors for .py files, and only if we didn't 

+

78 # explicitly suppress those errors. 

+

79 # NotPython is only raised by PythonFileReporter, which has a 

+

80 # should_be_python() method. 

+

81 if fr.should_be_python(): 

+

82 if config.ignore_errors: 

+

83 msg = "Couldn't parse Python file '{}'".format(fr.filename) 

+

84 coverage._warn(msg, slug="couldnt-parse") 

+

85 else: 

+

86 raise 

+

87 else: 

+

88 yield (fr, analysis) 

+
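A hedged usage sketch, assuming an existing .coverage data file with measured files; relative_filename() is part of the FileReporter API, and pc_covered_str is the Numbers property defined in coverage/results.py.

from coverage import Coverage
from coverage.report import get_analysis_to_report

cov = Coverage()
cov.load()   # read the existing .coverage data file
for fr, analysis in get_analysis_to_report(cov, None):
    print(fr.relative_filename(), analysis.numbers.pc_covered_str)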
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_results_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_results_py.html new file mode 100644 index 000000000..1c814637a --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_results_py.html @@ -0,0 +1,409 @@ + + + + + + Coverage for coverage/results.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Results of coverage measurement.""" 

+

5 

+

6import collections 

+

7 

+

8from coverage.backward import iitems 

+

9from coverage.debug import SimpleReprMixin 

+

10from coverage.misc import contract, CoverageException, nice_pair 

+

11 

+

12 

+

13class Analysis(object): 

+

14 """The results of analyzing a FileReporter.""" 

+

15 

+

16 def __init__(self, data, file_reporter, file_mapper): 

+

17 self.data = data 

+

18 self.file_reporter = file_reporter 

+

19 self.filename = file_mapper(self.file_reporter.filename) 

+

20 self.statements = self.file_reporter.lines() 

+

21 self.excluded = self.file_reporter.excluded_lines() 

+

22 

+

23 # Identify missing statements. 

+

24 executed = self.data.lines(self.filename) or [] 

+

25 executed = self.file_reporter.translate_lines(executed) 

+

26 self.executed = executed 

+

27 self.missing = self.statements - self.executed 

+

28 

+

29 if self.data.has_arcs(): 

+

30 self._arc_possibilities = sorted(self.file_reporter.arcs()) 

+

31 self.exit_counts = self.file_reporter.exit_counts() 

+

32 self.no_branch = self.file_reporter.no_branch_lines() 

+

33 n_branches = self._total_branches() 

+

34 mba = self.missing_branch_arcs() 

+

35 n_partial_branches = sum(len(v) for k,v in iitems(mba) if k not in self.missing) 

+

36 n_missing_branches = sum(len(v) for k,v in iitems(mba)) 

+

37 else: 

+

38 self._arc_possibilities = [] 

+

39 self.exit_counts = {} 

+

40 self.no_branch = set() 

+

41 n_branches = n_partial_branches = n_missing_branches = 0 

+

42 

+

43 self.numbers = Numbers( 

+

44 n_files=1, 

+

45 n_statements=len(self.statements), 

+

46 n_excluded=len(self.excluded), 

+

47 n_missing=len(self.missing), 

+

48 n_branches=n_branches, 

+

49 n_partial_branches=n_partial_branches, 

+

50 n_missing_branches=n_missing_branches, 

+

51 ) 

+

52 

+

53 def missing_formatted(self, branches=False): 

+

54 """The missing line numbers, formatted nicely. 

+

55 

+

56 Returns a string like "1-2, 5-11, 13-14". 

+

57 

+

58 If `branches` is true, includes the missing branch arcs also. 

+

59 

+

60 """ 

+

61 if branches and self.has_arcs(): 

+

62 arcs = iitems(self.missing_branch_arcs()) 

+

63 else: 

+

64 arcs = None 

+

65 

+

66 return format_lines(self.statements, self.missing, arcs=arcs) 

+

67 

+

68 def has_arcs(self): 

+

69 """Were arcs measured in this result?""" 

+

70 return self.data.has_arcs() 

+

71 

+

72 @contract(returns='list(tuple(int, int))') 

+

73 def arc_possibilities(self): 

+

74 """Returns a sorted list of the arcs in the code.""" 

+

75 return self._arc_possibilities 

+

76 

+

77 @contract(returns='list(tuple(int, int))') 

+

78 def arcs_executed(self): 

+

79 """Returns a sorted list of the arcs actually executed in the code.""" 

+

80 executed = self.data.arcs(self.filename) or [] 

+

81 executed = self.file_reporter.translate_arcs(executed) 

+

82 return sorted(executed) 

+

83 

+

84 @contract(returns='list(tuple(int, int))') 

+

85 def arcs_missing(self): 

+

86 """Returns a sorted list of the arcs in the code not executed.""" 

+

87 possible = self.arc_possibilities() 

+

88 executed = self.arcs_executed() 

+

89 missing = ( 

+

90 p for p in possible 

+

91 if p not in executed 

+

92 and p[0] not in self.no_branch 

+

93 ) 

+

94 return sorted(missing) 

+

95 

+

96 @contract(returns='list(tuple(int, int))') 

+

97 def arcs_unpredicted(self): 

+

98 """Returns a sorted list of the executed arcs missing from the code.""" 

+

99 possible = self.arc_possibilities() 

+

100 executed = self.arcs_executed() 

+

101 # Exclude arcs here which connect a line to itself. They can occur 

+

102 # in executed data in some cases. This is where they can cause 

+

103 # trouble, and here is where it's the least burden to remove them. 

+

104 # Also, generators can somehow cause arcs from "enter" to "exit", so 

+

105 # make sure we have at least one positive value. 

+

106 unpredicted = ( 

+

107 e for e in executed 

+

108 if e not in possible 

+

109 and e[0] != e[1] 

+

110 and (e[0] > 0 or e[1] > 0) 

+

111 ) 

+

112 return sorted(unpredicted) 

+

113 

+

114 def _branch_lines(self): 

+

115 """Returns a list of line numbers that have more than one exit.""" 

+

116 return [l1 for l1,count in iitems(self.exit_counts) if count > 1] 

+

117 

+

118 def _total_branches(self): 

+

119 """How many total branches are there?""" 

+

120 return sum(count for count in self.exit_counts.values() if count > 1) 

+

121 

+

122 @contract(returns='dict(int: list(int))') 

+

123 def missing_branch_arcs(self): 

+

124 """Return arcs that weren't executed from branch lines. 

+

125 

+

126 Returns {l1:[l2a,l2b,...], ...} 

+

127 

+

128 """ 

+

129 missing = self.arcs_missing() 

+

130 branch_lines = set(self._branch_lines()) 

+

131 mba = collections.defaultdict(list) 

+

132 for l1, l2 in missing: 

+

133 if l1 in branch_lines: 

+

134 mba[l1].append(l2) 

+

135 return mba 

+

136 

+

137 @contract(returns='dict(int: tuple(int, int))') 

+

138 def branch_stats(self): 

+

139 """Get stats about branches. 

+

140 

+

141 Returns a dict mapping line numbers to a tuple: 

+

142 (total_exits, taken_exits). 

+

143 """ 

+

144 

+

145 missing_arcs = self.missing_branch_arcs() 

+

146 stats = {} 

+

147 for lnum in self._branch_lines(): 

+

148 exits = self.exit_counts[lnum] 

+

149 missing = len(missing_arcs[lnum]) 

+

150 stats[lnum] = (exits, exits - missing) 

+

151 return stats 

+

152 

+

153 

+

154class Numbers(SimpleReprMixin): 

+

155 """The numerical results of measuring coverage. 

+

156 

+

157 This holds the basic statistics from `Analysis`, and is used to roll 

+

158 up statistics across files. 

+

159 

+

160 """ 

+

161 # A global to determine the precision on coverage percentages, the number 

+

162 # of decimal places. 

+

163 _precision = 0 

+

164 _near0 = 1.0 # These will change when _precision is changed. 

+

165 _near100 = 99.0 

+

166 

+

167 def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0, 

+

168 n_branches=0, n_partial_branches=0, n_missing_branches=0 

+

169 ): 

+

170 self.n_files = n_files 

+

171 self.n_statements = n_statements 

+

172 self.n_excluded = n_excluded 

+

173 self.n_missing = n_missing 

+

174 self.n_branches = n_branches 

+

175 self.n_partial_branches = n_partial_branches 

+

176 self.n_missing_branches = n_missing_branches 

+

177 

+

178 def init_args(self): 

+

179 """Return a list for __init__(*args) to recreate this object.""" 

+

180 return [ 

+

181 self.n_files, self.n_statements, self.n_excluded, self.n_missing, 

+

182 self.n_branches, self.n_partial_branches, self.n_missing_branches, 

+

183 ] 

+

184 

+

185 @classmethod 

+

186 def set_precision(cls, precision): 

+

187 """Set the number of decimal places used to report percentages.""" 

+

188 assert 0 <= precision < 10 

+

189 cls._precision = precision 

+

190 cls._near0 = 1.0 / 10**precision 

+

191 cls._near100 = 100.0 - cls._near0 

+

192 

+

193 @property 

+

194 def n_executed(self): 

+

195 """Returns the number of executed statements.""" 

+

196 return self.n_statements - self.n_missing 

+

197 

+

198 @property 

+

199 def n_executed_branches(self): 

+

200 """Returns the number of executed branches.""" 

+

201 return self.n_branches - self.n_missing_branches 

+

202 

+

203 @property 

+

204 def pc_covered(self): 

+

205 """Returns a single percentage value for coverage.""" 

+

206 if self.n_statements > 0: 

+

207 numerator, denominator = self.ratio_covered 

+

208 pc_cov = (100.0 * numerator) / denominator 

+

209 else: 

+

210 pc_cov = 100.0 

+

211 return pc_cov 

+

212 

+

213 @property 

+

214 def pc_covered_str(self): 

+

215 """Returns the percent covered, as a string, without a percent sign. 

+

216 

+

217 Note that "0" is only returned when the value is truly zero, and "100" 

+

218 is only returned when the value is truly 100. Rounding can never 

+

219 result in either "0" or "100". 

+

220 

+

221 """ 

+

222 pc = self.pc_covered 

+

223 if 0 < pc < self._near0: 

+

224 pc = self._near0 

+

225 elif self._near100 < pc < 100: 

+

226 pc = self._near100 

+

227 else: 

+

228 pc = round(pc, self._precision) 

+

229 return "%.*f" % (self._precision, pc) 

+
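A small sketch of the clamping behavior described in the docstring, using only the classes in this module; note that set_precision() changes class-wide state.

from coverage.results import Numbers

Numbers.set_precision(2)                                    # class-wide setting
nums = Numbers(n_files=1, n_statements=100000, n_missing=1)
print(nums.pc_covered)       # 99.999
print(nums.pc_covered_str)   # "99.99" -- clamped, never rounded up to "100"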

230 

+

231 @classmethod 

+

232 def pc_str_width(cls): 

+

233 """How many characters wide can pc_covered_str be?""" 

+

234 width = 3 # "100" 

+

235 if cls._precision > 0: 

+

236 width += 1 + cls._precision 

+

237 return width 

+

238 

+

239 @property 

+

240 def ratio_covered(self): 

+

241 """Return a numerator and denominator for the coverage ratio.""" 

+

242 numerator = self.n_executed + self.n_executed_branches 

+

243 denominator = self.n_statements + self.n_branches 

+

244 return numerator, denominator 

+

245 

+

246 def __add__(self, other): 

+

247 nums = Numbers() 

+

248 nums.n_files = self.n_files + other.n_files 

+

249 nums.n_statements = self.n_statements + other.n_statements 

+

250 nums.n_excluded = self.n_excluded + other.n_excluded 

+

251 nums.n_missing = self.n_missing + other.n_missing 

+

252 nums.n_branches = self.n_branches + other.n_branches 

+

253 nums.n_partial_branches = ( 

+

254 self.n_partial_branches + other.n_partial_branches 

+

255 ) 

+

256 nums.n_missing_branches = ( 

+

257 self.n_missing_branches + other.n_missing_branches 

+

258 ) 

+

259 return nums 

+

260 

+

261 def __radd__(self, other): 

+

262 # Implementing 0+Numbers allows us to sum() a list of Numbers. 

+

263 if other == 0: 

+

264 return self 

+

265 return NotImplemented # pragma: not covered (we never call it this way) 

+

266 

+

267 

+

268def _line_ranges(statements, lines): 

+

269 """Produce a list of ranges for `format_lines`.""" 

+

270 statements = sorted(statements) 

+

271 lines = sorted(lines) 

+

272 

+

273 pairs = [] 

+

274 start = None 

+

275 lidx = 0 

+

276 for stmt in statements: 

+

277 if lidx >= len(lines): 

+

278 break 

+

279 if stmt == lines[lidx]: 

+

280 lidx += 1 

+

281 if not start: 

+

282 start = stmt 

+

283 end = stmt 

+

284 elif start: 

+

285 pairs.append((start, end)) 

+

286 start = None 

+

287 if start: 

+

288 pairs.append((start, end)) 

+

289 return pairs 

+

290 

+

291 

+

292def format_lines(statements, lines, arcs=None): 

+

293 """Nicely format a list of line numbers. 

+

294 

+

295 Format a list of line numbers for printing by coalescing groups of lines as 

+

296 long as the lines represent consecutive statements. This will coalesce 

+

297 even if there are gaps between statements. 

+

298 

+

299 For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and 

+

300 `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14". 

+

301 

+

302 Both `lines` and `statements` can be any iterable. All of the elements of 

+

303 `lines` must be in `statements`, and all of the values must be positive 

+

304 integers. 

+

305 

+

306 If `arcs` is provided, they are (start,[end,end,end]) pairs that will be 

+

307 included in the output as long as start isn't in `lines`. 

+

308 

+

309 """ 

+

310 line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)] 

+

311 if arcs: 

+

312 line_exits = sorted(arcs) 

+

313 for line, exits in line_exits: 

+

314 for ex in sorted(exits): 

+

315 if line not in lines and ex not in lines: 

+

316 dest = (ex if ex > 0 else "exit") 

+

317 line_items.append((line, "%d->%s" % (line, dest))) 

+

318 

+

319 ret = ', '.join(t[-1] for t in sorted(line_items)) 

+

320 return ret 

+
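A quick check of the docstring's example, run against this module:

from coverage.results import format_lines

statements = [1, 2, 3, 4, 5, 10, 11, 12, 13, 14]
lines = [1, 2, 5, 10, 11, 13, 14]
print(format_lines(statements, lines))   # "1-2, 5-11, 13-14"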

321 

+

322 

+

323@contract(total='number', fail_under='number', precision=int, returns=bool) 

+

324def should_fail_under(total, fail_under, precision): 

+

325 """Determine if a total should fail due to fail-under. 

+

326 

+

327 `total` is a float, the coverage measurement total. `fail_under` is the 

+

328 fail_under setting to compare with. `precision` is the number of digits 

+

329 to consider after the decimal point. 

+

330 

+

331 Returns True if the total should fail. 

+

332 

+

333 """ 

+

334 # We can never achieve higher than 100% coverage, or less than zero. 

+

335 if not (0 <= fail_under <= 100.0): 

+

336 msg = "fail_under={} is invalid. Must be between 0 and 100.".format(fail_under) 

+

337 raise CoverageException(msg) 

+

338 

+

339 # Special case for fail_under=100, it must really be 100. 

+

340 if fail_under == 100.0 and total != 100.0: 

+

341 return True 

+

342 

+

343 return round(total, precision) < fail_under 

+
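A few worked cases, using only the function above:

from coverage.results import should_fail_under

print(should_fail_under(79.4, 80, 0))    # True:  rounds to 79, below the bar
print(should_fail_under(80.4, 80, 0))    # False: rounds to 80, meets the bar
print(should_fail_under(99.99, 100, 2))  # True:  fail_under=100 demands exactly 100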
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_sqldata_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_sqldata_py.html new file mode 100644 index 000000000..5a1917def --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/coverage_sqldata_py.html @@ -0,0 +1,1189 @@ + + + + + + Coverage for coverage/sqldata.py: 93.557% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Sqlite coverage data.""" 

+

5 

+

6# TODO: factor out dataop debugging to a wrapper class? 

+

7# TODO: make sure all dataop debugging is in place somehow 

+

8 

+

9import collections 

+

10import datetime 

+

11import glob 

+

12import itertools 

+

13import os 

+

14import re 

+

15import sqlite3 

+

16import sys 

+

17import zlib 

+

18 

+

19from coverage import env 

+

20from coverage.backward import get_thread_id, iitems, to_bytes, to_string 

+

21from coverage.debug import NoDebugging, SimpleReprMixin, clipped_repr 

+

22from coverage.files import PathAliases 

+

23from coverage.misc import CoverageException, contract, file_be_gone, filename_suffix, isolate_module 

+

24from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits 

+

25from coverage.version import __version__ 

+

26 

+

27os = isolate_module(os) 

+

28 

+

29# If you change the schema, increment the SCHEMA_VERSION, and update the 

+

30# docs in docs/dbschema.rst also. 

+

31 

+

32SCHEMA_VERSION = 7 

+

33 

+

34# Schema versions: 

+

35# 1: Released in 5.0a2 

+

36# 2: Added contexts in 5.0a3. 

+

37# 3: Replaced line table with line_map table. 

+

38# 4: Changed line_map.bitmap to line_map.numbits. 

+

39# 5: Added foreign key declarations. 

+

40# 6: Key-value in meta. 

+

41# 7: line_map -> line_bits 

+

42 

+

43SCHEMA = """\ 

+

44CREATE TABLE coverage_schema ( 

+

45 -- One row, to record the version of the schema in this db. 

+

46 version integer 

+

47); 

+

48 

+

49CREATE TABLE meta ( 

+

50 -- Key-value pairs, to record metadata about the data 

+

51 key text, 

+

52 value text, 

+

53 unique (key) 

+

54 -- Keys: 

+

55 -- 'has_arcs' boolean -- Is this data recording branches? 

+

56 -- 'sys_argv' text -- The coverage command line that recorded the data. 

+

57 -- 'version' text -- The version of coverage.py that made the file. 

+

58 -- 'when' text -- Datetime when the file was created. 

+

59); 

+

60 

+

61CREATE TABLE file ( 

+

62 -- A row per file measured. 

+

63 id integer primary key, 

+

64 path text, 

+

65 unique (path) 

+

66); 

+

67 

+

68CREATE TABLE context ( 

+

69 -- A row per context measured. 

+

70 id integer primary key, 

+

71 context text, 

+

72 unique (context) 

+

73); 

+

74 

+

75CREATE TABLE line_bits ( 

+

76 -- If recording lines, a row per context per file executed. 

+

77 -- All of the line numbers for that file/context are in one numbits. 

+

78 file_id integer, -- foreign key to `file`. 

+

79 context_id integer, -- foreign key to `context`. 

+

80 numbits blob, -- see the numbits functions in coverage.numbits 

+

81 foreign key (file_id) references file (id), 

+

82 foreign key (context_id) references context (id), 

+

83 unique (file_id, context_id) 

+

84); 

+

85 

+

86CREATE TABLE arc ( 

+

87 -- If recording branches, a row per context per from/to line transition executed. 

+

88 file_id integer, -- foreign key to `file`. 

+

89 context_id integer, -- foreign key to `context`. 

+

90 fromno integer, -- line number jumped from. 

+

91 tono integer, -- line number jumped to. 

+

92 foreign key (file_id) references file (id), 

+

93 foreign key (context_id) references context (id), 

+

94 unique (file_id, context_id, fromno, tono) 

+

95); 

+

96 

+

97CREATE TABLE tracer ( 

+

98 -- A row per file indicating the tracer used for that file. 

+

99 file_id integer primary key, 

+

100 tracer text, 

+

101 foreign key (file_id) references file (id) 

+

102); 

+

103""" 

+
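As a rough sketch of what this schema looks like in practice, the snippet below reads an existing .coverage file directly with sqlite3 and decodes the numbits blobs with the helper imported above. The schema is subject to change between versions (see the class docstring below), so this is illustrative only; the ".coverage" path is an assumption.

import sqlite3
from coverage.numbits import numbits_to_nums

con = sqlite3.connect(".coverage")   # an existing coverage data file
rows = con.execute(
    "select file.path, line_bits.numbits "
    "from line_bits join file on file.id = line_bits.file_id"
)
for path, numbits in rows:
    print(path, sorted(numbits_to_nums(numbits)))
con.close()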

104 

+

105class CoverageData(SimpleReprMixin): 

+

106 """Manages collected coverage data, including file storage. 

+

107 

+

108 This class is the public supported API to the data that coverage.py 

+

109 collects during program execution. It includes information about what code 

+

110 was executed. It does not include information from the analysis phase, to 

+

111 determine what lines could have been executed, or what lines were not 

+

112 executed. 

+

113 

+

114 .. note:: 

+

115 

+

116 The data file is currently a SQLite database file, with a 

+

117 :ref:`documented schema <dbschema>`. The schema is subject to change 

+

118 though, so be careful about querying it directly. Use this API if you 

+

119 can to isolate yourself from changes. 

+

120 

+

121 There are a number of kinds of data that can be collected: 

+

122 

+

123 * **lines**: the line numbers of source lines that were executed. 

+

124 These are always available. 

+

125 

+

126 * **arcs**: pairs of source and destination line numbers for transitions 

+

127 between source lines. These are only available if branch coverage was 

+

128 used. 

+

129 

+

130 * **file tracer names**: the module names of the file tracer plugins that 

+

131 handled each file in the data. 

+

132 

+

133 Lines, arcs, and file tracer names are stored for each source file. File 

+

134 names in this API are case-sensitive, even on platforms with 

+

135 case-insensitive file systems. 

+

136 

+

137 A data file stores either lines or arcs, but not both. 

+

138 

+

139 A data file is associated with the data when the :class:`CoverageData` 

+

140 is created, using the parameters `basename`, `suffix`, and `no_disk`. The 

+

141 base name can be queried with :meth:`base_filename`, and the actual file 

+

142 name being used is available from :meth:`data_filename`. 

+

143 

+

144 To read an existing coverage.py data file, use :meth:`read`. You can then 

+

145 access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`, 

+

146 or :meth:`file_tracer`. 

+

147 

+

148 The :meth:`has_arcs` method indicates whether arc data is available. You 

+

149 can get a set of the files in the data with :meth:`measured_files`. As 

+

150 with most Python containers, you can determine if there is any data at all 

+

151 by using this object as a boolean value. 

+

152 

+

153 The contexts for each line in a file can be read with 

+

154 :meth:`contexts_by_lineno`. 

+

155 

+

156 To limit querying to certain contexts, use :meth:`set_query_context` or 

+

157 :meth:`set_query_contexts`. These will narrow the focus of subsequent 

+

158 :meth:`lines`, :meth:`arcs`, and :meth:`contexts_by_lineno` calls. The set 

+

159 of all measured context names can be retrieved with 

+

160 :meth:`measured_contexts`. 

+

161 

+

162 Most data files will be created by coverage.py itself, but you can use 

+

163 methods here to create data files if you like. The :meth:`add_lines`, 

+

164 :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways 

+

165 that are convenient for coverage.py. 

+

166 

+

167 To record data for contexts, use :meth:`set_context` to set a context to 

+

168 be used for subsequent :meth:`add_lines` and :meth:`add_arcs` calls. 

+

169 

+

170 To add a source file without any measured data, use :meth:`touch_file`, 

+

171 or :meth:`touch_files` for a list of such files. 

+

172 

+

173 Write the data to its file with :meth:`write`. 

+

174 

+

175 You can clear the data in memory with :meth:`erase`. Two data collections 

+

176 can be combined by using :meth:`update` on one :class:`CoverageData`, 

+

177 passing it the other. 

+

178 

+

179 Data in a :class:`CoverageData` can be serialized and deserialized with 

+

180 :meth:`dumps` and :meth:`loads`. 

+

181 

+

182 """ 

+
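A minimal sketch of the read-side API described above, assuming a ".coverage" file already exists in the current directory:

from coverage import CoverageData

data = CoverageData()        # defaults to the ".coverage" file
data.read()
print("arcs?", data.has_arcs())
for fname in sorted(data.measured_files()):
    executed = data.lines(fname) or []
    print(fname, len(executed), "lines executed")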

183 

+

184 def __init__(self, basename=None, suffix=None, no_disk=False, warn=None, debug=None): 

+

185 """Create a :class:`CoverageData` object to hold coverage-measured data. 

+

186 

+

187 Arguments: 

+

188 basename (str): the base name of the data file, defaulting to 

+

189 ".coverage". 

+

190 suffix (str or bool): has the same meaning as the `data_suffix` 

+

191 argument to :class:`coverage.Coverage`. 

+

192 no_disk (bool): if True, keep all data in memory, and don't 

+

193 write any disk file. 

+

194 warn: a warning callback function, accepting a warning message 

+

195 argument. 

+

196 debug: a `DebugControl` object (optional) 

+

197 

+

198 """ 

+

199 self._no_disk = no_disk 

+

200 self._basename = os.path.abspath(basename or ".coverage") 

+

201 self._suffix = suffix 

+

202 self._warn = warn 

+

203 self._debug = debug or NoDebugging() 

+

204 

+

205 self._choose_filename() 

+

206 self._file_map = {} 

+

207 # Maps thread ids to SqliteDb objects. 

+

208 self._dbs = {} 

+

209 self._pid = os.getpid() 

+

210 

+

211 # Are we in sync with the data file? 

+

212 self._have_used = False 

+

213 

+

214 self._has_lines = False 

+

215 self._has_arcs = False 

+

216 

+

217 self._current_context = None 

+

218 self._current_context_id = None 

+

219 self._query_context_ids = None 

+

220 

+

221 def _choose_filename(self): 

+

222 """Set self._filename based on inited attributes.""" 

+

223 if self._no_disk: 

+

224 self._filename = ":memory:" 

+

225 else: 

+

226 self._filename = self._basename 

+

227 suffix = filename_suffix(self._suffix) 

+

228 if suffix: 

+

229 self._filename += "." + suffix 

+

230 

+

231 def _reset(self): 

+

232 """Reset our attributes.""" 

+

233 if self._dbs: 

+

234 for db in self._dbs.values(): 

+

235 db.close() 

+

236 self._dbs = {} 

+

237 self._file_map = {} 

+

238 self._have_used = False 

+

239 self._current_context_id = None 

+

240 

+

241 def _create_db(self): 

+

242 """Create a db file that doesn't exist yet. 

+

243 

+

244 Initializes the schema and certain metadata. 

+

245 """ 

+

246 if self._debug.should('dataio'): 

+

247 self._debug.write("Creating data file {!r}".format(self._filename)) 

+

248 self._dbs[get_thread_id()] = db = SqliteDb(self._filename, self._debug) 

+

249 with db: 

+

250 db.executescript(SCHEMA) 

+

251 db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,)) 

+

252 db.executemany( 

+

253 "insert into meta (key, value) values (?, ?)", 

+

254 [ 

+

255 ('sys_argv', str(getattr(sys, 'argv', None))), 

+

256 ('version', __version__), 

+

257 ('when', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')), 

+

258 ] 

+

259 ) 

+

260 

+

261 def _open_db(self): 

+

262 """Open an existing db file, and read its metadata.""" 

+

263 if self._debug.should('dataio'): 

+

264 self._debug.write("Opening data file {!r}".format(self._filename)) 

+

265 self._dbs[get_thread_id()] = SqliteDb(self._filename, self._debug) 

+

266 self._read_db() 

+

267 

+

268 def _read_db(self): 

+

269 """Read the metadata from a database so that we are ready to use it.""" 

+

270 with self._dbs[get_thread_id()] as db: 

+

271 try: 

+

272 schema_version, = db.execute_one("select version from coverage_schema") 

+

273 except Exception as exc: 

+

274 raise CoverageException( 

+

275 "Data file {!r} doesn't seem to be a coverage data file: {}".format( 

+

276 self._filename, exc 

+

277 ) 

+

278 ) 

+

279 else: 

+

280 if schema_version != SCHEMA_VERSION: 

+

281 raise CoverageException( 

+

282 "Couldn't use data file {!r}: wrong schema: {} instead of {}".format( 

+

283 self._filename, schema_version, SCHEMA_VERSION 

+

284 ) 

+

285 ) 

+

286 

+

287 for row in db.execute("select value from meta where key = 'has_arcs'"): 

+

288 self._has_arcs = bool(int(row[0])) 

+

289 self._has_lines = not self._has_arcs 

+

290 

+

291 for path, file_id in db.execute("select path, id from file"): 

+

292 self._file_map[path] = file_id 

+

293 

+

294 def _connect(self): 

+

295 """Get the SqliteDb object to use.""" 

+

296 if get_thread_id() not in self._dbs: 

+

297 if os.path.exists(self._filename): 

+

298 self._open_db() 

+

299 else: 

+

300 self._create_db() 

+

301 return self._dbs[get_thread_id()] 

+

302 

+

303 def __nonzero__(self): 

+

304 if (get_thread_id() not in self._dbs and not os.path.exists(self._filename)): 

+

305 return False 

+

306 try: 

+

307 with self._connect() as con: 

+

308 rows = con.execute("select * from file limit 1") 

+

309 return bool(list(rows)) 

+

310 except CoverageException: 

+

311 return False 

+

312 

+

313 __bool__ = __nonzero__ 

+

314 

+

315 @contract(returns='bytes') 

+

316 def dumps(self): 

+

317 """Serialize the current data to a byte string. 

+

318 

+

319 The format of the serialized data is not documented. It is only 

+

320 suitable for use with :meth:`loads` in the same version of 

+

321 coverage.py. 

+

322 

+

323 Returns: 

+

324 A byte string of serialized data. 

+

325 

+

326 .. versionadded:: 5.0 

+

327 

+

328 """ 

+

329 if self._debug.should('dataio'): 329 ↛ 330line 329 didn't jump to line 330, because the condition on line 329 was never true

+

330 self._debug.write("Dumping data from data file {!r}".format(self._filename)) 

+

331 with self._connect() as con: 

+

332 return b'z' + zlib.compress(to_bytes(con.dump())) 

+

333 

+

334 @contract(data='bytes') 

+

335 def loads(self, data): 

+

336 """Deserialize data from :meth:`dumps` 

+

337 

+

338 Use with a newly-created empty :class:`CoverageData` object. It's 

+

339 undefined what happens if the object already has data in it. 

+

340 

+

341 Arguments: 

+

342 data: A byte string of serialized data produced by :meth:`dumps`. 

+

343 

+

344 .. versionadded:: 5.0 

+

345 

+

346 """ 

+

347 if self._debug.should('dataio'): 347 ↛ 348line 347 didn't jump to line 348, because the condition on line 347 was never true

+

348 self._debug.write("Loading data into data file {!r}".format(self._filename)) 

+

349 if data[:1] != b'z': 

+

350 raise CoverageException( 

+

351 "Unrecognized serialization: {!r} (head of {} bytes)".format(data[:40], len(data)) 

+

352 ) 

+

353 script = to_string(zlib.decompress(data[1:])) 

+

354 self._dbs[get_thread_id()] = db = SqliteDb(self._filename, self._debug) 

+

355 with db: 

+

356 db.executescript(script) 

+

357 self._read_db() 

+

358 self._have_used = True 

+
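A small round-trip sketch of dumps()/loads(), kept entirely in memory; "example.py" and its line numbers are made up for illustration.

from coverage import CoverageData

src = CoverageData(no_disk=True)
src.add_lines({"example.py": {1: None, 2: None, 5: None}})
blob = src.dumps()            # opaque bytes, only for the same coverage.py version

dest = CoverageData(no_disk=True)
dest.loads(blob)
print(sorted(dest.lines("example.py")))   # [1, 2, 5]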

359 

+

360 def _file_id(self, filename, add=False): 

+

361 """Get the file id for `filename`. 

+

362 

+

363 If filename is not in the database yet, add it if `add` is True. 

+

364 If `add` is not True, return None. 

+

365 """ 

+

366 if filename not in self._file_map: 

+

367 if add: 

+

368 with self._connect() as con: 

+

369 cur = con.execute("insert or replace into file (path) values (?)", (filename,)) 

+

370 self._file_map[filename] = cur.lastrowid 

+

371 return self._file_map.get(filename) 

+

372 

+

373 def _context_id(self, context): 

+

374 """Get the id for a context.""" 

+

375 assert context is not None 

+

376 self._start_using() 

+

377 with self._connect() as con: 

+

378 row = con.execute_one("select id from context where context = ?", (context,)) 

+

379 if row is not None: 

+

380 return row[0] 

+

381 else: 

+

382 return None 

+

383 

+

384 def set_context(self, context): 

+

385 """Set the current context for future :meth:`add_lines` etc. 

+

386 

+

387 `context` is a str, the name of the context to use for the next data 

+

388 additions. The context persists until the next :meth:`set_context`. 

+

389 

+

390 .. versionadded:: 5.0 

+

391 

+

392 """ 

+

393 if self._debug.should('dataop'): 

+

394 self._debug.write("Setting context: %r" % (context,)) 

+

395 self._current_context = context 

+

396 self._current_context_id = None 

+
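A sketch of recording under two contexts and reading them back, again in memory with made-up file names:

from coverage import CoverageData

data = CoverageData(no_disk=True)
data.set_context("test_one")
data.add_lines({"example.py": {1: None, 2: None}})
data.set_context("test_two")
data.add_lines({"example.py": {2: None, 3: None}})
print(sorted(data.measured_contexts()))                   # ['test_one', 'test_two']
print(sorted(data.contexts_by_lineno("example.py")[2]))   # ['test_one', 'test_two']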

397 

+

398 def _set_context_id(self): 

+

399 """Use the _current_context to set _current_context_id.""" 

+

400 context = self._current_context or "" 

+

401 context_id = self._context_id(context) 

+

402 if context_id is not None: 

+

403 self._current_context_id = context_id 

+

404 else: 

+

405 with self._connect() as con: 

+

406 cur = con.execute("insert into context (context) values (?)", (context,)) 

+

407 self._current_context_id = cur.lastrowid 

+

408 

+

409 def base_filename(self): 

+

410 """The base filename for storing data. 

+

411 

+

412 .. versionadded:: 5.0 

+

413 

+

414 """ 

+

415 return self._basename 

+

416 

+

417 def data_filename(self): 

+

418 """Where is the data stored? 

+

419 

+

420 .. versionadded:: 5.0 

+

421 

+

422 """ 

+

423 return self._filename 

+

424 

+

425 def add_lines(self, line_data): 

+

426 """Add measured line data. 

+

427 

+

428 `line_data` is a dictionary mapping file names to dictionaries:: 

+

429 

+

430 { filename: { lineno: None, ... }, ...} 

+

431 

+

432 """ 

+

433 if self._debug.should('dataop'): 

+

434 self._debug.write("Adding lines: %d files, %d lines total" % ( 

+

435 len(line_data), sum(len(lines) for lines in line_data.values()) 

+

436 )) 

+

437 self._start_using() 

+

438 self._choose_lines_or_arcs(lines=True) 

+

439 if not line_data: 

+

440 return 

+

441 with self._connect() as con: 

+

442 self._set_context_id() 

+

443 for filename, linenos in iitems(line_data): 

+

444 linemap = nums_to_numbits(linenos) 

+

445 file_id = self._file_id(filename, add=True) 

+

446 query = "select numbits from line_bits where file_id = ? and context_id = ?" 

+

447 existing = list(con.execute(query, (file_id, self._current_context_id))) 

+

448 if existing: 

+

449 linemap = numbits_union(linemap, existing[0][0]) 

+

450 

+

451 con.execute( 

+

452 "insert or replace into line_bits " 

+

453 " (file_id, context_id, numbits) values (?, ?, ?)", 

+

454 (file_id, self._current_context_id, linemap), 

+

455 ) 

+

456 

+

457 def add_arcs(self, arc_data): 

+

458 """Add measured arc data. 

+

459 

+

460 `arc_data` is a dictionary mapping file names to dictionaries:: 

+

461 

+

462 { filename: { (l1,l2): None, ... }, ...} 

+

463 

+

464 """ 

+

465 if self._debug.should('dataop'): 465 ↛ 466line 465 didn't jump to line 466, because the condition on line 465 was never true

+

466 self._debug.write("Adding arcs: %d files, %d arcs total" % ( 

+

467 len(arc_data), sum(len(arcs) for arcs in arc_data.values()) 

+

468 )) 

+

469 self._start_using() 

+

470 self._choose_lines_or_arcs(arcs=True) 

+

471 if not arc_data: 

+

472 return 

+

473 with self._connect() as con: 

+

474 self._set_context_id() 

+

475 for filename, arcs in iitems(arc_data): 

+

476 file_id = self._file_id(filename, add=True) 

+

477 data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs] 

+

478 con.executemany( 

+

479 "insert or ignore into arc " 

+

480 "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)", 

+

481 data, 

+

482 ) 

+

483 

+

484 def _choose_lines_or_arcs(self, lines=False, arcs=False): 

+

485 """Force the data file to choose between lines and arcs.""" 

+

486 assert lines or arcs 

+

487 assert not (lines and arcs) 

+

488 if lines and self._has_arcs: 

+

489 raise CoverageException("Can't add line measurements to existing branch data") 

+

490 if arcs and self._has_lines: 

+

491 raise CoverageException("Can't add branch measurements to existing line data") 

+

492 if not self._has_arcs and not self._has_lines: 

+

493 self._has_lines = lines 

+

494 self._has_arcs = arcs 

+

495 with self._connect() as con: 

+

496 con.execute( 

+

497 "insert into meta (key, value) values (?, ?)", 

+

498 ('has_arcs', str(int(arcs))) 

+

499 ) 

+

500 

+

501 def add_file_tracers(self, file_tracers): 

+

502 """Add per-file plugin information. 

+

503 

+

504 `file_tracers` is { filename: plugin_name, ... } 

+

505 

+

506 """ 

+

507 if self._debug.should('dataop'): 

+

508 self._debug.write("Adding file tracers: %d files" % (len(file_tracers),)) 

+

509 if not file_tracers: 

+

510 return 

+

511 self._start_using() 

+

512 with self._connect() as con: 

+

513 for filename, plugin_name in iitems(file_tracers): 

+

514 file_id = self._file_id(filename) 

+

515 if file_id is None: 

+

516 raise CoverageException( 

+

517 "Can't add file tracer data for unmeasured file '%s'" % (filename,) 

+

518 ) 

+

519 

+

520 existing_plugin = self.file_tracer(filename) 

+

521 if existing_plugin: 

+

522 if existing_plugin != plugin_name: 522 ↛ 513line 522 didn't jump to line 513, because the condition on line 522 was never false

+

523 raise CoverageException( 

+

524 "Conflicting file tracer name for '%s': %r vs %r" % ( 

+

525 filename, existing_plugin, plugin_name, 

+

526 ) 

+

527 ) 

+

528 elif plugin_name: 528 ↛ 513line 528 didn't jump to line 513, because the condition on line 528 was never false

+

529 con.execute( 

+

530 "insert into tracer (file_id, tracer) values (?, ?)", 

+

531 (file_id, plugin_name) 

+

532 ) 

+

533 

+

534 def touch_file(self, filename, plugin_name=""): 

+

535 """Ensure that `filename` appears in the data, empty if needed. 

+

536 

+

537 `plugin_name` is the name of the plugin responsible for this file. It is used 

+

538 to associate the right filereporter, etc. 

+

539 """ 

+

540 self.touch_files([filename], plugin_name) 

+

541 

+

542 def touch_files(self, filenames, plugin_name=""): 

+

543 """Ensure that `filenames` appear in the data, empty if needed. 

+

544 

+

545 `plugin_name` is the name of the plugin responsible for these files. It is used 

+

546 to associate the right filereporter, etc. 

+

547 """ 

+

548 if self._debug.should('dataop'): 548 ↛ 549line 548 didn't jump to line 549, because the condition on line 548 was never true

+

549 self._debug.write("Touching %r" % (filenames,)) 

+

550 self._start_using() 

+

551 with self._connect(): # Use this to get one transaction. 

+

552 if not self._has_arcs and not self._has_lines: 552 ↛ 553line 552 didn't jump to line 553, because the condition on line 552 was never true

+

553 raise CoverageException("Can't touch files in an empty CoverageData") 

+

554 

+

555 for filename in filenames: 

+

556 self._file_id(filename, add=True) 

+

557 if plugin_name: 

+

558 # Set the tracer for this file 

+

559 self.add_file_tracers({filename: plugin_name}) 

+

560 

+

561 def update(self, other_data, aliases=None): 

+

562 """Update this data with data from several other :class:`CoverageData` instances. 

+

563 

+

564 If `aliases` is provided, it's a `PathAliases` object that is used to 

+

565 re-map paths to match the local machine's. 

+

566 """ 

+

567 if self._debug.should('dataop'): 567 ↛ 568line 567 didn't jump to line 568, because the condition on line 567 was never true

+

568 self._debug.write("Updating with data from %r" % ( 

+

569 getattr(other_data, '_filename', '???'), 

+

570 )) 

+

571 if self._has_lines and other_data._has_arcs: 

+

572 raise CoverageException("Can't combine arc data with line data") 

+

573 if self._has_arcs and other_data._has_lines: 

+

574 raise CoverageException("Can't combine line data with arc data") 

+

575 

+

576 aliases = aliases or PathAliases() 

+

577 

+

578 # Make sure the database we're writing to exists before we start nesting 

+

579 # contexts. 

+

580 self._start_using() 

+

581 

+

582 # Collector for all arcs, lines and tracers 

+

583 other_data.read() 

+

584 with other_data._connect() as conn: 

+

585 # Get files data. 

+

586 cur = conn.execute('select path from file') 

+

587 files = {path: aliases.map(path) for (path,) in cur} 

+

588 cur.close() 

+

589 

+

590 # Get contexts data. 

+

591 cur = conn.execute('select context from context') 

+

592 contexts = [context for (context,) in cur] 

+

593 cur.close() 

+

594 

+

595 # Get arc data. 

+

596 cur = conn.execute( 

+

597 'select file.path, context.context, arc.fromno, arc.tono ' 

+

598 'from arc ' 

+

599 'inner join file on file.id = arc.file_id ' 

+

600 'inner join context on context.id = arc.context_id' 

+

601 ) 

+

602 arcs = [(files[path], context, fromno, tono) for (path, context, fromno, tono) in cur] 

+

603 cur.close() 

+

604 

+

605 # Get line data. 

+

606 cur = conn.execute( 

+

607 'select file.path, context.context, line_bits.numbits ' 

+

608 'from line_bits ' 

+

609 'inner join file on file.id = line_bits.file_id ' 

+

610 'inner join context on context.id = line_bits.context_id' 

+

611 ) 

+

612 lines = { 

+

613 (files[path], context): numbits 

+

614 for (path, context, numbits) in cur 

+

615 } 

+

616 cur.close() 

+

617 

+

618 # Get tracer data. 

+

619 cur = conn.execute( 

+

620 'select file.path, tracer ' 

+

621 'from tracer ' 

+

622 'inner join file on file.id = tracer.file_id' 

+

623 ) 

+

624 tracers = {files[path]: tracer for (path, tracer) in cur} 

+

625 cur.close() 

+

626 

+

627 with self._connect() as conn: 

+

628 conn.con.isolation_level = 'IMMEDIATE' 

+

629 

+

630 # Get all tracers in the DB. Files not in the tracers are assumed 

+

631 # to have an empty string tracer. Since Sqlite does not support 

+

632 # full outer joins, we have to make two queries to fill the 

+

633 # dictionary. 

+

634 this_tracers = {path: '' for path, in conn.execute('select path from file')} 

+

635 this_tracers.update({ 

+

636 aliases.map(path): tracer 

+

637 for path, tracer in conn.execute( 

+

638 'select file.path, tracer from tracer ' 

+

639 'inner join file on file.id = tracer.file_id' 

+

640 ) 

+

641 }) 

+

642 

+

643 # Create all file and context rows in the DB. 

+

644 conn.executemany( 

+

645 'insert or ignore into file (path) values (?)', 

+

646 ((file,) for file in files.values()) 

+

647 ) 

+

648 file_ids = { 

+

649 path: id 

+

650 for id, path in conn.execute('select id, path from file') 

+

651 } 

+

652 conn.executemany( 

+

653 'insert or ignore into context (context) values (?)', 

+

654 ((context,) for context in contexts) 

+

655 ) 

+

656 context_ids = { 

+

657 context: id 

+

658 for id, context in conn.execute('select id, context from context') 

+

659 } 

+

660 

+

661 # Prepare tracers and fail, if a conflict is found. 

+

662 # tracer_paths is used to ensure consistency over the tracer data 

+

663 # and tracer_map tracks the tracers to be inserted. 

+

664 tracer_map = {} 

+

665 for path in files.values(): 

+

666 this_tracer = this_tracers.get(path) 

+

667 other_tracer = tracers.get(path, '') 

+

668 # If there is no tracer, there is always the None tracer. 

+

669 if this_tracer is not None and this_tracer != other_tracer: 

+

670 raise CoverageException( 

+

671 "Conflicting file tracer name for '%s': %r vs %r" % ( 

+

672 path, this_tracer, other_tracer 

+

673 ) 

+

674 ) 

+

675 tracer_map[path] = other_tracer 

+

676 

+

677 # Prepare arc and line rows to be inserted by converting the file 

+

678 # and context strings with integer ids. Then use the efficient 

+

679 # `executemany()` to insert all rows at once. 

+

680 arc_rows = ( 

+

681 (file_ids[file], context_ids[context], fromno, tono) 

+

682 for file, context, fromno, tono in arcs 

+

683 ) 

+

684 

+

685 # Get line data. 

+

686 cur = conn.execute( 

+

687 'select file.path, context.context, line_bits.numbits ' 

+

688 'from line_bits ' 

+

689 'inner join file on file.id = line_bits.file_id ' 

+

690 'inner join context on context.id = line_bits.context_id' 

+

691 ) 

+

692 for path, context, numbits in cur: 

+

693 key = (aliases.map(path), context) 

+

694 if key in lines: 

+

695 numbits = numbits_union(lines[key], numbits) 

+

696 lines[key] = numbits 

+

697 cur.close() 

+

698 

+

699 if arcs: 

+

700 self._choose_lines_or_arcs(arcs=True) 

+

701 

+

702 # Write the combined data. 

+

703 conn.executemany( 

+

704 'insert or ignore into arc ' 

+

705 '(file_id, context_id, fromno, tono) values (?, ?, ?, ?)', 

+

706 arc_rows 

+

707 ) 

+

708 

+

709 if lines: 

+

710 self._choose_lines_or_arcs(lines=True) 

+

711 conn.execute("delete from line_bits") 

+

712 conn.executemany( 

+

713 "insert into line_bits " 

+

714 "(file_id, context_id, numbits) values (?, ?, ?)", 

+

715 [ 

+

716 (file_ids[file], context_ids[context], numbits) 

+

717 for (file, context), numbits in lines.items() 

+

718 ] 

+

719 ) 

+

720 conn.executemany( 

+

721 'insert or ignore into tracer (file_id, tracer) values (?, ?)', 

+

722 ((file_ids[filename], tracer) for filename, tracer in tracer_map.items()) 

+

723 ) 

+

724 

+

725 # Update all internal cache data. 

+

726 self._reset() 

+

727 self.read() 

+
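A hedged sketch of combining one parallel data file into a main one, loosely modeled on what `coverage combine` does; the ".coverage.worker1" file name and the CI path are assumptions.

from coverage import CoverageData
from coverage.files import PathAliases

combined = CoverageData()                              # target ".coverage" file
worker = CoverageData(basename=".coverage.worker1")    # a parallel-mode data file
worker.read()

aliases = PathAliases()
aliases.add("/ci/build/project/src", "src")            # remap the worker's absolute paths
combined.update(worker, aliases=aliases)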

728 

+

729 def erase(self, parallel=False): 

+

730 """Erase the data in this object. 

+

731 

+

732 If `parallel` is true, then also deletes data files created from the 

+

733 basename by parallel-mode. 

+

734 

+

735 """ 

+

736 self._reset() 

+

737 if self._no_disk: 

+

738 return 

+

739 if self._debug.should('dataio'): 

+

740 self._debug.write("Erasing data file {!r}".format(self._filename)) 

+

741 file_be_gone(self._filename) 

+

742 if parallel: 

+

743 data_dir, local = os.path.split(self._filename) 

+

744 localdot = local + '.*' 

+

745 pattern = os.path.join(os.path.abspath(data_dir), localdot) 

+

746 for filename in glob.glob(pattern): 

+

747 if self._debug.should('dataio'): 747 ↛ 748line 747 didn't jump to line 748, because the condition on line 747 was never true

+

748 self._debug.write("Erasing parallel data file {!r}".format(filename)) 

+

749 file_be_gone(filename) 

+

750 

+

751 def read(self): 

+

752 """Start using an existing data file.""" 

+

753 with self._connect(): # TODO: doesn't look right 

+

754 self._have_used = True 

+

755 

+

756 def write(self): 

+

757 """Ensure the data is written to the data file.""" 

+

758 pass 

+

759 

+

760 def _start_using(self): 

+

761 """Call this before using the database at all.""" 

+

762 if self._pid != os.getpid(): 

+

763 # Looks like we forked! Have to start a new data file. 

+

764 self._reset() 

+

765 self._choose_filename() 

+

766 self._pid = os.getpid() 

+

767 if not self._have_used: 

+

768 self.erase() 

+

769 self._have_used = True 

+

770 

+

771 def has_arcs(self): 

+

772 """Does the database have arcs (True) or lines (False).""" 

+

773 return bool(self._has_arcs) 

+

774 

+

775 def measured_files(self): 

+

776 """A set of all files that had been measured.""" 

+

777 return set(self._file_map) 

+

778 

+

779 def measured_contexts(self): 

+

780 """A set of all contexts that have been measured. 

+

781 

+

782 .. versionadded:: 5.0 

+

783 

+

784 """ 

+

785 self._start_using() 

+

786 with self._connect() as con: 

+

787 contexts = {row[0] for row in con.execute("select distinct(context) from context")} 

+

788 return contexts 

+

789 

+

790 def file_tracer(self, filename): 

+

791 """Get the plugin name of the file tracer for a file. 

+

792 

+

793 Returns the name of the plugin that handles this file. If the file was 

+

794 measured, but didn't use a plugin, then "" is returned. If the file 

+

795 was not measured, then None is returned. 

+

796 

+

797 """ 

+

798 self._start_using() 

+

799 with self._connect() as con: 

+

800 file_id = self._file_id(filename) 

+

801 if file_id is None: 

+

802 return None 

+

803 row = con.execute_one("select tracer from tracer where file_id = ?", (file_id,)) 

+

804 if row is not None: 

+

805 return row[0] or "" 

+

806 return "" # File was measured, but no tracer associated. 

+

807 

+

808 def set_query_context(self, context): 

+

809 """Set a context for subsequent querying. 

+

810 

+

811 The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno` 

+

812 calls will be limited to only one context. `context` is a string which 

+

813 must match a context exactly. If it does not, no exception is raised, 

+

814 but queries will return no data. 

+

815 

+

816 .. versionadded:: 5.0 

+

817 

+

818 """ 

+

819 self._start_using() 

+

820 with self._connect() as con: 

+

821 cur = con.execute("select id from context where context = ?", (context,)) 

+

822 self._query_context_ids = [row[0] for row in cur.fetchall()] 

+

823 

+

824 def set_query_contexts(self, contexts): 

+

825 """Set a number of contexts for subsequent querying. 

+

826 

+

827 The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno` 

+

828 calls will be limited to the specified contexts. `contexts` is a list 

+

829 of Python regular expressions. Contexts will be matched using 

+

830 :func:`re.search <python:re.search>`. Data will be included in query 

+

831 results if they are part of any of the contexts matched. 

+

832 

+

833 .. versionadded:: 5.0 

+

834 

+

835 """ 

+

836 self._start_using() 

+

837 if contexts: 

+

838 with self._connect() as con: 

+

839 context_clause = ' or '.join(['context regexp ?'] * len(contexts)) 

+

840 cur = con.execute("select id from context where " + context_clause, contexts) 

+

841 self._query_context_ids = [row[0] for row in cur.fetchall()] 

+

842 else: 

+

843 self._query_context_ids = None 

+

844 

+
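A minimal sketch of how the context clause built above expands, using two hypothetical patterns; the names follow the code as listed:

    contexts = ["test_api.*", "test_process.*"]   # hypothetical context regexes
    context_clause = ' or '.join(['context regexp ?'] * len(contexts))
    # context_clause == "context regexp ? or context regexp ?"
    sql = "select id from context where " + context_clause
    # con.execute(sql, contexts) then yields the ids that feed _query_context_ids.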

845 def lines(self, filename): 

+

846 """Get the list of lines executed for a file. 

+

847 

+

848 If the file was not measured, returns None. A file might be measured, 

+

849 and have no lines executed, in which case an empty list is returned. 

+

850 

+

851 If the file was executed, returns a list of integers, the line numbers 

+

852 executed in the file. The list is in no particular order. 

+

853 

+

854 """ 

+

855 self._start_using() 

+

856 if self.has_arcs(): 

+

857 arcs = self.arcs(filename) 

+

858 if arcs is not None: 

+

859 all_lines = itertools.chain.from_iterable(arcs) 

+

860 return list({l for l in all_lines if l > 0}) 

+

861 

+

862 with self._connect() as con: 

+

863 file_id = self._file_id(filename) 

+

864 if file_id is None: 

+

865 return None 

+

866 else: 

+

867 query = "select numbits from line_bits where file_id = ?" 

+

868 data = [file_id] 

+

869 if self._query_context_ids is not None: 

+

870 ids_array = ', '.join('?' * len(self._query_context_ids)) 

+

871 query += " and context_id in (" + ids_array + ")" 

+

872 data += self._query_context_ids 

+

873 bitmaps = list(con.execute(query, data)) 

+

874 nums = set() 

+

875 for row in bitmaps: 

+

876 nums.update(numbits_to_nums(row[0])) 

+

877 return list(nums) 

+

878 

+
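The numbits blobs decoded in lines() above are compact bitmaps of line numbers. A rough sketch of the decoding, assuming the packing used by coverage.numbits (bit j of byte i set means line i*8 + j ran); the helper name here is made up:

    def numbits_to_nums_sketch(numbits):
        """Rough stand-in for coverage.numbits.numbits_to_nums."""
        nums = []
        for byte_i, byte in enumerate(bytearray(numbits)):
            for bit_i in range(8):
                if byte & (1 << bit_i):
                    nums.append(byte_i * 8 + bit_i)
        return nums

    print(numbits_to_nums_sketch(b"\x05"))   # [0, 2]: bits 0 and 2 are set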

879 def arcs(self, filename): 

+

880 """Get the list of arcs executed for a file. 

+

881 

+

882 If the file was not measured, returns None. A file might be measured, 

+

883 and have no arcs executed, in which case an empty list is returned. 

+

884 

+

885 If the file was executed, returns a list of 2-tuples of integers. Each 

+

886 pair is a starting line number and an ending line number for a 

+

887 transition from one line to another. The list is in no particular 

+

888 order. 

+

889 

+

890 Negative numbers have special meaning. If the starting line number is 

+

891 -N, it represents an entry to the code object that starts at line N. 

+

892 If the ending line number is -N, it's an exit from the code object that 

+

893 starts at line N. 

+

894 

+

895 """ 

+

896 self._start_using() 

+

897 with self._connect() as con: 

+

898 file_id = self._file_id(filename) 

+

899 if file_id is None: 

+

900 return None 

+

901 else: 

+

902 query = "select distinct fromno, tono from arc where file_id = ?" 

+

903 data = [file_id] 

+

904 if self._query_context_ids is not None: 

+

905 ids_array = ', '.join('?' * len(self._query_context_ids)) 

+

906 query += " and context_id in (" + ids_array + ")" 

+

907 data += self._query_context_ids 

+

908 arcs = con.execute(query, data) 

+

909 return list(arcs) 

+

910 

+

911 def contexts_by_lineno(self, filename): 

+

912 """Get the contexts for each line in a file. 

+

913 

+

914 Returns: 

+

915 A dict mapping line numbers to a list of context names. 

+

916 

+

917 .. versionadded:: 5.0 

+

918 

+

919 """ 

+

920 lineno_contexts_map = collections.defaultdict(list) 

+

921 self._start_using() 

+

922 with self._connect() as con: 

+

923 file_id = self._file_id(filename) 

+

924 if file_id is None: 

+

925 return lineno_contexts_map 

+

926 if self.has_arcs(): 

+

927 query = ( 

+

928 "select arc.fromno, arc.tono, context.context " 

+

929 "from arc, context " 

+

930 "where arc.file_id = ? and arc.context_id = context.id" 

+

931 ) 

+

932 data = [file_id] 

+

933 if self._query_context_ids is not None:  933 ↛ 934: line 933 didn't jump to line 934, because the condition on line 933 was never true

+

934 ids_array = ', '.join('?' * len(self._query_context_ids)) 

+

935 query += " and arc.context_id in (" + ids_array + ")" 

+

936 data += self._query_context_ids 

+

937 for fromno, tono, context in con.execute(query, data): 

+

938 if context not in lineno_contexts_map[fromno]: 

+

939 lineno_contexts_map[fromno].append(context) 

+

940 if context not in lineno_contexts_map[tono]: 

+

941 lineno_contexts_map[tono].append(context) 

+

942 else: 

+

943 query = ( 

+

944 "select l.numbits, c.context from line_bits l, context c " 

+

945 "where l.context_id = c.id " 

+

946 "and file_id = ?" 

+

947 ) 

+

948 data = [file_id] 

+

949 if self._query_context_ids is not None: 

+

950 ids_array = ', '.join('?' * len(self._query_context_ids)) 

+

951 query += " and l.context_id in (" + ids_array + ")" 

+

952 data += self._query_context_ids 

+

953 for numbits, context in con.execute(query, data): 

+

954 for lineno in numbits_to_nums(numbits): 

+

955 lineno_contexts_map[lineno].append(context) 

+

956 return lineno_contexts_map 

+

957 

+

958 @classmethod 

+

959 def sys_info(cls): 

+

960 """Our information for `Coverage.sys_info`. 

+

961 

+

962 Returns a list of (key, value) pairs. 

+

963 

+

964 """ 

+

965 with SqliteDb(":memory:", debug=NoDebugging()) as db: 

+

966 temp_store = [row[0] for row in db.execute("pragma temp_store")] 

+

967 compile_options = [row[0] for row in db.execute("pragma compile_options")] 

+

968 

+

969 return [ 

+

970 ('sqlite3_version', sqlite3.version), 

+

971 ('sqlite3_sqlite_version', sqlite3.sqlite_version), 

+

972 ('sqlite3_temp_store', temp_store), 

+

973 ('sqlite3_compile_options', compile_options), 

+

974 ] 

+

975 

+

976 

+

977class SqliteDb(SimpleReprMixin): 

+

978 """A simple abstraction over a SQLite database. 

+

979 

+

980 Use as a context manager, then you can use it like a 

+

981 :class:`python:sqlite3.Connection` object:: 

+

982 

+

983 with SqliteDb(filename, debug_control) as db: 

+

984 db.execute("insert into schema (version) values (?)", (SCHEMA_VERSION,)) 

+

985 

+

986 """ 

+

987 def __init__(self, filename, debug): 

+

988 self.debug = debug if debug.should('sql') else None 

+

989 self.filename = filename 

+

990 self.nest = 0 

+

991 self.con = None 

+

992 

+

993 def _connect(self): 

+

994 """Connect to the db and do universal initialization.""" 

+

995 if self.con is not None: 

+

996 return 

+

997 

+

998 # SQLite on Windows on py2 won't open a file if the filename argument 

+

999 # has non-ascii characters in it. Opening a relative file name avoids 

+

1000 # a problem if the current directory has non-ascii. 

+

1001 filename = self.filename 

+

1002 if env.WINDOWS and env.PY2: 

+

1003 try: 

+

1004 filename = os.path.relpath(self.filename) 

+

1005 except ValueError: 

+

1006 # ValueError can be raised under Windows when os.getcwd() returns a 

+

1007 # folder from a different drive than the drive of self.filename in 

+

1008 # which case we keep the original value of self.filename unchanged, 

+

1009 # hoping that we won't face the non-ascii directory problem. 

+

1010 pass 

+

1011 

+

1012 # It can happen that Python switches threads while the tracer writes 

+

1013 # data. The second thread will also try to write to the data, 

+

1014 # effectively causing a nested context. However, given the idempotent 

+

1015 # nature of the tracer operations, sharing a connection among threads 

+

1016 # is not a problem. 

+

1017 if self.debug:  1017 ↛ 1018: line 1017 didn't jump to line 1018, because the condition on line 1017 was never true

+

1018 self.debug.write("Connecting to {!r}".format(self.filename)) 

+

1019 self.con = sqlite3.connect(filename, check_same_thread=False) 

+

1020 self.con.create_function('REGEXP', 2, _regexp) 

+

1021 

+

1022 # This pragma makes writing faster. It disables rollbacks, but we never need them. 

+

1023 # PyPy needs the .close() calls here, or sqlite gets twisted up: 

+

1024 # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on 

+

1025 self.execute("pragma journal_mode=off").close() 

+

1026 # This pragma makes writing faster. 

+

1027 self.execute("pragma synchronous=off").close() 

+

1028 

+

1029 def close(self): 

+

1030 """If needed, close the connection.""" 

+

1031 if self.con is not None and self.filename != ":memory:": 

+

1032 self.con.close() 

+

1033 self.con = None 

+

1034 

+

1035 def __enter__(self): 

+

1036 if self.nest == 0: 

+

1037 self._connect() 

+

1038 self.con.__enter__() 

+

1039 self.nest += 1 

+

1040 return self 

+

1041 

+

1042 def __exit__(self, exc_type, exc_value, traceback): 

+

1043 self.nest -= 1 

+

1044 if self.nest == 0: 

+

1045 try: 

+

1046 self.con.__exit__(exc_type, exc_value, traceback) 

+

1047 self.close() 

+

1048 except Exception as exc: 

+

1049 if self.debug: 

+

1050 self.debug.write("EXCEPTION from __exit__: {}".format(exc)) 

+

1051 raise 

+

1052 

+

1053 def execute(self, sql, parameters=()): 

+

1054 """Same as :meth:`python:sqlite3.Connection.execute`.""" 

+

1055 if self.debug:  1055 ↛ 1056: line 1055 didn't jump to line 1056, because the condition on line 1055 was never true

+

1056 tail = " with {!r}".format(parameters) if parameters else "" 

+

1057 self.debug.write("Executing {!r}{}".format(sql, tail)) 

+

1058 try: 

+

1059 try: 

+

1060 return self.con.execute(sql, parameters) 

+

1061 except Exception: 

+

1062 # In some cases, an error might happen that isn't really an 

+

1063 # error. Try again immediately. 

+

1064 # https://github.com/nedbat/coveragepy/issues/1010 

+

1065 return self.con.execute(sql, parameters) 

+

1066 except sqlite3.Error as exc: 

+

1067 msg = str(exc) 

+

1068 try: 

+

1069 # `execute` is the first thing we do with the database, so try 

+

1070 # hard to provide useful hints if something goes wrong now. 

+

1071 with open(self.filename, "rb") as bad_file: 

+

1072 cov4_sig = b"!coverage.py: This is a private format" 

+

1073 if bad_file.read(len(cov4_sig)) == cov4_sig: 

+

1074 msg = ( 

+

1075 "Looks like a coverage 4.x data file. " 

+

1076 "Are you mixing versions of coverage?" 

+

1077 ) 

+

1078 except Exception: 

+

1079 pass 

+

1080 if self.debug:  1080 ↛ 1081: line 1080 didn't jump to line 1081, because the condition on line 1080 was never true

+

1081 self.debug.write("EXCEPTION from execute: {}".format(msg)) 

+

1082 raise CoverageException("Couldn't use data file {!r}: {}".format(self.filename, msg)) 

+

1083 

+

1084 def execute_one(self, sql, parameters=()): 

+

1085 """Execute a statement and return the one row that results. 

+

1086 

+

1087 This is like execute(sql, parameters).fetchone(), except it is 

+

1088 correct in reading the entire result set. This will raise an 

+

1089 exception if more than one row results. 

+

1090 

+

1091 Returns a row, or None if there were no rows. 

+

1092 """ 

+

1093 rows = list(self.execute(sql, parameters)) 

+

1094 if len(rows) == 0: 

+

1095 return None 

+

1096 elif len(rows) == 1:  1096 ↛ 1099: line 1096 didn't jump to line 1099, because the condition on line 1096 was never false

+

1097 return rows[0] 

+

1098 else: 

+

1099 raise CoverageException("Sql {!r} shouldn't return {} rows".format(sql, len(rows))) 

+

1100 

+

1101 def executemany(self, sql, data): 

+

1102 """Same as :meth:`python:sqlite3.Connection.executemany`.""" 

+

1103 if self.debug:  1103 ↛ 1104: line 1103 didn't jump to line 1104, because the condition on line 1103 was never true

+

1104 data = list(data) 

+

1105 self.debug.write("Executing many {!r} with {} rows".format(sql, len(data))) 

+

1106 return self.con.executemany(sql, data) 

+

1107 

+

1108 def executescript(self, script): 

+

1109 """Same as :meth:`python:sqlite3.Connection.executescript`.""" 

+

1110 if self.debug:  1110 ↛ 1111: line 1110 didn't jump to line 1111, because the condition on line 1110 was never true

+

1111 self.debug.write("Executing script with {} chars: {}".format( 

+

1112 len(script), clipped_repr(script, 100), 

+

1113 )) 

+

1114 self.con.executescript(script) 

+

1115 

+

1116 def dump(self): 

+

1117 """Return a multi-line string, the SQL dump of the database.""" 

+

1118 return "\n".join(self.con.iterdump()) 

+

1119 

+

1120 

+

1121def _regexp(text, pattern): 

+

1122 """A regexp function for SQLite.""" 

+

1123 return re.search(text, pattern) is not None 

+
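A self-contained sketch of how the REGEXP function registered in _connect() is exercised by the context queries above; the table contents here are hypothetical. SQLite evaluates "X REGEXP Y" as a call to regexp(Y, X), which is why _regexp works even though its parameter names read backwards.

    import re
    import sqlite3

    def _regexp(text, pattern):
        # For "X REGEXP Y", SQLite calls regexp(Y, X): `text` is the query's
        # regex argument and `pattern` is the column value being tested.
        return re.search(text, pattern) is not None

    con = sqlite3.connect(":memory:")
    con.create_function('REGEXP', 2, _regexp)
    con.execute("create table context (id integer primary key, context text)")
    con.executemany(
        "insert into context (context) values (?)",
        [("test_api.ApiTest.test_read",), ("test_summary.SummaryTest.test_report",)],
    )
    rows = con.execute("select id from context where context regexp ?", ["test_api.*"]).fetchall()
    print(rows)   # [(1,)]: only the test_api context matches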
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_summary_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_summary_py.html
new file mode 100644
index 000000000..300ebd5ce
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_summary_py.html
@@ -0,0 +1,218 @@
+ Coverage for coverage/summary.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Summary reporting""" 

+

5 

+

6import sys 

+

7 

+

8from coverage import env 

+

9from coverage.report import get_analysis_to_report 

+

10from coverage.results import Numbers 

+

11from coverage.misc import CoverageException, output_encoding 

+

12 

+

13 

+

14class SummaryReporter(object): 

+

15 """A reporter for writing the summary report.""" 

+

16 

+

17 def __init__(self, coverage): 

+

18 self.coverage = coverage 

+

19 self.config = self.coverage.config 

+

20 self.branches = coverage.get_data().has_arcs() 

+

21 self.outfile = None 

+

22 self.fr_analysis = [] 

+

23 self.skipped_count = 0 

+

24 self.empty_count = 0 

+

25 self.total = Numbers() 

+

26 self.fmt_err = u"%s %s: %s" 

+

27 

+

28 def writeout(self, line): 

+

29 """Write a line to the output, adding a newline.""" 

+

30 if env.PY2: 

+

31 line = line.encode(output_encoding()) 

+

32 self.outfile.write(line.rstrip()) 

+

33 self.outfile.write("\n") 

+

34 

+

35 def report(self, morfs, outfile=None): 

+

36 """Writes a report summarizing coverage statistics per module. 

+

37 

+

38 `outfile` is a file object to write the summary to. It must be opened 

+

39 for native strings (bytes on Python 2, Unicode on Python 3). 

+

40 

+

41 """ 

+

42 self.outfile = outfile or sys.stdout 

+

43 

+

44 self.coverage.get_data().set_query_contexts(self.config.report_contexts) 

+

45 for fr, analysis in get_analysis_to_report(self.coverage, morfs): 

+

46 self.report_one_file(fr, analysis) 

+

47 

+

48 # Prepare the formatting strings, header, and column sorting. 

+

49 max_name = max([len(fr.relative_filename()) for (fr, analysis) in self.fr_analysis] + [5]) 

+

50 fmt_name = u"%%- %ds " % max_name 

+

51 fmt_skip_covered = u"\n%s file%s skipped due to complete coverage." 

+

52 fmt_skip_empty = u"\n%s empty file%s skipped." 

+

53 

+

54 header = (fmt_name % "Name") + u" Stmts Miss" 

+

55 fmt_coverage = fmt_name + u"%6d %6d" 

+

56 if self.branches: 

+

57 header += u" Branch BrPart" 

+

58 fmt_coverage += u" %6d %6d" 

+

59 width100 = Numbers.pc_str_width() 

+

60 header += u"%*s" % (width100+4, "Cover") 

+

61 fmt_coverage += u"%%%ds%%%%" % (width100+3,) 

+

62 if self.config.show_missing: 

+

63 header += u" Missing" 

+

64 fmt_coverage += u" %s" 

+

65 rule = u"-" * len(header) 

+

66 

+

67 column_order = dict(name=0, stmts=1, miss=2, cover=-1) 

+

68 if self.branches: 

+

69 column_order.update(dict(branch=3, brpart=4)) 

+

70 

+

71 # Write the header 

+

72 self.writeout(header) 

+

73 self.writeout(rule) 

+

74 

+

75 # `lines` is a list of pairs, (line text, line values). The line text 

+

76 # is a string that will be printed, and line values is a tuple of 

+

77 # sortable values. 

+

78 lines = [] 

+

79 

+

80 for (fr, analysis) in self.fr_analysis: 

+

81 nums = analysis.numbers 

+

82 

+

83 args = (fr.relative_filename(), nums.n_statements, nums.n_missing) 

+

84 if self.branches: 

+

85 args += (nums.n_branches, nums.n_partial_branches) 

+

86 args += (nums.pc_covered_str,) 

+

87 if self.config.show_missing: 

+

88 args += (analysis.missing_formatted(branches=True),) 

+

89 text = fmt_coverage % args 

+

90 # Add numeric percent coverage so that sorting makes sense. 

+

91 args += (nums.pc_covered,) 

+

92 lines.append((text, args)) 

+

93 

+

94 # Sort the lines and write them out. 

+

95 if getattr(self.config, 'sort', None): 

+

96 sort_option = self.config.sort.lower() 

+

97 reverse = False 

+

98 if sort_option[0] == '-': 

+

99 reverse = True 

+

100 sort_option = sort_option[1:] 

+

101 elif sort_option[0] == '+': 

+

102 sort_option = sort_option[1:] 

+

103 

+

104 position = column_order.get(sort_option) 

+

105 if position is None: 

+

106 raise CoverageException("Invalid sorting option: {!r}".format(self.config.sort)) 

+

107 lines.sort(key=lambda l: (l[1][position], l[0]), reverse=reverse) 

+

108 

+

109 for line in lines: 

+

110 self.writeout(line[0]) 

+

111 

+

112 # Write a TOTAL line if we had at least one file. 

+

113 if self.total.n_files > 0: 

+

114 self.writeout(rule) 

+

115 args = ("TOTAL", self.total.n_statements, self.total.n_missing) 

+

116 if self.branches: 

+

117 args += (self.total.n_branches, self.total.n_partial_branches) 

+

118 args += (self.total.pc_covered_str,) 

+

119 if self.config.show_missing: 

+

120 args += ("",) 

+

121 self.writeout(fmt_coverage % args) 

+

122 

+

123 # Write other final lines. 

+

124 if not self.total.n_files and not self.skipped_count: 

+

125 raise CoverageException("No data to report.") 

+

126 

+

127 if self.config.skip_covered and self.skipped_count: 

+

128 self.writeout( 

+

129 fmt_skip_covered % (self.skipped_count, 's' if self.skipped_count > 1 else '') 

+

130 ) 

+

131 if self.config.skip_empty and self.empty_count: 

+

132 self.writeout( 

+

133 fmt_skip_empty % (self.empty_count, 's' if self.empty_count > 1 else '') 

+

134 ) 

+

135 

+

136 return self.total.n_statements and self.total.pc_covered 

+

137 

+

138 def report_one_file(self, fr, analysis): 

+

139 """Report on just one file, the callback from report().""" 

+

140 nums = analysis.numbers 

+

141 self.total += nums 

+

142 

+

143 no_missing_lines = (nums.n_missing == 0) 

+

144 no_missing_branches = (nums.n_partial_branches == 0) 

+

145 if self.config.skip_covered and no_missing_lines and no_missing_branches: 

+

146 # Don't report on 100% files. 

+

147 self.skipped_count += 1 

+

148 elif self.config.skip_empty and nums.n_statements == 0: 

+

149 # Don't report on empty files. 

+

150 self.empty_count += 1 

+

151 else: 

+

152 self.fr_analysis.append((fr, analysis)) 

+
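A short illustration of how the format strings assembled in report() expand; the width and sample row are hypothetical, and Numbers.pc_str_width() is assumed to be 3, its usual default:

    max_name = 20
    fmt_name = u"%%- %ds " % max_name              # "%- 20s " : name column, left-justified
    fmt_coverage = fmt_name + u"%6d %6d"            # Name, Stmts, Miss
    width100 = 3                                    # assumed Numbers.pc_str_width()
    fmt_coverage += u"%%%ds%%%%" % (width100 + 3,)  # appends "%6s%%" for the Cover column
    print(fmt_coverage % ("coverage/summary.py", 100, 0, "100"))
    # name justified to 20 chars, then "   100      0   100%"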
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_templite_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_templite_py.html
new file mode 100644
index 000000000..6ec3a1a58
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_templite_py.html
@@ -0,0 +1,368 @@
+ Coverage for coverage/templite.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""A simple Python template renderer, for a nano-subset of Django syntax. 

+

5 

+

6For a detailed discussion of this code, see this chapter from 500 Lines: 

+

7http://aosabook.org/en/500L/a-template-engine.html 

+

8 

+

9""" 

+

10 

+

11# Coincidentally named the same as http://code.activestate.com/recipes/496702/ 

+

12 

+

13import re 

+

14 

+

15from coverage import env 

+

16 

+

17 

+

18class TempliteSyntaxError(ValueError): 

+

19 """Raised when a template has a syntax error.""" 

+

20 pass 

+

21 

+

22 

+

23class TempliteValueError(ValueError): 

+

24 """Raised when an expression won't evaluate in a template.""" 

+

25 pass 

+

26 

+

27 

+

28class CodeBuilder(object): 

+

29 """Build source code conveniently.""" 

+

30 

+

31 def __init__(self, indent=0): 

+

32 self.code = [] 

+

33 self.indent_level = indent 

+

34 

+

35 def __str__(self): 

+

36 return "".join(str(c) for c in self.code) 

+

37 

+

38 def add_line(self, line): 

+

39 """Add a line of source to the code. 

+

40 

+

41 Indentation and newline will be added for you, don't provide them. 

+

42 

+

43 """ 

+

44 self.code.extend([" " * self.indent_level, line, "\n"]) 

+

45 

+

46 def add_section(self): 

+

47 """Add a section, a sub-CodeBuilder.""" 

+

48 section = CodeBuilder(self.indent_level) 

+

49 self.code.append(section) 

+

50 return section 

+

51 

+

52 INDENT_STEP = 4 # PEP8 says so! 

+

53 

+

54 def indent(self): 

+

55 """Increase the current indent for following lines.""" 

+

56 self.indent_level += self.INDENT_STEP 

+

57 

+

58 def dedent(self): 

+

59 """Decrease the current indent for following lines.""" 

+

60 self.indent_level -= self.INDENT_STEP 

+

61 

+

62 def get_globals(self): 

+

63 """Execute the code, and return a dict of globals it defines.""" 

+

64 # A check that the caller really finished all the blocks they started. 

+

65 assert self.indent_level == 0 

+

66 # Get the Python source as a single string. 

+

67 python_source = str(self) 

+

68 # Execute the source, defining globals, and return them. 

+

69 global_namespace = {} 

+

70 exec(python_source, global_namespace) 

+

71 return global_namespace 

+

72 

+

73 

+
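A toy use of CodeBuilder, assuming the module is importable as coverage.templite:

    from coverage.templite import CodeBuilder

    code = CodeBuilder()
    code.add_line("def double(x):")
    code.indent()
    code.add_line("return x * 2")
    code.dedent()
    # get_globals() exec's the generated source and returns its global dict.
    double = code.get_globals()['double']
    print(double(21))   # 42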

74class Templite(object): 

+

75 """A simple template renderer, for a nano-subset of Django syntax. 

+

76 

+

77 Supported constructs are extended variable access:: 

+

78 

+

79 {{var.modifier.modifier|filter|filter}} 

+

80 

+

81 loops:: 

+

82 

+

83 {% for var in list %}...{% endfor %} 

+

84 

+

85 and ifs:: 

+

86 

+

87 {% if var %}...{% endif %} 

+

88 

+

89 Comments are within curly-hash markers:: 

+

90 

+

91 {# This will be ignored #} 

+

92 

+

93 Lines between `{% joined %}` and `{% endjoined %}` will have lines stripped 

+

94 and joined. Be careful, this could join words together! 

+

95 

+

96 Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`), 

+

97 which will collapse the whitespace following the tag. 

+

98 

+

99 Construct a Templite with the template text, then use `render` against a 

+

100 dictionary context to create a finished string:: 

+

101 

+

102 templite = Templite(''' 

+

103 <h1>Hello {{name|upper}}!</h1> 

+

104 {% for topic in topics %} 

+

105 <p>You are interested in {{topic}}.</p> 

+

106 {% endfor %} 

+

107 ''', 

+

108 {'upper': str.upper}, 

+

109 ) 

+

110 text = templite.render({ 

+

111 'name': "Ned", 

+

112 'topics': ['Python', 'Geometry', 'Juggling'], 

+

113 }) 

+

114 

+

115 """ 

+

116 def __init__(self, text, *contexts): 

+

117 """Construct a Templite with the given `text`. 

+

118 

+

119 `contexts` are dictionaries of values to use for future renderings. 

+

120 These are good for filters and global values. 

+

121 

+

122 """ 

+

123 self.context = {} 

+

124 for context in contexts: 

+

125 self.context.update(context) 

+

126 

+

127 self.all_vars = set() 

+

128 self.loop_vars = set() 

+

129 

+

130 # We construct a function in source form, then compile it and hold onto 

+

131 # it, and execute it to render the template. 

+

132 code = CodeBuilder() 

+

133 

+

134 code.add_line("def render_function(context, do_dots):") 

+

135 code.indent() 

+

136 vars_code = code.add_section() 

+

137 code.add_line("result = []") 

+

138 code.add_line("append_result = result.append") 

+

139 code.add_line("extend_result = result.extend") 

+

140 if env.PY2: 

+

141 code.add_line("to_str = unicode") 

+

142 else: 

+

143 code.add_line("to_str = str") 

+

144 

+

145 buffered = [] 

+

146 

+

147 def flush_output(): 

+

148 """Force `buffered` to the code builder.""" 

+

149 if len(buffered) == 1: 

+

150 code.add_line("append_result(%s)" % buffered[0]) 

+

151 elif len(buffered) > 1: 

+

152 code.add_line("extend_result([%s])" % ", ".join(buffered)) 

+

153 del buffered[:] 

+

154 

+

155 ops_stack = [] 

+

156 

+

157 # Split the text to form a list of tokens. 

+

158 tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text) 

+

159 

+

160 squash = in_joined = False 

+

161 

+

162 for token in tokens: 

+

163 if token.startswith('{'): 

+

164 start, end = 2, -2 

+

165 squash = (token[-3] == '-') 

+

166 if squash: 

+

167 end = -3 

+

168 

+

169 if token.startswith('{#'): 

+

170 # Comment: ignore it and move on. 

+

171 continue 

+

172 elif token.startswith('{{'): 

+

173 # An expression to evaluate. 

+

174 expr = self._expr_code(token[start:end].strip()) 

+

175 buffered.append("to_str(%s)" % expr) 

+

176 else: 

+

177 # token.startswith('{%') 

+

178 # Action tag: split into words and parse further. 

+

179 flush_output() 

+

180 

+

181 words = token[start:end].strip().split() 

+

182 if words[0] == 'if': 

+

183 # An if statement: evaluate the expression to determine if. 

+

184 if len(words) != 2: 

+

185 self._syntax_error("Don't understand if", token) 

+

186 ops_stack.append('if') 

+

187 code.add_line("if %s:" % self._expr_code(words[1])) 

+

188 code.indent() 

+

189 elif words[0] == 'for': 

+

190 # A loop: iterate over expression result. 

+

191 if len(words) != 4 or words[2] != 'in': 

+

192 self._syntax_error("Don't understand for", token) 

+

193 ops_stack.append('for') 

+

194 self._variable(words[1], self.loop_vars) 

+

195 code.add_line( 

+

196 "for c_%s in %s:" % ( 

+

197 words[1], 

+

198 self._expr_code(words[3]) 

+

199 ) 

+

200 ) 

+

201 code.indent() 

+

202 elif words[0] == 'joined': 

+

203 ops_stack.append('joined') 

+

204 in_joined = True 

+

205 elif words[0].startswith('end'): 

+

206 # Endsomething. Pop the ops stack. 

+

207 if len(words) != 1: 

+

208 self._syntax_error("Don't understand end", token) 

+

209 end_what = words[0][3:] 

+

210 if not ops_stack: 

+

211 self._syntax_error("Too many ends", token) 

+

212 start_what = ops_stack.pop() 

+

213 if start_what != end_what: 

+

214 self._syntax_error("Mismatched end tag", end_what) 

+

215 if end_what == 'joined': 

+

216 in_joined = False 

+

217 else: 

+

218 code.dedent() 

+

219 else: 

+

220 self._syntax_error("Don't understand tag", words[0]) 

+

221 else: 

+

222 # Literal content. If it isn't empty, output it. 

+

223 if in_joined: 

+

224 token = re.sub(r"\s*\n\s*", "", token.strip()) 

+

225 elif squash: 

+

226 token = token.lstrip() 

+

227 if token: 

+

228 buffered.append(repr(token)) 

+

229 

+

230 if ops_stack: 

+

231 self._syntax_error("Unmatched action tag", ops_stack[-1]) 

+

232 

+

233 flush_output() 

+

234 

+

235 for var_name in self.all_vars - self.loop_vars: 

+

236 vars_code.add_line("c_%s = context[%r]" % (var_name, var_name)) 

+

237 

+

238 code.add_line('return "".join(result)') 

+

239 code.dedent() 

+

240 self._render_function = code.get_globals()['render_function'] 

+

241 

+

242 def _expr_code(self, expr): 

+

243 """Generate a Python expression for `expr`.""" 

+

244 if "|" in expr: 

+

245 pipes = expr.split("|") 

+

246 code = self._expr_code(pipes[0]) 

+

247 for func in pipes[1:]: 

+

248 self._variable(func, self.all_vars) 

+

249 code = "c_%s(%s)" % (func, code) 

+

250 elif "." in expr: 

+

251 dots = expr.split(".") 

+

252 code = self._expr_code(dots[0]) 

+

253 args = ", ".join(repr(d) for d in dots[1:]) 

+

254 code = "do_dots(%s, %s)" % (code, args) 

+

255 else: 

+

256 self._variable(expr, self.all_vars) 

+

257 code = "c_%s" % expr 

+

258 return code 

+

259 

+

260 def _syntax_error(self, msg, thing): 

+

261 """Raise a syntax error using `msg`, and showing `thing`.""" 

+

262 raise TempliteSyntaxError("%s: %r" % (msg, thing)) 

+

263 

+

264 def _variable(self, name, vars_set): 

+

265 """Track that `name` is used as a variable. 

+

266 

+

267 Adds the name to `vars_set`, a set of variable names. 

+

268 

+

269 Raises a syntax error if `name` is not a valid name. 

+

270 

+

271 """ 

+

272 if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name): 

+

273 self._syntax_error("Not a valid name", name) 

+

274 vars_set.add(name) 

+

275 

+

276 def render(self, context=None): 

+

277 """Render this template by applying it to `context`. 

+

278 

+

279 `context` is a dictionary of values to use in this rendering. 

+

280 

+

281 """ 

+

282 # Make the complete context we'll use. 

+

283 render_context = dict(self.context) 

+

284 if context: 

+

285 render_context.update(context) 

+

286 return self._render_function(render_context, self._do_dots) 

+

287 

+

288 def _do_dots(self, value, *dots): 

+

289 """Evaluate dotted expressions at run-time.""" 

+

290 for dot in dots: 

+

291 try: 

+

292 value = getattr(value, dot) 

+

293 except AttributeError: 

+

294 try: 

+

295 value = value[dot] 

+

296 except (TypeError, KeyError): 

+

297 raise TempliteValueError( 

+

298 "Couldn't evaluate %r.%s" % (value, dot) 

+

299 ) 

+

300 if callable(value): 

+

301 value = value() 

+

302 return value 

+
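A runnable version of the usage example from the Templite docstring above, assuming coverage.templite is importable (the for loop is closed with endfor):

    from coverage.templite import Templite

    templite = Templite(
        '''
        <h1>Hello {{name|upper}}!</h1>
        {% for topic in topics %}
            <p>You are interested in {{topic}}.</p>
        {% endfor %}
        ''',
        {'upper': str.upper},
    )
    text = templite.render({
        'name': "Ned",
        'topics': ['Python', 'Geometry', 'Juggling'],
    })
    print(text)   # NED in the heading, one <p> line per topic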
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_tomlconfig_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_tomlconfig_py.html
new file mode 100644
index 000000000..368bd0988
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_tomlconfig_py.html
@@ -0,0 +1,234 @@
+ Coverage for coverage/tomlconfig.py: 95.556%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""TOML configuration support for coverage.py""" 

+

5 

+

6import io 

+

7import os 

+

8import re 

+

9 

+

10from coverage import env 

+

11from coverage.backward import configparser, path_types 

+

12from coverage.misc import CoverageException, substitute_variables 

+

13 

+

14# TOML support is an install-time extra option. 

+

15try: 

+

16 import toml 

+

17except ImportError: # pragma: not covered 

+

18 toml = None 

+

19 

+

20 

+

21class TomlDecodeError(Exception): 

+

22 """An exception class that exists even when toml isn't installed.""" 

+

23 pass 

+

24 

+

25 

+

26class TomlConfigParser: 

+

27 """TOML file reading with the interface of HandyConfigParser.""" 

+

28 

+

29 # This class has the same interface as config.HandyConfigParser, no 

+

30 # need for docstrings. 

+

31 # pylint: disable=missing-function-docstring 

+

32 

+

33 def __init__(self, our_file): 

+

34 self.our_file = our_file 

+

35 self.data = None 

+

36 

+

37 def read(self, filenames): 

+

38 # RawConfigParser takes a filename or list of filenames, but we only 

+

39 # ever call this with a single filename. 

+

40 assert isinstance(filenames, path_types) 

+

41 filename = filenames 

+

42 if env.PYVERSION >= (3, 6): 

+

43 filename = os.fspath(filename) 

+

44 

+

45 try: 

+

46 with io.open(filename, encoding='utf-8') as fp: 

+

47 toml_text = fp.read() 

+

48 except IOError: 

+

49 return [] 

+

50 if toml: 

+

51 toml_text = substitute_variables(toml_text, os.environ) 

+

52 try: 

+

53 self.data = toml.loads(toml_text) 

+

54 except toml.TomlDecodeError as err: 

+

55 raise TomlDecodeError(*err.args) 

+

56 return [filename] 

+

57 else: 

+

58 has_toml = re.search(r"^\[tool\.coverage\.", toml_text, flags=re.MULTILINE) 

+

59 if self.our_file or has_toml: 

+

60 # Looks like they meant to read TOML, but we can't read it. 

+

61 msg = "Can't read {!r} without TOML support. Install with [toml] extra" 

+

62 raise CoverageException(msg.format(filename)) 

+

63 return [] 

+

64 

+

65 def _get_section(self, section): 

+

66 """Get a section from the data. 

+

67 

+

68 Arguments: 

+

69 section (str): A section name, which can be dotted. 

+

70 

+

71 Returns: 

+

72 name (str): the actual name of the section that was found, if any, 

+

73 or None. 

+

74 data (str): the dict of data in the section, or None if not found. 

+

75 

+

76 """ 

+

77 prefixes = ["tool.coverage."] 

+

78 if self.our_file: 

+

79 prefixes.append("") 

+

80 for prefix in prefixes: 

+

81 real_section = prefix + section 

+

82 parts = real_section.split(".") 

+

83 try: 

+

84 data = self.data[parts[0]] 

+

85 for part in parts[1:]: 

+

86 data = data[part] 

+

87 except KeyError: 

+

88 continue 

+

89 break 

+

90 else: 

+

91 return None, None 

+

92 return real_section, data 

+

93 

+

94 def _get(self, section, option): 

+

95 """Like .get, but returns the real section name and the value.""" 

+

96 name, data = self._get_section(section) 

+

97 if data is None:  97 ↛ 98: line 97 didn't jump to line 98, because the condition on line 97 was never true

+

98 raise configparser.NoSectionError(section) 

+

99 try: 

+

100 return name, data[option] 

+

101 except KeyError: 

+

102 raise configparser.NoOptionError(option, name) 

+

103 

+

104 def has_option(self, section, option): 

+

105 _, data = self._get_section(section) 

+

106 if data is None: 

+

107 return False 

+

108 return option in data 

+

109 

+

110 def has_section(self, section): 

+

111 name, _ = self._get_section(section) 

+

112 return name 

+

113 

+

114 def options(self, section): 

+

115 _, data = self._get_section(section) 

+

116 if data is None:  116 ↛ 117: line 116 didn't jump to line 117, because the condition on line 116 was never true

+

117 raise configparser.NoSectionError(section) 

+

118 return list(data.keys()) 

+

119 

+

120 def get_section(self, section): 

+

121 _, data = self._get_section(section) 

+

122 return data 

+

123 

+

124 def get(self, section, option): 

+

125 _, value = self._get(section, option) 

+

126 return value 

+

127 

+

128 def _check_type(self, section, option, value, type_, type_desc): 

+

129 if not isinstance(value, type_): 

+

130 raise ValueError( 

+

131 'Option {!r} in section {!r} is not {}: {!r}' 

+

132 .format(option, section, type_desc, value) 

+

133 ) 

+

134 

+

135 def getboolean(self, section, option): 

+

136 name, value = self._get(section, option) 

+

137 self._check_type(name, option, value, bool, "a boolean") 

+

138 return value 

+

139 

+

140 def getlist(self, section, option): 

+

141 name, values = self._get(section, option) 

+

142 self._check_type(name, option, values, list, "a list") 

+

143 return values 

+

144 

+

145 def getregexlist(self, section, option): 

+

146 name, values = self._get(section, option) 

+

147 self._check_type(name, option, values, list, "a list") 

+

148 for value in values: 

+

149 value = value.strip() 

+

150 try: 

+

151 re.compile(value) 

+

152 except re.error as e: 

+

153 raise CoverageException( 

+

154 "Invalid [%s].%s value %r: %s" % (name, option, value, e) 

+

155 ) 

+

156 return values 

+

157 

+

158 def getint(self, section, option): 

+

159 name, value = self._get(section, option) 

+

160 self._check_type(name, option, value, int, "an integer") 

+

161 return value 

+

162 

+

163 def getfloat(self, section, option): 

+

164 name, value = self._get(section, option) 

+

165 if isinstance(value, int): 

+

166 value = float(value) 

+

167 self._check_type(name, option, value, float, "a float") 

+

168 return value 

+
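A small sketch of the dotted-section lookup that _get_section() performs on a pyproject.toml-style file; the TOML text is hypothetical and needs the third-party toml package imported above:

    import toml

    data = toml.loads('''
    [tool.coverage.run]
    branch = true
    source = ["coverage"]
    ''')

    # For a pyproject.toml (our_file is False), only the "tool.coverage." prefix
    # is tried, and the dotted name is walked one dict level at a time.
    parts = ("tool.coverage." + "run").split(".")
    section = data[parts[0]]
    for part in parts[1:]:
        section = section[part]
    print(section)   # {'branch': True, 'source': ['coverage']}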
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_version_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_version_py.html
new file mode 100644
index 000000000..7bbb1c940
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_version_py.html
@@ -0,0 +1,99 @@
+ Coverage for coverage/version.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""The version and URL for coverage.py""" 

+

5# This file is exec'ed in setup.py, don't import anything! 

+

6 

+

7# Same semantics as sys.version_info. 

+

8version_info = (5, 5, 1, "alpha", 0) 

+

9 

+

10 

+

11def _make_version(major, minor, micro, releaselevel, serial): 

+

12 """Create a readable version string from version_info tuple components.""" 

+

13 assert releaselevel in ['alpha', 'beta', 'candidate', 'final'] 

+

14 version = "%d.%d" % (major, minor) 

+

15 if micro: 

+

16 version += ".%d" % (micro,) 

+

17 if releaselevel != 'final': 

+

18 short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel] 

+

19 version += "%s%d" % (short, serial) 

+

20 return version 

+

21 

+

22 

+

23def _make_url(major, minor, micro, releaselevel, serial): 

+

24 """Make the URL people should start at for this version of coverage.py.""" 

+

25 url = "https://coverage.readthedocs.io" 

+

26 if releaselevel != 'final': 

+

27 # For pre-releases, use a version-specific URL. 

+

28 url += "/en/coverage-" + _make_version(major, minor, micro, releaselevel, serial) 

+

29 return url 

+

30 

+

31 

+

32__version__ = _make_version(*version_info) 

+

33__url__ = _make_url(*version_info) 

+
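Worked through by hand, the version_info shown above renders as follows, assuming coverage.version is importable:

    from coverage.version import _make_version, _make_url

    # (5, 5, 1, "alpha", 0): "5.5" + ".1" because micro is truthy, then "a0" for alpha, serial 0.
    assert _make_version(5, 5, 1, "alpha", 0) == "5.5.1a0"
    # Pre-releases get a version-pinned documentation URL.
    assert _make_url(5, 5, 1, "alpha", 0) == "https://coverage.readthedocs.io/en/coverage-5.5.1a0"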
diff --git a/reports/20210322_66173dc24d/htmlcov/coverage_xmlreport_py.html b/reports/20210322_66173dc24d/htmlcov/coverage_xmlreport_py.html
new file mode 100644
index 000000000..533b0283b
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/coverage_xmlreport_py.html
@@ -0,0 +1,300 @@
+ Coverage for coverage/xmlreport.py: 100.000%

1# coding: utf-8 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""XML reporting for coverage.py""" 

+

6 

+

7import os 

+

8import os.path 

+

9import sys 

+

10import time 

+

11import xml.dom.minidom 

+

12 

+

13from coverage import env 

+

14from coverage import __url__, __version__, files 

+

15from coverage.backward import iitems 

+

16from coverage.misc import isolate_module 

+

17from coverage.report import get_analysis_to_report 

+

18 

+

19os = isolate_module(os) 

+

20 

+

21 

+

22DTD_URL = 'https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd' 

+

23 

+

24 

+

25def rate(hit, num): 

+

26 """Return the fraction of `hit`/`num`, as a string.""" 

+

27 if num == 0: 

+

28 return "1" 

+

29 else: 

+

30 return "%.4g" % (float(hit) / num) 

+

31 

+

32 

+
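A few worked values for rate(), assuming it is importable from coverage.xmlreport:

    from coverage.xmlreport import rate

    assert rate(0, 0) == "1"        # nothing to cover counts as fully covered
    assert rate(3, 4) == "0.75"
    assert rate(2, 3) == "0.6667"   # "%.4g" keeps four significant digits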

33class XmlReporter(object): 

+

34 """A reporter for writing Cobertura-style XML coverage results.""" 

+

35 

+

36 def __init__(self, coverage): 

+

37 self.coverage = coverage 

+

38 self.config = self.coverage.config 

+

39 

+

40 self.source_paths = set() 

+

41 if self.config.source: 

+

42 for src in self.config.source: 

+

43 if os.path.exists(src): 

+

44 if not self.config.relative_files: 

+

45 src = files.canonical_filename(src) 

+

46 self.source_paths.add(src) 

+

47 self.packages = {} 

+

48 self.xml_out = None 

+

49 

+

50 def report(self, morfs, outfile=None): 

+

51 """Generate a Cobertura-compatible XML report for `morfs`. 

+

52 

+

53 `morfs` is a list of modules or file names. 

+

54 

+

55 `outfile` is a file object to write the XML to. 

+

56 

+

57 """ 

+

58 # Initial setup. 

+

59 outfile = outfile or sys.stdout 

+

60 has_arcs = self.coverage.get_data().has_arcs() 

+

61 

+

62 # Create the DOM that will store the data. 

+

63 impl = xml.dom.minidom.getDOMImplementation() 

+

64 self.xml_out = impl.createDocument(None, "coverage", None) 

+

65 

+

66 # Write header stuff. 

+

67 xcoverage = self.xml_out.documentElement 

+

68 xcoverage.setAttribute("version", __version__) 

+

69 xcoverage.setAttribute("timestamp", str(int(time.time()*1000))) 

+

70 xcoverage.appendChild(self.xml_out.createComment( 

+

71 " Generated by coverage.py: %s " % __url__ 

+

72 )) 

+

73 xcoverage.appendChild(self.xml_out.createComment(" Based on %s " % DTD_URL)) 

+

74 

+

75 # Call xml_file for each file in the data. 

+

76 for fr, analysis in get_analysis_to_report(self.coverage, morfs): 

+

77 self.xml_file(fr, analysis, has_arcs) 

+

78 

+

79 xsources = self.xml_out.createElement("sources") 

+

80 xcoverage.appendChild(xsources) 

+

81 

+

82 # Populate the XML DOM with the source info. 

+

83 for path in sorted(self.source_paths): 

+

84 xsource = self.xml_out.createElement("source") 

+

85 xsources.appendChild(xsource) 

+

86 txt = self.xml_out.createTextNode(path) 

+

87 xsource.appendChild(txt) 

+

88 

+

89 lnum_tot, lhits_tot = 0, 0 

+

90 bnum_tot, bhits_tot = 0, 0 

+

91 

+

92 xpackages = self.xml_out.createElement("packages") 

+

93 xcoverage.appendChild(xpackages) 

+

94 

+

95 # Populate the XML DOM with the package info. 

+

96 for pkg_name, pkg_data in sorted(iitems(self.packages)): 

+

97 class_elts, lhits, lnum, bhits, bnum = pkg_data 

+

98 xpackage = self.xml_out.createElement("package") 

+

99 xpackages.appendChild(xpackage) 

+

100 xclasses = self.xml_out.createElement("classes") 

+

101 xpackage.appendChild(xclasses) 

+

102 for _, class_elt in sorted(iitems(class_elts)): 

+

103 xclasses.appendChild(class_elt) 

+

104 xpackage.setAttribute("name", pkg_name.replace(os.sep, '.')) 

+

105 xpackage.setAttribute("line-rate", rate(lhits, lnum)) 

+

106 if has_arcs: 

+

107 branch_rate = rate(bhits, bnum) 

+

108 else: 

+

109 branch_rate = "0" 

+

110 xpackage.setAttribute("branch-rate", branch_rate) 

+

111 xpackage.setAttribute("complexity", "0") 

+

112 

+

113 lnum_tot += lnum 

+

114 lhits_tot += lhits 

+

115 bnum_tot += bnum 

+

116 bhits_tot += bhits 

+

117 

+

118 xcoverage.setAttribute("lines-valid", str(lnum_tot)) 

+

119 xcoverage.setAttribute("lines-covered", str(lhits_tot)) 

+

120 xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot)) 

+

121 if has_arcs: 

+

122 xcoverage.setAttribute("branches-valid", str(bnum_tot)) 

+

123 xcoverage.setAttribute("branches-covered", str(bhits_tot)) 

+

124 xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot)) 

+

125 else: 

+

126 xcoverage.setAttribute("branches-covered", "0") 

+

127 xcoverage.setAttribute("branches-valid", "0") 

+

128 xcoverage.setAttribute("branch-rate", "0") 

+

129 xcoverage.setAttribute("complexity", "0") 

+

130 

+

131 # Write the output file. 

+

132 outfile.write(serialize_xml(self.xml_out)) 

+

133 

+

134 # Return the total percentage. 

+

135 denom = lnum_tot + bnum_tot 

+

136 if denom == 0: 

+

137 pct = 0.0 

+

138 else: 

+

139 pct = 100.0 * (lhits_tot + bhits_tot) / denom 

+

140 return pct 

+

141 

+

142 def xml_file(self, fr, analysis, has_arcs): 

+

143 """Add to the XML report for a single file.""" 

+

144 

+

145 if self.config.skip_empty: 

+

146 if analysis.numbers.n_statements == 0: 

+

147 return 

+

148 

+

149 # Create the 'lines' and 'package' XML elements, which 

+

150 # are populated later. Note that a package == a directory. 

+

151 filename = fr.filename.replace("\\", "/") 

+

152 for source_path in self.source_paths: 

+

153 source_path = files.canonical_filename(source_path) 

+

154 if filename.startswith(source_path.replace("\\", "/") + "/"): 

+

155 rel_name = filename[len(source_path)+1:] 

+

156 break 

+

157 else: 

+

158 rel_name = fr.relative_filename() 

+

159 self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/")) 

+

160 

+

161 dirname = os.path.dirname(rel_name) or u"." 

+

162 dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth]) 

+

163 package_name = dirname.replace("/", ".") 

+

164 

+

165 package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0]) 

+

166 

+

167 xclass = self.xml_out.createElement("class") 

+

168 

+

169 xclass.appendChild(self.xml_out.createElement("methods")) 

+

170 

+

171 xlines = self.xml_out.createElement("lines") 

+

172 xclass.appendChild(xlines) 

+

173 

+

174 xclass.setAttribute("name", os.path.relpath(rel_name, dirname)) 

+

175 xclass.setAttribute("filename", rel_name.replace("\\", "/")) 

+

176 xclass.setAttribute("complexity", "0") 

+

177 

+

178 branch_stats = analysis.branch_stats() 

+

179 missing_branch_arcs = analysis.missing_branch_arcs() 

+

180 

+

181 # For each statement, create an XML 'line' element. 

+

182 for line in sorted(analysis.statements): 

+

183 xline = self.xml_out.createElement("line") 

+

184 xline.setAttribute("number", str(line)) 

+

185 

+

186 # Q: can we get info about the number of times a statement is 

+

187 # executed? If so, that should be recorded here. 

+

188 xline.setAttribute("hits", str(int(line not in analysis.missing))) 

+

189 

+

190 if has_arcs: 

+

191 if line in branch_stats: 

+

192 total, taken = branch_stats[line] 

+

193 xline.setAttribute("branch", "true") 

+

194 xline.setAttribute( 

+

195 "condition-coverage", 

+

196 "%d%% (%d/%d)" % (100*taken//total, taken, total) 

+

197 ) 

+

198 if line in missing_branch_arcs: 

+

199 annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]] 

+

200 xline.setAttribute("missing-branches", ",".join(annlines)) 

+

201 xlines.appendChild(xline) 

+

202 

+

203 class_lines = len(analysis.statements) 

+

204 class_hits = class_lines - len(analysis.missing) 

+

205 

+

206 if has_arcs: 

+

207 class_branches = sum(t for t, k in branch_stats.values()) 

+

208 missing_branches = sum(t - k for t, k in branch_stats.values()) 

+

209 class_br_hits = class_branches - missing_branches 

+

210 else: 

+

211 class_branches = 0.0 

+

212 class_br_hits = 0.0 

+

213 

+

214 # Finalize the statistics that are collected in the XML DOM. 

+

215 xclass.setAttribute("line-rate", rate(class_hits, class_lines)) 

+

216 if has_arcs: 

+

217 branch_rate = rate(class_br_hits, class_branches) 

+

218 else: 

+

219 branch_rate = "0" 

+

220 xclass.setAttribute("branch-rate", branch_rate) 

+

221 

+

222 package[0][rel_name] = xclass 

+

223 package[1] += class_hits 

+

224 package[2] += class_lines 

+

225 package[3] += class_br_hits 

+

226 package[4] += class_branches 

+

227 

+

228 

+

229def serialize_xml(dom): 

+

230 """Serialize a minidom node to XML.""" 

+

231 out = dom.toprettyxml() 

+

232 if env.PY2: 

+

233 out = out.encode("utf8") 

+

234 return out 

+
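The percentage returned at the end of report() folds line and branch hits together; with hypothetical totals:

    lhits_tot, lnum_tot = 450, 500    # hypothetical lines hit / lines valid
    bhits_tot, bnum_tot = 90, 100     # hypothetical branches hit / branches valid
    denom = lnum_tot + bnum_tot
    pct = 0.0 if denom == 0 else 100.0 * (lhits_tot + bhits_tot) / denom
    print(pct)   # 90.0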
diff --git a/reports/20210322_66173dc24d/htmlcov/favicon_32.png b/reports/20210322_66173dc24d/htmlcov/favicon_32.png
new file mode 100644
index 000000000..8649f0475
Binary files /dev/null and b/reports/20210322_66173dc24d/htmlcov/favicon_32.png differ
diff --git a/reports/20210322_66173dc24d/htmlcov/index.html b/reports/20210322_66173dc24d/htmlcov/index.html
new file mode 100644
index 000000000..27eea4d61
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/index.html
@@ -0,0 +1,1029 @@
+ Coverage report
Module    statements    missing    excluded    branches    partial    coverage
Total13112488649279511395.266%
coverage/__init__.py140000100.000%
coverage/__main__.py40000100.000%
coverage/annotate.py591028296.552%
coverage/backward.py1370614199.338%
coverage/bytecode.py100060100.000%
coverage/cmdline.py311312133498.423%
coverage/collector.py211782841158.305%
coverage/config.py263001080100.000%
coverage/context.py5711036477.419%
coverage/control.py3763481481488.931%
coverage/data.py543032491.860%
coverage/debug.py111513332194.406%
coverage/disposition.py20604058.333%
coverage/env.py521012196.875%
coverage/execfile.py20324075586.691%
coverage/files.py20551090297.627%
coverage/html.py24700960100.000%
coverage/inorout.py2675121521978.520%
coverage/jsonreport.py370080100.000%
coverage/misc.py16231064596.460%
coverage/multiproc.py638014385.714%
coverage/numbits.py5500240100.000%
coverage/parser.py6613563101198.558%
coverage/phystokens.py15300640100.000%
coverage/plugin.py671022085.507%
coverage/plugin_support.py16881014151.648%
coverage/python.py1365442196.629%
coverage/pytracer.py121102254012.000%
coverage/report.py4900340100.000%
coverage/results.py18301580100.000%
coverage/sqldata.py4882602261693.557%
coverage/summary.py10000460100.000%
coverage/templite.py15100660100.000%
coverage/tomlconfig.py1114224295.556%
coverage/version.py180060100.000%
coverage/xmlreport.py15000520100.000%
tests/__init__.py10000100.000%
tests/conftest.py37108195.556%
tests/coveragetest.py194012640100.000%
tests/covmain.zip/__main__.py40000100.000%
tests/covmodzip1.py220000.000%
tests/goldtest.py7609240100.000%
tests/helpers.py11700360100.000%
tests/mixins.py6900110100.000%
tests/modules/aa/__init__.py00000100.000%
tests/modules/aa/afile.py00000100.000%
tests/modules/aa/bb/__init__.py00000100.000%
tests/modules/aa/bb/bfile.py00000100.000%
tests/modules/aa/bb/cc/__init__.py00000100.000%
tests/modules/aa/bb/cc/cfile.py00000100.000%
tests/modules/pkg1/__init__.py10000100.000%
tests/modules/pkg1/__main__.py20000100.000%
tests/modules/pkg1/runmod2.py20000100.000%
tests/modules/pkg1/sub/__init__.py00000100.000%
tests/modules/pkg1/sub/__main__.py20000100.000%
tests/modules/pkg1/sub/runmod3.py20000100.000%
tests/modules/pkg2/__init__.py00000100.000%
tests/modules/plugins/__init__.py00000100.000%
tests/modules/plugins/a_plugin.py50000100.000%
tests/modules/plugins/another.py60000100.000%
tests/modules/process_test/__init__.py00000100.000%
tests/modules/process_test/try_execfile.py441027197.183%
tests/modules/runmod1.py20000100.000%
tests/osinfo.py270440100.000%
tests/plugin1.py24802061.538%
tests/plugin2.py271004054.839%
tests/plugin_config.py110000100.000%
tests/test_annotate.py350000100.000%
tests/test_api.py66215424099.854%
tests/test_arcs.py33502700100.000%
tests/test_backward.py140000100.000%
tests/test_cmdline.py345019340100.000%
tests/test_collector.py150020100.000%
tests/test_concurrency.py228036480100.000%
tests/test_config.py3470060100.000%
tests/test_context.py17400190100.000%
tests/test_coverage.py32400220100.000%
tests/test_data.py5630080100.000%
tests/test_debug.py11200100100.000%
tests/test_execfile.py1501212298.148%
tests/test_filereporter.py730000100.000%
tests/test_files.py20900260100.000%
tests/test_html.py55600280100.000%
tests/test_json.py340000100.000%
tests/test_misc.py990020100.000%
tests/test_mixins.py500000100.000%
tests/test_numbits.py980010199.074%
tests/test_oddball.py10506280100.000%
tests/test_parser.py16000120100.000%
tests/test_phystokens.py11900120100.000%
tests/test_plugins.py39301524199.760%
tests/test_process.py6280151260100.000%
tests/test_python.py370020100.000%
tests/test_results.py690000100.000%
tests/test_setup.py290000100.000%
tests/test_summary.py41102880100.000%
tests/test_templite.py1310060100.000%
tests/test_testing.py1930200100.000%
tests/test_version.py210020100.000%
tests/test_xml.py24403260100.000%
tests/zipmods.zip/encoded_cp1252.py40020100.000%
tests/zipmods.zip/encoded_gb2312.py40020100.000%
tests/zipmods.zip/encoded_hebrew.py40020100.000%
tests/zipmods.zip/encoded_shift_jis.py40020100.000%
tests/zipmods.zip/encoded_utf8.py40020100.000%
+


+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/jquery.ba-throttle-debounce.min.js b/reports/20210322_66173dc24d/htmlcov/jquery.ba-throttle-debounce.min.js new file mode 100644 index 000000000..648fe5d3c --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/jquery.ba-throttle-debounce.min.js @@ -0,0 +1,9 @@ +/* + * jQuery throttle / debounce - v1.1 - 3/7/2010 + * http://benalman.com/projects/jquery-throttle-debounce-plugin/ + * + * Copyright (c) 2010 "Cowboy" Ben Alman + * Dual licensed under the MIT and GPL licenses. + * http://benalman.com/about/license/ + */ +(function(b,c){var $=b.jQuery||b.Cowboy||(b.Cowboy={}),a;$.throttle=a=function(e,f,j,i){var h,d=0;if(typeof f!=="boolean"){i=j;j=f;f=c}function g(){var o=this,m=+new Date()-d,n=arguments;function l(){d=+new Date();j.apply(o,n)}function k(){h=c}if(i&&!h){l()}h&&clearTimeout(h);if(i===c&&m>e){l()}else{if(f!==true){h=setTimeout(i?k:l,i===c?e-m:e)}}}if($.guid){g.guid=j.guid=j.guid||$.guid++}return g};$.debounce=function(d,e,f){return f===c?a(d,e,false):a(d,f,e!==false)}})(this); diff --git a/reports/20210322_66173dc24d/htmlcov/jquery.hotkeys.js b/reports/20210322_66173dc24d/htmlcov/jquery.hotkeys.js new file mode 100644 index 000000000..09b21e03c --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/jquery.hotkeys.js @@ -0,0 +1,99 @@ +/* + * jQuery Hotkeys Plugin + * Copyright 2010, John Resig + * Dual licensed under the MIT or GPL Version 2 licenses. + * + * Based upon the plugin by Tzury Bar Yochay: + * http://github.com/tzuryby/hotkeys + * + * Original idea by: + * Binny V A, http://www.openjs.com/scripts/events/keyboard_shortcuts/ +*/ + +(function(jQuery){ + + jQuery.hotkeys = { + version: "0.8", + + specialKeys: { + 8: "backspace", 9: "tab", 13: "return", 16: "shift", 17: "ctrl", 18: "alt", 19: "pause", + 20: "capslock", 27: "esc", 32: "space", 33: "pageup", 34: "pagedown", 35: "end", 36: "home", + 37: "left", 38: "up", 39: "right", 40: "down", 45: "insert", 46: "del", + 96: "0", 97: "1", 98: "2", 99: "3", 100: "4", 101: "5", 102: "6", 103: "7", + 104: "8", 105: "9", 106: "*", 107: "+", 109: "-", 110: ".", 111 : "/", + 112: "f1", 113: "f2", 114: "f3", 115: "f4", 116: "f5", 117: "f6", 118: "f7", 119: "f8", + 120: "f9", 121: "f10", 122: "f11", 123: "f12", 144: "numlock", 145: "scroll", 191: "/", 224: "meta" + }, + + shiftNums: { + "`": "~", "1": "!", "2": "@", "3": "#", "4": "$", "5": "%", "6": "^", "7": "&", + "8": "*", "9": "(", "0": ")", "-": "_", "=": "+", ";": ": ", "'": "\"", ",": "<", + ".": ">", "/": "?", "\\": "|" + } + }; + + function keyHandler( handleObj ) { + // Only care when a possible input has been specified + if ( typeof handleObj.data !== "string" ) { + return; + } + + var origHandler = handleObj.handler, + keys = handleObj.data.toLowerCase().split(" "); + + handleObj.handler = function( event ) { + // Don't fire in text-accepting inputs that we didn't directly bind to + if ( this !== event.target && (/textarea|select/i.test( event.target.nodeName ) || + event.target.type === "text") ) { + return; + } + + // Keypress represents characters, not special keys + var special = event.type !== "keypress" && jQuery.hotkeys.specialKeys[ event.which ], + character = String.fromCharCode( event.which ).toLowerCase(), + key, modif = "", possible = {}; + + // check combinations (alt|ctrl|shift+anything) + if ( event.altKey && special !== "alt" ) { + modif += "alt+"; + } + + if ( event.ctrlKey && special !== "ctrl" ) { + modif += "ctrl+"; + } + + // TODO: Need to make sure this works consistently across platforms + if 
( event.metaKey && !event.ctrlKey && special !== "meta" ) { + modif += "meta+"; + } + + if ( event.shiftKey && special !== "shift" ) { + modif += "shift+"; + } + + if ( special ) { + possible[ modif + special ] = true; + + } else { + possible[ modif + character ] = true; + possible[ modif + jQuery.hotkeys.shiftNums[ character ] ] = true; + + // "$" can be triggered as "Shift+4" or "Shift+$" or just "$" + if ( modif === "shift+" ) { + possible[ jQuery.hotkeys.shiftNums[ character ] ] = true; + } + } + + for ( var i = 0, l = keys.length; i < l; i++ ) { + if ( possible[ keys[i] ] ) { + return origHandler.apply( this, arguments ); + } + } + }; + } + + jQuery.each([ "keydown", "keyup", "keypress" ], function() { + jQuery.event.special[ this ] = { add: keyHandler }; + }); + +})( jQuery ); diff --git a/reports/20210322_66173dc24d/htmlcov/jquery.isonscreen.js b/reports/20210322_66173dc24d/htmlcov/jquery.isonscreen.js new file mode 100644 index 000000000..0182ebd21 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/jquery.isonscreen.js @@ -0,0 +1,53 @@ +/* Copyright (c) 2010 + * @author Laurence Wheway + * Dual licensed under the MIT (http://www.opensource.org/licenses/mit-license.php) + * and GPL (http://www.opensource.org/licenses/gpl-license.php) licenses. + * + * @version 1.2.0 + */ +(function($) { + jQuery.extend({ + isOnScreen: function(box, container) { + //ensure numbers come in as intgers (not strings) and remove 'px' is it's there + for(var i in box){box[i] = parseFloat(box[i])}; + for(var i in container){container[i] = parseFloat(container[i])}; + + if(!container){ + container = { + left: $(window).scrollLeft(), + top: $(window).scrollTop(), + width: $(window).width(), + height: $(window).height() + } + } + + if( box.left+box.width-container.left > 0 && + box.left < container.width+container.left && + box.top+box.height-container.top > 0 && + box.top < container.height+container.top + ) return true; + return false; + } + }) + + + jQuery.fn.isOnScreen = function (container) { + for(var i in container){container[i] = parseFloat(container[i])}; + + if(!container){ + container = { + left: $(window).scrollLeft(), + top: $(window).scrollTop(), + width: $(window).width(), + height: $(window).height() + } + } + + if( $(this).offset().left+$(this).width()-container.left > 0 && + $(this).offset().left < container.width+container.left && + $(this).offset().top+$(this).height()-container.top > 0 && + $(this).offset().top < container.height+container.top + ) return true; + return false; + } +})(jQuery); diff --git a/reports/20210322_66173dc24d/htmlcov/jquery.min.js b/reports/20210322_66173dc24d/htmlcov/jquery.min.js new file mode 100644 index 000000000..d2906fc9e --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/jquery.min.js @@ -0,0 +1,5 @@ +(function(global,factory){"use strict";if(typeof module==="object"&&typeof module.exports==="object"){module.exports=global.document?factory(global,true):function(w){if(!w.document){throw new Error("jQuery requires a window with a document")}return factory(w)}}else{factory(global)}})(typeof window!=="undefined"?window:this,function(window,noGlobal){var arr=[];var document=window.document;var getProto=Object.getPrototypeOf;var slice=arr.slice;var concat=arr.concat;var push=arr.push;var indexOf=arr.indexOf;var class2type={};var toString=class2type.toString;var hasOwn=class2type.hasOwnProperty;var fnToString=hasOwn.toString;var ObjectFunctionString=fnToString.call(Object);var support={};var isFunction=function isFunction(obj){return typeof 
obj==="function"&&typeof obj.nodeType!=="number"};var isWindow=function isWindow(obj){return obj!=null&&obj===obj.window};var preservedScriptAttributes={type:true,src:true,noModule:true};function DOMEval(code,doc,node){doc=doc||document;var i,script=doc.createElement("script");script.text=code;if(node){for(i in preservedScriptAttributes){if(node[i]){script[i]=node[i]}}}doc.head.appendChild(script).parentNode.removeChild(script)}function toType(obj){if(obj==null){return obj+""}return typeof obj==="object"||typeof obj==="function"?class2type[toString.call(obj)]||"object":typeof obj}var version="3.3.1",jQuery=function(selector,context){return new jQuery.fn.init(selector,context)},rtrim=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;jQuery.fn=jQuery.prototype={jquery:version,constructor:jQuery,length:0,toArray:function(){return slice.call(this)},get:function(num){if(num==null){return slice.call(this)}return num<0?this[num+this.length]:this[num]},pushStack:function(elems){var ret=jQuery.merge(this.constructor(),elems);ret.prevObject=this;return ret},each:function(callback){return jQuery.each(this,callback)},map:function(callback){return this.pushStack(jQuery.map(this,function(elem,i){return callback.call(elem,i,elem)}))},slice:function(){return this.pushStack(slice.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(i){var len=this.length,j=+i+(i<0?len:0);return this.pushStack(j>=0&&j0&&length-1 in obj}var Sizzle=function(window){var i,support,Expr,getText,isXML,tokenize,compile,select,outermostContext,sortInput,hasDuplicate,setDocument,document,docElem,documentIsHTML,rbuggyQSA,rbuggyMatches,matches,contains,expando="sizzle"+1*new Date,preferredDoc=window.document,dirruns=0,done=0,classCache=createCache(),tokenCache=createCache(),compilerCache=createCache(),sortOrder=function(a,b){if(a===b){hasDuplicate=true}return 0},hasOwn={}.hasOwnProperty,arr=[],pop=arr.pop,push_native=arr.push,push=arr.push,slice=arr.slice,indexOf=function(list,elem){var i=0,len=list.length;for(;i+~]|"+whitespace+")"+whitespace+"*"),rattributeQuotes=new RegExp("="+whitespace+"*([^\\]'\"]*?)"+whitespace+"*\\]","g"),rpseudo=new RegExp(pseudos),ridentifier=new RegExp("^"+identifier+"$"),matchExpr={ID:new RegExp("^#("+identifier+")"),CLASS:new RegExp("^\\.("+identifier+")"),TAG:new RegExp("^("+identifier+"|[*])"),ATTR:new RegExp("^"+attributes),PSEUDO:new RegExp("^"+pseudos),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+whitespace+"*(even|odd|(([+-]|)(\\d*)n|)"+whitespace+"*(?:([+-]|)"+whitespace+"*(\\d+)|))"+whitespace+"*\\)|)","i"),bool:new RegExp("^(?:"+booleans+")$","i"),needsContext:new RegExp("^"+whitespace+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+whitespace+"*((?:-\\d)?\\d*)"+whitespace+"*\\)|)(?=[^-]|$)","i")},rinputs=/^(?:input|select|textarea|button)$/i,rheader=/^h\d$/i,rnative=/^[^{]+\{\s*\[native \w/,rquickExpr=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,rsibling=/[+~]/,runescape=new RegExp("\\\\([\\da-f]{1,6}"+whitespace+"?|("+whitespace+")|.)","ig"),funescape=function(_,escaped,escapedWhitespace){var high="0x"+escaped-65536;return high!==high||escapedWhitespace?escaped:high<0?String.fromCharCode(high+65536):String.fromCharCode(high>>10|55296,high&1023|56320)},rcssescape=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,fcssescape=function(ch,asCodePoint){if(asCodePoint){if(ch==="\0"){return"�"}return ch.slice(0,-1)+"\\"+ch.charCodeAt(ch.length-1).toString(16)+" 
"}return"\\"+ch},unloadHandler=function(){setDocument()},disabledAncestor=addCombinator(function(elem){return elem.disabled===true&&("form"in elem||"label"in elem)},{dir:"parentNode",next:"legend"});try{push.apply(arr=slice.call(preferredDoc.childNodes),preferredDoc.childNodes);arr[preferredDoc.childNodes.length].nodeType}catch(e){push={apply:arr.length?function(target,els){push_native.apply(target,slice.call(els))}:function(target,els){var j=target.length,i=0;while(target[j++]=els[i++]){}target.length=j-1}}}function Sizzle(selector,context,results,seed){var m,i,elem,nid,match,groups,newSelector,newContext=context&&context.ownerDocument,nodeType=context?context.nodeType:9;results=results||[];if(typeof selector!=="string"||!selector||nodeType!==1&&nodeType!==9&&nodeType!==11){return results}if(!seed){if((context?context.ownerDocument||context:preferredDoc)!==document){setDocument(context)}context=context||document;if(documentIsHTML){if(nodeType!==11&&(match=rquickExpr.exec(selector))){if(m=match[1]){if(nodeType===9){if(elem=context.getElementById(m)){if(elem.id===m){results.push(elem);return results}}else{return results}}else{if(newContext&&(elem=newContext.getElementById(m))&&contains(context,elem)&&elem.id===m){results.push(elem);return results}}}else if(match[2]){push.apply(results,context.getElementsByTagName(selector));return results}else if((m=match[3])&&support.getElementsByClassName&&context.getElementsByClassName){push.apply(results,context.getElementsByClassName(m));return results}}if(support.qsa&&!compilerCache[selector+" "]&&(!rbuggyQSA||!rbuggyQSA.test(selector))){if(nodeType!==1){newContext=context;newSelector=selector}else if(context.nodeName.toLowerCase()!=="object"){if(nid=context.getAttribute("id")){nid=nid.replace(rcssescape,fcssescape)}else{context.setAttribute("id",nid=expando)}groups=tokenize(selector);i=groups.length;while(i--){groups[i]="#"+nid+" "+toSelector(groups[i])}newSelector=groups.join(",");newContext=rsibling.test(selector)&&testContext(context.parentNode)||context}if(newSelector){try{push.apply(results,newContext.querySelectorAll(newSelector));return results}catch(qsaError){}finally{if(nid===expando){context.removeAttribute("id")}}}}}}return select(selector.replace(rtrim,"$1"),context,results,seed)}function createCache(){var keys=[];function cache(key,value){if(keys.push(key+" ")>Expr.cacheLength){delete cache[keys.shift()]}return cache[key+" "]=value}return cache}function markFunction(fn){fn[expando]=true;return fn}function assert(fn){var el=document.createElement("fieldset");try{return!!fn(el)}catch(e){return false}finally{if(el.parentNode){el.parentNode.removeChild(el)}el=null}}function addHandle(attrs,handler){var arr=attrs.split("|"),i=arr.length;while(i--){Expr.attrHandle[arr[i]]=handler}}function siblingCheck(a,b){var cur=b&&a,diff=cur&&a.nodeType===1&&b.nodeType===1&&a.sourceIndex-b.sourceIndex;if(diff){return diff}if(cur){while(cur=cur.nextSibling){if(cur===b){return-1}}}return a?1:-1}function createInputPseudo(type){return function(elem){var name=elem.nodeName.toLowerCase();return name==="input"&&elem.type===type}}function createButtonPseudo(type){return function(elem){var name=elem.nodeName.toLowerCase();return(name==="input"||name==="button")&&elem.type===type}}function createDisabledPseudo(disabled){return function(elem){if("form"in elem){if(elem.parentNode&&elem.disabled===false){if("label"in elem){if("label"in elem.parentNode){return elem.parentNode.disabled===disabled}else{return elem.disabled===disabled}}return 
elem.isDisabled===disabled||elem.isDisabled!==!disabled&&disabledAncestor(elem)===disabled}return elem.disabled===disabled}else if("label"in elem){return elem.disabled===disabled}return false}}function createPositionalPseudo(fn){return markFunction(function(argument){argument=+argument;return markFunction(function(seed,matches){var j,matchIndexes=fn([],seed.length,argument),i=matchIndexes.length;while(i--){if(seed[j=matchIndexes[i]]){seed[j]=!(matches[j]=seed[j])}}})})}function testContext(context){return context&&typeof context.getElementsByTagName!=="undefined"&&context}support=Sizzle.support={};isXML=Sizzle.isXML=function(elem){var documentElement=elem&&(elem.ownerDocument||elem).documentElement;return documentElement?documentElement.nodeName!=="HTML":false};setDocument=Sizzle.setDocument=function(node){var hasCompare,subWindow,doc=node?node.ownerDocument||node:preferredDoc;if(doc===document||doc.nodeType!==9||!doc.documentElement){return document}document=doc;docElem=document.documentElement;documentIsHTML=!isXML(document);if(preferredDoc!==document&&(subWindow=document.defaultView)&&subWindow.top!==subWindow){if(subWindow.addEventListener){subWindow.addEventListener("unload",unloadHandler,false)}else if(subWindow.attachEvent){subWindow.attachEvent("onunload",unloadHandler)}}support.attributes=assert(function(el){el.className="i";return!el.getAttribute("className")});support.getElementsByTagName=assert(function(el){el.appendChild(document.createComment(""));return!el.getElementsByTagName("*").length});support.getElementsByClassName=rnative.test(document.getElementsByClassName);support.getById=assert(function(el){docElem.appendChild(el).id=expando;return!document.getElementsByName||!document.getElementsByName(expando).length});if(support.getById){Expr.filter["ID"]=function(id){var attrId=id.replace(runescape,funescape);return function(elem){return elem.getAttribute("id")===attrId}};Expr.find["ID"]=function(id,context){if(typeof context.getElementById!=="undefined"&&documentIsHTML){var elem=context.getElementById(id);return elem?[elem]:[]}}}else{Expr.filter["ID"]=function(id){var attrId=id.replace(runescape,funescape);return function(elem){var node=typeof elem.getAttributeNode!=="undefined"&&elem.getAttributeNode("id");return node&&node.value===attrId}};Expr.find["ID"]=function(id,context){if(typeof context.getElementById!=="undefined"&&documentIsHTML){var node,i,elems,elem=context.getElementById(id);if(elem){node=elem.getAttributeNode("id");if(node&&node.value===id){return[elem]}elems=context.getElementsByName(id);i=0;while(elem=elems[i++]){node=elem.getAttributeNode("id");if(node&&node.value===id){return[elem]}}}return[]}}}Expr.find["TAG"]=support.getElementsByTagName?function(tag,context){if(typeof context.getElementsByTagName!=="undefined"){return context.getElementsByTagName(tag)}else if(support.qsa){return context.querySelectorAll(tag)}}:function(tag,context){var elem,tmp=[],i=0,results=context.getElementsByTagName(tag);if(tag==="*"){while(elem=results[i++]){if(elem.nodeType===1){tmp.push(elem)}}return tmp}return results};Expr.find["CLASS"]=support.getElementsByClassName&&function(className,context){if(typeof context.getElementsByClassName!=="undefined"&&documentIsHTML){return 
context.getElementsByClassName(className)}};rbuggyMatches=[];rbuggyQSA=[];if(support.qsa=rnative.test(document.querySelectorAll)){assert(function(el){docElem.appendChild(el).innerHTML=""+"";if(el.querySelectorAll("[msallowcapture^='']").length){rbuggyQSA.push("[*^$]="+whitespace+"*(?:''|\"\")")}if(!el.querySelectorAll("[selected]").length){rbuggyQSA.push("\\["+whitespace+"*(?:value|"+booleans+")")}if(!el.querySelectorAll("[id~="+expando+"-]").length){rbuggyQSA.push("~=")}if(!el.querySelectorAll(":checked").length){rbuggyQSA.push(":checked")}if(!el.querySelectorAll("a#"+expando+"+*").length){rbuggyQSA.push(".#.+[+~]")}});assert(function(el){el.innerHTML=""+"";var input=document.createElement("input");input.setAttribute("type","hidden");el.appendChild(input).setAttribute("name","D");if(el.querySelectorAll("[name=d]").length){rbuggyQSA.push("name"+whitespace+"*[*^$|!~]?=")}if(el.querySelectorAll(":enabled").length!==2){rbuggyQSA.push(":enabled",":disabled")}docElem.appendChild(el).disabled=true;if(el.querySelectorAll(":disabled").length!==2){rbuggyQSA.push(":enabled",":disabled")}el.querySelectorAll("*,:x");rbuggyQSA.push(",.*:")})}if(support.matchesSelector=rnative.test(matches=docElem.matches||docElem.webkitMatchesSelector||docElem.mozMatchesSelector||docElem.oMatchesSelector||docElem.msMatchesSelector)){assert(function(el){support.disconnectedMatch=matches.call(el,"*");matches.call(el,"[s!='']:x");rbuggyMatches.push("!=",pseudos)})}rbuggyQSA=rbuggyQSA.length&&new RegExp(rbuggyQSA.join("|"));rbuggyMatches=rbuggyMatches.length&&new RegExp(rbuggyMatches.join("|"));hasCompare=rnative.test(docElem.compareDocumentPosition);contains=hasCompare||rnative.test(docElem.contains)?function(a,b){var adown=a.nodeType===9?a.documentElement:a,bup=b&&b.parentNode;return a===bup||!!(bup&&bup.nodeType===1&&(adown.contains?adown.contains(bup):a.compareDocumentPosition&&a.compareDocumentPosition(bup)&16))}:function(a,b){if(b){while(b=b.parentNode){if(b===a){return true}}}return false};sortOrder=hasCompare?function(a,b){if(a===b){hasDuplicate=true;return 0}var compare=!a.compareDocumentPosition-!b.compareDocumentPosition;if(compare){return compare}compare=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1;if(compare&1||!support.sortDetached&&b.compareDocumentPosition(a)===compare){if(a===document||a.ownerDocument===preferredDoc&&contains(preferredDoc,a)){return-1}if(b===document||b.ownerDocument===preferredDoc&&contains(preferredDoc,b)){return 1}return sortInput?indexOf(sortInput,a)-indexOf(sortInput,b):0}return compare&4?-1:1}:function(a,b){if(a===b){hasDuplicate=true;return 0}var cur,i=0,aup=a.parentNode,bup=b.parentNode,ap=[a],bp=[b];if(!aup||!bup){return a===document?-1:b===document?1:aup?-1:bup?1:sortInput?indexOf(sortInput,a)-indexOf(sortInput,b):0}else if(aup===bup){return siblingCheck(a,b)}cur=a;while(cur=cur.parentNode){ap.unshift(cur)}cur=b;while(cur=cur.parentNode){bp.unshift(cur)}while(ap[i]===bp[i]){i++}return i?siblingCheck(ap[i],bp[i]):ap[i]===preferredDoc?-1:bp[i]===preferredDoc?1:0};return document};Sizzle.matches=function(expr,elements){return Sizzle(expr,null,null,elements)};Sizzle.matchesSelector=function(elem,expr){if((elem.ownerDocument||elem)!==document){setDocument(elem)}expr=expr.replace(rattributeQuotes,"='$1']");if(support.matchesSelector&&documentIsHTML&&!compilerCache[expr+" "]&&(!rbuggyMatches||!rbuggyMatches.test(expr))&&(!rbuggyQSA||!rbuggyQSA.test(expr))){try{var 
ret=matches.call(elem,expr);if(ret||support.disconnectedMatch||elem.document&&elem.document.nodeType!==11){return ret}}catch(e){}}return Sizzle(expr,document,null,[elem]).length>0};Sizzle.contains=function(context,elem){if((context.ownerDocument||context)!==document){setDocument(context)}return contains(context,elem)};Sizzle.attr=function(elem,name){if((elem.ownerDocument||elem)!==document){setDocument(elem)}var fn=Expr.attrHandle[name.toLowerCase()],val=fn&&hasOwn.call(Expr.attrHandle,name.toLowerCase())?fn(elem,name,!documentIsHTML):undefined;return val!==undefined?val:support.attributes||!documentIsHTML?elem.getAttribute(name):(val=elem.getAttributeNode(name))&&val.specified?val.value:null};Sizzle.escape=function(sel){return(sel+"").replace(rcssescape,fcssescape)};Sizzle.error=function(msg){throw new Error("Syntax error, unrecognized expression: "+msg)};Sizzle.uniqueSort=function(results){var elem,duplicates=[],j=0,i=0;hasDuplicate=!support.detectDuplicates;sortInput=!support.sortStable&&results.slice(0);results.sort(sortOrder);if(hasDuplicate){while(elem=results[i++]){if(elem===results[i]){j=duplicates.push(i)}}while(j--){results.splice(duplicates[j],1)}}sortInput=null;return results};getText=Sizzle.getText=function(elem){var node,ret="",i=0,nodeType=elem.nodeType;if(!nodeType){while(node=elem[i++]){ret+=getText(node)}}else if(nodeType===1||nodeType===9||nodeType===11){if(typeof elem.textContent==="string"){return elem.textContent}else{for(elem=elem.firstChild;elem;elem=elem.nextSibling){ret+=getText(elem)}}}else if(nodeType===3||nodeType===4){return elem.nodeValue}return ret};Expr=Sizzle.selectors={cacheLength:50,createPseudo:markFunction,match:matchExpr,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:true}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:true},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(match){match[1]=match[1].replace(runescape,funescape);match[3]=(match[3]||match[4]||match[5]||"").replace(runescape,funescape);if(match[2]==="~="){match[3]=" "+match[3]+" "}return match.slice(0,4)},CHILD:function(match){match[1]=match[1].toLowerCase();if(match[1].slice(0,3)==="nth"){if(!match[3]){Sizzle.error(match[0])}match[4]=+(match[4]?match[5]+(match[6]||1):2*(match[3]==="even"||match[3]==="odd"));match[5]=+(match[7]+match[8]||match[3]==="odd")}else if(match[3]){Sizzle.error(match[0])}return match},PSEUDO:function(match){var excess,unquoted=!match[6]&&match[2];if(matchExpr["CHILD"].test(match[0])){return null}if(match[3]){match[2]=match[4]||match[5]||""}else if(unquoted&&rpseudo.test(unquoted)&&(excess=tokenize(unquoted,true))&&(excess=unquoted.indexOf(")",unquoted.length-excess)-unquoted.length)){match[0]=match[0].slice(0,excess);match[2]=unquoted.slice(0,excess)}return match.slice(0,3)}},filter:{TAG:function(nodeNameSelector){var nodeName=nodeNameSelector.replace(runescape,funescape).toLowerCase();return nodeNameSelector==="*"?function(){return true}:function(elem){return elem.nodeName&&elem.nodeName.toLowerCase()===nodeName}},CLASS:function(className){var pattern=classCache[className+" "];return pattern||(pattern=new RegExp("(^|"+whitespace+")"+className+"("+whitespace+"|$)"))&&classCache(className,function(elem){return pattern.test(typeof elem.className==="string"&&elem.className||typeof elem.getAttribute!=="undefined"&&elem.getAttribute("class")||"")})},ATTR:function(name,operator,check){return function(elem){var result=Sizzle.attr(elem,name);if(result==null){return operator==="!="}if(!operator){return true}result+="";return 
operator==="="?result===check:operator==="!="?result!==check:operator==="^="?check&&result.indexOf(check)===0:operator==="*="?check&&result.indexOf(check)>-1:operator==="$="?check&&result.slice(-check.length)===check:operator==="~="?(" "+result.replace(rwhitespace," ")+" ").indexOf(check)>-1:operator==="|="?result===check||result.slice(0,check.length+1)===check+"-":false}},CHILD:function(type,what,argument,first,last){var simple=type.slice(0,3)!=="nth",forward=type.slice(-4)!=="last",ofType=what==="of-type";return first===1&&last===0?function(elem){return!!elem.parentNode}:function(elem,context,xml){var cache,uniqueCache,outerCache,node,nodeIndex,start,dir=simple!==forward?"nextSibling":"previousSibling",parent=elem.parentNode,name=ofType&&elem.nodeName.toLowerCase(),useCache=!xml&&!ofType,diff=false;if(parent){if(simple){while(dir){node=elem;while(node=node[dir]){if(ofType?node.nodeName.toLowerCase()===name:node.nodeType===1){return false}}start=dir=type==="only"&&!start&&"nextSibling"}return true}start=[forward?parent.firstChild:parent.lastChild];if(forward&&useCache){node=parent;outerCache=node[expando]||(node[expando]={});uniqueCache=outerCache[node.uniqueID]||(outerCache[node.uniqueID]={});cache=uniqueCache[type]||[];nodeIndex=cache[0]===dirruns&&cache[1];diff=nodeIndex&&cache[2];node=nodeIndex&&parent.childNodes[nodeIndex];while(node=++nodeIndex&&node&&node[dir]||(diff=nodeIndex=0)||start.pop()){if(node.nodeType===1&&++diff&&node===elem){uniqueCache[type]=[dirruns,nodeIndex,diff];break}}}else{if(useCache){node=elem;outerCache=node[expando]||(node[expando]={});uniqueCache=outerCache[node.uniqueID]||(outerCache[node.uniqueID]={});cache=uniqueCache[type]||[];nodeIndex=cache[0]===dirruns&&cache[1];diff=nodeIndex}if(diff===false){while(node=++nodeIndex&&node&&node[dir]||(diff=nodeIndex=0)||start.pop()){if((ofType?node.nodeName.toLowerCase()===name:node.nodeType===1)&&++diff){if(useCache){outerCache=node[expando]||(node[expando]={});uniqueCache=outerCache[node.uniqueID]||(outerCache[node.uniqueID]={});uniqueCache[type]=[dirruns,diff]}if(node===elem){break}}}}}diff-=last;return diff===first||diff%first===0&&diff/first>=0}}},PSEUDO:function(pseudo,argument){var args,fn=Expr.pseudos[pseudo]||Expr.setFilters[pseudo.toLowerCase()]||Sizzle.error("unsupported pseudo: "+pseudo);if(fn[expando]){return fn(argument)}if(fn.length>1){args=[pseudo,pseudo,"",argument];return Expr.setFilters.hasOwnProperty(pseudo.toLowerCase())?markFunction(function(seed,matches){var idx,matched=fn(seed,argument),i=matched.length;while(i--){idx=indexOf(seed,matched[i]);seed[idx]=!(matches[idx]=matched[i])}}):function(elem){return fn(elem,0,args)}}return fn}},pseudos:{not:markFunction(function(selector){var input=[],results=[],matcher=compile(selector.replace(rtrim,"$1"));return matcher[expando]?markFunction(function(seed,matches,context,xml){var elem,unmatched=matcher(seed,null,xml,[]),i=seed.length;while(i--){if(elem=unmatched[i]){seed[i]=!(matches[i]=elem)}}}):function(elem,context,xml){input[0]=elem;matcher(input,null,xml,results);input[0]=null;return!results.pop()}}),has:markFunction(function(selector){return function(elem){return Sizzle(selector,elem).length>0}}),contains:markFunction(function(text){text=text.replace(runescape,funescape);return function(elem){return(elem.textContent||elem.innerText||getText(elem)).indexOf(text)>-1}}),lang:markFunction(function(lang){if(!ridentifier.test(lang||"")){Sizzle.error("unsupported lang: "+lang)}lang=lang.replace(runescape,funescape).toLowerCase();return function(elem){var 
elemLang;do{if(elemLang=documentIsHTML?elem.lang:elem.getAttribute("xml:lang")||elem.getAttribute("lang")){elemLang=elemLang.toLowerCase();return elemLang===lang||elemLang.indexOf(lang+"-")===0}}while((elem=elem.parentNode)&&elem.nodeType===1);return false}}),target:function(elem){var hash=window.location&&window.location.hash;return hash&&hash.slice(1)===elem.id},root:function(elem){return elem===docElem},focus:function(elem){return elem===document.activeElement&&(!document.hasFocus||document.hasFocus())&&!!(elem.type||elem.href||~elem.tabIndex)},enabled:createDisabledPseudo(false),disabled:createDisabledPseudo(true),checked:function(elem){var nodeName=elem.nodeName.toLowerCase();return nodeName==="input"&&!!elem.checked||nodeName==="option"&&!!elem.selected},selected:function(elem){if(elem.parentNode){elem.parentNode.selectedIndex}return elem.selected===true},empty:function(elem){for(elem=elem.firstChild;elem;elem=elem.nextSibling){if(elem.nodeType<6){return false}}return true},parent:function(elem){return!Expr.pseudos["empty"](elem)},header:function(elem){return rheader.test(elem.nodeName)},input:function(elem){return rinputs.test(elem.nodeName)},button:function(elem){var name=elem.nodeName.toLowerCase();return name==="input"&&elem.type==="button"||name==="button"},text:function(elem){var attr;return elem.nodeName.toLowerCase()==="input"&&elem.type==="text"&&((attr=elem.getAttribute("type"))==null||attr.toLowerCase()==="text")},first:createPositionalPseudo(function(){return[0]}),last:createPositionalPseudo(function(matchIndexes,length){return[length-1]}),eq:createPositionalPseudo(function(matchIndexes,length,argument){return[argument<0?argument+length:argument]}),even:createPositionalPseudo(function(matchIndexes,length){var i=0;for(;i=0;){matchIndexes.push(i)}return matchIndexes}),gt:createPositionalPseudo(function(matchIndexes,length,argument){var i=argument<0?argument+length:argument;for(;++i1?function(elem,context,xml){var i=matchers.length;while(i--){if(!matchers[i](elem,context,xml)){return false}}return true}:matchers[0]}function multipleContexts(selector,contexts,results){var i=0,len=contexts.length;for(;i-1){seed[temp]=!(results[temp]=elem)}}}}else{matcherOut=condense(matcherOut===results?matcherOut.splice(preexisting,matcherOut.length):matcherOut);if(postFinder){postFinder(null,results,matcherOut,xml)}else{push.apply(results,matcherOut)}}})}function matcherFromTokens(tokens){var checkContext,matcher,j,len=tokens.length,leadingRelative=Expr.relative[tokens[0].type],implicitRelative=leadingRelative||Expr.relative[" "],i=leadingRelative?1:0,matchContext=addCombinator(function(elem){return elem===checkContext},implicitRelative,true),matchAnyContext=addCombinator(function(elem){return indexOf(checkContext,elem)>-1},implicitRelative,true),matchers=[function(elem,context,xml){var ret=!leadingRelative&&(xml||context!==outermostContext)||((checkContext=context).nodeType?matchContext(elem,context,xml):matchAnyContext(elem,context,xml));checkContext=null;return ret}];for(;i1&&elementMatcher(matchers),i>1&&toSelector(tokens.slice(0,i-1).concat({value:tokens[i-2].type===" "?"*":""})).replace(rtrim,"$1"),matcher,i0,byElement=elementMatchers.length>0,superMatcher=function(seed,context,xml,results,outermost){var 
elem,j,matcher,matchedCount=0,i="0",unmatched=seed&&[],setMatched=[],contextBackup=outermostContext,elems=seed||byElement&&Expr.find["TAG"]("*",outermost),dirrunsUnique=dirruns+=contextBackup==null?1:Math.random()||.1,len=elems.length;if(outermost){outermostContext=context===document||context||outermost}for(;i!==len&&(elem=elems[i])!=null;i++){if(byElement&&elem){j=0;if(!context&&elem.ownerDocument!==document){setDocument(elem);xml=!documentIsHTML}while(matcher=elementMatchers[j++]){if(matcher(elem,context||document,xml)){results.push(elem);break}}if(outermost){dirruns=dirrunsUnique}}if(bySet){if(elem=!matcher&&elem){matchedCount--}if(seed){unmatched.push(elem)}}}matchedCount+=i;if(bySet&&i!==matchedCount){j=0;while(matcher=setMatchers[j++]){matcher(unmatched,setMatched,context,xml)}if(seed){if(matchedCount>0){while(i--){if(!(unmatched[i]||setMatched[i])){setMatched[i]=pop.call(results)}}}setMatched=condense(setMatched)}push.apply(results,setMatched);if(outermost&&!seed&&setMatched.length>0&&matchedCount+setMatchers.length>1){Sizzle.uniqueSort(results)}}if(outermost){dirruns=dirrunsUnique;outermostContext=contextBackup}return unmatched};return bySet?markFunction(superMatcher):superMatcher}compile=Sizzle.compile=function(selector,match){var i,setMatchers=[],elementMatchers=[],cached=compilerCache[selector+" "];if(!cached){if(!match){match=tokenize(selector)}i=match.length;while(i--){cached=matcherFromTokens(match[i]);if(cached[expando]){setMatchers.push(cached)}else{elementMatchers.push(cached)}}cached=compilerCache(selector,matcherFromGroupMatchers(elementMatchers,setMatchers));cached.selector=selector}return cached};select=Sizzle.select=function(selector,context,results,seed){var i,tokens,token,type,find,compiled=typeof selector==="function"&&selector,match=!seed&&tokenize(selector=compiled.selector||selector);results=results||[];if(match.length===1){tokens=match[0]=match[0].slice(0);if(tokens.length>2&&(token=tokens[0]).type==="ID"&&context.nodeType===9&&documentIsHTML&&Expr.relative[tokens[1].type]){context=(Expr.find["ID"](token.matches[0].replace(runescape,funescape),context)||[])[0];if(!context){return results}else if(compiled){context=context.parentNode}selector=selector.slice(tokens.shift().value.length)}i=matchExpr["needsContext"].test(selector)?0:tokens.length;while(i--){token=tokens[i];if(Expr.relative[type=token.type]){break}if(find=Expr.find[type]){if(seed=find(token.matches[0].replace(runescape,funescape),rsibling.test(tokens[0].type)&&testContext(context.parentNode)||context)){tokens.splice(i,1);selector=seed.length&&toSelector(tokens);if(!selector){push.apply(results,seed);return results}break}}}}(compiled||compile(selector,match))(seed,context,!documentIsHTML,results,!context||rsibling.test(selector)&&testContext(context.parentNode)||context);return results};support.sortStable=expando.split("").sort(sortOrder).join("")===expando;support.detectDuplicates=!!hasDuplicate;setDocument();support.sortDetached=assert(function(el){return el.compareDocumentPosition(document.createElement("fieldset"))&1});if(!assert(function(el){el.innerHTML="";return el.firstChild.getAttribute("href")==="#"})){addHandle("type|href|height|width",function(elem,name,isXML){if(!isXML){return elem.getAttribute(name,name.toLowerCase()==="type"?1:2)}})}if(!support.attributes||!assert(function(el){el.innerHTML="";el.firstChild.setAttribute("value","");return el.firstChild.getAttribute("value")===""})){addHandle("value",function(elem,name,isXML){if(!isXML&&elem.nodeName.toLowerCase()==="input"){return 
elem.defaultValue}})}if(!assert(function(el){return el.getAttribute("disabled")==null})){addHandle(booleans,function(elem,name,isXML){var val;if(!isXML){return elem[name]===true?name.toLowerCase():(val=elem.getAttributeNode(name))&&val.specified?val.value:null}})}return Sizzle}(window);jQuery.find=Sizzle;jQuery.expr=Sizzle.selectors;jQuery.expr[":"]=jQuery.expr.pseudos;jQuery.uniqueSort=jQuery.unique=Sizzle.uniqueSort;jQuery.text=Sizzle.getText;jQuery.isXMLDoc=Sizzle.isXML;jQuery.contains=Sizzle.contains;jQuery.escapeSelector=Sizzle.escape;var dir=function(elem,dir,until){var matched=[],truncate=until!==undefined;while((elem=elem[dir])&&elem.nodeType!==9){if(elem.nodeType===1){if(truncate&&jQuery(elem).is(until)){break}matched.push(elem)}}return matched};var siblings=function(n,elem){var matched=[];for(;n;n=n.nextSibling){if(n.nodeType===1&&n!==elem){matched.push(n)}}return matched};var rneedsContext=jQuery.expr.match.needsContext;function nodeName(elem,name){return elem.nodeName&&elem.nodeName.toLowerCase()===name.toLowerCase()}var rsingleTag=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function winnow(elements,qualifier,not){if(isFunction(qualifier)){return jQuery.grep(elements,function(elem,i){return!!qualifier.call(elem,i,elem)!==not})}if(qualifier.nodeType){return jQuery.grep(elements,function(elem){return elem===qualifier!==not})}if(typeof qualifier!=="string"){return jQuery.grep(elements,function(elem){return indexOf.call(qualifier,elem)>-1!==not})}return jQuery.filter(qualifier,elements,not)}jQuery.filter=function(expr,elems,not){var elem=elems[0];if(not){expr=":not("+expr+")"}if(elems.length===1&&elem.nodeType===1){return jQuery.find.matchesSelector(elem,expr)?[elem]:[]}return jQuery.find.matches(expr,jQuery.grep(elems,function(elem){return elem.nodeType===1}))};jQuery.fn.extend({find:function(selector){var i,ret,len=this.length,self=this;if(typeof selector!=="string"){return this.pushStack(jQuery(selector).filter(function(){for(i=0;i1?jQuery.uniqueSort(ret):ret},filter:function(selector){return this.pushStack(winnow(this,selector||[],false))},not:function(selector){return this.pushStack(winnow(this,selector||[],true))},is:function(selector){return!!winnow(this,typeof selector==="string"&&rneedsContext.test(selector)?jQuery(selector):selector||[],false).length}});var rootjQuery,rquickExpr=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,init=jQuery.fn.init=function(selector,context,root){var match,elem;if(!selector){return this}root=root||rootjQuery;if(typeof selector==="string"){if(selector[0]==="<"&&selector[selector.length-1]===">"&&selector.length>=3){match=[null,selector,null]}else{match=rquickExpr.exec(selector)}if(match&&(match[1]||!context)){if(match[1]){context=context instanceof jQuery?context[0]:context;jQuery.merge(this,jQuery.parseHTML(match[1],context&&context.nodeType?context.ownerDocument||context:document,true));if(rsingleTag.test(match[1])&&jQuery.isPlainObject(context)){for(match in context){if(isFunction(this[match])){this[match](context[match])}else{this.attr(match,context[match])}}}return this}else{elem=document.getElementById(match[2]);if(elem){this[0]=elem;this.length=1}return this}}else if(!context||context.jquery){return(context||root).find(selector)}else{return this.constructor(context).find(selector)}}else if(selector.nodeType){this[0]=selector;this.length=1;return this}else if(isFunction(selector)){return root.ready!==undefined?root.ready(selector):selector(jQuery)}return 
jQuery.makeArray(selector,this)};init.prototype=jQuery.fn;rootjQuery=jQuery(document);var rparentsprev=/^(?:parents|prev(?:Until|All))/,guaranteedUnique={children:true,contents:true,next:true,prev:true};jQuery.fn.extend({has:function(target){var targets=jQuery(target,this),l=targets.length;return this.filter(function(){var i=0;for(;i-1:cur.nodeType===1&&jQuery.find.matchesSelector(cur,selectors))){matched.push(cur);break}}}}return this.pushStack(matched.length>1?jQuery.uniqueSort(matched):matched)},index:function(elem){if(!elem){return this[0]&&this[0].parentNode?this.first().prevAll().length:-1}if(typeof elem==="string"){return indexOf.call(jQuery(elem),this[0])}return indexOf.call(this,elem.jquery?elem[0]:elem)},add:function(selector,context){return this.pushStack(jQuery.uniqueSort(jQuery.merge(this.get(),jQuery(selector,context))))},addBack:function(selector){return this.add(selector==null?this.prevObject:this.prevObject.filter(selector))}});function sibling(cur,dir){while((cur=cur[dir])&&cur.nodeType!==1){}return cur}jQuery.each({parent:function(elem){var parent=elem.parentNode;return parent&&parent.nodeType!==11?parent:null},parents:function(elem){return dir(elem,"parentNode")},parentsUntil:function(elem,i,until){return dir(elem,"parentNode",until)},next:function(elem){return sibling(elem,"nextSibling")},prev:function(elem){return sibling(elem,"previousSibling")},nextAll:function(elem){return dir(elem,"nextSibling")},prevAll:function(elem){return dir(elem,"previousSibling")},nextUntil:function(elem,i,until){return dir(elem,"nextSibling",until)},prevUntil:function(elem,i,until){return dir(elem,"previousSibling",until)},siblings:function(elem){return siblings((elem.parentNode||{}).firstChild,elem)},children:function(elem){return siblings(elem.firstChild)},contents:function(elem){if(nodeName(elem,"iframe")){return elem.contentDocument}if(nodeName(elem,"template")){elem=elem.content||elem}return jQuery.merge([],elem.childNodes)}},function(name,fn){jQuery.fn[name]=function(until,selector){var matched=jQuery.map(this,fn,until);if(name.slice(-5)!=="Until"){selector=until}if(selector&&typeof selector==="string"){matched=jQuery.filter(selector,matched)}if(this.length>1){if(!guaranteedUnique[name]){jQuery.uniqueSort(matched)}if(rparentsprev.test(name)){matched.reverse()}}return this.pushStack(matched)}});var rnothtmlwhite=/[^\x20\t\r\n\f]+/g;function createOptions(options){var object={};jQuery.each(options.match(rnothtmlwhite)||[],function(_,flag){object[flag]=true});return object}jQuery.Callbacks=function(options){options=typeof options==="string"?createOptions(options):jQuery.extend({},options);var firing,memory,fired,locked,list=[],queue=[],firingIndex=-1,fire=function(){locked=locked||options.once;fired=firing=true;for(;queue.length;firingIndex=-1){memory=queue.shift();while(++firingIndex-1){list.splice(index,1);if(index<=firingIndex){firingIndex--}}});return this},has:function(fn){return fn?jQuery.inArray(fn,list)>-1:list.length>0},empty:function(){if(list){list=[]}return this},disable:function(){locked=queue=[];list=memory="";return this},disabled:function(){return!list},lock:function(){locked=queue=[];if(!memory&&!firing){list=memory=""}return this},locked:function(){return!!locked},fireWith:function(context,args){if(!locked){args=args||[];args=[context,args.slice?args.slice():args];queue.push(args);if(!firing){fire()}}return this},fire:function(){self.fireWith(this,arguments);return this},fired:function(){return!!fired}};return self};function Identity(v){return v}function 
Thrower(ex){throw ex}function adoptValue(value,resolve,reject,noValue){var method;try{if(value&&isFunction(method=value.promise)){method.call(value).done(resolve).fail(reject)}else if(value&&isFunction(method=value.then)){method.call(value,resolve,reject)}else{resolve.apply(undefined,[value].slice(noValue))}}catch(value){reject.apply(undefined,[value])}}jQuery.extend({Deferred:function(func){var tuples=[["notify","progress",jQuery.Callbacks("memory"),jQuery.Callbacks("memory"),2],["resolve","done",jQuery.Callbacks("once memory"),jQuery.Callbacks("once memory"),0,"resolved"],["reject","fail",jQuery.Callbacks("once memory"),jQuery.Callbacks("once memory"),1,"rejected"]],state="pending",promise={state:function(){return state},always:function(){deferred.done(arguments).fail(arguments);return this},catch:function(fn){return promise.then(null,fn)},pipe:function(){var fns=arguments;return jQuery.Deferred(function(newDefer){jQuery.each(tuples,function(i,tuple){var fn=isFunction(fns[tuple[4]])&&fns[tuple[4]];deferred[tuple[1]](function(){var returned=fn&&fn.apply(this,arguments);if(returned&&isFunction(returned.promise)){returned.promise().progress(newDefer.notify).done(newDefer.resolve).fail(newDefer.reject)}else{newDefer[tuple[0]+"With"](this,fn?[returned]:arguments)}})});fns=null}).promise()},then:function(onFulfilled,onRejected,onProgress){var maxDepth=0;function resolve(depth,deferred,handler,special){return function(){var that=this,args=arguments,mightThrow=function(){var returned,then;if(depth=maxDepth){if(handler!==Thrower){that=undefined;args=[e]}deferred.rejectWith(that,args)}}};if(depth){process()}else{if(jQuery.Deferred.getStackHook){process.stackTrace=jQuery.Deferred.getStackHook()}window.setTimeout(process)}}}return jQuery.Deferred(function(newDefer){tuples[0][3].add(resolve(0,newDefer,isFunction(onProgress)?onProgress:Identity,newDefer.notifyWith));tuples[1][3].add(resolve(0,newDefer,isFunction(onFulfilled)?onFulfilled:Identity));tuples[2][3].add(resolve(0,newDefer,isFunction(onRejected)?onRejected:Thrower))}).promise()},promise:function(obj){return obj!=null?jQuery.extend(obj,promise):promise}},deferred={};jQuery.each(tuples,function(i,tuple){var list=tuple[2],stateString=tuple[5];promise[tuple[1]]=list.add;if(stateString){list.add(function(){state=stateString},tuples[3-i][2].disable,tuples[3-i][3].disable,tuples[0][2].lock,tuples[0][3].lock)}list.add(tuple[3].fire);deferred[tuple[0]]=function(){deferred[tuple[0]+"With"](this===deferred?undefined:this,arguments);return this};deferred[tuple[0]+"With"]=list.fireWith});promise.promise(deferred);if(func){func.call(deferred,deferred)}return deferred},when:function(singleValue){var remaining=arguments.length,i=remaining,resolveContexts=Array(i),resolveValues=slice.call(arguments),master=jQuery.Deferred(),updateFunc=function(i){return function(value){resolveContexts[i]=this;resolveValues[i]=arguments.length>1?slice.call(arguments):value;if(!--remaining){master.resolveWith(resolveContexts,resolveValues)}}};if(remaining<=1){adoptValue(singleValue,master.done(updateFunc(i)).resolve,master.reject,!remaining);if(master.state()==="pending"||isFunction(resolveValues[i]&&resolveValues[i].then)){return master.then()}}while(i--){adoptValue(resolveValues[i],updateFunc(i),master.reject)}return master.promise()}});var rerrorNames=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;jQuery.Deferred.exceptionHook=function(error,stack){if(window.console&&window.console.warn&&error&&rerrorNames.test(error.name)){window.console.warn("jQuery.Deferred 
exception: "+error.message,error.stack,stack)}};jQuery.readyException=function(error){window.setTimeout(function(){throw error})};var readyList=jQuery.Deferred();jQuery.fn.ready=function(fn){readyList.then(fn).catch(function(error){jQuery.readyException(error)});return this};jQuery.extend({isReady:false,readyWait:1,ready:function(wait){if(wait===true?--jQuery.readyWait:jQuery.isReady){return}jQuery.isReady=true;if(wait!==true&&--jQuery.readyWait>0){return}readyList.resolveWith(document,[jQuery])}});jQuery.ready.then=readyList.then;function completed(){document.removeEventListener("DOMContentLoaded",completed);window.removeEventListener("load",completed);jQuery.ready()}if(document.readyState==="complete"||document.readyState!=="loading"&&!document.documentElement.doScroll){window.setTimeout(jQuery.ready)}else{document.addEventListener("DOMContentLoaded",completed);window.addEventListener("load",completed)}var access=function(elems,fn,key,value,chainable,emptyGet,raw){var i=0,len=elems.length,bulk=key==null;if(toType(key)==="object"){chainable=true;for(i in key){access(elems,fn,i,key[i],true,emptyGet,raw)}}else if(value!==undefined){chainable=true;if(!isFunction(value)){raw=true}if(bulk){if(raw){fn.call(elems,value);fn=null}else{bulk=fn;fn=function(elem,key,value){return bulk.call(jQuery(elem),value)}}}if(fn){for(;i1,null,true)},removeData:function(key){return this.each(function(){dataUser.remove(this,key)})}});jQuery.extend({queue:function(elem,type,data){var queue;if(elem){type=(type||"fx")+"queue";queue=dataPriv.get(elem,type);if(data){if(!queue||Array.isArray(data)){queue=dataPriv.access(elem,type,jQuery.makeArray(data))}else{queue.push(data)}}return queue||[]}},dequeue:function(elem,type){type=type||"fx";var queue=jQuery.queue(elem,type),startLength=queue.length,fn=queue.shift(),hooks=jQuery._queueHooks(elem,type),next=function(){jQuery.dequeue(elem,type)};if(fn==="inprogress"){fn=queue.shift();startLength--}if(fn){if(type==="fx"){queue.unshift("inprogress")}delete hooks.stop;fn.call(elem,next,hooks)}if(!startLength&&hooks){hooks.empty.fire()}},_queueHooks:function(elem,type){var key=type+"queueHooks";return dataPriv.get(elem,key)||dataPriv.access(elem,key,{empty:jQuery.Callbacks("once memory").add(function(){dataPriv.remove(elem,[type+"queue",key])})})}});jQuery.fn.extend({queue:function(type,data){var setter=2;if(typeof type!=="string"){data=type;type="fx";setter--}if(arguments.length\x20\t\r\n\f]+)/i;var rscriptType=/^$|^module$|\/(?:java|ecma)script/i;var wrapMap={option:[1,""],thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};wrapMap.optgroup=wrapMap.option;wrapMap.tbody=wrapMap.tfoot=wrapMap.colgroup=wrapMap.caption=wrapMap.thead;wrapMap.th=wrapMap.td;function getAll(context,tag){var ret;if(typeof context.getElementsByTagName!=="undefined"){ret=context.getElementsByTagName(tag||"*")}else if(typeof context.querySelectorAll!=="undefined"){ret=context.querySelectorAll(tag||"*")}else{ret=[]}if(tag===undefined||tag&&nodeName(context,tag)){return jQuery.merge([context],ret)}return ret}function setGlobalEval(elems,refElements){var i=0,l=elems.length;for(;i-1){if(ignored){ignored.push(elem)}continue}contains=jQuery.contains(elem.ownerDocument,elem);tmp=getAll(fragment.appendChild(elem),"script");if(contains){setGlobalEval(tmp)}if(scripts){j=0;while(elem=tmp[j++]){if(rscriptType.test(elem.type||"")){scripts.push(elem)}}}}return fragment}(function(){var fragment=document.createDocumentFragment(),div=fragment.appendChild(document.createElement("div")),input=document.createElement("input");input.setAttribute("type","radio");input.setAttribute("checked","checked");input.setAttribute("name","t");div.appendChild(input);support.checkClone=div.cloneNode(true).cloneNode(true).lastChild.checked;div.innerHTML="";support.noCloneChecked=!!div.cloneNode(true).lastChild.defaultValue})();var documentElement=document.documentElement;var rkeyEvent=/^key/,rmouseEvent=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,rtypenamespace=/^([^.]*)(?:\.(.+)|)/;function returnTrue(){return true}function returnFalse(){return false}function safeActiveElement(){try{return document.activeElement}catch(err){}}function on(elem,types,selector,data,fn,one){var origFn,type;if(typeof types==="object"){if(typeof selector!=="string"){data=data||selector;selector=undefined}for(type in types){on(elem,type,selector,data,types[type],one)}return elem}if(data==null&&fn==null){fn=selector;data=selector=undefined}else if(fn==null){if(typeof selector==="string"){fn=data;data=undefined}else{fn=data;data=selector;selector=undefined}}if(fn===false){fn=returnFalse}else if(!fn){return elem}if(one===1){origFn=fn;fn=function(event){jQuery().off(event);return origFn.apply(this,arguments)};fn.guid=origFn.guid||(origFn.guid=jQuery.guid++)}return elem.each(function(){jQuery.event.add(this,types,fn,data,selector)})}jQuery.event={global:{},add:function(elem,types,handler,data,selector){var handleObjIn,eventHandle,tmp,events,t,handleObj,special,handlers,type,namespaces,origType,elemData=dataPriv.get(elem);if(!elemData){return}if(handler.handler){handleObjIn=handler;handler=handleObjIn.handler;selector=handleObjIn.selector}if(selector){jQuery.find.matchesSelector(documentElement,selector)}if(!handler.guid){handler.guid=jQuery.guid++}if(!(events=elemData.events)){events=elemData.events={}}if(!(eventHandle=elemData.handle)){eventHandle=elemData.handle=function(e){return typeof jQuery!=="undefined"&&jQuery.event.triggered!==e.type?jQuery.event.dispatch.apply(elem,arguments):undefined}}types=(types||"").match(rnothtmlwhite)||[""];t=types.length;while(t--){tmp=rtypenamespace.exec(types[t])||[];type=origType=tmp[1];namespaces=(tmp[2]||"").split(".").sort();if(!type){continue}special=jQuery.event.special[type]||{} 
+;type=(selector?special.delegateType:special.bindType)||type;special=jQuery.event.special[type]||{};handleObj=jQuery.extend({type:type,origType:origType,data:data,handler:handler,guid:handler.guid,selector:selector,needsContext:selector&&jQuery.expr.match.needsContext.test(selector),namespace:namespaces.join(".")},handleObjIn);if(!(handlers=events[type])){handlers=events[type]=[];handlers.delegateCount=0;if(!special.setup||special.setup.call(elem,data,namespaces,eventHandle)===false){if(elem.addEventListener){elem.addEventListener(type,eventHandle)}}}if(special.add){special.add.call(elem,handleObj);if(!handleObj.handler.guid){handleObj.handler.guid=handler.guid}}if(selector){handlers.splice(handlers.delegateCount++,0,handleObj)}else{handlers.push(handleObj)}jQuery.event.global[type]=true}},remove:function(elem,types,handler,selector,mappedTypes){var j,origCount,tmp,events,t,handleObj,special,handlers,type,namespaces,origType,elemData=dataPriv.hasData(elem)&&dataPriv.get(elem);if(!elemData||!(events=elemData.events)){return}types=(types||"").match(rnothtmlwhite)||[""];t=types.length;while(t--){tmp=rtypenamespace.exec(types[t])||[];type=origType=tmp[1];namespaces=(tmp[2]||"").split(".").sort();if(!type){for(type in events){jQuery.event.remove(elem,type+types[t],handler,selector,true)}continue}special=jQuery.event.special[type]||{};type=(selector?special.delegateType:special.bindType)||type;handlers=events[type]||[];tmp=tmp[2]&&new RegExp("(^|\\.)"+namespaces.join("\\.(?:.*\\.|)")+"(\\.|$)");origCount=j=handlers.length;while(j--){handleObj=handlers[j];if((mappedTypes||origType===handleObj.origType)&&(!handler||handler.guid===handleObj.guid)&&(!tmp||tmp.test(handleObj.namespace))&&(!selector||selector===handleObj.selector||selector==="**"&&handleObj.selector)){handlers.splice(j,1);if(handleObj.selector){handlers.delegateCount--}if(special.remove){special.remove.call(elem,handleObj)}}}if(origCount&&!handlers.length){if(!special.teardown||special.teardown.call(elem,namespaces,elemData.handle)===false){jQuery.removeEvent(elem,type,elemData.handle)}delete events[type]}}if(jQuery.isEmptyObject(events)){dataPriv.remove(elem,"handle events")}},dispatch:function(nativeEvent){var event=jQuery.event.fix(nativeEvent);var i,j,ret,matched,handleObj,handlerQueue,args=new Array(arguments.length),handlers=(dataPriv.get(this,"events")||{})[event.type]||[],special=jQuery.event.special[event.type]||{};args[0]=event;for(i=1;i=1)){for(;cur!==this;cur=cur.parentNode||this){if(cur.nodeType===1&&!(event.type==="click"&&cur.disabled===true)){matchedHandlers=[];matchedSelectors={};for(i=0;i-1:jQuery.find(sel,this,null,[cur]).length}if(matchedSelectors[sel]){matchedHandlers.push(handleObj)}}if(matchedHandlers.length){handlerQueue.push({elem:cur,handlers:matchedHandlers})}}}}cur=this;if(delegateCount\x20\t\r\n\f]*)[^>]*)\/>/gi,rnoInnerhtml=/\s*$/g;function manipulationTarget(elem,content){if(nodeName(elem,"table")&&nodeName(content.nodeType!==11?content:content.firstChild,"tr")){return jQuery(elem).children("tbody")[0]||elem}return elem}function disableScript(elem){elem.type=(elem.getAttribute("type")!==null)+"/"+elem.type;return elem}function restoreScript(elem){if((elem.type||"").slice(0,5)==="true/"){elem.type=elem.type.slice(5)}else{elem.removeAttribute("type")}return elem}function cloneCopyEvent(src,dest){var 
i,l,type,pdataOld,pdataCur,udataOld,udataCur,events;if(dest.nodeType!==1){return}if(dataPriv.hasData(src)){pdataOld=dataPriv.access(src);pdataCur=dataPriv.set(dest,pdataOld);events=pdataOld.events;if(events){delete pdataCur.handle;pdataCur.events={};for(type in events){for(i=0,l=events[type].length;i1&&typeof value==="string"&&!support.checkClone&&rchecked.test(value)){return collection.each(function(index){var self=collection.eq(index);if(valueIsFunction){args[0]=value.call(this,index,self.html())}domManip(self,args,callback,ignored)})}if(l){fragment=buildFragment(args,collection[0].ownerDocument,false,collection,ignored);first=fragment.firstChild;if(fragment.childNodes.length===1){fragment=first}if(first||ignored){scripts=jQuery.map(getAll(fragment,"script"),disableScript);hasScripts=scripts.length;for(;i")},clone:function(elem,dataAndEvents,deepDataAndEvents){var i,l,srcElements,destElements,clone=elem.cloneNode(true),inPage=jQuery.contains(elem.ownerDocument,elem);if(!support.noCloneChecked&&(elem.nodeType===1||elem.nodeType===11)&&!jQuery.isXMLDoc(elem)){destElements=getAll(clone);srcElements=getAll(elem);for(i=0,l=srcElements.length;i0){setGlobalEval(destElements,!inPage&&getAll(elem,"script"))}return clone},cleanData:function(elems){var data,elem,type,special=jQuery.event.special,i=0;for(;(elem=elems[i])!==undefined;i++){if(acceptData(elem)){if(data=elem[dataPriv.expando]){if(data.events){for(type in data.events){if(special[type]){jQuery.event.remove(elem,type)}else{jQuery.removeEvent(elem,type,data.handle)}}}elem[dataPriv.expando]=undefined}if(elem[dataUser.expando]){elem[dataUser.expando]=undefined}}}}});jQuery.fn.extend({detach:function(selector){return remove(this,selector,true)},remove:function(selector){return remove(this,selector)},text:function(value){return access(this,function(value){return value===undefined?jQuery.text(this):this.empty().each(function(){if(this.nodeType===1||this.nodeType===11||this.nodeType===9){this.textContent=value}})},null,value,arguments.length)},append:function(){return domManip(this,arguments,function(elem){if(this.nodeType===1||this.nodeType===11||this.nodeType===9){var target=manipulationTarget(this,elem);target.appendChild(elem)}})},prepend:function(){return domManip(this,arguments,function(elem){if(this.nodeType===1||this.nodeType===11||this.nodeType===9){var target=manipulationTarget(this,elem);target.insertBefore(elem,target.firstChild)}})},before:function(){return domManip(this,arguments,function(elem){if(this.parentNode){this.parentNode.insertBefore(elem,this)}})},after:function(){return domManip(this,arguments,function(elem){if(this.parentNode){this.parentNode.insertBefore(elem,this.nextSibling)}})},empty:function(){var elem,i=0;for(;(elem=this[i])!=null;i++){if(elem.nodeType===1){jQuery.cleanData(getAll(elem,false));elem.textContent=""}}return this},clone:function(dataAndEvents,deepDataAndEvents){dataAndEvents=dataAndEvents==null?false:dataAndEvents;deepDataAndEvents=deepDataAndEvents==null?dataAndEvents:deepDataAndEvents;return this.map(function(){return jQuery.clone(this,dataAndEvents,deepDataAndEvents)})},html:function(value){return access(this,function(value){var elem=this[0]||{},i=0,l=this.length;if(value===undefined&&elem.nodeType===1){return elem.innerHTML}if(typeof 
value==="string"&&!rnoInnerhtml.test(value)&&!wrapMap[(rtagName.exec(value)||["",""])[1].toLowerCase()]){value=jQuery.htmlPrefilter(value);try{for(;i=0){delta+=Math.max(0,Math.ceil(elem["offset"+dimension[0].toUpperCase()+dimension.slice(1)]-computedVal-delta-extra-.5))}return delta}function getWidthOrHeight(elem,dimension,extra){var styles=getStyles(elem),val=curCSS(elem,dimension,styles),isBorderBox=jQuery.css(elem,"boxSizing",false,styles)==="border-box",valueIsBorderBox=isBorderBox;if(rnumnonpx.test(val)){if(!extra){return val}val="auto"}valueIsBorderBox=valueIsBorderBox&&(support.boxSizingReliable()||val===elem.style[dimension]);if(val==="auto"||!parseFloat(val)&&jQuery.css(elem,"display",false,styles)==="inline"){val=elem["offset"+dimension[0].toUpperCase()+dimension.slice(1)];valueIsBorderBox=true}val=parseFloat(val)||0;return val+boxModelAdjustment(elem,dimension,extra||(isBorderBox?"border":"content"),valueIsBorderBox,styles,val)+"px"}jQuery.extend({cssHooks:{opacity:{get:function(elem,computed){if(computed){var ret=curCSS(elem,"opacity");return ret===""?"1":ret}}}},cssNumber:{animationIterationCount:true,columnCount:true,fillOpacity:true,flexGrow:true,flexShrink:true,fontWeight:true,lineHeight:true,opacity:true,order:true,orphans:true,widows:true,zIndex:true,zoom:true},cssProps:{},style:function(elem,name,value,extra){if(!elem||elem.nodeType===3||elem.nodeType===8||!elem.style){return}var ret,type,hooks,origName=camelCase(name),isCustomProp=rcustomProp.test(name),style=elem.style;if(!isCustomProp){name=finalPropName(origName)}hooks=jQuery.cssHooks[name]||jQuery.cssHooks[origName];if(value!==undefined){type=typeof value;if(type==="string"&&(ret=rcssNum.exec(value))&&ret[1]){value=adjustCSS(elem,name,ret);type="number"}if(value==null||value!==value){return}if(type==="number"){value+=ret&&ret[3]||(jQuery.cssNumber[origName]?"":"px")}if(!support.clearCloneStyle&&value===""&&name.indexOf("background")===0){style[name]="inherit"}if(!hooks||!("set"in hooks)||(value=hooks.set(elem,value,extra))!==undefined){if(isCustomProp){style.setProperty(name,value)}else{style[name]=value}}}else{if(hooks&&"get"in hooks&&(ret=hooks.get(elem,false,extra))!==undefined){return ret}return style[name]}},css:function(elem,name,extra,styles){var val,num,hooks,origName=camelCase(name),isCustomProp=rcustomProp.test(name);if(!isCustomProp){name=finalPropName(origName)}hooks=jQuery.cssHooks[name]||jQuery.cssHooks[origName];if(hooks&&"get"in hooks){val=hooks.get(elem,true,extra)}if(val===undefined){val=curCSS(elem,name,styles)}if(val==="normal"&&name in cssNormalTransform){val=cssNormalTransform[name]}if(extra===""||extra){num=parseFloat(val);return extra===true||isFinite(num)?num||0:val}return val}});jQuery.each(["height","width"],function(i,dimension){jQuery.cssHooks[dimension]={get:function(elem,computed,extra){if(computed){return rdisplayswap.test(jQuery.css(elem,"display"))&&(!elem.getClientRects().length||!elem.getBoundingClientRect().width)?swap(elem,cssShow,function(){return getWidthOrHeight(elem,dimension,extra)}):getWidthOrHeight(elem,dimension,extra)}},set:function(elem,value,extra){var 
matches,styles=getStyles(elem),isBorderBox=jQuery.css(elem,"boxSizing",false,styles)==="border-box",subtract=extra&&boxModelAdjustment(elem,dimension,extra,isBorderBox,styles);if(isBorderBox&&support.scrollboxSize()===styles.position){subtract-=Math.ceil(elem["offset"+dimension[0].toUpperCase()+dimension.slice(1)]-parseFloat(styles[dimension])-boxModelAdjustment(elem,dimension,"border",false,styles)-.5)}if(subtract&&(matches=rcssNum.exec(value))&&(matches[3]||"px")!=="px"){elem.style[dimension]=value;value=jQuery.css(elem,dimension)}return setPositiveNumber(elem,value,subtract)}}});jQuery.cssHooks.marginLeft=addGetHookIf(support.reliableMarginLeft,function(elem,computed){if(computed){return(parseFloat(curCSS(elem,"marginLeft"))||elem.getBoundingClientRect().left-swap(elem,{marginLeft:0},function(){return elem.getBoundingClientRect().left}))+"px"}});jQuery.each({margin:"",padding:"",border:"Width"},function(prefix,suffix){jQuery.cssHooks[prefix+suffix]={expand:function(value){var i=0,expanded={},parts=typeof value==="string"?value.split(" "):[value];for(;i<4;i++){expanded[prefix+cssExpand[i]+suffix]=parts[i]||parts[i-2]||parts[0]}return expanded}};if(prefix!=="margin"){jQuery.cssHooks[prefix+suffix].set=setPositiveNumber}});jQuery.fn.extend({css:function(name,value){return access(this,function(elem,name,value){var styles,len,map={},i=0;if(Array.isArray(name)){styles=getStyles(elem);len=name.length;for(;i1)}});function Tween(elem,options,prop,end,easing){return new Tween.prototype.init(elem,options,prop,end,easing)}jQuery.Tween=Tween;Tween.prototype={constructor:Tween,init:function(elem,options,prop,end,easing,unit){this.elem=elem;this.prop=prop;this.easing=easing||jQuery.easing._default;this.options=options;this.start=this.now=this.cur();this.end=end;this.unit=unit||(jQuery.cssNumber[prop]?"":"px")},cur:function(){var hooks=Tween.propHooks[this.prop];return hooks&&hooks.get?hooks.get(this):Tween.propHooks._default.get(this)},run:function(percent){var eased,hooks=Tween.propHooks[this.prop];if(this.options.duration){this.pos=eased=jQuery.easing[this.easing](percent,this.options.duration*percent,0,1,this.options.duration)}else{this.pos=eased=percent}this.now=(this.end-this.start)*eased+this.start;if(this.options.step){this.options.step.call(this.elem,this.now,this)}if(hooks&&hooks.set){hooks.set(this)}else{Tween.propHooks._default.set(this)}return this}};Tween.prototype.init.prototype=Tween.prototype;Tween.propHooks={_default:{get:function(tween){var result;if(tween.elem.nodeType!==1||tween.elem[tween.prop]!=null&&tween.elem.style[tween.prop]==null){return tween.elem[tween.prop]}result=jQuery.css(tween.elem,tween.prop,"");return!result||result==="auto"?0:result},set:function(tween){if(jQuery.fx.step[tween.prop]){jQuery.fx.step[tween.prop](tween)}else if(tween.elem.nodeType===1&&(tween.elem.style[jQuery.cssProps[tween.prop]]!=null||jQuery.cssHooks[tween.prop])){jQuery.style(tween.elem,tween.prop,tween.now+tween.unit)}else{tween.elem[tween.prop]=tween.now}}}};Tween.propHooks.scrollTop=Tween.propHooks.scrollLeft={set:function(tween){if(tween.elem.nodeType&&tween.elem.parentNode){tween.elem[tween.prop]=tween.now}}};jQuery.easing={linear:function(p){return p},swing:function(p){return.5-Math.cos(p*Math.PI)/2},_default:"swing"};jQuery.fx=Tween.prototype.init;jQuery.fx.step={};var fxNow,inProgress,rfxtypes=/^(?:toggle|show|hide)$/,rrun=/queueHooks$/;function 
schedule(){if(inProgress){if(document.hidden===false&&window.requestAnimationFrame){window.requestAnimationFrame(schedule)}else{window.setTimeout(schedule,jQuery.fx.interval)}jQuery.fx.tick()}}function createFxNow(){window.setTimeout(function(){fxNow=undefined});return fxNow=Date.now()}function genFx(type,includeWidth){var which,i=0,attrs={height:type};includeWidth=includeWidth?1:0;for(;i<4;i+=2-includeWidth){which=cssExpand[i];attrs["margin"+which]=attrs["padding"+which]=type}if(includeWidth){attrs.opacity=attrs.width=type}return attrs}function createTween(value,prop,animation){var tween,collection=(Animation.tweeners[prop]||[]).concat(Animation.tweeners["*"]),index=0,length=collection.length;for(;index1)},removeAttr:function(name){return this.each(function(){jQuery.removeAttr(this,name)})}});jQuery.extend({attr:function(elem,name,value){var ret,hooks,nType=elem.nodeType;if(nType===3||nType===8||nType===2){return}if(typeof elem.getAttribute==="undefined"){return jQuery.prop(elem,name,value)}if(nType!==1||!jQuery.isXMLDoc(elem)){hooks=jQuery.attrHooks[name.toLowerCase()]||(jQuery.expr.match.bool.test(name)?boolHook:undefined)}if(value!==undefined){if(value===null){jQuery.removeAttr(elem,name);return}if(hooks&&"set"in hooks&&(ret=hooks.set(elem,value,name))!==undefined){return ret}elem.setAttribute(name,value+"");return value}if(hooks&&"get"in hooks&&(ret=hooks.get(elem,name))!==null){return ret}ret=jQuery.find.attr(elem,name);return ret==null?undefined:ret},attrHooks:{type:{set:function(elem,value){if(!support.radioValue&&value==="radio"&&nodeName(elem,"input")){var val=elem.value;elem.setAttribute("type",value);if(val){elem.value=val}return value}}}},removeAttr:function(elem,value){var name,i=0,attrNames=value&&value.match(rnothtmlwhite);if(attrNames&&elem.nodeType===1){while(name=attrNames[i++]){elem.removeAttribute(name)}}}});boolHook={set:function(elem,value,name){if(value===false){jQuery.removeAttr(elem,name)}else{elem.setAttribute(name,name)}return name}};jQuery.each(jQuery.expr.match.bool.source.match(/\w+/g),function(i,name){var getter=attrHandle[name]||jQuery.find.attr;attrHandle[name]=function(elem,name,isXML){var ret,handle,lowercaseName=name.toLowerCase();if(!isXML){handle=attrHandle[lowercaseName];attrHandle[lowercaseName]=ret;ret=getter(elem,name,isXML)!=null?lowercaseName:null;attrHandle[lowercaseName]=handle}return ret}});var rfocusable=/^(?:input|select|textarea|button)$/i,rclickable=/^(?:a|area)$/i;jQuery.fn.extend({prop:function(name,value){return access(this,jQuery.prop,name,value,arguments.length>1)},removeProp:function(name){return this.each(function(){delete this[jQuery.propFix[name]||name]})}});jQuery.extend({prop:function(elem,name,value){var ret,hooks,nType=elem.nodeType;if(nType===3||nType===8||nType===2){return}if(nType!==1||!jQuery.isXMLDoc(elem)){name=jQuery.propFix[name]||name;hooks=jQuery.propHooks[name]}if(value!==undefined){if(hooks&&"set"in hooks&&(ret=hooks.set(elem,value,name))!==undefined){return ret}return elem[name]=value}if(hooks&&"get"in hooks&&(ret=hooks.get(elem,name))!==null){return ret}return elem[name]},propHooks:{tabIndex:{get:function(elem){var tabindex=jQuery.find.attr(elem,"tabindex");if(tabindex){return parseInt(tabindex,10)}if(rfocusable.test(elem.nodeName)||rclickable.test(elem.nodeName)&&elem.href){return 0}return-1}}},propFix:{for:"htmlFor",class:"className"}});if(!support.optSelected){jQuery.propHooks.selected={get:function(elem){var parent=elem.parentNode;if(parent&&parent.parentNode){parent.parentNode.selectedIndex}return 
null},set:function(elem){var parent=elem.parentNode;if(parent){parent.selectedIndex;if(parent.parentNode){parent.parentNode.selectedIndex}}}}}jQuery.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){jQuery.propFix[this.toLowerCase()]=this});function stripAndCollapse(value){var tokens=value.match(rnothtmlwhite)||[];return tokens.join(" ")}function getClass(elem){return elem.getAttribute&&elem.getAttribute("class")||""}function classesToArray(value){if(Array.isArray(value)){return value}if(typeof value==="string"){return value.match(rnothtmlwhite)||[]}return[]}jQuery.fn.extend({addClass:function(value){var classes,elem,cur,curValue,clazz,j,finalValue,i=0;if(isFunction(value)){return this.each(function(j){jQuery(this).addClass(value.call(this,j,getClass(this)))})}classes=classesToArray(value);if(classes.length){while(elem=this[i++]){curValue=getClass(elem);cur=elem.nodeType===1&&" "+stripAndCollapse(curValue)+" ";if(cur){j=0;while(clazz=classes[j++]){if(cur.indexOf(" "+clazz+" ")<0){cur+=clazz+" "}}finalValue=stripAndCollapse(cur);if(curValue!==finalValue){elem.setAttribute("class",finalValue)}}}}return this},removeClass:function(value){var classes,elem,cur,curValue,clazz,j,finalValue,i=0;if(isFunction(value)){return this.each(function(j){jQuery(this).removeClass(value.call(this,j,getClass(this)))})}if(!arguments.length){return this.attr("class","")}classes=classesToArray(value);if(classes.length){while(elem=this[i++]){curValue=getClass(elem);cur=elem.nodeType===1&&" "+stripAndCollapse(curValue)+" ";if(cur){j=0;while(clazz=classes[j++]){while(cur.indexOf(" "+clazz+" ")>-1){cur=cur.replace(" "+clazz+" "," ")}}finalValue=stripAndCollapse(cur);if(curValue!==finalValue){elem.setAttribute("class",finalValue)}}}}return this},toggleClass:function(value,stateVal){var type=typeof value,isValidValue=type==="string"||Array.isArray(value);if(typeof stateVal==="boolean"&&isValidValue){return stateVal?this.addClass(value):this.removeClass(value)}if(isFunction(value)){return this.each(function(i){jQuery(this).toggleClass(value.call(this,i,getClass(this),stateVal),stateVal)})}return this.each(function(){var className,i,self,classNames;if(isValidValue){i=0;self=jQuery(this);classNames=classesToArray(value);while(className=classNames[i++]){if(self.hasClass(className)){self.removeClass(className)}else{self.addClass(className)}}}else if(value===undefined||type==="boolean"){className=getClass(this);if(className){dataPriv.set(this,"__className__",className)}if(this.setAttribute){this.setAttribute("class",className||value===false?"":dataPriv.get(this,"__className__")||"")}}})},hasClass:function(selector){var className,elem,i=0;className=" "+selector+" ";while(elem=this[i++]){if(elem.nodeType===1&&(" "+stripAndCollapse(getClass(elem))+" ").indexOf(className)>-1){return true}}return false}});var rreturn=/\r/g;jQuery.fn.extend({val:function(value){var hooks,ret,valueIsFunction,elem=this[0];if(!arguments.length){if(elem){hooks=jQuery.valHooks[elem.type]||jQuery.valHooks[elem.nodeName.toLowerCase()];if(hooks&&"get"in hooks&&(ret=hooks.get(elem,"value"))!==undefined){return ret}ret=elem.value;if(typeof ret==="string"){return ret.replace(rreturn,"")}return ret==null?"":ret}return}valueIsFunction=isFunction(value);return this.each(function(i){var val;if(this.nodeType!==1){return}if(valueIsFunction){val=value.call(this,i,jQuery(this).val())}else{val=value}if(val==null){val=""}else if(typeof val==="number"){val+=""}else 
if(Array.isArray(val)){val=jQuery.map(val,function(value){return value==null?"":value+""})}hooks=jQuery.valHooks[this.type]||jQuery.valHooks[this.nodeName.toLowerCase()];if(!hooks||!("set"in hooks)||hooks.set(this,val,"value")===undefined){this.value=val}})}});jQuery.extend({valHooks:{option:{get:function(elem){var val=jQuery.find.attr(elem,"value");return val!=null?val:stripAndCollapse(jQuery.text(elem))}},select:{get:function(elem){var value,option,i,options=elem.options,index=elem.selectedIndex,one=elem.type==="select-one",values=one?null:[],max=one?index+1:options.length;if(index<0){i=max}else{i=one?index:0}for(;i-1){optionSet=true}}if(!optionSet){elem.selectedIndex=-1}return values}}}});jQuery.each(["radio","checkbox"],function(){jQuery.valHooks[this]={set:function(elem,value){if(Array.isArray(value)){return elem.checked=jQuery.inArray(jQuery(elem).val(),value)>-1}}};if(!support.checkOn){jQuery.valHooks[this].get=function(elem){return elem.getAttribute("value")===null?"on":elem.value}}});support.focusin="onfocusin"in window;var rfocusMorph=/^(?:focusinfocus|focusoutblur)$/,stopPropagationCallback=function(e){e.stopPropagation()};jQuery.extend(jQuery.event,{trigger:function(event,data,elem,onlyHandlers){var i,cur,tmp,bubbleType,ontype,handle,special,lastElement,eventPath=[elem||document],type=hasOwn.call(event,"type")?event.type:event,namespaces=hasOwn.call(event,"namespace")?event.namespace.split("."):[];cur=lastElement=tmp=elem=elem||document;if(elem.nodeType===3||elem.nodeType===8){return}if(rfocusMorph.test(type+jQuery.event.triggered)){return}if(type.indexOf(".")>-1){namespaces=type.split(".");type=namespaces.shift();namespaces.sort()}ontype=type.indexOf(":")<0&&"on"+type;event=event[jQuery.expando]?event:new jQuery.Event(type,typeof event==="object"&&event);event.isTrigger=onlyHandlers?2:3;event.namespace=namespaces.join(".");event.rnamespace=event.namespace?new RegExp("(^|\\.)"+namespaces.join("\\.(?:.*\\.|)")+"(\\.|$)"):null;event.result=undefined;if(!event.target){event.target=elem}data=data==null?[event]:jQuery.makeArray(data,[event]);special=jQuery.event.special[type]||{};if(!onlyHandlers&&special.trigger&&special.trigger.apply(elem,data)===false){return}if(!onlyHandlers&&!special.noBubble&&!isWindow(elem)){bubbleType=special.delegateType||type;if(!rfocusMorph.test(bubbleType+type)){cur=cur.parentNode}for(;cur;cur=cur.parentNode){eventPath.push(cur);tmp=cur}if(tmp===(elem.ownerDocument||document)){eventPath.push(tmp.defaultView||tmp.parentWindow||window)}}i=0;while((cur=eventPath[i++])&&!event.isPropagationStopped()){lastElement=cur;event.type=i>1?bubbleType:special.bindType||type;handle=(dataPriv.get(cur,"events")||{})[event.type]&&dataPriv.get(cur,"handle");if(handle){handle.apply(cur,data)}handle=ontype&&cur[ontype];if(handle&&handle.apply&&acceptData(cur)){event.result=handle.apply(cur,data);if(event.result===false){event.preventDefault()}}}event.type=type;if(!onlyHandlers&&!event.isDefaultPrevented()){if((!special._default||special._default.apply(eventPath.pop(),data)===false)&&acceptData(elem)){if(ontype&&isFunction(elem[type])&&!isWindow(elem)){tmp=elem[ontype];if(tmp){elem[ontype]=null}jQuery.event.triggered=type;if(event.isPropagationStopped()){lastElement.addEventListener(type,stopPropagationCallback)}elem[type]();if(event.isPropagationStopped()){lastElement.removeEventListener(type,stopPropagationCallback)}jQuery.event.triggered=undefined;if(tmp){elem[ontype]=tmp}}}}return event.result},simulate:function(type,elem,event){var e=jQuery.extend(new 
jQuery.Event,event,{type:type,isSimulated:true});jQuery.event.trigger(e,null,elem)}});jQuery.fn.extend({trigger:function(type,data){return this.each(function(){jQuery.event.trigger(type,data,this)})},triggerHandler:function(type,data){var elem=this[0];if(elem){return jQuery.event.trigger(type,data,elem,true)}}});if(!support.focusin){jQuery.each({focus:"focusin",blur:"focusout"},function(orig,fix){var handler=function(event){jQuery.event.simulate(fix,event.target,jQuery.event.fix(event))};jQuery.event.special[fix]={setup:function(){var doc=this.ownerDocument||this,attaches=dataPriv.access(doc,fix);if(!attaches){doc.addEventListener(orig,handler,true)}dataPriv.access(doc,fix,(attaches||0)+1)},teardown:function(){var doc=this.ownerDocument||this,attaches=dataPriv.access(doc,fix)-1;if(!attaches){doc.removeEventListener(orig,handler,true);dataPriv.remove(doc,fix)}else{dataPriv.access(doc,fix,attaches)}}}})}var location=window.location;var nonce=Date.now();var rquery=/\?/;jQuery.parseXML=function(data){var xml;if(!data||typeof data!=="string"){return null}try{xml=(new window.DOMParser).parseFromString(data,"text/xml")}catch(e){xml=undefined}if(!xml||xml.getElementsByTagName("parsererror").length){jQuery.error("Invalid XML: "+data)}return xml};var rbracket=/\[\]$/,rCRLF=/\r?\n/g,rsubmitterTypes=/^(?:submit|button|image|reset|file)$/i,rsubmittable=/^(?:input|select|textarea|keygen)/i;function buildParams(prefix,obj,traditional,add){var name;if(Array.isArray(obj)){jQuery.each(obj,function(i,v){if(traditional||rbracket.test(prefix)){add(prefix,v)}else{buildParams(prefix+"["+(typeof v==="object"&&v!=null?i:"")+"]",v,traditional,add)}})}else if(!traditional&&toType(obj)==="object"){for(name in obj){buildParams(prefix+"["+name+"]",obj[name],traditional,add)}}else{add(prefix,obj)}}jQuery.param=function(a,traditional){var prefix,s=[],add=function(key,valueOrFunction){var value=isFunction(valueOrFunction)?valueOrFunction():valueOrFunction;s[s.length]=encodeURIComponent(key)+"="+encodeURIComponent(value==null?"":value)};if(Array.isArray(a)||a.jquery&&!jQuery.isPlainObject(a)){jQuery.each(a,function(){add(this.name,this.value)})}else{for(prefix in a){buildParams(prefix,a[prefix],traditional,add)}}return s.join("&")};jQuery.fn.extend({serialize:function(){return jQuery.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var elements=jQuery.prop(this,"elements");return elements?jQuery.makeArray(elements):this}).filter(function(){var type=this.type;return this.name&&!jQuery(this).is(":disabled")&&rsubmittable.test(this.nodeName)&&!rsubmitterTypes.test(type)&&(this.checked||!rcheckableType.test(type))}).map(function(i,elem){var val=jQuery(this).val();if(val==null){return null}if(Array.isArray(val)){return jQuery.map(val,function(val){return{name:elem.name,value:val.replace(rCRLF,"\r\n")}})}return{name:elem.name,value:val.replace(rCRLF,"\r\n")}}).get()}});var r20=/%20/g,rhash=/#.*$/,rantiCache=/([?&])_=[^&]*/,rheaders=/^(.*?):[ \t]*([^\r\n]*)$/gm,rlocalProtocol=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,rnoContent=/^(?:GET|HEAD)$/,rprotocol=/^\/\//,prefilters={},transports={},allTypes="*/".concat("*"),originAnchor=document.createElement("a");originAnchor.href=location.href;function addToPrefiltersOrTransports(structure){return function(dataTypeExpression,func){if(typeof dataTypeExpression!=="string"){func=dataTypeExpression;dataTypeExpression="*"}var 
dataType,i=0,dataTypes=dataTypeExpression.toLowerCase().match(rnothtmlwhite)||[];if(isFunction(func)){while(dataType=dataTypes[i++]){if(dataType[0]==="+"){dataType=dataType.slice(1)||"*";(structure[dataType]=structure[dataType]||[]).unshift(func)}else{(structure[dataType]=structure[dataType]||[]).push(func)}}}}}function inspectPrefiltersOrTransports(structure,options,originalOptions,jqXHR){var inspected={},seekingTransport=structure===transports;function inspect(dataType){var selected;inspected[dataType]=true;jQuery.each(structure[dataType]||[],function(_,prefilterOrFactory){var dataTypeOrTransport=prefilterOrFactory(options,originalOptions,jqXHR);if(typeof dataTypeOrTransport==="string"&&!seekingTransport&&!inspected[dataTypeOrTransport]){options.dataTypes.unshift(dataTypeOrTransport);inspect(dataTypeOrTransport);return false}else if(seekingTransport){return!(selected=dataTypeOrTransport)}});return selected}return inspect(options.dataTypes[0])||!inspected["*"]&&inspect("*")}function ajaxExtend(target,src){var key,deep,flatOptions=jQuery.ajaxSettings.flatOptions||{};for(key in src){if(src[key]!==undefined){(flatOptions[key]?target:deep||(deep={}))[key]=src[key]}}if(deep){jQuery.extend(true,target,deep)}return target}function ajaxHandleResponses(s,jqXHR,responses){var ct,type,finalDataType,firstDataType,contents=s.contents,dataTypes=s.dataTypes;while(dataTypes[0]==="*"){dataTypes.shift();if(ct===undefined){ct=s.mimeType||jqXHR.getResponseHeader("Content-Type")}}if(ct){for(type in contents){if(contents[type]&&contents[type].test(ct)){dataTypes.unshift(type);break}}}if(dataTypes[0]in responses){finalDataType=dataTypes[0]}else{for(type in responses){if(!dataTypes[0]||s.converters[type+" "+dataTypes[0]]){finalDataType=type;break}if(!firstDataType){firstDataType=type}}finalDataType=finalDataType||firstDataType}if(finalDataType){if(finalDataType!==dataTypes[0]){dataTypes.unshift(finalDataType)}return responses[finalDataType]}}function ajaxConvert(s,response,jqXHR,isSuccess){var conv2,current,conv,tmp,prev,converters={},dataTypes=s.dataTypes.slice();if(dataTypes[1]){for(conv in s.converters){converters[conv.toLowerCase()]=s.converters[conv]}}current=dataTypes.shift();while(current){if(s.responseFields[current]){jqXHR[s.responseFields[current]]=response}if(!prev&&isSuccess&&s.dataFilter){response=s.dataFilter(response,s.dataType)}prev=current;current=dataTypes.shift();if(current){if(current==="*"){current=prev}else if(prev!=="*"&&prev!==current){conv=converters[prev+" "+current]||converters["* "+current];if(!conv){for(conv2 in converters){tmp=conv2.split(" ");if(tmp[1]===current){conv=converters[prev+" "+tmp[0]]||converters["* "+tmp[0]];if(conv){if(conv===true){conv=converters[conv2]}else if(converters[conv2]!==true){current=tmp[0];dataTypes.unshift(tmp[1])}break}}}}if(conv!==true){if(conv&&s.throws){response=conv(response)}else{try{response=conv(response)}catch(e){return{state:"parsererror",error:conv?e:"No conversion from "+prev+" to "+current}}}}}}}return{state:"success",data:response}}jQuery.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:location.href,type:"GET",isLocal:rlocalProtocol.test(location.protocol),global:true,processData:true,async:true,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":allTypes,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* 
text":String,"text html":true,"text json":JSON.parse,"text xml":jQuery.parseXML},flatOptions:{url:true,context:true}},ajaxSetup:function(target,settings){return settings?ajaxExtend(ajaxExtend(target,jQuery.ajaxSettings),settings):ajaxExtend(jQuery.ajaxSettings,target)},ajaxPrefilter:addToPrefiltersOrTransports(prefilters),ajaxTransport:addToPrefiltersOrTransports(transports),ajax:function(url,options){if(typeof url==="object"){options=url;url=undefined}options=options||{};var transport,cacheURL,responseHeadersString,responseHeaders,timeoutTimer,urlAnchor,completed,fireGlobals,i,uncached,s=jQuery.ajaxSetup({},options),callbackContext=s.context||s,globalEventContext=s.context&&(callbackContext.nodeType||callbackContext.jquery)?jQuery(callbackContext):jQuery.event,deferred=jQuery.Deferred(),completeDeferred=jQuery.Callbacks("once memory"),statusCode=s.statusCode||{},requestHeaders={},requestHeadersNames={},strAbort="canceled",jqXHR={readyState:0,getResponseHeader:function(key){var match;if(completed){if(!responseHeaders){responseHeaders={};while(match=rheaders.exec(responseHeadersString)){responseHeaders[match[1].toLowerCase()]=match[2]}}match=responseHeaders[key.toLowerCase()]}return match==null?null:match},getAllResponseHeaders:function(){return completed?responseHeadersString:null},setRequestHeader:function(name,value){if(completed==null){name=requestHeadersNames[name.toLowerCase()]=requestHeadersNames[name.toLowerCase()]||name;requestHeaders[name]=value}return this},overrideMimeType:function(type){if(completed==null){s.mimeType=type}return this},statusCode:function(map){var code;if(map){if(completed){jqXHR.always(map[jqXHR.status])}else{for(code in map){statusCode[code]=[statusCode[code],map[code]]}}}return this},abort:function(statusText){var finalText=statusText||strAbort;if(transport){transport.abort(finalText)}done(0,finalText);return this}};deferred.promise(jqXHR);s.url=((url||s.url||location.href)+"").replace(rprotocol,location.protocol+"//");s.type=options.method||options.type||s.method||s.type;s.dataTypes=(s.dataType||"*").toLowerCase().match(rnothtmlwhite)||[""];if(s.crossDomain==null){urlAnchor=document.createElement("a");try{urlAnchor.href=s.url;urlAnchor.href=urlAnchor.href;s.crossDomain=originAnchor.protocol+"//"+originAnchor.host!==urlAnchor.protocol+"//"+urlAnchor.host}catch(e){s.crossDomain=true}}if(s.data&&s.processData&&typeof s.data!=="string"){s.data=jQuery.param(s.data,s.traditional)}inspectPrefiltersOrTransports(prefilters,s,options,jqXHR);if(completed){return jqXHR}fireGlobals=jQuery.event&&s.global;if(fireGlobals&&jQuery.active++===0){jQuery.event.trigger("ajaxStart")}s.type=s.type.toUpperCase();s.hasContent=!rnoContent.test(s.type);cacheURL=s.url.replace(rhash,"");if(!s.hasContent){uncached=s.url.slice(cacheURL.length);if(s.data&&(s.processData||typeof s.data==="string")){cacheURL+=(rquery.test(cacheURL)?"&":"?")+s.data;delete s.data}if(s.cache===false){cacheURL=cacheURL.replace(rantiCache,"$1");uncached=(rquery.test(cacheURL)?"&":"?")+"_="+nonce+++uncached}s.url=cacheURL+uncached}else 
if(s.data&&s.processData&&(s.contentType||"").indexOf("application/x-www-form-urlencoded")===0){s.data=s.data.replace(r20,"+")}if(s.ifModified){if(jQuery.lastModified[cacheURL]){jqXHR.setRequestHeader("If-Modified-Since",jQuery.lastModified[cacheURL])}if(jQuery.etag[cacheURL]){jqXHR.setRequestHeader("If-None-Match",jQuery.etag[cacheURL])}}if(s.data&&s.hasContent&&s.contentType!==false||options.contentType){jqXHR.setRequestHeader("Content-Type",s.contentType)}jqXHR.setRequestHeader("Accept",s.dataTypes[0]&&s.accepts[s.dataTypes[0]]?s.accepts[s.dataTypes[0]]+(s.dataTypes[0]!=="*"?", "+allTypes+"; q=0.01":""):s.accepts["*"]);for(i in s.headers){jqXHR.setRequestHeader(i,s.headers[i])}if(s.beforeSend&&(s.beforeSend.call(callbackContext,jqXHR,s)===false||completed)){return jqXHR.abort()}strAbort="abort";completeDeferred.add(s.complete);jqXHR.done(s.success);jqXHR.fail(s.error);transport=inspectPrefiltersOrTransports(transports,s,options,jqXHR);if(!transport){done(-1,"No Transport")}else{jqXHR.readyState=1;if(fireGlobals){globalEventContext.trigger("ajaxSend",[jqXHR,s])}if(completed){return jqXHR}if(s.async&&s.timeout>0){timeoutTimer=window.setTimeout(function(){jqXHR.abort("timeout")},s.timeout)}try{completed=false;transport.send(requestHeaders,done)}catch(e){if(completed){throw e}done(-1,e)}}function done(status,nativeStatusText,responses,headers){var isSuccess,success,error,response,modified,statusText=nativeStatusText;if(completed){return}completed=true;if(timeoutTimer){window.clearTimeout(timeoutTimer)}transport=undefined;responseHeadersString=headers||"";jqXHR.readyState=status>0?4:0;isSuccess=status>=200&&status<300||status===304;if(responses){response=ajaxHandleResponses(s,jqXHR,responses)}response=ajaxConvert(s,response,jqXHR,isSuccess);if(isSuccess){if(s.ifModified){modified=jqXHR.getResponseHeader("Last-Modified");if(modified){jQuery.lastModified[cacheURL]=modified}modified=jqXHR.getResponseHeader("etag");if(modified){jQuery.etag[cacheURL]=modified}}if(status===204||s.type==="HEAD"){statusText="nocontent"}else if(status===304){statusText="notmodified"}else{statusText=response.state;success=response.data;error=response.error;isSuccess=!error}}else{error=statusText;if(status||!statusText){statusText="error";if(status<0){status=0}}}jqXHR.status=status;jqXHR.statusText=(nativeStatusText||statusText)+"";if(isSuccess){deferred.resolveWith(callbackContext,[success,statusText,jqXHR])}else{deferred.rejectWith(callbackContext,[jqXHR,statusText,error])}jqXHR.statusCode(statusCode);statusCode=undefined;if(fireGlobals){globalEventContext.trigger(isSuccess?"ajaxSuccess":"ajaxError",[jqXHR,s,isSuccess?success:error])}completeDeferred.fireWith(callbackContext,[jqXHR,statusText]);if(fireGlobals){globalEventContext.trigger("ajaxComplete",[jqXHR,s]);if(!--jQuery.active){jQuery.event.trigger("ajaxStop")}}}return jqXHR},getJSON:function(url,data,callback){return jQuery.get(url,data,callback,"json")},getScript:function(url,callback){return jQuery.get(url,undefined,callback,"script")}});jQuery.each(["get","post"],function(i,method){jQuery[method]=function(url,data,callback,type){if(isFunction(data)){type=type||callback;callback=data;data=undefined}return jQuery.ajax(jQuery.extend({url:url,type:method,dataType:type,data:data,success:callback},jQuery.isPlainObject(url)&&url))}});jQuery._evalUrl=function(url){return jQuery.ajax({url:url,type:"GET",dataType:"script",cache:true,async:false,global:false,throws:true})};jQuery.fn.extend({wrapAll:function(html){var 
wrap;if(this[0]){if(isFunction(html)){html=html.call(this[0])}wrap=jQuery(html,this[0].ownerDocument).eq(0).clone(true);if(this[0].parentNode){wrap.insertBefore(this[0])}wrap.map(function(){var elem=this;while(elem.firstElementChild){elem=elem.firstElementChild}return elem}).append(this)}return this},wrapInner:function(html){if(isFunction(html)){return this.each(function(i){jQuery(this).wrapInner(html.call(this,i))})}return this.each(function(){var self=jQuery(this),contents=self.contents();if(contents.length){contents.wrapAll(html)}else{self.append(html)}})},wrap:function(html){var htmlIsFunction=isFunction(html);return this.each(function(i){jQuery(this).wrapAll(htmlIsFunction?html.call(this,i):html)})},unwrap:function(selector){this.parent(selector).not("body").each(function(){jQuery(this).replaceWith(this.childNodes)});return this}});jQuery.expr.pseudos.hidden=function(elem){return!jQuery.expr.pseudos.visible(elem)};jQuery.expr.pseudos.visible=function(elem){return!!(elem.offsetWidth||elem.offsetHeight||elem.getClientRects().length)};jQuery.ajaxSettings.xhr=function(){try{return new window.XMLHttpRequest}catch(e){}};var xhrSuccessStatus={0:200,1223:204},xhrSupported=jQuery.ajaxSettings.xhr();support.cors=!!xhrSupported&&"withCredentials"in xhrSupported;support.ajax=xhrSupported=!!xhrSupported;jQuery.ajaxTransport(function(options){var callback,errorCallback;if(support.cors||xhrSupported&&!options.crossDomain){return{send:function(headers,complete){var i,xhr=options.xhr();xhr.open(options.type,options.url,options.async,options.username,options.password);if(options.xhrFields){for(i in options.xhrFields){xhr[i]=options.xhrFields[i]}}if(options.mimeType&&xhr.overrideMimeType){xhr.overrideMimeType(options.mimeType)}if(!options.crossDomain&&!headers["X-Requested-With"]){headers["X-Requested-With"]="XMLHttpRequest"}for(i in headers){xhr.setRequestHeader(i,headers[i])}callback=function(type){return function(){if(callback){callback=errorCallback=xhr.onload=xhr.onerror=xhr.onabort=xhr.ontimeout=xhr.onreadystatechange=null;if(type==="abort"){xhr.abort()}else if(type==="error"){if(typeof xhr.status!=="number"){complete(0,"error")}else{complete(xhr.status,xhr.statusText)}}else{complete(xhrSuccessStatus[xhr.status]||xhr.status,xhr.statusText,(xhr.responseType||"text")!=="text"||typeof xhr.responseText!=="string"?{binary:xhr.response}:{text:xhr.responseText},xhr.getAllResponseHeaders())}}}};xhr.onload=callback();errorCallback=xhr.onerror=xhr.ontimeout=callback("error");if(xhr.onabort!==undefined){xhr.onabort=errorCallback}else{xhr.onreadystatechange=function(){if(xhr.readyState===4){window.setTimeout(function(){if(callback){errorCallback()}})}}}callback=callback("abort");try{xhr.send(options.hasContent&&options.data||null)}catch(e){if(callback){throw e}}},abort:function(){if(callback){callback()}}}}});jQuery.ajaxPrefilter(function(s){if(s.crossDomain){s.contents.script=false}});jQuery.ajaxSetup({accepts:{script:"text/javascript, application/javascript, "+"application/ecmascript, application/x-ecmascript"}, +contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(text){jQuery.globalEval(text);return text}}});jQuery.ajaxPrefilter("script",function(s){if(s.cache===undefined){s.cache=false}if(s.crossDomain){s.type="GET"}});jQuery.ajaxTransport("script",function(s){if(s.crossDomain){var script,callback;return{send:function(_,complete){script=jQuery(" + + + + + + + +
+ Hide keyboard shortcuts +

Hot-keys on this page

+
+

+ r + m + x + p   toggle line displays +

+

+ j + k   next/prev highlighted chunk +

+

+ 0   (zero) top of page +

+

+ 1   (one) first highlighted chunk +

+
+
+
+

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Automated tests. Run with pytest.""" 

+
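The tests/__init__.py report above carries only the package docstring, "Automated tests. Run with pytest." As a minimal sketch of what that invocation looks like when driven from Python instead of the pytest command line (the tests/ path and the -q flag are illustrative choices, not taken from the report):

# Sketch: run the suite programmatically; pytest.main() accepts the same
# arguments as the CLI and returns the exit status.
import pytest

raise SystemExit(pytest.main(["tests/", "-q"]))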
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_conftest_py.html b/reports/20210322_66173dc24d/htmlcov/tests_conftest_py.html new file mode 100644 index 000000000..bdfe9d437 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_conftest_py.html @@ -0,0 +1,169 @@ + + + + + + Coverage for tests/conftest.py: 95.556% + + + + + + + + + + +
+ Hide keyboard shortcuts +

Hot-keys on this page

+
+

+ r + m + x + p   toggle line displays +

+

+ j + k   next/prev highlighted chunk +

+

+ 0   (zero) top of page +

+

+ 1   (one) first highlighted chunk +

+
+
+
+

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4""" 

+

5Pytest auto configuration. 

+

6 

+

7This module is run automatically by pytest, to define and enable fixtures. 

+

8""" 

+

9 

+

10import os 

+

11import sys 

+

12import warnings 

+

13 

+

14import pytest 

+

15 

+

16from coverage import env 

+

17from coverage.misc import StopEverything 

+

18 

+

19 

+

20# Pytest will rewrite assertions in test modules, but not elsewhere. 

+

21# This tells pytest to also rewrite assertions in coveragetest.py. 

+

22pytest.register_assert_rewrite("tests.coveragetest") 

+

23pytest.register_assert_rewrite("tests.helpers") 

+

24 

+

25# Pytest can take additional options: 

+

26# $set_env.py: PYTEST_ADDOPTS - Extra arguments to pytest. 

+

27 

+

28@pytest.fixture(autouse=True) 

+

29def set_warnings(): 

+

30 """Enable DeprecationWarnings during all tests.""" 

+

31 warnings.simplefilter("default") 

+

32 warnings.simplefilter("once", DeprecationWarning) 

+

33 

+

34 # Warnings to suppress: 

+

35 # How come these warnings are successfully suppressed here, but not in setup.cfg?? 

+

36 

+

37 # setuptools/py33compat.py:54: DeprecationWarning: The value of convert_charrefs will become 

+

38 # True in 3.5. You are encouraged to set the value explicitly. 

+

39 # unescape = getattr(html, 'unescape', html_parser.HTMLParser().unescape) 

+

40 warnings.filterwarnings( 

+

41 "ignore", 

+

42 category=DeprecationWarning, 

+

43 message=r"The value of convert_charrefs will become True in 3.5.", 

+

44 ) 

+

45 

+

46 warnings.filterwarnings( 

+

47 "ignore", 

+

48 category=DeprecationWarning, 

+

49 message=r".* instead of inspect.getfullargspec", 

+

50 ) 

+

51 

+

52 # <frozen importlib._bootstrap>:681: 

+

53 # ImportWarning: VendorImporter.exec_module() not found; falling back to load_module() 

+

54 warnings.filterwarnings( 

+

55 "ignore", 

+

56 category=ImportWarning, 

+

57 message=r".*exec_module\(\) not found; falling back to load_module\(\)", 

+

58 ) 

+

59 

+

60 if env.PYPY3: 

+

61 # pypy3 warns about unclosed files a lot. 

+

62 warnings.filterwarnings("ignore", r".*unclosed file", category=ResourceWarning) 

+

63 

+

64 

+

65@pytest.fixture(autouse=True) 

+

66def reset_sys_path(): 

+

67 """Clean up sys.path changes around every test.""" 

+

68 sys_path = list(sys.path) 

+

69 yield 

+

70 sys.path[:] = sys_path 

+

71 

+

72 

+

73@pytest.fixture(autouse=True) 

+

74def fix_xdist_sys_path(): 

+

75 """Prevent xdist from polluting the Python path. 

+

76 

+

77 We run tests that care a lot about the contents of sys.path. Pytest-xdist 

+

78 changes sys.path, so running with xdist, vs without xdist, sets sys.path 

+

79 differently. With xdist, sys.path[1] is an empty string, without xdist, 

+

80 it's the virtualenv bin directory. We don't want the empty string, so 

+

81 clobber that entry. 

+

82 

+

83 See: https://github.com/pytest-dev/pytest-xdist/issues/376 

+

84 

+

85 """ 

+

86 if os.environ.get('PYTEST_XDIST_WORKER', ''): # pragma: part covered 

+

87 # We are running in an xdist worker. 

+

88 if sys.path[1] == '': 

+

89 # xdist has set sys.path[1] to ''. Clobber it. 

+

90 del sys.path[1] 

+

91 # Also, don't let it sneak stuff in via PYTHONPATH. 

+

92 try: 

+

93 del os.environ['PYTHONPATH'] 

+

94 except KeyError: 

+

95 pass 

+

96 

+

97 

+

98@pytest.hookimpl(hookwrapper=True) 

+

99def pytest_runtest_call(item): 

+

100 """Convert StopEverything into skipped tests.""" 

+

101 outcome = yield 

+

102 if outcome.excinfo and issubclass(outcome.excinfo[0], StopEverything): 102 ↛ 103 (line 102 didn't jump to line 103, because the condition on line 102 was never true)

+

103 pytest.skip("Skipping {} for StopEverything: {}".format(item.nodeid, outcome.excinfo[1])) 

+
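tests/conftest.py, shown above, wires up autouse fixtures and a hookwrapper that turns coverage.py's internal StopEverything exception into a skipped test rather than an error. The hookwrapper is the less obvious part, so here is a standalone sketch of the same pattern, modeled on the code above but using a hypothetical GiveUp exception in place of StopEverything:

# Sketch of the hookwrapper pattern used by pytest_runtest_call above.
# GiveUp is a stand-in exception, not part of coverage.py.
import pytest

class GiveUp(Exception):
    """Raised by helpers when a test cannot run in this environment."""

@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item):
    outcome = yield   # the test body runs here
    # outcome.excinfo is (type, value, traceback) when the test raised.
    if outcome.excinfo and issubclass(outcome.excinfo[0], GiveUp):
        pytest.skip("Skipping {}: {}".format(item.nodeid, outcome.excinfo[1]))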
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_coveragetest_py.html b/reports/20210322_66173dc24d/htmlcov/tests_coveragetest_py.html new file mode 100644 index 000000000..2743dd5e2 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_coveragetest_py.html @@ -0,0 +1,527 @@ + + + + + + Coverage for tests/coveragetest.py: 100.000% + + + + + + + + + + +
+ Hide keyboard shortcuts +

Hot-keys on this page

+
+

+ r + m + x + p   toggle line displays +

+

+ j + k   next/prev highlighted chunk +

+

+ 0   (zero) top of page +

+

+ 1   (one) first highlighted chunk +

+
+
+
+

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Base test case class for coverage.py testing.""" 

+

5 

+

6import contextlib 

+

7import datetime 

+

8import difflib 

+

9import glob 

+

10import os 

+

11import os.path 

+

12import random 

+

13import re 

+

14import shlex 

+

15import sys 

+

16 

+

17import pytest 

+

18 

+

19import coverage 

+

20from coverage import env 

+

21from coverage.backward import StringIO, import_local_file, string_class, shlex_quote 

+

22from coverage.cmdline import CoverageScript 

+

23 

+

24from tests.helpers import arcs_to_arcz_repr, arcz_to_arcs, assert_count_equal 

+

25from tests.helpers import nice_file, run_command 

+

26from tests.mixins import PytestBase, StdStreamCapturingMixin, SysPathModulesMixin, TempDirMixin 

+

27 

+

28 

+

29# Status returns for the command line. 

+

30OK, ERR = 0, 1 

+

31 

+

32# The coverage/tests directory, for all sorts of finding test helping things. 

+

33TESTS_DIR = os.path.dirname(__file__) 

+

34 

+

35 

+

36class CoverageTest( 

+

37 StdStreamCapturingMixin, 

+

38 SysPathModulesMixin, 

+

39 TempDirMixin, 

+

40 PytestBase, 

+

41): 

+

42 """A base class for coverage.py test cases.""" 

+

43 

+

44 # Standard unittest setting: show me diffs even if they are very long. 

+

45 maxDiff = None 

+

46 

+

47 # Tell newer unittest implementations to print long helpful messages. 

+

48 longMessage = True 

+

49 

+

50 # Let stderr go to stderr, pytest will capture it for us. 

+

51 show_stderr = True 

+

52 

+

53 # Temp dirs go to $TMPDIR/coverage_test/* 

+

54 temp_dir_prefix = "coverage_test/" 

+

55 if os.getenv('COVERAGE_ENV_ID'): # pragma: debugging 

+

56 temp_dir_prefix += "{}/".format(os.getenv('COVERAGE_ENV_ID')) 

+

57 

+

58 # Keep the temp directories if the env says to. 

+

59 # $set_env.py: COVERAGE_KEEP_TMP - Keep the temp directories made by tests. 

+

60 keep_temp_dir = bool(int(os.getenv("COVERAGE_KEEP_TMP", "0"))) 

+

61 

+

62 def setup_test(self): 

+

63 super(CoverageTest, self).setup_test() 

+

64 

+

65 # Attributes for getting info about what happened. 

+

66 self.last_command_status = None 

+

67 self.last_command_output = None 

+

68 self.last_module_name = None 

+

69 

+

70 def start_import_stop(self, cov, modname, modfile=None): 

+

71 """Start coverage, import a file, then stop coverage. 

+

72 

+

73 `cov` is started and stopped, with an `import_local_file` of 

+

74 `modname` in the middle. `modfile` is the file to import as `modname` 

+

75 if it isn't in the current directory. 

+

76 

+

77 The imported module is returned. 

+

78 

+

79 """ 

+

80 cov.start() 

+

81 try: # pragma: nested 

+

82 # Import the Python file, executing it. 

+

83 mod = import_local_file(modname, modfile) 

+

84 finally: # pragma: nested 

+

85 # Stop coverage.py. 

+

86 cov.stop() 

+

87 return mod 

+

88 

+

89 def get_module_name(self): 

+

90 """Return a random module name to use for this test run.""" 

+

91 self.last_module_name = 'coverage_test_' + str(random.random())[2:] 

+

92 return self.last_module_name 

+

93 

+

94 def _check_arcs(self, a1, a2, arc_type): 

+

95 """Check that the arc lists `a1` and `a2` are equal. 

+

96 

+

97 If they are equal, return empty string. If they are unequal, return 

+

98 a string explaining what is different. 

+

99 """ 

+

100 # Make them into multi-line strings so we can see what's going wrong. 

+

101 s1 = arcs_to_arcz_repr(a1) 

+

102 s2 = arcs_to_arcz_repr(a2) 

+

103 if s1 != s2: 

+

104 lines1 = s1.splitlines(True) 

+

105 lines2 = s2.splitlines(True) 

+

106 diff = "".join(difflib.ndiff(lines1, lines2)) 

+

107 return "\n" + arc_type + " arcs differ: minus is expected, plus is actual\n" + diff 

+

108 else: 

+

109 return "" 

+

110 

+

111 def check_coverage( 

+

112 self, text, lines=None, missing="", report="", 

+

113 excludes=None, partials="", 

+

114 arcz=None, arcz_missing=None, arcz_unpredicted=None, 

+

115 arcs=None, arcs_missing=None, arcs_unpredicted=None, 

+

116 ): 

+

117 """Check the coverage measurement of `text`. 

+

118 

+

119 The source `text` is run and measured. `lines` are the line numbers 

+

120 that are executable, or a list of possible line numbers, any of which 

+

121 could match. `missing` are the lines not executed, `excludes` are 

+

122 regexes to match against for excluding lines, and `report` is the text 

+

123 of the measurement report. 

+

124 

+

125 For arc measurement, `arcz` is a string that can be decoded into arcs 

+

126 in the code (see `arcz_to_arcs` for the encoding scheme). 

+

127 `arcz_missing` are the arcs that are not executed, and 

+

128 `arcz_unpredicted` are the arcs executed in the code, but not deducible 

+

129 from the code. These last two default to "", meaning we explicitly 

+

130 check that there are no missing or unpredicted arcs. 

+

131 

+

132 Returns the Coverage object, in case you want to poke at it some more. 

+

133 

+

134 """ 

+

135 # We write the code into a file so that we can import it. 

+

136 # Coverage.py wants to deal with things as modules with file names. 

+

137 modname = self.get_module_name() 

+

138 

+

139 self.make_file(modname + ".py", text) 

+

140 

+

141 if arcs is None and arcz is not None: 

+

142 arcs = arcz_to_arcs(arcz) 

+

143 if arcs_missing is None and arcz_missing is not None: 

+

144 arcs_missing = arcz_to_arcs(arcz_missing) 

+

145 if arcs_unpredicted is None and arcz_unpredicted is not None: 

+

146 arcs_unpredicted = arcz_to_arcs(arcz_unpredicted) 

+

147 

+

148 # Start up coverage.py. 

+

149 cov = coverage.Coverage(branch=True) 

+

150 cov.erase() 

+

151 for exc in excludes or []: 

+

152 cov.exclude(exc) 

+

153 for par in partials or []: 

+

154 cov.exclude(par, which='partial') 

+

155 

+

156 mod = self.start_import_stop(cov, modname) 

+

157 

+

158 # Clean up our side effects 

+

159 del sys.modules[modname] 

+

160 

+

161 # Get the analysis results, and check that they are right. 

+

162 analysis = cov._analyze(mod) 

+

163 statements = sorted(analysis.statements) 

+

164 if lines is not None: 

+

165 if isinstance(lines[0], int): 

+

166 # lines is just a list of numbers, it must match the statements 

+

167 # found in the code. 

+

168 assert statements == lines, "{!r} != {!r}".format(statements, lines) 

+

169 else: 

+

170 # lines is a list of possible line number lists, one of them 

+

171 # must match. 

+

172 for line_list in lines: 

+

173 if statements == line_list: 

+

174 break 

+

175 else: 

+

176 assert False, "None of the lines choices matched %r" % (statements,) 

+

177 

+

178 missing_formatted = analysis.missing_formatted() 

+

179 if isinstance(missing, string_class): 

+

180 msg = "{!r} != {!r}".format(missing_formatted, missing) 

+

181 assert missing_formatted == missing, msg 

+

182 else: 

+

183 for missing_list in missing: 

+

184 if missing_formatted == missing_list: 

+

185 break 

+

186 else: 

+

187 assert False, "None of the missing choices matched %r" % (missing_formatted,) 

+

188 

+

189 if arcs is not None: 

+

190 # print("Possible arcs:") 

+

191 # print(" expected:", arcs) 

+

192 # print(" actual:", analysis.arc_possibilities()) 

+

193 # print("Executed:") 

+

194 # print(" actual:", sorted(set(analysis.arcs_executed()))) 

+

195 # TODO: this would be nicer with pytest-check, once we can run that. 

+

196 msg = ( 

+

197 self._check_arcs(arcs, analysis.arc_possibilities(), "Possible") + 

+

198 self._check_arcs(arcs_missing, analysis.arcs_missing(), "Missing") + 

+

199 self._check_arcs(arcs_unpredicted, analysis.arcs_unpredicted(), "Unpredicted") 

+

200 ) 

+

201 if msg: 

+

202 assert False, msg 

+

203 

+

204 if report: 

+

205 frep = StringIO() 

+

206 cov.report(mod, file=frep, show_missing=True) 

+

207 rep = " ".join(frep.getvalue().split("\n")[2].split()[1:]) 

+

208 assert report == rep, "{!r} != {!r}".format(report, rep) 

+

209 

+

210 return cov 

+

211 

+

212 @contextlib.contextmanager 

+

213 def assert_warnings(self, cov, warnings, not_warnings=()): 

+

214 """A context manager to check that particular warnings happened in `cov`. 

+

215 

+

216 `cov` is a Coverage instance. `warnings` is a list of regexes. Every 

+

217 regex must match a warning that was issued by `cov`. It is OK for 

+

218 extra warnings to be issued by `cov` that are not matched by any regex. 

+

219 Warnings that are disabled are still considered issued by this function. 

+

220 

+

221 `not_warnings` is a list of regexes that must not appear in the 

+

222 warnings. This is only checked if there are some positive warnings to 

+

223 test for in `warnings`. 

+

224 

+

225 If `warnings` is empty, then `cov` is not allowed to issue any 

+

226 warnings. 

+

227 

+

228 """ 

+

229 saved_warnings = [] 

+

230 def capture_warning(msg, slug=None, once=False): # pylint: disable=unused-argument 

+

231 """A fake implementation of Coverage._warn, to capture warnings.""" 

+

232 # NOTE: we don't implement `once`. 

+

233 if slug: 

+

234 msg = "%s (%s)" % (msg, slug) 

+

235 saved_warnings.append(msg) 

+

236 

+

237 original_warn = cov._warn 

+

238 cov._warn = capture_warning 

+

239 

+

240 try: 

+

241 yield 

+

242 except: # pylint: disable=try-except-raise 

+

243 raise 

+

244 else: 

+

245 if warnings: 

+

246 for warning_regex in warnings: 

+

247 for saved in saved_warnings: 

+

248 if re.search(warning_regex, saved): 

+

249 break 

+

250 else: 

+

251 msg = "Didn't find warning %r in %r" % (warning_regex, saved_warnings) 

+

252 assert False, msg 

+

253 for warning_regex in not_warnings: 

+

254 for saved in saved_warnings: 

+

255 if re.search(warning_regex, saved): 

+

256 msg = "Found warning %r in %r" % (warning_regex, saved_warnings) 

+

257 assert False, msg 

+

258 else: 

+

259 # No warnings expected. Raise if any warnings happened. 

+

260 if saved_warnings: 

+

261 assert False, "Unexpected warnings: %r" % (saved_warnings,) 

+

262 finally: 

+

263 cov._warn = original_warn 

+

264 

+

265 def assert_same_files(self, flist1, flist2): 

+

266 """Assert that `flist1` and `flist2` are the same set of file names.""" 

+

267 flist1_nice = [nice_file(f) for f in flist1] 

+

268 flist2_nice = [nice_file(f) for f in flist2] 

+

269 assert_count_equal(flist1_nice, flist2_nice) 

+

270 

+

271 def assert_exists(self, fname): 

+

272 """Assert that `fname` is a file that exists.""" 

+

273 msg = "File %r should exist" % fname 

+

274 assert os.path.exists(fname), msg 

+

275 

+

276 def assert_doesnt_exist(self, fname): 

+

277 """Assert that `fname` is a file that doesn't exist.""" 

+

278 msg = "File %r shouldn't exist" % fname 

+

279 assert not os.path.exists(fname), msg 

+

280 

+

281 def assert_file_count(self, pattern, count): 

+

282 """Assert that there are `count` files matching `pattern`.""" 

+

283 files = sorted(glob.glob(pattern)) 

+

284 msg = "There should be {} files matching {!r}, but there are these: {}" 

+

285 msg = msg.format(count, pattern, files) 

+

286 assert len(files) == count, msg 

+

287 

+

288 def assert_recent_datetime(self, dt, seconds=10, msg=None): 

+

289 """Assert that `dt` marks a time at most `seconds` seconds ago.""" 

+

290 age = datetime.datetime.now() - dt 

+

291 assert age.total_seconds() >= 0, msg 

+

292 assert age.total_seconds() <= seconds, msg 

+

293 

+

294 def command_line(self, args, ret=OK): 

+

295 """Run `args` through the command line. 

+

296 

+

297 Use this when you want to run the full coverage machinery, but in the 

+

298 current process. Exceptions may be thrown from deep in the code. 

+

299 Asserts that `ret` is returned by `CoverageScript.command_line`. 

+

300 

+

301 Compare with `run_command`. 

+

302 

+

303 Returns None. 

+

304 

+

305 """ 

+

306 ret_actual = command_line(args) 

+

307 assert ret_actual == ret, "{!r} != {!r}".format(ret_actual, ret) 

+

308 

+

309 # Some distros rename the coverage command, and need a way to indicate 

+

310 # their new command name to the tests. This is here for them to override, 

+

311 # for example: 

+

312 # https://salsa.debian.org/debian/pkg-python-coverage/-/blob/master/debian/patches/02.rename-public-programs.patch 

+

313 coverage_command = "coverage" 

+

314 

+

315 def run_command(self, cmd): 

+

316 """Run the command-line `cmd` in a sub-process. 

+

317 

+

318 `cmd` is the command line to invoke in a sub-process. Returns the 

+

319 combined content of `stdout` and `stderr` output streams from the 

+

320 sub-process. 

+

321 

+

322 See `run_command_status` for complete semantics. 

+

323 

+

324 Use this when you need to test the process behavior of coverage. 

+

325 

+

326 Compare with `command_line`. 

+

327 

+

328 """ 

+

329 _, output = self.run_command_status(cmd) 

+

330 return output 

+

331 

+

332 def run_command_status(self, cmd): 

+

333 """Run the command-line `cmd` in a sub-process, and print its output. 

+

334 

+

335 Use this when you need to test the process behavior of coverage. 

+

336 

+

337 Compare with `command_line`. 

+

338 

+

339 Handles the following command names specially: 

+

340 

+

341 * "python" is replaced with the command name of the current 

+

342 Python interpreter. 

+

343 

+

344 * "coverage" is replaced with the command name for the main 

+

345 coverage.py program. 

+

346 

+

347 Returns a pair: the process' exit status and its stdout/stderr text, 

+

348 which are also stored as `self.last_command_status` and 

+

349 `self.last_command_output`. 

+

350 

+

351 """ 

+

352 # Make sure "python" and "coverage" mean specifically what we want 

+

353 # them to mean. 

+

354 split_commandline = cmd.split() 

+

355 command_name = split_commandline[0] 

+

356 command_args = split_commandline[1:] 

+

357 

+

358 if command_name == "python": 

+

359 # Running a Python interpreter in a sub-processes can be tricky. 

+

360 # Use the real name of our own executable. So "python foo.py" might 

+

361 # get executed as "python3.3 foo.py". This is important because 

+

362 # Python 3.x doesn't install as "python", so you might get a Python 

+

363 # 2 executable instead if you don't use the executable's basename. 

+

364 command_words = [os.path.basename(sys.executable)] 

+

365 

+

366 elif command_name == "coverage": 

+

367 if env.JYTHON: # pragma: only jython 

+

368 # Jython can't do reporting, so let's skip the test now. 

+

369 if command_args and command_args[0] in ('report', 'html', 'xml', 'annotate'): 

+

370 pytest.skip("Can't run reporting commands in Jython") 

+

371 # Jython can't run "coverage" as a command because the shebang 

+

372 # refers to another shebang'd Python script. So run them as 

+

373 # modules. 

+

374 command_words = "jython -m coverage".split() 

+

375 else: 

+

376 # The invocation requests the coverage.py program. Substitute the 

+

377 # actual coverage.py main command name. 

+

378 command_words = [self.coverage_command] 

+

379 

+

380 else: 

+

381 command_words = [command_name] 

+

382 

+

383 cmd = " ".join([shlex_quote(w) for w in command_words] + command_args) 

+

384 

+

385 # Add our test modules directory to PYTHONPATH. I'm sure there's too 

+

386 # much path munging here, but... 

+

387 pythonpath_name = "PYTHONPATH" 

+

388 if env.JYTHON: 

+

389 pythonpath_name = "JYTHONPATH" # pragma: only jython 

+

390 

+

391 testmods = nice_file(self.working_root(), "tests/modules") 

+

392 zipfile = nice_file(self.working_root(), "tests/zipmods.zip") 

+

393 pypath = os.getenv(pythonpath_name, '') 

+

394 if pypath: 

+

395 pypath += os.pathsep 

+

396 pypath += testmods + os.pathsep + zipfile 

+

397 self.set_environ(pythonpath_name, pypath) 

+

398 

+

399 self.last_command_status, self.last_command_output = run_command(cmd) 

+

400 print(self.last_command_output) 

+

401 return self.last_command_status, self.last_command_output 

+

402 

+

403 def working_root(self): 

+

404 """Where is the root of the coverage.py working tree?""" 

+

405 return os.path.dirname(nice_file(coverage.__file__, "..")) 

+

406 

+

407 def report_from_command(self, cmd): 

+

408 """Return the report from the `cmd`, with some convenience added.""" 

+

409 report = self.run_command(cmd).replace('\\', '/') 

+

410 assert "error" not in report.lower() 

+

411 return report 

+

412 

+

413 def report_lines(self, report): 

+

414 """Return the lines of the report, as a list.""" 

+

415 lines = report.split('\n') 

+

416 assert lines[-1] == "" 

+

417 return lines[:-1] 

+

418 

+

419 def line_count(self, report): 

+

420 """How many lines are in `report`?""" 

+

421 return len(self.report_lines(report)) 

+

422 

+

423 def squeezed_lines(self, report): 

+

424 """Return a list of the lines in report, with the spaces squeezed.""" 

+

425 lines = self.report_lines(report) 

+

426 return [re.sub(r"\s+", " ", l.strip()) for l in lines] 

+

427 

+

428 def last_line_squeezed(self, report): 

+

429 """Return the last line of `report` with the spaces squeezed down.""" 

+

430 return self.squeezed_lines(report)[-1] 

+

431 

+

432 def get_measured_filenames(self, coverage_data): 

+

433 """Get paths to measured files. 

+

434 

+

435 Returns a dict of {filename: absolute path to file} 

+

436 for given CoverageData. 

+

437 """ 

+

438 return {os.path.basename(filename): filename 

+

439 for filename in coverage_data.measured_files()} 

+

440 

+

441 

+

442class UsingModulesMixin(object): 

+

443 """A mixin for importing modules from tests/modules and tests/moremodules.""" 

+

444 

+

445 def setup_test(self): 

+

446 super(UsingModulesMixin, self).setup_test() 

+

447 

+

448 # Parent class saves and restores sys.path, we can just modify it. 

+

449 sys.path.append(nice_file(TESTS_DIR, "modules")) 

+

450 sys.path.append(nice_file(TESTS_DIR, "moremodules")) 

+

451 

+

452 

+

453def command_line(args): 

+

454 """Run `args` through the CoverageScript command line. 

+

455 

+

456 Returns the return code from CoverageScript.command_line. 

+

457 

+

458 """ 

+

459 script = CoverageScript() 

+

460 ret = script.command_line(shlex.split(args)) 

+

461 return ret 

+
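tests/coveragetest.py above is the base class most of the suite builds on: check_coverage() writes a temporary module, measures it with branch coverage, and compares statements, missing lines, and arcs (the compact arcz strings are decoded by arcz_to_arcs from tests/helpers.py). A sketch of how a concrete test case might use it; the test body and the arcz string are illustrative, not copied from the real suite:

# Sketch: a hypothetical test built on CoverageTest.check_coverage().
from tests.coveragetest import CoverageTest

class ArcSmokeTest(CoverageTest):
    """Measure a two-line module and check its lines and arcs."""

    def test_straight_line_code(self):
        self.check_coverage("""\
            a = 1
            b = a + 1
            """,
            lines=[1, 2], missing="",
            # ".1 12 2." decodes to the arcs (-1, 1), (1, 2), (2, -1).
            arcz=".1 12 2.",
        )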
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_covmain_zip___main___py.html b/reports/20210322_66173dc24d/htmlcov/tests_covmain_zip___main___py.html new file mode 100644 index 000000000..0e04858db --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_covmain_zip___main___py.html @@ -0,0 +1,74 @@ + + + + + + Coverage for tests/covmain.zip/__main__.py: 100.000% + + + + + + + + + + +
+ Hide keyboard shortcuts +

Hot-keys on this page

+
+

+ r + m + x + p   toggle line displays +

+

+ j + k   next/prev highlighted chunk +

+

+ 0   (zero) top of page +

+

+ 1   (one) first highlighted chunk +

+
+
+
+

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Coverage.py's main entry point.""" 

+

5 

+

6import sys 

+

7from coverage.cmdline import main 

+

8sys.exit(main()) 

+
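tests/covmain.zip/__main__.py above packages coverage.py's command-line entry point into a zip archive so the suite can exercise running it as a zipped application. Python will execute a zip archive that contains a top-level __main__.py. A minimal sketch of building and running such an archive; the file names are illustrative, and coverage.py must already be importable for the entry point to succeed:

# Sketch: package a __main__.py into an executable zip archive.
import zipfile

with zipfile.ZipFile("covmain.zip", "w") as zf:
    zf.write("__main__.py")   # Python runs the archive's top-level __main__.py

# Then:  python covmain.zip report   dispatches to coverage.cmdline.main().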
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_covmodzip1_py.html b/reports/20210322_66173dc24d/htmlcov/tests_covmodzip1_py.html new file mode 100644 index 000000000..d89f65010 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_covmodzip1_py.html @@ -0,0 +1,76 @@ + + + + + + Coverage for tests/covmodzip1.py: 0.000% + + + + + + + + + + +
+ Hide keyboard shortcuts +

Hot-keys on this page

+
+

+ r + m + x + p   toggle line displays +

+

+ j + k   next/prev highlighted chunk +

+

+ 0   (zero) top of page +

+

+ 1   (one) first highlighted chunk +

+
+
+
+

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4# Module-level docstrings are counted differently in different versions of Python, 

+

5# so don't add one here. 

+

6# pylint: disable=missing-module-docstring 

+

7 

+

8# covmodzip.py: for putting into a zip file. 

+

9j = 1 

+

10j += 1 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_goldtest_py.html b/reports/20210322_66173dc24d/htmlcov/tests_goldtest_py.html new file mode 100644 index 000000000..a905eda86 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_goldtest_py.html @@ -0,0 +1,249 @@ + + + + + + Coverage for tests/goldtest.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""A test base class for tests based on gold file comparison.""" 

+

5 

+

6import difflib 

+

7import filecmp 

+

8import fnmatch 

+

9import os 

+

10import os.path 

+

11import re 

+

12import sys 

+

13import xml.etree.ElementTree 

+

14 

+

15from coverage import env 

+

16 

+

17from tests.coveragetest import TESTS_DIR 

+

18 

+

19 

+

20def gold_path(path): 

+

21 """Get a path to a gold file for comparison.""" 

+

22 return os.path.join(TESTS_DIR, "gold", path) 

+

23 

+

24 

+

25# "rU" was deprecated in 3.4 

+

26READ_MODE = "rU" if env.PYVERSION < (3, 4) else "r" 

+

27 

+

28 

+

29def versioned_directory(d): 

+

30 """Find a subdirectory of d specific to the Python version. 

+

31 For example, on Python 3.6.4 rc 1, it returns the first of these 

+

32 directories that exists:: 

+

33 d/3.6.4.candidate.1 

+

34 d/3.6.4.candidate 

+

35 d/3.6.4 

+

36 d/3.6 

+

37 d/3 

+

38 d 

+

39 Returns: a string, the path to an existing directory. 

+

40 """ 

+

41 ver_parts = list(map(str, sys.version_info)) 

+

42 for nparts in range(len(ver_parts), -1, -1): 

+

43 version = ".".join(ver_parts[:nparts]) 

+

44 subdir = os.path.join(d, version) 

+

45 if os.path.exists(subdir): 

+

46 return subdir 

+

47 raise Exception("Directory missing: {}".format(d)) # pragma: only failure 

+

48 

+

49 

+

50def compare( 

+

51 expected_dir, actual_dir, file_pattern=None, 

+

52 actual_extra=False, scrubs=None, 

+

53 ): 

+

54 """Compare files matching `file_pattern` in `expected_dir` and `actual_dir`. 

+

55 

+

56 A version-specific subdirectory of `expected_dir` will be used if 

+

57 it exists. 

+

58 

+

59 `actual_extra` true means `actual_dir` can have extra files in it 

+

60 without triggering an assertion. 

+

61 

+

62 `scrubs` is a list of pairs: regexes to find and replace to scrub the 

+

63 files of unimportant differences. 

+

64 

+

65 An assertion will be raised if the directories fail one of their 

+

66 matches. 

+

67 

+

68 """ 

+

69 expected_dir = versioned_directory(expected_dir) 

+

70 

+

71 dc = filecmp.dircmp(expected_dir, actual_dir) 

+

72 diff_files = fnmatch_list(dc.diff_files, file_pattern) 

+

73 expected_only = fnmatch_list(dc.left_only, file_pattern) 

+

74 actual_only = fnmatch_list(dc.right_only, file_pattern) 

+

75 

+

76 # filecmp only compares in binary mode, but we want text mode. So 

+

77 # look through the list of different files, and compare them 

+

78 # ourselves. 

+

79 text_diff = [] 

+

80 for f in diff_files: 

+

81 

+

82 expected_file = os.path.join(expected_dir, f) 

+

83 with open(expected_file, READ_MODE) as fobj: 

+

84 expected = fobj.read() 

+

85 if expected_file.endswith(".xml"): 

+

86 expected = canonicalize_xml(expected) 

+

87 

+

88 actual_file = os.path.join(actual_dir, f) 

+

89 with open(actual_file, READ_MODE) as fobj: 

+

90 actual = fobj.read() 

+

91 if actual_file.endswith(".xml"): 

+

92 actual = canonicalize_xml(actual) 

+

93 

+

94 if scrubs: 

+

95 expected = scrub(expected, scrubs) 

+

96 actual = scrub(actual, scrubs) 

+

97 if expected != actual: # pragma: only failure 

+

98 text_diff.append('%s != %s' % (expected_file, actual_file)) 

+

99 expected = expected.splitlines() 

+

100 actual = actual.splitlines() 

+

101 print(":::: diff {!r} and {!r}".format(expected_file, actual_file)) 

+

102 print("\n".join(difflib.Differ().compare(expected, actual))) 

+

103 print(":::: end diff {!r} and {!r}".format(expected_file, actual_file)) 

+

104 assert not text_diff, "Files differ: %s" % '\n'.join(text_diff) 

+

105 

+

106 assert not expected_only, "Files in %s only: %s" % (expected_dir, expected_only) 

+

107 if not actual_extra: 

+

108 assert not actual_only, "Files in %s only: %s" % (actual_dir, actual_only) 

+

109 

+

110 

+

111def canonicalize_xml(xtext): 

+

112 """Canonicalize some XML text.""" 

+

113 root = xml.etree.ElementTree.fromstring(xtext) 

+

114 for node in root.iter(): 

+

115 node.attrib = dict(sorted(node.items())) 

+

116 xtext = xml.etree.ElementTree.tostring(root) 

+

117 return xtext.decode('utf8') 

+

118 

+

119 

+

120def contains(filename, *strlist): 

+

121 """Check that the file contains all of a list of strings. 

+

122 

+

123 An assert will be raised if one of the arguments in `strlist` is 

+

124 missing in `filename`. 

+

125 

+

126 """ 

+

127 with open(filename, "r") as fobj: 

+

128 text = fobj.read() 

+

129 for s in strlist: 

+

130 assert s in text, "Missing content in %s: %r" % (filename, s) 

+

131 

+

132 

+

133def contains_any(filename, *strlist): 

+

134 """Check that the file contains at least one of a list of strings. 

+

135 

+

136 An assert will be raised if none of the arguments in `strlist` is in 

+

137 `filename`. 

+

138 

+

139 """ 

+

140 with open(filename, "r") as fobj: 

+

141 text = fobj.read() 

+

142 for s in strlist: 

+

143 if s in text: 

+

144 return 

+

145 

+

146 assert False, ( # pragma: only failure 

+

147 "Missing content in %s: %r [1 of %d]" % (filename, strlist[0], len(strlist),) 

+

148 ) 

+

149 

+

150 

+

151def doesnt_contain(filename, *strlist): 

+

152 """Check that the file contains none of a list of strings. 

+

153 

+

154 An assert will be raised if any of the strings in `strlist` appears in 

+

155 `filename`. 

+

156 

+

157 """ 

+

158 with open(filename, "r") as fobj: 

+

159 text = fobj.read() 

+

160 for s in strlist: 

+

161 assert s not in text, "Forbidden content in %s: %r" % (filename, s) 

+

162 

+

163 

+

164# Helpers 

+

165 

+

166def fnmatch_list(files, file_pattern): 

+

167 """Filter the list of `files` to only those that match `file_pattern`. 

+

168 If `file_pattern` is None, then return the entire list of files. 

+

169 Returns a list of the filtered files. 

+

170 """ 

+

171 if file_pattern: 

+

172 files = [f for f in files if fnmatch.fnmatch(f, file_pattern)] 

+

173 return files 

+

174 

+

175 

+

176def scrub(strdata, scrubs): 

+

177 """Scrub uninteresting data from the payload in `strdata`. 

+

178 `scrubs` is a list of (find, replace) pairs of regexes that are used on 

+

179 `strdata`. A string is returned. 

+

180 """ 

+

181 for rgx_find, rgx_replace in scrubs: 

+

182 strdata = re.sub(rgx_find, rgx_replace, strdata) 

+

183 return strdata 

+
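tests/goldtest.py above is the gold-file layer: gold_path() locates the expected tree, versioned_directory() narrows it to the closest Python-version subdirectory, and compare() diffs the actual output against it, optionally scrubbing volatile text first. A minimal sketch of a call, with invented directory names and scrub patterns; only the function signatures come from the listing:

from tests.goldtest import compare, gold_path

# Compare generated HTML in "out_html" against checked-in gold files,
# looking only at *.html and replacing a timestamp that changes every run.
compare(
    gold_path("html/simple"),      # expected_dir; a versioned subdir is preferred if present
    "out_html",                    # actual_dir produced by the test
    file_pattern="*.html",
    scrubs=[
        (r"created at \d{4}-\d{2}-\d{2} \d{2}:\d{2}", "created at DATE"),
    ],
)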
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_helpers_py.html b/reports/20210322_66173dc24d/htmlcov/tests_helpers_py.html new file mode 100644 index 000000000..1e6e98491 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_helpers_py.html @@ -0,0 +1,341 @@ + + + + + + Coverage for tests/helpers.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Helpers for coverage.py tests.""" 

+

5 

+

6import collections 

+

7import contextlib 

+

8import glob 

+

9import os 

+

10import os.path 

+

11import re 

+

12import subprocess 

+

13import sys 

+

14import textwrap 

+

15 

+

16import mock 

+

17 

+

18from coverage import env 

+

19from coverage.backward import unicode_class 

+

20from coverage.misc import output_encoding 

+

21 

+

22 

+

23def run_command(cmd): 

+

24 """Run a command in a sub-process. 

+

25 

+

26 Returns the exit status code and the combined stdout and stderr. 

+

27 

+

28 """ 

+

29 if env.PY2 and isinstance(cmd, unicode_class): 

+

30 cmd = cmd.encode(sys.getfilesystemencoding()) 

+

31 

+

32 # In some strange cases (PyPy3 in a virtualenv!?) the stdout encoding of 

+

33 # the subprocess is set incorrectly to ascii. Use an environment variable 

+

34 # to force the encoding to be the same as ours. 

+

35 sub_env = dict(os.environ) 

+

36 sub_env['PYTHONIOENCODING'] = output_encoding() 

+

37 

+

38 proc = subprocess.Popen( 

+

39 cmd, 

+

40 shell=True, 

+

41 env=sub_env, 

+

42 stdin=subprocess.PIPE, stdout=subprocess.PIPE, 

+

43 stderr=subprocess.STDOUT 

+

44 ) 

+

45 output, _ = proc.communicate() 

+

46 status = proc.returncode 

+

47 

+

48 # Get the output, and canonicalize it to strings with newlines. 

+

49 if not isinstance(output, str): 

+

50 output = output.decode(output_encoding()) 

+

51 output = output.replace('\r', '') 

+

52 

+

53 return status, output 

+

54 

+

55 

+

56def make_file(filename, text="", bytes=b"", newline=None): 

+

57 """Create a file for testing. 

+

58 

+

59 `filename` is the relative path to the file, including directories if 

+

60 desired, which will be created if need be. 

+

61 

+

62 `text` is the content to create in the file, a native string (bytes in 

+

63 Python 2, unicode in Python 3), or `bytes` are the bytes to write. 

+

64 

+

65 If `newline` is provided, it is a string that will be used as the line 

+

66 endings in the created file, otherwise the line endings are as provided 

+

67 in `text`. 

+

68 

+

69 Returns `filename`. 

+

70 

+

71 """ 

+

72 # pylint: disable=redefined-builtin # bytes 

+

73 if bytes: 

+

74 data = bytes 

+

75 else: 

+

76 text = textwrap.dedent(text) 

+

77 if newline: 

+

78 text = text.replace("\n", newline) 

+

79 if env.PY3: 

+

80 data = text.encode('utf8') 

+

81 else: 

+

82 data = text 

+

83 

+

84 # Make sure the directories are available. 

+

85 dirs, _ = os.path.split(filename) 

+

86 if dirs and not os.path.exists(dirs): 

+

87 os.makedirs(dirs) 

+

88 

+

89 # Create the file. 

+

90 with open(filename, 'wb') as f: 

+

91 f.write(data) 

+

92 

+

93 return filename 

+

94 

+

95 

+

96def nice_file(*fparts): 

+

97 """Canonicalize the file name composed of the parts in `fparts`.""" 

+

98 fname = os.path.join(*fparts) 

+

99 return os.path.normcase(os.path.abspath(os.path.realpath(fname))) 

+

100 

+

101 

+

102class CheckUniqueFilenames(object): 

+

103 """Asserts the uniqueness of file names passed to a function.""" 

+

104 def __init__(self, wrapped): 

+

105 self.filenames = set() 

+

106 self.wrapped = wrapped 

+

107 

+

108 @classmethod 

+

109 def hook(cls, obj, method_name): 

+

110 """Replace a method with our checking wrapper. 

+

111 

+

112 The method must take a string as a first argument. That argument 

+

113 will be checked for uniqueness across all the calls to this method. 

+

114 

+

115 The values don't have to be file names actually, just strings, but 

+

116 we only use it for filename arguments. 

+

117 

+

118 """ 

+

119 method = getattr(obj, method_name) 

+

120 hook = cls(method) 

+

121 setattr(obj, method_name, hook.wrapper) 

+

122 return hook 

+

123 

+

124 def wrapper(self, filename, *args, **kwargs): 

+

125 """The replacement method. Check that we don't have dupes.""" 

+

126 assert filename not in self.filenames, ( 

+

127 "File name %r passed to %r twice" % (filename, self.wrapped) 

+

128 ) 

+

129 self.filenames.add(filename) 

+

130 ret = self.wrapped(filename, *args, **kwargs) 

+

131 return ret 

+

132 

+

133 

+

134def re_lines(text, pat, match=True): 

+

135 """Return the text of lines that match `pat` in the string `text`. 

+

136 

+

137 If `match` is false, the selection is inverted: only the non-matching 

+

138 lines are included. 

+

139 

+

140 Returns a string, the text of only the selected lines. 

+

141 

+

142 """ 

+

143 return "".join(l for l in text.splitlines(True) if bool(re.search(pat, l)) == match) 

+

144 

+

145 

+

146def re_line(text, pat): 

+

147 """Return the one line in `text` that matches regex `pat`. 

+

148 

+

149 Raises an AssertionError if more than one, or less than one, line matches. 

+

150 

+

151 """ 

+

152 lines = re_lines(text, pat).splitlines() 

+

153 assert len(lines) == 1 

+

154 return lines[0] 

+

155 

+

156 

+

157def remove_files(*patterns): 

+

158 """Remove all files that match any of the patterns.""" 

+

159 for pattern in patterns: 

+

160 for fname in glob.glob(pattern): 

+

161 os.remove(fname) 

+

162 

+

163 

+

164# Map chars to numbers for arcz_to_arcs 

+

165_arcz_map = {'.': -1} 

+

166_arcz_map.update(dict((c, ord(c) - ord('0')) for c in '123456789')) 

+

167_arcz_map.update(dict( 

+

168 (c, 10 + ord(c) - ord('A')) for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 

+

169)) 

+

170 

+

171def arcz_to_arcs(arcz): 

+

172 """Convert a compact textual representation of arcs to a list of pairs. 

+

173 

+

174 The text has space-separated pairs of letters. Period is -1, 1-9 are 

+

175 1-9, A-Z are 10 through 36. The resulting list is sorted regardless of 

+

176 the order of the input pairs. 

+

177 

+

178 ".1 12 2." --> [(-1,1), (1,2), (2,-1)] 

+

179 

+

180 Minus signs can be included in the pairs: 

+

181 

+

182 "-11, 12, 2-5" --> [(-1,1), (1,2), (2,-5)] 

+

183 

+

184 """ 

+

185 arcs = [] 

+

186 for pair in arcz.split(): 

+

187 asgn = bsgn = 1 

+

188 if len(pair) == 2: 

+

189 a, b = pair 

+

190 else: 

+

191 assert len(pair) == 3 

+

192 if pair[0] == '-': 

+

193 _, a, b = pair 

+

194 asgn = -1 

+

195 else: 

+

196 assert pair[1] == '-' 

+

197 a, _, b = pair 

+

198 bsgn = -1 

+

199 arcs.append((asgn * _arcz_map[a], bsgn * _arcz_map[b])) 

+

200 return sorted(arcs) 

+

201 

+

202 

+

203_arcz_unmap = {val: ch for ch, val in _arcz_map.items()} 

+

204 

+

205def _arcs_to_arcz_repr_one(num): 

+

206 """Return an arcz form of the number `num`, or "?" if there is none.""" 

+

207 if num == -1: 

+

208 return "." 

+

209 z = "" 

+

210 if num < 0: 

+

211 z += "-" 

+

212 num *= -1 

+

213 z += _arcz_unmap.get(num, "?") 

+

214 return z 

+

215 

+

216 

+

217def arcs_to_arcz_repr(arcs): 

+

218 """Convert a list of arcs to a readable multi-line form for asserting. 

+

219 

+

220 Each pair is on its own line, with a comment showing the arcz form, 

+

221 to make it easier to decode when debugging test failures. 

+

222 

+

223 """ 

+

224 repr_list = [] 

+

225 for a, b in (arcs or ()): 

+

226 line = repr((a, b)) 

+

227 line += " # " 

+

228 line += _arcs_to_arcz_repr_one(a) 

+

229 line += _arcs_to_arcz_repr_one(b) 

+

230 repr_list.append(line) 

+

231 return "\n".join(repr_list) + "\n" 

+

232 

+

233 

+

234@contextlib.contextmanager 

+

235def change_dir(new_dir): 

+

236 """Change directory, and then change back. 

+

237 

+

238 Use as a context manager, it will return to the original 

+

239 directory at the end of the block. 

+

240 

+

241 """ 

+

242 old_dir = os.getcwd() 

+

243 os.chdir(new_dir) 

+

244 try: 

+

245 yield 

+

246 finally: 

+

247 os.chdir(old_dir) 

+

248 

+

249 

+

250def without_module(using_module, missing_module_name): 

+

251 """ 

+

252 Hide a module for testing. 

+

253 

+

254 Use this in a test function to make an optional module unavailable during 

+

255 the test:: 

+

256 

+

257 with without_module(product.something, 'toml'): 

+

258 use_toml_somehow() 

+

259 

+

260 Arguments: 

+

261 using_module: a module in which to hide `missing_module_name`. 

+

262 missing_module_name (str): the name of the module to hide. 

+

263 

+

264 """ 

+

265 return mock.patch.object(using_module, missing_module_name, None) 

+

266 

+

267 

+

268def assert_count_equal(a, b): 

+

269 """ 

+

270 A pytest-friendly implementation of assertCountEqual. 

+

271 

+

272 Assert that `a` and `b` have the same elements, but maybe in different order. 

+

273 This only works for hashable elements. 

+

274 """ 

+

275 assert collections.Counter(list(a)) == collections.Counter(list(b)) 

+
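tests/helpers.py collects the suite's small shared utilities; the compact "arcz" arc notation is the least obvious of them, so a concrete round trip helps. The calls below use the functions exactly as listed; the sample text is invented:

from tests.helpers import arcz_to_arcs, arcs_to_arcz_repr, re_lines

# ".1 12 2." decodes to (entry, 1), (1, 2), (2, exit):
assert arcz_to_arcs(".1 12 2.") == [(-1, 1), (1, 2), (2, -1)]

# The companion helper renders pairs one per line with an arcz comment,
# which is what failing arc assertions print for debugging.
print(arcs_to_arcz_repr([(-1, 1), (1, 2), (2, -1)]))

# re_lines keeps only the lines of a text blob that match a regex.
text = "ok: 1\nerror: boom\nok: 2\n"
assert re_lines(text, r"^ok") == "ok: 1\nok: 2\n"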
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_mixins_py.html b/reports/20210322_66173dc24d/htmlcov/tests_mixins_py.html new file mode 100644 index 000000000..55973fb49 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_mixins_py.html @@ -0,0 +1,229 @@ + + + + + + Coverage for tests/mixins.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4""" 

+

5Test class mixins 

+

6 

+

7Some of these are transitional while working toward pure-pytest style. 

+

8""" 

+

9 

+

10import os 

+

11import os.path 

+

12import shutil 

+

13import sys 

+

14 

+

15import pytest 

+

16 

+

17from coverage.backward import importlib 

+

18 

+

19from tests.helpers import change_dir, make_file, remove_files 

+

20 

+

21 

+

22class PytestBase(object): 

+

23 """A base class to connect to pytest in a test class hierarchy.""" 

+

24 

+

25 @pytest.fixture(autouse=True) 

+

26 def connect_to_pytest(self, request, monkeypatch): 

+

27 """Captures pytest facilities for use by other test helpers.""" 

+

28 # pylint: disable=attribute-defined-outside-init 

+

29 self._pytest_request = request 

+

30 self._monkeypatch = monkeypatch 

+

31 self.setup_test() 

+

32 

+

33 # Can't call this setUp or setup because pytest sniffs out unittest and 

+

34 # nosetest special names, and does things with them. 

+

35 # https://github.com/pytest-dev/pytest/issues/8424 

+

36 def setup_test(self): 

+

37 """Per-test initialization. Override this as you wish.""" 

+

38 pass 

+

39 

+

40 def addCleanup(self, fn, *args): 

+

41 """Like unittest's addCleanup: code to call when the test is done.""" 

+

42 self._pytest_request.addfinalizer(lambda: fn(*args)) 

+

43 

+

44 def set_environ(self, name, value): 

+

45 """Set an environment variable `name` to be `value`.""" 

+

46 self._monkeypatch.setenv(name, value) 

+

47 

+

48 def del_environ(self, name): 

+

49 """Delete an environment variable, unless we set it.""" 

+

50 self._monkeypatch.delenv(name) 

+

51 

+

52 

+

53class TempDirMixin(object): 

+

54 """Provides temp dir and data file helpers for tests.""" 

+

55 

+

56 # Our own setting: most of these tests run in their own temp directory. 

+

57 # Set this to False in your subclass if you don't want a temp directory 

+

58 # created. 

+

59 run_in_temp_dir = True 

+

60 

+

61 @pytest.fixture(autouse=True) 

+

62 def _temp_dir(self, tmpdir_factory): 

+

63 """Create a temp dir for the tests, if they want it.""" 

+

64 if self.run_in_temp_dir: 

+

65 tmpdir = tmpdir_factory.mktemp("") 

+

66 self.temp_dir = str(tmpdir) 

+

67 with change_dir(self.temp_dir): 

+

68 # Modules should be importable from this temp directory. We don't 

+

69 # use '' because we make lots of different temp directories and 

+

70 # nose's caching importer can get confused. The full path prevents 

+

71 # problems. 

+

72 sys.path.insert(0, os.getcwd()) 

+

73 

+

74 yield None 

+

75 else: 

+

76 yield None 

+

77 

+

78 def make_file(self, filename, text="", bytes=b"", newline=None): 

+

79 """Make a file. See `tests.helpers.make_file`""" 

+

80 # pylint: disable=redefined-builtin # bytes 

+

81 assert self.run_in_temp_dir, "Only use make_file when running in a temp dir" 

+

82 return make_file(filename, text, bytes, newline) 

+

83 

+

84 

+

85class SysPathModulesMixin: 

+

86 """Auto-restore sys.path and the imported modules at the end of each test.""" 

+

87 

+

88 @pytest.fixture(autouse=True) 

+

89 def _save_sys_path(self): 

+

90 """Restore sys.path at the end of each test.""" 

+

91 old_syspath = sys.path[:] 

+

92 try: 

+

93 yield 

+

94 finally: 

+

95 sys.path = old_syspath 

+

96 

+

97 @pytest.fixture(autouse=True) 

+

98 def _module_saving(self): 

+

99 """Remove modules we imported during the test.""" 

+

100 self._old_modules = list(sys.modules) 

+

101 try: 

+

102 yield 

+

103 finally: 

+

104 self._cleanup_modules() 

+

105 

+

106 def _cleanup_modules(self): 

+

107 """Remove any new modules imported since our construction. 

+

108 

+

109 This lets us import the same source files for more than one test, or 

+

110 if called explicitly, within one test. 

+

111 

+

112 """ 

+

113 for m in [m for m in sys.modules if m not in self._old_modules]: 

+

114 del sys.modules[m] 

+

115 

+

116 def clean_local_file_imports(self): 

+

117 """Clean up the results of calls to `import_local_file`. 

+

118 

+

119 Use this if you need to `import_local_file` the same file twice in 

+

120 one test. 

+

121 

+

122 """ 

+

123 # So that we can re-import files, clean them out first. 

+

124 self._cleanup_modules() 

+

125 

+

126 # Also have to clean out the .pyc file, since the timestamp 

+

127 # resolution is only one second, a changed file might not be 

+

128 # picked up. 

+

129 remove_files("*.pyc", "*$py.class") 

+

130 if os.path.exists("__pycache__"): 

+

131 shutil.rmtree("__pycache__") 

+

132 

+

133 if importlib and hasattr(importlib, "invalidate_caches"): 

+

134 importlib.invalidate_caches() 

+

135 

+

136 

+

137class StdStreamCapturingMixin: 

+

138 """ 

+

139 Adapter from the pytest capsys fixture to more convenient methods. 

+

140 

+

141 This doesn't also output to the real stdout, so we probably want to move 

+

142 to "real" capsys when we can use fixtures in test methods. 

+

143 

+

144 Once you've used one of these methods, the capturing is reset, so another 

+

145 invocation will only return the delta. 

+

146 

+

147 """ 

+

148 @pytest.fixture(autouse=True) 

+

149 def _capcapsys(self, capsys): 

+

150 """Grab the fixture so our methods can use it.""" 

+

151 self.capsys = capsys 

+

152 

+

153 def stdouterr(self): 

+

154 """Returns (out, err), two strings for stdout and stderr.""" 

+

155 return self.capsys.readouterr() 

+

156 

+

157 def stdout(self): 

+

158 """Returns a string, the captured stdout.""" 

+

159 return self.capsys.readouterr().out 

+

160 

+

161 def stderr(self): 

+

162 """Returns a string, the captured stderr.""" 

+

163 return self.capsys.readouterr().err 

+
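The mixins in tests/mixins.py are meant to be stacked as base classes; each contributes an autouse pytest fixture, so a test class opts in simply by inheriting. A hedged sketch of such a class (the class name and test body are invented, and the particular combination shown is an assumption, not copied from the suite):

from tests.mixins import PytestBase, TempDirMixin, StdStreamCapturingMixin

class ExampleTest(StdStreamCapturingMixin, TempDirMixin, PytestBase):
    """Illustrative only: runs in a fresh temp dir and captures std streams."""

    def test_runs_in_temp_dir(self):
        # TempDirMixin: make_file writes relative to the per-test temp dir.
        self.make_file("data.txt", "hello\n")
        print(open("data.txt").read().strip())
        # StdStreamCapturingMixin: stdout() returns what was printed so far
        # and resets the capture.
        assert self.stdout().strip() == "hello"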
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_aa___init___py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_aa___init___py.html new file mode 100644 index 000000000..101e6ea6e --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_aa___init___py.html @@ -0,0 +1,67 @@ + + + + + + Coverage for tests/modules/aa/__init__.py: 100.000% + + + + + + + + + + +

1# aa 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_afile_py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_afile_py.html new file mode 100644 index 000000000..d0a52e619 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_afile_py.html @@ -0,0 +1,67 @@ + + + + + + Coverage for tests/modules/aa/afile.py: 100.000% + + + + + + + + + + +

1# afile.py 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_bb___init___py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_bb___init___py.html new file mode 100644 index 000000000..1d0d307f9 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_bb___init___py.html @@ -0,0 +1,67 @@ + + + + + + Coverage for tests/modules/aa/bb/__init__.py: 100.000% + + + + + + + + + + +

1# bb 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_bb_bfile_py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_bb_bfile_py.html new file mode 100644 index 000000000..0be2de0c7 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_bb_bfile_py.html @@ -0,0 +1,67 @@ + + + + + + Coverage for tests/modules/aa/bb/bfile.py: 100.000% + + + + + + + + + + +

1# bfile.py 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_bb_cc___init___py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_bb_cc___init___py.html new file mode 100644 index 000000000..1dbeff20a --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_bb_cc___init___py.html @@ -0,0 +1,66 @@ + + + + + + Coverage for tests/modules/aa/bb/cc/__init__.py: 100.000% + + + + + + + + + + +
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_bb_cc_cfile_py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_bb_cc_cfile_py.html new file mode 100644 index 000000000..e8253180d --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_aa_bb_cc_cfile_py.html @@ -0,0 +1,67 @@ + + + + + + Coverage for tests/modules/aa/bb/cc/cfile.py: 100.000% + + + + + + + + + + +

1# cfile.py 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1___init___py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1___init___py.html new file mode 100644 index 000000000..8fa74f561 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1___init___py.html @@ -0,0 +1,68 @@ + + + + + + Coverage for tests/modules/pkg1/__init__.py: 100.000% + + + + + + + + + + +

1# A simple package for testing with. 

+

2print("pkg1.__init__: %s" % (__name__,)) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1___main___py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1___main___py.html new file mode 100644 index 000000000..7c4dedc51 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1___main___py.html @@ -0,0 +1,69 @@ + + + + + + Coverage for tests/modules/pkg1/__main__.py: 100.000% + + + + + + + + + + +

1# Used in the tests for PyRunner 

+

2import sys 

+

3print("pkg1.__main__: passed %s" % sys.argv[1]) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1_runmod2_py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1_runmod2_py.html new file mode 100644 index 000000000..d33710d3d --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1_runmod2_py.html @@ -0,0 +1,72 @@ + + + + + + Coverage for tests/modules/pkg1/runmod2.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4# Used in the tests for PyRunner 

+

5import sys 

+

6print("runmod2: passed %s" % sys.argv[1]) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1_sub___init___py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1_sub___init___py.html new file mode 100644 index 000000000..fe787e081 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1_sub___init___py.html @@ -0,0 +1,66 @@ + + + + + + Coverage for tests/modules/pkg1/sub/__init__.py: 100.000% + + + + + + + + + + +
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1_sub___main___py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1_sub___main___py.html new file mode 100644 index 000000000..f11fefede --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1_sub___main___py.html @@ -0,0 +1,69 @@ + + + + + + Coverage for tests/modules/pkg1/sub/__main__.py: 100.000% + + + + + + + + + + +

1# Used in the tests for PyRunner 

+

2import sys 

+

3print("pkg1.sub.__main__: passed %s" % sys.argv[1]) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1_sub_runmod3_py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1_sub_runmod3_py.html new file mode 100644 index 000000000..363cd56e2 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg1_sub_runmod3_py.html @@ -0,0 +1,72 @@ + + + + + + Coverage for tests/modules/pkg1/sub/runmod3.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4# Used in the tests for PyRunner 

+

5import sys 

+

6print("runmod3: passed %s" % sys.argv[1]) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg2___init___py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg2___init___py.html new file mode 100644 index 000000000..1bd529e2e --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_pkg2___init___py.html @@ -0,0 +1,68 @@ + + + + + + Coverage for tests/modules/pkg2/__init__.py: 100.000% + + + + + + + + + + +

1# This is an __init__.py file, with no executable statements in it. 

+

2# This comment shouldn't confuse the parser. 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_plugins___init___py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_plugins___init___py.html new file mode 100644 index 000000000..2345e3903 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_plugins___init___py.html @@ -0,0 +1,66 @@ + + + + + + Coverage for tests/modules/plugins/__init__.py: 100.000% + + + + + + + + + + +
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_plugins_a_plugin_py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_plugins_a_plugin_py.html new file mode 100644 index 000000000..c2806a7cb --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_plugins_a_plugin_py.html @@ -0,0 +1,77 @@ + + + + + + Coverage for tests/modules/plugins/a_plugin.py: 100.000% + + + + + + + + + + +

1"""A plugin for tests to reference.""" 

+

2 

+

3from coverage import CoveragePlugin 

+

4 

+

5 

+

6class Plugin(CoveragePlugin): 

+

7 pass 

+

8 

+

9 

+

10def coverage_init(reg, options): 

+

11 reg.add_file_tracer(Plugin()) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_plugins_another_py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_plugins_another_py.html new file mode 100644 index 000000000..86e922067 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_plugins_another_py.html @@ -0,0 +1,80 @@ + + + + + + Coverage for tests/modules/plugins/another.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""A plugin for tests to reference.""" 

+

5 

+

6from coverage import CoveragePlugin 

+

7 

+

8 

+

9class Plugin(CoveragePlugin): 

+

10 pass 

+

11 

+

12 

+

13def coverage_init(reg, options): 

+

14 reg.add_file_tracer(Plugin()) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_process_test___init___py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_process_test___init___py.html new file mode 100644 index 000000000..2cf808c70 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_process_test___init___py.html @@ -0,0 +1,66 @@ + + + + + + Coverage for tests/modules/process_test/__init__.py: 100.000% + + + + + + + + + + +
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_process_test_try_execfile_py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_process_test_try_execfile_py.html new file mode 100644 index 000000000..4c8dd2dd4 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_process_test_try_execfile_py.html @@ -0,0 +1,178 @@ + + + + + + Coverage for tests/modules/process_test/try_execfile.py: 97.183% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Test file for run_python_file. 

+

5 

+

6This file is executed two ways:: 

+

7 

+

8 $ coverage run try_execfile.py 

+

9 

+

10and:: 

+

11 

+

12 $ python try_execfile.py 

+

13 

+

14The output is compared to see that the program execution context is the same 

+

15under coverage and under Python. 

+

16 

+

17It is not crucial that the execution be identical, there are some differences 

+

18that are OK. This program canonicalizes the output to gloss over those 

+

19differences and get a clean diff. 

+

20 

+

21""" 

+

22 

+

23import itertools 

+

24import json 

+

25import os 

+

26import sys 

+

27 

+

28# sys.path varies by execution environments. Coverage.py uses setuptools to 

+

29# make console scripts, which means pkg_resources is imported. pkg_resources 

+

30# removes duplicate entries from sys.path. So we do that too, since the extra 

+

31# entries don't affect the running of the program. 

+

32 

+

33def same_file(p1, p2): 

+

34 """Determine if `p1` and `p2` refer to the same existing file.""" 

+

35 if not p1: 35 ↛ 36line 35 didn't jump to line 36, because the condition on line 35 was never true

+

36 return not p2 

+

37 if not os.path.exists(p1): 

+

38 return False 

+

39 if not os.path.exists(p2): 

+

40 return False 

+

41 if hasattr(os.path, "samefile"): 

+

42 return os.path.samefile(p1, p2) 

+

43 else: 

+

44 norm1 = os.path.normcase(os.path.normpath(p1)) 

+

45 norm2 = os.path.normcase(os.path.normpath(p2)) 

+

46 return norm1 == norm2 

+

47 

+

48def without_same_files(filenames): 

+

49 """Return the list `filenames` with duplicates (by same_file) removed.""" 

+

50 reduced = [] 

+

51 for filename in filenames: 

+

52 if not any(same_file(filename, other) for other in reduced): 

+

53 reduced.append(filename) 

+

54 return reduced 

+

55 

+

56cleaned_sys_path = [os.path.normcase(p) for p in without_same_files(sys.path)] 

+

57 

+

58DATA = "xyzzy" 

+

59 

+

60import __main__ 

+

61 

+

62def my_function(a): 

+

63 """A function to force execution of module-level values.""" 

+

64 return "my_fn(%r)" % a 

+

65 

+

66FN_VAL = my_function("fooey") 

+

67 

+

68loader = globals().get('__loader__') 

+

69spec = globals().get('__spec__') 

+

70 

+

71# A more compact ad-hoc grouped-by-first-letter list of builtins. 

+

72CLUMPS = "ABC,DEF,GHI,JKLMN,OPQR,ST,U,VWXYZ_,ab,cd,efg,hij,lmno,pqr,stuvwxyz".split(",") 

+

73 

+

74def word_group(w): 

+

75 """Figure out which CLUMP the first letter of w is in.""" 

+

76 for i, clump in enumerate(CLUMPS): 

+

77 if w[0] in clump: 

+

78 return i 

+

79 return 99 

+

80 

+

81builtin_dir = [" ".join(s) for _, s in itertools.groupby(dir(__builtins__), key=word_group)] 

+

82 

+

83globals_to_check = { 

+

84 'os.getcwd': os.getcwd(), 

+

85 '__name__': __name__, 

+

86 '__file__': __file__, 

+

87 '__doc__': __doc__, 

+

88 '__builtins__.has_open': hasattr(__builtins__, 'open'), 

+

89 '__builtins__.dir': builtin_dir, 

+

90 '__loader__ exists': loader is not None, 

+

91 '__package__': __package__, 

+

92 '__spec__ exists': spec is not None, 

+

93 'DATA': DATA, 

+

94 'FN_VAL': FN_VAL, 

+

95 '__main__.DATA': getattr(__main__, "DATA", "nothing"), 

+

96 'argv0': sys.argv[0], 

+

97 'argv1-n': sys.argv[1:], 

+

98 'path': cleaned_sys_path, 

+

99} 

+

100 

+

101if loader is not None: 

+

102 globals_to_check.update({ 

+

103 '__loader__.fullname': getattr(loader, 'fullname', None) or getattr(loader, 'name', None) 

+

104 }) 

+

105 

+

106if spec is not None: 

+

107 globals_to_check.update({ 

+

108 '__spec__.' + aname: getattr(spec, aname) 

+

109 for aname in ['name', 'origin', 'submodule_search_locations', 'parent', 'has_location'] 

+

110 }) 

+

111 

+

112print(json.dumps(globals_to_check, indent=4, sort_keys=True)) 

+
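try_execfile.py exists only to print a canonicalized JSON snapshot of its execution context, so that a run under "coverage run" and a run under plain Python can be compared. A rough, hedged sketch of doing that comparison by hand; the subprocess driver below is illustrative and assumes it is launched from the repository root, and the suite's own comparison lives elsewhere:

import json
import subprocess
import sys

def context(cmd):
    """Run `cmd` in the module's directory and parse the JSON it prints."""
    out = subprocess.check_output(cmd, cwd="tests/modules/process_test")
    return json.loads(out)

plain = context([sys.executable, "try_execfile.py"])
covered = context([sys.executable, "-m", "coverage", "run", "try_execfile.py"])

# Ideally the two snapshots describe the same execution context; any keys
# whose values differ point at environment details that still need masking.
print(sorted(k for k in plain if plain[k] != covered.get(k)))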
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_modules_runmod1_py.html b/reports/20210322_66173dc24d/htmlcov/tests_modules_runmod1_py.html new file mode 100644 index 000000000..dfd6395d6 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_modules_runmod1_py.html @@ -0,0 +1,72 @@ + + + + + + Coverage for tests/modules/runmod1.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4# Used in the tests for PyRunner 

+

5import sys 

+

6print("runmod1: passed %s" % sys.argv[1]) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_osinfo_py.html b/reports/20210322_66173dc24d/htmlcov/tests_osinfo_py.html new file mode 100644 index 000000000..77ccfe519 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_osinfo_py.html @@ -0,0 +1,138 @@ + + + + + + Coverage for tests/osinfo.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""OS information for testing.""" 

+

5 

+

6from coverage import env 

+

7 

+

8 

+

9if env.WINDOWS: 

+

10 # Windows implementation 

+

11 def process_ram(): 

+

12 """How much RAM is this process using? (Windows)""" 

+

13 import ctypes 

+

14 # From: http://lists.ubuntu.com/archives/bazaar-commits/2009-February/011990.html 

+

15 class PROCESS_MEMORY_COUNTERS_EX(ctypes.Structure): 

+

16 """Used by GetProcessMemoryInfo""" 

+

17 _fields_ = [ 

+

18 ('cb', ctypes.c_ulong), 

+

19 ('PageFaultCount', ctypes.c_ulong), 

+

20 ('PeakWorkingSetSize', ctypes.c_size_t), 

+

21 ('WorkingSetSize', ctypes.c_size_t), 

+

22 ('QuotaPeakPagedPoolUsage', ctypes.c_size_t), 

+

23 ('QuotaPagedPoolUsage', ctypes.c_size_t), 

+

24 ('QuotaPeakNonPagedPoolUsage', ctypes.c_size_t), 

+

25 ('QuotaNonPagedPoolUsage', ctypes.c_size_t), 

+

26 ('PagefileUsage', ctypes.c_size_t), 

+

27 ('PeakPagefileUsage', ctypes.c_size_t), 

+

28 ('PrivateUsage', ctypes.c_size_t), 

+

29 ] 

+

30 

+

31 mem_struct = PROCESS_MEMORY_COUNTERS_EX() 

+

32 ret = ctypes.windll.psapi.GetProcessMemoryInfo( 

+

33 ctypes.windll.kernel32.GetCurrentProcess(), 

+

34 ctypes.byref(mem_struct), 

+

35 ctypes.sizeof(mem_struct) 

+

36 ) 

+

37 if not ret: # pragma: part covered 

+

38 return 0 # pragma: cant happen 

+

39 return mem_struct.PrivateUsage 

+

40 

+

41elif env.LINUX: 

+

42 # Linux implementation 

+

43 import os 

+

44 

+

45 _scale = {'kb': 1024, 'mb': 1024*1024} 

+

46 

+

47 def _VmB(key): 

+

48 """Read the /proc/PID/status file to find memory use.""" 

+

49 try: 

+

50 # Get pseudo file /proc/<pid>/status 

+

51 with open('/proc/%d/status' % os.getpid()) as t: 

+

52 v = t.read() 

+

53 except IOError: # pragma: cant happen 

+

54 return 0 # non-Linux? 

+

55 # Get VmKey line e.g. 'VmRSS: 9999 kB\n ...' 

+

56 i = v.index(key) 

+

57 v = v[i:].split(None, 3) 

+

58 if len(v) < 3: # pragma: part covered 

+

59 return 0 # pragma: cant happen 

+

60 # Convert Vm value to bytes. 

+

61 return int(float(v[1]) * _scale[v[2].lower()]) 

+

62 

+

63 def process_ram(): 

+

64 """How much RAM is this process using? (Linux implementation)""" 

+

65 return _VmB('VmRSS') 

+

66 

+

67else: 

+

68 # Generic implementation. 

+

69 def process_ram(): 

+

70 """How much RAM is this process using? (stdlib implementation)""" 

+

71 import resource 

+

72 return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss 

+
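osinfo.py gives the suite one process_ram() function with three platform-specific backends. A tiny usage sketch; the size of the reported growth (and, on the fallback branch, even its unit) depends on the platform, so the numbers are only indicative:

from tests.osinfo import process_ram

before = process_ram()
blob = bytearray(10 * 1024 * 1024)   # hold on to roughly 10 MB
after = process_ram()
print("process RAM grew by about", after - before)
del blob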
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_plugin1_py.html b/reports/20210322_66173dc24d/htmlcov/tests_plugin1_py.html new file mode 100644 index 000000000..e8547b84a --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_plugin1_py.html @@ -0,0 +1,118 @@ + + + + + + Coverage for tests/plugin1.py: 61.538% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""A file tracer plugin for test_plugins.py to import.""" 

+

5 

+

6import os.path 

+

7 

+

8import coverage 

+

9 

+

10 

+

11class Plugin(coverage.CoveragePlugin): 

+

12 """A file tracer plugin to import, so that it isn't in the test's current directory.""" 

+

13 

+

14 def file_tracer(self, filename): 

+

15 """Trace only files named xyz.py""" 

+

16 if "xyz.py" in filename: 

+

17 return FileTracer(filename) 

+

18 return None 

+

19 

+

20 def file_reporter(self, filename): 

+

21 return FileReporter(filename) 

+

22 

+

23 

+

24class FileTracer(coverage.FileTracer): 

+

25 """A FileTracer emulating a simple static plugin.""" 

+

26 

+

27 def __init__(self, filename): 

+

28 """Claim that */*xyz.py was actually sourced from /src/*ABC.zz""" 

+

29 self._filename = filename 

+

30 self._source_filename = os.path.join( 

+

31 "/src", 

+

32 os.path.basename(filename.replace("xyz.py", "ABC.zz")) 

+

33 ) 

+

34 

+

35 def source_filename(self): 

+

36 return self._source_filename 

+

37 

+

38 def line_number_range(self, frame): 

+

39 """Map the line number X to X05,X06,X07.""" 

+

40 lineno = frame.f_lineno 

+

41 return lineno*100+5, lineno*100+7 

+

42 

+

43 

+

44class FileReporter(coverage.FileReporter): 

+

45 """Dead-simple FileReporter.""" 

+

46 def lines(self): 

+

47 return {105, 106, 107, 205, 206, 207} 

+

48 

+

49 

+

50def coverage_init(reg, options): # pylint: disable=unused-argument 

+

51 """Called by coverage to initialize the plugins here.""" 

+

52 reg.add_file_tracer(Plugin()) 

+
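plugin1.py walks the whole file-tracer plugin surface: Plugin.file_tracer() claims files named xyz.py, FileTracer remaps them to /src/*ABC.zz and rescales line numbers, FileReporter supplies the line set, and coverage_init() registers it all. For orientation, a hedged sketch of enabling such a plugin programmatically; it uses the standard "run:plugins" option (the equivalent of "plugins = tests.plugin1" under [run] in .coveragerc) and assumes it is run from the repository root so tests.plugin1 is importable:

import coverage

cov = coverage.Coverage()
# Name the module whose coverage_init() should be called with the registry.
cov.set_option("run:plugins", ["tests.plugin1"])
cov.start()
# ... importing and running a file named xyz.py here would be credited to
# the remapped /src/...ABC.zz filename by the plugin's FileTracer ...
cov.stop()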
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_plugin2_py.html b/reports/20210322_66173dc24d/htmlcov/tests_plugin2_py.html new file mode 100644 index 000000000..d3a884290 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_plugin2_py.html @@ -0,0 +1,116 @@ + + + + + + Coverage for tests/plugin2.py: 54.839% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""A file tracer plugin for test_plugins.py to import.""" 

+

5 

+

6import os.path 

+

7 

+

8import coverage 

+

9 

+

10 

+

11class Plugin(coverage.CoveragePlugin): 

+

12 """A file tracer plugin for testing.""" 

+

13 def file_tracer(self, filename): 

+

14 if "render.py" in filename: 

+

15 return RenderFileTracer() 

+

16 return None 

+

17 

+

18 def file_reporter(self, filename): 

+

19 return FileReporter(filename) 

+

20 

+

21 

+

22class RenderFileTracer(coverage.FileTracer): 

+

23 """A FileTracer using information from the caller.""" 

+

24 

+

25 def has_dynamic_source_filename(self): 

+

26 return True 

+

27 

+

28 def dynamic_source_filename(self, filename, frame): 

+

29 if frame.f_code.co_name != "render": 

+

30 return None 

+

31 source_filename = os.path.abspath(frame.f_locals['filename']) 

+

32 return source_filename 

+

33 

+

34 def line_number_range(self, frame): 

+

35 lineno = frame.f_locals['linenum'] 

+

36 return lineno, lineno+1 

+

37 

+

38 

+

39class FileReporter(coverage.FileReporter): 

+

40 """A goofy file reporter.""" 

+

41 def lines(self): 

+

42 # Goofy test arrangement: claim that the file has as many lines as the 

+

43 # number in its name. 

+

44 num = os.path.basename(self.filename).split(".")[0].split("_")[1] 

+

45 return set(range(1, int(num)+1)) 

+

46 

+

47 

+

48def coverage_init(reg, options): # pylint: disable=unused-argument 

+

49 """Called by coverage to initialize the plugins here.""" 

+

50 reg.add_file_tracer(Plugin()) 

+
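plugin2.py is the dynamic-filename variant: rather than claiming files by name, its RenderFileTracer inspects the calling frame, expecting a function literally named render whose locals hold the template filename and line number. Roughly the shape of code it targets; this helper is a reconstruction for illustration, not copied from the test suite:

def render(filename, linenum):
    # While this frame is live, RenderFileTracer reads the "filename" and
    # "linenum" locals and credits coverage to that template line instead
    # of render.py itself.
    return "rendered %s:%d" % (filename, linenum)

# FileReporter.lines() parses the count out of the name, so "template_5"
# claims lines 1-5.
render("template_5.html", 3)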
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_plugin_config_py.html b/reports/20210322_66173dc24d/htmlcov/tests_plugin_config_py.html new file mode 100644 index 000000000..555a28466 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_plugin_config_py.html @@ -0,0 +1,88 @@ + + + + + + Coverage for tests/plugin_config.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""A configuring plugin for test_plugins.py to import.""" 

+

5 

+

6import coverage 

+

7 

+

8 

+

9class Plugin(coverage.CoveragePlugin): 

+

10 """A configuring plugin for testing.""" 

+

11 def configure(self, config): 

+

12 """Configure all the things!""" 

+

13 opt_name = "report:exclude_lines" 

+

14 exclude_lines = config.get_option(opt_name) 

+

15 exclude_lines.append(r"pragma: custom") 

+

16 exclude_lines.append(r"pragma: or whatever") 

+

17 config.set_option(opt_name, exclude_lines) 

+

18 

+

19 

+

20def coverage_init(reg, options): # pylint: disable=unused-argument 

+

21 """Called by coverage to initialize the plugins here.""" 

+

22 reg.add_configurer(Plugin()) 

+
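plugin_config.py shows the third plugin flavor, a configurer: coverage_init() calls add_configurer(), and the configure() hook then appends two extra exclusion pragmas to report:exclude_lines. A hedged sketch of checking that effect; it assumes configure() has run by the time measurement starts and that the repository root is the working directory, neither of which is asserted by the listing itself:

import coverage

cov = coverage.Coverage()
cov.set_option("run:plugins", ["tests.plugin_config"])
cov.start()
cov.stop()

excludes = cov.get_option("report:exclude_lines")
# Should now include the two custom pragmas appended by configure().
print("pragma: custom" in excludes, "pragma: or whatever" in excludes)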
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_annotate_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_annotate_py.html new file mode 100644 index 000000000..8e7cb5154 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_annotate_py.html @@ -0,0 +1,174 @@ + + + + + + Coverage for tests/test_annotate.py: 100.000% + + + + + + + + + + +

1# coding: utf-8 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Tests for annotation from coverage.py.""" 

+

6 

+

7import coverage 

+

8 

+

9from tests.coveragetest import CoverageTest 

+

10from tests.goldtest import compare, gold_path 

+

11 

+

12 

+

13class AnnotationGoldTest(CoverageTest): 

+

14 """Test the annotate feature with gold files.""" 

+

15 

+

16 def make_multi(self): 

+

17 """Make a few source files we need for the tests.""" 

+

18 self.make_file("multi.py", """\ 

+

19 import a.a 

+

20 import b.b 

+

21 

+

22 a.a.a(1) 

+

23 b.b.b(2) 

+

24 """) 

+

25 self.make_file("a/__init__.py") 

+

26 self.make_file("a/a.py", """\ 

+

27 def a(x): 

+

28 if x == 1: 

+

29 print("x is 1") 

+

30 else: 

+

31 print("x is not 1") 

+

32 """) 

+

33 self.make_file("b/__init__.py") 

+

34 self.make_file("b/b.py", """\ 

+

35 def b(x): 

+

36 msg = "x is %s" % x 

+

37 print(msg) 

+

38 """) 

+

39 

+

40 def test_multi(self): 

+

41 self.make_multi() 

+

42 cov = coverage.Coverage() 

+

43 self.start_import_stop(cov, "multi") 

+

44 cov.annotate() 

+

45 

+

46 compare(gold_path("annotate/multi"), ".", "*,cover") 

+

47 

+

48 def test_annotate_dir(self): 

+

49 self.make_multi() 

+

50 cov = coverage.Coverage(source=["."]) 

+

51 self.start_import_stop(cov, "multi") 

+

52 cov.annotate(directory="out_anno_dir") 

+

53 

+

54 compare(gold_path("annotate/anno_dir"), "out_anno_dir", "*,cover") 

+

55 

+

56 def test_encoding(self): 

+

57 self.make_file("utf8.py", """\ 

+

58 # -*- coding: utf-8 -*- 

+

59 # This comment has an accent: é 

+

60 

+

61 print("spam eggs") 

+

62 """) 

+

63 cov = coverage.Coverage() 

+

64 self.start_import_stop(cov, "utf8") 

+

65 cov.annotate() 

+

66 compare(gold_path("annotate/encodings"), ".", "*,cover") 

+

67 

+

68 def test_white(self): 

+

69 self.make_file("white.py", """\ 

+

70 # A test case sent to me by Steve White 

+

71 

+

72 def f(self): 

+

73 if self==1: 

+

74 pass 

+

75 elif self.m('fred'): 

+

76 pass 

+

77 elif (g==1) and (b==2): 

+

78 pass 

+

79 elif self.m('fred')==True: 

+

80 pass 

+

81 elif ((g==1) and (b==2))==True: 

+

82 pass 

+

83 else: 

+

84 pass 

+

85 

+

86 def g(x): 

+

87 if x == 1: 

+

88 a = 1 

+

89 else: 

+

90 a = 2 

+

91 

+

92 g(1) 

+

93 

+

94 def h(x): 

+

95 if 0: #pragma: no cover 

+

96 pass 

+

97 if x == 1: 

+

98 a = 1 

+

99 else: 

+

100 a = 2 

+

101 

+

102 h(2) 

+

103 """) 

+

104 

+

105 cov = coverage.Coverage() 

+

106 self.start_import_stop(cov, "white") 

+

107 cov.annotate() 

+

108 compare(gold_path("annotate/annotate"), ".", "*,cover") 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_api_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_api_py.html new file mode 100644 index 000000000..e69c76481 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_api_py.html @@ -0,0 +1,1252 @@ + + + + + + Coverage for tests/test_api.py: 99.854% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests for coverage.py's API.""" 

+

5 

+

6import fnmatch 

+

7import glob 

+

8import os 

+

9import os.path 

+

10import re 

+

11import shutil 

+

12import sys 

+

13import textwrap 

+

14 

+

15import pytest 

+

16 

+

17import coverage 

+

18from coverage import env 

+

19from coverage.backward import code_object, import_local_file, StringIO 

+

20from coverage.data import line_counts 

+

21from coverage.files import abs_file, relative_filename 

+

22from coverage.misc import CoverageException 

+

23 

+

24from tests.coveragetest import CoverageTest, TESTS_DIR, UsingModulesMixin 

+

25from tests.helpers import assert_count_equal, change_dir, nice_file 

+

26 

+

27 

+

28class ApiTest(CoverageTest): 

+

29 """Api-oriented tests for coverage.py.""" 

+

30 

+

31 def clean_files(self, files, pats): 

+

32 """Remove names matching `pats` from `files`, a list of file names.""" 

+

33 good = [] 

+

34 for f in files: 

+

35 for pat in pats: 

+

36 if fnmatch.fnmatch(f, pat): 

+

37 break 

+

38 else: 

+

39 good.append(f) 

+

40 return good 

+

41 

+

42 def assertFiles(self, files): 

+

43 """Assert that the files here are `files`, ignoring the usual junk.""" 

+

44 here = os.listdir(".") 

+

45 here = self.clean_files(here, ["*.pyc", "__pycache__", "*$py.class"]) 

+

46 assert_count_equal(here, files) 

+

47 

+

48 def test_unexecuted_file(self): 

+

49 cov = coverage.Coverage() 

+

50 

+

51 self.make_file("mycode.py", """\ 

+

52 a = 1 

+

53 b = 2 

+

54 if b == 3: 

+

55 c = 4 

+

56 d = 5 

+

57 """) 

+

58 

+

59 self.make_file("not_run.py", """\ 

+

60 fooey = 17 

+

61 """) 

+

62 

+

63 # Import the Python file, executing it. 

+

64 self.start_import_stop(cov, "mycode") 

+

65 

+

66 _, statements, missing, _ = cov.analysis("not_run.py") 

+

67 assert statements == [1] 

+

68 assert missing == [1] 

+

69 

+

70 def test_filenames(self): 

+

71 

+

72 self.make_file("mymain.py", """\ 

+

73 import mymod 

+

74 a = 1 

+

75 """) 

+

76 

+

77 self.make_file("mymod.py", """\ 

+

78 fooey = 17 

+

79 """) 

+

80 

+

81 # Import the Python file, executing it. 

+

82 cov = coverage.Coverage() 

+

83 self.start_import_stop(cov, "mymain") 

+

84 

+

85 filename, _, _, _ = cov.analysis("mymain.py") 

+

86 assert os.path.basename(filename) == "mymain.py" 

+

87 filename, _, _, _ = cov.analysis("mymod.py") 

+

88 assert os.path.basename(filename) == "mymod.py" 

+

89 

+

90 filename, _, _, _ = cov.analysis(sys.modules["mymain"]) 

+

91 assert os.path.basename(filename) == "mymain.py" 

+

92 filename, _, _, _ = cov.analysis(sys.modules["mymod"]) 

+

93 assert os.path.basename(filename) == "mymod.py" 

+

94 

+

95 # Import the Python file, executing it again, once it's been compiled 

+

96 # already. 

+

97 cov = coverage.Coverage() 

+

98 self.start_import_stop(cov, "mymain") 

+

99 

+

100 filename, _, _, _ = cov.analysis("mymain.py") 

+

101 assert os.path.basename(filename) == "mymain.py" 

+

102 filename, _, _, _ = cov.analysis("mymod.py") 

+

103 assert os.path.basename(filename) == "mymod.py" 

+

104 

+

105 filename, _, _, _ = cov.analysis(sys.modules["mymain"]) 

+

106 assert os.path.basename(filename) == "mymain.py" 

+

107 filename, _, _, _ = cov.analysis(sys.modules["mymod"]) 

+

108 assert os.path.basename(filename) == "mymod.py" 

+

109 

+

110 def test_ignore_stdlib(self): 

+

111 self.make_file("mymain.py", """\ 

+

112 import colorsys 

+

113 a = 1 

+

114 hls = colorsys.rgb_to_hls(1.0, 0.5, 0.0) 

+

115 """) 

+

116 

+

117 # Measure without the stdlib. 

+

118 cov1 = coverage.Coverage() 

+

119 assert cov1.config.cover_pylib is False 

+

120 self.start_import_stop(cov1, "mymain") 

+

121 

+

122 # some statements were marked executed in mymain.py 

+

123 _, statements, missing, _ = cov1.analysis("mymain.py") 

+

124 assert statements != missing 

+

125 # but none were in colorsys.py 

+

126 _, statements, missing, _ = cov1.analysis("colorsys.py") 

+

127 assert statements == missing 

+

128 

+

129 # Measure with the stdlib. 

+

130 cov2 = coverage.Coverage(cover_pylib=True) 

+

131 self.start_import_stop(cov2, "mymain") 

+

132 

+

133 # some statements were marked executed in mymain.py 

+

134 _, statements, missing, _ = cov2.analysis("mymain.py") 

+

135 assert statements != missing 

+

136 # and some were marked executed in colorsys.py 

+

137 _, statements, missing, _ = cov2.analysis("colorsys.py") 

+

138 assert statements != missing 

+

139 

+

140 def test_include_can_measure_stdlib(self): 

+

141 self.make_file("mymain.py", """\ 

+

142 import colorsys, random 

+

143 a = 1 

+

144 r, g, b = [random.random() for _ in range(3)] 

+

145 hls = colorsys.rgb_to_hls(r, g, b) 

+

146 """) 

+

147 

+

148 # Measure without the stdlib, but include colorsys. 

+

149 cov1 = coverage.Coverage(cover_pylib=False, include=["*/colorsys.py"]) 

+

150 self.start_import_stop(cov1, "mymain") 

+

151 

+

152 # some statements were marked executed in colorsys.py 

+

153 _, statements, missing, _ = cov1.analysis("colorsys.py") 

+

154 assert statements != missing 

+

155 # but none were in random.py 

+

156 _, statements, missing, _ = cov1.analysis("random.py") 

+

157 assert statements == missing 

+

158 

+

159 def test_exclude_list(self): 

+

160 cov = coverage.Coverage() 

+

161 cov.clear_exclude() 

+

162 assert cov.get_exclude_list() == [] 

+

163 cov.exclude("foo") 

+

164 assert cov.get_exclude_list() == ["foo"] 

+

165 cov.exclude("bar") 

+

166 assert cov.get_exclude_list() == ["foo", "bar"] 

+

167 assert cov._exclude_regex('exclude') == "(?:foo)|(?:bar)" 

+

168 cov.clear_exclude() 

+

169 assert cov.get_exclude_list() == [] 

+

170 

+

171 def test_exclude_partial_list(self): 

+

172 cov = coverage.Coverage() 

+

173 cov.clear_exclude(which='partial') 

+

174 assert cov.get_exclude_list(which='partial') == [] 

+

175 cov.exclude("foo", which='partial') 

+

176 assert cov.get_exclude_list(which='partial') == ["foo"] 

+

177 cov.exclude("bar", which='partial') 

+

178 assert cov.get_exclude_list(which='partial') == ["foo", "bar"] 

+

179 assert cov._exclude_regex(which='partial') == "(?:foo)|(?:bar)" 

+

180 cov.clear_exclude(which='partial') 

+

181 assert cov.get_exclude_list(which='partial') == [] 

+

182 

+

183 def test_exclude_and_partial_are_separate_lists(self): 

+

184 cov = coverage.Coverage() 

+

185 cov.clear_exclude(which='partial') 

+

186 cov.clear_exclude(which='exclude') 

+

187 cov.exclude("foo", which='partial') 

+

188 assert cov.get_exclude_list(which='partial') == ['foo'] 

+

189 assert cov.get_exclude_list(which='exclude') == [] 

+

190 cov.exclude("bar", which='exclude') 

+

191 assert cov.get_exclude_list(which='partial') == ['foo'] 

+

192 assert cov.get_exclude_list(which='exclude') == ['bar'] 

+

193 cov.exclude("p2", which='partial') 

+

194 cov.exclude("e2", which='exclude') 

+

195 assert cov.get_exclude_list(which='partial') == ['foo', 'p2'] 

+

196 assert cov.get_exclude_list(which='exclude') == ['bar', 'e2'] 

+

197 cov.clear_exclude(which='partial') 

+

198 assert cov.get_exclude_list(which='partial') == [] 

+

199 assert cov.get_exclude_list(which='exclude') == ['bar', 'e2'] 

+

200 cov.clear_exclude(which='exclude') 

+

201 assert cov.get_exclude_list(which='partial') == [] 

+

202 assert cov.get_exclude_list(which='exclude') == [] 

+

203 

+

204 def test_datafile_default(self): 

+

205 # Default data file behavior: it's .coverage 

+

206 self.make_file("datatest1.py", """\ 

+

207 fooey = 17 

+

208 """) 

+

209 

+

210 self.assertFiles(["datatest1.py"]) 

+

211 cov = coverage.Coverage() 

+

212 self.start_import_stop(cov, "datatest1") 

+

213 cov.save() 

+

214 self.assertFiles(["datatest1.py", ".coverage"]) 

+

215 

+

216 def test_datafile_specified(self): 

+

217 # You can specify the data file name. 

+

218 self.make_file("datatest2.py", """\ 

+

219 fooey = 17 

+

220 """) 

+

221 

+

222 self.assertFiles(["datatest2.py"]) 

+

223 cov = coverage.Coverage(data_file="cov.data") 

+

224 self.start_import_stop(cov, "datatest2") 

+

225 cov.save() 

+

226 self.assertFiles(["datatest2.py", "cov.data"]) 

+

227 

+

228 def test_datafile_and_suffix_specified(self): 

+

229 # You can specify the data file name and suffix. 

+

230 self.make_file("datatest3.py", """\ 

+

231 fooey = 17 

+

232 """) 

+

233 

+

234 self.assertFiles(["datatest3.py"]) 

+

235 cov = coverage.Coverage(data_file="cov.data", data_suffix="14") 

+

236 self.start_import_stop(cov, "datatest3") 

+

237 cov.save() 

+

238 self.assertFiles(["datatest3.py", "cov.data.14"]) 

+

239 

+

240 def test_datafile_from_rcfile(self): 

+

241 # You can specify the data file name in the .coveragerc file 

+

242 self.make_file("datatest4.py", """\ 

+

243 fooey = 17 

+

244 """) 

+

245 self.make_file(".coveragerc", """\ 

+

246 [run] 

+

247 data_file = mydata.dat 

+

248 """) 

+

249 

+

250 self.assertFiles(["datatest4.py", ".coveragerc"]) 

+

251 cov = coverage.Coverage() 

+

252 self.start_import_stop(cov, "datatest4") 

+

253 cov.save() 

+

254 self.assertFiles(["datatest4.py", ".coveragerc", "mydata.dat"]) 

+

255 

+

256 def test_deep_datafile(self): 

+

257 self.make_file("datatest5.py", "fooey = 17") 

+

258 self.assertFiles(["datatest5.py"]) 

+

259 cov = coverage.Coverage(data_file="deep/sub/cov.data") 

+

260 self.start_import_stop(cov, "datatest5") 

+

261 cov.save() 

+

262 self.assertFiles(["datatest5.py", "deep"]) 

+

263 self.assert_exists("deep/sub/cov.data") 

+

264 

+

265 def test_datafile_none(self): 

+

266 cov = coverage.Coverage(data_file=None) 

+

267 

+

268 def f1(): 

+

269 a = 1 # pylint: disable=unused-variable 

+

270 

+

271 one_line_number = code_object(f1).co_firstlineno + 1 

+

272 lines = [] 

+

273 

+

274 def run_one_function(f): 

+

275 cov.erase() 

+

276 cov.start() 

+

277 f() 

+

278 cov.stop() 

+

279 

+

280 fs = cov.get_data().measured_files() 

+

281 lines.append(cov.get_data().lines(list(fs)[0])) 

+

282 

+

283 run_one_function(f1) 

+

284 run_one_function(f1) 

+

285 run_one_function(f1) 

+

286 assert lines == [[one_line_number]] * 3 

+

287 self.assert_doesnt_exist(".coverage") 

+

288 assert os.listdir(".") == [] 

+

289 

+

290 def test_empty_reporting(self): 

+

291 # empty summary reports raise exception, just like the xml report 

+

292 cov = coverage.Coverage() 

+

293 cov.erase() 

+

294 with pytest.raises(CoverageException, match="No data to report."): 

+

295 cov.report() 

+

296 

+

297 def test_completely_zero_reporting(self): 

+

298 # https://github.com/nedbat/coveragepy/issues/884 

+

299 # If nothing was measured, the file-touching didn't happen properly. 

+

300 self.make_file("foo/bar.py", "print('Never run')") 

+

301 self.make_file("test.py", "assert True") 

+

302 cov = coverage.Coverage(source=["foo"]) 

+

303 self.start_import_stop(cov, "test") 

+

304 cov.report() 

+

305 # Name Stmts Miss Cover 

+

306 # -------------------------------- 

+

307 # foo/bar.py 1 1 0% 

+

308 # -------------------------------- 

+

309 # TOTAL 1 1 0% 

+

310 

+

311 last = self.last_line_squeezed(self.stdout()) 

+

312 assert "TOTAL 1 1 0%" == last 

+

313 

+

314 def test_cov4_data_file(self): 

+

315 cov4_data = ( 

+

316 "!coverage.py: This is a private format, don't read it directly!" 

+

317 '{"lines":{"/private/tmp/foo.py":[1,5,2,3]}}' 

+

318 ) 

+

319 self.make_file(".coverage", cov4_data) 

+

320 cov = coverage.Coverage() 

+

321 with pytest.raises(CoverageException, match="Looks like a coverage 4.x data file"): 

+

322 cov.load() 

+

323 cov.erase() 

+

324 

+

325 def make_code1_code2(self): 

+

326 """Create the code1.py and code2.py files.""" 

+

327 self.make_file("code1.py", """\ 

+

328 code1 = 1 

+

329 """) 

+

330 self.make_file("code2.py", """\ 

+

331 code2 = 1 

+

332 code2 = 2 

+

333 """) 

+

334 

+

335 def check_code1_code2(self, cov): 

+

336 """Check the analysis is correct for code1.py and code2.py.""" 

+

337 _, statements, missing, _ = cov.analysis("code1.py") 

+

338 assert statements == [1] 

+

339 assert missing == [] 

+

340 _, statements, missing, _ = cov.analysis("code2.py") 

+

341 assert statements == [1, 2] 

+

342 assert missing == [] 

+

343 

+

344 def test_start_stop_start_stop(self): 

+

345 self.make_code1_code2() 

+

346 cov = coverage.Coverage() 

+

347 self.start_import_stop(cov, "code1") 

+

348 cov.save() 

+

349 self.start_import_stop(cov, "code2") 

+

350 self.check_code1_code2(cov) 

+

351 

+

352 def test_start_save_stop(self): 

+

353 self.make_code1_code2() 

+

354 cov = coverage.Coverage() 

+

355 cov.start() 

+

356 import_local_file("code1") # pragma: nested 

+

357 cov.save() # pragma: nested 

+

358 import_local_file("code2") # pragma: nested 

+

359 cov.stop() # pragma: nested 

+

360 self.check_code1_code2(cov) 

+

361 

+

362 def test_start_save_nostop(self): 

+

363 self.make_code1_code2() 

+

364 cov = coverage.Coverage() 

+

365 cov.start() 

+

366 import_local_file("code1") # pragma: nested 

+

367 cov.save() # pragma: nested 

+

368 import_local_file("code2") # pragma: nested 

+

369 self.check_code1_code2(cov) # pragma: nested 

+

370 # Then stop it, or the test suite gets out of whack. 

+

371 cov.stop() # pragma: nested 

+

372 

+

373 def test_two_getdata_only_warn_once(self): 

+

374 self.make_code1_code2() 

+

375 cov = coverage.Coverage(source=["."], omit=["code1.py"]) 

+

376 cov.start() 

+

377 import_local_file("code1") # pragma: nested 

+

378 cov.stop() # pragma: nested 

+

379 # We didn't collect any data, so we should get a warning. 

+

380 with self.assert_warnings(cov, ["No data was collected"]): 

+

381 cov.get_data() 

+

382 # But calling get_data a second time with no intervening activity 

+

383 # won't make another warning. 

+

384 with self.assert_warnings(cov, []): 

+

385 cov.get_data() 

+

386 

+

387 def test_two_getdata_warn_twice(self): 

+

388 self.make_code1_code2() 

+

389 cov = coverage.Coverage(source=["."], omit=["code1.py", "code2.py"]) 

+

390 cov.start() 

+

391 import_local_file("code1") # pragma: nested 

+

392 # We didn't collect any data, so we should get a warning. 

+

393 with self.assert_warnings(cov, ["No data was collected"]): # pragma: nested 

+

394 cov.save() # pragma: nested 

+

395 import_local_file("code2") # pragma: nested 

+

396 # Calling get_data a second time after tracing some more will warn again. 

+

397 with self.assert_warnings(cov, ["No data was collected"]): # pragma: nested 

+

398 cov.get_data() # pragma: nested 

+

399 # Then stop it, or the test suite gets out of whack. 

+

400 cov.stop() # pragma: nested 

+

401 

+

402 def make_good_data_files(self): 

+

403 """Make some good data files.""" 

+

404 self.make_code1_code2() 

+

405 cov = coverage.Coverage(data_suffix=True) 

+

406 self.start_import_stop(cov, "code1") 

+

407 cov.save() 

+

408 

+

409 cov = coverage.Coverage(data_suffix=True) 

+

410 self.start_import_stop(cov, "code2") 

+

411 cov.save() 

+

412 self.assert_file_count(".coverage.*", 2) 

+

413 

+

414 def test_combining_corrupt_data(self): 

+

415 # If you combine a corrupt data file, then you will get a warning, 

+

416 # and the file will remain. 

+

417 self.make_good_data_files() 

+

418 self.make_file(".coverage.foo", """La la la, this isn't coverage data!""") 

+

419 cov = coverage.Coverage() 

+

420 warning_regex = ( 

+

421 r"Couldn't use data file '.*\.coverage\.foo': file (is encrypted or )?is not a database" 

+

422 ) 

+

423 with self.assert_warnings(cov, [warning_regex]): 

+

424 cov.combine() 

+

425 

+

426 # We got the results from code1 and code2 properly. 

+

427 self.check_code1_code2(cov) 

+

428 

+

429 # The bad file still exists, but it's the only parallel data file left. 

+

430 self.assert_exists(".coverage.foo") 

+

431 self.assert_file_count(".coverage.*", 1) 

+

432 

+

433 def test_combining_twice(self): 

+

434 self.make_good_data_files() 

+

435 cov1 = coverage.Coverage() 

+

436 cov1.combine() 

+

437 cov1.save() 

+

438 self.check_code1_code2(cov1) 

+

439 self.assert_file_count(".coverage.*", 0) 

+

440 self.assert_exists(".coverage") 

+

441 

+

442 cov2 = coverage.Coverage() 

+

443 with pytest.raises(CoverageException, match=r"No data to combine"): 

+

444 cov2.combine(strict=True, keep=False) 

+

445 

+

446 cov3 = coverage.Coverage() 

+

447 cov3.combine() 

+

448 # Now the data is empty! 

+

449 _, statements, missing, _ = cov3.analysis("code1.py") 

+

450 assert statements == [1] 

+

451 assert missing == [1] 

+

452 _, statements, missing, _ = cov3.analysis("code2.py") 

+

453 assert statements == [1, 2] 

+

454 assert missing == [1, 2] 

+

455 

+

456 def test_combining_with_a_used_coverage(self): 

+

457 # Can you use a coverage object to run one shard of a parallel suite, 

+

458 # and then also combine the data? 

+

459 self.make_code1_code2() 

+

460 cov = coverage.Coverage(data_suffix=True) 

+

461 self.start_import_stop(cov, "code1") 

+

462 cov.save() 

+

463 

+

464 cov = coverage.Coverage(data_suffix=True) 

+

465 self.start_import_stop(cov, "code2") 

+

466 cov.save() 

+

467 

+

468 cov.combine() 

+

469 self.check_code1_code2(cov) 

+

470 

+

471 def test_ordered_combine(self): 

+

472 # https://github.com/nedbat/coveragepy/issues/649 

+

473 # The order of the [paths] setting matters 

+

474 def make_data_file(): 

+

475 data = coverage.CoverageData(".coverage.1") 

+

476 data.add_lines({abs_file('ci/girder/g1.py'): dict.fromkeys(range(10))}) 

+

477 data.add_lines({abs_file('ci/girder/plugins/p1.py'): dict.fromkeys(range(10))}) 

+

478 data.write() 

+

479 

+

480 def get_combined_filenames(): 

+

481 cov = coverage.Coverage() 

+

482 cov.combine() 

+

483 cov.save() 

+

484 data = cov.get_data() 

+

485 filenames = {relative_filename(f).replace("\\", "/") for f in data.measured_files()} 

+

486 return filenames 

+

487 

+

488 # Case 1: get the order right. 

+

489 make_data_file() 

+

490 self.make_file(".coveragerc", """\ 

+

491 [paths] 

+

492 plugins = 

+

493 plugins/ 

+

494 ci/girder/plugins/ 

+

495 girder = 

+

496 girder/ 

+

497 ci/girder/ 

+

498 """) 

+

499 assert get_combined_filenames() == {'girder/g1.py', 'plugins/p1.py'} 

+

500 

+

501 # Case 2: get the order wrong. 

+

502 make_data_file() 

+

503 self.make_file(".coveragerc", """\ 

+

504 [paths] 

+

505 girder = 

+

506 girder/ 

+

507 ci/girder/ 

+

508 plugins = 

+

509 plugins/ 

+

510 ci/girder/plugins/ 

+

511 """) 

+

512 assert get_combined_filenames() == {'girder/g1.py', 'girder/plugins/p1.py'} 

+

513 

+

514 def test_warnings(self): 

+

515 self.make_file("hello.py", """\ 

+

516 import sys, os 

+

517 print("Hello") 

+

518 """) 

+

519 cov = coverage.Coverage(source=["sys", "xyzzy", "quux"]) 

+

520 self.start_import_stop(cov, "hello") 

+

521 cov.get_data() 

+

522 

+

523 out, err = self.stdouterr() 

+

524 assert "Hello\n" in out 

+

525 assert textwrap.dedent("""\ 

+

526 Coverage.py warning: Module sys has no Python source. (module-not-python) 

+

527 Coverage.py warning: Module xyzzy was never imported. (module-not-imported) 

+

528 Coverage.py warning: Module quux was never imported. (module-not-imported) 

+

529 Coverage.py warning: No data was collected. (no-data-collected) 

+

530 """) in err 

+

531 

+

532 def test_warnings_suppressed(self): 

+

533 self.make_file("hello.py", """\ 

+

534 import sys, os 

+

535 print("Hello") 

+

536 """) 

+

537 self.make_file(".coveragerc", """\ 

+

538 [run] 

+

539 disable_warnings = no-data-collected, module-not-imported 

+

540 """) 

+

541 cov = coverage.Coverage(source=["sys", "xyzzy", "quux"]) 

+

542 self.start_import_stop(cov, "hello") 

+

543 cov.get_data() 

+

544 

+

545 out, err = self.stdouterr() 

+

546 assert "Hello\n" in out 

+

547 assert "Coverage.py warning: Module sys has no Python source. (module-not-python)" in err 

+

548 assert "module-not-imported" not in err 

+

549 assert "no-data-collected" not in err 

+

550 

+

551 def test_warn_once(self): 

+

552 cov = coverage.Coverage() 

+

553 cov.load() 

+

554 cov._warn("Warning, warning 1!", slug="bot", once=True) 

+

555 cov._warn("Warning, warning 2!", slug="bot", once=True) 

+

556 err = self.stderr() 

+

557 assert "Warning, warning 1!" in err 

+

558 assert "Warning, warning 2!" not in err 

+

559 

+

560 def test_source_and_include_dont_conflict(self): 

+

561 # A bad fix made this case fail: https://github.com/nedbat/coveragepy/issues/541 

+

562 self.make_file("a.py", "import b\na = 1") 

+

563 self.make_file("b.py", "b = 1") 

+

564 self.make_file(".coveragerc", """\ 

+

565 [run] 

+

566 source = . 

+

567 """) 

+

568 

+

569 # Just like: coverage run a.py 

+

570 cov = coverage.Coverage() 

+

571 self.start_import_stop(cov, "a") 

+

572 cov.save() 

+

573 

+

574 # Run the equivalent of: coverage report --include=b.py 

+

575 cov = coverage.Coverage(include=["b.py"]) 

+

576 cov.load() 

+

577 # There should be no exception. At one point, report() threw: 

+

578 # CoverageException: --include and --source are mutually exclusive 

+

579 cov.report() 

+

580 expected = textwrap.dedent("""\ 

+

581 Name Stmts Miss Cover 

+

582 --------------------------- 

+

583 b.py 1 0 100% 

+

584 --------------------------- 

+

585 TOTAL 1 0 100% 

+

586 """) 

+

587 assert expected == self.stdout() 

+

588 

+

589 def make_test_files(self): 

+

590 """Create a simple file representing a method with two tests. 

+

591 

+

592 Returns absolute path to the file. 

+

593 """ 

+

594 self.make_file("testsuite.py", """\ 

+

595 def timestwo(x): 

+

596 return x*2 

+

597 

+

598 def test_multiply_zero(): 

+

599 assert timestwo(0) == 0 

+

600 

+

601 def test_multiply_six(): 

+

602 assert timestwo(6) == 12 

+

603 """) 

+

604 

+

605 def test_switch_context_testrunner(self): 

+

606 # This test simulates a coverage-aware test runner, 

+

607 # measuring labeled coverage via public API 

+

608 self.make_test_files() 

+

609 

+

610 # Test runner starts 

+

611 cov = coverage.Coverage() 

+

612 cov.start() 

+

613 

+

614 if "pragma: nested": 

+

615 # Imports the test suite 

+

616 suite = import_local_file("testsuite") 

+

617 

+

618 # Measures test case 1 

+

619 cov.switch_context('multiply_zero') 

+

620 suite.test_multiply_zero() 

+

621 

+

622 # Measures test case 2 

+

623 cov.switch_context('multiply_six') 

+

624 suite.test_multiply_six() 

+

625 

+

626 # Runner finishes 

+

627 cov.save() 

+

628 cov.stop() 

+

629 

+

630 # Labeled data is collected 

+

631 data = cov.get_data() 

+

632 assert [u'', u'multiply_six', u'multiply_zero'] == sorted(data.measured_contexts()) 

+

633 

+

634 filenames = self.get_measured_filenames(data) 

+

635 suite_filename = filenames['testsuite.py'] 

+

636 

+

637 data.set_query_context("multiply_six") 

+

638 assert [2, 8] == sorted(data.lines(suite_filename)) 

+

639 data.set_query_context("multiply_zero") 

+

640 assert [2, 5] == sorted(data.lines(suite_filename)) 

+

641 

+

642 def test_switch_context_with_static(self): 

+

643 # This test simulates a coverage-aware test runner, 

+

644 # measuring labeled coverage via public API, 

+

645 # with static label prefix. 

+

646 self.make_test_files() 

+

647 

+

648 # Test runner starts 

+

649 cov = coverage.Coverage(context="mysuite") 

+

650 cov.start() 

+

651 

+

652 if "pragma: nested": 

+

653 # Imports the test suite 

+

654 suite = import_local_file("testsuite") 

+

655 

+

656 # Measures test case 1 

+

657 cov.switch_context('multiply_zero') 

+

658 suite.test_multiply_zero() 

+

659 

+

660 # Measures test case 2 

+

661 cov.switch_context('multiply_six') 

+

662 suite.test_multiply_six() 

+

663 

+

664 # Runner finishes 

+

665 cov.save() 

+

666 cov.stop() 

+

667 

+

668 # Labeled data is collected 

+

669 data = cov.get_data() 

+

670 expected = [u'mysuite', u'mysuite|multiply_six', u'mysuite|multiply_zero'] 

+

671 assert expected == sorted(data.measured_contexts()) 

+

672 

+

673 filenames = self.get_measured_filenames(data) 

+

674 suite_filename = filenames['testsuite.py'] 

+

675 

+

676 data.set_query_context("mysuite|multiply_six") 

+

677 assert [2, 8] == sorted(data.lines(suite_filename)) 

+

678 data.set_query_context("mysuite|multiply_zero") 

+

679 assert [2, 5] == sorted(data.lines(suite_filename)) 

+

680 

+

681 def test_dynamic_context_conflict(self): 

+

682 cov = coverage.Coverage(source=["."]) 

+

683 cov.set_option("run:dynamic_context", "test_function") 

+

684 cov.start() 

+

685 # Switch twice, but only get one warning. 

+

686 cov.switch_context("test1") # pragma: nested 

+

687 cov.switch_context("test2") # pragma: nested 

+

688 expected = "Coverage.py warning: Conflicting dynamic contexts (dynamic-conflict)\n" 

+

689 assert expected == self.stderr() 

+

690 cov.stop() # pragma: nested 

+

691 

+

692 def test_switch_context_unstarted(self): 

+

693 # Coverage must be started to switch context 

+

694 msg = "Cannot switch context, coverage is not started" 

+

695 cov = coverage.Coverage() 

+

696 with pytest.raises(CoverageException, match=msg): 

+

697 cov.switch_context("test1") 

+

698 

+

699 cov.start() 

+

700 cov.switch_context("test2") # pragma: nested 

+

701 

+

702 cov.stop() # pragma: nested 

+

703 with pytest.raises(CoverageException, match=msg): 

+

704 cov.switch_context("test3") 

+

705 

+

706 def test_config_crash(self): 

+

707 # The internal '[run] _crash' setting can be used to artificially raise 

+

708 # exceptions from inside Coverage. 

+

709 cov = coverage.Coverage() 

+

710 cov.set_option("run:_crash", "test_config_crash") 

+

711 with pytest.raises(Exception, match="Crashing because called by test_config_crash"): 

+

712 cov.start() 

+

713 

+

714 def test_config_crash_no_crash(self): 

+

715 # '[run] _crash' really checks the call stack. 

+

716 cov = coverage.Coverage() 

+

717 cov.set_option("run:_crash", "not_my_caller") 

+

718 cov.start() 

+

719 cov.stop() 

+

720 

+

721 def test_run_debug_sys(self): 

+

722 # https://github.com/nedbat/coveragepy/issues/907 

+

723 cov = coverage.Coverage() 

+

724 cov.start() 

+

725 d = dict(cov.sys_info()) # pragma: nested 

+

726 cov.stop() # pragma: nested 

+

727 assert d['data_file'].endswith(".coverage") 

+

728 

+

729 

+

730class CurrentInstanceTest(CoverageTest): 

+

731 """Tests of Coverage.current().""" 

+

732 

+

733 run_in_temp_dir = False 

+

734 

+

735 def assert_current_is_none(self, current): 

+

736 """Assert that a current we expect to be None is correct.""" 

+

737 # During meta-coverage, the None answers will be wrong because the 

+

738 # overall coverage measurement will still be on the current-stack. 

+

739 # Since we know they will be wrong, and we have non-meta test runs 

+

740 # also, don't assert them. 

+

741 if not env.METACOV: 

+

742 assert current is None 

+

743 

+

744 def test_current(self): 

+

745 cur0 = coverage.Coverage.current() 

+

746 self.assert_current_is_none(cur0) 

+

747 # Making an instance doesn't make it current. 

+

748 cov = coverage.Coverage() 

+

749 cur1 = coverage.Coverage.current() 

+

750 self.assert_current_is_none(cur1) 

+

751 assert cur0 is cur1 

+

752 # Starting the instance makes it current. 

+

753 cov.start() 

+

754 if "# pragma: nested": 

+

755 cur2 = coverage.Coverage.current() 

+

756 assert cur2 is cov 

+

757 # Stopping the instance makes current None again. 

+

758 cov.stop() 

+

759 

+

760 cur3 = coverage.Coverage.current() 

+

761 self.assert_current_is_none(cur3) 

+

762 assert cur0 is cur3 

+

763 

+

764 

+

765@pytest.mark.skipif(not env.PYBEHAVIOR.namespaces_pep420, 

+

766 reason="Python before 3.3 doesn't have namespace packages" 

+

767) 

+

768class NamespaceModuleTest(UsingModulesMixin, CoverageTest): 

+

769 """Test PEP-420 namespace modules.""" 

+

770 

+

771 def test_explicit_namespace_module(self): 

+

772 self.make_file("main.py", "import namespace_420\n") 

+

773 

+

774 cov = coverage.Coverage() 

+

775 self.start_import_stop(cov, "main") 

+

776 

+

777 with pytest.raises(CoverageException, match=r"Module .* has no file"): 

+

778 cov.analysis(sys.modules['namespace_420']) 

+

779 

+

780 def test_bug_572(self): 

+

781 self.make_file("main.py", "import namespace_420\n") 

+

782 

+

783 # Use source=namespace_420 to trigger the check that used to fail, 

+

784 # and use source=main so that something is measured. 

+

785 cov = coverage.Coverage(source=["namespace_420", "main"]) 

+

786 with self.assert_warnings(cov, []): 

+

787 self.start_import_stop(cov, "main") 

+

788 cov.report() 

+

789 

+

790 

+

791class IncludeOmitTestsMixin(UsingModulesMixin, CoverageTest): 

+

792 """Test methods for coverage methods taking include and omit.""" 

+

793 

+

794 def filenames_in(self, summary, filenames): 

+

795 """Assert the `filenames` are in the keys of `summary`.""" 

+

796 for filename in filenames.split(): 

+

797 assert filename in summary 

+

798 

+

799 def filenames_not_in(self, summary, filenames): 

+

800 """Assert the `filenames` are not in the keys of `summary`.""" 

+

801 for filename in filenames.split(): 

+

802 assert filename not in summary 

+

803 

+

804 def test_nothing_specified(self): 

+

805 result = self.coverage_usepkgs() 

+

806 self.filenames_in(result, "p1a p1b p2a p2b othera otherb osa osb") 

+

807 self.filenames_not_in(result, "p1c") 

+

808 # Because there was no source= specified, we don't search for 

+

809 # unexecuted files. 

+

810 

+

811 def test_include(self): 

+

812 result = self.coverage_usepkgs(include=["*/p1a.py"]) 

+

813 self.filenames_in(result, "p1a") 

+

814 self.filenames_not_in(result, "p1b p1c p2a p2b othera otherb osa osb") 

+

815 

+

816 def test_include_2(self): 

+

817 result = self.coverage_usepkgs(include=["*a.py"]) 

+

818 self.filenames_in(result, "p1a p2a othera osa") 

+

819 self.filenames_not_in(result, "p1b p1c p2b otherb osb") 

+

820 

+

821 def test_include_as_string(self): 

+

822 result = self.coverage_usepkgs(include="*a.py") 

+

823 self.filenames_in(result, "p1a p2a othera osa") 

+

824 self.filenames_not_in(result, "p1b p1c p2b otherb osb") 

+

825 

+

826 def test_omit(self): 

+

827 result = self.coverage_usepkgs(omit=["*/p1a.py"]) 

+

828 self.filenames_in(result, "p1b p2a p2b") 

+

829 self.filenames_not_in(result, "p1a p1c") 

+

830 

+

831 def test_omit_2(self): 

+

832 result = self.coverage_usepkgs(omit=["*a.py"]) 

+

833 self.filenames_in(result, "p1b p2b otherb osb") 

+

834 self.filenames_not_in(result, "p1a p1c p2a othera osa") 

+

835 

+

836 def test_omit_as_string(self): 

+

837 result = self.coverage_usepkgs(omit="*a.py") 

+

838 self.filenames_in(result, "p1b p2b otherb osb") 

+

839 self.filenames_not_in(result, "p1a p1c p2a othera osa") 

+

840 

+

841 def test_omit_and_include(self): 

+

842 result = self.coverage_usepkgs(include=["*/p1*"], omit=["*/p1a.py"]) 

+

843 self.filenames_in(result, "p1b") 

+

844 self.filenames_not_in(result, "p1a p1c p2a p2b") 

+

845 

+

846 

+

847class SourceIncludeOmitTest(IncludeOmitTestsMixin, CoverageTest): 

+

848 """Test using `source`, `include`, and `omit` when measuring code.""" 

+

849 

+

850 def coverage_usepkgs(self, **kwargs): 

+

851 """Run coverage on usepkgs and return the line summary. 

+

852 

+

853 Arguments are passed to the `coverage.Coverage` constructor. 

+

854 

+

855 """ 

+

856 cov = coverage.Coverage(**kwargs) 

+

857 cov.start() 

+

858 import usepkgs # pragma: nested # pylint: disable=import-error, unused-import 

+

859 cov.stop() # pragma: nested 

+

860 data = cov.get_data() 

+

861 summary = line_counts(data) 

+

862 for k, v in list(summary.items()): 

+

863 assert k.endswith(".py") 

+

864 summary[k[:-3]] = v 

+

865 return summary 

+

866 

+

867 def test_source_include_exclusive(self): 

+

868 cov = coverage.Coverage(source=["pkg1"], include=["pkg2"]) 

+

869 with self.assert_warnings(cov, ["--include is ignored because --source is set"]): 

+

870 cov.start() 

+

871 cov.stop() # pragma: nested 

+

872 

+

873 def test_source_package_as_package(self): 

+

874 assert not os.path.isdir("pkg1") 

+

875 lines = self.coverage_usepkgs(source=["pkg1"]) 

+

876 self.filenames_in(lines, "p1a p1b") 

+

877 self.filenames_not_in(lines, "p2a p2b othera otherb osa osb") 

+

878 # Because source= was specified, we do search for unexecuted files. 

+

879 assert lines['p1c'] == 0 

+

880 

+

881 def test_source_package_as_dir(self): 

+

882 os.chdir(nice_file(TESTS_DIR, "modules")) 

+

883 assert os.path.isdir("pkg1") 

+

884 lines = self.coverage_usepkgs(source=["pkg1"]) 

+

885 self.filenames_in(lines, "p1a p1b") 

+

886 self.filenames_not_in(lines, "p2a p2b othera otherb osa osb") 

+

887 # Because source= was specified, we do search for unexecuted files. 

+

888 assert lines['p1c'] == 0 

+

889 

+

890 def test_source_package_dotted_sub(self): 

+

891 lines = self.coverage_usepkgs(source=["pkg1.sub"]) 

+

892 self.filenames_not_in(lines, "p2a p2b othera otherb osa osb") 

+

893 # Because source= was specified, we do search for unexecuted files. 

+

894 assert lines['runmod3'] == 0 

+

895 

+

896 def test_source_package_dotted_p1b(self): 

+

897 lines = self.coverage_usepkgs(source=["pkg1.p1b"]) 

+

898 self.filenames_in(lines, "p1b") 

+

899 self.filenames_not_in(lines, "p1a p1c p2a p2b othera otherb osa osb") 

+

900 

+

901 def test_source_package_part_omitted(self): 

+

902 # https://github.com/nedbat/coveragepy/issues/218 

+

903 # Used to be if you omitted something executed and inside the source, 

+

904 # then after it was executed but not recorded, it would be found in 

+

905 # the search for unexecuted files, and given a score of 0%. 

+

906 

+

907 # The omit arg is by path, so need to be in the modules directory. 

+

908 os.chdir(nice_file(TESTS_DIR, "modules")) 

+

909 lines = self.coverage_usepkgs(source=["pkg1"], omit=["pkg1/p1b.py"]) 

+

910 self.filenames_in(lines, "p1a") 

+

911 self.filenames_not_in(lines, "p1b") 

+

912 assert lines['p1c'] == 0 

+

913 

+

914 def test_source_package_as_package_part_omitted(self): 

+

915 # https://github.com/nedbat/coveragepy/issues/638 

+

916 lines = self.coverage_usepkgs(source=["pkg1"], omit=["*/p1b.py"]) 

+

917 self.filenames_in(lines, "p1a") 

+

918 self.filenames_not_in(lines, "p1b") 

+

919 assert lines['p1c'] == 0 

+

920 

+

921 def test_ambiguous_source_package_as_dir(self): 

+

922 # pkg1 is a directory and a pkg, since we cd into tests/modules/ambiguous 

+

923 os.chdir(nice_file(TESTS_DIR, "modules", "ambiguous")) 

+

924 # pkg1 defaults to directory because tests/modules/ambiguous/pkg1 exists 

+

925 lines = self.coverage_usepkgs(source=["pkg1"]) 

+

926 self.filenames_in(lines, "ambiguous") 

+

927 self.filenames_not_in(lines, "p1a p1b p1c") 

+

928 

+

929 def test_ambiguous_source_package_as_package(self): 

+

930 # pkg1 is a directory and a pkg, since we cd into tests/modules/ambiguous 

+

931 os.chdir(nice_file(TESTS_DIR, "modules", "ambiguous")) 

+

932 lines = self.coverage_usepkgs(source_pkgs=["pkg1"]) 

+

933 self.filenames_in(lines, "p1a p1b") 

+

934 self.filenames_not_in(lines, "p2a p2b othera otherb osa osb ambiguous") 

+

935 # Because source= was specified, we do search for unexecuted files. 

+

936 assert lines['p1c'] == 0 

+

937 

+

938 

+

939class ReportIncludeOmitTest(IncludeOmitTestsMixin, CoverageTest): 

+

940 """Tests of the report include/omit functionality.""" 

+

941 

+

942 def coverage_usepkgs(self, **kwargs): 

+

943 """Try coverage.report().""" 

+

944 cov = coverage.Coverage() 

+

945 cov.start() 

+

946 import usepkgs # pragma: nested # pylint: disable=import-error, unused-import 

+

947 cov.stop() # pragma: nested 

+

948 report = StringIO() 

+

949 cov.report(file=report, **kwargs) 

+

950 return report.getvalue() 

+

951 

+

952 

+

953class XmlIncludeOmitTest(IncludeOmitTestsMixin, CoverageTest): 

+

954 """Tests of the XML include/omit functionality. 

+

955 

+

956 This also takes care of the HTML and annotate include/omit, by virtue 

+

957 of the structure of the code. 

+

958 

+

959 """ 

+

960 

+

961 def coverage_usepkgs(self, **kwargs): 

+

962 """Try coverage.xml_report().""" 

+

963 cov = coverage.Coverage() 

+

964 cov.start() 

+

965 import usepkgs # pragma: nested # pylint: disable=import-error, unused-import 

+

966 cov.stop() # pragma: nested 

+

967 cov.xml_report(outfile="-", **kwargs) 

+

968 return self.stdout() 

+

969 

+

970 

+

971class AnalysisTest(CoverageTest): 

+

972 """Test the numerical analysis of results.""" 

+

973 def test_many_missing_branches(self): 

+

974 cov = coverage.Coverage(branch=True) 

+

975 

+

976 self.make_file("missing.py", """\ 

+

977 def fun1(x): 

+

978 if x == 1: 

+

979 print("one") 

+

980 else: 

+

981 print("not one") 

+

982 print("done") # pragma: nocover 

+

983 

+

984 def fun2(x): 

+

985 print("x") 

+

986 

+

987 fun2(3) 

+

988 """) 

+

989 

+

990 # Import the Python file, executing it. 

+

991 self.start_import_stop(cov, "missing") 

+

992 

+

993 nums = cov._analyze("missing.py").numbers 

+

994 assert nums.n_files == 1 

+

995 assert nums.n_statements == 7 

+

996 assert nums.n_excluded == 1 

+

997 assert nums.n_missing == 3 

+

998 assert nums.n_branches == 2 

+

999 assert nums.n_partial_branches == 0 

+

1000 assert nums.n_missing_branches == 2 

+

1001 

+

1002 

+

1003class TestRunnerPluginTest(CoverageTest): 

+

1004 """Test that the API works properly the way various third-party plugins call it. 

+

1005 

+

1006 We don't actually use the plugins, but these tests call the API the same 

+

1007 way they do. 

+

1008 

+

1009 """ 

+

1010 def pretend_to_be_nose_with_cover(self, erase=False, cd=False): 

+

1011 """This is what the nose --with-cover plugin does.""" 

+

1012 self.make_file("no_biggie.py", """\ 

+

1013 a = 1 

+

1014 b = 2 

+

1015 if b == 1: 

+

1016 c = 4 

+

1017 """) 

+

1018 self.make_file("sub/hold.txt", "") 

+

1019 

+

1020 cov = coverage.Coverage() 

+

1021 if erase: 

+

1022 cov.combine() 

+

1023 cov.erase() 

+

1024 cov.load() 

+

1025 self.start_import_stop(cov, "no_biggie") 

+

1026 if cd: 

+

1027 os.chdir("sub") 

+

1028 cov.combine() 

+

1029 cov.save() 

+

1030 cov.report(["no_biggie.py"], show_missing=True) 

+

1031 assert self.stdout() == textwrap.dedent("""\ 

+

1032 Name Stmts Miss Cover Missing 

+

1033 -------------------------------------------- 

+

1034 no_biggie.py 4 1 75% 4 

+

1035 -------------------------------------------- 

+

1036 TOTAL 4 1 75% 

+

1037 """) 

+

1038 if cd: 

+

1039 os.chdir("..") 

+

1040 

+

1041 def test_nose_plugin(self): 

+

1042 self.pretend_to_be_nose_with_cover() 

+

1043 

+

1044 def test_nose_plugin_with_erase(self): 

+

1045 self.pretend_to_be_nose_with_cover(erase=True) 

+

1046 

+

1047 def test_nose_plugin_with_cd(self): 

+

1048 # https://github.com/nedbat/coveragepy/issues/916 

+

1049 self.pretend_to_be_nose_with_cover(cd=True) 

+

1050 

+

1051 def pretend_to_be_pytestcov(self, append): 

+

1052 """Act like pytest-cov.""" 

+

1053 self.make_file("prog.py", """\ 

+

1054 a = 1 

+

1055 b = 2 

+

1056 if b == 1: 

+

1057 c = 4 

+

1058 """) 

+

1059 self.make_file(".coveragerc", """\ 

+

1060 [run] 

+

1061 parallel = True 

+

1062 source = . 

+

1063 """) 

+

1064 

+

1065 cov = coverage.Coverage(source=None, branch=None, config_file='.coveragerc') 

+

1066 if append: 

+

1067 cov.load() 

+

1068 else: 

+

1069 cov.erase() 

+

1070 self.start_import_stop(cov, "prog") 

+

1071 cov.combine() 

+

1072 cov.save() 

+

1073 report = StringIO() 

+

1074 cov.report(show_missing=None, ignore_errors=True, file=report, skip_covered=None, 

+

1075 skip_empty=None) 

+

1076 assert report.getvalue() == textwrap.dedent("""\ 

+

1077 Name Stmts Miss Cover 

+

1078 ----------------------------- 

+

1079 prog.py 4 1 75% 

+

1080 ----------------------------- 

+

1081 TOTAL 4 1 75% 

+

1082 """) 

+

1083 self.assert_file_count(".coverage", 0) 

+

1084 self.assert_file_count(".coverage.*", 1) 

+

1085 

+

1086 def test_pytestcov_parallel(self): 

+

1087 self.pretend_to_be_pytestcov(append=False) 

+

1088 

+

1089 def test_pytestcov_parallel_append(self): 

+

1090 self.pretend_to_be_pytestcov(append=True) 

+

1091 

+

1092 

+

1093class ImmutableConfigTest(CoverageTest): 

+

1094 """Check that reporting methods don't permanently change the configuration.""" 

+

1095 def test_config_doesnt_change(self): 

+

1096 self.make_file("simple.py", "a = 1") 

+

1097 cov = coverage.Coverage() 

+

1098 self.start_import_stop(cov, "simple") 

+

1099 assert cov.get_option("report:show_missing") is False 

+

1100 cov.report(show_missing=True) 

+

1101 assert cov.get_option("report:show_missing") is False 

+

1102 

+

1103 

+

1104class RelativePathTest(CoverageTest): 

+

1105 """Tests of the relative_files setting.""" 

+

1106 def test_moving_stuff(self): 

+

1107 # When using absolute file names, moving the source around results in 

+

1108 # "No source for code" errors while reporting. 

+

1109 self.make_file("foo.py", "a = 1") 

+

1110 cov = coverage.Coverage(source=["."]) 

+

1111 self.start_import_stop(cov, "foo") 

+

1112 res = cov.report() 

+

1113 assert res == 100 

+

1114 

+

1115 expected = re.escape("No source for code: '{}'.".format(abs_file("foo.py"))) 

+

1116 os.remove("foo.py") 

+

1117 self.make_file("new/foo.py", "a = 1") 

+

1118 shutil.move(".coverage", "new/.coverage") 

+

1119 with change_dir("new"): 

+

1120 cov = coverage.Coverage() 

+

1121 cov.load() 

+

1122 with pytest.raises(CoverageException, match=expected): 

+

1123 cov.report() 

+

1124 

+

1125 def test_moving_stuff_with_relative(self): 

+

1126 # When using relative file names, moving the source around is fine. 

+

1127 self.make_file("foo.py", "a = 1") 

+

1128 self.make_file(".coveragerc", """\ 

+

1129 [run] 

+

1130 relative_files = true 

+

1131 """) 

+

1132 cov = coverage.Coverage(source=["."]) 

+

1133 self.start_import_stop(cov, "foo") 

+

1134 res = cov.report() 

+

1135 assert res == 100 

+

1136 

+

1137 os.remove("foo.py") 

+

1138 self.make_file("new/foo.py", "a = 1") 

+

1139 shutil.move(".coverage", "new/.coverage") 

+

1140 shutil.move(".coveragerc", "new/.coveragerc") 

+

1141 with change_dir("new"): 

+

1142 cov = coverage.Coverage() 

+

1143 cov.load() 

+

1144 res = cov.report() 

+

1145 assert res == 100 

+

1146 

+

1147 def test_combine_relative(self): 

+

1148 self.make_file("dir1/foo.py", "a = 1") 

+

1149 self.make_file("dir1/.coveragerc", """\ 

+

1150 [run] 

+

1151 relative_files = true 

+

1152 """) 

+

1153 with change_dir("dir1"): 

+

1154 cov = coverage.Coverage(source=["."], data_suffix=True) 

+

1155 self.start_import_stop(cov, "foo") 

+

1156 cov.save() 

+

1157 shutil.move(glob.glob(".coverage.*")[0], "..") 

+

1158 

+

1159 self.make_file("dir2/bar.py", "a = 1") 

+

1160 self.make_file("dir2/.coveragerc", """\ 

+

1161 [run] 

+

1162 relative_files = true 

+

1163 """) 

+

1164 with change_dir("dir2"): 

+

1165 cov = coverage.Coverage(source=["."], data_suffix=True) 

+

1166 self.start_import_stop(cov, "bar") 

+

1167 cov.save() 

+

1168 shutil.move(glob.glob(".coverage.*")[0], "..") 

+

1169 

+

1170 self.make_file(".coveragerc", """\ 

+

1171 [run] 

+

1172 relative_files = true 

+

1173 """) 

+

1174 cov = coverage.Coverage() 

+

1175 cov.combine() 

+

1176 cov.save() 

+

1177 

+

1178 self.make_file("foo.py", "a = 1") 

+

1179 self.make_file("bar.py", "a = 1") 

+

1180 

+

1181 cov = coverage.Coverage() 

+

1182 cov.load() 

+

1183 files = cov.get_data().measured_files() 

+

1184 assert files == {'foo.py', 'bar.py'} 

+

1185 res = cov.report() 

+

1186 assert res == 100 

+
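The listing above ends tests/test_api.py. The pattern it exercises over and over is: construct a Coverage object, start and stop measurement around the code under test, save the data, then query or report on it. A minimal self-contained sketch of that pattern, with mycode.py as a placeholder module mirroring the small files the tests create (not a file from the report above):

    import coverage

    # Placeholder module to measure, mirroring the tests' make_file() calls.
    with open("mycode.py", "w") as f:
        f.write("a = 1\nb = 2\nif b == 3:\n    c = 4\n")

    cov = coverage.Coverage()          # data_file defaults to ".coverage"
    cov.start()
    import mycode                      # executing the module records its lines
    cov.stop()
    cov.save()

    # analysis() returns (filename, statements, missing, formatted-missing string),
    # the same 4-tuple unpacked throughout the tests above.
    _, statements, missing, _ = cov.analysis("mycode.py")
    print("statements:", statements, "missing:", missing)

    cov.report(show_missing=True)      # text summary, as report() is used above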
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_arcs_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_arcs_py.html new file mode 100644 index 000000000..6d0cc6329 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_arcs_py.html @@ -0,0 +1,1800 @@ Coverage for tests/test_arcs.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests for coverage.py's arc measurement.""" 

+

5 

+

6import pytest 

+

7 

+

8from tests.coveragetest import CoverageTest 

+

9from tests.helpers import assert_count_equal 

+

10 

+

11import coverage 

+

12from coverage import env 

+

13from coverage.files import abs_file 

+

14 

+

15 

+

16class SimpleArcTest(CoverageTest): 

+

17 """Tests for coverage.py's arc measurement.""" 

+

18 

+

19 def test_simple_sequence(self): 

+

20 self.check_coverage("""\ 

+

21 a = 1 

+

22 b = 2 

+

23 """, 

+

24 arcz=".1 12 2.") 

+

25 self.check_coverage("""\ 

+

26 a = 1 

+

27 

+

28 b = 3 

+

29 """, 

+

30 arcz=".1 13 3.") 

+

31 line1 = 1 if env.PYBEHAVIOR.module_firstline_1 else 2 

+

32 self.check_coverage("""\ 

+

33 

+

34 a = 2 

+

35 b = 3 

+

36 

+

37 c = 5 

+

38 """, 

+

39 arcz="-{0}2 23 35 5-{0}".format(line1) 

+

40 ) 

+

41 

+

42 def test_function_def(self): 

+

43 self.check_coverage("""\ 

+

44 def foo(): 

+

45 a = 2 

+

46 

+

47 foo() 

+

48 """, 

+

49 arcz=".1 .2 14 2. 4.") 

+

50 

+

51 def test_if(self): 

+

52 self.check_coverage("""\ 

+

53 a = 1 

+

54 if len([]) == 0: 

+

55 a = 3 

+

56 assert a == 3 

+

57 """, 

+

58 arcz=".1 12 23 24 34 4.", arcz_missing="24") 

+

59 self.check_coverage("""\ 

+

60 a = 1 

+

61 if len([]) == 1: 

+

62 a = 3 

+

63 assert a == 1 

+

64 """, 

+

65 arcz=".1 12 23 24 34 4.", arcz_missing="23 34") 

+

66 

+

67 def test_if_else(self): 

+

68 self.check_coverage("""\ 

+

69 if len([]) == 0: 

+

70 a = 2 

+

71 else: 

+

72 a = 4 

+

73 assert a == 2 

+

74 """, 

+

75 arcz=".1 12 25 14 45 5.", arcz_missing="14 45") 

+

76 self.check_coverage("""\ 

+

77 if len([]) == 1: 

+

78 a = 2 

+

79 else: 

+

80 a = 4 

+

81 assert a == 4 

+

82 """, 

+

83 arcz=".1 12 25 14 45 5.", arcz_missing="12 25") 

+

84 

+

85 def test_compact_if(self): 

+

86 self.check_coverage("""\ 

+

87 a = 1 

+

88 if len([]) == 0: a = 2 

+

89 assert a == 2 

+

90 """, 

+

91 arcz=".1 12 23 3.", 

+

92 ) 

+

93 self.check_coverage("""\ 

+

94 def fn(x): 

+

95 if x % 2: return True 

+

96 return False 

+

97 a = fn(1) 

+

98 assert a is True 

+

99 """, 

+

100 arcz=".1 14 45 5. .2 2. 23 3.", arcz_missing="23 3.") 

+

101 

+

102 def test_multiline(self): 

+

103 self.check_coverage("""\ 

+

104 a = ( 

+

105 2 + 

+

106 3 

+

107 ) 

+

108 b = \\ 

+

109 6 

+

110 """, 

+

111 arcz=".1 15 5.", 

+

112 ) 

+

113 

+

114 def test_if_return(self): 

+

115 self.check_coverage("""\ 

+

116 def if_ret(a): 

+

117 if a: 

+

118 return 3 

+

119 b = 4 

+

120 return 5 

+

121 x = if_ret(0) + if_ret(1) 

+

122 assert x == 8 

+

123 """, 

+

124 arcz=".1 16 67 7. .2 23 24 3. 45 5.", 

+

125 ) 

+

126 

+

127 def test_dont_confuse_exit_and_else(self): 

+

128 self.check_coverage("""\ 

+

129 def foo(): 

+

130 if foo: 

+

131 a = 3 

+

132 else: 

+

133 a = 5 

+

134 return a 

+

135 assert foo() == 3 # 7 

+

136 """, 

+

137 arcz=".1 17 7. .2 23 36 25 56 6.", arcz_missing="25 56" 

+

138 ) 

+

139 self.check_coverage("""\ 

+

140 def foo(): 

+

141 if foo: 

+

142 a = 3 

+

143 else: 

+

144 a = 5 

+

145 foo() # 6 

+

146 """, 

+

147 arcz=".1 16 6. .2 23 3. 25 5.", arcz_missing="25 5." 

+

148 ) 

+

149 

+

150 def test_what_is_the_sound_of_no_lines_clapping(self): 

+

151 if env.JYTHON: 

+

152 # Jython reports no lines for an empty file. 

+

153 arcz_missing=".1 1." # pragma: only jython 

+

154 else: 

+

155 # Other Pythons report one line. 

+

156 arcz_missing="" 

+

157 self.check_coverage("""\ 

+

158 # __init__.py 

+

159 """, 

+

160 arcz=".1 1.", 

+

161 arcz_missing=arcz_missing, 

+

162 ) 

+

163 

+

164 

+

165class WithTest(CoverageTest): 

+

166 """Arc-measuring tests involving context managers.""" 

+

167 

+

168 def test_with(self): 

+

169 self.check_coverage("""\ 

+

170 def example(): 

+

171 with open("test", "w") as f: # exit 

+

172 f.write("") 

+

173 return 1 

+

174 

+

175 example() 

+

176 """, 

+

177 arcz=".1 .2 23 34 4. 16 6." 

+

178 ) 

+

179 

+

180 def test_bug_146(self): 

+

181 # https://github.com/nedbat/coveragepy/issues/146 

+

182 self.check_coverage("""\ 

+

183 for i in range(2): 

+

184 with open("test", "w") as f: 

+

185 print(3) 

+

186 print(4) 

+

187 print(5) 

+

188 """, 

+

189 arcz=".1 12 23 34 41 15 5." 

+

190 ) 

+

191 

+

192 

+

193class LoopArcTest(CoverageTest): 

+

194 """Arc-measuring tests involving loops.""" 

+

195 

+

196 def test_loop(self): 

+

197 self.check_coverage("""\ 

+

198 for i in range(10): 

+

199 a = i 

+

200 assert a == 9 

+

201 """, 

+

202 arcz=".1 12 21 13 3.", 

+

203 ) 

+

204 self.check_coverage("""\ 

+

205 a = -1 

+

206 for i in range(0): 

+

207 a = i 

+

208 assert a == -1 

+

209 """, 

+

210 arcz=".1 12 23 32 24 4.", arcz_missing="23 32") 

+

211 

+

212 def test_nested_loop(self): 

+

213 self.check_coverage("""\ 

+

214 for i in range(3): 

+

215 for j in range(3): 

+

216 a = i + j 

+

217 assert a == 4 

+

218 """, 

+

219 arcz=".1 12 23 32 21 14 4.", 

+

220 ) 

+

221 

+

222 def test_break(self): 

+

223 if env.PYBEHAVIOR.omit_after_jump: 

+

224 arcz = ".1 12 23 35 15 5." 

+

225 arcz_missing = "15" 

+

226 else: 

+

227 arcz = ".1 12 23 35 15 41 5." 

+

228 arcz_missing = "15 41" 

+

229 

+

230 self.check_coverage("""\ 

+

231 for i in range(10): 

+

232 a = i 

+

233 break # 3 

+

234 a = 99 

+

235 assert a == 0 # 5 

+

236 """, 

+

237 arcz=arcz, arcz_missing=arcz_missing 

+

238 ) 

+

239 

+

240 def test_continue(self): 

+

241 if env.PYBEHAVIOR.omit_after_jump: 

+

242 arcz = ".1 12 23 31 15 5." 

+

243 arcz_missing = "" 

+

244 else: 

+

245 arcz = ".1 12 23 31 15 41 5." 

+

246 arcz_missing = "41" 

+

247 

+

248 self.check_coverage("""\ 

+

249 for i in range(10): 

+

250 a = i 

+

251 continue # 3 

+

252 a = 99 

+

253 assert a == 9 # 5 

+

254 """, 

+

255 arcz=arcz, arcz_missing=arcz_missing 

+

256 ) 

+

257 

+

258 def test_nested_breaks(self): 

+

259 self.check_coverage("""\ 

+

260 for i in range(3): 

+

261 for j in range(3): 

+

262 a = i + j 

+

263 break # 4 

+

264 if i == 2: 

+

265 break 

+

266 assert a == 2 and i == 2 # 7 

+

267 """, 

+

268 arcz=".1 12 23 34 45 25 56 51 67 17 7.", arcz_missing="17 25") 

+

269 

+

270 def test_while_1(self): 

+

271 # With "while 1", the loop knows it's constant. 

+

272 if env.PYBEHAVIOR.keep_constant_test: 

+

273 arcz = ".1 12 23 34 45 36 62 57 7." 

+

274 elif env.PYBEHAVIOR.nix_while_true: 

+

275 arcz = ".1 13 34 45 36 63 57 7." 

+

276 else: 

+

277 arcz = ".1 12 23 34 45 36 63 57 7." 

+

278 self.check_coverage("""\ 

+

279 a, i = 1, 0 

+

280 while 1: 

+

281 if i >= 3: 

+

282 a = 4 

+

283 break 

+

284 i += 1 

+

285 assert a == 4 and i == 3 

+

286 """, 

+

287 arcz=arcz, 

+

288 ) 

+

289 

+

290 def test_while_true(self): 

+

291 # With "while True", 2.x thinks it's computation, 

+

292 # 3.x thinks it's constant. 

+

293 if env.PYBEHAVIOR.keep_constant_test: 

+

294 arcz = ".1 12 23 34 45 36 62 57 7." 

+

295 elif env.PYBEHAVIOR.nix_while_true: 

+

296 arcz = ".1 13 34 45 36 63 57 7." 

+

297 elif env.PY3: 

+

298 arcz = ".1 12 23 34 45 36 63 57 7." 

+

299 else: 

+

300 arcz = ".1 12 23 34 45 36 62 57 7." 

+

301 self.check_coverage("""\ 

+

302 a, i = 1, 0 

+

303 while True: 

+

304 if i >= 3: 

+

305 a = 4 

+

306 break 

+

307 i += 1 

+

308 assert a == 4 and i == 3 

+

309 """, 

+

310 arcz=arcz, 

+

311 ) 

+

312 

+

313 def test_zero_coverage_while_loop(self): 

+

314 # https://github.com/nedbat/coveragepy/issues/502 

+

315 self.make_file("main.py", "print('done')") 

+

316 self.make_file("zero.py", """\ 

+

317 def method(self): 

+

318 while True: 

+

319 return 1 

+

320 """) 

+

321 out = self.run_command("coverage run --branch --source=. main.py") 

+

322 assert out == 'done\n' 

+

323 if env.PYBEHAVIOR.keep_constant_test: 

+

324 num_stmts = 3 

+

325 elif env.PYBEHAVIOR.nix_while_true: 

+

326 num_stmts = 2 

+

327 else: 

+

328 num_stmts = 3 

+

329 expected = "zero.py {n} {n} 0 0 0% 1-3".format(n=num_stmts) 

+

330 report = self.report_from_command("coverage report -m") 

+

331 squeezed = self.squeezed_lines(report) 

+

332 assert expected in squeezed[3] 

+

333 

+

334 def test_bug_496_continue_in_constant_while(self): 

+

335 # https://github.com/nedbat/coveragepy/issues/496 

+

336 # A continue in a while-true needs to jump to the right place. 

+

337 if env.PYBEHAVIOR.keep_constant_test: 

+

338 arcz = ".1 12 23 34 45 52 46 67 7." 

+

339 elif env.PYBEHAVIOR.nix_while_true: 

+

340 arcz = ".1 13 34 45 53 46 67 7." 

+

341 elif env.PY3: 

+

342 arcz = ".1 12 23 34 45 53 46 67 7." 

+

343 else: 

+

344 arcz = ".1 12 23 34 45 52 46 67 7." 

+

345 self.check_coverage("""\ 

+

346 up = iter('ta') 

+

347 while True: 

+

348 char = next(up) 

+

349 if char == 't': 

+

350 continue 

+

351 i = "line 6" 

+

352 break 

+

353 """, 

+

354 arcz=arcz 

+

355 ) 

+

356 

+

357 def test_for_if_else_for(self): 

+

358 self.check_coverage("""\ 

+

359 def branches_2(l): 

+

360 if l: 

+

361 for e in l: 

+

362 a = 4 

+

363 else: 

+

364 a = 6 

+

365 

+

366 def branches_3(l): 

+

367 for x in l: 

+

368 if x: 

+

369 for e in l: 

+

370 a = 12 

+

371 else: 

+

372 a = 14 

+

373 

+

374 branches_2([0,1]) 

+

375 branches_3([0,1]) 

+

376 """, 

+

377 arcz= 

+

378 ".1 18 8G GH H. " 

+

379 ".2 23 34 43 26 3. 6. " 

+

380 "-89 9A 9-8 AB BC CB B9 AE E9", 

+

381 arcz_missing="26 6." 

+

382 ) 

+

383 

+

384 def test_for_else(self): 

+

385 self.check_coverage("""\ 

+

386 def forelse(seq): 

+

387 for n in seq: 

+

388 if n > 5: 

+

389 break 

+

390 else: 

+

391 print('None of the values were greater than 5') 

+

392 print('Done') 

+

393 forelse([1,2]) 

+

394 forelse([1,6]) 

+

395 """, 

+

396 arcz=".1 .2 23 32 34 47 26 67 7. 18 89 9." 

+

397 ) 

+

398 

+

399 def test_while_else(self): 

+

400 self.check_coverage("""\ 

+

401 def whileelse(seq): 

+

402 while seq: 

+

403 n = seq.pop() 

+

404 if n > 4: 

+

405 break 

+

406 else: 

+

407 n = 99 

+

408 return n 

+

409 assert whileelse([1, 2]) == 99 

+

410 assert whileelse([1, 5]) == 5 

+

411 """, 

+

412 arcz=".1 19 9A A. .2 23 34 45 58 42 27 78 8.", 

+

413 ) 

+

414 

+

415 def test_confusing_for_loop_bug_175(self): 

+

416 if env.PY3: 

+

417 # Py3 counts the list comp as a separate code object. 

+

418 arcz = ".1 -22 2-2 12 23 34 45 53 3." 

+

419 else: 

+

420 arcz = ".1 12 23 34 45 53 3." 

+

421 self.check_coverage("""\ 

+

422 o = [(1,2), (3,4)] 

+

423 o = [a for a in o] 

+

424 for tup in o: 

+

425 x = tup[0] 

+

426 y = tup[1] 

+

427 """, 

+

428 arcz=arcz, 

+

429 ) 

+

430 if env.PY3: 

+

431 arcz = ".1 12 -22 2-2 23 34 42 2." 

+

432 else: 

+

433 arcz = ".1 12 23 34 42 2." 

+

434 self.check_coverage("""\ 

+

435 o = [(1,2), (3,4)] 

+

436 for tup in [a for a in o]: 

+

437 x = tup[0] 

+

438 y = tup[1] 

+

439 """, 

+

440 arcz=arcz, 

+

441 ) 

+

442 

+

443 def test_generator_expression(self): 

+

444 # Generator expression: 

+

445 self.check_coverage("""\ 

+

446 o = ((1,2), (3,4)) 

+

447 o = (a for a in o) 

+

448 for tup in o: 

+

449 x = tup[0] 

+

450 y = tup[1] 

+

451 """, 

+

452 arcz=".1 -22 2-2 12 23 34 45 53 3.", 

+

453 ) 

+

454 

+

455 def test_other_comprehensions(self): 

+

456 # Set comprehension: 

+

457 self.check_coverage("""\ 

+

458 o = ((1,2), (3,4)) 

+

459 o = {a for a in o} 

+

460 for tup in o: 

+

461 x = tup[0] 

+

462 y = tup[1] 

+

463 """, 

+

464 arcz=".1 -22 2-2 12 23 34 45 53 3.", 

+

465 ) 

+

466 # Dict comprehension: 

+

467 self.check_coverage("""\ 

+

468 o = ((1,2), (3,4)) 

+

469 o = {a:1 for a in o} 

+

470 for tup in o: 

+

471 x = tup[0] 

+

472 y = tup[1] 

+

473 """, 

+

474 arcz=".1 -22 2-2 12 23 34 45 53 3.", 

+

475 ) 

+

476 

+

477 def test_multiline_dict_comp(self): 

+

478 # Multiline dict comp: 

+

479 self.check_coverage("""\ 

+

480 # comment 

+

481 d = \\ 

+

482 { 

+

483 i: 

+

484 str(i) 

+

485 for 

+

486 i 

+

487 in 

+

488 range(9) 

+

489 } 

+

490 x = 11 

+

491 """, 

+

492 arcz="-22 2B B-2 2-2" 

+

493 ) 

+

494 # Multi dict comp: 

+

495 self.check_coverage("""\ 

+

496 # comment 

+

497 d = \\ 

+

498 { 

+

499 (i, j): 

+

500 str(i+j) 

+

501 for 

+

502 i 

+

503 in 

+

504 range(9) 

+

505 for 

+

506 j 

+

507 in 

+

508 range(13) 

+

509 } 

+

510 x = 15 

+

511 """, 

+

512 arcz="-22 2F F-2 2-2" 

+

513 ) 

+

514 

+

515 

+

516class ExceptionArcTest(CoverageTest): 

+

517 """Arc-measuring tests involving exception handling.""" 

+

518 

+

519 def test_try_except(self): 

+

520 self.check_coverage("""\ 

+

521 a, b = 1, 1 

+

522 try: 

+

523 a = 3 

+

524 except: 

+

525 b = 5 

+

526 assert a == 3 and b == 1 

+

527 """, 

+

528 arcz=".1 12 23 36 45 56 6.", arcz_missing="45 56") 

+

529 

+

530 def test_raise_followed_by_statement(self): 

+

531 if env.PYBEHAVIOR.omit_after_jump: 

+

532 arcz = ".1 12 23 34 46 67 78 8." 

+

533 arcz_missing = "" 

+

534 else: 

+

535 arcz = ".1 12 23 34 46 58 67 78 8." 

+

536 arcz_missing = "58" 

+

537 self.check_coverage("""\ 

+

538 a, b = 1, 1 

+

539 try: 

+

540 a = 3 

+

541 raise Exception("Yikes!") 

+

542 a = 5 

+

543 except: 

+

544 b = 7 

+

545 assert a == 3 and b == 7 

+

546 """, 

+

547 arcz=arcz, arcz_missing=arcz_missing, 

+

548 ) 

+

549 

+

550 def test_hidden_raise(self): 

+

551 self.check_coverage("""\ 

+

552 a, b = 1, 1 

+

553 def oops(x): 

+

554 if x % 2: 

+

555 raise Exception("odd") 

+

556 try: 

+

557 a = 6 

+

558 oops(1) 

+

559 a = 8 

+

560 except: 

+

561 b = 10 

+

562 assert a == 6 and b == 10 

+

563 """, 

+

564 arcz=".1 12 -23 34 3-2 4-2 25 56 67 78 8B 9A AB B.", 

+

565 arcz_missing="3-2 78 8B", arcz_unpredicted="79", 

+

566 ) 

+

567 

+

568 def test_except_with_type(self): 

+

569 self.check_coverage("""\ 

+

570 a, b = 1, 1 

+

571 def oops(x): 

+

572 if x % 2: 

+

573 raise ValueError("odd") 

+

574 def try_it(x): 

+

575 try: 

+

576 a = 7 

+

577 oops(x) 

+

578 a = 9 

+

579 except ValueError: 

+

580 b = 11 

+

581 return a 

+

582 assert try_it(0) == 9 # C 

+

583 assert try_it(1) == 7 # D 

+

584 """, 

+

585 arcz=".1 12 -23 34 3-2 4-2 25 5D DE E. -56 67 78 89 9C AB BC C-5", 

+

586 arcz_unpredicted="8A", 

+

587 ) 

+

588 

+

589 def test_try_finally(self): 

+

590 self.check_coverage("""\ 

+

591 a, c = 1, 1 

+

592 try: 

+

593 a = 3 

+

594 finally: 

+

595 c = 5 

+

596 assert a == 3 and c == 5 

+

597 """, 

+

598 arcz=".1 12 23 35 56 6.", 

+

599 ) 

+

600 self.check_coverage("""\ 

+

601 a, c, d = 1, 1, 1 

+

602 try: 

+

603 try: 

+

604 a = 4 

+

605 finally: 

+

606 c = 6 

+

607 except: 

+

608 d = 8 

+

609 assert a == 4 and c == 6 and d == 1 # 9 

+

610 """, 

+

611 arcz=".1 12 23 34 46 78 89 69 9.", 

+

612 arcz_missing="78 89", 

+

613 ) 

+

614 self.check_coverage("""\ 

+

615 a, c, d = 1, 1, 1 

+

616 try: 

+

617 try: 

+

618 a = 4 

+

619 raise Exception("Yikes!") 

+

620 # line 6 

+

621 finally: 

+

622 c = 8 

+

623 except: 

+

624 d = 10 # A 

+

625 assert a == 4 and c == 8 and d == 10 # B 

+

626 """, 

+

627 arcz=".1 12 23 34 45 58 89 9A AB B.", 

+

628 arcz_missing="", 

+

629 ) 

+

630 

+

631 def test_finally_in_loop(self): 

+

632 self.check_coverage("""\ 

+

633 a, c, d, i = 1, 1, 1, 99 

+

634 try: 

+

635 for i in range(5): 

+

636 try: 

+

637 a = 5 

+

638 if i > 0: 

+

639 raise Exception("Yikes!") 

+

640 a = 8 

+

641 finally: 

+

642 c = 10 

+

643 except: 

+

644 d = 12 # C 

+

645 assert a == 5 and c == 10 and d == 12 # D 

+

646 """, 

+

647 arcz=".1 12 23 34 3D 45 56 67 68 7A 8A A3 AB BC CD D.", 

+

648 arcz_missing="3D", 

+

649 ) 

+

650 self.check_coverage("""\ 

+

651 a, c, d, i = 1, 1, 1, 99 

+

652 try: 

+

653 for i in range(5): 

+

654 try: 

+

655 a = 5 

+

656 if i > 10: 

+

657 raise Exception("Yikes!") 

+

658 a = 8 

+

659 finally: 

+

660 c = 10 

+

661 except: 

+

662 d = 12 # C 

+

663 assert a == 8 and c == 10 and d == 1 # D 

+

664 """, 

+

665 arcz=".1 12 23 34 3D 45 56 67 68 7A 8A A3 AB BC CD D.", 

+

666 arcz_missing="67 7A AB BC CD", 

+

667 ) 

+

668 

+

669 

+

670 def test_break_through_finally(self): 

+

671 if env.PYBEHAVIOR.finally_jumps_back: 

+

672 arcz = ".1 12 23 34 3D 45 56 67 68 7A 7D 8A A3 A7 BC CD D." 

+

673 else: 

+

674 arcz = ".1 12 23 34 3D 45 56 67 68 7A 8A A3 AD BC CD D." 

+

675 self.check_coverage("""\ 

+

676 a, c, d, i = 1, 1, 1, 99 

+

677 try: 

+

678 for i in range(3): 

+

679 try: 

+

680 a = 5 

+

681 if i > 0: 

+

682 break 

+

683 a = 8 

+

684 finally: 

+

685 c = 10 

+

686 except: 

+

687 d = 12 # C 

+

688 assert a == 5 and c == 10 and d == 1 # D 

+

689 """, 

+

690 arcz=arcz, 

+

691 arcz_missing="3D BC CD", 

+

692 ) 

+

693 

+

694 def test_continue_through_finally(self): 

+

695 if env.PYBEHAVIOR.finally_jumps_back: 

+

696 arcz = ".1 12 23 34 3D 45 56 67 68 73 7A 8A A3 A7 BC CD D." 

+

697 else: 

+

698 arcz = ".1 12 23 34 3D 45 56 67 68 7A 8A A3 BC CD D." 

+

699 self.check_coverage("""\ 

+

700 a, b, c, d, i = 1, 1, 1, 1, 99 

+

701 try: 

+

702 for i in range(5): 

+

703 try: 

+

704 a = 5 

+

705 if i > 0: 

+

706 continue 

+

707 b = 8 

+

708 finally: 

+

709 c = 10 

+

710 except: 

+

711 d = 12 # C 

+

712 assert (a, b, c, d) == (5, 8, 10, 1) # D 

+

713 """, 

+

714 arcz=arcz, 

+

715 arcz_missing="BC CD", 

+

716 ) 

+

717 

+

718 def test_finally_in_loop_bug_92(self): 

+

719 self.check_coverage("""\ 

+

720 for i in range(5): 

+

721 try: 

+

722 j = 3 

+

723 finally: 

+

724 f = 5 

+

725 g = 6 

+

726 h = 7 

+

727 """, 

+

728 arcz=".1 12 23 35 56 61 17 7.", 

+

729 ) 

+

730 

+

731 def test_bug_212(self): 

+

732 # "except Exception as e" is crucial here. 

+

733 # Bug 212 said that the "if exc" line was incorrectly marked as only 

+

734 # partially covered. 

+

735 self.check_coverage("""\ 

+

736 def b(exc): 

+

737 try: 

+

738 while "no peephole".upper(): 

+

739 raise Exception(exc) # 4 

+

740 except Exception as e: 

+

741 if exc != 'expected': 

+

742 raise 

+

743 q = 8 

+

744 

+

745 b('expected') 

+

746 try: 

+

747 b('unexpected') # C 

+

748 except: 

+

749 pass 

+

750 """, 

+

751 arcz=".1 .2 1A 23 34 3. 45 56 67 68 7. 8. AB BC C. DE E.", 

+

752 arcz_missing="3. C.", 

+

753 arcz_unpredicted="CD", 

+

754 ) 

+

755 

+

756 def test_except_finally(self): 

+

757 self.check_coverage("""\ 

+

758 a, b, c = 1, 1, 1 

+

759 try: 

+

760 a = 3 

+

761 except: 

+

762 b = 5 

+

763 finally: 

+

764 c = 7 

+

765 assert a == 3 and b == 1 and c == 7 

+

766 """, 

+

767 arcz=".1 12 23 45 37 57 78 8.", arcz_missing="45 57") 

+

768 self.check_coverage("""\ 

+

769 a, b, c = 1, 1, 1 

+

770 def oops(x): 

+

771 if x % 2: raise Exception("odd") 

+

772 try: 

+

773 a = 5 

+

774 oops(1) 

+

775 a = 7 

+

776 except: 

+

777 b = 9 

+

778 finally: 

+

779 c = 11 

+

780 assert a == 5 and b == 9 and c == 11 

+

781 """, 

+

782 arcz=".1 12 -23 3-2 24 45 56 67 7B 89 9B BC C.", 

+

783 arcz_missing="67 7B", arcz_unpredicted="68") 

+

784 

+

785 def test_multiple_except_clauses(self): 

+

786 self.check_coverage("""\ 

+

787 a, b, c = 1, 1, 1 

+

788 try: 

+

789 a = 3 

+

790 except ValueError: 

+

791 b = 5 

+

792 except IndexError: 

+

793 a = 7 

+

794 finally: 

+

795 c = 9 

+

796 assert a == 3 and b == 1 and c == 9 

+

797 """, 

+

798 arcz=".1 12 23 45 46 39 59 67 79 9A A.", 

+

799 arcz_missing="45 59 46 67 79", 

+

800 ) 

+

801 self.check_coverage("""\ 

+

802 a, b, c = 1, 1, 1 

+

803 try: 

+

804 a = int("xyz") # ValueError 

+

805 except ValueError: 

+

806 b = 5 

+

807 except IndexError: 

+

808 a = 7 

+

809 finally: 

+

810 c = 9 

+

811 assert a == 1 and b == 5 and c == 9 

+

812 """, 

+

813 arcz=".1 12 23 45 46 39 59 67 79 9A A.", 

+

814 arcz_missing="39 46 67 79", 

+

815 arcz_unpredicted="34", 

+

816 ) 

+

817 self.check_coverage("""\ 

+

818 a, b, c = 1, 1, 1 

+

819 try: 

+

820 a = [1][3] # IndexError 

+

821 except ValueError: 

+

822 b = 5 

+

823 except IndexError: 

+

824 a = 7 

+

825 finally: 

+

826 c = 9 

+

827 assert a == 7 and b == 1 and c == 9 

+

828 """, 

+

829 arcz=".1 12 23 45 46 39 59 67 79 9A A.", 

+

830 arcz_missing="39 45 59", 

+

831 arcz_unpredicted="34", 

+

832 ) 

+

833 self.check_coverage("""\ 

+

834 a, b, c = 1, 1, 1 

+

835 try: 

+

836 try: 

+

837 a = 4/0 # ZeroDivisionError 

+

838 except ValueError: 

+

839 b = 6 

+

840 except IndexError: 

+

841 a = 8 

+

842 finally: 

+

843 c = 10 

+

844 except ZeroDivisionError: 

+

845 pass 

+

846 assert a == 1 and b == 1 and c == 10 

+

847 """, 

+

848 arcz=".1 12 23 34 4A 56 6A 57 78 8A AD BC CD D.", 

+

849 arcz_missing="4A 56 6A 78 8A AD", 

+

850 arcz_unpredicted="45 7A AB", 

+

851 ) 

+

852 

+

853 def test_return_finally(self): 

+

854 if env.PYBEHAVIOR.finally_jumps_back: 

+

855 arcz = ".1 12 29 9A AB BC C-1 -23 34 45 5-2 57 75 38 8-2" 

+

856 else: 

+

857 arcz = ".1 12 29 9A AB BC C-1 -23 34 45 57 7-2 38 8-2" 

+

858 self.check_coverage("""\ 

+

859 a = [1] 

+

860 def check_token(data): 

+

861 if data: 

+

862 try: 

+

863 return 5 

+

864 finally: 

+

865 a.append(7) 

+

866 return 8 

+

867 assert check_token(False) == 8 

+

868 assert a == [1] 

+

869 assert check_token(True) == 5 

+

870 assert a == [1, 7] 

+

871 """, 

+

872 arcz=arcz, 

+

873 ) 

+

874 

+

875 def test_except_jump_finally(self): 

+

876 if env.PYBEHAVIOR.finally_jumps_back: 

+

877 arcz = ( 

+

878 ".1 1Q QR RS ST TU U. " 

+

879 ".2 23 34 45 56 4O 6L " 

+

880 "78 89 9A AL LA AO 8B BC CD DL LD D4 BE EF FG GL LG G. EH HI IJ JL HL " 

+

881 "L4 LM " 

+

882 "MN NO O." 

+

883 ) 

+

884 else: 

+

885 arcz = ( 

+

886 ".1 1Q QR RS ST TU U. " 

+

887 ".2 23 34 45 56 4O 6L " 

+

888 "78 89 9A AL 8B BC CD DL BE EF FG GL EH HI IJ JL HL " 

+

889 "LO L4 L. LM " 

+

890 "MN NO O." 

+

891 ) 

+

892 self.check_coverage("""\ 

+

893 def func(x): 

+

894 a = f = g = 2 

+

895 try: 

+

896 for i in range(4): 

+

897 try: 

+

898 6/0 

+

899 except ZeroDivisionError: 

+

900 if x == 'break': 

+

901 a = 9 

+

902 break 

+

903 elif x == 'continue': 

+

904 a = 12 

+

905 continue 

+

906 elif x == 'return': 

+

907 a = 15 # F 

+

908 return a, f, g, i # G 

+

909 elif x == 'raise': # H 

+

910 a = 18 # I 

+

911 raise ValueError() # J 

+

912 finally: 

+

913 f = 21 # L 

+

914 except ValueError: # M 

+

915 g = 23 # N 

+

916 return a, f, g, i # O 

+

917 

+

918 assert func('break') == (9, 21, 2, 0) # Q 

+

919 assert func('continue') == (12, 21, 2, 3) # R 

+

920 assert func('return') == (15, 2, 2, 0) # S 

+

921 assert func('raise') == (18, 21, 23, 0) # T 

+

922 assert func('other') == (2, 21, 2, 3) # U 30 

+

923 """, 

+

924 arcz=arcz, 

+

925 arcz_missing="6L", 

+

926 arcz_unpredicted="67", 

+

927 ) 

+

928 

+

929 def test_else_jump_finally(self): 

+

930 if env.PYBEHAVIOR.finally_jumps_back: 

+

931 arcz = ( 

+

932 ".1 1S ST TU UV VW W. " 

+

933 ".2 23 34 45 56 6A 78 8N 4Q " 

+

934 "AB BC CN NC CQ AD DE EF FN NF F4 DG GH HI IN NI I. GJ JK KL LN JN " 

+

935 "N4 NO " 

+

936 "OP PQ Q." 

+

937 ) 

+

938 else: 

+

939 arcz = ( 

+

940 ".1 1S ST TU UV VW W. " 

+

941 ".2 23 34 45 56 6A 78 8N 4Q " 

+

942 "AB BC CN AD DE EF FN DG GH HI IN GJ JK KL LN JN " 

+

943 "N4 NQ N. NO " 

+

944 "OP PQ Q." 

+

945 ) 

+

946 self.check_coverage("""\ 

+

947 def func(x): 

+

948 a = f = g = 2 

+

949 try: 

+

950 for i in range(4): 

+

951 try: 

+

952 b = 6 

+

953 except ZeroDivisionError: 

+

954 pass 

+

955 else: 

+

956 if x == 'break': 

+

957 a = 11 

+

958 break 

+

959 elif x == 'continue': 

+

960 a = 14 

+

961 continue 

+

962 elif x == 'return': 

+

963 a = 17 # H 

+

964 return a, f, g, i # I 

+

965 elif x == 'raise': # J 

+

966 a = 20 # K 

+

967 raise ValueError() # L 

+

968 finally: 

+

969 f = 23 # N 

+

970 except ValueError: # O 

+

971 g = 25 # P 

+

972 return a, f, g, i # Q 

+

973 

+

974 assert func('break') == (11, 23, 2, 0) # S 

+

975 assert func('continue') == (14, 23, 2, 3) # T 

+

976 assert func('return') == (17, 2, 2, 0) # U 

+

977 assert func('raise') == (20, 23, 25, 0) # V 

+

978 assert func('other') == (2, 23, 2, 3) # W 32 

+

979 """, 

+

980 arcz=arcz, 

+

981 arcz_missing="78 8N", 

+

982 arcz_unpredicted="", 

+

983 ) 

+

984 

+

985 

+

986class YieldTest(CoverageTest): 

+

987 """Arc tests for generators.""" 

+

988 

+

989 def test_yield_in_loop(self): 

+

990 self.check_coverage("""\ 

+

991 def gen(inp): 

+

992 for n in inp: 

+

993 yield n 

+

994 

+

995 list(gen([1,2,3])) 

+

996 """, 

+

997 arcz=".1 .2 23 2. 32 15 5.", 

+

998 ) 

+

999 

+

1000 def test_padded_yield_in_loop(self): 

+

1001 self.check_coverage("""\ 

+

1002 def gen(inp): 

+

1003 i = 2 

+

1004 for n in inp: 

+

1005 i = 4 

+

1006 yield n 

+

1007 i = 6 

+

1008 i = 7 

+

1009 

+

1010 list(gen([1,2,3])) 

+

1011 """, 

+

1012 arcz=".1 19 9. .2 23 34 45 56 63 37 7.", 

+

1013 ) 

+

1014 

+

1015 def test_bug_308(self): 

+

1016 self.check_coverage("""\ 

+

1017 def run(): 

+

1018 for i in range(10): 

+

1019 yield lambda: i 

+

1020 

+

1021 for f in run(): 

+

1022 print(f()) 

+

1023 """, 

+

1024 arcz=".1 15 56 65 5. .2 23 32 2. -33 3-3", 

+

1025 ) 

+

1026 

+

1027 self.check_coverage("""\ 

+

1028 def run(): 

+

1029 yield lambda: 100 

+

1030 for i in range(10): 

+

1031 yield lambda: i 

+

1032 

+

1033 for f in run(): 

+

1034 print(f()) 

+

1035 """, 

+

1036 arcz=".1 16 67 76 6. .2 23 34 43 3. -22 2-2 -44 4-4", 

+

1037 ) 

+

1038 

+

1039 self.check_coverage("""\ 

+

1040 def run(): 

+

1041 yield lambda: 100 # no branch miss 

+

1042 

+

1043 for f in run(): 

+

1044 print(f()) 

+

1045 """, 

+

1046 arcz=".1 14 45 54 4. .2 2. -22 2-2", 

+

1047 ) 

+

1048 

+

1049 def test_bug_324(self): 

+

1050 # This code is tricky: the list() call pulls all the values from gen(), 

+

1051 # but each of them is a generator itself that is never iterated. As a 

+

1052 # result, the generator expression on line 3 is never entered or run. 

+

1053 self.check_coverage("""\ 

+

1054 def gen(inp): 

+

1055 for n in inp: 

+

1056 yield (i * 2 for i in range(n)) 

+

1057 

+

1058 list(gen([1,2,3])) 

+

1059 """, 

+

1060 arcz= 

+

1061 ".1 15 5. " # The module level 

+

1062 ".2 23 32 2. " # The gen() function 

+

1063 "-33 3-3", # The generator expression 

+

1064 arcz_missing="-33 3-3", 

+

1065 ) 

+

1066 

+

1067 def test_coroutines(self): 

+

1068 self.check_coverage("""\ 

+

1069 def double_inputs(): 

+

1070 while len([1]): # avoid compiler differences 

+

1071 x = yield 

+

1072 x *= 2 

+

1073 yield x 

+

1074 

+

1075 gen = double_inputs() 

+

1076 next(gen) 

+

1077 print(gen.send(10)) 

+

1078 next(gen) 

+

1079 print(gen.send(6)) 

+

1080 """, 

+

1081 arcz= 

+

1082 ".1 17 78 89 9A AB B. " 

+

1083 ".2 23 34 45 52 2.", 

+

1084 arcz_missing="2.", 

+

1085 ) 

+

1086 assert self.stdout() == "20\n12\n" 

+

1087 

+

1088 @pytest.mark.skipif(not env.PYBEHAVIOR.yield_from, 

+

1089 reason="Python before 3.3 doesn't have 'yield from'" 

+

1090 ) 

+

1091 def test_yield_from(self): 

+

1092 self.check_coverage("""\ 

+

1093 def gen(inp): 

+

1094 i = 2 

+

1095 for n in inp: 

+

1096 i = 4 

+

1097 yield from range(3) 

+

1098 i = 6 

+

1099 i = 7 

+

1100 

+

1101 list(gen([1,2,3])) 

+

1102 """, 

+

1103 arcz=".1 19 9. .2 23 34 45 56 63 37 7.", 

+

1104 arcz_unpredicted="5.", 

+

1105 ) 

+

1106 

+

1107 def test_abandoned_yield(self): 

+

1108 # https://github.com/nedbat/coveragepy/issues/440 

+

1109 self.check_coverage("""\ 

+

1110 def gen(): 

+

1111 print("yup") 

+

1112 yield "yielded" 

+

1113 print("nope") 

+

1114 

+

1115 print(next(gen())) 

+

1116 """, 

+

1117 lines=[1, 2, 3, 4, 6], 

+

1118 missing="4", 

+

1119 arcz=".1 16 6. .2 23 34 4.", 

+

1120 arcz_missing="34 4.", 

+

1121 ) 

+

1122 

+

1123 

+

1124class OptimizedIfTest(CoverageTest): 

+

1125 """Tests of if statements being optimized away.""" 

+

1126 

+

1127 def test_optimized_away_if_0(self): 

+

1128 if env.PYBEHAVIOR.keep_constant_test: 

+

1129 lines = [1, 2, 3, 4, 8, 9] 

+

1130 arcz = ".1 12 23 24 34 48 49 89 9." 

+

1131 arcz_missing = "24" 

+

1132 # 49 isn't missing because line 4 is matched by the default partial 

+

1133 # exclusion regex, and no branches are considered missing if they 

+

1134 # start from an excluded line. 

+

1135 else: 

+

1136 lines = [1, 2, 3, 8, 9] 

+

1137 arcz = ".1 12 23 28 38 89 9." 

+

1138 arcz_missing = "28" 

+

1139 

+

1140 self.check_coverage("""\ 

+

1141 a = 1 

+

1142 if len([2]): 

+

1143 c = 3 

+

1144 if 0: 

+

1145 if len([5]): 

+

1146 d = 6 

+

1147 else: 

+

1148 e = 8 

+

1149 f = 9 

+

1150 """, 

+

1151 lines=lines, 

+

1152 arcz=arcz, 

+

1153 arcz_missing=arcz_missing, 

+

1154 ) 

+

1155 

+

1156 def test_optimized_away_if_1(self): 

+

1157 if env.PYBEHAVIOR.keep_constant_test: 

+

1158 lines = [1, 2, 3, 4, 5, 6, 9] 

+

1159 arcz = ".1 12 23 24 34 45 49 56 69 59 9." 

+

1160 arcz_missing = "24 59" 

+

1161 # 49 isn't missing because line 4 is matched by the default partial 

+

1162 # exclusion regex, and no branches are considered missing if they 

+

1163 # start from an excluded line. 

+

1164 else: 

+

1165 lines = [1, 2, 3, 5, 6, 9] 

+

1166 arcz = ".1 12 23 25 35 56 69 59 9." 

+

1167 arcz_missing = "25 59" 

+

1168 

+

1169 self.check_coverage("""\ 

+

1170 a = 1 

+

1171 if len([2]): 

+

1172 c = 3 

+

1173 if 1: 

+

1174 if len([5]): 

+

1175 d = 6 

+

1176 else: 

+

1177 e = 8 

+

1178 f = 9 

+

1179 """, 

+

1180 lines=lines, 

+

1181 arcz=arcz, 

+

1182 arcz_missing=arcz_missing, 

+

1183 ) 

+

1184 

+

1185 def test_optimized_away_if_1_no_else(self): 

+

1186 if env.PYBEHAVIOR.keep_constant_test: 

+

1187 lines = [1, 2, 3, 4, 5] 

+

1188 arcz = ".1 12 23 25 34 45 5." 

+

1189 arcz_missing = "" 

+

1190 # 25 isn't missing because line 2 is matched by the default partial 

+

1191 # exclusion regex, and no branches are considered missing if they 

+

1192 # start from an excluded line. 

+

1193 else: 

+

1194 lines = [1, 3, 4, 5] 

+

1195 arcz = ".1 13 34 45 5." 

+

1196 arcz_missing = "" 

+

1197 self.check_coverage("""\ 

+

1198 a = 1 

+

1199 if 1: 

+

1200 b = 3 

+

1201 c = 4 

+

1202 d = 5 

+

1203 """, 

+

1204 lines=lines, 

+

1205 arcz=arcz, 

+

1206 arcz_missing=arcz_missing, 

+

1207 ) 

+

1208 

+

1209 def test_optimized_if_nested(self): 

+

1210 if env.PYBEHAVIOR.keep_constant_test: 

+

1211 lines = [1, 2, 8, 11, 12, 13, 14, 15] 

+

1212 arcz = ".1 12 28 2F 8B 8F BC CD DE EF F." 

+

1213 arcz_missing = "" 

+

1214 # 2F and 8F aren't missing because they're matched by the default 

+

1215 # partial exclusion regex, and no branches are considered missing 

+

1216 # if they start from an excluded line. 

+

1217 else: 

+

1218 lines = [1, 12, 14, 15] 

+

1219 arcz = ".1 1C CE EF F." 

+

1220 arcz_missing = "" 

+

1221 

+

1222 self.check_coverage("""\ 

+

1223 a = 1 

+

1224 if 0: 

+

1225 if 0: 

+

1226 b = 4 

+

1227 else: 

+

1228 c = 6 

+

1229 else: 

+

1230 if 0: 

+

1231 d = 9 

+

1232 else: 

+

1233 if 0: e = 11 

+

1234 f = 12 

+

1235 if 0: g = 13 

+

1236 h = 14 

+

1237 i = 15 

+

1238 """, 

+

1239 lines=lines, 

+

1240 arcz=arcz, 

+

1241 arcz_missing=arcz_missing, 

+

1242 ) 

+

1243 

+

1244 def test_dunder_debug(self): 

+

1245 # Since some of our tests use __debug__, let's make sure it is true as 

+

1246 # we expect 

+

1247 assert __debug__ 

+

1248 # Check that executed code has __debug__ 

+

1249 self.check_coverage("""\ 

+

1250 assert __debug__, "assert __debug__" 

+

1251 """ 

+

1252 ) 

+

1253 # Check that if it didn't have debug, it would let us know. 

+

1254 with pytest.raises(AssertionError): 

+

1255 self.check_coverage("""\ 

+

1256 assert not __debug__, "assert not __debug__" 

+

1257 """ 

+

1258 ) 

+

1259 

+

1260 def test_if_debug(self): 

+

1261 if env.PYBEHAVIOR.optimize_if_debug: 

+

1262 arcz = ".1 12 24 41 26 61 1." 

+

1263 arcz_missing = "" 

+

1264 else: 

+

1265 arcz = ".1 12 23 31 34 41 26 61 1." 

+

1266 arcz_missing = "31" 

+

1267 self.check_coverage("""\ 

+

1268 for value in [True, False]: 

+

1269 if value: 

+

1270 if __debug__: 

+

1271 x = 4 

+

1272 else: 

+

1273 x = 6 

+

1274 """, 

+

1275 arcz=arcz, 

+

1276 arcz_missing=arcz_missing, 

+

1277 ) 

+

1278 

+

1279 def test_if_not_debug(self): 

+

1280 arcz_missing = "" 

+

1281 if env.PYBEHAVIOR.pep626: 

+

1282 arcz = ".1 12 23 34 42 37 72 28 8." 

+

1283 elif env.PYBEHAVIOR.optimize_if_not_debug2: 

+

1284 arcz = ".1 12 23 35 52 37 72 28 8." 

+

1285 elif env.PYBEHAVIOR.optimize_if_not_debug: 

+

1286 arcz = ".1 12 23 34 42 37 72 28 8." 

+

1287 else: 

+

1288 arcz = ".1 12 23 34 45 42 52 37 72 28 8." 

+

1289 arcz_missing = "45 52" 

+

1290 self.check_coverage("""\ 

+

1291 lines = set() 

+

1292 for value in [True, False]: 

+

1293 if value: 

+

1294 if not __debug__: 

+

1295 lines.add(5) 

+

1296 else: 

+

1297 lines.add(7) 

+

1298 assert lines == set([7]) 

+

1299 """, 

+

1300 arcz=arcz, 

+

1301 arcz_missing=arcz_missing, 

+

1302 ) 

+

1303 

+

1304 

+

1305class MiscArcTest(CoverageTest): 

+

1306 """Miscellaneous arc-measuring tests.""" 

+

1307 

+

1308 def test_dict_literal(self): 

+

1309 self.check_coverage("""\ 

+

1310 d = { 

+

1311 'a': 2, 

+

1312 'b': 3, 

+

1313 'c': { 

+

1314 'd': 5, 

+

1315 'e': 6, 

+

1316 } 

+

1317 } 

+

1318 assert d 

+

1319 """, 

+

1320 arcz=".1 19 9.", 

+

1321 ) 

+

1322 self.check_coverage("""\ 

+

1323 d = \\ 

+

1324 { 'a': 2, 

+

1325 'b': 3, 

+

1326 'c': { 

+

1327 'd': 5, 

+

1328 'e': 6, 

+

1329 } 

+

1330 } 

+

1331 assert d 

+

1332 """, 

+

1333 arcz=".1 19 9.", 

+

1334 ) 

+

1335 

+

1336 @pytest.mark.skipif(not env.PYBEHAVIOR.unpackings_pep448, 

+

1337 reason="Don't have unpacked literals until 3.5" 

+

1338 ) 

+

1339 def test_unpacked_literals(self): 

+

1340 self.check_coverage("""\ 

+

1341 d = { 

+

1342 'a': 2, 

+

1343 'b': 3, 

+

1344 } 

+

1345 weird = { 

+

1346 **d, 

+

1347 **{'c': 7}, 

+

1348 'd': 8, 

+

1349 } 

+

1350 assert weird['b'] == 3 

+

1351 """, 

+

1352 arcz=".1 15 5A A." 

+

1353 ) 

+

1354 self.check_coverage("""\ 

+

1355 l = [ 

+

1356 2, 

+

1357 3, 

+

1358 ] 

+

1359 weird = [ 

+

1360 *l, 

+

1361 *[7], 

+

1362 8, 

+

1363 ] 

+

1364 assert weird[1] == 3 

+

1365 """, 

+

1366 arcz=".1 15 5A A." 

+

1367 ) 

+

1368 

+

1369 def test_pathologically_long_code_object(self): 

+

1370 # https://github.com/nedbat/coveragepy/issues/359 

+

1371 # The structure of this file is such that an EXTENDED_ARG bytecode is 

+

1372 # needed to encode the jump at the end. We weren't interpreting those 

+

1373 # opcodes. 

+

1374 # Note that we no longer interpret bytecode at all, but it couldn't 

+

1375 # hurt to keep the test... 

+

1376 sizes = [10, 50, 100, 500, 1000, 2000] 

+

1377 for n in sizes: 

+

1378 code = """\ 

+

1379 data = [ 

+

1380 """ + "".join("""\ 

+

1381 [ 

+

1382 {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}, {i}], 

+

1383 """.format(i=i) for i in range(n) 

+

1384 ) + """\ 

+

1385 ] 

+

1386 

+

1387 print(len(data)) 

+

1388 """ 

+

1389 self.check_coverage(code, arcs=[(-1, 1), (1, 2*n+4), (2*n+4, -1)]) 

+

1390 assert self.stdout().split() == [str(n) for n in sizes] 

+

1391 

+

1392 def test_partial_generators(self): 

+

1393 # https://github.com/nedbat/coveragepy/issues/475 

+

1394 # Line 2 is executed completely. 

+

1395 # Line 3 is started but not finished, because zip ends before it finishes. 

+

1396 # Line 4 is never started. 

+

1397 cov = self.check_coverage("""\ 

+

1398 def f(a, b): 

+

1399 c = (i for i in a) # 2 

+

1400 d = (j for j in b) # 3 

+

1401 e = (k for k in b) # 4 

+

1402 return dict(zip(c, d)) 

+

1403 

+

1404 f(['a', 'b'], [1, 2, 3]) 

+

1405 """, 

+

1406 arcz=".1 17 7. .2 23 34 45 5. -22 2-2 -33 3-3 -44 4-4", 

+

1407 arcz_missing="3-3 -44 4-4", 

+

1408 ) 

+

1409 # ugh, unexposed methods?? 

+

1410 filename = self.last_module_name + ".py" 

+

1411 fr = cov._get_file_reporter(filename) 

+

1412 arcs_executed = cov._analyze(filename).arcs_executed() 

+

1413 expected = "line 3 didn't finish the generator expression on line 3" 

+

1414 assert expected == fr.missing_arc_description(3, -3, arcs_executed) 

+

1415 expected = "line 4 didn't run the generator expression on line 4" 

+

1416 assert expected == fr.missing_arc_description(4, -4, arcs_executed) 

+

1417 

+

1418 

+

1419class DecoratorArcTest(CoverageTest): 

+

1420 """Tests of arcs with decorators.""" 

+

1421 

+

1422 def test_function_decorator(self): 

+

1423 self.check_coverage("""\ 

+

1424 def decorator(arg): 

+

1425 def _dec(f): 

+

1426 return f 

+

1427 return _dec 

+

1428 

+

1429 @decorator(6) 

+

1430 @decorator( 

+

1431 len([8]), 

+

1432 ) 

+

1433 def my_function( 

+

1434 a=len([11]), 

+

1435 ): 

+

1436 x = 13 

+

1437 a = 14 

+

1438 my_function() 

+

1439 """, 

+

1440 arcz= 

+

1441 ".1 16 67 7A AE EF F. " # main line 

+

1442 ".2 24 4. -23 3-2 " # decorators 

+

1443 "-6D D-6 ", # my_function 

+

1444 ) 

+

1445 

+

1446 def test_class_decorator(self): 

+

1447 self.check_coverage("""\ 

+

1448 def decorator(arg): 

+

1449 def _dec(c): 

+

1450 return c 

+

1451 return _dec 

+

1452 

+

1453 @decorator(6) 

+

1454 @decorator( 

+

1455 len([8]), 

+

1456 ) 

+

1457 class MyObject( 

+

1458 object 

+

1459 ): 

+

1460 X = 13 

+

1461 a = 14 

+

1462 """, 

+

1463 arcz= 

+

1464 ".1 16 67 6D 7A AE E. " # main line 

+

1465 ".2 24 4. -23 3-2 " # decorators 

+

1466 "-66 D-6 ", # MyObject 

+

1467 ) 

+

1468 

+

1469 def test_bug_466(self): 

+

1470 # A bad interaction between decorators and multi-line list assignments, 

+

1471 # believe it or not...! 

+

1472 if env.PYBEHAVIOR.trace_decorated_def: 

+

1473 arcz = ".1 1A A. 13 34 4. -35 58 8-3" 

+

1474 else: 

+

1475 arcz = ".1 1A A. 13 3. -35 58 8-3" 

+

1476 self.check_coverage("""\ 

+

1477 class Parser(object): 

+

1478 

+

1479 @classmethod 

+

1480 def parse(cls): 

+

1481 formats = [ 5 ] 

+

1482 

+

1483 

+

1484 return None 

+

1485 

+

1486 Parser.parse() 

+

1487 """, 

+

1488 arcz=arcz, 

+

1489 ) 

+

1490 if env.PYBEHAVIOR.trace_decorated_def: 

+

1491 arcz = ".1 1A A. 13 34 4. -35 58 8-3" 

+

1492 else: 

+

1493 arcz = ".1 1A A. 13 3. -35 58 8-3" 

+

1494 self.check_coverage("""\ 

+

1495 class Parser(object): 

+

1496 

+

1497 @classmethod 

+

1498 def parse(cls): 

+

1499 formats = [ 

+

1500 6, 

+

1501 ] 

+

1502 return None 

+

1503 

+

1504 Parser.parse() 

+

1505 """, 

+

1506 arcz=arcz, 

+

1507 ) 

+

1508 

+

1509 

+

1510class LambdaArcTest(CoverageTest): 

+

1511 """Tests of lambdas""" 

+

1512 

+

1513 def test_multiline_lambda(self): 

+

1514 self.check_coverage("""\ 

+

1515 fn = (lambda x: 

+

1516 x + 2 

+

1517 ) 

+

1518 assert fn(4) == 6 

+

1519 """, 

+

1520 arcz=".1 14 4-1 1-1", 

+

1521 ) 

+

1522 self.check_coverage("""\ 

+

1523 

+

1524 fn = \\ 

+

1525 ( 

+

1526 lambda 

+

1527 x: 

+

1528 x 

+

1529 + 

+

1530 8 

+

1531 ) 

+

1532 assert fn(10) == 18 

+

1533 """, 

+

1534 arcz="-22 2A A-2 2-2", 

+

1535 ) 

+

1536 

+

1537 def test_unused_lambdas_are_confusing_bug_90(self): 

+

1538 self.check_coverage("""\ 

+

1539 a = 1 

+

1540 fn = lambda x: x 

+

1541 b = 3 

+

1542 """, 

+

1543 arcz=".1 12 -22 2-2 23 3.", arcz_missing="-22 2-2", 

+

1544 ) 

+

1545 

+

1546 def test_raise_with_lambda_looks_like_partial_branch(self): 

+

1547 self.check_coverage("""\ 

+

1548 def ouch(fn): 

+

1549 2/0 

+

1550 a = b = c = d = 3 

+

1551 try: 

+

1552 a = ouch(lambda: 5) 

+

1553 if a: 

+

1554 b = 7 

+

1555 except ZeroDivisionError: 

+

1556 c = 9 

+

1557 d = 10 

+

1558 assert (a, b, c, d) == (3, 3, 9, 10) 

+

1559 """, 

+

1560 lines=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], 

+

1561 missing="6-7", 

+

1562 arcz=".1 13 34 45 56 67 6A 7A 89 9A AB B. .2 2. -55 5-5", 

+

1563 arcz_missing="56 67 6A 7A -55 5-5", 

+

1564 arcz_unpredicted="58", 

+

1565 ) 

+

1566 

+

1567 def test_lambda_in_dict(self): 

+

1568 self.check_coverage("""\ 

+

1569 x = 1 

+

1570 x = 2 

+

1571 d = { 

+

1572 4: lambda: [], 

+

1573 5: lambda: [], 

+

1574 6: lambda: [], 

+

1575 7: lambda: [], 

+

1576 } 

+

1577 

+

1578 for k, v in d.items(): # 10 

+

1579 if k & 1: 

+

1580 v() 

+

1581 """, 

+

1582 arcz=".1 12 23 3A AB BC BA CA A. -33 3-3", 

+

1583 ) 

+

1584 

+

1585 

+

1586@pytest.mark.skipif(not env.PYBEHAVIOR.async_syntax, reason="Async features are new in Python 3.5") 

+

1587class AsyncTest(CoverageTest): 

+

1588 """Tests of the new async and await keywords in Python 3.5""" 

+

1589 

+

1590 def test_async(self): 

+

1591 self.check_coverage("""\ 

+

1592 import asyncio 

+

1593 

+

1594 async def compute(x, y): # 3 

+

1595 print("Compute %s + %s ..." % (x, y)) 

+

1596 await asyncio.sleep(0.001) 

+

1597 return x + y # 6 

+

1598 

+

1599 async def print_sum(x, y): # 8 

+

1600 result = (0 + 

+

1601 await compute(x, y) # A 

+

1602 ) 

+

1603 print("%s + %s = %s" % (x, y, result)) 

+

1604 

+

1605 loop = asyncio.new_event_loop() # E 

+

1606 loop.run_until_complete(print_sum(1, 2)) 

+

1607 loop.close() # G 

+

1608 """, 

+

1609 arcz= 

+

1610 ".1 13 38 8E EF FG G. " 

+

1611 "-34 45 56 6-3 " 

+

1612 "-89 9C C-8", 

+

1613 arcz_unpredicted="5-3 9-8", 

+

1614 ) 

+

1615 assert self.stdout() == "Compute 1 + 2 ...\n1 + 2 = 3\n" 

+

1616 

+

1617 def test_async_for(self): 

+

1618 self.check_coverage("""\ 

+

1619 import asyncio 

+

1620 

+

1621 class AsyncIteratorWrapper: # 3 

+

1622 def __init__(self, obj): # 4 

+

1623 self._it = iter(obj) 

+

1624 

+

1625 def __aiter__(self): # 7 

+

1626 return self 

+

1627 

+

1628 async def __anext__(self): # A 

+

1629 try: 

+

1630 return next(self._it) 

+

1631 except StopIteration: 

+

1632 raise StopAsyncIteration 

+

1633 

+

1634 async def doit(): # G 

+

1635 async for letter in AsyncIteratorWrapper("abc"): 

+

1636 print(letter) 

+

1637 print(".") 

+

1638 

+

1639 loop = asyncio.new_event_loop() # L 

+

1640 loop.run_until_complete(doit()) 

+

1641 loop.close() 

+

1642 """, 

+

1643 arcz= 

+

1644 ".1 13 3G GL LM MN N. " # module main line 

+

1645 "-33 34 47 7A A-3 " # class definition 

+

1646 "-GH HI IH HJ J-G " # doit 

+

1647 "-45 5-4 " # __init__ 

+

1648 "-78 8-7 " # __aiter__ 

+

1649 "-AB BC C-A DE E-A ", # __anext__ 

+

1650 arcz_unpredicted="CD", 

+

1651 ) 

+

1652 assert self.stdout() == "a\nb\nc\n.\n" 

+

1653 

+

1654 def test_async_with(self): 

+

1655 self.check_coverage("""\ 

+

1656 async def go(): 

+

1657 async with x: 

+

1658 pass 

+

1659 """, 

+

1660 arcz=".1 1. .2 23 3.", 

+

1661 arcz_missing=".2 23 3.", 

+

1662 ) 

+

1663 

+

1664 def test_async_decorator(self): 

+

1665 if env.PYBEHAVIOR.trace_decorated_def: 

+

1666 arcz = ".1 14 45 5. .2 2. -46 6-4" 

+

1667 else: 

+

1668 arcz = ".1 14 4. .2 2. -46 6-4" 

+

1669 self.check_coverage("""\ 

+

1670 def wrap(f): # 1 

+

1671 return f 

+

1672 

+

1673 @wrap # 4 

+

1674 async def go(): 

+

1675 return 

+

1676 """, 

+

1677 arcz=arcz, 

+

1678 arcz_missing='-46 6-4', 

+

1679 ) 

+

1680 

+

1681 

+

1682class ExcludeTest(CoverageTest): 

+

1683 """Tests of exclusions to indicate known partial branches.""" 

+

1684 

+

1685 def test_default(self): 

+

1686 # A number of forms of pragma comment are accepted. 

+

1687 self.check_coverage("""\ 

+

1688 a = 1 

+

1689 if a: #pragma: no branch 

+

1690 b = 3 

+

1691 c = 4 

+

1692 if c: # pragma NOBRANCH 

+

1693 d = 6 

+

1694 e = 7 

+

1695 if e:#\tpragma:\tno branch 

+

1696 f = 9 

+

1697 """, 

+

1698 [1,2,3,4,5,6,7,8,9], 

+

1699 arcz=".1 12 23 24 34 45 56 57 67 78 89 9. 8.", 

+

1700 ) 

+

1701 

+

1702 def test_custom_pragmas(self): 

+

1703 self.check_coverage("""\ 

+

1704 a = 1 

+

1705 while a: # [only some] 

+

1706 c = 3 

+

1707 break 

+

1708 assert c == 5-2 

+

1709 """, 

+

1710 [1,2,3,4,5], 

+

1711 partials=["only some"], 

+

1712 arcz=".1 12 23 34 45 25 5.", 

+

1713 ) 

+

1714 

+

1715 

+

1716class LineDataTest(CoverageTest): 

+

1717 """Tests that line_data gives us what we expect.""" 

+

1718 

+

1719 def test_branch(self): 

+

1720 cov = coverage.Coverage(branch=True) 

+

1721 

+

1722 self.make_file("fun1.py", """\ 

+

1723 def fun1(x): 

+

1724 if x == 1: 

+

1725 return 

+

1726 

+

1727 fun1(3) 

+

1728 """) 

+

1729 

+

1730 self.start_import_stop(cov, "fun1") 

+

1731 

+

1732 data = cov.get_data() 

+

1733 fun1_lines = data.lines(abs_file("fun1.py")) 

+

1734 assert_count_equal(fun1_lines, [1, 2, 5]) 

+
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_backward_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_backward_py.html
new file mode 100644
index 000000000..d4bf3773b
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_backward_py.html
@@ -0,0 +1,90 @@
+ Coverage for tests/test_backward.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests that our version shims in backward.py are working.""" 

+

5 

+

6from coverage.backward import iitems, binary_bytes, bytes_to_ints 

+

7 

+

8from tests.coveragetest import CoverageTest 

+

9from tests.helpers import assert_count_equal 

+

10 

+

11 

+

12class BackwardTest(CoverageTest): 

+

13 """Tests of things from backward.py.""" 

+

14 

+

15 def test_iitems(self): 

+

16 d = {'a': 1, 'b': 2, 'c': 3} 

+

17 items = [('a', 1), ('b', 2), ('c', 3)] 

+

18 assert_count_equal(list(iitems(d)), items) 

+

19 

+

20 def test_binary_bytes(self): 

+

21 byte_values = [0, 255, 17, 23, 42, 57] 

+

22 bb = binary_bytes(byte_values) 

+

23 assert len(bb) == len(byte_values) 

+

24 assert byte_values == list(bytes_to_ints(bb)) 

+
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_cmdline_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_cmdline_py.html
new file mode 100644
index 000000000..dbb59ed1c
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_cmdline_py.html
@@ -0,0 +1,1058 @@
+ Coverage for tests/test_cmdline.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Test cmdline.py for coverage.py.""" 

+

5 

+

6import os 

+

7import pprint 

+

8import sys 

+

9import textwrap 

+

10 

+

11import mock 

+

12import pytest 

+

13 

+

14import coverage 

+

15import coverage.cmdline 

+

16from coverage import env 

+

17from coverage.config import CoverageConfig 

+

18from coverage.data import CoverageData 

+

19from coverage.misc import ExceptionDuringRun 

+

20from coverage.version import __url__ 

+

21 

+

22from tests.coveragetest import CoverageTest, OK, ERR, command_line 

+

23 

+

24 

+

25class BaseCmdLineTest(CoverageTest): 

+

26 """Tests of execution paths through the command line interpreter.""" 

+

27 

+

28 run_in_temp_dir = False 

+

29 

+

30 # Make a dict mapping function names to the default values that cmdline.py 

+

31 # uses when calling the function. 

+

32 _defaults = mock.Mock() 

+

33 _defaults.Coverage().annotate( 

+

34 directory=None, ignore_errors=None, include=None, omit=None, morfs=[], 

+

35 contexts=None, 

+

36 ) 

+

37 _defaults.Coverage().html_report( 

+

38 directory=None, ignore_errors=None, include=None, omit=None, morfs=[], 

+

39 skip_covered=None, show_contexts=None, title=None, contexts=None, 

+

40 skip_empty=None, precision=None, 

+

41 ) 

+

42 _defaults.Coverage().report( 

+

43 ignore_errors=None, include=None, omit=None, morfs=[], 

+

44 show_missing=None, skip_covered=None, contexts=None, skip_empty=None, precision=None, 

+

45 sort=None, 

+

46 ) 

+

47 _defaults.Coverage().xml_report( 

+

48 ignore_errors=None, include=None, omit=None, morfs=[], outfile=None, 

+

49 contexts=None, skip_empty=None, 

+

50 ) 

+

51 _defaults.Coverage().json_report( 

+

52 ignore_errors=None, include=None, omit=None, morfs=[], outfile=None, 

+

53 contexts=None, pretty_print=None, show_contexts=None, 

+

54 ) 

+

55 _defaults.Coverage( 

+

56 cover_pylib=None, data_suffix=None, timid=None, branch=None, 

+

57 config_file=True, source=None, include=None, omit=None, debug=None, 

+

58 concurrency=None, check_preimported=True, context=None, 

+

59 ) 

+

60 

+

61 DEFAULT_KWARGS = dict((name, kw) for name, _, kw in _defaults.mock_calls) 

+

62 

+

63 def model_object(self): 

+

64 """Return a Mock suitable for use in CoverageScript.""" 

+

65 mk = mock.Mock() 

+

66 

+

67 cov = mk.Coverage.return_value 

+

68 

+

69 # The mock needs options. 

+

70 mk.config = CoverageConfig() 

+

71 cov.get_option = mk.config.get_option 

+

72 cov.set_option = mk.config.set_option 

+

73 

+

74 # Get the type right for the result of reporting. 

+

75 cov.report.return_value = 50.0 

+

76 cov.html_report.return_value = 50.0 

+

77 cov.xml_report.return_value = 50.0 

+

78 cov.json_report.return_value = 50.0 

+

79 

+

80 return mk 

+

81 

+

82 # Global names in cmdline.py that will be mocked during the tests. 

+

83 MOCK_GLOBALS = ['Coverage', 'PyRunner', 'show_help'] 

+

84 

+

85 def mock_command_line(self, args, options=None): 

+

86 """Run `args` through the command line, with a Mock. 

+

87 

+

88 `options` is a dict of names and values to pass to `set_option`. 

+

89 

+

90 Returns the Mock it used and the status code returned. 

+

91 

+

92 """ 

+

93 mk = self.model_object() 

+

94 

+

95 if options is not None: 

+

96 for name, value in options.items(): 

+

97 mk.config.set_option(name, value) 

+

98 

+

99 patchers = [ 

+

100 mock.patch("coverage.cmdline."+name, getattr(mk, name)) 

+

101 for name in self.MOCK_GLOBALS 

+

102 ] 

+

103 for patcher in patchers: 

+

104 patcher.start() 

+

105 try: 

+

106 ret = command_line(args) 

+

107 finally: 

+

108 for patcher in patchers: 

+

109 patcher.stop() 

+

110 

+

111 return mk, ret 

+

112 

+

113 def cmd_executes(self, args, code, ret=OK, options=None): 

+

114 """Assert that the `args` end up executing the sequence in `code`.""" 

+

115 called, status = self.mock_command_line(args, options=options) 

+

116 assert status == ret, "Wrong status: got %r, wanted %r" % (status, ret) 

+

117 

+

118 # Remove all indentation, and execute with mock globals 

+

119 code = textwrap.dedent(code) 

+

120 expected = self.model_object() 

+

121 globs = {n: getattr(expected, n) for n in self.MOCK_GLOBALS} 

+

122 code_obj = compile(code, "<code>", "exec") 

+

123 eval(code_obj, globs, {}) # pylint: disable=eval-used 

+

124 

+

125 # Many of our functions take a lot of arguments, and cmdline.py 

+

126 # calls them with many. But most of them are just the defaults, which 

+

127 # we don't want to have to repeat in all tests. For each call, apply 

+

128 # the defaults. This lets the tests just mention the interesting ones. 

+

129 for name, _, kwargs in expected.mock_calls: 

+

130 for k, v in self.DEFAULT_KWARGS.get(name, {}).items(): 

+

131 kwargs.setdefault(k, v) 

+

132 

+

133 self.assert_same_mock_calls(expected, called) 

+

134 

+

135 def cmd_executes_same(self, args1, args2): 

+

136 """Assert that the `args1` executes the same as `args2`.""" 

+

137 m1, r1 = self.mock_command_line(args1) 

+

138 m2, r2 = self.mock_command_line(args2) 

+

139 assert r1 == r2 

+

140 self.assert_same_mock_calls(m1, m2) 

+

141 

+

142 def assert_same_mock_calls(self, m1, m2): 

+

143 """Assert that `m1.mock_calls` and `m2.mock_calls` are the same.""" 

+

144 # Use a real equality comparison, but if it fails, use a nicer assert 

+

145 # so we can tell what's going on. We have to use the real == first due 

+

146 # to CmdOptionParser.__eq__ 

+

147 if m1.mock_calls != m2.mock_calls: 

+

148 pp1 = pprint.pformat(m1.mock_calls) 

+

149 pp2 = pprint.pformat(m2.mock_calls) 

+

150 assert pp1+'\n' == pp2+'\n' 

+

151 

+

152 def cmd_help(self, args, help_msg=None, topic=None, ret=ERR): 

+

153 """Run a command line, and check that it prints the right help. 

+

154 

+

155 Only the last function call in the mock is checked, which should be the 

+

156 help message that we want to see. 

+

157 

+

158 """ 

+

159 mk, status = self.mock_command_line(args) 

+

160 assert status == ret, "Wrong status: got %s, wanted %s" % (status, ret) 

+

161 if help_msg: 

+

162 assert mk.mock_calls[-1] == ('show_help', (help_msg,), {}) 

+

163 else: 

+

164 assert mk.mock_calls[-1] == ('show_help', (), {'topic': topic}) 

+

165 

+

166 

+

167class BaseCmdLineTestTest(BaseCmdLineTest): 

+

168 """Tests that our BaseCmdLineTest helpers work.""" 

+

169 def test_cmd_executes_same(self): 

+

170 # All the other tests here use self.cmd_executes_same in successful 

+

171 # ways, so here we just check that it fails. 

+

172 with pytest.raises(AssertionError): 

+

173 self.cmd_executes_same("run", "debug") 

+

174 

+

175 

+

176class CmdLineTest(BaseCmdLineTest): 

+

177 """Tests of the coverage.py command line.""" 

+

178 

+

179 def test_annotate(self): 

+

180 # coverage annotate [-d DIR] [-i] [--omit DIR,...] [FILE1 FILE2 ...] 

+

181 self.cmd_executes("annotate", """\ 

+

182 cov = Coverage() 

+

183 cov.load() 

+

184 cov.annotate() 

+

185 """) 

+

186 self.cmd_executes("annotate -d dir1", """\ 

+

187 cov = Coverage() 

+

188 cov.load() 

+

189 cov.annotate(directory="dir1") 

+

190 """) 

+

191 self.cmd_executes("annotate -i", """\ 

+

192 cov = Coverage() 

+

193 cov.load() 

+

194 cov.annotate(ignore_errors=True) 

+

195 """) 

+

196 self.cmd_executes("annotate --omit fooey", """\ 

+

197 cov = Coverage(omit=["fooey"]) 

+

198 cov.load() 

+

199 cov.annotate(omit=["fooey"]) 

+

200 """) 

+

201 self.cmd_executes("annotate --omit fooey,booey", """\ 

+

202 cov = Coverage(omit=["fooey", "booey"]) 

+

203 cov.load() 

+

204 cov.annotate(omit=["fooey", "booey"]) 

+

205 """) 

+

206 self.cmd_executes("annotate mod1", """\ 

+

207 cov = Coverage() 

+

208 cov.load() 

+

209 cov.annotate(morfs=["mod1"]) 

+

210 """) 

+

211 self.cmd_executes("annotate mod1 mod2 mod3", """\ 

+

212 cov = Coverage() 

+

213 cov.load() 

+

214 cov.annotate(morfs=["mod1", "mod2", "mod3"]) 

+

215 """) 

+

216 

+

217 def test_combine(self): 

+

218 # coverage combine with args 

+

219 self.cmd_executes("combine datadir1", """\ 

+

220 cov = Coverage() 

+

221 cov.combine(["datadir1"], strict=True, keep=False) 

+

222 cov.save() 

+

223 """) 

+

224 # coverage combine, appending 

+

225 self.cmd_executes("combine --append datadir1", """\ 

+

226 cov = Coverage() 

+

227 cov.load() 

+

228 cov.combine(["datadir1"], strict=True, keep=False) 

+

229 cov.save() 

+

230 """) 

+

231 # coverage combine without args 

+

232 self.cmd_executes("combine", """\ 

+

233 cov = Coverage() 

+

234 cov.combine(None, strict=True, keep=False) 

+

235 cov.save() 

+

236 """) 

+

237 

+

238 def test_combine_doesnt_confuse_options_with_args(self): 

+

239 # https://github.com/nedbat/coveragepy/issues/385 

+

240 self.cmd_executes("combine --rcfile cov.ini", """\ 

+

241 cov = Coverage(config_file='cov.ini') 

+

242 cov.combine(None, strict=True, keep=False) 

+

243 cov.save() 

+

244 """) 

+

245 self.cmd_executes("combine --rcfile cov.ini data1 data2/more", """\ 

+

246 cov = Coverage(config_file='cov.ini') 

+

247 cov.combine(["data1", "data2/more"], strict=True, keep=False) 

+

248 cov.save() 

+

249 """) 

+

250 

+

251 def test_debug(self): 

+

252 self.cmd_help("debug", "What information would you like: config, data, sys, premain?") 

+

253 self.cmd_help("debug foo", "Don't know what you mean by 'foo'") 

+

254 

+

255 def test_debug_sys(self): 

+

256 self.command_line("debug sys") 

+

257 out = self.stdout() 

+

258 assert "version:" in out 

+

259 assert "data_file:" in out 

+

260 

+

261 def test_debug_config(self): 

+

262 self.command_line("debug config") 

+

263 out = self.stdout() 

+

264 assert "cover_pylib:" in out 

+

265 assert "skip_covered:" in out 

+

266 assert "skip_empty:" in out 

+

267 

+

268 def test_erase(self): 

+

269 # coverage erase 

+

270 self.cmd_executes("erase", """\ 

+

271 cov = Coverage() 

+

272 cov.erase() 

+

273 """) 

+

274 

+

275 def test_version(self): 

+

276 # coverage --version 

+

277 self.cmd_help("--version", topic="version", ret=OK) 

+

278 

+

279 def test_help_option(self): 

+

280 # coverage -h 

+

281 self.cmd_help("-h", topic="help", ret=OK) 

+

282 self.cmd_help("--help", topic="help", ret=OK) 

+

283 

+

284 def test_help_command(self): 

+

285 self.cmd_executes("help", "show_help(topic='help')") 

+

286 

+

287 def test_cmd_help(self): 

+

288 self.cmd_executes("run --help", "show_help(parser='<CmdOptionParser:run>')") 

+

289 self.cmd_executes_same("help run", "run --help") 

+

290 

+

291 def test_html(self): 

+

292 # coverage html -d DIR [-i] [--omit DIR,...] [FILE1 FILE2 ...] 

+

293 self.cmd_executes("html", """\ 

+

294 cov = Coverage() 

+

295 cov.load() 

+

296 cov.html_report() 

+

297 """) 

+

298 self.cmd_executes("html -d dir1", """\ 

+

299 cov = Coverage() 

+

300 cov.load() 

+

301 cov.html_report(directory="dir1") 

+

302 """) 

+

303 self.cmd_executes("html -i", """\ 

+

304 cov = Coverage() 

+

305 cov.load() 

+

306 cov.html_report(ignore_errors=True) 

+

307 """) 

+

308 self.cmd_executes("html --omit fooey", """\ 

+

309 cov = Coverage(omit=["fooey"]) 

+

310 cov.load() 

+

311 cov.html_report(omit=["fooey"]) 

+

312 """) 

+

313 self.cmd_executes("html --omit fooey,booey", """\ 

+

314 cov = Coverage(omit=["fooey", "booey"]) 

+

315 cov.load() 

+

316 cov.html_report(omit=["fooey", "booey"]) 

+

317 """) 

+

318 self.cmd_executes("html mod1", """\ 

+

319 cov = Coverage() 

+

320 cov.load() 

+

321 cov.html_report(morfs=["mod1"]) 

+

322 """) 

+

323 self.cmd_executes("html mod1 mod2 mod3", """\ 

+

324 cov = Coverage() 

+

325 cov.load() 

+

326 cov.html_report(morfs=["mod1", "mod2", "mod3"]) 

+

327 """) 

+

328 self.cmd_executes("html --precision=3", """\ 

+

329 cov = Coverage() 

+

330 cov.load() 

+

331 cov.html_report(precision=3) 

+

332 """) 

+

333 self.cmd_executes("html --title=Hello_there", """\ 

+

334 cov = Coverage() 

+

335 cov.load() 

+

336 cov.html_report(title='Hello_there') 

+

337 """) 

+

338 

+

339 def test_report(self): 

+

340 # coverage report [-m] [-i] [-o DIR,...] [FILE1 FILE2 ...] 

+

341 self.cmd_executes("report", """\ 

+

342 cov = Coverage() 

+

343 cov.load() 

+

344 cov.report(show_missing=None) 

+

345 """) 

+

346 self.cmd_executes("report -i", """\ 

+

347 cov = Coverage() 

+

348 cov.load() 

+

349 cov.report(ignore_errors=True) 

+

350 """) 

+

351 self.cmd_executes("report -m", """\ 

+

352 cov = Coverage() 

+

353 cov.load() 

+

354 cov.report(show_missing=True) 

+

355 """) 

+

356 self.cmd_executes("report --omit fooey", """\ 

+

357 cov = Coverage(omit=["fooey"]) 

+

358 cov.load() 

+

359 cov.report(omit=["fooey"]) 

+

360 """) 

+

361 self.cmd_executes("report --omit fooey,booey", """\ 

+

362 cov = Coverage(omit=["fooey", "booey"]) 

+

363 cov.load() 

+

364 cov.report(omit=["fooey", "booey"]) 

+

365 """) 

+

366 self.cmd_executes("report mod1", """\ 

+

367 cov = Coverage() 

+

368 cov.load() 

+

369 cov.report(morfs=["mod1"]) 

+

370 """) 

+

371 self.cmd_executes("report mod1 mod2 mod3", """\ 

+

372 cov = Coverage() 

+

373 cov.load() 

+

374 cov.report(morfs=["mod1", "mod2", "mod3"]) 

+

375 """) 

+

376 self.cmd_executes("report --precision=7", """\ 

+

377 cov = Coverage() 

+

378 cov.load() 

+

379 cov.report(precision=7) 

+

380 """) 

+

381 self.cmd_executes("report --skip-covered", """\ 

+

382 cov = Coverage() 

+

383 cov.load() 

+

384 cov.report(skip_covered=True) 

+

385 """) 

+

386 self.cmd_executes("report --skip-covered --no-skip-covered", """\ 

+

387 cov = Coverage() 

+

388 cov.load() 

+

389 cov.report(skip_covered=False) 

+

390 """) 

+

391 self.cmd_executes("report --no-skip-covered", """\ 

+

392 cov = Coverage() 

+

393 cov.load() 

+

394 cov.report(skip_covered=False) 

+

395 """) 

+

396 self.cmd_executes("report --skip-empty", """\ 

+

397 cov = Coverage() 

+

398 cov.load() 

+

399 cov.report(skip_empty=True) 

+

400 """) 

+

401 self.cmd_executes("report --contexts=foo,bar", """\ 

+

402 cov = Coverage() 

+

403 cov.load() 

+

404 cov.report(contexts=["foo", "bar"]) 

+

405 """) 

+

406 self.cmd_executes("report --sort=-foo", """\ 

+

407 cov = Coverage() 

+

408 cov.load() 

+

409 cov.report(sort='-foo') 

+

410 """) 

+

411 

+

412 def test_run(self): 

+

413 # coverage run [-p] [-L] [--timid] MODULE.py [ARG1 ARG2 ...] 

+

414 

+

415 # run calls coverage.erase first. 

+

416 self.cmd_executes("run foo.py", """\ 

+

417 cov = Coverage() 

+

418 runner = PyRunner(['foo.py'], as_module=False) 

+

419 runner.prepare() 

+

420 cov.start() 

+

421 runner.run() 

+

422 cov.stop() 

+

423 cov.save() 

+

424 """) 

+

425 # run -a combines with an existing data file before saving. 

+

426 self.cmd_executes("run -a foo.py", """\ 

+

427 cov = Coverage() 

+

428 runner = PyRunner(['foo.py'], as_module=False) 

+

429 runner.prepare() 

+

430 cov.load() 

+

431 cov.start() 

+

432 runner.run() 

+

433 cov.stop() 

+

434 cov.save() 

+

435 """) 

+

436 # --timid sets a flag, and program arguments get passed through. 

+

437 self.cmd_executes("run --timid foo.py abc 123", """\ 

+

438 cov = Coverage(timid=True) 

+

439 runner = PyRunner(['foo.py', 'abc', '123'], as_module=False) 

+

440 runner.prepare() 

+

441 cov.start() 

+

442 runner.run() 

+

443 cov.stop() 

+

444 cov.save() 

+

445 """) 

+

446 # -L sets a flag, and flags for the program don't confuse us. 

+

447 self.cmd_executes("run -p -L foo.py -a -b", """\ 

+

448 cov = Coverage(cover_pylib=True, data_suffix=True) 

+

449 runner = PyRunner(['foo.py', '-a', '-b'], as_module=False) 

+

450 runner.prepare() 

+

451 cov.start() 

+

452 runner.run() 

+

453 cov.stop() 

+

454 cov.save() 

+

455 """) 

+

456 self.cmd_executes("run --branch foo.py", """\ 

+

457 cov = Coverage(branch=True) 

+

458 runner = PyRunner(['foo.py'], as_module=False) 

+

459 runner.prepare() 

+

460 cov.start() 

+

461 runner.run() 

+

462 cov.stop() 

+

463 cov.save() 

+

464 """) 

+

465 self.cmd_executes("run --rcfile=myrc.rc foo.py", """\ 

+

466 cov = Coverage(config_file="myrc.rc") 

+

467 runner = PyRunner(['foo.py'], as_module=False) 

+

468 runner.prepare() 

+

469 cov.start() 

+

470 runner.run() 

+

471 cov.stop() 

+

472 cov.save() 

+

473 """) 

+

474 self.cmd_executes("run --include=pre1,pre2 foo.py", """\ 

+

475 cov = Coverage(include=["pre1", "pre2"]) 

+

476 runner = PyRunner(['foo.py'], as_module=False) 

+

477 runner.prepare() 

+

478 cov.start() 

+

479 runner.run() 

+

480 cov.stop() 

+

481 cov.save() 

+

482 """) 

+

483 self.cmd_executes("run --omit=opre1,opre2 foo.py", """\ 

+

484 cov = Coverage(omit=["opre1", "opre2"]) 

+

485 runner = PyRunner(['foo.py'], as_module=False) 

+

486 runner.prepare() 

+

487 cov.start() 

+

488 runner.run() 

+

489 cov.stop() 

+

490 cov.save() 

+

491 """) 

+

492 self.cmd_executes("run --include=pre1,pre2 --omit=opre1,opre2 foo.py", """\ 

+

493 cov = Coverage(include=["pre1", "pre2"], omit=["opre1", "opre2"]) 

+

494 runner = PyRunner(['foo.py'], as_module=False) 

+

495 runner.prepare() 

+

496 cov.start() 

+

497 runner.run() 

+

498 cov.stop() 

+

499 cov.save() 

+

500 """) 

+

501 self.cmd_executes("run --source=quux,hi.there,/home/bar foo.py", """\ 

+

502 cov = Coverage(source=["quux", "hi.there", "/home/bar"]) 

+

503 runner = PyRunner(['foo.py'], as_module=False) 

+

504 runner.prepare() 

+

505 cov.start() 

+

506 runner.run() 

+

507 cov.stop() 

+

508 cov.save() 

+

509 """) 

+

510 self.cmd_executes("run --concurrency=gevent foo.py", """\ 

+

511 cov = Coverage(concurrency='gevent') 

+

512 runner = PyRunner(['foo.py'], as_module=False) 

+

513 runner.prepare() 

+

514 cov.start() 

+

515 runner.run() 

+

516 cov.stop() 

+

517 cov.save() 

+

518 """) 

+

519 self.cmd_executes("run --concurrency=multiprocessing foo.py", """\ 

+

520 cov = Coverage(concurrency='multiprocessing') 

+

521 runner = PyRunner(['foo.py'], as_module=False) 

+

522 runner.prepare() 

+

523 cov.start() 

+

524 runner.run() 

+

525 cov.stop() 

+

526 cov.save() 

+

527 """) 

+

528 

+

529 def test_bad_concurrency(self): 

+

530 self.command_line("run --concurrency=nothing", ret=ERR) 

+

531 err = self.stderr() 

+

532 assert "option --concurrency: invalid choice: 'nothing'" in err 

+

533 

+

534 def test_no_multiple_concurrency(self): 

+

535 # You can't use multiple concurrency values on the command line. 

+

536 # I would like to have a better message about not allowing multiple 

+

537 # values for this option, but optparse is not that flexible. 

+

538 self.command_line("run --concurrency=multiprocessing,gevent foo.py", ret=ERR) 

+

539 err = self.stderr() 

+

540 assert "option --concurrency: invalid choice: 'multiprocessing,gevent'" in err 

+

541 

+

542 def test_multiprocessing_needs_config_file(self): 

+

543 # You can't use command-line args to add options to multiprocessing 

+

544 # runs, since they won't make it to the subprocesses. You need to use a 

+

545 # config file. 

+

546 self.command_line("run --concurrency=multiprocessing --branch foo.py", ret=ERR) 

+

547 msg = "Options affecting multiprocessing must only be specified in a configuration file." 

+

548 _, err = self.stdouterr() 

+

549 assert msg in err 

+

550 assert "Remove --branch from the command line." in err 

+

551 

+

552 def test_run_debug(self): 

+

553 self.cmd_executes("run --debug=opt1 foo.py", """\ 

+

554 cov = Coverage(debug=["opt1"]) 

+

555 runner = PyRunner(['foo.py'], as_module=False) 

+

556 runner.prepare() 

+

557 cov.start() 

+

558 runner.run() 

+

559 cov.stop() 

+

560 cov.save() 

+

561 """) 

+

562 self.cmd_executes("run --debug=opt1,opt2 foo.py", """\ 

+

563 cov = Coverage(debug=["opt1","opt2"]) 

+

564 runner = PyRunner(['foo.py'], as_module=False) 

+

565 runner.prepare() 

+

566 cov.start() 

+

567 runner.run() 

+

568 cov.stop() 

+

569 cov.save() 

+

570 """) 

+

571 

+

572 def test_run_module(self): 

+

573 self.cmd_executes("run -m mymodule", """\ 

+

574 cov = Coverage() 

+

575 runner = PyRunner(['mymodule'], as_module=True) 

+

576 runner.prepare() 

+

577 cov.start() 

+

578 runner.run() 

+

579 cov.stop() 

+

580 cov.save() 

+

581 """) 

+

582 self.cmd_executes("run -m mymodule -qq arg1 arg2", """\ 

+

583 cov = Coverage() 

+

584 runner = PyRunner(['mymodule', '-qq', 'arg1', 'arg2'], as_module=True) 

+

585 runner.prepare() 

+

586 cov.start() 

+

587 runner.run() 

+

588 cov.stop() 

+

589 cov.save() 

+

590 """) 

+

591 self.cmd_executes("run --branch -m mymodule", """\ 

+

592 cov = Coverage(branch=True) 

+

593 runner = PyRunner(['mymodule'], as_module=True) 

+

594 runner.prepare() 

+

595 cov.start() 

+

596 runner.run() 

+

597 cov.stop() 

+

598 cov.save() 

+

599 """) 

+

600 self.cmd_executes_same("run -m mymodule", "run --module mymodule") 

+

601 

+

602 def test_run_nothing(self): 

+

603 self.command_line("run", ret=ERR) 

+

604 assert "Nothing to do" in self.stderr() 

+

605 

+

606 def test_run_from_config(self): 

+

607 options = {"run:command_line": "myprog.py a 123 'a quoted thing' xyz"} 

+

608 self.cmd_executes("run", """\ 

+

609 cov = Coverage() 

+

610 runner = PyRunner(['myprog.py', 'a', '123', 'a quoted thing', 'xyz'], as_module=False) 

+

611 runner.prepare() 

+

612 cov.start() 

+

613 runner.run() 

+

614 cov.stop() 

+

615 cov.save() 

+

616 """, 

+

617 options=options, 

+

618 ) 

+

619 

+

620 def test_run_module_from_config(self): 

+

621 self.cmd_executes("run", """\ 

+

622 cov = Coverage() 

+

623 runner = PyRunner(['mymodule', 'thing1', 'thing2'], as_module=True) 

+

624 runner.prepare() 

+

625 cov.start() 

+

626 runner.run() 

+

627 cov.stop() 

+

628 cov.save() 

+

629 """, 

+

630 options={"run:command_line": "-m mymodule thing1 thing2"}, 

+

631 ) 

+

632 

+

633 def test_run_from_config_but_empty(self): 

+

634 self.cmd_executes("run", """\ 

+

635 cov = Coverage() 

+

636 show_help('Nothing to do.') 

+

637 """, 

+

638 ret=ERR, 

+

639 options={"run:command_line": ""}, 

+

640 ) 

+

641 

+

642 def test_run_dashm_only(self): 

+

643 self.cmd_executes("run -m", """\ 

+

644 cov = Coverage() 

+

645 show_help('No module specified for -m') 

+

646 """, 

+

647 ret=ERR, 

+

648 ) 

+

649 self.cmd_executes("run -m", """\ 

+

650 cov = Coverage() 

+

651 show_help('No module specified for -m') 

+

652 """, 

+

653 ret=ERR, 

+

654 options={"run:command_line": "myprog.py"} 

+

655 ) 

+

656 

+

657 def test_cant_append_parallel(self): 

+

658 self.command_line("run --append --parallel-mode foo.py", ret=ERR) 

+

659 assert "Can't append to data files in parallel mode." in self.stderr() 

+

660 

+

661 def test_xml(self): 

+

662 # coverage xml [-i] [--omit DIR,...] [FILE1 FILE2 ...] 

+

663 self.cmd_executes("xml", """\ 

+

664 cov = Coverage() 

+

665 cov.load() 

+

666 cov.xml_report() 

+

667 """) 

+

668 self.cmd_executes("xml -i", """\ 

+

669 cov = Coverage() 

+

670 cov.load() 

+

671 cov.xml_report(ignore_errors=True) 

+

672 """) 

+

673 self.cmd_executes("xml -o myxml.foo", """\ 

+

674 cov = Coverage() 

+

675 cov.load() 

+

676 cov.xml_report(outfile="myxml.foo") 

+

677 """) 

+

678 self.cmd_executes("xml -o -", """\ 

+

679 cov = Coverage() 

+

680 cov.load() 

+

681 cov.xml_report(outfile="-") 

+

682 """) 

+

683 self.cmd_executes("xml --omit fooey", """\ 

+

684 cov = Coverage(omit=["fooey"]) 

+

685 cov.load() 

+

686 cov.xml_report(omit=["fooey"]) 

+

687 """) 

+

688 self.cmd_executes("xml --omit fooey,booey", """\ 

+

689 cov = Coverage(omit=["fooey", "booey"]) 

+

690 cov.load() 

+

691 cov.xml_report(omit=["fooey", "booey"]) 

+

692 """) 

+

693 self.cmd_executes("xml mod1", """\ 

+

694 cov = Coverage() 

+

695 cov.load() 

+

696 cov.xml_report(morfs=["mod1"]) 

+

697 """) 

+

698 self.cmd_executes("xml mod1 mod2 mod3", """\ 

+

699 cov = Coverage() 

+

700 cov.load() 

+

701 cov.xml_report(morfs=["mod1", "mod2", "mod3"]) 

+

702 """) 

+

703 

+

704 def test_json(self): 

+

705 # coverage json [-i] [--omit DIR,...] [FILE1 FILE2 ...] 

+

706 self.cmd_executes("json", """\ 

+

707 cov = Coverage() 

+

708 cov.load() 

+

709 cov.json_report() 

+

710 """) 

+

711 self.cmd_executes("json --pretty-print", """\ 

+

712 cov = Coverage() 

+

713 cov.load() 

+

714 cov.json_report(pretty_print=True) 

+

715 """) 

+

716 self.cmd_executes("json --pretty-print --show-contexts", """\ 

+

717 cov = Coverage() 

+

718 cov.load() 

+

719 cov.json_report(pretty_print=True, show_contexts=True) 

+

720 """) 

+

721 self.cmd_executes("json -i", """\ 

+

722 cov = Coverage() 

+

723 cov.load() 

+

724 cov.json_report(ignore_errors=True) 

+

725 """) 

+

726 self.cmd_executes("json -o myjson.foo", """\ 

+

727 cov = Coverage() 

+

728 cov.load() 

+

729 cov.json_report(outfile="myjson.foo") 

+

730 """) 

+

731 self.cmd_executes("json -o -", """\ 

+

732 cov = Coverage() 

+

733 cov.load() 

+

734 cov.json_report(outfile="-") 

+

735 """) 

+

736 self.cmd_executes("json --omit fooey", """\ 

+

737 cov = Coverage(omit=["fooey"]) 

+

738 cov.load() 

+

739 cov.json_report(omit=["fooey"]) 

+

740 """) 

+

741 self.cmd_executes("json --omit fooey,booey", """\ 

+

742 cov = Coverage(omit=["fooey", "booey"]) 

+

743 cov.load() 

+

744 cov.json_report(omit=["fooey", "booey"]) 

+

745 """) 

+

746 self.cmd_executes("json mod1", """\ 

+

747 cov = Coverage() 

+

748 cov.load() 

+

749 cov.json_report(morfs=["mod1"]) 

+

750 """) 

+

751 self.cmd_executes("json mod1 mod2 mod3", """\ 

+

752 cov = Coverage() 

+

753 cov.load() 

+

754 cov.json_report(morfs=["mod1", "mod2", "mod3"]) 

+

755 """) 

+

756 

+

757 def test_no_arguments_at_all(self): 

+

758 self.cmd_help("", topic="minimum_help", ret=OK) 

+

759 

+

760 def test_bad_command(self): 

+

761 self.cmd_help("xyzzy", "Unknown command: 'xyzzy'") 

+

762 

+

763 

+

764 class CmdLineWithFilesTest(BaseCmdLineTest): 

+

765 """Test the command line in ways that need temp files.""" 

+

766 

+

767 run_in_temp_dir = True 

+

768 

+

769 def test_debug_data(self): 

+

770 data = CoverageData() 

+

771 data.add_lines({ 

+

772 "file1.py": dict.fromkeys(range(1, 18)), 

+

773 "file2.py": dict.fromkeys(range(1, 24)), 

+

774 }) 

+

775 data.add_file_tracers({"file1.py": "a_plugin"}) 

+

776 data.write() 

+

777 

+

778 self.command_line("debug data") 

+

779 assert self.stdout() == textwrap.dedent("""\ 

+

780 -- data ------------------------------------------------------ 

+

781 path: FILENAME 

+

782 has_arcs: False 

+

783 

+

784 2 files: 

+

785 file1.py: 17 lines [a_plugin] 

+

786 file2.py: 23 lines 

+

787 """).replace("FILENAME", data.data_filename()) 

+

788 

+

789 def test_debug_data_with_no_data(self): 

+

790 data = CoverageData() 

+

791 self.command_line("debug data") 

+

792 assert self.stdout() == textwrap.dedent("""\ 

+

793 -- data ------------------------------------------------------ 

+

794 path: FILENAME 

+

795 No data collected 

+

796 """).replace("FILENAME", data.data_filename()) 

+

797 

+

798 

+

799 class CmdLineStdoutTest(BaseCmdLineTest): 

+

800 """Test the command line with real stdout output.""" 

+

801 

+

802 def test_minimum_help(self): 

+

803 self.command_line("") 

+

804 out = self.stdout() 

+

805 assert "Code coverage for Python" in out 

+

806 assert out.count("\n") < 4 

+

807 

+

808 def test_version(self): 

+

809 self.command_line("--version") 

+

810 out = self.stdout() 

+

811 assert "ersion " in out 

+

812 if env.C_TRACER: 

+

813 assert "with C extension" in out 

+

814 else: 

+

815 assert "without C extension" in out 

+

816 assert out.count("\n") < 4 

+

817 

+

818 @pytest.mark.skipif(env.JYTHON, reason="Jython gets mad if you patch sys.argv") 

+

819 def test_help_contains_command_name(self): 

+

820 # Command name should be present in help output. 

+

821 fake_command_path = "lorem/ipsum/dolor".replace("/", os.sep) 

+

822 expected_command_name = "dolor" 

+

823 fake_argv = [fake_command_path, "sit", "amet"] 

+

824 with mock.patch.object(sys, 'argv', new=fake_argv): 

+

825 self.command_line("help") 

+

826 out = self.stdout() 

+

827 assert expected_command_name in out 

+

828 

+

829 @pytest.mark.skipif(env.JYTHON, reason="Jython gets mad if you patch sys.argv") 

+

830 def test_help_contains_command_name_from_package(self): 

+

831 # Command package name should be present in help output. 

+

832 # 

+

833 # When the main module is actually a package's `__main__` module, the resulting command line 

+

834 # has the `__main__.py` file's path as the command name. Instead, the command name should 

+

835 # be derived from the package name. 

+

836 

+

837 fake_command_path = "lorem/ipsum/dolor/__main__.py".replace("/", os.sep) 

+

838 expected_command_name = "dolor" 

+

839 fake_argv = [fake_command_path, "sit", "amet"] 

+

840 with mock.patch.object(sys, 'argv', new=fake_argv): 

+

841 self.command_line("help") 

+

842 out = self.stdout() 

+

843 assert expected_command_name in out 

+

844 

+

845 def test_help(self): 

+

846 self.command_line("help") 

+

847 lines = self.stdout().splitlines() 

+

848 assert len(lines) > 10 

+

849 assert lines[-1] == "Full documentation is at {}".format(__url__) 

+

850 

+

851 def test_cmd_help(self): 

+

852 self.command_line("help run") 

+

853 out = self.stdout() 

+

854 lines = out.splitlines() 

+

855 assert "<pyfile>" in lines[0] 

+

856 assert "--timid" in out 

+

857 assert len(lines) > 20 

+

858 assert lines[-1] == "Full documentation is at {}".format(__url__) 

+

859 

+

860 def test_unknown_topic(self): 

+

861 # Should probably be an ERR return, but meh. 

+

862 self.command_line("help foobar") 

+

863 lines = self.stdout().splitlines() 

+

864 assert lines[0] == "Don't know topic 'foobar'" 

+

865 assert lines[-1] == "Full documentation is at {}".format(__url__) 

+

866 

+

867 def test_error(self): 

+

868 self.command_line("fooey kablooey", ret=ERR) 

+

869 err = self.stderr() 

+

870 assert "fooey" in err 

+

871 assert "help" in err 

+

872 

+

873 def test_doc_url(self): 

+

874 assert __url__.startswith("https://coverage.readthedocs.io") 

+

875 

+

876 

+

877 class CmdMainTest(CoverageTest): 

+

878 """Tests of coverage.cmdline.main(), using mocking for isolation.""" 

+

879 

+

880 run_in_temp_dir = False 

+

881 

+

882 class CoverageScriptStub(object): 

+

883 """A stub for coverage.cmdline.CoverageScript, used by CmdMainTest.""" 

+

884 

+

885 def command_line(self, argv): 

+

886 """Stub for command_line, the arg determines what it will do.""" 

+

887 if argv[0] == 'hello': 

+

888 print("Hello, world!") 

+

889 elif argv[0] == 'raise': 

+

890 try: 

+

891 raise Exception("oh noes!") 

+

892 except: 

+

893 raise ExceptionDuringRun(*sys.exc_info()) 

+

894 elif argv[0] == 'internalraise': 

+

895 raise ValueError("coverage is broken") 

+

896 elif argv[0] == 'exit': 

+

897 sys.exit(23) 

+

898 else: 

+

899 raise AssertionError("Bad CoverageScriptStub: %r" % (argv,)) 

+

900 return 0 

+

901 

+

902 def setup_test(self): 

+

903 super(CmdMainTest, self).setup_test() 

+

904 old_CoverageScript = coverage.cmdline.CoverageScript 

+

905 coverage.cmdline.CoverageScript = self.CoverageScriptStub 

+

906 self.addCleanup(setattr, coverage.cmdline, 'CoverageScript', old_CoverageScript) 

+

907 

+

908 def test_normal(self): 

+

909 ret = coverage.cmdline.main(['hello']) 

+

910 assert ret == 0 

+

911 assert self.stdout() == "Hello, world!\n" 

+

912 

+

913 def test_raise(self): 

+

914 ret = coverage.cmdline.main(['raise']) 

+

915 assert ret == 1 

+

916 out, err = self.stdouterr() 

+

917 assert out == "" 

+

918 err = err.split('\n') 

+

919 assert err[0] == 'Traceback (most recent call last):' 

+

920 assert err[-3] == ' raise Exception("oh noes!")' 

+

921 assert err[-2] == 'Exception: oh noes!' 

+

922 

+

923 def test_internalraise(self): 

+

924 with pytest.raises(ValueError, match="coverage is broken"): 

+

925 coverage.cmdline.main(['internalraise']) 

+

926 

+

927 def test_exit(self): 

+

928 ret = coverage.cmdline.main(['exit']) 

+

929 assert ret == 23 

+

930 

+

931 

+

932 class CoverageReportingFake(object): 

+

933 """A fake Coverage.coverage test double.""" 

+

934 # pylint: disable=missing-function-docstring 

+

935 def __init__(self, report_result, html_result, xml_result, json_report): 

+

936 self.config = CoverageConfig() 

+

937 self.report_result = report_result 

+

938 self.html_result = html_result 

+

939 self.xml_result = xml_result 

+

940 self.json_result = json_report 

+

941 

+

942 def set_option(self, optname, optvalue): 

+

943 self.config.set_option(optname, optvalue) 

+

944 

+

945 def get_option(self, optname): 

+

946 return self.config.get_option(optname) 

+

947 

+

948 def load(self): 

+

949 pass 

+

950 

+

951 def report(self, *args_unused, **kwargs_unused): 

+

952 return self.report_result 

+

953 

+

954 def html_report(self, *args_unused, **kwargs_unused): 

+

955 return self.html_result 

+

956 

+

957 def xml_report(self, *args_unused, **kwargs_unused): 

+

958 return self.xml_result 

+

959 

+

960 def json_report(self, *args_unused, **kwargs_unused): 

+

961 return self.json_result 

+

962 

+

963 

+

964 @pytest.mark.parametrize("results, fail_under, cmd, ret", [ 

+

965 # Command-line switch properly checks the result of reporting functions. 

+

966 ((20, 30, 40, 50), None, "report --fail-under=19", 0), 

+

967 ((20, 30, 40, 50), None, "report --fail-under=21", 2), 

+

968 ((20, 30, 40, 50), None, "html --fail-under=29", 0), 

+

969 ((20, 30, 40, 50), None, "html --fail-under=31", 2), 

+

970 ((20, 30, 40, 50), None, "xml --fail-under=39", 0), 

+

971 ((20, 30, 40, 50), None, "xml --fail-under=41", 2), 

+

972 ((20, 30, 40, 50), None, "json --fail-under=49", 0), 

+

973 ((20, 30, 40, 50), None, "json --fail-under=51", 2), 

+

974 # Configuration file setting properly checks the result of reporting. 

+

975 ((20, 30, 40, 50), 19, "report", 0), 

+

976 ((20, 30, 40, 50), 21, "report", 2), 

+

977 ((20, 30, 40, 50), 29, "html", 0), 

+

978 ((20, 30, 40, 50), 31, "html", 2), 

+

979 ((20, 30, 40, 50), 39, "xml", 0), 

+

980 ((20, 30, 40, 50), 41, "xml", 2), 

+

981 ((20, 30, 40, 50), 49, "json", 0), 

+

982 ((20, 30, 40, 50), 51, "json", 2), 

+

983 # Command-line overrides configuration. 

+

984 ((20, 30, 40, 50), 19, "report --fail-under=21", 2), 

+

985]) 

+

986 def test_fail_under(results, fail_under, cmd, ret): 

+

987 cov = CoverageReportingFake(*results) 

+

988 if fail_under is not None: 

+

989 cov.set_option("report:fail_under", fail_under) 

+

990 with mock.patch("coverage.cmdline.Coverage", lambda *a,**kw: cov): 

+

991 ret_actual = command_line(cmd) 

+

992 assert ret_actual == ret 

diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_collector_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_collector_py.html
new file mode 100644
index 000000000..c58b2ca4d
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_collector_py.html
@@ -0,0 +1,116 @@
Coverage for tests/test_collector.py: 100.000%

1 # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
2 # For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
3
4 """Tests of coverage/collector.py and other collectors."""
5
6 import os.path
7
8 import coverage
9
10 from tests.coveragetest import CoverageTest
11 from tests.helpers import CheckUniqueFilenames
12
13
14 class CollectorTest(CoverageTest):
15     """Test specific aspects of the collection process."""
16
17     def test_should_trace_cache(self):
18         # The tracers should only invoke should_trace once for each file name.
19
20         # Make some files that invoke each other.
21         self.make_file("f1.py", """\
22             def f1(x, f):
23                 return f(x)
24             """)
25
26         self.make_file("f2.py", """\
27             import f1
28
29             def func(x):
30                 return f1.f1(x, otherfunc)
31
32             def otherfunc(x):
33                 return x*x
34
35             for i in range(10):
36                 func(i)
37             """)
38
39         # Trace one file, but not the other. CheckUniqueFilenames will assert
40         # that _should_trace hasn't been called twice for the same file.
41         cov = coverage.Coverage(include=["f1.py"])
42         should_trace_hook = CheckUniqueFilenames.hook(cov, '_should_trace')
43
44         # Import the Python file, executing it.
45         self.start_import_stop(cov, "f2")
46
47         # Double-check that our files were checked.
48         abs_files = {os.path.abspath(f) for f in should_trace_hook.filenames}
49         assert os.path.abspath("f1.py") in abs_files
50         assert os.path.abspath("f2.py") in abs_files
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_concurrency_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_concurrency_py.html
new file mode 100644
index 000000000..92f4e5a00
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_concurrency_py.html
@@ -0,0 +1,669 @@
Coverage for tests/test_concurrency.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests for concurrency libraries.""" 

+

5 

+

6import glob 

+

7import os 

+

8import random 

+

9import re 

+

10import sys 

+

11import threading 

+

12import time 

+

13 

+

14from flaky import flaky 

+

15import pytest 

+

16 

+

17import coverage 

+

18from coverage import env 

+

19from coverage.backward import import_local_file 

+

20from coverage.data import line_counts 

+

21from coverage.files import abs_file 

+

22 

+

23from tests.coveragetest import CoverageTest 

+

24from tests.helpers import remove_files 

+

25 

+

26 

+

27# These libraries aren't always available, we'll skip tests if they aren't. 

+

28 

+

29try: 

+

30 import multiprocessing 

+

31except ImportError: # pragma: only jython 

+

32 multiprocessing = None 

+

33 

+

34try: 

+

35 import eventlet 

+

36except ImportError: 

+

37 eventlet = None 

+

38 

+

39try: 

+

40 import gevent 

+

41except ImportError: 

+

42 gevent = None 

+

43 

+

44try: 

+

45 import greenlet 

+

46except ImportError: # pragma: only jython 

+

47 greenlet = None 

+

48 

+

49 

+

50 def measurable_line(l): 

+

51 """Is this a line of code coverage will measure? 

+

52 

+

53 Not blank, not a comment, and not "else" 

+

54 """ 

+

55 l = l.strip() 

+

56 if not l: 

+

57 return False 

+

58 if l.startswith('#'): 

+

59 return False 

+

60 if l.startswith('else:'): 

+

61 return False 

+

62 if env.JYTHON and l.startswith(('try:', 'except:', 'except ', 'break', 'with ')): 

+

63 # Jython doesn't measure these statements. 

+

64 return False # pragma: only jython 

+

65 return True 

+

66 

+

67 

+

68 def line_count(s): 

+

69 """How many measurable lines are in `s`?""" 

+

70 return len(list(filter(measurable_line, s.splitlines()))) 

+

71 

+

72 

+

73 def print_simple_annotation(code, linenos): 

+

74 """Print the lines in `code` with X for each line number in `linenos`.""" 

+

75 for lineno, line in enumerate(code.splitlines(), start=1): 

+

76 print(" {} {}".format("X" if lineno in linenos else " ", line)) 

+

77 

+

78 

+

79 class LineCountTest(CoverageTest): 

+

80 """Test the helpers here.""" 

+

81 

+

82 run_in_temp_dir = False 

+

83 

+

84 def test_line_count(self): 

+

85 CODE = """ 

+

86 # Hey there! 

+

87 x = 1 

+

88 if x: 

+

89 print("hello") 

+

90 else: 

+

91 print("bye") 

+

92 

+

93 print("done") 

+

94 """ 

+

95 

+

96 assert line_count(CODE) == 5 

+

97 

+

98 

+

99# The code common to all the concurrency models. 

+

100 SUM_RANGE_Q = """ 

+

101 # Above this will be imports defining queue and threading. 

+

102 

+

103 class Producer(threading.Thread): 

+

104 def __init__(self, limit, q): 

+

105 threading.Thread.__init__(self) 

+

106 self.limit = limit 

+

107 self.q = q 

+

108 

+

109 def run(self): 

+

110 for i in range(self.limit): 

+

111 self.q.put(i) 

+

112 self.q.put(None) 

+

113 

+

114 class Consumer(threading.Thread): 

+

115 def __init__(self, q, qresult): 

+

116 threading.Thread.__init__(self) 

+

117 self.q = q 

+

118 self.qresult = qresult 

+

119 

+

120 def run(self): 

+

121 sum = 0 

+

122 while "no peephole".upper(): 

+

123 i = self.q.get() 

+

124 if i is None: 

+

125 break 

+

126 sum += i 

+

127 self.qresult.put(sum) 

+

128 

+

129 def sum_range(limit): 

+

130 q = queue.Queue() 

+

131 qresult = queue.Queue() 

+

132 c = Consumer(q, qresult) 

+

133 p = Producer(limit, q) 

+

134 c.start() 

+

135 p.start() 

+

136 

+

137 p.join() 

+

138 c.join() 

+

139 return qresult.get() 

+

140 

+

141 # Below this will be something using sum_range. 

+

142 """ 

+

143 

+

144 PRINT_SUM_RANGE = """ 

+

145 print(sum_range({QLIMIT})) 

+

146 """ 

+

147 

+

148# Import the things to use threads. 

+

149 if env.PY2: 

+

150 THREAD = """ 

+

151 import threading 

+

152 import Queue as queue 

+

153 """ 

+

154 else: 

+

155 THREAD = """ 

+

156 import threading 

+

157 import queue 

+

158 """ 

+

159 

+

160# Import the things to use eventlet. 

+

161 EVENTLET = """ 

+

162 import eventlet.green.threading as threading 

+

163 import eventlet.queue as queue 

+

164 """ 

+

165 

+

166# Import the things to use gevent. 

+

167 GEVENT = """ 

+

168 from gevent import monkey 

+

169 monkey.patch_thread() 

+

170 import threading 

+

171 import gevent.queue as queue 

+

172 """ 

+

173 

+

174# Uncomplicated code that doesn't use any of the concurrency stuff, to test 

+

175# the simple case under each of the regimes. 

+

176 SIMPLE = """ 

+

177 total = 0 

+

178 for i in range({QLIMIT}): 

+

179 total += i 

+

180 print(total) 

+

181 """ 

+

182 

+

183 

+

184 def cant_trace_msg(concurrency, the_module): 

+

185 """What might coverage.py say about a concurrency setting and imported module?""" 

+

186 # In the concurrency choices, "multiprocessing" doesn't count, so remove it. 

+

187 if "multiprocessing" in concurrency: 

+

188 parts = concurrency.split(",") 

+

189 parts.remove("multiprocessing") 

+

190 concurrency = ",".join(parts) 

+

191 

+

192 if the_module is None: 

+

193 # We don't even have the underlying module installed, we expect 

+

194 # coverage to alert us to this fact. 

+

195 expected_out = ( 

+

196 "Couldn't trace with concurrency=%s, " 

+

197 "the module isn't installed.\n" % concurrency 

+

198 ) 

+

199 elif env.C_TRACER or concurrency == "thread" or concurrency == "": 

+

200 expected_out = None 

+

201 else: 

+

202 expected_out = ( 

+

203 "Can't support concurrency=%s with PyTracer, " 

+

204 "only threads are supported\n" % concurrency 

+

205 ) 

+

206 return expected_out 

+

207 

+

208 

+

209 class ConcurrencyTest(CoverageTest): 

+

210 """Tests of the concurrency support in coverage.py.""" 

+

211 

+

212 QLIMIT = 1000 

+

213 

+

214 def try_some_code(self, code, concurrency, the_module, expected_out=None): 

+

215 """Run some concurrency testing code and see that it was all covered. 

+

216 

+

217 `code` is the Python code to execute. `concurrency` is the name of 

+

218 the concurrency regime to test it under. `the_module` is the imported 

+

219 module that must be available for this to work at all. `expected_out` 

+

220 is the text we expect the code to produce. 

+

221 

+

222 """ 

+

223 

+

224 self.make_file("try_it.py", code) 

+

225 

+

226 cmd = "coverage run --concurrency=%s try_it.py" % concurrency 

+

227 out = self.run_command(cmd) 

+

228 

+

229 expected_cant_trace = cant_trace_msg(concurrency, the_module) 

+

230 

+

231 if expected_cant_trace is not None: 

+

232 assert out == expected_cant_trace 

+

233 else: 

+

234 # We can fully measure the code if we are using the C tracer, which 

+

235 # can support all the concurrency, or if we are using threads. 

+

236 if expected_out is None: 

+

237 expected_out = "%d\n" % (sum(range(self.QLIMIT))) 

+

238 print(code) 

+

239 assert out == expected_out 

+

240 

+

241 # Read the coverage file and see that try_it.py has all its lines 

+

242 # executed. 

+

243 data = coverage.CoverageData(".coverage") 

+

244 data.read() 

+

245 

+

246 # If the test fails, it's helpful to see this info: 

+

247 fname = abs_file("try_it.py") 

+

248 linenos = data.lines(fname) 

+

249 print("{}: {}".format(len(linenos), linenos)) 

+

250 print_simple_annotation(code, linenos) 

+

251 

+

252 lines = line_count(code) 

+

253 assert line_counts(data)['try_it.py'] == lines 

+

254 

+

255 def test_threads(self): 

+

256 code = (THREAD + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT) 

+

257 self.try_some_code(code, "thread", threading) 

+

258 

+

259 def test_threads_simple_code(self): 

+

260 code = SIMPLE.format(QLIMIT=self.QLIMIT) 

+

261 self.try_some_code(code, "thread", threading) 

+

262 

+

263 def test_eventlet(self): 

+

264 code = (EVENTLET + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT) 

+

265 self.try_some_code(code, "eventlet", eventlet) 

+

266 

+

267 def test_eventlet_simple_code(self): 

+

268 code = SIMPLE.format(QLIMIT=self.QLIMIT) 

+

269 self.try_some_code(code, "eventlet", eventlet) 

+

270 

+

271 def test_gevent(self): 

+

272 code = (GEVENT + SUM_RANGE_Q + PRINT_SUM_RANGE).format(QLIMIT=self.QLIMIT) 

+

273 self.try_some_code(code, "gevent", gevent) 

+

274 

+

275 def test_gevent_simple_code(self): 

+

276 code = SIMPLE.format(QLIMIT=self.QLIMIT) 

+

277 self.try_some_code(code, "gevent", gevent) 

+

278 

+

279 def test_greenlet(self): 

+

280 GREENLET = """\ 

+

281 from greenlet import greenlet 

+

282 

+

283 def test1(x, y): 

+

284 z = gr2.switch(x+y) 

+

285 print(z) 

+

286 

+

287 def test2(u): 

+

288 print(u) 

+

289 gr1.switch(42) 

+

290 

+

291 gr1 = greenlet(test1) 

+

292 gr2 = greenlet(test2) 

+

293 gr1.switch("hello", " world") 

+

294 """ 

+

295 self.try_some_code(GREENLET, "greenlet", greenlet, "hello world\n42\n") 

+

296 

+

297 def test_greenlet_simple_code(self): 

+

298 code = SIMPLE.format(QLIMIT=self.QLIMIT) 

+

299 self.try_some_code(code, "greenlet", greenlet) 

+

300 

+

301 def test_bug_330(self): 

+

302 BUG_330 = """\ 

+

303 from weakref import WeakKeyDictionary 

+

304 import eventlet 

+

305 

+

306 def do(): 

+

307 eventlet.sleep(.01) 

+

308 

+

309 gts = WeakKeyDictionary() 

+

310 for _ in range(100): 

+

311 gts[eventlet.spawn(do)] = True 

+

312 eventlet.sleep(.005) 

+

313 

+

314 eventlet.sleep(.1) 

+

315 print(len(gts)) 

+

316 """ 

+

317 self.try_some_code(BUG_330, "eventlet", eventlet, "0\n") 

+

318 

+

319 

+

320 SQUARE_OR_CUBE_WORK = """ 

+

321 def work(x): 

+

322 # Use different lines in different subprocesses. 

+

323 if x % 2: 

+

324 y = x*x 

+

325 else: 

+

326 y = x*x*x 

+

327 return y 

+

328 """ 

+

329 

+

330 SUM_RANGE_WORK = """ 

+

331 def work(x): 

+

332 return sum_range((x+1)*100) 

+

333 """ 

+

334 

+

335 MULTI_CODE = """ 

+

336 # Above this will be a definition of work(). 

+

337 import multiprocessing 

+

338 import os 

+

339 import time 

+

340 import sys 

+

341 

+

342 def process_worker_main(args): 

+

343 # Need to pause, or the tasks go too quickly, and some processes 

+

344 # in the pool don't get any work, and then don't record data. 

+

345 time.sleep(0.02) 

+

346 ret = work(*args) 

+

347 return os.getpid(), ret 

+

348 

+

349 if __name__ == "__main__": # pragma: no branch 

+

350 # This if is on a single line so we can get 100% coverage 

+

351 # even if we have no arguments. 

+

352 if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1]) 

+

353 pool = multiprocessing.Pool({NPROCS}) 

+

354 inputs = [(x,) for x in range({UPTO})] 

+

355 outputs = pool.imap_unordered(process_worker_main, inputs) 

+

356 pids = set() 

+

357 total = 0 

+

358 for pid, sq in outputs: 

+

359 pids.add(pid) 

+

360 total += sq 

+

361 print("%d pids, total = %d" % (len(pids), total)) 

+

362 pool.close() 

+

363 pool.join() 

+

364 """ 

+

365 

+

366 

+

367 @pytest.mark.skipif(not multiprocessing, reason="No multiprocessing in this Python") 

+

368 @flaky(max_runs=30) # Sometimes a test fails due to inherent randomness. Try more times. 

+

369 class MultiprocessingTest(CoverageTest): 

+

370 """Test support of the multiprocessing module.""" 

+

371 

+

372 def try_multiprocessing_code( 

+

373 self, code, expected_out, the_module, nprocs, concurrency="multiprocessing", args="" 

+

374 ): 

+

375 """Run code using multiprocessing, it should produce `expected_out`.""" 

+

376 self.make_file("multi.py", code) 

+

377 self.make_file(".coveragerc", """\ 

+

378 [run] 

+

379 concurrency = %s 

+

380 source = . 

+

381 """ % concurrency) 

+

382 

+

383 if env.PYVERSION >= (3, 4): 

+

384 start_methods = ['fork', 'spawn'] 

+

385 else: 

+

386 start_methods = [''] 

+

387 

+

388 for start_method in start_methods: 

+

389 if start_method and start_method not in multiprocessing.get_all_start_methods(): 

+

390 continue 

+

391 

+

392 remove_files(".coverage", ".coverage.*") 

+

393 cmd = "coverage run {args} multi.py {start_method}".format( 

+

394 args=args, start_method=start_method, 

+

395 ) 

+

396 out = self.run_command(cmd) 

+

397 expected_cant_trace = cant_trace_msg(concurrency, the_module) 

+

398 

+

399 if expected_cant_trace is not None: 

+

400 assert out == expected_cant_trace 

+

401 else: 

+

402 assert out.rstrip() == expected_out 

+

403 assert len(glob.glob(".coverage.*")) == nprocs + 1 

+

404 

+

405 out = self.run_command("coverage combine") 

+

406 assert out == "" 

+

407 out = self.run_command("coverage report -m") 

+

408 

+

409 last_line = self.squeezed_lines(out)[-1] 

+

410 assert re.search(r"TOTAL \d+ 0 100%", last_line) 

+

411 

+

412 def test_multiprocessing_simple(self): 

+

413 nprocs = 3 

+

414 upto = 30 

+

415 code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto) 

+

416 total = sum(x*x if x%2 else x*x*x for x in range(upto)) 

+

417 expected_out = "{nprocs} pids, total = {total}".format(nprocs=nprocs, total=total) 

+

418 self.try_multiprocessing_code(code, expected_out, threading, nprocs) 

+

419 

+

420 def test_multiprocessing_append(self): 

+

421 nprocs = 3 

+

422 upto = 30 

+

423 code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto) 

+

424 total = sum(x*x if x%2 else x*x*x for x in range(upto)) 

+

425 expected_out = "{nprocs} pids, total = {total}".format(nprocs=nprocs, total=total) 

+

426 self.try_multiprocessing_code(code, expected_out, threading, nprocs, args="--append") 

+

427 

+

428 def test_multiprocessing_and_gevent(self): 

+

429 nprocs = 3 

+

430 upto = 30 

+

431 code = ( 

+

432 SUM_RANGE_WORK + EVENTLET + SUM_RANGE_Q + MULTI_CODE 

+

433 ).format(NPROCS=nprocs, UPTO=upto) 

+

434 total = sum(sum(range((x + 1) * 100)) for x in range(upto)) 

+

435 expected_out = "{nprocs} pids, total = {total}".format(nprocs=nprocs, total=total) 

+

436 self.try_multiprocessing_code( 

+

437 code, expected_out, eventlet, nprocs, concurrency="multiprocessing,eventlet" 

+

438 ) 

+

439 

+

440 def try_multiprocessing_code_with_branching(self, code, expected_out): 

+

441 """Run code using multiprocessing, it should produce `expected_out`.""" 

+

442 self.make_file("multi.py", code) 

+

443 self.make_file("multi.rc", """\ 

+

444 [run] 

+

445 concurrency = multiprocessing 

+

446 branch = True 

+

447 omit = */site-packages/* 

+

448 """) 

+

449 

+

450 if env.PYVERSION >= (3, 4): 

+

451 start_methods = ['fork', 'spawn'] 

+

452 else: 

+

453 start_methods = [''] 

+

454 

+

455 for start_method in start_methods: 

+

456 if start_method and start_method not in multiprocessing.get_all_start_methods(): 

+

457 continue 

+

458 

+

459 out = self.run_command("coverage run --rcfile=multi.rc multi.py %s" % (start_method,)) 

+

460 assert out.rstrip() == expected_out 

+

461 

+

462 out = self.run_command("coverage combine") 

+

463 assert out == "" 

+

464 out = self.run_command("coverage report -m") 

+

465 

+

466 last_line = self.squeezed_lines(out)[-1] 

+

467 assert re.search(r"TOTAL \d+ 0 \d+ 0 100%", last_line) 

+

468 

+

469 def test_multiprocessing_with_branching(self): 

+

470 nprocs = 3 

+

471 upto = 30 

+

472 code = (SQUARE_OR_CUBE_WORK + MULTI_CODE).format(NPROCS=nprocs, UPTO=upto) 

+

473 total = sum(x*x if x%2 else x*x*x for x in range(upto)) 

+

474 expected_out = "{nprocs} pids, total = {total}".format(nprocs=nprocs, total=total) 

+

475 self.try_multiprocessing_code_with_branching(code, expected_out) 

+

476 

+

477 def test_multiprocessing_bootstrap_error_handling(self): 

+

478 # An exception during bootstrapping will be reported. 

+

479 self.make_file("multi.py", """\ 

+

480 import multiprocessing 

+

481 if __name__ == "__main__": 

+

482 with multiprocessing.Manager(): 

+

483 pass 

+

484 """) 

+

485 self.make_file(".coveragerc", """\ 

+

486 [run] 

+

487 concurrency = multiprocessing 

+

488 _crash = _bootstrap 

+

489 """) 

+

490 out = self.run_command("coverage run multi.py") 

+

491 assert "Exception during multiprocessing bootstrap init" in out 

+

492 assert "Exception: Crashing because called by _bootstrap" in out 

+

493 

+

494 def test_bug890(self): 

+

495 # chdir in multiprocessing shouldn't keep us from finding the 

+

496 # .coveragerc file. 

+

497 self.make_file("multi.py", """\ 

+

498 import multiprocessing, os, os.path 

+

499 if __name__ == "__main__": 

+

500 if not os.path.exists("./tmp"): os.mkdir("./tmp") 

+

501 os.chdir("./tmp") 

+

502 with multiprocessing.Manager(): 

+

503 pass 

+

504 print("ok") 

+

505 """) 

+

506 self.make_file(".coveragerc", """\ 

+

507 [run] 

+

508 concurrency = multiprocessing 

+

509 """) 

+

510 out = self.run_command("coverage run multi.py") 

+

511 assert out.splitlines()[-1] == "ok" 

+

512 

+

513 

+

514 def test_coverage_stop_in_threads(): 

+

515 has_started_coverage = [] 

+

516 has_stopped_coverage = [] 

+

517 

+

518 def run_thread(): # pragma: nested 

+

519 """Check that coverage is stopping properly in threads.""" 

+

520 deadline = time.time() + 5 

+

521 ident = threading.currentThread().ident 

+

522 if sys.gettrace() is not None: 

+

523 has_started_coverage.append(ident) 

+

524 while sys.gettrace() is not None: 

+

525 # Wait for coverage to stop 

+

526 time.sleep(0.01) 

+

527 if time.time() > deadline: 

+

528 return 

+

529 has_stopped_coverage.append(ident) 

+

530 

+

531 cov = coverage.Coverage() 

+

532 cov.start() 

+

533 

+

534 t = threading.Thread(target=run_thread) # pragma: nested 

+

535 t.start() # pragma: nested 

+

536 

+

537 time.sleep(0.1) # pragma: nested 

+

538 cov.stop() # pragma: nested 

+

539 t.join() 

+

540 

+

541 assert has_started_coverage == [t.ident] 

+

542 assert has_stopped_coverage == [t.ident] 

+

543 

+

544 

+

545 def test_thread_safe_save_data(tmpdir): 

+

546 # Non-regression test for: https://github.com/nedbat/coveragepy/issues/581 

+

547 

+

548 # Create some Python modules and put them in the path 

+

549 modules_dir = tmpdir.mkdir('test_modules') 

+

550 module_names = ["m{:03d}".format(i) for i in range(1000)] 

+

551 for module_name in module_names: 

+

552 modules_dir.join(module_name + ".py").write("def f(): pass\n") 

+

553 

+

554 # Shared variables for threads 

+

555 should_run = [True] 

+

556 imported = [] 

+

557 

+

558 old_dir = os.getcwd() 

+

559 os.chdir(modules_dir.strpath) 

+

560 try: 

+

561 # Make sure that all dummy modules can be imported. 

+

562 for module_name in module_names: 

+

563 import_local_file(module_name) 

+

564 

+

565 def random_load(): # pragma: nested 

+

566 """Import modules randomly to stress coverage.""" 

+

567 while should_run[0]: 

+

568 module_name = random.choice(module_names) 

+

569 mod = import_local_file(module_name) 

+

570 mod.f() 

+

571 imported.append(mod) 

+

572 

+

573 # Spawn some threads with coverage enabled and attempt to read the 

+

574 # results right after stopping coverage collection with the threads 

+

575 # still running. 

+

576 duration = 0.01 

+

577 for _ in range(3): 

+

578 cov = coverage.Coverage() 

+

579 cov.start() 

+

580 

+

581 threads = [threading.Thread(target=random_load) for _ in range(10)] # pragma: nested 

+

582 should_run[0] = True # pragma: nested 

+

583 for t in threads: # pragma: nested 

+

584 t.start() 

+

585 

+

586 time.sleep(duration) # pragma: nested 

+

587 

+

588 cov.stop() # pragma: nested 

+

589 

+

590 # The following call used to crash with running background threads. 

+

591 cov.get_data() 

+

592 

+

593 # Stop the threads 

+

594 should_run[0] = False 

+

595 for t in threads: 

+

596 t.join() 

+

597 

+

598 if (not imported) and duration < 10: # pragma: only failure 

+

599 duration *= 2 

+

600 

+

601 finally: 

+

602 os.chdir(old_dir) 

+

603 should_run[0] = False 

diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_config_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_config_py.html
new file mode 100644
index 000000000..07e5fec62
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_config_py.html
@@ -0,0 +1,822 @@
Coverage for tests/test_config.py: 100.000%

1# coding: utf-8 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Test the config file handling for coverage.py""" 

+

6 

+

7from collections import OrderedDict 

+

8 

+

9import mock 

+

10import pytest 

+

11 

+

12import coverage 

+

13from coverage.config import HandyConfigParser 

+

14from coverage.misc import CoverageException 

+

15 

+

16from tests.coveragetest import CoverageTest, UsingModulesMixin 

+

17from tests.helpers import without_module 

+

18 

+

19 

+

20 class ConfigTest(CoverageTest): 

+

21 """Tests of the different sources of configuration settings.""" 

+

22 

+

23 def test_default_config(self): 

+

24 # Just constructing a coverage() object gets the right defaults. 

+

25 cov = coverage.Coverage() 

+

26 assert not cov.config.timid 

+

27 assert not cov.config.branch 

+

28 assert cov.config.data_file == ".coverage" 

+

29 

+

30 def test_arguments(self): 

+

31 # Arguments to the constructor are applied to the configuration. 

+

32 cov = coverage.Coverage(timid=True, data_file="fooey.dat", concurrency="multiprocessing") 

+

33 assert cov.config.timid 

+

34 assert not cov.config.branch 

+

35 assert cov.config.data_file == "fooey.dat" 

+

36 assert cov.config.concurrency == ["multiprocessing"] 

+

37 

+

38 def test_config_file(self): 

+

39 # A .coveragerc file will be read into the configuration. 

+

40 self.make_file(".coveragerc", """\ 

+

41 # This is just a bogus .rc file for testing. 

+

42 [run] 

+

43 timid = True 

+

44 data_file = .hello_kitty.data 

+

45 """) 

+

46 cov = coverage.Coverage() 

+

47 assert cov.config.timid 

+

48 assert not cov.config.branch 

+

49 assert cov.config.data_file == ".hello_kitty.data" 

+

50 

+

51 def test_named_config_file(self): 

+

52 # You can name the config file what you like. 

+

53 self.make_file("my_cov.ini", """\ 

+

54 [run] 

+

55 timid = True 

+

56 ; I wouldn't really use this as a data file... 

+

57 data_file = delete.me 

+

58 """) 

+

59 cov = coverage.Coverage(config_file="my_cov.ini") 

+

60 assert cov.config.timid 

+

61 assert not cov.config.branch 

+

62 assert cov.config.data_file == "delete.me" 

+

63 

+

64 def test_toml_config_file(self): 

+

65 # A .coveragerc file will be read into the configuration. 

+

66 self.make_file("pyproject.toml", """\ 

+

67 # This is just a bogus toml file for testing. 

+

68 [tool.somethingelse] 

+

69 authors = ["Joe D'Ávila <joe@gmail.com>"] 

+

70 [tool.coverage.run] 

+

71 concurrency = ["a", "b"] 

+

72 timid = true 

+

73 data_file = ".hello_kitty.data" 

+

74 plugins = ["plugins.a_plugin"] 

+

75 [tool.coverage.report] 

+

76 precision = 3 

+

77 fail_under = 90.5 

+

78 [tool.coverage.html] 

+

79 title = "tabblo & «ταБЬℓσ»" 

+

80 [tool.coverage.plugins.a_plugin] 

+

81 hello = "world" 

+

82 """) 

+

83 cov = coverage.Coverage(config_file="pyproject.toml") 

+

84 assert cov.config.timid 

+

85 assert not cov.config.branch 

+

86 assert cov.config.concurrency == [u"a", u"b"] 

+

87 assert cov.config.data_file == u".hello_kitty.data" 

+

88 assert cov.config.plugins == [u"plugins.a_plugin"] 

+

89 assert cov.config.precision == 3 

+

90 assert cov.config.html_title == u"tabblo & «ταБЬℓσ»" 

+

91 assert round(abs(cov.config.fail_under-90.5), 7) == 0 

+

92 assert cov.config.get_plugin_options("plugins.a_plugin") == {u"hello": u"world"} 

+

93 

+

94 # Test that our class doesn't reject integers when loading floats 

+

95 self.make_file("pyproject.toml", """\ 

+

96 # This is just a bogus toml file for testing. 

+

97 [tool.coverage.report] 

+

98 fail_under = 90 

+

99 """) 

+

100 cov = coverage.Coverage(config_file="pyproject.toml") 

+

101 assert round(abs(cov.config.fail_under-90), 7) == 0 

+

102 assert isinstance(cov.config.fail_under, float) 

+

103 

+

104 def test_ignored_config_file(self): 

+

105 # You can disable reading the .coveragerc file. 

+

106 self.make_file(".coveragerc", """\ 

+

107 [run] 

+

108 timid = True 

+

109 data_file = delete.me 

+

110 """) 

+

111 cov = coverage.Coverage(config_file=False) 

+

112 assert not cov.config.timid 

+

113 assert not cov.config.branch 

+

114 assert cov.config.data_file == ".coverage" 

+

115 

+

116 def test_config_file_then_args(self): 

+

117 # The arguments override the .coveragerc file. 

+

118 self.make_file(".coveragerc", """\ 

+

119 [run] 

+

120 timid = True 

+

121 data_file = weirdo.file 

+

122 """) 

+

123 cov = coverage.Coverage(timid=False, data_file=".mycov") 

+

124 assert not cov.config.timid 

+

125 assert not cov.config.branch 

+

126 assert cov.config.data_file == ".mycov" 

+

127 

+

128 def test_data_file_from_environment(self): 

+

129 # There's an environment variable for the data_file. 

+

130 self.make_file(".coveragerc", """\ 

+

131 [run] 

+

132 timid = True 

+

133 data_file = weirdo.file 

+

134 """) 

+

135 self.set_environ("COVERAGE_FILE", "fromenv.dat") 

+

136 cov = coverage.Coverage() 

+

137 assert cov.config.data_file == "fromenv.dat" 

+

138 # But the constructor arguments override the environment variable. 

+

139 cov = coverage.Coverage(data_file="fromarg.dat") 

+

140 assert cov.config.data_file == "fromarg.dat" 

+

141 

+

142 def test_debug_from_environment(self): 

+

143 self.make_file(".coveragerc", """\ 

+

144 [run] 

+

145 debug = dataio, pids 

+

146 """) 

+

147 self.set_environ("COVERAGE_DEBUG", "callers, fooey") 

+

148 cov = coverage.Coverage() 

+

149 assert cov.config.debug == ["dataio", "pids", "callers", "fooey"] 

+

150 

+

151 def test_rcfile_from_environment(self): 

+

152 self.make_file("here.ini", """\ 

+

153 [run] 

+

154 data_file = overthere.dat 

+

155 """) 

+

156 self.set_environ("COVERAGE_RCFILE", "here.ini") 

+

157 cov = coverage.Coverage() 

+

158 assert cov.config.data_file == "overthere.dat" 

+

159 

+

160 def test_missing_rcfile_from_environment(self): 

+

161 self.set_environ("COVERAGE_RCFILE", "nowhere.ini") 

+

162 msg = "Couldn't read 'nowhere.ini' as a config file" 

+

163 with pytest.raises(CoverageException, match=msg): 

+

164 coverage.Coverage() 

+

165 

+

166 def test_parse_errors(self): 

+

167 # Im-parsable values raise CoverageException, with details. 

+

168 bad_configs_and_msgs = [ 

+

169 ("[run]\ntimid = maybe?\n", r"maybe[?]"), 

+

170 ("timid = 1\n", r"no section headers"), 

+

171 ("[run\n", r"\[run"), 

+

172 ("[report]\nexclude_lines = foo(\n", 

+

173 r"Invalid \[report\].exclude_lines value 'foo\(': " 

+

174 r"(unbalanced parenthesis|missing \))"), 

+

175 ("[report]\npartial_branches = foo[\n", 

+

176 r"Invalid \[report\].partial_branches value 'foo\[': " 

+

177 r"(unexpected end of regular expression|unterminated character set)"), 

+

178 ("[report]\npartial_branches_always = foo***\n", 

+

179 r"Invalid \[report\].partial_branches_always value " 

+

180 r"'foo\*\*\*': " 

+

181 r"multiple repeat"), 

+

182 ] 

+

183 

+

184 for bad_config, msg in bad_configs_and_msgs: 

+

185 print("Trying %r" % bad_config) 

+

186 self.make_file(".coveragerc", bad_config) 

+

187 with pytest.raises(CoverageException, match=msg): 

+

188 coverage.Coverage() 

+

189 

+

190 def test_toml_parse_errors(self): 

+

191 # Im-parsable values raise CoverageException, with details. 

+

192 bad_configs_and_msgs = [ 

+

193 ("[tool.coverage.run]\ntimid = \"maybe?\"\n", r"maybe[?]"), 

+

194 ("[tool.coverage.run\n", r"Key group"), 

+

195 ('[tool.coverage.report]\nexclude_lines = ["foo("]\n', 

+

196 r"Invalid \[tool.coverage.report\].exclude_lines value u?'foo\(': " 

+

197 r"(unbalanced parenthesis|missing \))"), 

+

198 ('[tool.coverage.report]\npartial_branches = ["foo["]\n', 

+

199 r"Invalid \[tool.coverage.report\].partial_branches value u?'foo\[': " 

+

200 r"(unexpected end of regular expression|unterminated character set)"), 

+

201 ('[tool.coverage.report]\npartial_branches_always = ["foo***"]\n', 

+

202 r"Invalid \[tool.coverage.report\].partial_branches_always value " 

+

203 r"u?'foo\*\*\*': " 

+

204 r"multiple repeat"), 

+

205 ('[tool.coverage.run]\nconcurrency="foo"', "not a list"), 

+

206 ("[tool.coverage.report]\nprecision=1.23", "not an integer"), 

+

207 ('[tool.coverage.report]\nfail_under="s"', "not a float"), 

+

208 ] 

+

209 

+

210 for bad_config, msg in bad_configs_and_msgs: 

+

211 print("Trying %r" % bad_config) 

+

212 self.make_file("pyproject.toml", bad_config) 

+

213 with pytest.raises(CoverageException, match=msg): 

+

214 coverage.Coverage() 

+

215 

+

216 def test_environment_vars_in_config(self): 

+

217 # Config files can have $envvars in them. 

+

218 self.make_file(".coveragerc", """\ 

+

219 [run] 

+

220 data_file = $DATA_FILE.fooey 

+

221 branch = $OKAY 

+

222 [report] 

+

223 exclude_lines = 

+

224 the_$$one 

+

225 another${THING} 

+

226 x${THING}y 

+

227 x${NOTHING}y 

+

228 huh$${X}what 

+

229 """) 

+

230 self.set_environ("DATA_FILE", "hello-world") 

+

231 self.set_environ("THING", "ZZZ") 

+

232 self.set_environ("OKAY", "yes") 

+

233 cov = coverage.Coverage() 

+

234 assert cov.config.data_file == "hello-world.fooey" 

+

235 assert cov.config.branch is True 

+

236 assert cov.config.exclude_list == ["the_$one", "anotherZZZ", "xZZZy", "xy", "huh${X}what"] 

+

237 

+

238 def test_environment_vars_in_toml_config(self): 

+

239 # Config files can have $envvars in them. 

+

240 self.make_file("pyproject.toml", """\ 

+

241 [tool.coverage.run] 

+

242 data_file = "$DATA_FILE.fooey" 

+

243 branch = $BRANCH 

+

244 [tool.coverage.report] 

+

245 exclude_lines = [ 

+

246 "the_$$one", 

+

247 "another${THING}", 

+

248 "x${THING}y", 

+

249 "x${NOTHING}y", 

+

250 "huh$${X}what", 

+

251 ] 

+

252 """) 

+

253 self.set_environ("BRANCH", "true") 

+

254 self.set_environ("DATA_FILE", "hello-world") 

+

255 self.set_environ("THING", "ZZZ") 

+

256 cov = coverage.Coverage() 

+

257 assert cov.config.data_file == "hello-world.fooey" 

+

258 assert cov.config.branch is True 

+

259 assert cov.config.exclude_list == ["the_$one", "anotherZZZ", "xZZZy", "xy", "huh${X}what"] 

+

260 

+

261 def test_tilde_in_config(self): 

+

262 # Config entries that are file paths can be tilde-expanded. 

+

263 self.make_file(".coveragerc", """\ 

+

264 [run] 

+

265 data_file = ~/data.file 

+

266 

+

267 [html] 

+

268 directory = ~joe/html_dir 

+

269 

+

270 [xml] 

+

271 output = ~/somewhere/xml.out 

+

272 

+

273 [report] 

+

274 # Strings that aren't file paths are not tilde-expanded. 

+

275 exclude_lines = 

+

276 ~/data.file 

+

277 ~joe/html_dir 

+

278 

+

279 [paths] 

+

280 mapping = 

+

281 ~/src 

+

282 ~joe/source 

+

283 """) 

+

284 def expanduser(s): 

+

285 """Fake tilde expansion""" 

+

286 s = s.replace("~/", "/Users/me/") 

+

287 s = s.replace("~joe/", "/Users/joe/") 

+

288 return s 

+

289 

+

290 with mock.patch.object(coverage.config.os.path, 'expanduser', new=expanduser): 

+

291 cov = coverage.Coverage() 

+

292 assert cov.config.data_file == "/Users/me/data.file" 

+

293 assert cov.config.html_dir == "/Users/joe/html_dir" 

+

294 assert cov.config.xml_output == "/Users/me/somewhere/xml.out" 

+

295 assert cov.config.exclude_list == ["~/data.file", "~joe/html_dir"] 

+

296 assert cov.config.paths == {'mapping': ['/Users/me/src', '/Users/joe/source']} 

+

297 

+

298 def test_tilde_in_toml_config(self): 

+

299 # Config entries that are file paths can be tilde-expanded. 

+

300 self.make_file("pyproject.toml", """\ 

+

301 [tool.coverage.run] 

+

302 data_file = "~/data.file" 

+

303 

+

304 [tool.coverage.html] 

+

305 directory = "~joe/html_dir" 

+

306 

+

307 [tool.coverage.xml] 

+

308 output = "~/somewhere/xml.out" 

+

309 

+

310 [tool.coverage.report] 

+

311 # Strings that aren't file paths are not tilde-expanded. 

+

312 exclude_lines = [ 

+

313 "~/data.file", 

+

314 "~joe/html_dir", 

+

315 ] 

+

316 """) 

+

317 def expanduser(s): 

+

318 """Fake tilde expansion""" 

+

319 s = s.replace("~/", "/Users/me/") 

+

320 s = s.replace("~joe/", "/Users/joe/") 

+

321 return s 

+

322 

+

323 with mock.patch.object(coverage.config.os.path, 'expanduser', new=expanduser): 

+

324 cov = coverage.Coverage() 

+

325 assert cov.config.data_file == "/Users/me/data.file" 

+

326 assert cov.config.html_dir == "/Users/joe/html_dir" 

+

327 assert cov.config.xml_output == "/Users/me/somewhere/xml.out" 

+

328 assert cov.config.exclude_list == ["~/data.file", "~joe/html_dir"] 

+

329 

+

330 def test_tweaks_after_constructor(self): 

+

331 # set_option can be used after construction to affect the config. 

+

332 cov = coverage.Coverage(timid=True, data_file="fooey.dat") 

+

333 cov.set_option("run:timid", False) 

+

334 

+

335 assert not cov.config.timid 

+

336 assert not cov.config.branch 

+

337 assert cov.config.data_file == "fooey.dat" 

+

338 

+

339 assert not cov.get_option("run:timid") 

+

340 assert not cov.get_option("run:branch") 

+

341 assert cov.get_option("run:data_file") == "fooey.dat" 

+

342 

+

343 def test_tweaks_paths_after_constructor(self): 

+

344 self.make_file(".coveragerc", """\ 

+

345 [paths] 

+

346 first = 

+

347 /first/1 

+

348 /first/2 

+

349 

+

350 second = 

+

351 /second/a 

+

352 /second/b 

+

353 """) 

+

354 old_paths = OrderedDict() 

+

355 old_paths["first"] = ["/first/1", "/first/2"] 

+

356 old_paths["second"] = ["/second/a", "/second/b"] 

+

357 cov = coverage.Coverage() 

+

358 paths = cov.get_option("paths") 

+

359 assert paths == old_paths 

+

360 

+

361 new_paths = OrderedDict() 

+

362 new_paths['magic'] = ['src', 'ok'] 

+

363 cov.set_option("paths", new_paths) 

+

364 

+

365 assert cov.get_option("paths") == new_paths 

+

366 

+

367 def test_tweak_error_checking(self): 

+

368 # Trying to set an unknown config value raises an error. 

+

369 cov = coverage.Coverage() 

+

370 with pytest.raises(CoverageException, match="No such option: 'run:xyzzy'"): 

+

371 cov.set_option("run:xyzzy", 12) 

+

372 with pytest.raises(CoverageException, match="No such option: 'xyzzy:foo'"): 

+

373 cov.set_option("xyzzy:foo", 12) 

+

374 with pytest.raises(CoverageException, match="No such option: 'run:xyzzy'"): 

+

375 _ = cov.get_option("run:xyzzy") 

+

376 with pytest.raises(CoverageException, match="No such option: 'xyzzy:foo'"): 

+

377 _ = cov.get_option("xyzzy:foo") 

+

378 

+

379 def test_tweak_plugin_options(self): 

+

380 # Plugin options have a more flexible syntax. 

+

381 cov = coverage.Coverage() 

+

382 cov.set_option("run:plugins", ["fooey.plugin", "xyzzy.coverage.plugin"]) 

+

383 cov.set_option("fooey.plugin:xyzzy", 17) 

+

384 cov.set_option("xyzzy.coverage.plugin:plugh", ["a", "b"]) 

+

385 with pytest.raises(CoverageException, match="No such option: 'no_such.plugin:foo'"): 

+

386 cov.set_option("no_such.plugin:foo", 23) 

+

387 

+

388 assert cov.get_option("fooey.plugin:xyzzy") == 17 

+

389 assert cov.get_option("xyzzy.coverage.plugin:plugh") == ["a", "b"] 

+

390 with pytest.raises(CoverageException, match="No such option: 'no_such.plugin:foo'"): 

+

391 _ = cov.get_option("no_such.plugin:foo") 

+

392 

+

393 def test_unknown_option(self): 

+

394 self.make_file(".coveragerc", """\ 

+

395 [run] 

+

396 xyzzy = 17 

+

397 """) 

+

398 msg = r"Unrecognized option '\[run\] xyzzy=' in config file .coveragerc" 

+

399 with pytest.raises(CoverageException, match=msg): 

+

400 _ = coverage.Coverage() 

+

401 

+

402 def test_unknown_option_toml(self): 

+

403 self.make_file("pyproject.toml", """\ 

+

404 [tool.coverage.run] 

+

405 xyzzy = 17 

+

406 """) 

+

407 msg = r"Unrecognized option '\[tool.coverage.run\] xyzzy=' in config file pyproject.toml" 

+

408 with pytest.raises(CoverageException, match=msg): 

+

409 _ = coverage.Coverage() 

+

410 

+

411 def test_misplaced_option(self): 

+

412 self.make_file(".coveragerc", """\ 

+

413 [report] 

+

414 branch = True 

+

415 """) 

+

416 msg = r"Unrecognized option '\[report\] branch=' in config file .coveragerc" 

+

417 with pytest.raises(CoverageException, match=msg): 

+

418 _ = coverage.Coverage() 

+

419 

+

420 def test_unknown_option_in_other_ini_file(self): 

+

421 self.make_file("setup.cfg", """\ 

+

422 [coverage:run] 

+

423 huh = what? 

+

424 """) 

+

425 msg = (r"Unrecognized option '\[coverage:run\] huh=' in config file setup.cfg") 

+

426 with pytest.raises(CoverageException, match=msg): 

+

427 _ = coverage.Coverage() 

+

428 

+

429 def test_exceptions_from_missing_things(self): 

+

430 self.make_file("config.ini", """\ 

+

431 [run] 

+

432 branch = True 

+

433 """) 

+

434 config = HandyConfigParser("config.ini") 

+

435 with pytest.raises(Exception, match="No section: 'xyzzy'"): 

+

436 config.options("xyzzy") 

+

437 with pytest.raises(Exception, match="No option 'foo' in section: 'xyzzy'"): 

+

438 config.get("xyzzy", "foo") 

+

439 

+

440 

+

441 class ConfigFileTest(UsingModulesMixin, CoverageTest): 

+

442 """Tests of the config file settings in particular.""" 

+

443 

+

444 # This sample file tries to use lots of variation of syntax... 

+

445 # The {section} placeholder lets us nest these settings in another file. 

+

446 LOTSA_SETTINGS = """\ 

+

447 # This is a settings file for coverage.py 

+

448 [{section}run] 

+

449 timid = yes 

+

450 data_file = something_or_other.dat 

+

451 branch = 1 

+

452 cover_pylib = TRUE 

+

453 parallel = on 

+

454 concurrency = thread 

+

455 ; this omit is overridden by the omit from [report] 

+

456 omit = twenty 

+

457 source = myapp 

+

458 source_pkgs = ned 

+

459 plugins = 

+

460 plugins.a_plugin 

+

461 plugins.another 

+

462 debug = callers, pids , dataio 

+

463 disable_warnings = abcd , efgh 

+

464 

+

465 [{section}report] 

+

466 ; these settings affect reporting. 

+

467 exclude_lines = 

+

468 if 0: 

+

469 

+

470 pragma:?\\s+no cover 

+

471 another_tab 

+

472 

+

473 ignore_errors = TRUE 

+

474 omit = 

+

475 one, another, some_more, 

+

476 yet_more 

+

477 include = thirty 

+

478 precision = 3 

+

479 

+

480 partial_branches = 

+

481 pragma:?\\s+no branch 

+

482 partial_branches_always = 

+

483 if 0: 

+

484 while True: 

+

485 

+

486 show_missing= TruE 

+

487 skip_covered = TruE 

+

488 skip_empty =TruE 

+

489 

+

490 [{section}html] 

+

491 

+

492 directory = c:\\tricky\\dir.somewhere 

+

493 extra_css=something/extra.css 

+

494 title = Title & nums # nums! 

+

495 [{section}xml] 

+

496 output=mycov.xml 

+

497 package_depth = 17 

+

498 

+

499 [{section}paths] 

+

500 source = 

+

501 . 

+

502 /home/ned/src/ 

+

503 

+

504 other = other, /home/ned/other, c:\\Ned\\etc 

+

505 

+

506 [{section}plugins.a_plugin] 

+

507 hello = world 

+

508 ; comments still work. 

+

509 names = Jane/John/Jenny 

+

510 

+

511 [{section}json] 

+

512 pretty_print = True 

+

513 show_contexts = True 

+

514 """ 

+

515 

+

516 # Just some sample setup.cfg text from the docs. 

+

517 SETUP_CFG = """\ 

+

518 [bdist_rpm] 

+

519 release = 1 

+

520 packager = Jane Packager <janep@pysoft.com> 

+

521 doc_files = CHANGES.txt 

+

522 README.txt 

+

523 USAGE.txt 

+

524 doc/ 

+

525 examples/ 

+

526 """ 

+

527 

+

528 # Just some sample tox.ini text from the docs. 

+

529 TOX_INI = """\ 

+

530 [tox] 

+

531 envlist = py{26,27,33,34,35}-{c,py}tracer 

+

532 skip_missing_interpreters = True 

+

533 

+

534 [testenv] 

+

535 commands = 

+

536 # Create tests/zipmods.zip, install the egg1 egg 

+

537 python igor.py zip_mods install_egg 

+

538 """ 

+

539 

+

540 def assert_config_settings_are_correct(self, cov): 

+

541 """Check that `cov` has all the settings from LOTSA_SETTINGS.""" 

+

542 assert cov.config.timid 

+

543 assert cov.config.data_file == "something_or_other.dat" 

+

544 assert cov.config.branch 

+

545 assert cov.config.cover_pylib 

+

546 assert cov.config.debug == ["callers", "pids", "dataio"] 

+

547 assert cov.config.parallel 

+

548 assert cov.config.concurrency == ["thread"] 

+

549 assert cov.config.source == ["myapp"] 

+

550 assert cov.config.source_pkgs == ["ned"] 

+

551 assert cov.config.disable_warnings == ["abcd", "efgh"] 

+

552 

+

553 assert cov.get_exclude_list() == ["if 0:", r"pragma:?\s+no cover", "another_tab"] 

+

554 assert cov.config.ignore_errors 

+

555 assert cov.config.run_omit == ["twenty"] 

+

556 assert cov.config.report_omit == ["one", "another", "some_more", "yet_more"] 

+

557 assert cov.config.report_include == ["thirty"] 

+

558 assert cov.config.precision == 3 

+

559 

+

560 assert cov.config.partial_list == [r"pragma:?\s+no branch"] 

+

561 assert cov.config.partial_always_list == ["if 0:", "while True:"] 

+

562 assert cov.config.plugins == ["plugins.a_plugin", "plugins.another"] 

+

563 assert cov.config.show_missing 

+

564 assert cov.config.skip_covered 

+

565 assert cov.config.skip_empty 

+

566 assert cov.config.html_dir == r"c:\tricky\dir.somewhere" 

+

567 assert cov.config.extra_css == "something/extra.css" 

+

568 assert cov.config.html_title == "Title & nums # nums!" 

+

569 

+

570 assert cov.config.xml_output == "mycov.xml" 

+

571 assert cov.config.xml_package_depth == 17 

+

572 

+

573 assert cov.config.paths == { 

+

574 'source': ['.', '/home/ned/src/'], 

+

575 'other': ['other', '/home/ned/other', 'c:\\Ned\\etc'] 

+

576 } 

+

577 

+

578 assert cov.config.get_plugin_options("plugins.a_plugin") == { 

+

579 'hello': 'world', 

+

580 'names': 'Jane/John/Jenny', 

+

581 } 

+

582 assert cov.config.get_plugin_options("plugins.another") == {} 

+

583 assert cov.config.json_show_contexts is True 

+

584 assert cov.config.json_pretty_print is True 

+

585 

+

586 def test_config_file_settings(self): 

+

587 self.make_file(".coveragerc", self.LOTSA_SETTINGS.format(section="")) 

+

588 cov = coverage.Coverage() 

+

589 self.assert_config_settings_are_correct(cov) 

+

590 

+

591 def check_config_file_settings_in_other_file(self, fname, contents): 

+

592 """Check config will be read from another file, with prefixed sections.""" 

+

593 nested = self.LOTSA_SETTINGS.format(section="coverage:") 

+

594 fname = self.make_file(fname, nested + "\n" + contents) 

+

595 cov = coverage.Coverage() 

+

596 self.assert_config_settings_are_correct(cov) 

+

597 

+

598 def test_config_file_settings_in_setupcfg(self): 

+

599 self.check_config_file_settings_in_other_file("setup.cfg", self.SETUP_CFG) 

+

600 

+

601 def test_config_file_settings_in_toxini(self): 

+

602 self.check_config_file_settings_in_other_file("tox.ini", self.TOX_INI) 

+

603 

+

604 def check_other_config_if_coveragerc_specified(self, fname, contents): 

+

605 """Check that config `fname` is read if .coveragerc is missing, but specified.""" 

+

606 nested = self.LOTSA_SETTINGS.format(section="coverage:") 

+

607 self.make_file(fname, nested + "\n" + contents) 

+

608 cov = coverage.Coverage(config_file=".coveragerc") 

+

609 self.assert_config_settings_are_correct(cov) 

+

610 

+

611 def test_config_file_settings_in_setupcfg_if_coveragerc_specified(self): 

+

612 self.check_other_config_if_coveragerc_specified("setup.cfg", self.SETUP_CFG) 

+

613 

+

614 def test_config_file_settings_in_tox_if_coveragerc_specified(self): 

+

615 self.check_other_config_if_coveragerc_specified("tox.ini", self.TOX_INI) 

+

616 

+

617 def check_other_not_read_if_coveragerc(self, fname): 

+

618 """Check config `fname` is not read if .coveragerc exists.""" 

+

619 self.make_file(".coveragerc", """\ 

+

620 [run] 

+

621 include = foo 

+

622 """) 

+

623 self.make_file(fname, """\ 

+

624 [coverage:run] 

+

625 omit = bar 

+

626 branch = true 

+

627 """) 

+

628 cov = coverage.Coverage() 

+

629 assert cov.config.run_include == ["foo"] 

+

630 assert cov.config.run_omit is None 

+

631 assert cov.config.branch is False 

+

632 

+

633 def test_setupcfg_only_if_not_coveragerc(self): 

+

634 self.check_other_not_read_if_coveragerc("setup.cfg") 

+

635 

+

636 def test_toxini_only_if_not_coveragerc(self): 

+

637 self.check_other_not_read_if_coveragerc("tox.ini") 

+

638 

+

639 def check_other_config_need_prefixes(self, fname): 

+

640 """Check that `fname` sections won't be read if un-prefixed.""" 

+

641 self.make_file(fname, """\ 

+

642 [run] 

+

643 omit = bar 

+

644 branch = true 

+

645 """) 

+

646 cov = coverage.Coverage() 

+

647 assert cov.config.run_omit is None 

+

648 assert cov.config.branch is False 

+

649 

+

650 def test_setupcfg_only_if_prefixed(self): 

+

651 self.check_other_config_need_prefixes("setup.cfg") 

+

652 

+

653 def test_toxini_only_if_prefixed(self): 

+

654 self.check_other_config_need_prefixes("tox.ini") 

+

655 

+

656 def test_tox_ini_even_if_setup_cfg(self): 

+

657 # There's a setup.cfg, but no coverage settings in it, so tox.ini 

+

658 # is read. 

+

659 nested = self.LOTSA_SETTINGS.format(section="coverage:") 

+

660 self.make_file("tox.ini", self.TOX_INI + "\n" + nested) 

+

661 self.make_file("setup.cfg", self.SETUP_CFG) 

+

662 cov = coverage.Coverage() 

+

663 self.assert_config_settings_are_correct(cov) 

+

664 

+

665 def test_read_prefixed_sections_from_explicit_file(self): 

+

666 # You can point to a tox.ini, and it will find [coverage:run] sections 

+

667 nested = self.LOTSA_SETTINGS.format(section="coverage:") 

+

668 self.make_file("tox.ini", self.TOX_INI + "\n" + nested) 

+

669 cov = coverage.Coverage(config_file="tox.ini") 

+

670 self.assert_config_settings_are_correct(cov) 

+

671 

+

672 def test_non_ascii(self): 

+

673 self.make_file(".coveragerc", """\ 

+

674 [report] 

+

675 exclude_lines = 

+

676 first 

+

677 ✘${TOX_ENVNAME} 

+

678 third 

+

679 [html] 

+

680 title = tabblo & «ταБЬℓσ» # numbers 

+

681 """) 

+

682 self.set_environ("TOX_ENVNAME", "weirdo") 

+

683 cov = coverage.Coverage() 

+

684 

+

685 assert cov.config.exclude_list == ["first", "✘weirdo", "third"] 

+

686 assert cov.config.html_title == "tabblo & «ταБЬℓσ» # numbers" 

+

687 

+

688 def test_unreadable_config(self): 

+

689 # If a config file is explicitly specified, then it is an error for it 

+

690 # to not be readable. 

+

691 bad_files = [ 

+

692 "nosuchfile.txt", 

+

693 ".", 

+

694 ] 

+

695 for bad_file in bad_files: 

+

696 msg = "Couldn't read %r as a config file" % bad_file 

+

697 with pytest.raises(CoverageException, match=msg): 

+

698 coverage.Coverage(config_file=bad_file) 

+

699 

+

700 def test_nocoveragerc_file_when_specified(self): 

+

701 cov = coverage.Coverage(config_file=".coveragerc") 

+

702 assert not cov.config.timid 

+

703 assert not cov.config.branch 

+

704 assert cov.config.data_file == ".coverage" 

+

705 

+

706 def test_note_is_obsolete(self): 

+

707 self.make_file("main.py", "a = 1") 

+

708 self.make_file(".coveragerc", """\ 

+

709 [run] 

+

710 note = I am here I am here I am here! 

+

711 """) 

+

712 cov = coverage.Coverage() 

+

713 with self.assert_warnings(cov, [r"The '\[run] note' setting is no longer supported."]): 

+

714 self.start_import_stop(cov, "main") 

+

715 cov.report() 

+

716 

+

717 def test_no_toml_installed_no_toml(self): 

+

718 # Can't read a toml file that doesn't exist. 

+

719 with without_module(coverage.tomlconfig, 'toml'): 

+

720 msg = "Couldn't read 'cov.toml' as a config file" 

+

721 with pytest.raises(CoverageException, match=msg): 

+

722 coverage.Coverage(config_file="cov.toml") 

+

723 

+

724 def test_no_toml_installed_explicit_toml(self): 

+

725 # Can't specify a toml config file if toml isn't installed. 

+

726 self.make_file("cov.toml", "# A toml file!") 

+

727 with without_module(coverage.tomlconfig, 'toml'): 

+

728 msg = "Can't read 'cov.toml' without TOML support" 

+

729 with pytest.raises(CoverageException, match=msg): 

+

730 coverage.Coverage(config_file="cov.toml") 

+

731 

+

732 def test_no_toml_installed_pyproject_toml(self): 

+

733 # Can't have coverage config in pyproject.toml without toml installed. 

+

734 self.make_file("pyproject.toml", """\ 

+

735 # A toml file! 

+

736 [tool.coverage.run] 

+

737 xyzzy = 17 

+

738 """) 

+

739 with without_module(coverage.tomlconfig, 'toml'): 

+

740 msg = "Can't read 'pyproject.toml' without TOML support" 

+

741 with pytest.raises(CoverageException, match=msg): 

+

742 coverage.Coverage() 

+

743 

+

744 def test_no_toml_installed_pyproject_no_coverage(self): 

+

745 # It's ok to have non-coverage pyproject.toml without toml installed. 

+

746 self.make_file("pyproject.toml", """\ 

+

747 # A toml file! 

+

748 [tool.something] 

+

749 xyzzy = 17 

+

750 """) 

+

751 with without_module(coverage.tomlconfig, 'toml'): 

+

752 cov = coverage.Coverage() 

+

753 # We get default settings: 

+

754 assert not cov.config.timid 

+

755 assert not cov.config.branch 

+

756 assert cov.config.data_file == ".coverage" 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_context_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_context_py.html new file mode 100644 index 000000000..c5ab7c620 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_context_py.html @@ -0,0 +1,358 @@ + + + + + + Coverage for tests/test_context.py: 100.000% + + + + + + + + + + +
+ Hide keyboard shortcuts +

Hot-keys on this page

+
+

+ r + m + x + p   toggle line displays +

+

+ j + k   next/prev highlighted chunk +

+

+ 0   (zero) top of page +

+

+ 1   (one) first highlighted chunk +

+
+
+
+

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests for context support.""" 

+

5 

+

6import inspect 

+

7import os.path 

+

8 

+

9import pytest 

+

10 

+

11import coverage 

+

12from coverage import env 

+

13from coverage.context import qualname_from_frame 

+

14from coverage.data import CoverageData 

+

15 

+

16from tests.coveragetest import CoverageTest 

+

17from tests.helpers import assert_count_equal 

+

18 

+

19 

+

20class StaticContextTest(CoverageTest): 

+

21 """Tests of the static context.""" 

+

22 

+

23 def test_no_context(self): 

+

24 self.make_file("main.py", "a = 1") 

+

25 cov = coverage.Coverage() 

+

26 self.start_import_stop(cov, "main") 

+

27 data = cov.get_data() 

+

28 assert_count_equal(data.measured_contexts(), [""]) 

+

29 

+

30 def test_static_context(self): 

+

31 self.make_file("main.py", "a = 1") 

+

32 cov = coverage.Coverage(context="gooey") 

+

33 self.start_import_stop(cov, "main") 

+

34 data = cov.get_data() 

+

35 assert_count_equal(data.measured_contexts(), ["gooey"]) 

+

36 

+

37 SOURCE = """\ 

+

38 a = 1 

+

39 if a > 2: 

+

40 a = 3 

+

41 assert a == 1 

+

42 """ 

+

43 

+

44 LINES = [1, 2, 4] 

+

45 ARCS = [(-1, 1), (1, 2), (2, 4), (4, -1)] 

+

46 

+

47 def run_red_blue(self, **options): 

+

48 """Run red.py and blue.py, and return their CoverageData objects.""" 

+

49 self.make_file("red.py", self.SOURCE) 

+

50 red_cov = coverage.Coverage(context="red", data_suffix="r", source=["."], **options) 

+

51 self.start_import_stop(red_cov, "red") 

+

52 red_cov.save() 

+

53 red_data = red_cov.get_data() 

+

54 

+

55 self.make_file("blue.py", self.SOURCE) 

+

56 blue_cov = coverage.Coverage(context="blue", data_suffix="b", source=["."], **options) 

+

57 self.start_import_stop(blue_cov, "blue") 

+

58 blue_cov.save() 

+

59 blue_data = blue_cov.get_data() 

+

60 

+

61 return red_data, blue_data 

+

62 

+

63 def test_combining_line_contexts(self): 

+

64 red_data, blue_data = self.run_red_blue() 

+

65 for datas in [[red_data, blue_data], [blue_data, red_data]]: 

+

66 combined = CoverageData(suffix="combined") 

+

67 for data in datas: 

+

68 combined.update(data) 

+

69 

+

70 assert combined.measured_contexts() == {'red', 'blue'} 

+

71 

+

72 full_names = {os.path.basename(f): f for f in combined.measured_files()} 

+

73 assert_count_equal(full_names, ['red.py', 'blue.py']) 

+

74 

+

75 fred = full_names['red.py'] 

+

76 fblue = full_names['blue.py'] 

+

77 

+

78 def assert_combined_lines(filename, context, lines): 

+

79 # pylint: disable=cell-var-from-loop 

+

80 combined.set_query_context(context) 

+

81 assert combined.lines(filename) == lines 

+

82 

+

83 assert_combined_lines(fred, 'red', self.LINES) 

+

84 assert_combined_lines(fred, 'blue', []) 

+

85 assert_combined_lines(fblue, 'red', []) 

+

86 assert_combined_lines(fblue, 'blue', self.LINES) 

+

87 

+

88 def test_combining_arc_contexts(self): 

+

89 red_data, blue_data = self.run_red_blue(branch=True) 

+

90 for datas in [[red_data, blue_data], [blue_data, red_data]]: 

+

91 combined = CoverageData(suffix="combined") 

+

92 for data in datas: 

+

93 combined.update(data) 

+

94 

+

95 assert combined.measured_contexts() == {'red', 'blue'} 

+

96 

+

97 full_names = {os.path.basename(f): f for f in combined.measured_files()} 

+

98 assert_count_equal(full_names, ['red.py', 'blue.py']) 

+

99 

+

100 fred = full_names['red.py'] 

+

101 fblue = full_names['blue.py'] 

+

102 

+

103 def assert_combined_lines(filename, context, lines): 

+

104 # pylint: disable=cell-var-from-loop 

+

105 combined.set_query_context(context) 

+

106 assert combined.lines(filename) == lines 

+

107 

+

108 assert_combined_lines(fred, 'red', self.LINES) 

+

109 assert_combined_lines(fred, 'blue', []) 

+

110 assert_combined_lines(fblue, 'red', []) 

+

111 assert_combined_lines(fblue, 'blue', self.LINES) 

+

112 

+

113 def assert_combined_arcs(filename, context, lines): 

+

114 # pylint: disable=cell-var-from-loop 

+

115 combined.set_query_context(context) 

+

116 assert combined.arcs(filename) == lines 

+

117 

+

118 assert_combined_arcs(fred, 'red', self.ARCS) 

+

119 assert_combined_arcs(fred, 'blue', []) 

+

120 assert_combined_arcs(fblue, 'red', []) 

+

121 assert_combined_arcs(fblue, 'blue', self.ARCS) 
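The combining pattern these tests exercise can be written as a standalone snippet. A minimal sketch, assuming two data files tagged with the static contexts "red" and "blue" were saved with suffixes "r" and "b" (as run_red_blue does above); CoverageData.read() is assumed here to load each file from disk:

    # combine_contexts.py -- illustrative sketch, not part of the test suite
    from coverage.data import CoverageData

    combined = CoverageData(suffix="combined")
    for suffix in ("r", "b"):
        part = CoverageData(suffix=suffix)
        part.read()                    # load .coverage.<suffix> from disk
        combined.update(part)          # merge, preserving context labels

    combined.set_query_context("red")  # restrict queries to one context
    for filename in combined.measured_files():
        print(filename, combined.lines(filename))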

+

122 

+

123 

+

124class DynamicContextTest(CoverageTest): 

+

125 """Tests of dynamically changing contexts.""" 

+

126 

+

127 SOURCE = """\ 

+

128 def helper(lineno): 

+

129 x = 2 

+

130 

+

131 def test_one(): 

+

132 a = 5 

+

133 helper(6) 

+

134 

+

135 def test_two(): 

+

136 a = 9 

+

137 b = 10 

+

138 if a > 11: 

+

139 b = 12 

+

140 assert a == (13-4) 

+

141 assert b == (14-4) 

+

142 helper(15) 

+

143 

+

144 test_one() 

+

145 x = 18 

+

146 helper(19) 

+

147 test_two() 

+

148 """ 

+

149 

+

150 OUTER_LINES = [1, 4, 8, 17, 18, 19, 2, 20] 

+

151 TEST_ONE_LINES = [5, 6, 2] 

+

152 TEST_TWO_LINES = [9, 10, 11, 13, 14, 15, 2] 

+

153 

+

154 def test_dynamic_alone(self): 

+

155 self.make_file("two_tests.py", self.SOURCE) 

+

156 cov = coverage.Coverage(source=["."]) 

+

157 cov.set_option("run:dynamic_context", "test_function") 

+

158 self.start_import_stop(cov, "two_tests") 

+

159 data = cov.get_data() 

+

160 

+

161 full_names = {os.path.basename(f): f for f in data.measured_files()} 

+

162 fname = full_names["two_tests.py"] 

+

163 assert_count_equal( 

+

164 data.measured_contexts(), 

+

165 ["", "two_tests.test_one", "two_tests.test_two"] 

+

166 ) 

+

167 

+

168 def assert_context_lines(context, lines): 

+

169 data.set_query_context(context) 

+

170 assert_count_equal(lines, data.lines(fname)) 

+

171 

+

172 assert_context_lines("", self.OUTER_LINES) 

+

173 assert_context_lines("two_tests.test_one", self.TEST_ONE_LINES) 

+

174 assert_context_lines("two_tests.test_two", self.TEST_TWO_LINES) 

+

175 

+

176 def test_static_and_dynamic(self): 

+

177 self.make_file("two_tests.py", self.SOURCE) 

+

178 cov = coverage.Coverage(context="stat", source=["."]) 

+

179 cov.set_option("run:dynamic_context", "test_function") 

+

180 self.start_import_stop(cov, "two_tests") 

+

181 data = cov.get_data() 

+

182 

+

183 full_names = {os.path.basename(f): f for f in data.measured_files()} 

+

184 fname = full_names["two_tests.py"] 

+

185 assert_count_equal( 

+

186 data.measured_contexts(), 

+

187 ["stat", "stat|two_tests.test_one", "stat|two_tests.test_two"] 

+

188 ) 

+

189 

+

190 def assert_context_lines(context, lines): 

+

191 data.set_query_context(context) 

+

192 assert_count_equal(lines, data.lines(fname)) 

+

193 

+

194 assert_context_lines("stat", self.OUTER_LINES) 

+

195 assert_context_lines("stat|two_tests.test_one", self.TEST_ONE_LINES) 

+

196 assert_context_lines("stat|two_tests.test_two", self.TEST_TWO_LINES) 

+

197 

+

198 

+

199def get_qualname(): 

+

200 """Helper to return qualname_from_frame for the caller.""" 

+

201 stack = inspect.stack()[1:] 

+

202 if any(sinfo[0].f_code.co_name == "get_qualname" for sinfo in stack): 

+

203 # We're calling ourselves recursively, maybe because we're testing 

+

204 # properties. Return an int to try to get back on track. 

+

205 return 17 

+

206 caller_frame = stack[0][0] 

+

207 return qualname_from_frame(caller_frame) 

+

208 

+

209# pylint: disable=missing-class-docstring, missing-function-docstring, unused-argument 

+

210 

+

211class Parent(object): 

+

212 def meth(self): 

+

213 return get_qualname() 

+

214 

+

215 @property 

+

216 def a_property(self): 

+

217 return get_qualname() 

+

218 

+

219class Child(Parent): 

+

220 pass 

+

221 

+

222class SomethingElse(object): 

+

223 pass 

+

224 

+

225class MultiChild(SomethingElse, Child): 

+

226 pass 

+

227 

+

228def no_arguments(): 

+

229 return get_qualname() 

+

230 

+

231def plain_old_function(a, b): 

+

232 return get_qualname() 

+

233 

+

234def fake_out(self): 

+

235 return get_qualname() 

+

236 

+

237def patch_meth(self): 

+

238 return get_qualname() 

+

239 

+

240class OldStyle: 

+

241 def meth(self): 

+

242 return get_qualname() 

+

243 

+

244class OldChild(OldStyle): 

+

245 pass 

+

246 

+

247# pylint: enable=missing-class-docstring, missing-function-docstring, unused-argument 

+

248 

+

249 

+

250class QualnameTest(CoverageTest): 

+

251 """Tests of qualname_from_frame.""" 

+

252 

+

253 # Pylint gets confused about meth() below. 

+

254 # pylint: disable=no-value-for-parameter 

+

255 

+

256 run_in_temp_dir = False 

+

257 

+

258 def test_method(self): 

+

259 assert Parent().meth() == "tests.test_context.Parent.meth" 

+

260 

+

261 def test_inherited_method(self): 

+

262 assert Child().meth() == "tests.test_context.Parent.meth" 

+

263 

+

264 def test_mi_inherited_method(self): 

+

265 assert MultiChild().meth() == "tests.test_context.Parent.meth" 

+

266 

+

267 def test_no_arguments(self): 

+

268 assert no_arguments() == "tests.test_context.no_arguments" 

+

269 

+

270 def test_plain_old_function(self): 

+

271 assert plain_old_function(0, 1) == "tests.test_context.plain_old_function" 

+

272 

+

273 def test_fake_out(self): 

+

274 assert fake_out(0) == "tests.test_context.fake_out" 

+

275 

+

276 def test_property(self): 

+

277 assert Parent().a_property == "tests.test_context.Parent.a_property" 

+

278 

+

279 def test_changeling(self): 

+

280 c = Child() 

+

281 c.meth = patch_meth 

+

282 assert c.meth(c) == "tests.test_context.patch_meth" 

+

283 

+

284 @pytest.mark.skipif(not env.PY2, reason="Old-style classes are only in Python 2") 

+

285 def test_oldstyle(self): 

+

286 assert OldStyle().meth() == "tests.test_context.OldStyle.meth" 

+

287 assert OldChild().meth() == "tests.test_context.OldStyle.meth" 

+

288 

+

289 def test_bug_829(self): 

+

290 # A class with a name like a function shouldn't confuse qualname_from_frame. 

+

291 class test_something(object): # pylint: disable=unused-variable 

+

292 assert get_qualname() is None 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_coverage_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_coverage_py.html new file mode 100644 index 000000000..2d5cc1c0b --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_coverage_py.html @@ -0,0 +1,1936 @@ + + + + + + Coverage for tests/test_coverage.py: 100.000% + + + + + + + + + + +
+ Hide keyboard shortcuts +

Hot-keys on this page

+
+

+ r + m + x + p   toggle line displays +

+

+ j + k   next/prev highlighted chunk +

+

+ 0   (zero) top of page +

+

+ 1   (one) first highlighted chunk +

+
+
+
+

1# coding: utf-8 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Tests for coverage.py.""" 

+

6 

+

7import pytest 

+

8 

+

9import coverage 

+

10from coverage import env 

+

11from coverage.misc import CoverageException 

+

12 

+

13from tests.coveragetest import CoverageTest 

+

14 

+

15 

+

16class TestCoverageTest(CoverageTest): 

+

17 """Make sure our complex self.check_coverage method works.""" 

+

18 

+

19 def test_successful_coverage(self): 

+

20 # The simplest run possible. 

+

21 self.check_coverage("""\ 

+

22 a = 1 

+

23 b = 2 

+

24 """, 

+

25 [1,2] 

+

26 ) 

+

27 # You can provide a list of possible statement matches. 

+

28 self.check_coverage("""\ 

+

29 a = 1 

+

30 b = 2 

+

31 """, 

+

32 ([100], [1,2], [1723,47]), 

+

33 ) 

+

34 # You can specify missing lines. 

+

35 self.check_coverage("""\ 

+

36 a = 1 

+

37 if a == 2: 

+

38 a = 3 

+

39 """, 

+

40 [1,2,3], 

+

41 missing="3", 

+

42 ) 

+

43 # You can specify a list of possible missing lines. 

+

44 self.check_coverage("""\ 

+

45 a = 1 

+

46 if a == 2: 

+

47 a = 3 

+

48 """, 

+

49 [1,2,3], 

+

50 missing=("47-49", "3", "100,102") 

+

51 ) 

+

52 

+

53 def test_failed_coverage(self): 

+

54 # If the lines are wrong, the message shows right and wrong. 

+

55 with pytest.raises(AssertionError, match=r"\[1, 2] != \[1]"): 

+

56 self.check_coverage("""\ 

+

57 a = 1 

+

58 b = 2 

+

59 """, 

+

60 [1] 

+

61 ) 

+

62 # If the list of lines possibilities is wrong, the msg shows right. 

+

63 msg = r"None of the lines choices matched \[1, 2]" 

+

64 with pytest.raises(AssertionError, match=msg): 

+

65 self.check_coverage("""\ 

+

66 a = 1 

+

67 b = 2 

+

68 """, 

+

69 ([1], [2]) 

+

70 ) 

+

71 # If the missing lines are wrong, the message shows right and wrong. 

+

72 with pytest.raises(AssertionError, match=r"'3' != '37'"): 

+

73 self.check_coverage("""\ 

+

74 a = 1 

+

75 if a == 2: 

+

76 a = 3 

+

77 """, 

+

78 [1,2,3], 

+

79 missing="37", 

+

80 ) 

+

81 # If the missing lines possibilities are wrong, the msg shows right. 

+

82 msg = r"None of the missing choices matched '3'" 

+

83 with pytest.raises(AssertionError, match=msg): 

+

84 self.check_coverage("""\ 

+

85 a = 1 

+

86 if a == 2: 

+

87 a = 3 

+

88 """, 

+

89 [1,2,3], 

+

90 missing=("37", "4-10"), 

+

91 ) 

+

92 

+

93 def test_exceptions_really_fail(self): 

+

94 # An assert in the checked code will really raise up to us. 

+

95 with pytest.raises(AssertionError, match="This is bad"): 

+

96 self.check_coverage("""\ 

+

97 a = 1 

+

98 assert a == 99, "This is bad" 

+

99 """ 

+

100 ) 

+

101 # Other exceptions too. 

+

102 with pytest.raises(ZeroDivisionError, match="division"): 

+

103 self.check_coverage("""\ 

+

104 a = 1 

+

105 assert a == 1, "This is good" 

+

106 a/0 

+

107 """ 

+

108 ) 

+

109 

+

110 

+

111class BasicCoverageTest(CoverageTest): 

+

112 """The simplest tests, for quick smoke testing of fundamental changes.""" 

+

113 

+

114 def test_simple(self): 

+

115 self.check_coverage("""\ 

+

116 a = 1 

+

117 b = 2 

+

118 

+

119 c = 4 

+

120 # Nothing here 

+

121 d = 6 

+

122 """, 

+

123 [1,2,4,6], report="4 0 0 0 100%") 

+

124 

+

125 def test_indentation_wackiness(self): 

+

126 # Partial final lines are OK. 

+

127 self.check_coverage("""\ 

+

128 import sys 

+

129 if not sys.path: 

+

130 a = 1 

+

131 """, # indented last line 

+

132 [1,2,3], "3") 

+

133 

+

134 def test_multiline_initializer(self): 

+

135 self.check_coverage("""\ 

+

136 d = { 

+

137 'foo': 1+2, 

+

138 'bar': (lambda x: x+1)(1), 

+

139 'baz': str(1), 

+

140 } 

+

141 

+

142 e = { 'foo': 1, 'bar': 2 } 

+

143 """, 

+

144 [1,7], "") 

+

145 

+

146 def test_list_comprehension(self): 

+

147 self.check_coverage("""\ 

+

148 l = [ 

+

149 2*i for i in range(10) 

+

150 if i > 5 

+

151 ] 

+

152 assert l == [12, 14, 16, 18] 

+

153 """, 

+

154 [1,5], "") 

+

155 

+

156 

+

157class SimpleStatementTest(CoverageTest): 

+

158 """Testing simple single-line statements.""" 

+

159 

+

160 def test_expression(self): 

+

161 # Bare expressions as statements are tricky: some implementations 

+

162 # optimize some of them away. All implementations seem to count 

+

163 # the implicit return at the end as executable. 

+

164 self.check_coverage("""\ 

+

165 12 

+

166 23 

+

167 """, 

+

168 ([1,2],[2]), "") 

+

169 self.check_coverage("""\ 

+

170 12 

+

171 23 

+

172 a = 3 

+

173 """, 

+

174 ([1,2,3],[3]), "") 

+

175 self.check_coverage("""\ 

+

176 1 + 2 

+

177 1 + \\ 

+

178 2 

+

179 """, 

+

180 ([1,2], [2]), "") 

+

181 self.check_coverage("""\ 

+

182 1 + 2 

+

183 1 + \\ 

+

184 2 

+

185 a = 4 

+

186 """, 

+

187 ([1,2,4], [4]), "") 

+

188 

+

189 def test_assert(self): 

+

190 self.check_coverage("""\ 

+

191 assert (1 + 2) 

+

192 assert (1 + 

+

193 2) 

+

194 assert (1 + 2), 'the universe is broken' 

+

195 assert (1 + 

+

196 2), \\ 

+

197 'something is amiss' 

+

198 """, 

+

199 [1,2,4,5], "") 

+

200 

+

201 def test_assignment(self): 

+

202 # Simple variable assignment 

+

203 self.check_coverage("""\ 

+

204 a = (1 + 2) 

+

205 b = (1 + 

+

206 2) 

+

207 c = \\ 

+

208 1 

+

209 """, 

+

210 [1,2,4], "") 

+

211 

+

212 def test_assign_tuple(self): 

+

213 self.check_coverage("""\ 

+

214 a = 1 

+

215 a,b,c = 7,8,9 

+

216 assert a == 7 and b == 8 and c == 9 

+

217 """, 

+

218 [1,2,3], "") 

+

219 

+

220 def test_more_assignments(self): 

+

221 self.check_coverage("""\ 

+

222 x = [] 

+

223 d = {} 

+

224 d[ 

+

225 4 + len(x) 

+

226 + 5 

+

227 ] = \\ 

+

228 d[ 

+

229 8 ** 2 

+

230 ] = \\ 

+

231 9 

+

232 """, 

+

233 [1, 2, 3], "") 

+

234 

+

235 def test_attribute_assignment(self): 

+

236 # Attribute assignment 

+

237 self.check_coverage("""\ 

+

238 class obj: pass 

+

239 o = obj() 

+

240 o.foo = (1 + 2) 

+

241 o.foo = (1 + 

+

242 2) 

+

243 o.foo = \\ 

+

244 1 

+

245 """, 

+

246 [1,2,3,4,6], "") 

+

247 

+

248 def test_list_of_attribute_assignment(self): 

+

249 self.check_coverage("""\ 

+

250 class obj: pass 

+

251 o = obj() 

+

252 o.a, o.b = (1 + 2), 3 

+

253 o.a, o.b = (1 + 

+

254 2), (3 + 

+

255 4) 

+

256 o.a, o.b = \\ 

+

257 1, \\ 

+

258 2 

+

259 """, 

+

260 [1,2,3,4,7], "") 

+

261 

+

262 def test_augmented_assignment(self): 

+

263 self.check_coverage("""\ 

+

264 a = 1 

+

265 a += 1 

+

266 a += (1 + 

+

267 2) 

+

268 a += \\ 

+

269 1 

+

270 """, 

+

271 [1,2,3,5], "") 

+

272 

+

273 def test_triple_string_stuff(self): 

+

274 self.check_coverage("""\ 

+

275 a = ''' 

+

276 a multiline 

+

277 string. 

+

278 ''' 

+

279 b = ''' 

+

280 long expression 

+

281 ''' + ''' 

+

282 on many 

+

283 lines. 

+

284 ''' 

+

285 c = len(''' 

+

286 long expression 

+

287 ''' + 

+

288 ''' 

+

289 on many 

+

290 lines. 

+

291 ''') 

+

292 """, 

+

293 [1,5,11], "") 

+

294 

+

295 def test_pass(self): 

+

296 # pass is tricky: if it's the only statement in a block, then it is 

+

297 # "executed". But if it is not the only statement, then it is not. 

+

298 self.check_coverage("""\ 

+

299 if 1==1: 

+

300 pass 

+

301 """, 

+

302 [1,2], "") 

+

303 self.check_coverage("""\ 

+

304 def foo(): 

+

305 pass 

+

306 foo() 

+

307 """, 

+

308 [1,2,3], "") 

+

309 self.check_coverage("""\ 

+

310 def foo(): 

+

311 "doc" 

+

312 pass 

+

313 foo() 

+

314 """, 

+

315 ([1,3,4], [1,4]), "") 

+

316 self.check_coverage("""\ 

+

317 class Foo: 

+

318 def foo(self): 

+

319 pass 

+

320 Foo().foo() 

+

321 """, 

+

322 [1,2,3,4], "") 

+

323 self.check_coverage("""\ 

+

324 class Foo: 

+

325 def foo(self): 

+

326 "Huh?" 

+

327 pass 

+

328 Foo().foo() 

+

329 """, 

+

330 ([1,2,4,5], [1,2,5]), "") 

+

331 

+

332 def test_del(self): 

+

333 self.check_coverage("""\ 

+

334 d = { 'a': 1, 'b': 1, 'c': 1, 'd': 1, 'e': 1 } 

+

335 del d['a'] 

+

336 del d[ 

+

337 'b' 

+

338 ] 

+

339 del d['c'], \\ 

+

340 d['d'], \\ 

+

341 d['e'] 

+

342 assert(len(d.keys()) == 0) 

+

343 """, 

+

344 [1,2,3,6,9], "") 

+

345 

+

346 @pytest.mark.skipif(env.PY3, reason="No more print statement in Python 3.") 

+

347 def test_print(self): 

+

348 self.check_coverage("""\ 

+

349 print "hello, world!" 

+

350 print ("hey: %d" % 

+

351 17) 

+

352 print "goodbye" 

+

353 print "hello, world!", 

+

354 print ("hey: %d" % 

+

355 17), 

+

356 print "goodbye", 

+

357 """, 

+

358 [1,2,4,5,6,8], "") 

+

359 

+

360 def test_raise(self): 

+

361 self.check_coverage("""\ 

+

362 try: 

+

363 raise Exception( 

+

364 "hello %d" % 

+

365 17) 

+

366 except: 

+

367 pass 

+

368 """, 

+

369 [1,2,5,6], "") 

+

370 

+

371 def test_raise_followed_by_statement(self): 

+

372 if env.PYBEHAVIOR.omit_after_jump: 

+

373 lines = [1,2,4,5] 

+

374 missing = "" 

+

375 else: 

+

376 lines = [1,2,3,4,5] 

+

377 missing = "3" 

+

378 self.check_coverage("""\ 

+

379 try: 

+

380 raise Exception("hello") 

+

381 a = 3 

+

382 except: 

+

383 pass 

+

384 """, 

+

385 lines=lines, missing=missing) 

+

386 

+

387 def test_return(self): 

+

388 self.check_coverage("""\ 

+

389 def fn(): 

+

390 a = 1 

+

391 return a 

+

392 

+

393 x = fn() 

+

394 assert(x == 1) 

+

395 """, 

+

396 [1,2,3,5,6], "") 

+

397 self.check_coverage("""\ 

+

398 def fn(): 

+

399 a = 1 

+

400 return ( 

+

401 a + 

+

402 1) 

+

403 

+

404 x = fn() 

+

405 assert(x == 2) 

+

406 """, 

+

407 [1,2,3,7,8], "") 

+

408 self.check_coverage("""\ 

+

409 def fn(): 

+

410 a = 1 

+

411 return (a, 

+

412 a + 1, 

+

413 a + 2) 

+

414 

+

415 x,y,z = fn() 

+

416 assert x == 1 and y == 2 and z == 3 

+

417 """, 

+

418 [1,2,3,7,8], "") 

+

419 

+

420 def test_return_followed_by_statement(self): 

+

421 if env.PYBEHAVIOR.omit_after_return: 

+

422 lines = [1,2,3,6,7] 

+

423 missing = "" 

+

424 else: 

+

425 lines = [1,2,3,4,6,7] 

+

426 missing = "4" 

+

427 self.check_coverage("""\ 

+

428 def fn(): 

+

429 a = 2 

+

430 return a 

+

431 a = 4 

+

432 

+

433 x = fn() 

+

434 assert(x == 2) 

+

435 """, 

+

436 lines=lines, missing=missing, 

+

437 ) 

+

438 

+

439 def test_yield(self): 

+

440 self.check_coverage("""\ 

+

441 def gen(): 

+

442 yield 1 

+

443 yield (2+ 

+

444 3+ 

+

445 4) 

+

446 yield 1, \\ 

+

447 2 

+

448 a,b,c = gen() 

+

449 assert a == 1 and b == 9 and c == (1,2) 

+

450 """, 

+

451 [1,2,3,6,8,9], "") 

+

452 

+

453 def test_break(self): 

+

454 if env.PYBEHAVIOR.omit_after_jump: 

+

455 lines = [1,2,3,5] 

+

456 missing = "" 

+

457 else: 

+

458 lines = [1,2,3,4,5] 

+

459 missing = "4" 

+

460 

+

461 self.check_coverage("""\ 

+

462 for x in range(10): 

+

463 a = 2 + x 

+

464 break 

+

465 a = 4 

+

466 assert a == 2 

+

467 """, 

+

468 lines=lines, missing=missing) 

+

469 

+

470 def test_continue(self): 

+

471 if env.PYBEHAVIOR.omit_after_jump: 

+

472 lines = [1,2,3,5] 

+

473 missing = "" 

+

474 else: 

+

475 lines = [1,2,3,4,5] 

+

476 missing = "4" 

+

477 

+

478 self.check_coverage("""\ 

+

479 for x in range(10): 

+

480 a = 2 + x 

+

481 continue 

+

482 a = 4 

+

483 assert a == 11 

+

484 """, 

+

485 lines=lines, missing=missing) 

+

486 

+

487 @pytest.mark.skipif(env.PY2, reason="Expected failure: peephole optimization of jumps to jumps") 

+

488 def test_strange_unexecuted_continue(self): 

+

489 # Peephole optimization of jumps to jumps can mean that some statements 

+

490 # never hit the line tracer. The behavior is different in different 

+

491 # versions of Python, so be careful when running this test. 

+

492 self.check_coverage("""\ 

+

493 a = b = c = 0 

+

494 for n in range(100): 

+

495 if n % 2: 

+

496 if n % 4: 

+

497 a += 1 

+

498 continue # <-- This line may not be hit. 

+

499 else: 

+

500 b += 1 

+

501 c += 1 

+

502 assert a == 50 and b == 50 and c == 50 

+

503 

+

504 a = b = c = 0 

+

505 for n in range(100): 

+

506 if n % 2: 

+

507 if n % 3: 

+

508 a += 1 

+

509 continue # <-- This line is always hit. 

+

510 else: 

+

511 b += 1 

+

512 c += 1 

+

513 assert a == 33 and b == 50 and c == 50 

+

514 """, 

+

515 lines=[1,2,3,4,5,6,8,9,10, 12,13,14,15,16,17,19,20,21], 

+

516 missing=["", "6"], 

+

517 ) 

+

518 

+

519 def test_import(self): 

+

520 self.check_coverage("""\ 

+

521 import string 

+

522 from sys import path 

+

523 a = 1 

+

524 """, 

+

525 [1,2,3], "") 

+

526 self.check_coverage("""\ 

+

527 import string 

+

528 if 1 == 2: 

+

529 from sys import path 

+

530 a = 1 

+

531 """, 

+

532 [1,2,3,4], "3") 

+

533 self.check_coverage("""\ 

+

534 import string, \\ 

+

535 os, \\ 

+

536 re 

+

537 from sys import path, \\ 

+

538 stdout 

+

539 a = 1 

+

540 """, 

+

541 [1,4,6], "") 

+

542 self.check_coverage("""\ 

+

543 import sys, sys as s 

+

544 assert s.path == sys.path 

+

545 """, 

+

546 [1,2], "") 

+

547 self.check_coverage("""\ 

+

548 import sys, \\ 

+

549 sys as s 

+

550 assert s.path == sys.path 

+

551 """, 

+

552 [1,3], "") 

+

553 self.check_coverage("""\ 

+

554 from sys import path, \\ 

+

555 path as p 

+

556 assert p == path 

+

557 """, 

+

558 [1,3], "") 

+

559 self.check_coverage("""\ 

+

560 from sys import \\ 

+

561 * 

+

562 assert len(path) > 0 

+

563 """, 

+

564 [1,3], "") 

+

565 

+

566 def test_global(self): 

+

567 self.check_coverage("""\ 

+

568 g = h = i = 1 

+

569 def fn(): 

+

570 global g 

+

571 global h, \\ 

+

572 i 

+

573 g = h = i = 2 

+

574 fn() 

+

575 assert g == 2 and h == 2 and i == 2 

+

576 """, 

+

577 [1,2,6,7,8], "") 

+

578 self.check_coverage("""\ 

+

579 g = h = i = 1 

+

580 def fn(): 

+

581 global g; g = 2 

+

582 fn() 

+

583 assert g == 2 and h == 1 and i == 1 

+

584 """, 

+

585 [1,2,3,4,5], "") 

+

586 

+

587 def test_exec(self): 

+

588 self.check_coverage("""\ 

+

589 a = b = c = 1 

+

590 exec("a = 2") 

+

591 exec("b = " + 

+

592 "c = " + 

+

593 "2") 

+

594 assert a == 2 and b == 2 and c == 2 

+

595 """, 

+

596 [1,2,3,6], "") 

+

597 self.check_coverage("""\ 

+

598 vars = {'a': 1, 'b': 1, 'c': 1} 

+

599 exec("a = 2", vars) 

+

600 exec("b = " + 

+

601 "c = " + 

+

602 "2", vars) 

+

603 assert vars['a'] == 2 and vars['b'] == 2 and vars['c'] == 2 

+

604 """, 

+

605 [1,2,3,6], "") 

+

606 self.check_coverage("""\ 

+

607 globs = {} 

+

608 locs = {'a': 1, 'b': 1, 'c': 1} 

+

609 exec("a = 2", globs, locs) 

+

610 exec("b = " + 

+

611 "c = " + 

+

612 "2", globs, locs) 

+

613 assert locs['a'] == 2 and locs['b'] == 2 and locs['c'] == 2 

+

614 """, 

+

615 [1,2,3,4,7], "") 

+

616 

+

617 def test_extra_doc_string(self): 

+

618 self.check_coverage("""\ 

+

619 a = 1 

+

620 "An extra docstring, should be a comment." 

+

621 b = 3 

+

622 assert (a,b) == (1,3) 

+

623 """, 

+

624 ([1,3,4], [1,2,3,4]), 

+

625 "", 

+

626 ) 

+

627 self.check_coverage("""\ 

+

628 a = 1 

+

629 "An extra docstring, should be a comment." 

+

630 b = 3 

+

631 123 # A number for some reason: ignored 

+

632 1+1 # An expression: executed. 

+

633 c = 6 

+

634 assert (a,b,c) == (1,3,6) 

+

635 """, 

+

636 ([1,3,6,7], [1,3,5,6,7], [1,3,4,5,6,7], [1,2,3,4,5,6,7]), 

+

637 "", 

+

638 ) 

+

639 

+

640 def test_nonascii(self): 

+

641 self.check_coverage("""\ 

+

642 # coding: utf-8 

+

643 a = 2 

+

644 b = 3 

+

645 """, 

+

646 [2, 3] 

+

647 ) 

+

648 

+

649 def test_module_docstring(self): 

+

650 self.check_coverage("""\ 

+

651 '''I am a module docstring.''' 

+

652 a = 2 

+

653 b = 3 

+

654 """, 

+

655 [2, 3] 

+

656 ) 

+

657 lines = [2, 3, 4] 

+

658 self.check_coverage("""\ 

+

659 # Start with a comment, because it changes the behavior(!?) 

+

660 '''I am a module docstring.''' 

+

661 a = 3 

+

662 b = 4 

+

663 """, 

+

664 lines 

+

665 ) 

+

666 

+

667 

+

668class CompoundStatementTest(CoverageTest): 

+

669 """Testing coverage of multi-line compound statements.""" 

+

670 

+

671 def test_statement_list(self): 

+

672 self.check_coverage("""\ 

+

673 a = 1; 

+

674 b = 2; c = 3 

+

675 d = 4; e = 5; 

+

676 

+

677 assert (a,b,c,d,e) == (1,2,3,4,5) 

+

678 """, 

+

679 [1,2,3,5], "") 

+

680 

+

681 def test_if(self): 

+

682 self.check_coverage("""\ 

+

683 a = 1 

+

684 if a == 1: 

+

685 x = 3 

+

686 assert x == 3 

+

687 if (a == 

+

688 1): 

+

689 x = 7 

+

690 assert x == 7 

+

691 """, 

+

692 [1,2,3,4,5,7,8], "") 

+

693 self.check_coverage("""\ 

+

694 a = 1 

+

695 if a == 1: 

+

696 x = 3 

+

697 else: 

+

698 y = 5 

+

699 assert x == 3 

+

700 """, 

+

701 [1,2,3,5,6], "5") 

+

702 self.check_coverage("""\ 

+

703 a = 1 

+

704 if a != 1: 

+

705 x = 3 

+

706 else: 

+

707 y = 5 

+

708 assert y == 5 

+

709 """, 

+

710 [1,2,3,5,6], "3") 

+

711 self.check_coverage("""\ 

+

712 a = 1; b = 2 

+

713 if a == 1: 

+

714 if b == 2: 

+

715 x = 4 

+

716 else: 

+

717 y = 6 

+

718 else: 

+

719 z = 8 

+

720 assert x == 4 

+

721 """, 

+

722 [1,2,3,4,6,8,9], "6-8") 

+

723 

+

724 def test_elif(self): 

+

725 self.check_coverage("""\ 

+

726 a = 1; b = 2; c = 3; 

+

727 if a == 1: 

+

728 x = 3 

+

729 elif b == 2: 

+

730 y = 5 

+

731 else: 

+

732 z = 7 

+

733 assert x == 3 

+

734 """, 

+

735 [1,2,3,4,5,7,8], "4-7", report="7 3 4 1 45% 4-7", 

+

736 ) 

+

737 self.check_coverage("""\ 

+

738 a = 1; b = 2; c = 3; 

+

739 if a != 1: 

+

740 x = 3 

+

741 elif b == 2: 

+

742 y = 5 

+

743 else: 

+

744 z = 7 

+

745 assert y == 5 

+

746 """, 

+

747 [1,2,3,4,5,7,8], "3, 7", report="7 2 4 2 64% 3, 7", 

+

748 ) 

+

749 self.check_coverage("""\ 

+

750 a = 1; b = 2; c = 3; 

+

751 if a != 1: 

+

752 x = 3 

+

753 elif b != 2: 

+

754 y = 5 

+

755 else: 

+

756 z = 7 

+

757 assert z == 7 

+

758 """, 

+

759 [1,2,3,4,5,7,8], "3, 5", report="7 2 4 2 64% 3, 5", 

+

760 ) 

+

761 

+

762 def test_elif_no_else(self): 

+

763 self.check_coverage("""\ 

+

764 a = 1; b = 2; c = 3; 

+

765 if a == 1: 

+

766 x = 3 

+

767 elif b == 2: 

+

768 y = 5 

+

769 assert x == 3 

+

770 """, 

+

771 [1,2,3,4,5,6], "4-5", report="6 2 4 1 50% 4-5", 

+

772 ) 

+

773 self.check_coverage("""\ 

+

774 a = 1; b = 2; c = 3; 

+

775 if a != 1: 

+

776 x = 3 

+

777 elif b == 2: 

+

778 y = 5 

+

779 assert y == 5 

+

780 """, 

+

781 [1,2,3,4,5,6], "3", report="6 1 4 2 70% 3, 4->6", 

+

782 ) 

+

783 

+

784 def test_elif_bizarre(self): 

+

785 self.check_coverage("""\ 

+

786 def f(self): 

+

787 if self==1: 

+

788 x = 3 

+

789 elif self.m('fred'): 

+

790 x = 5 

+

791 elif (g==1) and (b==2): 

+

792 x = 7 

+

793 elif self.m('fred')==True: 

+

794 x = 9 

+

795 elif ((g==1) and (b==2))==True: 

+

796 x = 11 

+

797 else: 

+

798 x = 13 

+

799 """, 

+

800 [1,2,3,4,5,6,7,8,9,10,11,13], "2-13") 

+

801 

+

802 def test_split_if(self): 

+

803 self.check_coverage("""\ 

+

804 a = 1; b = 2; c = 3; 

+

805 if \\ 

+

806 a == 1: 

+

807 x = 3 

+

808 elif \\ 

+

809 b == 2: 

+

810 y = 5 

+

811 else: 

+

812 z = 7 

+

813 assert x == 3 

+

814 """, 

+

815 [1,2,4,5,7,9,10], "5-9") 

+

816 self.check_coverage("""\ 

+

817 a = 1; b = 2; c = 3; 

+

818 if \\ 

+

819 a != 1: 

+

820 x = 3 

+

821 elif \\ 

+

822 b == 2: 

+

823 y = 5 

+

824 else: 

+

825 z = 7 

+

826 assert y == 5 

+

827 """, 

+

828 [1,2,4,5,7,9,10], "4, 9") 

+

829 self.check_coverage("""\ 

+

830 a = 1; b = 2; c = 3; 

+

831 if \\ 

+

832 a != 1: 

+

833 x = 3 

+

834 elif \\ 

+

835 b != 2: 

+

836 y = 5 

+

837 else: 

+

838 z = 7 

+

839 assert z == 7 

+

840 """, 

+

841 [1,2,4,5,7,9,10], "4, 7") 

+

842 

+

843 def test_pathological_split_if(self): 

+

844 self.check_coverage("""\ 

+

845 a = 1; b = 2; c = 3; 

+

846 if ( 

+

847 a == 1 

+

848 ): 

+

849 x = 3 

+

850 elif ( 

+

851 b == 2 

+

852 ): 

+

853 y = 5 

+

854 else: 

+

855 z = 7 

+

856 assert x == 3 

+

857 """, 

+

858 [1,2,5,6,9,11,12], "6-11") 

+

859 self.check_coverage("""\ 

+

860 a = 1; b = 2; c = 3; 

+

861 if ( 

+

862 a != 1 

+

863 ): 

+

864 x = 3 

+

865 elif ( 

+

866 b == 2 

+

867 ): 

+

868 y = 5 

+

869 else: 

+

870 z = 7 

+

871 assert y == 5 

+

872 """, 

+

873 [1,2,5,6,9,11,12], "5, 11") 

+

874 self.check_coverage("""\ 

+

875 a = 1; b = 2; c = 3; 

+

876 if ( 

+

877 a != 1 

+

878 ): 

+

879 x = 3 

+

880 elif ( 

+

881 b != 2 

+

882 ): 

+

883 y = 5 

+

884 else: 

+

885 z = 7 

+

886 assert z == 7 

+

887 """, 

+

888 [1,2,5,6,9,11,12], "5, 9") 

+

889 

+

890 def test_absurd_split_if(self): 

+

891 self.check_coverage("""\ 

+

892 a = 1; b = 2; c = 3; 

+

893 if a == 1 \\ 

+

894 : 

+

895 x = 3 

+

896 elif b == 2 \\ 

+

897 : 

+

898 y = 5 

+

899 else: 

+

900 z = 7 

+

901 assert x == 3 

+

902 """, 

+

903 [1,2,4,5,7,9,10], "5-9") 

+

904 self.check_coverage("""\ 

+

905 a = 1; b = 2; c = 3; 

+

906 if a != 1 \\ 

+

907 : 

+

908 x = 3 

+

909 elif b == 2 \\ 

+

910 : 

+

911 y = 5 

+

912 else: 

+

913 z = 7 

+

914 assert y == 5 

+

915 """, 

+

916 [1,2,4,5,7,9,10], "4, 9") 

+

917 self.check_coverage("""\ 

+

918 a = 1; b = 2; c = 3; 

+

919 if a != 1 \\ 

+

920 : 

+

921 x = 3 

+

922 elif b != 2 \\ 

+

923 : 

+

924 y = 5 

+

925 else: 

+

926 z = 7 

+

927 assert z == 7 

+

928 """, 

+

929 [1,2,4,5,7,9,10], "4, 7") 

+

930 

+

931 def test_constant_if(self): 

+

932 if env.PYBEHAVIOR.keep_constant_test: 

+

933 lines = [1, 2, 3] 

+

934 else: 

+

935 lines = [2, 3] 

+

936 self.check_coverage("""\ 

+

937 if 1: 

+

938 a = 2 

+

939 assert a == 2 

+

940 """, 

+

941 lines, 

+

942 "", 

+

943 ) 

+

944 

+

945 def test_while(self): 

+

946 self.check_coverage("""\ 

+

947 a = 3; b = 0 

+

948 while a: 

+

949 b += 1 

+

950 a -= 1 

+

951 assert a == 0 and b == 3 

+

952 """, 

+

953 [1,2,3,4,5], "") 

+

954 self.check_coverage("""\ 

+

955 a = 3; b = 0 

+

956 while a: 

+

957 b += 1 

+

958 break 

+

959 assert a == 3 and b == 1 

+

960 """, 

+

961 [1,2,3,4,5], "") 

+

962 

+

963 def test_while_else(self): 

+

964 # Take the else branch. 

+

965 self.check_coverage("""\ 

+

966 a = 3; b = 0 

+

967 while a: 

+

968 b += 1 

+

969 a -= 1 

+

970 else: 

+

971 b = 99 

+

972 assert a == 0 and b == 99 

+

973 """, 

+

974 [1,2,3,4,6,7], "") 

+

975 # Don't take the else branch. 

+

976 self.check_coverage("""\ 

+

977 a = 3; b = 0 

+

978 while a: 

+

979 b += 1 

+

980 a -= 1 

+

981 break 

+

982 else: 

+

983 b = 99 

+

984 assert a == 2 and b == 1 

+

985 """, 

+

986 [1,2,3,4,5,7,8], "7") 

+

987 

+

988 def test_split_while(self): 

+

989 self.check_coverage("""\ 

+

990 a = 3; b = 0 

+

991 while \\ 

+

992 a: 

+

993 b += 1 

+

994 a -= 1 

+

995 assert a == 0 and b == 3 

+

996 """, 

+

997 [1,2,4,5,6], "") 

+

998 self.check_coverage("""\ 

+

999 a = 3; b = 0 

+

1000 while ( 

+

1001 a 

+

1002 ): 

+

1003 b += 1 

+

1004 a -= 1 

+

1005 assert a == 0 and b == 3 

+

1006 """, 

+

1007 [1,2,5,6,7], "") 

+

1008 

+

1009 def test_for(self): 

+

1010 self.check_coverage("""\ 

+

1011 a = 0 

+

1012 for i in [1,2,3,4,5]: 

+

1013 a += i 

+

1014 assert a == 15 

+

1015 """, 

+

1016 [1,2,3,4], "") 

+

1017 self.check_coverage("""\ 

+

1018 a = 0 

+

1019 for i in [1, 

+

1020 2,3,4, 

+

1021 5]: 

+

1022 a += i 

+

1023 assert a == 15 

+

1024 """, 

+

1025 [1,2,5,6], "") 

+

1026 self.check_coverage("""\ 

+

1027 a = 0 

+

1028 for i in [1,2,3,4,5]: 

+

1029 a += i 

+

1030 break 

+

1031 assert a == 1 

+

1032 """, 

+

1033 [1,2,3,4,5], "") 

+

1034 

+

1035 def test_for_else(self): 

+

1036 self.check_coverage("""\ 

+

1037 a = 0 

+

1038 for i in range(5): 

+

1039 a += i+1 

+

1040 else: 

+

1041 a = 99 

+

1042 assert a == 99 

+

1043 """, 

+

1044 [1,2,3,5,6], "") 

+

1045 self.check_coverage("""\ 

+

1046 a = 0 

+

1047 for i in range(5): 

+

1048 a += i+1 

+

1049 break 

+

1050 else: 

+

1051 a = 123 

+

1052 assert a == 1 

+

1053 """, 

+

1054 [1,2,3,4,6,7], "6") 

+

1055 

+

1056 def test_split_for(self): 

+

1057 self.check_coverage("""\ 

+

1058 a = 0 

+

1059 for \\ 

+

1060 i in [1,2,3,4,5]: 

+

1061 a += i 

+

1062 assert a == 15 

+

1063 """, 

+

1064 [1,2,4,5], "") 

+

1065 self.check_coverage("""\ 

+

1066 a = 0 

+

1067 for \\ 

+

1068 i in [1, 

+

1069 2,3,4, 

+

1070 5]: 

+

1071 a += i 

+

1072 assert a == 15 

+

1073 """, 

+

1074 [1,2,6,7], "") 

+

1075 

+

1076 def test_try_except(self): 

+

1077 self.check_coverage("""\ 

+

1078 a = 0 

+

1079 try: 

+

1080 a = 1 

+

1081 except: 

+

1082 a = 99 

+

1083 assert a == 1 

+

1084 """, 

+

1085 [1,2,3,4,5,6], "4-5") 

+

1086 self.check_coverage("""\ 

+

1087 a = 0 

+

1088 try: 

+

1089 a = 1 

+

1090 raise Exception("foo") 

+

1091 except: 

+

1092 a = 99 

+

1093 assert a == 99 

+

1094 """, 

+

1095 [1,2,3,4,5,6,7], "") 

+

1096 self.check_coverage("""\ 

+

1097 a = 0 

+

1098 try: 

+

1099 a = 1 

+

1100 raise Exception("foo") 

+

1101 except ImportError: 

+

1102 a = 99 

+

1103 except: 

+

1104 a = 123 

+

1105 assert a == 123 

+

1106 """, 

+

1107 [1,2,3,4,5,6,7,8,9], "6") 

+

1108 self.check_coverage("""\ 

+

1109 a = 0 

+

1110 try: 

+

1111 a = 1 

+

1112 raise IOError("foo") 

+

1113 except ImportError: 

+

1114 a = 99 

+

1115 except IOError: 

+

1116 a = 17 

+

1117 except: 

+

1118 a = 123 

+

1119 assert a == 17 

+

1120 """, 

+

1121 [1,2,3,4,5,6,7,8,9,10,11], "6, 9-10") 

+

1122 self.check_coverage("""\ 

+

1123 a = 0 

+

1124 try: 

+

1125 a = 1 

+

1126 except: 

+

1127 a = 99 

+

1128 else: 

+

1129 a = 123 

+

1130 assert a == 123 

+

1131 """, 

+

1132 [1,2,3,4,5,7,8], "4-5", 

+

1133 arcz=".1 12 23 45 58 37 78 8.", 

+

1134 arcz_missing="45 58", 

+

1135 ) 

+

1136 

+

1137 def test_try_except_stranded_else(self): 

+

1138 if env.PYBEHAVIOR.omit_after_jump: 

+

1139 # The else can't be reached because the try ends with a raise. 

+

1140 lines = [1,2,3,4,5,6,9] 

+

1141 missing = "" 

+

1142 arcz = ".1 12 23 34 45 56 69 9." 

+

1143 arcz_missing = "" 

+

1144 else: 

+

1145 lines = [1,2,3,4,5,6,8,9] 

+

1146 missing = "8" 

+

1147 arcz = ".1 12 23 34 45 56 69 89 9." 

+

1148 arcz_missing = "89" 

+

1149 self.check_coverage("""\ 

+

1150 a = 0 

+

1151 try: 

+

1152 a = 1 

+

1153 raise Exception("foo") 

+

1154 except: 

+

1155 a = 99 

+

1156 else: 

+

1157 a = 123 

+

1158 assert a == 99 

+

1159 """, 

+

1160 lines=lines, 

+

1161 missing=missing, 

+

1162 arcz=arcz, 

+

1163 arcz_missing=arcz_missing, 

+

1164 ) 

+

1165 

+

1166 def test_try_finally(self): 

+

1167 self.check_coverage("""\ 

+

1168 a = 0 

+

1169 try: 

+

1170 a = 1 

+

1171 finally: 

+

1172 a = 99 

+

1173 assert a == 99 

+

1174 """, 

+

1175 [1,2,3,5,6], "") 

+

1176 self.check_coverage("""\ 

+

1177 a = 0; b = 0 

+

1178 try: 

+

1179 a = 1 

+

1180 try: 

+

1181 raise Exception("foo") 

+

1182 finally: 

+

1183 b = 123 

+

1184 except: 

+

1185 a = 99 

+

1186 assert a == 99 and b == 123 

+

1187 """, 

+

1188 [1,2,3,4,5,7,8,9,10], "") 

+

1189 

+

1190 def test_function_def(self): 

+

1191 self.check_coverage("""\ 

+

1192 a = 99 

+

1193 def foo(): 

+

1194 ''' docstring 

+

1195 ''' 

+

1196 return 1 

+

1197 

+

1198 a = foo() 

+

1199 assert a == 1 

+

1200 """, 

+

1201 [1,2,5,7,8], "") 

+

1202 self.check_coverage("""\ 

+

1203 def foo( 

+

1204 a, 

+

1205 b 

+

1206 ): 

+

1207 ''' docstring 

+

1208 ''' 

+

1209 return a+b 

+

1210 

+

1211 x = foo(17, 23) 

+

1212 assert x == 40 

+

1213 """, 

+

1214 [1,7,9,10], "") 

+

1215 self.check_coverage("""\ 

+

1216 def foo( 

+

1217 a = (lambda x: x*2)(10), 

+

1218 b = ( 

+

1219 lambda x: 

+

1220 x+1 

+

1221 )(1) 

+

1222 ): 

+

1223 ''' docstring 

+

1224 ''' 

+

1225 return a+b 

+

1226 

+

1227 x = foo() 

+

1228 assert x == 22 

+

1229 """, 

+

1230 [1,10,12,13], "") 

+

1231 

+

1232 def test_class_def(self): 

+

1233 arcz="-22 2D DE E-2 23 36 6A A-2 -68 8-6 -AB B-A" 

+

1234 self.check_coverage("""\ 

+

1235 # A comment. 

+

1236 class theClass: 

+

1237 ''' the docstring. 

+

1238 Don't be fooled. 

+

1239 ''' 

+

1240 def __init__(self): 

+

1241 ''' Another docstring. ''' 

+

1242 self.a = 1 

+

1243 

+

1244 def foo(self): 

+

1245 return self.a 

+

1246 

+

1247 x = theClass().foo() 

+

1248 assert x == 1 

+

1249 """, 

+

1250 [2, 6, 8, 10, 11, 13, 14], "", 

+

1251 arcz=arcz, 

+

1252 ) 

+

1253 

+

1254 

+

1255class ExcludeTest(CoverageTest): 

+

1256 """Tests of the exclusion feature to mark lines as not covered.""" 

+

1257 

+

1258 def test_default(self): 

+

1259 # A number of forms of pragma comment are accepted. 

+

1260 self.check_coverage("""\ 

+

1261 a = 1 

+

1262 b = 2 # pragma: no cover 

+

1263 c = 3 

+

1264 d = 4 #pragma NOCOVER 

+

1265 e = 5 

+

1266 f = 6#\tpragma:\tno cover 

+

1267 g = 7 

+

1268 """, 

+

1269 [1,3,5,7] 

+

1270 ) 

+

1271 

+

1272 def test_simple(self): 

+

1273 self.check_coverage("""\ 

+

1274 a = 1; b = 2 

+

1275 

+

1276 if len([]): 

+

1277 a = 4 # -cc 

+

1278 """, 

+

1279 [1,3], "", excludes=['-cc']) 

+

1280 

+

1281 def test_two_excludes(self): 

+

1282 self.check_coverage("""\ 

+

1283 a = 1; b = 2 

+

1284 

+

1285 if a == 99: 

+

1286 a = 4 # -cc 

+

1287 b = 5 

+

1288 c = 6 # -xx 

+

1289 assert a == 1 and b == 2 

+

1290 """, 

+

1291 [1,3,5,7], "5", excludes=['-cc', '-xx']) 

+

1292 

+

1293 def test_excluding_if_suite(self): 

+

1294 self.check_coverage("""\ 

+

1295 a = 1; b = 2 

+

1296 

+

1297 if len([]): # not-here 

+

1298 a = 4 

+

1299 b = 5 

+

1300 c = 6 

+

1301 assert a == 1 and b == 2 

+

1302 """, 

+

1303 [1,7], "", excludes=['not-here']) 

+

1304 

+

1305 def test_excluding_if_but_not_else_suite(self): 

+

1306 self.check_coverage("""\ 

+

1307 a = 1; b = 2 

+

1308 

+

1309 if len([]): # not-here 

+

1310 a = 4 

+

1311 b = 5 

+

1312 c = 6 

+

1313 else: 

+

1314 a = 8 

+

1315 b = 9 

+

1316 assert a == 8 and b == 9 

+

1317 """, 

+

1318 [1,8,9,10], "", excludes=['not-here']) 

+

1319 

+

1320 def test_excluding_else_suite(self): 

+

1321 self.check_coverage("""\ 

+

1322 a = 1; b = 2 

+

1323 

+

1324 if 1==1: 

+

1325 a = 4 

+

1326 b = 5 

+

1327 c = 6 

+

1328 else: #pragma: NO COVER 

+

1329 a = 8 

+

1330 b = 9 

+

1331 assert a == 4 and b == 5 and c == 6 

+

1332 """, 

+

1333 [1,3,4,5,6,10], "", excludes=['#pragma: NO COVER']) 

+

1334 self.check_coverage("""\ 

+

1335 a = 1; b = 2 

+

1336 

+

1337 if 1==1: 

+

1338 a = 4 

+

1339 b = 5 

+

1340 c = 6 

+

1341 

+

1342 # Lots of comments to confuse the else handler. 

+

1343 # more. 

+

1344 

+

1345 else: #pragma: NO COVER 

+

1346 

+

1347 # Comments here too. 

+

1348 

+

1349 a = 8 

+

1350 b = 9 

+

1351 assert a == 4 and b == 5 and c == 6 

+

1352 """, 

+

1353 [1,3,4,5,6,17], "", excludes=['#pragma: NO COVER']) 

+

1354 

+

1355 def test_excluding_elif_suites(self): 

+

1356 self.check_coverage("""\ 

+

1357 a = 1; b = 2 

+

1358 

+

1359 if 1==1: 

+

1360 a = 4 

+

1361 b = 5 

+

1362 c = 6 

+

1363 elif 1==0: #pragma: NO COVER 

+

1364 a = 8 

+

1365 b = 9 

+

1366 else: 

+

1367 a = 11 

+

1368 b = 12 

+

1369 assert a == 4 and b == 5 and c == 6 

+

1370 """, 

+

1371 [1,3,4,5,6,11,12,13], "11-12", excludes=['#pragma: NO COVER']) 

+

1372 

+

1373 def test_excluding_oneline_if(self): 

+

1374 self.check_coverage("""\ 

+

1375 def foo(): 

+

1376 a = 2 

+

1377 if len([]): x = 3 # no cover 

+

1378 b = 4 

+

1379 

+

1380 foo() 

+

1381 """, 

+

1382 [1,2,4,6], "", excludes=["no cover"]) 

+

1383 

+

1384 def test_excluding_a_colon_not_a_suite(self): 

+

1385 self.check_coverage("""\ 

+

1386 def foo(): 

+

1387 l = list(range(10)) 

+

1388 a = l[:3] # no cover 

+

1389 b = 4 

+

1390 

+

1391 foo() 

+

1392 """, 

+

1393 [1,2,4,6], "", excludes=["no cover"]) 

+

1394 

+

1395 def test_excluding_for_suite(self): 

+

1396 self.check_coverage("""\ 

+

1397 a = 0 

+

1398 for i in [1,2,3,4,5]: #pragma: NO COVER 

+

1399 a += i 

+

1400 assert a == 15 

+

1401 """, 

+

1402 [1,4], "", excludes=['#pragma: NO COVER']) 

+

1403 self.check_coverage("""\ 

+

1404 a = 0 

+

1405 for i in [1, 

+

1406 2,3,4, 

+

1407 5]: #pragma: NO COVER 

+

1408 a += i 

+

1409 assert a == 15 

+

1410 """, 

+

1411 [1,6], "", excludes=['#pragma: NO COVER']) 

+

1412 self.check_coverage("""\ 

+

1413 a = 0 

+

1414 for i in [1,2,3,4,5 

+

1415 ]: #pragma: NO COVER 

+

1416 a += i 

+

1417 break 

+

1418 a = 99 

+

1419 assert a == 1 

+

1420 """, 

+

1421 [1,7], "", excludes=['#pragma: NO COVER']) 

+

1422 

+

1423 def test_excluding_for_else(self): 

+

1424 self.check_coverage("""\ 

+

1425 a = 0 

+

1426 for i in range(5): 

+

1427 a += i+1 

+

1428 break 

+

1429 else: #pragma: NO COVER 

+

1430 a = 123 

+

1431 assert a == 1 

+

1432 """, 

+

1433 [1,2,3,4,7], "", excludes=['#pragma: NO COVER']) 

+

1434 

+

1435 def test_excluding_while(self): 

+

1436 self.check_coverage("""\ 

+

1437 a = 3; b = 0 

+

1438 while a*b: #pragma: NO COVER 

+

1439 b += 1 

+

1440 break 

+

1441 assert a == 3 and b == 0 

+

1442 """, 

+

1443 [1,5], "", excludes=['#pragma: NO COVER']) 

+

1444 self.check_coverage("""\ 

+

1445 a = 3; b = 0 

+

1446 while ( 

+

1447 a*b 

+

1448 ): #pragma: NO COVER 

+

1449 b += 1 

+

1450 break 

+

1451 assert a == 3 and b == 0 

+

1452 """, 

+

1453 [1,7], "", excludes=['#pragma: NO COVER']) 

+

1454 

+

1455 def test_excluding_while_else(self): 

+

1456 self.check_coverage("""\ 

+

1457 a = 3; b = 0 

+

1458 while a: 

+

1459 b += 1 

+

1460 break 

+

1461 else: #pragma: NO COVER 

+

1462 b = 123 

+

1463 assert a == 3 and b == 1 

+

1464 """, 

+

1465 [1,2,3,4,7], "", excludes=['#pragma: NO COVER']) 

+

1466 

+

1467 def test_excluding_try_except(self): 

+

1468 self.check_coverage("""\ 

+

1469 a = 0 

+

1470 try: 

+

1471 a = 1 

+

1472 except: #pragma: NO COVER 

+

1473 a = 99 

+

1474 assert a == 1 

+

1475 """, 

+

1476 [1,2,3,6], "", excludes=['#pragma: NO COVER']) 

+

1477 self.check_coverage("""\ 

+

1478 a = 0 

+

1479 try: 

+

1480 a = 1 

+

1481 raise Exception("foo") 

+

1482 except: 

+

1483 a = 99 

+

1484 assert a == 99 

+

1485 """, 

+

1486 [1,2,3,4,5,6,7], "", excludes=['#pragma: NO COVER']) 

+

1487 self.check_coverage("""\ 

+

1488 a = 0 

+

1489 try: 

+

1490 a = 1 

+

1491 raise Exception("foo") 

+

1492 except ImportError: #pragma: NO COVER 

+

1493 a = 99 

+

1494 except: 

+

1495 a = 123 

+

1496 assert a == 123 

+

1497 """, 

+

1498 [1,2,3,4,7,8,9], "", excludes=['#pragma: NO COVER']) 

+

1499 self.check_coverage("""\ 

+

1500 a = 0 

+

1501 try: 

+

1502 a = 1 

+

1503 except: #pragma: NO COVER 

+

1504 a = 99 

+

1505 else: 

+

1506 a = 123 

+

1507 assert a == 123 

+

1508 """, 

+

1509 [1,2,3,7,8], "", excludes=['#pragma: NO COVER'], 

+

1510 arcz=".1 12 23 37 45 58 78 8.", 

+

1511 arcz_missing="45 58", 

+

1512 ) 

+

1513 

+

1514 def test_excluding_try_except_stranded_else(self): 

+

1515 if env.PYBEHAVIOR.omit_after_jump: 

+

1516 # The else can't be reached because the try ends with a raise. 

+

1517 arcz = ".1 12 23 34 45 56 69 9." 

+

1518 arcz_missing = "" 

+

1519 else: 

+

1520 arcz = ".1 12 23 34 45 56 69 89 9." 

+

1521 arcz_missing = "89" 

+

1522 self.check_coverage("""\ 

+

1523 a = 0 

+

1524 try: 

+

1525 a = 1 

+

1526 raise Exception("foo") 

+

1527 except: 

+

1528 a = 99 

+

1529 else: #pragma: NO COVER 

+

1530 x = 2 

+

1531 assert a == 99 

+

1532 """, 

+

1533 [1,2,3,4,5,6,9], "", excludes=['#pragma: NO COVER'], 

+

1534 arcz=arcz, 

+

1535 arcz_missing=arcz_missing, 

+

1536 ) 

+

1537 

+

1538 def test_excluding_if_pass(self): 

+

1539 # From a comment on the coverage.py page by Michael McNeil Forbes: 

+

1540 self.check_coverage("""\ 

+

1541 def f(): 

+

1542 if False: # pragma: no cover 

+

1543 pass # This line still reported as missing 

+

1544 if False: # pragma: no cover 

+

1545 x = 1 # Now it is skipped. 

+

1546 

+

1547 f() 

+

1548 """, 

+

1549 [1,7], "", excludes=["no cover"]) 

+

1550 

+

1551 def test_excluding_function(self): 

+

1552 self.check_coverage("""\ 

+

1553 def fn(foo): #pragma: NO COVER 

+

1554 a = 1 

+

1555 b = 2 

+

1556 c = 3 

+

1557 

+

1558 x = 1 

+

1559 assert x == 1 

+

1560 """, 

+

1561 [6,7], "", excludes=['#pragma: NO COVER']) 

+

1562 

+

1563 def test_excluding_method(self): 

+

1564 self.check_coverage("""\ 

+

1565 class Fooey: 

+

1566 def __init__(self): 

+

1567 self.a = 1 

+

1568 

+

1569 def foo(self): #pragma: NO COVER 

+

1570 return self.a 

+

1571 

+

1572 x = Fooey() 

+

1573 assert x.a == 1 

+

1574 """, 

+

1575 [1,2,3,8,9], "", excludes=['#pragma: NO COVER']) 

+

1576 

+

1577 def test_excluding_class(self): 

+

1578 self.check_coverage("""\ 

+

1579 class Fooey: #pragma: NO COVER 

+

1580 def __init__(self): 

+

1581 self.a = 1 

+

1582 

+

1583 def foo(self): 

+

1584 return self.a 

+

1585 

+

1586 x = 1 

+

1587 assert x == 1 

+

1588 """, 

+

1589 [8,9], "", excludes=['#pragma: NO COVER']) 

+

1590 

+

1591 def test_excludes_non_ascii(self): 

+

1592 self.check_coverage("""\ 

+

1593 # coding: utf-8 

+

1594 a = 1; b = 2 

+

1595 

+

1596 if len([]): 

+

1597 a = 5 # ✘cover 

+

1598 """, 

+

1599 [2, 4], "", excludes=['✘cover'] 

+

1600 ) 

+

1601 

+

1602 def test_formfeed(self): 

+

1603 # https://github.com/nedbat/coveragepy/issues/461 

+

1604 self.check_coverage("""\ 

+

1605 x = 1 

+

1606 assert len([]) == 0, ( 

+

1607 "This won't happen %s" % ("hello",) 

+

1608 ) 

+

1609 \f 

+

1610 x = 6 

+

1611 assert len([]) == 0, ( 

+

1612 "This won't happen %s" % ("hello",) 

+

1613 ) 

+

1614 """, 

+

1615 [1, 6], "", excludes=['assert'], 

+

1616 ) 

+

1617 

+

1618 

+

1619class Py24Test(CoverageTest): 

+

1620 """Tests of new syntax in Python 2.4.""" 

+

1621 

+

1622 def test_function_decorators(self): 

+

1623 lines = [1, 2, 3, 4, 6, 8, 10, 12] 

+

1624 if env.PYBEHAVIOR.trace_decorated_def: 

+

1625 lines = sorted(lines + [9]) 

+

1626 self.check_coverage("""\ 

+

1627 def require_int(func): 

+

1628 def wrapper(arg): 

+

1629 assert isinstance(arg, int) 

+

1630 return func(arg) 

+

1631 

+

1632 return wrapper 

+

1633 

+

1634 @require_int 

+

1635 def p1(arg): 

+

1636 return arg*2 

+

1637 

+

1638 assert p1(10) == 20 

+

1639 """, 

+

1640 lines, "") 

+

1641 

+

1642 def test_function_decorators_with_args(self): 

+

1643 lines = [1, 2, 3, 4, 5, 6, 8, 10, 12] 

+

1644 if env.PYBEHAVIOR.trace_decorated_def: 

+

1645 lines = sorted(lines + [9]) 

+

1646 self.check_coverage("""\ 

+

1647 def boost_by(extra): 

+

1648 def decorator(func): 

+

1649 def wrapper(arg): 

+

1650 return extra*func(arg) 

+

1651 return wrapper 

+

1652 return decorator 

+

1653 

+

1654 @boost_by(10) 

+

1655 def boosted(arg): 

+

1656 return arg*2 

+

1657 

+

1658 assert boosted(10) == 200 

+

1659 """, 

+

1660 lines, "") 

+

1661 

+

1662 def test_double_function_decorators(self): 

+

1663 lines = [1, 2, 3, 4, 5, 7, 8, 9, 10, 11, 12, 14, 15, 17, 19, 21, 22, 24, 26] 

+

1664 if env.PYBEHAVIOR.trace_decorated_def: 

+

1665 lines = sorted(lines + [16, 23]) 

+

1666 self.check_coverage("""\ 

+

1667 def require_int(func): 

+

1668 def wrapper(arg): 

+

1669 assert isinstance(arg, int) 

+

1670 return func(arg) 

+

1671 return wrapper 

+

1672 

+

1673 def boost_by(extra): 

+

1674 def decorator(func): 

+

1675 def wrapper(arg): 

+

1676 return extra*func(arg) 

+

1677 return wrapper 

+

1678 return decorator 

+

1679 

+

1680 @require_int 

+

1681 @boost_by(10) 

+

1682 def boosted1(arg): 

+

1683 return arg*2 

+

1684 

+

1685 assert boosted1(10) == 200 

+

1686 

+

1687 @boost_by(10) 

+

1688 @require_int 

+

1689 def boosted2(arg): 

+

1690 return arg*2 

+

1691 

+

1692 assert boosted2(10) == 200 

+

1693 """, 

+

1694 lines, "") 

+

1695 

+

1696 

+

1697class Py25Test(CoverageTest): 

+

1698 """Tests of new syntax in Python 2.5.""" 

+

1699 

+

1700 def test_with_statement(self): 

+

1701 self.check_coverage("""\ 

+

1702 class Managed: 

+

1703 def __enter__(self): 

+

1704 desc = "enter" 

+

1705 

+

1706 def __exit__(self, type, value, tb): 

+

1707 desc = "exit" 

+

1708 

+

1709 m = Managed() 

+

1710 with m: 

+

1711 desc = "block1a" 

+

1712 desc = "block1b" 

+

1713 

+

1714 try: 

+

1715 with m: 

+

1716 desc = "block2" 

+

1717 raise Exception("Boo!") 

+

1718 except: 

+

1719 desc = "caught" 

+

1720 """, 

+

1721 [1,2,3,5,6,8,9,10,11,13,14,15,16,17,18], "") 

+

1722 

+

1723 def test_try_except_finally(self): 

+

1724 self.check_coverage("""\ 

+

1725 a = 0; b = 0 

+

1726 try: 

+

1727 a = 1 

+

1728 except: 

+

1729 a = 99 

+

1730 finally: 

+

1731 b = 2 

+

1732 assert a == 1 and b == 2 

+

1733 """, 

+

1734 [1,2,3,4,5,7,8], "4-5", 

+

1735 arcz=".1 12 23 37 45 57 78 8.", arcz_missing="45 57", 

+

1736 ) 

+

1737 self.check_coverage("""\ 

+

1738 a = 0; b = 0 

+

1739 try: 

+

1740 a = 1 

+

1741 raise Exception("foo") 

+

1742 except: 

+

1743 a = 99 

+

1744 finally: 

+

1745 b = 2 

+

1746 assert a == 99 and b == 2 

+

1747 """, 

+

1748 [1,2,3,4,5,6,8,9], "", 

+

1749 arcz=".1 12 23 34 45 56 68 89 9.", 

+

1750 ) 

+

1751 self.check_coverage("""\ 

+

1752 a = 0; b = 0 

+

1753 try: 

+

1754 a = 1 

+

1755 raise Exception("foo") 

+

1756 except ImportError: 

+

1757 a = 99 

+

1758 except: 

+

1759 a = 123 

+

1760 finally: 

+

1761 b = 2 

+

1762 assert a == 123 and b == 2 

+

1763 """, 

+

1764 [1,2,3,4,5,6,7,8,10,11], "6", 

+

1765 arcz=".1 12 23 34 45 56 57 78 6A 8A AB B.", arcz_missing="56 6A", 

+

1766 ) 

+

1767 self.check_coverage("""\ 

+

1768 a = 0; b = 0 

+

1769 try: 

+

1770 a = 1 

+

1771 raise IOError("foo") 

+

1772 except ImportError: 

+

1773 a = 99 

+

1774 except IOError: 

+

1775 a = 17 

+

1776 except: 

+

1777 a = 123 

+

1778 finally: 

+

1779 b = 2 

+

1780 assert a == 17 and b == 2 

+

1781 """, 

+

1782 [1,2,3,4,5,6,7,8,9,10,12,13], "6, 9-10", 

+

1783 arcz=".1 12 23 34 45 56 6C 57 78 8C 79 9A AC CD D.", 

+

1784 arcz_missing="56 6C 79 9A AC", 

+

1785 ) 

+

1786 self.check_coverage("""\ 

+

1787 a = 0; b = 0 

+

1788 try: 

+

1789 a = 1 

+

1790 except: 

+

1791 a = 99 

+

1792 else: 

+

1793 a = 123 

+

1794 finally: 

+

1795 b = 2 

+

1796 assert a == 123 and b == 2 

+

1797 """, 

+

1798 [1,2,3,4,5,7,9,10], "4-5", 

+

1799 arcz=".1 12 23 37 45 59 79 9A A.", 

+

1800 arcz_missing="45 59", 

+

1801 ) 

+

1802 

+

1803 def test_try_except_finally_stranded_else(self): 

+

1804 if env.PYBEHAVIOR.omit_after_jump: 

+

1805 # The else can't be reached because the try ends with a raise. 

+

1806 lines = [1,2,3,4,5,6,10,11] 

+

1807 missing = "" 

+

1808 arcz = ".1 12 23 34 45 56 6A AB B." 

+

1809 arcz_missing = "" 

+

1810 else: 

+

1811 lines = [1,2,3,4,5,6,8,10,11] 

+

1812 missing = "8" 

+

1813 arcz = ".1 12 23 34 45 56 6A 8A AB B." 

+

1814 arcz_missing = "8A" 

+

1815 self.check_coverage("""\ 

+

1816 a = 0; b = 0 

+

1817 try: 

+

1818 a = 1 

+

1819 raise Exception("foo") 

+

1820 except: 

+

1821 a = 99 

+

1822 else: 

+

1823 a = 123 

+

1824 finally: 

+

1825 b = 2 

+

1826 assert a == 99 and b == 2 

+

1827 """, 

+

1828 lines=lines, 

+

1829 missing=missing, 

+

1830 arcz=arcz, 

+

1831 arcz_missing=arcz_missing, 

+

1832 ) 

+

1833 

+

1834 

+

1835class ModuleTest(CoverageTest): 

+

1836 """Tests for the module-level behavior of the `coverage` module.""" 

+

1837 

+

1838 run_in_temp_dir = False 

+

1839 

+

1840 def test_not_singleton(self): 

+

1841 # You *can* create another coverage object. 

+

1842 coverage.Coverage() 

+

1843 coverage.Coverage() 

+

1844 

+

1845 def test_old_name_and_new_name(self): 

+

1846 assert coverage.coverage is coverage.Coverage 

+

1847 

+

1848 

+

1849class ReportingTest(CoverageTest): 

+

1850 """Tests of some reporting behavior.""" 

+

1851 

+

1852 def test_no_data_to_report_on_annotate(self): 

+

1853 # Reporting with no data produces a nice message and no output 

+

1854 # directory. 

+

1855 with pytest.raises(CoverageException, match="No data to report."): 

+

1856 self.command_line("annotate -d ann") 

+

1857 self.assert_doesnt_exist("ann") 

+

1858 

+

1859 def test_no_data_to_report_on_html(self): 

+

1860 # Reporting with no data produces a nice message and no output 

+

1861 # directory. 

+

1862 with pytest.raises(CoverageException, match="No data to report."): 

+

1863 self.command_line("html -d htmlcov") 

+

1864 self.assert_doesnt_exist("htmlcov") 

+

1865 

+

1866 def test_no_data_to_report_on_xml(self): 

+

1867 # Reporting with no data produces a nice message. 

+

1868 with pytest.raises(CoverageException, match="No data to report."): 

+

1869 self.command_line("xml") 

+

1870 self.assert_doesnt_exist("coverage.xml") 

+
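The exclusion tests above all feed regexes such as '#pragma: NO COVER' to check_coverage's excludes argument. Outside the test harness, the same effect comes from coverage.py's public exclusion API; the sketch below is illustrative only (mymod.py is a made-up module name) and is not part of the report:

    import coverage

    cov = coverage.Coverage()
    cov.exclude(r'#pragma: NO COVER')   # add a regex to the exclusion list, like the tests' excludes=[...]
    cov.start()
    import mymod                        # hypothetical module; lines matching the regex are not reported as missing
    cov.stop()
    cov.report()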
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_data_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_data_py.html new file mode 100644 index 000000000..bc8183d60 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_data_py.html @@ -0,0 +1,883 @@
Coverage for tests/test_data.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests for coverage.data""" 

+

5 

+

6import glob 

+

7import os 

+

8import os.path 

+

9import re 

+

10import sqlite3 

+

11import threading 

+

12 

+

13import mock 

+

14import pytest 

+

15 

+

16from coverage.data import CoverageData, combine_parallel_data 

+

17from coverage.data import add_data_to_hash, line_counts 

+

18from coverage.debug import DebugControlString 

+

19from coverage.files import PathAliases, canonical_filename 

+

20from coverage.misc import CoverageException 

+

21 

+

22from tests.coveragetest import CoverageTest 

+

23from tests.helpers import assert_count_equal 

+

24 

+

25 

+

26LINES_1 = { 

+

27 'a.py': {1: None, 2: None}, 

+

28 'b.py': {3: None}, 

+

29} 

+

30SUMMARY_1 = {'a.py': 2, 'b.py': 1} 

+

31MEASURED_FILES_1 = ['a.py', 'b.py'] 

+

32A_PY_LINES_1 = [1, 2] 

+

33B_PY_LINES_1 = [3] 

+

34 

+

35LINES_2 = { 

+

36 'a.py': {1: None, 5: None}, 

+

37 'c.py': {17: None}, 

+

38} 

+

39SUMMARY_1_2 = {'a.py': 3, 'b.py': 1, 'c.py': 1} 

+

40MEASURED_FILES_1_2 = ['a.py', 'b.py', 'c.py'] 

+

41 

+

42ARCS_3 = { 

+

43 'x.py': { 

+

44 (-1, 1): None, 

+

45 (1, 2): None, 

+

46 (2, 3): None, 

+

47 (3, -1): None, 

+

48 }, 

+

49 'y.py': { 

+

50 (-1, 17): None, 

+

51 (17, 23): None, 

+

52 (23, -1): None, 

+

53 }, 

+

54} 

+

55X_PY_ARCS_3 = [(-1, 1), (1, 2), (2, 3), (3, -1)] 

+

56Y_PY_ARCS_3 = [(-1, 17), (17, 23), (23, -1)] 

+

57SUMMARY_3 = {'x.py': 3, 'y.py': 2} 

+

58MEASURED_FILES_3 = ['x.py', 'y.py'] 

+

59X_PY_LINES_3 = [1, 2, 3] 

+

60Y_PY_LINES_3 = [17, 23] 

+

61 

+

62ARCS_4 = { 

+

63 'x.py': { 

+

64 (-1, 2): None, 

+

65 (2, 5): None, 

+

66 (5, -1): None, 

+

67 }, 

+

68 'z.py': { 

+

69 (-1, 1000): None, 

+

70 (1000, -1): None, 

+

71 }, 

+

72} 

+

73SUMMARY_3_4 = {'x.py': 4, 'y.py': 2, 'z.py': 1} 

+

74MEASURED_FILES_3_4 = ['x.py', 'y.py', 'z.py'] 

+

75 

+

76 

+

77class DataTestHelpers(CoverageTest): 

+

78 """Test helpers for data tests.""" 

+

79 

+

80 def assert_line_counts(self, covdata, counts, fullpath=False): 

+

81 """Check that the line_counts of `covdata` is `counts`.""" 

+

82 assert line_counts(covdata, fullpath) == counts 

+

83 

+

84 def assert_measured_files(self, covdata, measured): 

+

85 """Check that `covdata`'s measured files are `measured`.""" 

+

86 assert_count_equal(covdata.measured_files(), measured) 

+

87 

+

88 def assert_lines1_data(self, covdata): 

+

89 """Check that `covdata` has the data from LINES1.""" 

+

90 self.assert_line_counts(covdata, SUMMARY_1) 

+

91 self.assert_measured_files(covdata, MEASURED_FILES_1) 

+

92 assert_count_equal(covdata.lines("a.py"), A_PY_LINES_1) 

+

93 assert not covdata.has_arcs() 

+

94 

+

95 def assert_arcs3_data(self, covdata): 

+

96 """Check that `covdata` has the data from ARCS3.""" 

+

97 self.assert_line_counts(covdata, SUMMARY_3) 

+

98 self.assert_measured_files(covdata, MEASURED_FILES_3) 

+

99 assert_count_equal(covdata.lines("x.py"), X_PY_LINES_3) 

+

100 assert_count_equal(covdata.arcs("x.py"), X_PY_ARCS_3) 

+

101 assert_count_equal(covdata.lines("y.py"), Y_PY_LINES_3) 

+

102 assert_count_equal(covdata.arcs("y.py"), Y_PY_ARCS_3) 

+

103 assert covdata.has_arcs() 

+

104 

+

105 

+

106class CoverageDataTest(DataTestHelpers, CoverageTest): 

+

107 """Test cases for CoverageData.""" 

+

108 

+

109 def test_empty_data_is_false(self): 

+

110 covdata = CoverageData() 

+

111 assert not covdata 

+

112 

+

113 def test_line_data_is_true(self): 

+

114 covdata = CoverageData() 

+

115 covdata.add_lines(LINES_1) 

+

116 assert covdata 

+

117 

+

118 def test_arc_data_is_true(self): 

+

119 covdata = CoverageData() 

+

120 covdata.add_arcs(ARCS_3) 

+

121 assert covdata 

+

122 

+

123 def test_empty_line_data_is_false(self): 

+

124 covdata = CoverageData() 

+

125 covdata.add_lines({}) 

+

126 assert not covdata 

+

127 

+

128 def test_empty_arc_data_is_false(self): 

+

129 covdata = CoverageData() 

+

130 covdata.add_arcs({}) 

+

131 assert not covdata 

+

132 

+

133 def test_adding_lines(self): 

+

134 covdata = CoverageData() 

+

135 covdata.add_lines(LINES_1) 

+

136 self.assert_lines1_data(covdata) 

+

137 

+

138 def test_adding_arcs(self): 

+

139 covdata = CoverageData() 

+

140 covdata.add_arcs(ARCS_3) 

+

141 self.assert_arcs3_data(covdata) 

+

142 

+

143 def test_ok_to_add_lines_twice(self): 

+

144 covdata = CoverageData() 

+

145 covdata.add_lines(LINES_1) 

+

146 covdata.add_lines(LINES_2) 

+

147 self.assert_line_counts(covdata, SUMMARY_1_2) 

+

148 self.assert_measured_files(covdata, MEASURED_FILES_1_2) 

+

149 

+

150 def test_ok_to_add_arcs_twice(self): 

+

151 covdata = CoverageData() 

+

152 covdata.add_arcs(ARCS_3) 

+

153 covdata.add_arcs(ARCS_4) 

+

154 self.assert_line_counts(covdata, SUMMARY_3_4) 

+

155 self.assert_measured_files(covdata, MEASURED_FILES_3_4) 

+

156 

+

157 def test_cant_add_arcs_with_lines(self): 

+

158 covdata = CoverageData() 

+

159 covdata.add_lines(LINES_1) 

+

160 msg = "Can't add branch measurements to existing line data" 

+

161 with pytest.raises(CoverageException, match=msg): 

+

162 covdata.add_arcs(ARCS_3) 

+

163 

+

164 def test_cant_add_lines_with_arcs(self): 

+

165 covdata = CoverageData() 

+

166 covdata.add_arcs(ARCS_3) 

+

167 msg = "Can't add line measurements to existing branch data" 

+

168 with pytest.raises(CoverageException, match=msg): 

+

169 covdata.add_lines(LINES_1) 

+

170 

+

171 def test_touch_file_with_lines(self): 

+

172 covdata = CoverageData() 

+

173 covdata.add_lines(LINES_1) 

+

174 covdata.touch_file('zzz.py') 

+

175 self.assert_measured_files(covdata, MEASURED_FILES_1 + ['zzz.py']) 

+

176 

+

177 def test_touch_file_with_arcs(self): 

+

178 covdata = CoverageData() 

+

179 covdata.add_arcs(ARCS_3) 

+

180 covdata.touch_file('zzz.py') 

+

181 self.assert_measured_files(covdata, MEASURED_FILES_3 + ['zzz.py']) 

+

182 

+

183 def test_set_query_contexts(self): 

+

184 covdata = CoverageData() 

+

185 covdata.set_context('test_a') 

+

186 covdata.add_lines(LINES_1) 

+

187 covdata.set_query_contexts(['test_*']) 

+

188 assert covdata.lines('a.py') == [1, 2] 

+

189 covdata.set_query_contexts(['other*']) 

+

190 assert covdata.lines('a.py') == [] 

+

191 

+

192 def test_no_lines_vs_unmeasured_file(self): 

+

193 covdata = CoverageData() 

+

194 covdata.add_lines(LINES_1) 

+

195 covdata.touch_file('zzz.py') 

+

196 assert covdata.lines('zzz.py') == [] 

+

197 assert covdata.lines('no_such_file.py') is None 

+

198 

+

199 def test_lines_with_contexts(self): 

+

200 covdata = CoverageData() 

+

201 covdata.set_context('test_a') 

+

202 covdata.add_lines(LINES_1) 

+

203 assert covdata.lines('a.py') == [1, 2] 

+

204 covdata.set_query_contexts(['test*']) 

+

205 assert covdata.lines('a.py') == [1, 2] 

+

206 covdata.set_query_contexts(['other*']) 

+

207 assert covdata.lines('a.py') == [] 

+

208 

+

209 def test_contexts_by_lineno_with_lines(self): 

+

210 covdata = CoverageData() 

+

211 covdata.set_context('test_a') 

+

212 covdata.add_lines(LINES_1) 

+

213 assert covdata.contexts_by_lineno('a.py') == {1: ['test_a'], 2: ['test_a']} 

+

214 

+

215 def test_no_duplicate_lines(self): 

+

216 covdata = CoverageData() 

+

217 covdata.set_context("context1") 

+

218 covdata.add_lines(LINES_1) 

+

219 covdata.set_context("context2") 

+

220 covdata.add_lines(LINES_1) 

+

221 assert covdata.lines('a.py') == A_PY_LINES_1 

+

222 

+

223 def test_no_duplicate_arcs(self): 

+

224 covdata = CoverageData() 

+

225 covdata.set_context("context1") 

+

226 covdata.add_arcs(ARCS_3) 

+

227 covdata.set_context("context2") 

+

228 covdata.add_arcs(ARCS_3) 

+

229 assert covdata.arcs('x.py') == X_PY_ARCS_3 

+

230 

+

231 def test_no_arcs_vs_unmeasured_file(self): 

+

232 covdata = CoverageData() 

+

233 covdata.add_arcs(ARCS_3) 

+

234 covdata.touch_file('zzz.py') 

+

235 assert covdata.lines('zzz.py') == [] 

+

236 assert covdata.lines('no_such_file.py') is None 

+

237 assert covdata.arcs('zzz.py') == [] 

+

238 assert covdata.arcs('no_such_file.py') is None 

+

239 

+

240 def test_arcs_with_contexts(self): 

+

241 covdata = CoverageData() 

+

242 covdata.set_context('test_x') 

+

243 covdata.add_arcs(ARCS_3) 

+

244 assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)] 

+

245 covdata.set_query_contexts(['test*']) 

+

246 assert covdata.arcs('x.py') == [(-1, 1), (1, 2), (2, 3), (3, -1)] 

+

247 covdata.set_query_contexts(['other*']) 

+

248 assert covdata.arcs('x.py') == [] 

+

249 

+

250 def test_contexts_by_lineno_with_arcs(self): 

+

251 covdata = CoverageData() 

+

252 covdata.set_context('test_x') 

+

253 covdata.add_arcs(ARCS_3) 

+

254 expected = {-1: ['test_x'], 1: ['test_x'], 2: ['test_x'], 3: ['test_x']} 

+

255 assert expected == covdata.contexts_by_lineno('x.py') 

+

256 

+

257 def test_contexts_by_lineno_with_unknown_file(self): 

+

258 covdata = CoverageData() 

+

259 assert covdata.contexts_by_lineno('xyz.py') == {} 

+

260 

+

261 def test_file_tracer_name(self): 

+

262 covdata = CoverageData() 

+

263 covdata.add_lines({ 

+

264 "p1.foo": dict.fromkeys([1, 2, 3]), 

+

265 "p2.html": dict.fromkeys([10, 11, 12]), 

+

266 "main.py": dict.fromkeys([20]), 

+

267 }) 

+

268 covdata.add_file_tracers({"p1.foo": "p1.plugin", "p2.html": "p2.plugin"}) 

+

269 assert covdata.file_tracer("p1.foo") == "p1.plugin" 

+

270 assert covdata.file_tracer("main.py") == "" 

+

271 assert covdata.file_tracer("p3.not_here") is None 

+

272 

+

273 def test_cant_file_tracer_unmeasured_files(self): 

+

274 covdata = CoverageData() 

+

275 msg = "Can't add file tracer data for unmeasured file 'p1.foo'" 

+

276 with pytest.raises(CoverageException, match=msg): 

+

277 covdata.add_file_tracers({"p1.foo": "p1.plugin"}) 

+

278 

+

279 covdata.add_lines({"p2.html": dict.fromkeys([10, 11, 12])}) 

+

280 with pytest.raises(CoverageException, match=msg): 

+

281 covdata.add_file_tracers({"p1.foo": "p1.plugin"}) 

+

282 

+

283 def test_cant_change_file_tracer_name(self): 

+

284 covdata = CoverageData() 

+

285 covdata.add_lines({"p1.foo": dict.fromkeys([1, 2, 3])}) 

+

286 covdata.add_file_tracers({"p1.foo": "p1.plugin"}) 

+

287 

+

288 msg = "Conflicting file tracer name for 'p1.foo': u?'p1.plugin' vs u?'p1.plugin.foo'" 

+

289 with pytest.raises(CoverageException, match=msg): 

+

290 covdata.add_file_tracers({"p1.foo": "p1.plugin.foo"}) 

+

291 

+

292 def test_update_lines(self): 

+

293 covdata1 = CoverageData(suffix='1') 

+

294 covdata1.add_lines(LINES_1) 

+

295 

+

296 covdata2 = CoverageData(suffix='2') 

+

297 covdata2.add_lines(LINES_2) 

+

298 

+

299 covdata3 = CoverageData(suffix='3') 

+

300 covdata3.update(covdata1) 

+

301 covdata3.update(covdata2) 

+

302 

+

303 self.assert_line_counts(covdata3, SUMMARY_1_2) 

+

304 self.assert_measured_files(covdata3, MEASURED_FILES_1_2) 

+

305 

+

306 def test_update_arcs(self): 

+

307 covdata1 = CoverageData(suffix='1') 

+

308 covdata1.add_arcs(ARCS_3) 

+

309 

+

310 covdata2 = CoverageData(suffix='2') 

+

311 covdata2.add_arcs(ARCS_4) 

+

312 

+

313 covdata3 = CoverageData(suffix='3') 

+

314 covdata3.update(covdata1) 

+

315 covdata3.update(covdata2) 

+

316 

+

317 self.assert_line_counts(covdata3, SUMMARY_3_4) 

+

318 self.assert_measured_files(covdata3, MEASURED_FILES_3_4) 

+

319 

+

320 def test_update_cant_mix_lines_and_arcs(self): 

+

321 covdata1 = CoverageData(suffix='1') 

+

322 covdata1.add_lines(LINES_1) 

+

323 

+

324 covdata2 = CoverageData(suffix='2') 

+

325 covdata2.add_arcs(ARCS_3) 

+

326 

+

327 with pytest.raises(CoverageException, match="Can't combine arc data with line data"): 

+

328 covdata1.update(covdata2) 

+

329 

+

330 with pytest.raises(CoverageException, match="Can't combine line data with arc data"): 

+

331 covdata2.update(covdata1) 

+

332 

+

333 def test_update_file_tracers(self): 

+

334 covdata1 = CoverageData(suffix='1') 

+

335 covdata1.add_lines({ 

+

336 "p1.html": dict.fromkeys([1, 2, 3, 4]), 

+

337 "p2.html": dict.fromkeys([5, 6, 7]), 

+

338 "main.py": dict.fromkeys([10, 11, 12]), 

+

339 }) 

+

340 covdata1.add_file_tracers({ 

+

341 "p1.html": "html.plugin", 

+

342 "p2.html": "html.plugin2", 

+

343 }) 

+

344 

+

345 covdata2 = CoverageData(suffix='2') 

+

346 covdata2.add_lines({ 

+

347 "p1.html": dict.fromkeys([3, 4, 5, 6]), 

+

348 "p2.html": dict.fromkeys([7, 8, 9]), 

+

349 "p3.foo": dict.fromkeys([1000, 1001]), 

+

350 "main.py": dict.fromkeys([10, 11, 12]), 

+

351 }) 

+

352 covdata2.add_file_tracers({ 

+

353 "p1.html": "html.plugin", 

+

354 "p2.html": "html.plugin2", 

+

355 "p3.foo": "foo_plugin", 

+

356 }) 

+

357 

+

358 covdata3 = CoverageData(suffix='3') 

+

359 covdata3.update(covdata1) 

+

360 covdata3.update(covdata2) 

+

361 assert covdata3.file_tracer("p1.html") == "html.plugin" 

+

362 assert covdata3.file_tracer("p2.html") == "html.plugin2" 

+

363 assert covdata3.file_tracer("p3.foo") == "foo_plugin" 

+

364 assert covdata3.file_tracer("main.py") == "" 

+

365 

+

366 def test_update_conflicting_file_tracers(self): 

+

367 covdata1 = CoverageData(suffix='1') 

+

368 covdata1.add_lines({"p1.html": dict.fromkeys([1, 2, 3])}) 

+

369 covdata1.add_file_tracers({"p1.html": "html.plugin"}) 

+

370 

+

371 covdata2 = CoverageData(suffix='2') 

+

372 covdata2.add_lines({"p1.html": dict.fromkeys([1, 2, 3])}) 

+

373 covdata2.add_file_tracers({"p1.html": "html.other_plugin"}) 

+

374 

+

375 msg = "Conflicting file tracer name for 'p1.html': u?'html.plugin' vs u?'html.other_plugin'" 

+

376 with pytest.raises(CoverageException, match=msg): 

+

377 covdata1.update(covdata2) 

+

378 

+

379 msg = "Conflicting file tracer name for 'p1.html': u?'html.other_plugin' vs u?'html.plugin'" 

+

380 with pytest.raises(CoverageException, match=msg): 

+

381 covdata2.update(covdata1) 

+

382 

+

383 def test_update_file_tracer_vs_no_file_tracer(self): 

+

384 covdata1 = CoverageData(suffix="1") 

+

385 covdata1.add_lines({"p1.html": dict.fromkeys([1, 2, 3])}) 

+

386 covdata1.add_file_tracers({"p1.html": "html.plugin"}) 

+

387 

+

388 covdata2 = CoverageData(suffix="2") 

+

389 covdata2.add_lines({"p1.html": dict.fromkeys([1, 2, 3])}) 

+

390 

+

391 msg = "Conflicting file tracer name for 'p1.html': u?'html.plugin' vs u?''" 

+

392 with pytest.raises(CoverageException, match=msg): 

+

393 covdata1.update(covdata2) 

+

394 

+

395 msg = "Conflicting file tracer name for 'p1.html': u?'' vs u?'html.plugin'" 

+

396 with pytest.raises(CoverageException, match=msg): 

+

397 covdata2.update(covdata1) 

+

398 

+

399 def test_update_lines_empty(self): 

+

400 covdata1 = CoverageData(suffix='1') 

+

401 covdata1.add_lines(LINES_1) 

+

402 

+

403 covdata2 = CoverageData(suffix='2') 

+

404 covdata1.update(covdata2) 

+

405 self.assert_line_counts(covdata1, SUMMARY_1) 

+

406 

+

407 def test_update_arcs_empty(self): 

+

408 covdata1 = CoverageData(suffix='1') 

+

409 covdata1.add_arcs(ARCS_3) 

+

410 

+

411 covdata2 = CoverageData(suffix='2') 

+

412 covdata1.update(covdata2) 

+

413 self.assert_line_counts(covdata1, SUMMARY_3) 

+

414 

+

415 def test_asking_isnt_measuring(self): 

+

416 # Asking about an unmeasured file shouldn't make it seem measured. 

+

417 covdata = CoverageData() 

+

418 self.assert_measured_files(covdata, []) 

+

419 assert covdata.arcs("missing.py") is None 

+

420 self.assert_measured_files(covdata, []) 

+

421 

+

422 def test_add_to_hash_with_lines(self): 

+

423 covdata = CoverageData() 

+

424 covdata.add_lines(LINES_1) 

+

425 hasher = mock.Mock() 

+

426 add_data_to_hash(covdata, "a.py", hasher) 

+

427 assert hasher.method_calls == [ 

+

428 mock.call.update([1, 2]), # lines 

+

429 mock.call.update(""), # file_tracer name 

+

430 ] 

+

431 

+

432 def test_add_to_hash_with_arcs(self): 

+

433 covdata = CoverageData() 

+

434 covdata.add_arcs(ARCS_3) 

+

435 covdata.add_file_tracers({"y.py": "hologram_plugin"}) 

+

436 hasher = mock.Mock() 

+

437 add_data_to_hash(covdata, "y.py", hasher) 

+

438 assert hasher.method_calls == [ 

+

439 mock.call.update([(-1, 17), (17, 23), (23, -1)]), # arcs 

+

440 mock.call.update("hologram_plugin"), # file_tracer name 

+

441 ] 

+

442 

+

443 def test_add_to_lines_hash_with_missing_file(self): 

+

444 # https://github.com/nedbat/coveragepy/issues/403 

+

445 covdata = CoverageData() 

+

446 covdata.add_lines(LINES_1) 

+

447 hasher = mock.Mock() 

+

448 add_data_to_hash(covdata, "missing.py", hasher) 

+

449 assert hasher.method_calls == [ 

+

450 mock.call.update([]), 

+

451 mock.call.update(None), 

+

452 ] 

+

453 

+

454 def test_add_to_arcs_hash_with_missing_file(self): 

+

455 # https://github.com/nedbat/coveragepy/issues/403 

+

456 covdata = CoverageData() 

+

457 covdata.add_arcs(ARCS_3) 

+

458 covdata.add_file_tracers({"y.py": "hologram_plugin"}) 

+

459 hasher = mock.Mock() 

+

460 add_data_to_hash(covdata, "missing.py", hasher) 

+

461 assert hasher.method_calls == [ 

+

462 mock.call.update([]), 

+

463 mock.call.update(None), 

+

464 ] 

+

465 

+

466 def test_empty_lines_are_still_lines(self): 

+

467 covdata = CoverageData() 

+

468 covdata.add_lines({}) 

+

469 covdata.touch_file("abc.py") 

+

470 assert not covdata.has_arcs() 

+

471 

+

472 def test_empty_arcs_are_still_arcs(self): 

+

473 covdata = CoverageData() 

+

474 covdata.add_arcs({}) 

+

475 covdata.touch_file("abc.py") 

+

476 assert covdata.has_arcs() 

+

477 

+

478 def test_read_and_write_are_opposites(self): 

+

479 covdata1 = CoverageData() 

+

480 covdata1.add_arcs(ARCS_3) 

+

481 covdata1.write() 

+

482 

+

483 covdata2 = CoverageData() 

+

484 covdata2.read() 

+

485 self.assert_arcs3_data(covdata2) 

+

486 

+

487 def test_thread_stress(self): 

+

488 covdata = CoverageData() 

+

489 

+

490 def thread_main(): 

+

491 """Every thread will try to add the same data.""" 

+

492 covdata.add_lines(LINES_1) 

+

493 

+

494 threads = [threading.Thread(target=thread_main) for _ in range(10)] 

+

495 for t in threads: 

+

496 t.start() 

+

497 for t in threads: 

+

498 t.join() 

+

499 

+

500 self.assert_lines1_data(covdata) 

+

501 

+

502 

+

503class CoverageDataInTempDirTest(DataTestHelpers, CoverageTest): 

+

504 """Tests of CoverageData that need a temporary directory to make files.""" 

+

505 

+

506 def test_read_write_lines(self): 

+

507 covdata1 = CoverageData("lines.dat") 

+

508 covdata1.add_lines(LINES_1) 

+

509 covdata1.write() 

+

510 

+

511 covdata2 = CoverageData("lines.dat") 

+

512 covdata2.read() 

+

513 self.assert_lines1_data(covdata2) 

+

514 

+

515 def test_read_write_arcs(self): 

+

516 covdata1 = CoverageData("arcs.dat") 

+

517 covdata1.add_arcs(ARCS_3) 

+

518 covdata1.write() 

+

519 

+

520 covdata2 = CoverageData("arcs.dat") 

+

521 covdata2.read() 

+

522 self.assert_arcs3_data(covdata2) 

+

523 

+

524 def test_read_errors(self): 

+

525 msg = r"Couldn't .* '.*[/\\]{0}': \S+" 

+

526 

+

527 self.make_file("xyzzy.dat", "xyzzy") 

+

528 with pytest.raises(CoverageException, match=msg.format("xyzzy.dat")): 

+

529 covdata = CoverageData("xyzzy.dat") 

+

530 covdata.read() 

+

531 assert not covdata 

+

532 

+

533 self.make_file("empty.dat", "") 

+

534 with pytest.raises(CoverageException, match=msg.format("empty.dat")): 

+

535 covdata = CoverageData("empty.dat") 

+

536 covdata.read() 

+

537 assert not covdata 

+

538 

+

539 def test_read_sql_errors(self): 

+

540 with sqlite3.connect("wrong_schema.db") as con: 

+

541 con.execute("create table coverage_schema (version integer)") 

+

542 con.execute("insert into coverage_schema (version) values (99)") 

+

543 msg = r"Couldn't .* '.*[/\\]{}': wrong schema: 99 instead of \d+".format("wrong_schema.db") 

+

544 with pytest.raises(CoverageException, match=msg): 

+

545 covdata = CoverageData("wrong_schema.db") 

+

546 covdata.read() 

+

547 assert not covdata 

+

548 

+

549 with sqlite3.connect("no_schema.db") as con: 

+

550 con.execute("create table foobar (baz text)") 

+

551 msg = r"Couldn't .* '.*[/\\]{}': \S+".format("no_schema.db") 

+

552 with pytest.raises(CoverageException, match=msg): 

+

553 covdata = CoverageData("no_schema.db") 

+

554 covdata.read() 

+

555 assert not covdata 

+

556 

+

557 

+

558class CoverageDataFilesTest(DataTestHelpers, CoverageTest): 

+

559 """Tests of CoverageData file handling.""" 

+

560 

+

561 def test_reading_missing(self): 

+

562 self.assert_doesnt_exist(".coverage") 

+

563 covdata = CoverageData() 

+

564 covdata.read() 

+

565 self.assert_line_counts(covdata, {}) 

+

566 

+

567 def test_writing_and_reading(self): 

+

568 covdata1 = CoverageData() 

+

569 covdata1.add_lines(LINES_1) 

+

570 covdata1.write() 

+

571 

+

572 covdata2 = CoverageData() 

+

573 covdata2.read() 

+

574 self.assert_line_counts(covdata2, SUMMARY_1) 

+

575 

+

576 def test_debug_output_with_debug_option(self): 

+

577 # With debug option dataio, we get debug output about reading and 

+

578 # writing files. 

+

579 debug = DebugControlString(options=["dataio"]) 

+

580 covdata1 = CoverageData(debug=debug) 

+

581 covdata1.add_lines(LINES_1) 

+

582 covdata1.write() 

+

583 

+

584 covdata2 = CoverageData(debug=debug) 

+

585 covdata2.read() 

+

586 self.assert_line_counts(covdata2, SUMMARY_1) 

+

587 

+

588 assert re.search( 

+

589 r"^Erasing data file '.*\.coverage'\n" 

+

590 r"Creating data file '.*\.coverage'\n" 

+

591 r"Opening data file '.*\.coverage'\n$", 

+

592 debug.get_output() 

+

593 ) 

+

594 

+

595 def test_debug_output_without_debug_option(self): 

+

596 # With a debug object, but not the dataio option, we don't get debug 

+

597 # output. 

+

598 debug = DebugControlString(options=[]) 

+

599 covdata1 = CoverageData(debug=debug) 

+

600 covdata1.add_lines(LINES_1) 

+

601 covdata1.write() 

+

602 

+

603 covdata2 = CoverageData(debug=debug) 

+

604 covdata2.read() 

+

605 self.assert_line_counts(covdata2, SUMMARY_1) 

+

606 

+

607 assert debug.get_output() == "" 

+

608 

+

609 def test_explicit_suffix(self): 

+

610 self.assert_doesnt_exist(".coverage.SUFFIX") 

+

611 covdata = CoverageData(suffix='SUFFIX') 

+

612 covdata.add_lines(LINES_1) 

+

613 covdata.write() 

+

614 self.assert_exists(".coverage.SUFFIX") 

+

615 self.assert_doesnt_exist(".coverage") 

+

616 

+

617 def test_true_suffix(self): 

+

618 self.assert_file_count(".coverage.*", 0) 

+

619 

+

620 # suffix=True will make a randomly named data file. 

+

621 covdata1 = CoverageData(suffix=True) 

+

622 covdata1.add_lines(LINES_1) 

+

623 covdata1.write() 

+

624 self.assert_doesnt_exist(".coverage") 

+

625 data_files1 = glob.glob(".coverage.*") 

+

626 assert len(data_files1) == 1 

+

627 

+

628 # Another suffix=True will choose a different name. 

+

629 covdata2 = CoverageData(suffix=True) 

+

630 covdata2.add_lines(LINES_1) 

+

631 covdata2.write() 

+

632 self.assert_doesnt_exist(".coverage") 

+

633 data_files2 = glob.glob(".coverage.*") 

+

634 assert len(data_files2) == 2 

+

635 

+

636 # In addition to being different, the suffixes have the pid in them. 

+

637 assert all(str(os.getpid()) in fn for fn in data_files2) 

+

638 

+

639 def test_combining(self): 

+

640 self.assert_file_count(".coverage.*", 0) 

+

641 

+

642 covdata1 = CoverageData(suffix='1') 

+

643 covdata1.add_lines(LINES_1) 

+

644 covdata1.write() 

+

645 self.assert_exists(".coverage.1") 

+

646 self.assert_file_count(".coverage.*", 1) 

+

647 

+

648 covdata2 = CoverageData(suffix='2') 

+

649 covdata2.add_lines(LINES_2) 

+

650 covdata2.write() 

+

651 self.assert_exists(".coverage.2") 

+

652 self.assert_file_count(".coverage.*", 2) 

+

653 

+

654 covdata3 = CoverageData() 

+

655 combine_parallel_data(covdata3) 

+

656 self.assert_line_counts(covdata3, SUMMARY_1_2) 

+

657 self.assert_measured_files(covdata3, MEASURED_FILES_1_2) 

+

658 self.assert_file_count(".coverage.*", 0) 

+

659 

+

660 def test_erasing(self): 

+

661 covdata1 = CoverageData() 

+

662 covdata1.add_lines(LINES_1) 

+

663 covdata1.write() 

+

664 

+

665 covdata1.erase() 

+

666 self.assert_line_counts(covdata1, {}) 

+

667 

+

668 covdata2 = CoverageData() 

+

669 covdata2.read() 

+

670 self.assert_line_counts(covdata2, {}) 

+

671 

+

672 def test_erasing_parallel(self): 

+

673 self.make_file("datafile.1") 

+

674 self.make_file("datafile.2") 

+

675 self.make_file(".coverage") 

+

676 data = CoverageData("datafile") 

+

677 data.erase(parallel=True) 

+

678 self.assert_file_count("datafile.*", 0) 

+

679 self.assert_exists(".coverage") 

+

680 

+

681 def test_combining_with_aliases(self): 

+

682 covdata1 = CoverageData(suffix='1') 

+

683 covdata1.add_lines({ 

+

684 '/home/ned/proj/src/a.py': {1: None, 2: None}, 

+

685 '/home/ned/proj/src/sub/b.py': {3: None}, 

+

686 '/home/ned/proj/src/template.html': {10: None}, 

+

687 }) 

+

688 covdata1.add_file_tracers({ 

+

689 '/home/ned/proj/src/template.html': 'html.plugin', 

+

690 }) 

+

691 covdata1.write() 

+

692 

+

693 covdata2 = CoverageData(suffix='2') 

+

694 covdata2.add_lines({ 

+

695 r'c:\ned\test\a.py': {4: None, 5: None}, 

+

696 r'c:\ned\test\sub\b.py': {3: None, 6: None}, 

+

697 }) 

+

698 covdata2.write() 

+

699 

+

700 self.assert_file_count(".coverage.*", 2) 

+

701 

+

702 covdata3 = CoverageData() 

+

703 aliases = PathAliases() 

+

704 aliases.add("/home/ned/proj/src/", "./") 

+

705 aliases.add(r"c:\ned\test", "./") 

+

706 combine_parallel_data(covdata3, aliases=aliases) 

+

707 self.assert_file_count(".coverage.*", 0) 

+

708 # covdata3 hasn't been written yet. Should this file exist or not? 

+

709 #self.assert_exists(".coverage") 

+

710 

+

711 apy = canonical_filename('./a.py') 

+

712 sub_bpy = canonical_filename('./sub/b.py') 

+

713 template_html = canonical_filename('./template.html') 

+

714 

+

715 self.assert_line_counts(covdata3, {apy: 4, sub_bpy: 2, template_html: 1}, fullpath=True) 

+

716 self.assert_measured_files(covdata3, [apy, sub_bpy, template_html]) 

+

717 assert covdata3.file_tracer(template_html) == 'html.plugin' 

+

718 

+

719 def test_combining_from_different_directories(self): 

+

720 os.makedirs('cov1') 

+

721 covdata1 = CoverageData('cov1/.coverage.1') 

+

722 covdata1.add_lines(LINES_1) 

+

723 covdata1.write() 

+

724 

+

725 os.makedirs('cov2') 

+

726 covdata2 = CoverageData('cov2/.coverage.2') 

+

727 covdata2.add_lines(LINES_2) 

+

728 covdata2.write() 

+

729 

+

730 # This data won't be included. 

+

731 covdata_xxx = CoverageData('.coverage.xxx') 

+

732 covdata_xxx.add_arcs(ARCS_3) 

+

733 covdata_xxx.write() 

+

734 

+

735 covdata3 = CoverageData() 

+

736 combine_parallel_data(covdata3, data_paths=['cov1', 'cov2']) 

+

737 

+

738 self.assert_line_counts(covdata3, SUMMARY_1_2) 

+

739 self.assert_measured_files(covdata3, MEASURED_FILES_1_2) 

+

740 self.assert_doesnt_exist("cov1/.coverage.1") 

+

741 self.assert_doesnt_exist("cov2/.coverage.2") 

+

742 self.assert_exists(".coverage.xxx") 

+

743 

+

744 def test_combining_from_files(self): 

+

745 os.makedirs('cov1') 

+

746 covdata1 = CoverageData('cov1/.coverage.1') 

+

747 covdata1.add_lines(LINES_1) 

+

748 covdata1.write() 

+

749 

+

750 os.makedirs('cov2') 

+

751 covdata2 = CoverageData('cov2/.coverage.2') 

+

752 covdata2.add_lines(LINES_2) 

+

753 covdata2.write() 

+

754 

+

755 # This data won't be included. 

+

756 covdata_xxx = CoverageData('.coverage.xxx') 

+

757 covdata_xxx.add_arcs(ARCS_3) 

+

758 covdata_xxx.write() 

+

759 

+

760 covdata_2xxx = CoverageData('cov2/.coverage.xxx') 

+

761 covdata_2xxx.add_arcs(ARCS_3) 

+

762 covdata_2xxx.write() 

+

763 

+

764 covdata3 = CoverageData() 

+

765 combine_parallel_data(covdata3, data_paths=['cov1', 'cov2/.coverage.2']) 

+

766 

+

767 self.assert_line_counts(covdata3, SUMMARY_1_2) 

+

768 self.assert_measured_files(covdata3, MEASURED_FILES_1_2) 

+

769 self.assert_doesnt_exist("cov1/.coverage.1") 

+

770 self.assert_doesnt_exist("cov2/.coverage.2") 

+

771 self.assert_exists(".coverage.xxx") 

+

772 self.assert_exists("cov2/.coverage.xxx") 

+

773 

+

774 def test_combining_from_nonexistent_directories(self): 

+

775 covdata = CoverageData() 

+

776 msg = "Couldn't combine from non-existent path 'xyzzy'" 

+

777 with pytest.raises(CoverageException, match=msg): 

+

778 combine_parallel_data(covdata, data_paths=['xyzzy']) 

+

779 

+

780 def test_interleaved_erasing_bug716(self): 

+

781 # pytest-cov could produce this scenario. #716 

+

782 covdata1 = CoverageData() 

+

783 covdata2 = CoverageData() 

+

784 # this used to create the .coverage database file.. 

+

785 covdata2.set_context("") 

+

786 # then this would erase it all.. 

+

787 covdata1.erase() 

+

788 # then this would try to use tables that no longer exist. 

+

789 # "no such table: meta" 

+

790 covdata2.add_lines(LINES_1) 

+

791 

+

792 

+

793class DumpsLoadsTest(DataTestHelpers, CoverageTest): 

+

794 """Tests of CoverageData.dumps and loads.""" 

+

795 

+

796 run_in_temp_dir = False 

+

797 

+

798 def test_serialization(self): 

+

799 covdata1 = CoverageData(no_disk=True) 

+

800 covdata1.add_lines(LINES_1) 

+

801 covdata1.add_lines(LINES_2) 

+

802 serial = covdata1.dumps() 

+

803 

+

804 covdata2 = CoverageData(no_disk=True) 

+

805 covdata2.loads(serial) 

+

806 self.assert_line_counts(covdata2, SUMMARY_1_2) 

+

807 self.assert_measured_files(covdata2, MEASURED_FILES_1_2) 

+

808 

+

809 def test_misfed_serialization(self): 

+

810 covdata = CoverageData(no_disk=True) 

+

811 bad_data = b'Hello, world!\x07 ' + b'z' * 100 

+

812 msg = r"Unrecognized serialization: {} \(head of {} bytes\)".format( 

+

813 re.escape(repr(bad_data[:40])), 

+

814 len(bad_data), 

+

815 ) 

+

816 with pytest.raises(CoverageException, match=msg): 

+

817 covdata.loads(bad_data) 

+
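The CoverageData tests above (suffixed data files, write/read, combine_parallel_data) mirror what a parallel run does. A rough sketch of that flow, using only calls exercised by the tests; the file name and line numbers are illustrative:

    from coverage.data import CoverageData, combine_parallel_data

    part1 = CoverageData(suffix='1')                 # will write .coverage.1
    part1.add_lines({'a.py': {1: None, 2: None}})
    part1.write()

    part2 = CoverageData(suffix='2')                 # will write .coverage.2
    part2.add_lines({'a.py': {5: None}})
    part2.write()

    combined = CoverageData()                        # the plain .coverage file
    combine_parallel_data(combined)                  # merges and removes the suffixed files
    print(combined.lines('a.py'))                    # merged line data for a.py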
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_debug_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_debug_py.html new file mode 100644 index 000000000..a78b8ac2a --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_debug_py.html @@ -0,0 +1,302 @@
Coverage for tests/test_debug.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests of coverage/debug.py""" 

+

5 

+

6import os 

+

7import re 

+

8 

+

9import pytest 

+

10 

+

11import coverage 

+

12from coverage import env 

+

13from coverage.backward import StringIO 

+

14from coverage.debug import filter_text, info_formatter, info_header, short_id, short_stack 

+

15from coverage.debug import clipped_repr 

+

16 

+

17from tests.coveragetest import CoverageTest 

+

18from tests.helpers import re_line, re_lines 

+

19 

+

20 

+

21class InfoFormatterTest(CoverageTest): 

+

22 """Tests of debug.info_formatter.""" 

+

23 

+

24 run_in_temp_dir = False 

+

25 

+

26 def test_info_formatter(self): 

+

27 lines = list(info_formatter([ 

+

28 ('x', 'hello there'), 

+

29 ('very long label', ['one element']), 

+

30 ('regular', ['abc', 'def', 'ghi', 'jkl']), 

+

31 ('nothing', []), 

+

32 ])) 

+

33 expected = [ 

+

34 ' x: hello there', 

+

35 ' very long label: one element', 

+

36 ' regular: abc', 

+

37 ' def', 

+

38 ' ghi', 

+

39 ' jkl', 

+

40 ' nothing: -none-', 

+

41 ] 

+

42 assert expected == lines 

+

43 

+

44 def test_info_formatter_with_generator(self): 

+

45 lines = list(info_formatter(('info%d' % i, i) for i in range(3))) 

+

46 expected = [ 

+

47 ' info0: 0', 

+

48 ' info1: 1', 

+

49 ' info2: 2', 

+

50 ] 

+

51 assert expected == lines 

+

52 

+

53 def test_too_long_label(self): 

+

54 with pytest.raises(AssertionError): 

+

55 list(info_formatter([('this label is way too long and will not fit', 23)])) 

+

56 

+

57 

+

58@pytest.mark.parametrize("label, header", [ 

+

59 ("x", "-- x ---------------------------------------------------------"), 

+

60 ("hello there", "-- hello there -----------------------------------------------"), 

+

61]) 

+

62def test_info_header(label, header): 

+

63 assert info_header(label) == header 

+

64 

+

65 

+

66@pytest.mark.parametrize("id64, id16", [ 

+

67 (0x1234, 0x1234), 

+

68 (0x12340000, 0x1234), 

+

69 (0xA5A55A5A, 0xFFFF), 

+

70 (0x1234cba956780fed, 0x8008), 

+

71]) 

+

72def test_short_id(id64, id16): 

+

73 assert short_id(id64) == id16 

+

74 

+

75 

+

76@pytest.mark.parametrize("text, numchars, result", [ 

+

77 ("hello", 10, "'hello'"), 

+

78 ("0123456789abcdefghijklmnopqrstuvwxyz", 15, "'01234...vwxyz'"), 

+

79]) 

+

80def test_clipped_repr(text, numchars, result): 

+

81 assert clipped_repr(text, numchars) == result 

+

82 

+

83 

+

84@pytest.mark.parametrize("text, filters, result", [ 

+

85 ("hello", [], "hello"), 

+

86 ("hello\n", [], "hello\n"), 

+

87 ("hello\nhello\n", [], "hello\nhello\n"), 

+

88 ("hello\nbye\n", [lambda x: "="+x], "=hello\n=bye\n"), 

+

89 ("hello\nbye\n", [lambda x: "="+x, lambda x: x+"\ndone\n"], "=hello\ndone\n=bye\ndone\n"), 

+

90]) 

+

91def test_filter_text(text, filters, result): 

+

92 assert filter_text(text, filters) == result 

+

93 

+

94 

+

95class DebugTraceTest(CoverageTest): 

+

96 """Tests of debug output.""" 

+

97 

+

98 def f1_debug_output(self, debug): 

+

99 """Runs some code with `debug` option, returns the debug output.""" 

+

100 # Make code to run. 

+

101 self.make_file("f1.py", """\ 

+

102 def f1(x): 

+

103 return x+1 

+

104 

+

105 for i in range(5): 

+

106 f1(i) 

+

107 """) 

+

108 

+

109 debug_out = StringIO() 

+

110 cov = coverage.Coverage(debug=debug) 

+

111 cov._debug_file = debug_out 

+

112 self.start_import_stop(cov, "f1") 

+

113 cov.save() 

+

114 

+

115 out_lines = debug_out.getvalue() 

+

116 return out_lines 

+

117 

+

118 def test_debug_no_trace(self): 

+

119 out_lines = self.f1_debug_output([]) 

+

120 

+

121 # We should have no output at all. 

+

122 assert not out_lines 

+

123 

+

124 def test_debug_trace(self): 

+

125 out_lines = self.f1_debug_output(["trace"]) 

+

126 

+

127 # We should have a line like "Tracing 'f1.py'" 

+

128 assert "Tracing 'f1.py'" in out_lines 

+

129 

+

130 # We should have lines like "Not tracing 'collector.py'..." 

+

131 coverage_lines = re_lines( 

+

132 out_lines, 

+

133 r"^Not tracing .*: is part of coverage.py$" 

+

134 ) 

+

135 assert coverage_lines 

+

136 

+

137 def test_debug_trace_pid(self): 

+

138 out_lines = self.f1_debug_output(["trace", "pid"]) 

+

139 

+

140 # Now our lines are always prefixed with the process id. 

+

141 pid_prefix = r"^%5d\.[0-9a-f]{4}: " % os.getpid() 

+

142 pid_lines = re_lines(out_lines, pid_prefix) 

+

143 assert pid_lines == out_lines 

+

144 

+

145 # We still have some tracing, and some not tracing. 

+

146 assert re_lines(out_lines, pid_prefix + "Tracing ") 

+

147 assert re_lines(out_lines, pid_prefix + "Not tracing ") 

+

148 

+

149 def test_debug_callers(self): 

+

150 out_lines = self.f1_debug_output(["pid", "dataop", "dataio", "callers"]) 

+

151 print(out_lines) 

+

152 # For every real message, there should be a stack trace with a line like 

+

153 # "f1_debug_output : /Users/ned/coverage/tests/test_debug.py @71" 

+

154 real_messages = re_lines(out_lines, r":\d+", match=False).splitlines() 

+

155 frame_pattern = r"\s+f1_debug_output : .*tests[/\\]test_debug.py:\d+$" 

+

156 frames = re_lines(out_lines, frame_pattern).splitlines() 

+

157 assert len(real_messages) == len(frames) 

+

158 

+

159 last_line = out_lines.splitlines()[-1] 

+

160 

+

161 # The details of what to expect on the stack are empirical, and can change 

+

162 # as the code changes. This test is here to ensure that the debug code 

+

163 # continues working. It's ok to adjust these details over time. 

+

164 assert re.search(r"^\s*\d+\.\w{4}: Adding file tracers: 0 files", real_messages[-1]) 

+

165 assert re.search(r"\s+add_file_tracers : .*coverage[/\\]sqldata.py:\d+$", last_line) 

+

166 

+

167 def test_debug_config(self): 

+

168 out_lines = self.f1_debug_output(["config"]) 

+

169 

+

170 labels = """ 

+

171 attempted_config_files branch config_files_read config_file cover_pylib data_file 

+

172 debug exclude_list extra_css html_dir html_title ignore_errors 

+

173 run_include run_omit parallel partial_always_list partial_list paths 

+

174 precision show_missing source timid xml_output 

+

175 report_include report_omit 

+

176 """.split() 

+

177 for label in labels: 

+

178 label_pat = r"^\s*%s: " % label 

+

179 msg = "Incorrect lines for %r" % label 

+

180 assert 1 == len(re_lines(out_lines, label_pat).splitlines()), msg 

+

181 

+

182 def test_debug_sys(self): 

+

183 out_lines = self.f1_debug_output(["sys"]) 

+

184 

+

185 labels = """ 

+

186 version coverage cover_paths pylib_paths tracer configs_attempted config_file 

+

187 configs_read data_file python platform implementation executable 

+

188 pid cwd path environment command_line cover_match pylib_match 

+

189 """.split() 

+

190 for label in labels: 

+

191 label_pat = r"^\s*%s: " % label 

+

192 msg = "Incorrect lines for %r" % label 

+

193 assert 1 == len(re_lines(out_lines, label_pat).splitlines()), msg 

+

194 

+

195 def test_debug_sys_ctracer(self): 

+

196 out_lines = self.f1_debug_output(["sys"]) 

+

197 tracer_line = re_line(out_lines, r"CTracer:").strip() 

+

198 if env.C_TRACER: 

+

199 expected = "CTracer: available" 

+

200 else: 

+

201 expected = "CTracer: unavailable" 

+

202 assert expected == tracer_line 

+

203 

+

204 

+

205def f_one(*args, **kwargs): 

+

206 """First of the chain of functions for testing `short_stack`.""" 

+

207 return f_two(*args, **kwargs) 

+

208 

+

209def f_two(*args, **kwargs): 

+

210 """Second of the chain of functions for testing `short_stack`.""" 

+

211 return f_three(*args, **kwargs) 

+

212 

+

213def f_three(*args, **kwargs): 

+

214 """Third of the chain of functions for testing `short_stack`.""" 

+

215 return short_stack(*args, **kwargs) 

+

216 

+

217 

+

218class ShortStackTest(CoverageTest): 

+

219 """Tests of coverage.debug.short_stack.""" 

+

220 

+

221 run_in_temp_dir = False 

+

222 

+

223 def test_short_stack(self): 

+

224 stack = f_one().splitlines() 

+

225 assert len(stack) > 10 

+

226 assert "f_three" in stack[-1] 

+

227 assert "f_two" in stack[-2] 

+

228 assert "f_one" in stack[-3] 

+

229 

+

230 def test_short_stack_limit(self): 

+

231 stack = f_one(limit=5).splitlines() 

+

232 assert len(stack) == 5 

+

233 

+

234 def test_short_stack_skip(self): 

+

235 stack = f_one(skip=1).splitlines() 

+

236 assert "f_two" in stack[-1] 

+
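DebugTraceTest above drives coverage.py through its debug= option and the private _debug_file hook. A small sketch of the same pattern; f1 is a hypothetical module like the one the tests generate, and by default the debug output would go to stderr rather than a StringIO:

    import io
    import coverage

    debug_out = io.StringIO()
    cov = coverage.Coverage(debug=["trace", "pid"])  # options exercised by the tests
    cov._debug_file = debug_out                      # private hook, used the same way in f1_debug_output
    cov.start()
    import f1                                        # hypothetical module to measure
    cov.stop()
    cov.save()
    print(debug_out.getvalue())                      # e.g. "Tracing 'f1.py'" lines, each prefixed with the pid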
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_execfile_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_execfile_py.html new file mode 100644 index 000000000..9ab3903f8 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_execfile_py.html @@ -0,0 +1,306 @@
Coverage for tests/test_execfile.py: 98.148%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests for coverage.execfile""" 

+

5 

+

6import compileall 

+

7import fnmatch 

+

8import json 

+

9import os 

+

10import os.path 

+

11import re 

+

12import sys 

+

13 

+

14import pytest 

+

15 

+

16from coverage import env 

+

17from coverage.backward import binary_bytes 

+

18from coverage.execfile import run_python_file, run_python_module 

+

19from coverage.files import python_reported_file 

+

20from coverage.misc import NoCode, NoSource 

+

21 

+

22from tests.coveragetest import CoverageTest, TESTS_DIR, UsingModulesMixin 

+

23 

+

24TRY_EXECFILE = os.path.join(TESTS_DIR, "modules/process_test/try_execfile.py") 

+

25 

+

26 

+

27class RunFileTest(CoverageTest): 

+

28 """Test cases for `run_python_file`.""" 

+

29 

+

30 def test_run_python_file(self): 

+

31 run_python_file([TRY_EXECFILE, "arg1", "arg2"]) 

+

32 mod_globs = json.loads(self.stdout()) 

+

33 

+

34 # The file should think it is __main__ 

+

35 assert mod_globs['__name__'] == "__main__" 

+

36 

+

37 # It should seem to come from a file named try_execfile.py 

+

38 dunder_file = os.path.basename(mod_globs['__file__']) 

+

39 assert dunder_file == "try_execfile.py" 

+

40 

+

41 # It should have its correct module data. 

+

42 assert mod_globs['__doc__'].splitlines()[0] == "Test file for run_python_file." 

+

43 assert mod_globs['DATA'] == "xyzzy" 

+

44 assert mod_globs['FN_VAL'] == "my_fn('fooey')" 

+

45 

+

46 # It must be self-importable as __main__. 

+

47 assert mod_globs['__main__.DATA'] == "xyzzy" 

+

48 

+

49 # Argv should have the proper values. 

+

50 assert mod_globs['argv0'] == TRY_EXECFILE 

+

51 assert mod_globs['argv1-n'] == ["arg1", "arg2"] 

+

52 

+

53 # __builtins__ should have the right values, like open(). 

+

54 assert mod_globs['__builtins__.has_open'] is True 

+

55 

+

56 def test_no_extra_file(self): 

+

57 # Make sure that running a file doesn't create an extra compiled file. 

+

58 self.make_file("xxx", """\ 

+

59 desc = "a non-.py file!" 

+

60 """) 

+

61 

+

62 assert os.listdir(".") == ["xxx"] 

+

63 run_python_file(["xxx"]) 

+

64 assert os.listdir(".") == ["xxx"] 

+

65 

+

66 def test_universal_newlines(self): 

+

67 # Make sure we can read any sort of line ending. 

+

68 pylines = """# try newlines|print('Hello, world!')|""".split('|') 

+

69 for nl in ('\n', '\r\n', '\r'): 

+

70 with open('nl.py', 'wb') as fpy: 

+

71 fpy.write(nl.join(pylines).encode('utf-8')) 

+

72 run_python_file(['nl.py']) 

+

73 assert self.stdout() == "Hello, world!\n"*3 

+

74 

+

75 def test_missing_final_newline(self): 

+

76 # Make sure we can deal with a Python file with no final newline. 

+

77 self.make_file("abrupt.py", """\ 

+

78 if 1: 

+

79 a = 1 

+

80 print("a is %r" % a) 

+

81 #""") 

+

82 with open("abrupt.py") as f: 

+

83 abrupt = f.read() 

+

84 assert abrupt[-1] == '#' 

+

85 run_python_file(["abrupt.py"]) 

+

86 assert self.stdout() == "a is 1\n" 

+

87 

+

88 def test_no_such_file(self): 

+

89 path = python_reported_file('xyzzy.py') 

+

90 msg = re.escape("No file to run: '{}'".format(path)) 

+

91 with pytest.raises(NoSource, match=msg): 

+

92 run_python_file(["xyzzy.py"]) 

+

93 

+

94 def test_directory_with_main(self): 

+

95 self.make_file("with_main/__main__.py", """\ 

+

96 print("I am __main__") 

+

97 """) 

+

98 run_python_file(["with_main"]) 

+

99 assert self.stdout() == "I am __main__\n" 

+

100 

+

101 def test_directory_without_main(self): 

+

102 self.make_file("without_main/__init__.py", "") 

+

103 with pytest.raises(NoSource, match="Can't find '__main__' module in 'without_main'"): 

+

104 run_python_file(["without_main"]) 

+

105 

+

106 

+

107class RunPycFileTest(CoverageTest): 

+

108 """Test cases for `run_python_file`.""" 

+

109 

+

110 def make_pyc(self): # pylint: disable=inconsistent-return-statements 

+

111 """Create a .pyc file, and return the path to it.""" 

+

112 if env.JYTHON: 

+

113 pytest.skip("Can't make .pyc files on Jython") 

+

114 

+

115 self.make_file("compiled.py", """\ 

+

116 def doit(): 

+

117 print("I am here!") 

+

118 

+

119 doit() 

+

120 """) 

+

121 compileall.compile_dir(".", quiet=True) 

+

122 os.remove("compiled.py") 

+

123 

+

124 # Find the .pyc file! 

+

125 roots = ["."] 

+

126 prefix = getattr(sys, "pycache_prefix", None) 

+

127 if prefix: 127 ↛ 128  line 127 didn't jump to line 128, because the condition on line 127 was never true

+

128 roots.append(prefix) 

+

129 for root in roots: # pragma: part covered 

+

130 for there, _, files in os.walk(root): # pragma: part covered 

+

131 for fname in files: 

+

132 if fnmatch.fnmatch(fname, "compiled*.pyc"): 132 ↛ 131  line 132 didn't jump to line 131, because the condition on line 132 was never false

+

133 return os.path.join(there, fname) 

+

134 

+

135 def test_running_pyc(self): 

+

136 pycfile = self.make_pyc() 

+

137 run_python_file([pycfile]) 

+

138 assert self.stdout() == "I am here!\n" 

+

139 

+

140 def test_running_pyo(self): 

+

141 pycfile = self.make_pyc() 

+

142 pyofile = re.sub(r"[.]pyc$", ".pyo", pycfile) 

+

143 assert pycfile != pyofile 

+

144 os.rename(pycfile, pyofile) 

+

145 run_python_file([pyofile]) 

+

146 assert self.stdout() == "I am here!\n" 

+

147 

+

148 def test_running_pyc_from_wrong_python(self): 

+

149 pycfile = self.make_pyc() 

+

150 

+

151 # Jam Python 2.1 magic number into the .pyc file. 

+

152 with open(pycfile, "r+b") as fpyc: 

+

153 fpyc.seek(0) 

+

154 fpyc.write(binary_bytes([0x2a, 0xeb, 0x0d, 0x0a])) 

+

155 

+

156 with pytest.raises(NoCode, match="Bad magic number in .pyc file"): 

+

157 run_python_file([pycfile]) 

+

158 

+

159 # In some environments, the pycfile persists and pollutes another test. 

+

160 os.remove(pycfile) 

+

161 

+

162 def test_no_such_pyc_file(self): 

+

163 path = python_reported_file('xyzzy.pyc') 

+

164 msg = re.escape("No file to run: '{}'".format(path)) 

+

165 with pytest.raises(NoCode, match=msg): 

+

166 run_python_file(["xyzzy.pyc"]) 

+

167 

+

168 def test_running_py_from_binary(self): 

+

169 # Use make_file to get the bookkeeping. Ideally, it would 

+

170 # be able to write binary files. 

+

171 bf = self.make_file("binary") 

+

172 with open(bf, "wb") as f: 

+

173 f.write(b'\x7fELF\x02\x01\x01\x00\x00\x00') 

+

174 

+

175 path = python_reported_file('binary') 

+

176 msg = ( 

+

177 re.escape("Couldn't run '{}' as Python code: ".format(path)) + 

+

178 r"(TypeError|ValueError): " 

+

179 r"(" 

+

180 r"compile\(\) expected string without null bytes" # for py2 

+

181 r"|" 

+

182 r"source code string cannot contain null bytes" # for py3 

+

183 r")" 

+

184 ) 

+

185 with pytest.raises(Exception, match=msg): 

+

186 run_python_file([bf]) 

+

187 

+

188 

+

189class RunModuleTest(UsingModulesMixin, CoverageTest): 

+

190 """Test run_python_module.""" 

+

191 

+

192 run_in_temp_dir = False 

+

193 

+

194 def test_runmod1(self): 

+

195 run_python_module(["runmod1", "hello"]) 

+

196 out, err = self.stdouterr() 

+

197 assert out == "runmod1: passed hello\n" 

+

198 assert err == "" 

+

199 

+

200 def test_runmod2(self): 

+

201 run_python_module(["pkg1.runmod2", "hello"]) 

+

202 out, err = self.stdouterr() 

+

203 assert out == "pkg1.__init__: pkg1\nrunmod2: passed hello\n" 

+

204 assert err == "" 

+

205 

+

206 def test_runmod3(self): 

+

207 run_python_module(["pkg1.sub.runmod3", "hello"]) 

+

208 out, err = self.stdouterr() 

+

209 assert out == "pkg1.__init__: pkg1\nrunmod3: passed hello\n" 

+

210 assert err == "" 

+

211 

+

212 def test_pkg1_main(self): 

+

213 run_python_module(["pkg1", "hello"]) 

+

214 out, err = self.stdouterr() 

+

215 assert out == "pkg1.__init__: pkg1\npkg1.__main__: passed hello\n" 

+

216 assert err == "" 

+

217 

+

218 def test_pkg1_sub_main(self): 

+

219 run_python_module(["pkg1.sub", "hello"]) 

+

220 out, err = self.stdouterr() 

+

221 assert out == "pkg1.__init__: pkg1\npkg1.sub.__main__: passed hello\n" 

+

222 assert err == "" 

+

223 

+

224 def test_pkg1_init(self): 

+

225 run_python_module(["pkg1.__init__", "wut?"]) 

+

226 out, err = self.stdouterr() 

+

227 assert out == "pkg1.__init__: pkg1\npkg1.__init__: __main__\n" 

+

228 assert err == "" 

+

229 

+

230 def test_no_such_module(self): 

+

231 with pytest.raises(NoSource, match="No module named '?i_dont_exist'?"): 

+

232 run_python_module(["i_dont_exist"]) 

+

233 with pytest.raises(NoSource, match="No module named '?i'?"): 

+

234 run_python_module(["i.dont_exist"]) 

+

235 with pytest.raises(NoSource, match="No module named '?i'?"): 

+

236 run_python_module(["i.dont.exist"]) 

+

237 

+

238 def test_no_main(self): 

+

239 with pytest.raises(NoSource): 

+

240 run_python_module(["pkg2", "hi"]) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_filereporter_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_filereporter_py.html new file mode 100644 index 000000000..88aa0b8eb --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_filereporter_py.html @@ -0,0 +1,170 @@ + + + + + + Coverage for tests/test_filereporter.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests for FileReporters""" 

+

5 

+

6import os 

+

7 

+

8from coverage.plugin import FileReporter 

+

9from coverage.python import PythonFileReporter 

+

10 

+

11from tests.coveragetest import CoverageTest, UsingModulesMixin 

+

12 

+

13# pylint: disable=import-error 

+

14# Unable to import 'aa' (No module named aa) 

+

15 

+

16 

+

17def native(filename): 

+

18 """Make `filename` into a native form.""" 

+

19 return filename.replace("/", os.sep) 

+

20 

+

21 

+

22class FileReporterTest(UsingModulesMixin, CoverageTest): 

+

23 """Tests for FileReporter classes.""" 

+

24 

+

25 run_in_temp_dir = False 

+

26 

+

27 def test_filenames(self): 

+

28 acu = PythonFileReporter("aa/afile.py") 

+

29 bcu = PythonFileReporter("aa/bb/bfile.py") 

+

30 ccu = PythonFileReporter("aa/bb/cc/cfile.py") 

+

31 assert acu.relative_filename() == "aa/afile.py" 

+

32 assert bcu.relative_filename() == "aa/bb/bfile.py" 

+

33 assert ccu.relative_filename() == "aa/bb/cc/cfile.py" 

+

34 assert acu.source() == "# afile.py\n" 

+

35 assert bcu.source() == "# bfile.py\n" 

+

36 assert ccu.source() == "# cfile.py\n" 

+

37 

+

38 def test_odd_filenames(self): 

+

39 acu = PythonFileReporter("aa/afile.odd.py") 

+

40 bcu = PythonFileReporter("aa/bb/bfile.odd.py") 

+

41 b2cu = PythonFileReporter("aa/bb.odd/bfile.py") 

+

42 assert acu.relative_filename() == "aa/afile.odd.py" 

+

43 assert bcu.relative_filename() == "aa/bb/bfile.odd.py" 

+

44 assert b2cu.relative_filename() == "aa/bb.odd/bfile.py" 

+

45 assert acu.source() == "# afile.odd.py\n" 

+

46 assert bcu.source() == "# bfile.odd.py\n" 

+

47 assert b2cu.source() == "# bfile.py\n" 

+

48 

+

49 def test_modules(self): 

+

50 import aa 

+

51 import aa.bb 

+

52 import aa.bb.cc 

+

53 

+

54 acu = PythonFileReporter(aa) 

+

55 bcu = PythonFileReporter(aa.bb) 

+

56 ccu = PythonFileReporter(aa.bb.cc) 

+

57 assert acu.relative_filename() == native("aa/__init__.py") 

+

58 assert bcu.relative_filename() == native("aa/bb/__init__.py") 

+

59 assert ccu.relative_filename() == native("aa/bb/cc/__init__.py") 

+

60 assert acu.source() == "# aa\n" 

+

61 assert bcu.source() == "# bb\n" 

+

62 assert ccu.source() == "" # yes, empty 

+

63 

+

64 def test_module_files(self): 

+

65 import aa.afile 

+

66 import aa.bb.bfile 

+

67 import aa.bb.cc.cfile 

+

68 

+

69 acu = PythonFileReporter(aa.afile) 

+

70 bcu = PythonFileReporter(aa.bb.bfile) 

+

71 ccu = PythonFileReporter(aa.bb.cc.cfile) 

+

72 assert acu.relative_filename() == native("aa/afile.py") 

+

73 assert bcu.relative_filename() == native("aa/bb/bfile.py") 

+

74 assert ccu.relative_filename() == native("aa/bb/cc/cfile.py") 

+

75 assert acu.source() == "# afile.py\n" 

+

76 assert bcu.source() == "# bfile.py\n" 

+

77 assert ccu.source() == "# cfile.py\n" 

+

78 

+

79 def test_comparison(self): 

+

80 acu = FileReporter("aa/afile.py") 

+

81 acu2 = FileReporter("aa/afile.py") 

+

82 zcu = FileReporter("aa/zfile.py") 

+

83 bcu = FileReporter("aa/bb/bfile.py") 

+

84 assert acu == acu2 and acu <= acu2 and acu >= acu2 # pylint: disable=chained-comparison 

+

85 assert acu < zcu and acu <= zcu and acu != zcu 

+

86 assert zcu > acu and zcu >= acu and zcu != acu 

+

87 assert acu < bcu and acu <= bcu and acu != bcu 

+

88 assert bcu > acu and bcu >= acu and bcu != acu 

+

89 

+

90 def test_egg(self): 

+

91 # Test that we can get files out of eggs, and read their source files. 

+

92 # The egg1 module is installed by an action in igor.py. 

+

93 import egg1 

+

94 import egg1.egg1 

+

95 

+

96 # Verify that we really imported from an egg. If we did, then the 

+

97 # __file__ won't be an actual file, because one of the "directories" 

+

98 # in the path is actually the .egg zip file. 

+

99 self.assert_doesnt_exist(egg1.__file__) 

+

100 

+

101 ecu = PythonFileReporter(egg1) 

+

102 eecu = PythonFileReporter(egg1.egg1) 

+

103 assert ecu.source() == u"" 

+

104 assert u"# My egg file!" in eecu.source().splitlines() 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_files_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_files_py.html new file mode 100644 index 000000000..5758db98d --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_files_py.html @@ -0,0 +1,472 @@ + + + + + + Coverage for tests/test_files.py: 100.000% + + + + + + + + + + +

1# coding: utf-8 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Tests for files.py""" 

+

6 

+

7import os 

+

8import os.path 

+

9 

+

10import pytest 

+

11 

+

12from coverage import files 

+

13from coverage.files import ( 

+

14 TreeMatcher, FnmatchMatcher, ModuleMatcher, PathAliases, 

+

15 find_python_files, abs_file, actual_path, flat_rootname, fnmatches_to_regex, 

+

16) 

+

17from coverage.misc import CoverageException 

+

18from coverage import env 

+

19 

+

20from tests.coveragetest import CoverageTest 

+

21 

+

22 

+

23class FilesTest(CoverageTest): 

+

24 """Tests of coverage.files.""" 

+

25 

+

26 def abs_path(self, p): 

+

27 """Return the absolute path for `p`.""" 

+

28 return os.path.join(abs_file(os.getcwd()), os.path.normpath(p)) 

+

29 

+

30 def test_simple(self): 

+

31 self.make_file("hello.py") 

+

32 files.set_relative_directory() 

+

33 assert files.relative_filename(u"hello.py") == u"hello.py" 

+

34 a = self.abs_path("hello.py") 

+

35 assert a != "hello.py" 

+

36 assert files.relative_filename(a) == "hello.py" 

+

37 

+

38 def test_peer_directories(self): 

+

39 self.make_file("sub/proj1/file1.py") 

+

40 self.make_file("sub/proj2/file2.py") 

+

41 a1 = self.abs_path("sub/proj1/file1.py") 

+

42 a2 = self.abs_path("sub/proj2/file2.py") 

+

43 d = os.path.normpath("sub/proj1") 

+

44 os.chdir(d) 

+

45 files.set_relative_directory() 

+

46 assert files.relative_filename(a1) == "file1.py" 

+

47 assert files.relative_filename(a2) == a2 

+

48 

+

49 def test_filepath_contains_absolute_prefix_twice(self): 

+

50 # https://github.com/nedbat/coveragepy/issues/194 

+

51 # Build a path that has two pieces matching the absolute path prefix. 

+

52 # Technically, this test doesn't do that on Windows, but drive 

+

53 # letters make that impractical to achieve. 

+

54 files.set_relative_directory() 

+

55 d = abs_file(os.curdir) 

+

56 trick = os.path.splitdrive(d)[1].lstrip(os.path.sep) 

+

57 rel = os.path.join('sub', trick, 'file1.py') 

+

58 assert files.relative_filename(abs_file(rel)) == rel 

+

59 

+

60 def test_canonical_filename_ensure_cache_hit(self): 

+

61 self.make_file("sub/proj1/file1.py") 

+

62 d = actual_path(self.abs_path("sub/proj1")) 

+

63 os.chdir(d) 

+

64 files.set_relative_directory() 

+

65 canonical_path = files.canonical_filename('sub/proj1/file1.py') 

+

66 assert canonical_path == self.abs_path('file1.py') 

+

67 # After the filename has been converted, it should be in the cache. 

+

68 assert 'sub/proj1/file1.py' in files.CANONICAL_FILENAME_CACHE 

+

69 assert files.canonical_filename('sub/proj1/file1.py') == self.abs_path('file1.py') 

+

70 

+

71 

+

72@pytest.mark.parametrize("original, flat", [ 

+

73 (u"a/b/c.py", u"a_b_c_py"), 

+

74 (u"c:\\foo\\bar.html", u"_foo_bar_html"), 

+

75 (u"Montréal/☺/conf.py", u"Montréal_☺_conf_py"), 

+

76 ( # original: 

+

77 u"c:\\lorem\\ipsum\\quia\\dolor\\sit\\amet\\consectetur\\adipisci\\velit\\sed\\quia\\non" 

+

78 u"\\numquam\\eius\\modi\\tempora\\incidunt\\ut\\labore\\et\\dolore\\magnam\\aliquam" 

+

79 u"\\quaerat\\voluptatem\\ut\\enim\\ad\\minima\\veniam\\quis\\nostrum\\exercitationem" 

+

80 u"\\ullam\\corporis\\suscipit\\laboriosam\\Montréal\\☺\\my_program.py", 

+

81 # flat: 

+

82 u"re_et_dolore_magnam_aliquam_quaerat_voluptatem_ut_enim_ad_minima_veniam_quis_" 

+

83 u"nostrum_exercitationem_ullam_corporis_suscipit_laboriosam_Montréal_☺_my_program_py_" 

+

84 u"97eaca41b860faaa1a21349b1f3009bb061cf0a8" 

+

85 ), 

+

86]) 

+

87def test_flat_rootname(original, flat): 

+

88 assert flat_rootname(original) == flat 

+

89 

+

90 

+

91@pytest.mark.parametrize( 

+

92 "patterns, case_insensitive, partial," 

+

93 "matches," 

+

94 "nomatches", 

+

95[ 

+

96 ( 

+

97 ["abc", "xyz"], False, False, 

+

98 ["abc", "xyz"], 

+

99 ["ABC", "xYz", "abcx", "xabc", "axyz", "xyza"], 

+

100 ), 

+

101 ( 

+

102 ["abc", "xyz"], True, False, 

+

103 ["abc", "xyz", "Abc", "XYZ", "AbC"], 

+

104 ["abcx", "xabc", "axyz", "xyza"], 

+

105 ), 

+

106 ( 

+

107 ["abc/hi.py"], True, False, 

+

108 ["abc/hi.py", "ABC/hi.py", r"ABC\hi.py"], 

+

109 ["abc_hi.py", "abc/hi.pyc"], 

+

110 ), 

+

111 ( 

+

112 [r"abc\hi.py"], True, False, 

+

113 [r"abc\hi.py", r"ABC\hi.py"], 

+

114 ["abc/hi.py", "ABC/hi.py", "abc_hi.py", "abc/hi.pyc"], 

+

115 ), 

+

116 ( 

+

117 ["abc/*/hi.py"], True, False, 

+

118 ["abc/foo/hi.py", "ABC/foo/bar/hi.py", r"ABC\foo/bar/hi.py"], 

+

119 ["abc/hi.py", "abc/hi.pyc"], 

+

120 ), 

+

121 ( 

+

122 ["abc/[a-f]*/hi.py"], True, False, 

+

123 ["abc/foo/hi.py", "ABC/foo/bar/hi.py", r"ABC\foo/bar/hi.py"], 

+

124 ["abc/zoo/hi.py", "abc/hi.py", "abc/hi.pyc"], 

+

125 ), 

+

126 ( 

+

127 ["abc/"], True, True, 

+

128 ["abc/foo/hi.py", "ABC/foo/bar/hi.py", r"ABC\foo/bar/hi.py"], 

+

129 ["abcd/foo.py", "xabc/hi.py"], 

+

130 ), 

+

131]) 

+

132def test_fnmatches_to_regex(patterns, case_insensitive, partial, matches, nomatches): 

+

133 regex = fnmatches_to_regex(patterns, case_insensitive=case_insensitive, partial=partial) 

+

134 for s in matches: 

+

135 assert regex.match(s) 

+

136 for s in nomatches: 

+

137 assert not regex.match(s) 

+

138 

+

139 

+

140class MatcherTest(CoverageTest): 

+

141 """Tests of file matchers.""" 

+

142 

+

143 def setup_test(self): 

+

144 super(MatcherTest, self).setup_test() 

+

145 files.set_relative_directory() 

+

146 

+

147 def assertMatches(self, matcher, filepath, matches): 

+

148 """The `matcher` should agree with `matches` about `filepath`.""" 

+

149 canonical = files.canonical_filename(filepath) 

+

150 msg = "File %s should have matched as %s" % (filepath, matches) 

+

151 assert matches == matcher.match(canonical), msg 

+

152 

+

153 def test_tree_matcher(self): 

+

154 matches_to_try = [ 

+

155 (self.make_file("sub/file1.py"), True), 

+

156 (self.make_file("sub/file2.c"), True), 

+

157 (self.make_file("sub2/file3.h"), False), 

+

158 (self.make_file("sub3/file4.py"), True), 

+

159 (self.make_file("sub3/file5.c"), False), 

+

160 ] 

+

161 trees = [ 

+

162 files.canonical_filename("sub"), 

+

163 files.canonical_filename("sub3/file4.py"), 

+

164 ] 

+

165 tm = TreeMatcher(trees) 

+

166 assert tm.info() == trees 

+

167 for filepath, matches in matches_to_try: 

+

168 self.assertMatches(tm, filepath, matches) 

+

169 

+

170 def test_module_matcher(self): 

+

171 matches_to_try = [ 

+

172 ('test', True), 

+

173 ('trash', False), 

+

174 ('testing', False), 

+

175 ('test.x', True), 

+

176 ('test.x.y.z', True), 

+

177 ('py', False), 

+

178 ('py.t', False), 

+

179 ('py.test', True), 

+

180 ('py.testing', False), 

+

181 ('py.test.buz', True), 

+

182 ('py.test.buz.baz', True), 

+

183 ('__main__', False), 

+

184 ('mymain', True), 

+

185 ('yourmain', False), 

+

186 ] 

+

187 modules = ['test', 'py.test', 'mymain'] 

+

188 mm = ModuleMatcher(modules) 

+

189 assert mm.info() == modules 

+

190 for modulename, matches in matches_to_try: 

+

191 assert mm.match(modulename) == matches, modulename 

+

192 

+

193 def test_fnmatch_matcher(self): 

+

194 matches_to_try = [ 

+

195 (self.make_file("sub/file1.py"), True), 

+

196 (self.make_file("sub/file2.c"), False), 

+

197 (self.make_file("sub2/file3.h"), True), 

+

198 (self.make_file("sub3/file4.py"), True), 

+

199 (self.make_file("sub3/file5.c"), False), 

+

200 ] 

+

201 fnm = FnmatchMatcher(["*.py", "*/sub2/*"]) 

+

202 assert fnm.info() == ["*.py", "*/sub2/*"] 

+

203 for filepath, matches in matches_to_try: 

+

204 self.assertMatches(fnm, filepath, matches) 

+

205 

+

206 def test_fnmatch_matcher_overload(self): 

+

207 fnm = FnmatchMatcher(["*x%03d*.txt" % i for i in range(500)]) 

+

208 self.assertMatches(fnm, "x007foo.txt", True) 

+

209 self.assertMatches(fnm, "x123foo.txt", True) 

+

210 self.assertMatches(fnm, "x798bar.txt", False) 

+

211 

+

212 def test_fnmatch_windows_paths(self): 

+

213 # We should be able to match Windows paths even if we are running on 

+

214 # a non-Windows OS. 

+

215 fnm = FnmatchMatcher(["*/foo.py"]) 

+

216 self.assertMatches(fnm, r"dir\foo.py", True) 

+

217 fnm = FnmatchMatcher([r"*\foo.py"]) 

+

218 self.assertMatches(fnm, r"dir\foo.py", True) 

+

219 

+

220 

+

221class PathAliasesTest(CoverageTest): 

+

222 """Tests for coverage/files.py:PathAliases""" 

+

223 

+

224 run_in_temp_dir = False 

+

225 

+

226 def assert_mapped(self, aliases, inp, out): 

+

227 """Assert that `inp` mapped through `aliases` produces `out`. 

+

228 

+

229 `out` is canonicalized first, since aliases always produce 

+

230 canonicalized paths. 

+

231 

+

232 """ 

+

233 aliases.pprint() 

+

234 print(inp) 

+

235 print(out) 

+

236 assert aliases.map(inp) == files.canonical_filename(out) 

+

237 

+

238 def assert_unchanged(self, aliases, inp): 

+

239 """Assert that `inp` mapped through `aliases` is unchanged.""" 

+

240 assert aliases.map(inp) == inp 

+

241 

+

242 def test_noop(self): 

+

243 aliases = PathAliases() 

+

244 self.assert_unchanged(aliases, '/ned/home/a.py') 

+

245 

+

246 def test_nomatch(self): 

+

247 aliases = PathAliases() 

+

248 aliases.add('/home/*/src', './mysrc') 

+

249 self.assert_unchanged(aliases, '/home/foo/a.py') 

+

250 

+

251 def test_wildcard(self): 

+

252 aliases = PathAliases() 

+

253 aliases.add('/ned/home/*/src', './mysrc') 

+

254 self.assert_mapped(aliases, '/ned/home/foo/src/a.py', './mysrc/a.py') 

+

255 

+

256 aliases = PathAliases() 

+

257 aliases.add('/ned/home/*/src/', './mysrc') 

+

258 self.assert_mapped(aliases, '/ned/home/foo/src/a.py', './mysrc/a.py') 

+

259 

+

260 def test_no_accidental_match(self): 

+

261 aliases = PathAliases() 

+

262 aliases.add('/home/*/src', './mysrc') 

+

263 self.assert_unchanged(aliases, '/home/foo/srcetc') 

+

264 

+

265 def test_multiple_patterns(self): 

+

266 aliases = PathAliases() 

+

267 aliases.add('/home/*/src', './mysrc') 

+

268 aliases.add('/lib/*/libsrc', './mylib') 

+

269 self.assert_mapped(aliases, '/home/foo/src/a.py', './mysrc/a.py') 

+

270 self.assert_mapped(aliases, '/lib/foo/libsrc/a.py', './mylib/a.py') 

+

271 

+

272 def test_cant_have_wildcard_at_end(self): 

+

273 aliases = PathAliases() 

+

274 msg = "Pattern must not end with wildcards." 

+

275 with pytest.raises(CoverageException, match=msg): 

+

276 aliases.add("/ned/home/*", "fooey") 

+

277 with pytest.raises(CoverageException, match=msg): 

+

278 aliases.add("/ned/home/*/", "fooey") 

+

279 with pytest.raises(CoverageException, match=msg): 

+

280 aliases.add("/ned/home/*/*/", "fooey") 

+

281 

+

282 def test_no_accidental_munging(self): 

+

283 aliases = PathAliases() 

+

284 aliases.add(r'c:\Zoo\boo', 'src/') 

+

285 aliases.add('/home/ned$', 'src/') 

+

286 self.assert_mapped(aliases, r'c:\Zoo\boo\foo.py', 'src/foo.py') 

+

287 self.assert_mapped(aliases, r'/home/ned$/foo.py', 'src/foo.py') 

+

288 

+

289 def test_paths_are_os_corrected(self): 

+

290 aliases = PathAliases() 

+

291 aliases.add('/home/ned/*/src', './mysrc') 

+

292 aliases.add(r'c:\ned\src', './mysrc') 

+

293 self.assert_mapped(aliases, r'C:\Ned\src\sub\a.py', './mysrc/sub/a.py') 

+

294 

+

295 aliases = PathAliases() 

+

296 aliases.add('/home/ned/*/src', r'.\mysrc') 

+

297 aliases.add(r'c:\ned\src', r'.\mysrc') 

+

298 self.assert_mapped(aliases, r'/home/ned/foo/src/sub/a.py', r'.\mysrc\sub\a.py') 

+

299 

+

300 def test_windows_on_linux(self): 

+

301 # https://github.com/nedbat/coveragepy/issues/618 

+

302 lin = "*/project/module/" 

+

303 win = "*\\project\\module\\" 

+

304 

+

305 # Try the paths in both orders. 

+

306 for paths in [[lin, win], [win, lin]]: 

+

307 aliases = PathAliases() 

+

308 for path in paths: 

+

309 aliases.add(path, "project/module") 

+

310 self.assert_mapped( 

+

311 aliases, 

+

312 "C:\\a\\path\\somewhere\\coveragepy_test\\project\\module\\tests\\file.py", 

+

313 "project/module/tests/file.py" 

+

314 ) 

+

315 

+

316 def test_linux_on_windows(self): 

+

317 # https://github.com/nedbat/coveragepy/issues/618 

+

318 lin = "*/project/module/" 

+

319 win = "*\\project\\module\\" 

+

320 

+

321 # Try the paths in both orders. 

+

322 for paths in [[lin, win], [win, lin]]: 

+

323 aliases = PathAliases() 

+

324 for path in paths: 

+

325 aliases.add(path, "project\\module") 

+

326 self.assert_mapped( 

+

327 aliases, 

+

328 "C:/a/path/somewhere/coveragepy_test/project/module/tests/file.py", 

+

329 "project\\module\\tests\\file.py" 

+

330 ) 

+

331 

+

332 def test_multiple_wildcard(self): 

+

333 aliases = PathAliases() 

+

334 aliases.add('/home/jenkins/*/a/*/b/*/django', './django') 

+

335 self.assert_mapped( 

+

336 aliases, 

+

337 '/home/jenkins/xx/a/yy/b/zz/django/foo/bar.py', 

+

338 './django/foo/bar.py' 

+

339 ) 

+

340 

+

341 def test_windows_root_paths(self): 

+

342 aliases = PathAliases() 

+

343 aliases.add('X:\\', '/tmp/src') 

+

344 self.assert_mapped( 

+

345 aliases, 

+

346 "X:\\a\\file.py", 

+

347 "/tmp/src/a/file.py" 

+

348 ) 

+

349 self.assert_mapped( 

+

350 aliases, 

+

351 "X:\\file.py", 

+

352 "/tmp/src/file.py" 

+

353 ) 

+

354 

+

355 def test_leading_wildcard(self): 

+

356 aliases = PathAliases() 

+

357 aliases.add('*/d1', './mysrc1') 

+

358 aliases.add('*/d2', './mysrc2') 

+

359 self.assert_mapped(aliases, '/foo/bar/d1/x.py', './mysrc1/x.py') 

+

360 self.assert_mapped(aliases, '/foo/bar/d2/y.py', './mysrc2/y.py') 

+

361 

+

362 def test_dot(self): 

+

363 cases = ['.', '..', '../other'] 

+

364 if not env.WINDOWS: 

+

365 # The root test case was added for the manylinux Docker images, 

+

366 # and I'm not sure how it should work on Windows, so skip it. 

+

367 cases += ['/'] 

+

368 for d in cases: 

+

369 aliases = PathAliases() 

+

370 aliases.add(d, '/the/source') 

+

371 the_file = os.path.join(d, 'a.py') 

+

372 the_file = os.path.expanduser(the_file) 

+

373 the_file = os.path.abspath(os.path.realpath(the_file)) 

+

374 

+

375 assert '~' not in the_file # to be sure the test is pure. 

+

376 self.assert_mapped(aliases, the_file, '/the/source/a.py') 

+

377 

+

378 

+

379class FindPythonFilesTest(CoverageTest): 

+

380 """Tests of `find_python_files`.""" 

+

381 

+

382 def test_find_python_files(self): 

+

383 self.make_file("sub/a.py") 

+

384 self.make_file("sub/b.py") 

+

385 self.make_file("sub/x.c") # nope: not .py 

+

386 self.make_file("sub/ssub/__init__.py") 

+

387 self.make_file("sub/ssub/s.py") 

+

388 self.make_file("sub/ssub/~s.py") # nope: editor effluvia 

+

389 self.make_file("sub/lab/exp.py") # nope: no __init__.py 

+

390 self.make_file("sub/windows.pyw") 

+

391 py_files = set(find_python_files("sub")) 

+

392 self.assert_same_files(py_files, [ 

+

393 "sub/a.py", "sub/b.py", 

+

394 "sub/ssub/__init__.py", "sub/ssub/s.py", 

+

395 "sub/windows.pyw", 

+

396 ]) 

+

397 

+

398 

+

399@pytest.mark.skipif(not env.WINDOWS, reason="Only need to run Windows tests on Windows.") 

+

400class WindowsFileTest(CoverageTest): 

+

401 """Windows-specific tests of file name handling.""" 

+

402 

+

403 run_in_temp_dir = False 

+

404 

+

405 def test_actual_path(self): 

+

406 assert actual_path(r'c:\Windows') == actual_path(r'C:\wINDOWS') 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_html_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_html_py.html new file mode 100644 index 000000000..7cc949285 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_html_py.html @@ -0,0 +1,1235 @@ + + + + + + Coverage for tests/test_html.py: 100.000% + + + + + + + + + + +

1# -*- coding: utf-8 -*- 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Tests that HTML generation is awesome.""" 

+

6 

+

7import datetime 

+

8import glob 

+

9import json 

+

10import os 

+

11import os.path 

+

12import re 

+

13import sys 

+

14 

+

15import mock 

+

16import pytest 

+

17 

+

18import coverage 

+

19from coverage.backward import unicode_class 

+

20from coverage import env 

+

21from coverage.files import abs_file, flat_rootname 

+

22import coverage.html 

+

23from coverage.misc import CoverageException, NotPython, NoSource 

+

24from coverage.report import get_analysis_to_report 

+

25 

+

26from tests.coveragetest import CoverageTest, TESTS_DIR 

+

27from tests.goldtest import gold_path 

+

28from tests.goldtest import compare, contains, doesnt_contain, contains_any 

+

29from tests.helpers import change_dir 

+

30 

+

31 

+

32class HtmlTestHelpers(CoverageTest): 

+

33 """Methods that help with HTML tests.""" 

+

34 

+

35 def create_initial_files(self): 

+

36 """Create the source files we need to run these tests.""" 

+

37 self.make_file("main_file.py", """\ 

+

38 import helper1, helper2 

+

39 helper1.func1(12) 

+

40 helper2.func2(12) 

+

41 """) 

+

42 self.make_file("helper1.py", """\ 

+

43 def func1(x): 

+

44 if x % 2: 

+

45 print("odd") 

+

46 """) 

+

47 self.make_file("helper2.py", """\ 

+

48 def func2(x): 

+

49 print("x is %d" % x) 

+

50 """) 

+

51 

+

52 def run_coverage(self, covargs=None, htmlargs=None): 

+

53 """Run coverage.py on main_file.py, and create an HTML report.""" 

+

54 self.clean_local_file_imports() 

+

55 cov = coverage.Coverage(**(covargs or {})) 

+

56 self.start_import_stop(cov, "main_file") 

+

57 return cov.html_report(**(htmlargs or {})) 

+

58 

+

59 def get_html_report_content(self, module): 

+

60 """Return the content of the HTML report for `module`.""" 

+

61 filename = module.replace(".", "_").replace("/", "_") + ".html" 

+

62 filename = os.path.join("htmlcov", filename) 

+

63 with open(filename) as f: 

+

64 return f.read() 

+

65 

+

66 def get_html_index_content(self): 

+

67 """Return the content of index.html. 

+

68 

+

69 Timestamps are replaced with a placeholder so that clocks don't matter. 

+

70 

+

71 """ 

+

72 with open("htmlcov/index.html") as f: 

+

73 index = f.read() 

+

74 index = re.sub( 

+

75 r"created at \d{4}-\d{2}-\d{2} \d{2}:\d{2} \+\d{4}", 

+

76 r"created at YYYY-MM-DD HH:MM +ZZZZ", 

+

77 index, 

+

78 ) 

+

79 index = re.sub( 

+

80 r"created at \d{4}-\d{2}-\d{2} \d{2}:\d{2}", 

+

81 r"created at YYYY-MM-DD HH:MM", 

+

82 index, 

+

83 ) 

+

84 return index 

+

85 

+

86 def assert_correct_timestamp(self, html): 

+

87 """Extract the timestamp from `html`, and assert it is recent.""" 

+

88 timestamp_pat = r"created at (\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2})" 

+

89 m = re.search(timestamp_pat, html) 

+

90 assert m, "Didn't find a timestamp!" 

+

91 timestamp = datetime.datetime(*map(int, m.groups())) 

+

92 # The timestamp only records the minute, so the delta could be from 

+

93 # 12:00 to 12:01:59, or two minutes. 

+

94 self.assert_recent_datetime( 

+

95 timestamp, 

+

96 seconds=120, 

+

97 msg="Timestamp is wrong: {}".format(timestamp), 

+

98 ) 

+

99 

+

100 

+

101class FileWriteTracker(object): 

+

102 """A fake object to track how `open` is used to write files.""" 

+

103 def __init__(self, written): 

+

104 self.written = written 

+

105 

+

106 def open(self, filename, mode="r"): 

+

107 """Be just like `open`, but write written file names to `self.written`.""" 

+

108 if mode.startswith("w"): 

+

109 self.written.add(filename.replace('\\', '/')) 

+

110 return open(filename, mode) 

+

111 

+

112 

+

113class HtmlDeltaTest(HtmlTestHelpers, CoverageTest): 

+

114 """Tests of the HTML delta speed-ups.""" 

+

115 

+

116 def setup_test(self): 

+

117 super(HtmlDeltaTest, self).setup_test() 

+

118 

+

119 # At least one of our tests monkey-patches the version of coverage.py, 

+

120 # so grab it here to restore it later. 

+

121 self.real_coverage_version = coverage.__version__ 

+

122 self.addCleanup(setattr, coverage, "__version__", self.real_coverage_version) 

+

123 

+

124 self.files_written = None 

+

125 

+

126 def run_coverage(self, covargs=None, htmlargs=None): 

+

127 """Run coverage in-process for the delta tests. 

+

128 

+

129 For the delta tests, we always want `source=.` and we want to track 

+

130 which files are written. `self.files_written` will be the file names 

+

131 that were opened for writing in html.py. 

+

132 

+

133 """ 

+

134 covargs = covargs or {} 

+

135 covargs['source'] = "." 

+

136 self.files_written = set() 

+

137 mock_open = FileWriteTracker(self.files_written).open 

+

138 with mock.patch("coverage.html.open", mock_open): 

+

139 return super(HtmlDeltaTest, self).run_coverage(covargs=covargs, htmlargs=htmlargs) 

+

140 

+

141 def assert_htmlcov_files_exist(self): 

+

142 """Assert that all the expected htmlcov files exist.""" 

+

143 self.assert_exists("htmlcov/index.html") 

+

144 self.assert_exists("htmlcov/main_file_py.html") 

+

145 self.assert_exists("htmlcov/helper1_py.html") 

+

146 self.assert_exists("htmlcov/helper2_py.html") 

+

147 self.assert_exists("htmlcov/style.css") 

+

148 self.assert_exists("htmlcov/coverage_html.js") 

+

149 

+

150 def test_html_created(self): 

+

151 # Test basic HTML generation: files should be created. 

+

152 self.create_initial_files() 

+

153 self.run_coverage() 

+

154 self.assert_htmlcov_files_exist() 

+

155 

+

156 def test_html_delta_from_source_change(self): 

+

157 # HTML generation can create only the files that have changed. 

+

158 # In this case, helper1 changes because its source is different. 

+

159 self.create_initial_files() 

+

160 self.run_coverage() 

+

161 index1 = self.get_html_index_content() 

+

162 

+

163 # Now change a file (but only in a comment) and do it again. 

+

164 self.make_file("helper1.py", """\ 

+

165 def func1(x): # A nice function 

+

166 if x % 2: 

+

167 print("odd") 

+

168 """) 

+

169 

+

170 self.run_coverage() 

+

171 

+

172 # Only the changed files should have been created. 

+

173 self.assert_htmlcov_files_exist() 

+

174 assert "htmlcov/index.html" in self.files_written 

+

175 assert "htmlcov/helper1_py.html" in self.files_written 

+

176 assert "htmlcov/helper2_py.html" not in self.files_written 

+

177 assert "htmlcov/main_file_py.html" not in self.files_written 

+

178 

+

179 # Because the source change was only a comment, the index is the same. 

+

180 index2 = self.get_html_index_content() 

+

181 assert index1 == index2 

+

182 

+

183 def test_html_delta_from_coverage_change(self): 

+

184 # HTML generation can create only the files that have changed. 

+

185 # In this case, helper1 changes because its coverage is different. 

+

186 self.create_initial_files() 

+

187 self.run_coverage() 

+

188 

+

189 # Now change a file and do it again. main_file is different, and calls 

+

190 # helper1 differently. 

+

191 self.make_file("main_file.py", """\ 

+

192 import helper1, helper2 

+

193 helper1.func1(23) 

+

194 helper2.func2(23) 

+

195 """) 

+

196 

+

197 self.run_coverage() 

+

198 

+

199 # Only the changed files should have been created. 

+

200 self.assert_htmlcov_files_exist() 

+

201 assert "htmlcov/index.html" in self.files_written 

+

202 assert "htmlcov/helper1_py.html" in self.files_written 

+

203 assert "htmlcov/helper2_py.html" not in self.files_written 

+

204 assert "htmlcov/main_file_py.html" in self.files_written 

+

205 

+

206 def test_html_delta_from_settings_change(self): 

+

207 # HTML generation can create only the files that have changed. 

+

208 # In this case, everything changes because the coverage.py settings 

+

209 # have changed. 

+

210 self.create_initial_files() 

+

211 self.run_coverage(covargs=dict(omit=[])) 

+

212 index1 = self.get_html_index_content() 

+

213 

+

214 self.run_coverage(covargs=dict(omit=['xyzzy*'])) 

+

215 

+

216 # All the files have been reported again. 

+

217 self.assert_htmlcov_files_exist() 

+

218 assert "htmlcov/index.html" in self.files_written 

+

219 assert "htmlcov/helper1_py.html" in self.files_written 

+

220 assert "htmlcov/helper2_py.html" in self.files_written 

+

221 assert "htmlcov/main_file_py.html" in self.files_written 

+

222 

+

223 index2 = self.get_html_index_content() 

+

224 assert index1 == index2 

+

225 

+

226 def test_html_delta_from_coverage_version_change(self): 

+

227 # HTML generation can create only the files that have changed. 

+

228 # In this case, everything changes because the coverage.py version has 

+

229 # changed. 

+

230 self.create_initial_files() 

+

231 self.run_coverage() 

+

232 index1 = self.get_html_index_content() 

+

233 

+

234 # "Upgrade" coverage.py! 

+

235 coverage.__version__ = "XYZZY" 

+

236 

+

237 self.run_coverage() 

+

238 

+

239 # All the files have been reported again. 

+

240 self.assert_htmlcov_files_exist() 

+

241 assert "htmlcov/index.html" in self.files_written 

+

242 assert "htmlcov/helper1_py.html" in self.files_written 

+

243 assert "htmlcov/helper2_py.html" in self.files_written 

+

244 assert "htmlcov/main_file_py.html" in self.files_written 

+

245 

+

246 index2 = self.get_html_index_content() 

+

247 fixed_index2 = index2.replace("XYZZY", self.real_coverage_version) 

+

248 assert index1 == fixed_index2 

+

249 

+

250 def test_file_becomes_100(self): 

+

251 self.create_initial_files() 

+

252 self.run_coverage() 

+

253 

+

254 # Now change a file and do it again 

+

255 self.make_file("main_file.py", """\ 

+

256 import helper1, helper2 

+

257 # helper1 is now 100% 

+

258 helper1.func1(12) 

+

259 helper1.func1(23) 

+

260 """) 

+

261 

+

262 self.run_coverage(htmlargs=dict(skip_covered=True)) 

+

263 

+

264 # The 100% file, skipped, shouldn't be here. 

+

265 self.assert_doesnt_exist("htmlcov/helper1_py.html") 

+

266 

+

267 def test_status_format_change(self): 

+

268 self.create_initial_files() 

+

269 self.run_coverage() 

+

270 

+

271 with open("htmlcov/status.json") as status_json: 

+

272 status_data = json.load(status_json) 

+

273 

+

274 assert status_data['format'] == 2 

+

275 status_data['format'] = 99 

+

276 with open("htmlcov/status.json", "w") as status_json: 

+

277 json.dump(status_data, status_json) 

+

278 

+

279 self.run_coverage() 

+

280 

+

281 # All the files have been reported again. 

+

282 self.assert_htmlcov_files_exist() 

+

283 assert "htmlcov/index.html" in self.files_written 

+

284 assert "htmlcov/helper1_py.html" in self.files_written 

+

285 assert "htmlcov/helper2_py.html" in self.files_written 

+

286 assert "htmlcov/main_file_py.html" in self.files_written 

+

287 

+

288 

+

289class HtmlTitleTest(HtmlTestHelpers, CoverageTest): 

+

290 """Tests of the HTML title support.""" 

+

291 

+

292 def test_default_title(self): 

+

293 self.create_initial_files() 

+

294 self.run_coverage() 

+

295 index = self.get_html_index_content() 

+

296 assert "<title>Coverage report</title>" in index 

+

297 assert "<h1>Coverage report:" in index 

+

298 

+

299 def test_title_set_in_config_file(self): 

+

300 self.create_initial_files() 

+

301 self.make_file(".coveragerc", "[html]\ntitle = Metrics & stuff!\n") 

+

302 self.run_coverage() 

+

303 index = self.get_html_index_content() 

+

304 assert "<title>Metrics &amp; stuff!</title>" in index 

+

305 assert "<h1>Metrics &amp; stuff!:" in index 

+

306 

+

307 def test_non_ascii_title_set_in_config_file(self): 

+

308 self.create_initial_files() 

+

309 self.make_file(".coveragerc", "[html]\ntitle = «ταБЬℓσ» numbers") 

+

310 self.run_coverage() 

+

311 index = self.get_html_index_content() 

+

312 assert "<title>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187; numbers" in index 

+

313 assert "<h1>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187; numbers" in index 

+

314 

+

315 def test_title_set_in_args(self): 

+

316 self.create_initial_files() 

+

317 self.make_file(".coveragerc", "[html]\ntitle = Good title\n") 

+

318 self.run_coverage(htmlargs=dict(title="«ταБЬℓσ» & stüff!")) 

+

319 index = self.get_html_index_content() 

+

320 expected = ( 

+

321 "<title>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187; " + 

+

322 "&amp; st&#252;ff!</title>" 

+

323 ) 

+

324 assert expected in index 

+

325 assert "<h1>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187; &amp; st&#252;ff!:" in index 

+

326 

+

327 

+

328class HtmlWithUnparsableFilesTest(HtmlTestHelpers, CoverageTest): 

+

329 """Test the behavior when measuring unparsable files.""" 

+

330 

+

331 def test_dotpy_not_python(self): 

+

332 self.make_file("main.py", "import innocuous") 

+

333 self.make_file("innocuous.py", "a = 1") 

+

334 cov = coverage.Coverage() 

+

335 self.start_import_stop(cov, "main") 

+

336 self.make_file("innocuous.py", "<h1>This isn't python!</h1>") 

+

337 msg = "Couldn't parse '.*innocuous.py' as Python source: .* at line 1" 

+

338 with pytest.raises(NotPython, match=msg): 

+

339 cov.html_report() 

+

340 

+

341 def test_dotpy_not_python_ignored(self): 

+

342 self.make_file("main.py", "import innocuous") 

+

343 self.make_file("innocuous.py", "a = 2") 

+

344 cov = coverage.Coverage() 

+

345 self.start_import_stop(cov, "main") 

+

346 self.make_file("innocuous.py", "<h1>This isn't python!</h1>") 

+

347 cov.html_report(ignore_errors=True) 

+

348 msg = "Expected a warning to be thrown when an invalid python file is parsed" 

+

349 assert 1 == len(cov._warnings), msg 

+

350 msg = "Warning message should be in 'invalid file' warning" 

+

351 assert "Couldn't parse Python file" in cov._warnings[0], msg 

+

352 assert "innocuous.py" in cov._warnings[0], "Filename should be in 'invalid file' warning" 

+

353 self.assert_exists("htmlcov/index.html") 

+

354 # This would be better as a glob, if the HTML layout changes: 

+

355 self.assert_doesnt_exist("htmlcov/innocuous.html") 

+

356 

+

357 def test_dothtml_not_python(self): 

+

358 # We run a .html file, and when reporting, we can't parse it as 

+

359 # Python. Since it wasn't .py, no error is reported. 

+

360 

+

361 # Run an "HTML" file 

+

362 self.make_file("innocuous.html", "a = 3") 

+

363 self.run_command("coverage run --source=. innocuous.html") 

+

364 # Before reporting, change it to be an HTML file. 

+

365 self.make_file("innocuous.html", "<h1>This isn't python at all!</h1>") 

+

366 output = self.run_command("coverage html") 

+

367 assert output.strip() == "No data to report." 

+

368 

+

369 def test_execed_liar_ignored(self): 

+

370 # Jinja2 sets __file__ to be a non-Python file, and then execs code. 

+

371 # If that file contains non-Python code, a TokenError shouldn't 

+

372 # have been raised when writing the HTML report. 

+

373 source = "exec(compile('','','exec'), {'__file__': 'liar.html'})" 

+

374 self.make_file("liar.py", source) 

+

375 self.make_file("liar.html", "{# Whoops, not python code #}") 

+

376 cov = coverage.Coverage() 

+

377 self.start_import_stop(cov, "liar") 

+

378 cov.html_report() 

+

379 self.assert_exists("htmlcov/index.html") 

+

380 

+

381 def test_execed_liar_ignored_indentation_error(self): 

+

382 # Jinja2 sets __file__ to be a non-Python file, and then execs code. 

+

383 # If that file contains untokenizable code, we shouldn't get an 

+

384 # exception. 

+

385 source = "exec(compile('','','exec'), {'__file__': 'liar.html'})" 

+

386 self.make_file("liar.py", source) 

+

387 # Tokenize will raise an IndentationError if it can't dedent. 

+

388 self.make_file("liar.html", "0\n 2\n 1\n") 

+

389 cov = coverage.Coverage() 

+

390 self.start_import_stop(cov, "liar") 

+

391 cov.html_report() 

+

392 self.assert_exists("htmlcov/index.html") 

+

393 

+

394 def test_decode_error(self): 

+

395 # https://github.com/nedbat/coveragepy/issues/351 

+

396 # imp.load_module won't load a file with an undecodable character 

+

397 # in a comment, though Python will run them. So we'll change the 

+

398 # file after running. 

+

399 self.make_file("main.py", "import sub.not_ascii") 

+

400 self.make_file("sub/__init__.py") 

+

401 self.make_file("sub/not_ascii.py", """\ 

+

402 # coding: utf-8 

+

403 a = 1 # Isn't this great?! 

+

404 """) 

+

405 cov = coverage.Coverage() 

+

406 self.start_import_stop(cov, "main") 

+

407 

+

408 # Create the undecodable version of the file. make_file is too helpful, 

+

409 # so get down and dirty with bytes. 

+

410 with open("sub/not_ascii.py", "wb") as f: 

+

411 f.write(b"# coding: utf-8\na = 1 # Isn't this great?\xcb!\n") 

+

412 

+

413 with open("sub/not_ascii.py", "rb") as f: 

+

414 undecodable = f.read() 

+

415 assert b"?\xcb!" in undecodable 

+

416 

+

417 cov.html_report() 

+

418 

+

419 html_report = self.get_html_report_content("sub/not_ascii.py") 

+

420 expected = "# Isn't this great?&#65533;!" 

+

421 assert expected in html_report 

+

422 

+

423 def test_formfeeds(self): 

+

424 # https://github.com/nedbat/coveragepy/issues/360 

+

425 self.make_file("formfeed.py", "line_one = 1\n\f\nline_two = 2\n") 

+

426 cov = coverage.Coverage() 

+

427 self.start_import_stop(cov, "formfeed") 

+

428 cov.html_report() 

+

429 

+

430 formfeed_html = self.get_html_report_content("formfeed.py") 

+

431 assert "line_two" in formfeed_html 

+

432 

+

433 

+

434class HtmlTest(HtmlTestHelpers, CoverageTest): 

+

435 """Moar HTML tests.""" 

+

436 

+

437 def test_missing_source_file_incorrect_message(self): 

+

438 # https://github.com/nedbat/coveragepy/issues/60 

+

439 self.make_file("thefile.py", "import sub.another\n") 

+

440 self.make_file("sub/__init__.py", "") 

+

441 self.make_file("sub/another.py", "print('another')\n") 

+

442 cov = coverage.Coverage() 

+

443 self.start_import_stop(cov, 'thefile') 

+

444 os.remove("sub/another.py") 

+

445 

+

446 missing_file = os.path.join(self.temp_dir, "sub", "another.py") 

+

447 missing_file = os.path.realpath(missing_file) 

+

448 msg = "(?i)No source for code: '%s'" % re.escape(missing_file) 

+

449 with pytest.raises(NoSource, match=msg): 

+

450 cov.html_report() 

+

451 

+

452 def test_extensionless_file_collides_with_extension(self): 

+

453 # It used to be that "program" and "program.py" would both be reported 

+

454 # to "program.html". Now they are not. 

+

455 # https://github.com/nedbat/coveragepy/issues/69 

+

456 self.make_file("program", "import program\n") 

+

457 self.make_file("program.py", "a = 1\n") 

+

458 self.run_command("coverage run program") 

+

459 self.run_command("coverage html") 

+

460 self.assert_exists("htmlcov/index.html") 

+

461 self.assert_exists("htmlcov/program.html") 

+

462 self.assert_exists("htmlcov/program_py.html") 

+

463 

+

464 def test_has_date_stamp_in_files(self): 

+

465 self.create_initial_files() 

+

466 self.run_coverage() 

+

467 

+

468 with open("htmlcov/index.html") as f: 

+

469 self.assert_correct_timestamp(f.read()) 

+

470 with open("htmlcov/main_file_py.html") as f: 

+

471 self.assert_correct_timestamp(f.read()) 

+

472 

+

473 def test_reporting_on_unmeasured_file(self): 

+

474 # It should be ok to ask for an HTML report on a file that wasn't even 

+

475 # measured at all. https://github.com/nedbat/coveragepy/issues/403 

+

476 self.create_initial_files() 

+

477 self.make_file("other.py", "a = 1\n") 

+

478 self.run_coverage(htmlargs=dict(morfs=['other.py'])) 

+

479 self.assert_exists("htmlcov/index.html") 

+

480 self.assert_exists("htmlcov/other_py.html") 

+

481 

+

482 def make_main_and_not_covered(self): 

+

483 """Helper to create files for skip_covered scenarios.""" 

+

484 self.make_file("main_file.py", """ 

+

485 import not_covered 

+

486 

+

487 def normal(): 

+

488 print("z") 

+

489 normal() 

+

490 """) 

+

491 self.make_file("not_covered.py", """ 

+

492 def not_covered(): 

+

493 print("n") 

+

494 """) 

+

495 

+

496 def test_report_skip_covered(self): 

+

497 self.make_main_and_not_covered() 

+

498 self.run_coverage(htmlargs=dict(skip_covered=True)) 

+

499 self.assert_exists("htmlcov/index.html") 

+

500 self.assert_doesnt_exist("htmlcov/main_file_py.html") 

+

501 self.assert_exists("htmlcov/not_covered_py.html") 

+

502 

+

503 def test_html_skip_covered(self): 

+

504 self.make_main_and_not_covered() 

+

505 self.make_file(".coveragerc", "[html]\nskip_covered = True") 

+

506 self.run_coverage() 

+

507 self.assert_exists("htmlcov/index.html") 

+

508 self.assert_doesnt_exist("htmlcov/main_file_py.html") 

+

509 self.assert_exists("htmlcov/not_covered_py.html") 

+

510 

+

511 def test_report_skip_covered_branches(self): 

+

512 self.make_main_and_not_covered() 

+

513 self.run_coverage(covargs=dict(branch=True), htmlargs=dict(skip_covered=True)) 

+

514 self.assert_exists("htmlcov/index.html") 

+

515 self.assert_doesnt_exist("htmlcov/main_file_py.html") 

+

516 self.assert_exists("htmlcov/not_covered_py.html") 

+

517 

+

518 def test_report_skip_covered_100(self): 

+

519 self.make_file("main_file.py", """ 

+

520 def normal(): 

+

521 print("z") 

+

522 normal() 

+

523 """) 

+

524 res = self.run_coverage(covargs=dict(source="."), htmlargs=dict(skip_covered=True)) 

+

525 assert res == 100.0 

+

526 self.assert_doesnt_exist("htmlcov/main_file_py.html") 

+

527 

+

528 def make_init_and_main(self): 

+

529 """Helper to create files for skip_empty scenarios.""" 

+

530 self.make_file("submodule/__init__.py", "") 

+

531 self.make_file("main_file.py", """ 

+

532 import submodule 

+

533 

+

534 def normal(): 

+

535 print("z") 

+

536 normal() 

+

537 """) 

+

538 

+

539 def test_report_skip_empty(self): 

+

540 self.make_init_and_main() 

+

541 self.run_coverage(htmlargs=dict(skip_empty=True)) 

+

542 self.assert_exists("htmlcov/index.html") 

+

543 self.assert_exists("htmlcov/main_file_py.html") 

+

544 self.assert_doesnt_exist("htmlcov/submodule___init___py.html") 

+

545 

+

546 def test_html_skip_empty(self): 

+

547 self.make_init_and_main() 

+

548 self.make_file(".coveragerc", "[html]\nskip_empty = True") 

+

549 self.run_coverage() 

+

550 self.assert_exists("htmlcov/index.html") 

+

551 self.assert_exists("htmlcov/main_file_py.html") 

+

552 self.assert_doesnt_exist("htmlcov/submodule___init___py.html") 

+

553 

+

554 

+

555class HtmlStaticFileTest(CoverageTest): 

+

556 """Tests of the static file copying for the HTML report.""" 

+

557 

+

558 def setup_test(self): 

+

559 super(HtmlStaticFileTest, self).setup_test() 

+

560 original_path = list(coverage.html.STATIC_PATH) 

+

561 self.addCleanup(setattr, coverage.html, 'STATIC_PATH', original_path) 

+

562 

+

563 def test_copying_static_files_from_system(self): 

+

564 # Make a new place for static files. 

+

565 self.make_file("static_here/jquery.min.js", "Not Really JQuery!") 

+

566 coverage.html.STATIC_PATH.insert(0, "static_here") 

+

567 

+

568 self.make_file("main.py", "print(17)") 

+

569 cov = coverage.Coverage() 

+

570 self.start_import_stop(cov, "main") 

+

571 cov.html_report() 

+

572 

+

573 with open("htmlcov/jquery.min.js") as f: 

+

574 jquery = f.read() 

+

575 assert jquery == "Not Really JQuery!" 

+

576 

+

577 def test_copying_static_files_from_system_in_dir(self): 

+

578 # Make a new place for static files. 

+

579 INSTALLED = [ 

+

580 "jquery/jquery.min.js", 

+

581 "jquery-hotkeys/jquery.hotkeys.js", 

+

582 "jquery-isonscreen/jquery.isonscreen.js", 

+

583 "jquery-tablesorter/jquery.tablesorter.min.js", 

+

584 ] 

+

585 for fpath in INSTALLED: 

+

586 self.make_file(os.path.join("static_here", fpath), "Not real.") 

+

587 coverage.html.STATIC_PATH.insert(0, "static_here") 

+

588 

+

589 self.make_file("main.py", "print(17)") 

+

590 cov = coverage.Coverage() 

+

591 self.start_import_stop(cov, "main") 

+

592 cov.html_report() 

+

593 

+

594 for fpath in INSTALLED: 

+

595 the_file = os.path.basename(fpath) 

+

596 with open(os.path.join("htmlcov", the_file)) as f: 

+

597 contents = f.read() 

+

598 assert contents == "Not real." 

+

599 

+

600 def test_cant_find_static_files(self): 

+

601 # Make the path point to useless places. 

+

602 coverage.html.STATIC_PATH = ["/xyzzy"] 

+

603 

+

604 self.make_file("main.py", "print(17)") 

+

605 cov = coverage.Coverage() 

+

606 self.start_import_stop(cov, "main") 

+

607 msg = "Couldn't find static file u?'.*'" 

+

608 with pytest.raises(CoverageException, match=msg): 

+

609 cov.html_report() 

+

610 

+

611def filepath_to_regex(path): 

+

612 """Create a regex for scrubbing a file path.""" 

+

613 regex = re.escape(path) 

+

614 # If there's a backslash, let it match either slash. 

+

615 regex = regex.replace(r"\\", r"[\\/]") 

+

616 if env.WINDOWS: 

+

617 regex = "(?i)" + regex 

+

618 return regex 

+

619 

+

620 

+

621def compare_html(expected, actual): 

+

622 """Specialized compare function for our HTML files.""" 

+

623 scrubs = [ 

+

624 (r'/coverage.readthedocs.io/?[-.\w/]*', '/coverage.readthedocs.io/VER'), 

+

625 (r'coverage.py v[\d.abc]+', 'coverage.py vVER'), 

+

626 (r'created at \d\d\d\d-\d\d-\d\d \d\d:\d\d [-+]\d\d\d\d', 'created at DATE'), 

+

627 (r'created at \d\d\d\d-\d\d-\d\d \d\d:\d\d', 'created at DATE'), 

+

628 # Some words are identifiers in one version, keywords in another. 

+

629 (r'<span class="(nam|key)">(print|True|False)</span>', r'<span class="nam">\2</span>'), 

+

630 # Occasionally an absolute path is in the HTML report. 

+

631 (filepath_to_regex(TESTS_DIR), 'TESTS_DIR'), 

+

632 (filepath_to_regex(flat_rootname(unicode_class(TESTS_DIR))), '_TESTS_DIR'), 

+

633 # The temp dir the tests make. 

+

634 (filepath_to_regex(os.getcwd()), 'TEST_TMPDIR'), 

+

635 (filepath_to_regex(flat_rootname(unicode_class(os.getcwd()))), '_TEST_TMPDIR'), 

+

636 (filepath_to_regex(abs_file(os.getcwd())), 'TEST_TMPDIR'), 

+

637 (filepath_to_regex(flat_rootname(unicode_class(abs_file(os.getcwd())))), '_TEST_TMPDIR'), 

+

638 (r'/private/var/folders/[\w/]{35}/coverage_test/tests_test_html_\w+_\d{8}', 'TEST_TMPDIR'), 

+

639 (r'_private_var_folders_\w{35}_coverage_test_tests_test_html_\w+_\d{8}', '_TEST_TMPDIR'), 

+

640 ] 

+

641 if env.WINDOWS: 

+

642 # For file paths... 

+

643 scrubs += [(r"\\", "/")] 

+

644 compare(expected, actual, file_pattern="*.html", scrubs=scrubs) 

+

645 

+

646 

+

647class HtmlGoldTest(CoverageTest): 

+

648 """Tests of HTML reporting that use gold files.""" 

+

649 

+

650 def test_a(self): 

+

651 self.make_file("a.py", """\ 

+

652 if 1 < 2: 

+

653 # Needed a < to look at HTML entities. 

+

654 a = 3 

+

655 else: 

+

656 a = 4 

+

657 """) 

+

658 

+

659 cov = coverage.Coverage() 

+

660 a = self.start_import_stop(cov, "a") 

+

661 cov.html_report(a, directory='out/a') 

+

662 

+

663 compare_html(gold_path("html/a"), "out/a") 

+

664 contains( 

+

665 "out/a/a_py.html", 

+

666 ('<span class="key">if</span> <span class="num">1</span> ' 

+

667 '<span class="op">&lt;</span> <span class="num">2</span>'), 

+

668 (' <span class="nam">a</span> ' 

+

669 '<span class="op">=</span> <span class="num">3</span>'), 

+

670 '<span class="pc_cov">67%</span>', 

+

671 ) 

+

672 contains( 

+

673 "out/a/index.html", 

+

674 '<a href="a_py.html">a.py</a>', 

+

675 '<span class="pc_cov">67%</span>', 

+

676 '<td class="right" data-ratio="2 3">67%</td>', 

+

677 ) 

+

678 

+

679 def test_b_branch(self): 

+

680 self.make_file("b.py", """\ 

+

681 def one(x): 

+

682 # This will be a branch that misses the else. 

+

683 if x < 2: 

+

684 a = 3 

+

685 else: 

+

686 a = 4 

+

687 

+

688 one(1) 

+

689 

+

690 def two(x): 

+

691 # A missed else that branches to "exit" 

+

692 if x: 

+

693 a = 5 

+

694 

+

695 two(1) 

+

696 

+

697 def three(): 

+

698 try: 

+

699 # This if has two branches, *neither* one taken. 

+

700 if name_error_this_variable_doesnt_exist: 

+

701 a = 1 

+

702 else: 

+

703 a = 2 

+

704 except: 

+

705 pass 

+

706 

+

707 three() 

+

708 """) 

+

709 

+

710 cov = coverage.Coverage(branch=True) 

+

711 b = self.start_import_stop(cov, "b") 

+

712 cov.html_report(b, directory="out/b_branch") 

+

713 

+

714 compare_html(gold_path("html/b_branch"), "out/b_branch") 

+

715 contains( 

+

716 "out/b_branch/b_py.html", 

+

717 ('<span class="key">if</span> <span class="nam">x</span> ' 

+

718 '<span class="op">&lt;</span> <span class="num">2</span>'), 

+

719 (' <span class="nam">a</span> <span class="op">=</span> ' 

+

720 '<span class="num">3</span>'), 

+

721 '<span class="pc_cov">70%</span>', 

+

722 

+

723 ('<span class="annotate short">3&#x202F;&#x219B;&#x202F;6</span>' 

+

724 '<span class="annotate long">line 3 didn\'t jump to line 6, ' 

+

725 'because the condition on line 3 was never false</span>'), 

+

726 ('<span class="annotate short">12&#x202F;&#x219B;&#x202F;exit</span>' 

+

727 '<span class="annotate long">line 12 didn\'t return from function \'two\', ' 

+

728 'because the condition on line 12 was never false</span>'), 

+

729 ('<span class="annotate short">20&#x202F;&#x219B;&#x202F;21,&nbsp;&nbsp; ' 

+

730 '20&#x202F;&#x219B;&#x202F;23</span>' 

+

731 '<span class="annotate long">2 missed branches: ' 

+

732 '1) line 20 didn\'t jump to line 21, ' 

+

733 'because the condition on line 20 was never true, ' 

+

734 '2) line 20 didn\'t jump to line 23, ' 

+

735 'because the condition on line 20 was never false</span>'), 

+

736 ) 

+

737 contains( 

+

738 "out/b_branch/index.html", 

+

739 '<a href="b_py.html">b.py</a>', 

+

740 '<span class="pc_cov">70%</span>', 

+

741 '<td class="right" data-ratio="16 23">70%</td>', 

+

742 ) 

+
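A minimal sketch of the same branch measurement outside the test harness, assuming a b.py like the one above is on disk; the output directory name is only an example.

import coverage

cov = coverage.Coverage(branch=True)       # branch measurement, as in the gold test
cov.start()
import b                                    # run the module being measured
cov.stop()
cov.html_report(morfs=["b.py"], directory="out/b_branch")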

743 

+

744 def test_bom(self): 

+

745 self.make_file("bom.py", bytes=b"""\ 

+

746\xef\xbb\xbf# A Python source file in utf-8, with BOM. 

+

747math = "3\xc3\x974 = 12, \xc3\xb72 = 6\xc2\xb10" 

+

748 

+

749import sys 

+

750 

+

751if sys.version_info >= (3, 0): 

+

752 assert len(math) == 18 

+

753 assert len(math.encode('utf-8')) == 21 

+

754else: 

+

755 assert len(math) == 21 

+

756 assert len(math.decode('utf-8')) == 18 

+

757""".replace(b"\n", b"\r\n")) 

+

758 

+

759 # It's important that the source file really have a BOM, which can 

+

760 # get lost, so check that it's really there, and that we have \r\n 

+

761 # line endings. 

+

762 with open("bom.py", "rb") as f: 

+

763 data = f.read() 

+

764 assert data[:3] == b"\xef\xbb\xbf" 

+

765 assert data.count(b"\r\n") == 11 

+

766 

+

767 cov = coverage.Coverage() 

+

768 bom = self.start_import_stop(cov, "bom") 

+

769 cov.html_report(bom, directory="out/bom") 

+

770 

+

771 compare_html(gold_path("html/bom"), "out/bom") 

+

772 contains( 

+

773 "out/bom/bom_py.html", 

+

774 '<span class="str">"3&#215;4 = 12, &#247;2 = 6&#177;0"</span>', 

+

775 ) 

+
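A quick standalone check mirroring the assertions above, for any file expected to start with a UTF-8 BOM and use \r\n line endings (the name bom.py assumes the file created above).

with open("bom.py", "rb") as f:
    data = f.read()
assert data.startswith(b"\xef\xbb\xbf")    # the UTF-8 BOM is present
assert b"\r\n" in data                      # the DOS line endings survived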

776 

+

777 def test_isolatin1(self): 

+

778 self.make_file("isolatin1.py", bytes=b"""\ 

+

779# -*- coding: iso8859-1 -*- 

+

780# A Python source file in another encoding. 

+

781 

+

782math = "3\xd74 = 12, \xf72 = 6\xb10" 

+

783assert len(math) == 18 

+

784""") 

+

785 

+

786 cov = coverage.Coverage() 

+

787 isolatin1 = self.start_import_stop(cov, "isolatin1") 

+

788 cov.html_report(isolatin1, directory="out/isolatin1") 

+

789 

+

790 compare_html(gold_path("html/isolatin1"), "out/isolatin1") 

+

791 contains( 

+

792 "out/isolatin1/isolatin1_py.html", 

+

793 '<span class="str">"3&#215;4 = 12, &#247;2 = 6&#177;0"</span>', 

+

794 ) 

+

795 

+

796 def make_main_etc(self): 

+

797 """Make main.py and m1-m3.py for other tests.""" 

+

798 self.make_file("main.py", """\ 

+

799 import m1 

+

800 import m2 

+

801 import m3 

+

802 

+

803 a = 5 

+

804 b = 6 

+

805 

+

806 assert m1.m1a == 1 

+

807 assert m2.m2a == 1 

+

808 assert m3.m3a == 1 

+

809 """) 

+

810 self.make_file("m1.py", """\ 

+

811 m1a = 1 

+

812 m1b = 2 

+

813 """) 

+

814 self.make_file("m2.py", """\ 

+

815 m2a = 1 

+

816 m2b = 2 

+

817 """) 

+

818 self.make_file("m3.py", """\ 

+

819 m3a = 1 

+

820 m3b = 2 

+

821 """) 

+

822 

+

823 def test_omit_1(self): 

+

824 self.make_main_etc() 

+

825 cov = coverage.Coverage(include=["./*"]) 

+

826 self.start_import_stop(cov, "main") 

+

827 cov.html_report(directory="out/omit_1") 

+

828 compare_html(gold_path("html/omit_1"), "out/omit_1") 

+

829 

+

830 def test_omit_2(self): 

+

831 self.make_main_etc() 

+

832 cov = coverage.Coverage(include=["./*"]) 

+

833 self.start_import_stop(cov, "main") 

+

834 cov.html_report(directory="out/omit_2", omit=["m1.py"]) 

+

835 compare_html(gold_path("html/omit_2"), "out/omit_2") 

+

836 

+

837 def test_omit_3(self): 

+

838 self.make_main_etc() 

+

839 cov = coverage.Coverage(include=["./*"]) 

+

840 self.start_import_stop(cov, "main") 

+

841 cov.html_report(directory="out/omit_3", omit=["m1.py", "m2.py"]) 

+

842 compare_html(gold_path("html/omit_3"), "out/omit_3") 

+

843 

+

844 def test_omit_4(self): 

+

845 self.make_main_etc() 

+

846 self.make_file("omit4.ini", """\ 

+

847 [report] 

+

848 omit = m2.py 

+

849 """) 

+

850 

+

851 cov = coverage.Coverage(config_file="omit4.ini", include=["./*"]) 

+

852 self.start_import_stop(cov, "main") 

+

853 cov.html_report(directory="out/omit_4") 

+

854 compare_html(gold_path("html/omit_4"), "out/omit_4") 

+

855 

+

856 def test_omit_5(self): 

+

857 self.make_main_etc() 

+

858 self.make_file("omit5.ini", """\ 

+

859 [report] 

+

860 omit = 

+

861 fooey 

+

862 gooey, m[23]*, kablooey 

+

863 helloworld 

+

864 

+

865 [html] 

+

866 directory = out/omit_5 

+

867 """) 

+

868 

+

869 cov = coverage.Coverage(config_file="omit5.ini", include=["./*"]) 

+

870 self.start_import_stop(cov, "main") 

+

871 cov.html_report() 

+

872 compare_html(gold_path("html/omit_5"), "out/omit_5") 

+

873 

+

874 def test_other(self): 

+

875 self.make_file("src/here.py", """\ 

+

876 import other 

+

877 

+

878 if 1 < 2: 

+

879 h = 3 

+

880 else: 

+

881 h = 4 

+

882 """) 

+

883 self.make_file("othersrc/other.py", """\ 

+

884 # A file in another directory. We're checking that it ends up in the 

+

885 # HTML report. 

+

886 

+

887 print("This is the other src!") 

+

888 """) 

+

889 

+

890 with change_dir("src"): 

+

891 sys.path.insert(0, "") # pytest sometimes has this, sometimes not!? 

+

892 sys.path.insert(0, "../othersrc") 

+

893 cov = coverage.Coverage(include=["./*", "../othersrc/*"]) 

+

894 self.start_import_stop(cov, "here") 

+

895 cov.html_report(directory="../out/other") 

+

896 

+

897 # Different platforms will name the "other" file differently. Rename it 

+

898 for p in glob.glob("out/other/*_other_py.html"): 

+

899 os.rename(p, "out/other/blah_blah_other_py.html") 

+

900 

+

901 compare_html(gold_path("html/other"), "out/other") 

+

902 contains( 

+

903 "out/other/index.html", 

+

904 '<a href="here_py.html">here.py</a>', 

+

905 'other_py.html">', 'other.py</a>', 

+

906 ) 

+

907 

+

908 def test_partial(self): 

+

909 self.make_file("partial.py", """\ 

+

910 # partial branches and excluded lines 

+

911 a = 2 

+

912 

+

913 while "no peephole".upper(): # t4 

+

914 break 

+

915 

+

916 while a: # pragma: no branch 

+

917 break 

+

918 

+

919 if 0: 

+

920 never_happen() 

+

921 

+

922 if 13: 

+

923 a = 14 

+

924 

+

925 if a == 16: 

+

926 raise ZeroDivisionError("17") 

+

927 """) 

+

928 self.make_file("partial.ini", """\ 

+

929 [run] 

+

930 branch = True 

+

931 

+

932 [report] 

+

933 exclude_lines = 

+

934 raise ZeroDivisionError 

+

935 """) 

+

936 

+

937 cov = coverage.Coverage(config_file="partial.ini") 

+

938 partial = self.start_import_stop(cov, "partial") 

+

939 

+

940 if env.PYBEHAVIOR.pep626: 

+

941 cov.html_report(partial, directory="out/partial_626") 

+

942 compare_html(gold_path("html/partial_626"), "out/partial_626") 

+

943 contains( 

+

944 "out/partial_626/partial_py.html", 

+

945 '<p id="t4" class="par run show_par">', 

+

946 '<p id="t7" class="run">', 

+

947 # The "if 0" and "if 1" statements are marked as run. 

+

948 '<p id="t10" class="run">', 

+

949 # The "raise ZeroDivisionError" is excluded by regex in the .ini. 

+

950 '<p id="t17" class="exc show_exc">', 

+

951 ) 

+

952 contains( 

+

953 "out/partial_626/index.html", 

+

954 '<a href="partial_py.html">partial.py</a>', 

+

955 '<span class="pc_cov">87%</span>' 

+

956 ) 

+

957 else: 

+

958 cov.html_report(partial, directory="out/partial") 

+

959 compare_html(gold_path("html/partial"), "out/partial") 

+

960 contains( 

+

961 "out/partial/partial_py.html", 

+

962 '<p id="t4" class="par run show_par">', 

+

963 '<p id="t7" class="run">', 

+

964 # The "if 0" and "if 1" statements are optimized away. 

+

965 '<p id="t10" class="pln">', 

+

966 # The "raise ZeroDivisionError" is excluded by regex in the .ini. 

+

967 '<p id="t17" class="exc show_exc">', 

+

968 ) 

+

969 contains( 

+

970 "out/partial/index.html", 

+

971 '<a href="partial_py.html">partial.py</a>', 

+

972 '<span class="pc_cov">91%</span>' 

+

973 ) 

+
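A sketch of reproducing the partial-branch report outside the harness, assuming partial.py and partial.ini exist exactly as written above; exclude_lines in the .ini is what turns the raise line into an excluded ("exc") line in the HTML.

import coverage

cov = coverage.Coverage(config_file="partial.ini")   # picks up branch=True and exclude_lines
cov.start()
import partial
cov.stop()
cov.html_report(directory="out/partial")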

974 

+

975 def test_styled(self): 

+

976 self.make_file("a.py", """\ 

+

977 if 1 < 2: 

+

978 # Needed a < to look at HTML entities. 

+

979 a = 3 

+

980 else: 

+

981 a = 4 

+

982 """) 

+

983 

+

984 self.make_file("extra.css", "/* Doesn't matter what goes in here, it gets copied. */\n") 

+

985 

+

986 cov = coverage.Coverage() 

+

987 a = self.start_import_stop(cov, "a") 

+

988 cov.html_report(a, directory="out/styled", extra_css="extra.css") 

+

989 

+

990 compare_html(gold_path("html/styled"), "out/styled") 

+

991 compare(gold_path("html/styled"), "out/styled", file_pattern="*.css") 

+

992 contains( 

+

993 "out/styled/a_py.html", 

+

994 '<link rel="stylesheet" href="extra.css" type="text/css">', 

+

995 ('<span class="key">if</span> <span class="num">1</span> ' 

+

996 '<span class="op">&lt;</span> <span class="num">2</span>'), 

+

997 (' <span class="nam">a</span> <span class="op">=</span> ' 

+

998 '<span class="num">3</span>'), 

+

999 '<span class="pc_cov">67%</span>' 

+

1000 ) 

+

1001 contains( 

+

1002 "out/styled/index.html", 

+

1003 '<link rel="stylesheet" href="extra.css" type="text/css">', 

+

1004 '<a href="a_py.html">a.py</a>', 

+

1005 '<span class="pc_cov">67%</span>' 

+

1006 ) 

+

1007 

+

1008 def test_tabbed(self): 

+

1009 # The file contents would look like this with 8-space tabs: 

+

1010 # x = 1 

+

1011 # if x: 

+

1012 # a = "tabbed" # aligned comments 

+

1013 # if x: # look nice 

+

1014 # b = "no spaces" # when they 

+

1015 # c = "done" # line up. 

+

1016 self.make_file("tabbed.py", """\ 

+

1017 x = 1 

+

1018 if x: 

+

1019 \ta = "Tabbed"\t\t\t\t# Aligned comments 

+

1020 \tif x:\t\t\t\t\t# look nice 

+

1021 \t\tb = "No spaces"\t\t\t# when they 

+

1022 \tc = "Done"\t\t\t\t# line up. 

+

1023 """) 

+

1024 

+

1025 cov = coverage.Coverage() 

+

1026 tabbed = self.start_import_stop(cov, "tabbed") 

+

1027 cov.html_report(tabbed, directory="out") 

+

1028 

+

1029 # Editors like to change things, make sure our source file still has tabs. 

+

1030 contains("tabbed.py", "\tif x:\t\t\t\t\t# look nice") 

+

1031 

+

1032 contains( 

+

1033 "out/tabbed_py.html", 

+

1034 '> <span class="key">if</span> ' 

+

1035 '<span class="nam">x</span><span class="op">:</span>' 

+

1036 ' ' 

+

1037 '<span class="com"># look nice</span>' 

+

1038 ) 

+

1039 

+

1040 doesnt_contain("out/tabbed_py.html", "\t") 

+

1041 

+

1042 def test_unicode(self): 

+

1043 surrogate = u"\U000e0100" 

+

1044 if env.PY2: 

+

1045 surrogate = surrogate.encode('utf-8') 

+

1046 

+

1047 self.make_file("unicode.py", """\ 

+

1048 # -*- coding: utf-8 -*- 

+

1049 # A Python source file with exotic characters. 

+

1050 

+

1051 upside_down = "ʎd˙ǝbɐɹǝʌoɔ" 

+

1052 surrogate = "db40,dd00: x@" 

+

1053 """.replace("@", surrogate)) 

+

1054 

+

1055 cov = coverage.Coverage() 

+

1056 unimod = self.start_import_stop(cov, "unicode") 

+

1057 cov.html_report(unimod, directory="out/unicode") 

+

1058 

+

1059 compare_html(gold_path("html/unicode"), "out/unicode") 

+

1060 contains( 

+

1061 "out/unicode/unicode_py.html", 

+

1062 '<span class="str">"&#654;d&#729;&#477;b&#592;&#633;&#477;&#652;o&#596;"</span>', 

+

1063 ) 

+

1064 

+

1065 contains_any( 

+

1066 "out/unicode/unicode_py.html", 

+

1067 '<span class="str">"db40,dd00: x&#56128;&#56576;"</span>', 

+

1068 '<span class="str">"db40,dd00: x&#917760;"</span>', 

+

1069 ) 

+

1070 

+

1071 

+

1072class HtmlWithContextsTest(HtmlTestHelpers, CoverageTest): 

+

1073 """Tests of the HTML reports with shown contexts.""" 

+

1074 

+

1075 EMPTY = coverage.html.HtmlDataGeneration.EMPTY 

+

1076 

+

1077 def html_data_from_cov(self, cov, morf): 

+

1078 """Get HTML report data from a `Coverage` object for a morf.""" 

+

1079 with self.assert_warnings(cov, []): 

+

1080 datagen = coverage.html.HtmlDataGeneration(cov) 

+

1081 fr, analysis = next(get_analysis_to_report(cov, [morf])) 

+

1082 file_data = datagen.data_for_file(fr, analysis) 

+

1083 return file_data 

+

1084 

+

1085 SOURCE = """\ 

+

1086 def helper(lineno): 

+

1087 x = 2 

+

1088 

+

1089 def test_one(): 

+

1090 a = 5 

+

1091 helper(6) 

+

1092 

+

1093 def test_two(): 

+

1094 a = 9 

+

1095 b = 10 

+

1096 if a > 11: 

+

1097 b = 12 

+

1098 assert a == (13-4) 

+

1099 assert b == (14-4) 

+

1100 helper( 

+

1101 16 

+

1102 ) 

+

1103 

+

1104 test_one() 

+

1105 x = 20 

+

1106 helper(21) 

+

1107 test_two() 

+

1108 """ 

+

1109 

+

1110 OUTER_LINES = [1, 4, 8, 19, 20, 21, 2, 22] 

+

1111 TEST_ONE_LINES = [5, 6, 2] 

+

1112 TEST_TWO_LINES = [9, 10, 11, 13, 14, 15, 2] 

+

1113 

+

1114 def test_dynamic_contexts(self): 

+

1115 self.make_file("two_tests.py", self.SOURCE) 

+

1116 cov = coverage.Coverage(source=["."]) 

+

1117 cov.set_option("run:dynamic_context", "test_function") 

+

1118 cov.set_option("html:show_contexts", True) 

+

1119 mod = self.start_import_stop(cov, "two_tests") 

+

1120 d = self.html_data_from_cov(cov, mod) 

+

1121 context_labels = [self.EMPTY, 'two_tests.test_one', 'two_tests.test_two'] 

+

1122 expected_lines = [self.OUTER_LINES, self.TEST_ONE_LINES, self.TEST_TWO_LINES] 

+

1123 for label, expected in zip(context_labels, expected_lines): 

+

1124 actual = [ 

+

1125 ld.number for ld in d.lines 

+

1126 if label == ld.contexts_label or label in (ld.contexts or ()) 

+

1127 ] 

+

1128 assert sorted(expected) == sorted(actual) 

+
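For reference, a minimal sketch of the two options these context tests flip on, using the same set_option calls as above; the rest of the measurement flow is unchanged.

import coverage

cov = coverage.Coverage(source=["."])
cov.set_option("run:dynamic_context", "test_function")   # record which test ran each line
cov.set_option("html:show_contexts", True)               # surface those contexts in the HTML report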

1129 

+

1130 def test_filtered_dynamic_contexts(self): 

+

1131 self.make_file("two_tests.py", self.SOURCE) 

+

1132 cov = coverage.Coverage(source=["."]) 

+

1133 cov.set_option("run:dynamic_context", "test_function") 

+

1134 cov.set_option("html:show_contexts", True) 

+

1135 cov.set_option("report:contexts", ["test_one"]) 

+

1136 mod = self.start_import_stop(cov, "two_tests") 

+

1137 d = self.html_data_from_cov(cov, mod) 

+

1138 

+

1139 context_labels = [self.EMPTY, 'two_tests.test_one', 'two_tests.test_two'] 

+

1140 expected_lines = [[], self.TEST_ONE_LINES, []] 

+

1141 for label, expected in zip(context_labels, expected_lines): 

+

1142 actual = [ld.number for ld in d.lines if label in (ld.contexts or ())] 

+

1143 assert sorted(expected) == sorted(actual) 

+

1144 

+

1145 def test_no_contexts_warns_no_contexts(self): 

+

1146 # If no contexts were collected, then show_contexts emits a warning. 

+

1147 self.make_file("two_tests.py", self.SOURCE) 

+

1148 cov = coverage.Coverage(source=["."]) 

+

1149 cov.set_option("html:show_contexts", True) 

+

1150 self.start_import_stop(cov, "two_tests") 

+

1151 with self.assert_warnings(cov, ["No contexts were measured"]): 

+

1152 cov.html_report() 

+

1153 

+

1154 def test_dynamic_contexts_relative_files(self): 

+

1155 self.make_file("two_tests.py", self.SOURCE) 

+

1156 self.make_file("config", "[run]\nrelative_files = True") 

+

1157 cov = coverage.Coverage(source=["."], config_file="config") 

+

1158 cov.set_option("run:dynamic_context", "test_function") 

+

1159 cov.set_option("html:show_contexts", True) 

+

1160 mod = self.start_import_stop(cov, "two_tests") 

+

1161 d = self.html_data_from_cov(cov, mod) 

+

1162 context_labels = [self.EMPTY, 'two_tests.test_one', 'two_tests.test_two'] 

+

1163 expected_lines = [self.OUTER_LINES, self.TEST_ONE_LINES, self.TEST_TWO_LINES] 

+

1164 for label, expected in zip(context_labels, expected_lines): 

+

1165 actual = [ 

+

1166 ld.number for ld in d.lines 

+

1167 if label == ld.contexts_label or label in (ld.contexts or ()) 

+

1168 ] 

+

1169 assert sorted(expected) == sorted(actual) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_json_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_json_py.html new file mode 100644 index 000000000..637da92f4 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_json_py.html @@ -0,0 +1,236 @@ + + + + + + Coverage for tests/test_json.py: 100.000% + + + + + + + + + + +

1# coding: utf-8 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Test json-based summary reporting for coverage.py""" 

+

6from datetime import datetime 

+

7import json 

+

8import os 

+

9 

+

10import coverage 

+

11from tests.coveragetest import UsingModulesMixin, CoverageTest 

+

12 

+

13 

+

14class JsonReportTest(UsingModulesMixin, CoverageTest): 

+

15 """Tests of the JSON reports from coverage.py.""" 

+

16 def _assert_expected_json_report(self, cov, expected_result): 

+

17 """ 

+

18 Helper for tests that handles the common ceremony so the tests can clearly show the 


+

19 consequences of setting various arguments. 

+

20 """ 

+

21 self.make_file("a.py", """\ 

+

22 a = {'b': 1} 

+

23 if a.get('a'): 

+

24 b = 1 

+

25 """) 

+

26 a = self.start_import_stop(cov, "a") 

+

27 output_path = os.path.join(self.temp_dir, "a.json") 

+

28 cov.json_report(a, outfile=output_path) 

+

29 with open(output_path) as result_file: 

+

30 parsed_result = json.load(result_file) 

+

31 self.assert_recent_datetime( 

+

32 datetime.strptime(parsed_result['meta']['timestamp'], "%Y-%m-%dT%H:%M:%S.%f") 

+

33 ) 

+

34 del (parsed_result['meta']['timestamp']) 

+

35 assert parsed_result == expected_result 

+

36 

+

37 def test_branch_coverage(self): 

+

38 cov = coverage.Coverage(branch=True) 

+

39 expected_result = { 

+

40 'meta': { 

+

41 "version": coverage.__version__, 

+

42 "branch_coverage": True, 

+

43 "show_contexts": False, 

+

44 }, 

+

45 'files': { 

+

46 'a.py': { 

+

47 'executed_lines': [1, 2], 

+

48 'missing_lines': [3], 

+

49 'excluded_lines': [], 

+

50 'summary': { 

+

51 'missing_lines': 1, 

+

52 'covered_lines': 2, 

+

53 'num_statements': 3, 

+

54 'num_branches': 2, 

+

55 'excluded_lines': 0, 

+

56 'num_partial_branches': 1, 

+

57 'covered_branches': 1, 

+

58 'missing_branches': 1, 

+

59 'percent_covered': 60.0, 

+

60 'percent_covered_display': '60', 

+

61 } 

+

62 } 

+

63 }, 

+

64 'totals': { 

+

65 'missing_lines': 1, 

+

66 'covered_lines': 2, 

+

67 'num_statements': 3, 

+

68 'num_branches': 2, 

+

69 'excluded_lines': 0, 

+

70 'num_partial_branches': 1, 

+

71 'percent_covered': 60.0, 

+

72 'percent_covered_display': '60', 

+

73 'covered_branches': 1, 

+

74 'missing_branches': 1, 

+

75 } 

+

76 } 

+

77 self._assert_expected_json_report(cov, expected_result) 

+
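A minimal sketch of producing the same JSON report through the public API and reading a value back, assuming an a.py like the one made in the helper above.

import json
import coverage

cov = coverage.Coverage(branch=True)
cov.start()
import a
cov.stop()
cov.json_report(outfile="a.json")             # same call the helper uses
with open("a.json") as f:
    report = json.load(f)
print(report["totals"]["percent_covered_display"])   # "60" for the file above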

78 

+

79 def test_simple_line_coverage(self): 

+

80 cov = coverage.Coverage() 

+

81 expected_result = { 

+

82 'meta': { 

+

83 "version": coverage.__version__, 

+

84 "branch_coverage": False, 

+

85 "show_contexts": False, 

+

86 }, 

+

87 'files': { 

+

88 'a.py': { 

+

89 'executed_lines': [1, 2], 

+

90 'missing_lines': [3], 

+

91 'excluded_lines': [], 

+

92 'summary': { 

+

93 'excluded_lines': 0, 

+

94 'missing_lines': 1, 

+

95 'covered_lines': 2, 

+

96 'num_statements': 3, 

+

97 'percent_covered': 66.66666666666667, 

+

98 'percent_covered_display': '67', 

+

99 } 

+

100 } 

+

101 }, 

+

102 'totals': { 

+

103 'excluded_lines': 0, 

+

104 'missing_lines': 1, 

+

105 'covered_lines': 2, 

+

106 'num_statements': 3, 

+

107 'percent_covered': 66.66666666666667, 

+

108 'percent_covered_display': '67', 

+

109 } 

+

110 } 

+

111 self._assert_expected_json_report(cov, expected_result) 

+

112 

+

113 def run_context_test(self, relative_files): 

+

114 """A helper for two tests below.""" 

+

115 self.make_file("config", """\ 

+

116 [run] 

+

117 relative_files = {} 

+

118 

+

119 [report] 

+

120 precision = 2 

+

121 

+

122 [json] 

+

123 show_contexts = True 

+

124 """.format(relative_files)) 

+

125 cov = coverage.Coverage(context="cool_test", config_file="config") 

+

126 expected_result = { 

+

127 'meta': { 

+

128 "version": coverage.__version__, 

+

129 "branch_coverage": False, 

+

130 "show_contexts": True, 

+

131 }, 

+

132 'files': { 

+

133 'a.py': { 

+

134 'executed_lines': [1, 2], 

+

135 'missing_lines': [3], 

+

136 'excluded_lines': [], 

+

137 "contexts": { 

+

138 "1": [ 

+

139 "cool_test" 

+

140 ], 

+

141 "2": [ 

+

142 "cool_test" 

+

143 ] 

+

144 }, 

+

145 'summary': { 

+

146 'excluded_lines': 0, 

+

147 'missing_lines': 1, 

+

148 'covered_lines': 2, 

+

149 'num_statements': 3, 

+

150 'percent_covered': 66.66666666666667, 

+

151 'percent_covered_display': '66.67', 

+

152 } 

+

153 } 

+

154 }, 

+

155 'totals': { 

+

156 'excluded_lines': 0, 

+

157 'missing_lines': 1, 

+

158 'covered_lines': 2, 

+

159 'num_statements': 3, 

+

160 'percent_covered': 66.66666666666667, 

+

161 'percent_covered_display': '66.67', 

+

162 } 

+

163 } 

+

164 self._assert_expected_json_report(cov, expected_result) 

+

165 

+

166 def test_context_non_relative(self): 

+

167 self.run_context_test(relative_files=False) 

+

168 

+

169 def test_context_relative(self): 

+

170 self.run_context_test(relative_files=True) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_misc_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_misc_py.html new file mode 100644 index 000000000..141cfabb8 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_misc_py.html @@ -0,0 +1,222 @@ + + + + + + Coverage for tests/test_misc.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests of miscellaneous stuff.""" 

+

5 

+

6import pytest 

+

7 

+

8from coverage.misc import contract, dummy_decorator_with_args, file_be_gone 

+

9from coverage.misc import Hasher, one_of, substitute_variables 

+

10from coverage.misc import CoverageException, USE_CONTRACTS 

+

11 

+

12from tests.coveragetest import CoverageTest 

+

13 

+

14 

+

15class HasherTest(CoverageTest): 

+

16 """Test our wrapper of md5 hashing.""" 

+

17 

+

18 run_in_temp_dir = False 

+

19 

+

20 def test_string_hashing(self): 

+

21 h1 = Hasher() 

+

22 h1.update("Hello, world!") 

+

23 h2 = Hasher() 

+

24 h2.update("Goodbye!") 

+

25 h3 = Hasher() 

+

26 h3.update("Hello, world!") 

+

27 assert h1.hexdigest() != h2.hexdigest() 

+

28 assert h1.hexdigest() == h3.hexdigest() 

+

29 

+

30 def test_bytes_hashing(self): 

+

31 h1 = Hasher() 

+

32 h1.update(b"Hello, world!") 

+

33 h2 = Hasher() 

+

34 h2.update(b"Goodbye!") 

+

35 assert h1.hexdigest() != h2.hexdigest() 

+

36 

+

37 def test_unicode_hashing(self): 

+

38 h1 = Hasher() 

+

39 h1.update(u"Hello, world! \N{SNOWMAN}") 

+

40 h2 = Hasher() 

+

41 h2.update(u"Goodbye!") 

+

42 assert h1.hexdigest() != h2.hexdigest() 

+

43 

+

44 def test_dict_hashing(self): 

+

45 h1 = Hasher() 

+

46 h1.update({'a': 17, 'b': 23}) 

+

47 h2 = Hasher() 

+

48 h2.update({'b': 23, 'a': 17}) 

+

49 assert h1.hexdigest() == h2.hexdigest() 

+

50 

+

51 def test_dict_collision(self): 

+

52 h1 = Hasher() 

+

53 h1.update({'a': 17, 'b': {'c': 1, 'd': 2}}) 

+

54 h2 = Hasher() 

+

55 h2.update({'a': 17, 'b': {'c': 1}, 'd': 2}) 

+

56 assert h1.hexdigest() != h2.hexdigest() 

+
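A usage sketch of the Hasher wrapper exercised above; it is an internal helper, shown here only to illustrate what the assertions rely on.

from coverage.misc import Hasher

h1 = Hasher()
h1.update({'a': 17, 'b': 23})
h2 = Hasher()
h2.update({'b': 23, 'a': 17})      # key order doesn't change the digest
assert h1.hexdigest() == h2.hexdigest()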

57 

+

58 

+

59class RemoveFileTest(CoverageTest): 

+

60 """Tests of misc.file_be_gone.""" 

+

61 

+

62 def test_remove_nonexistent_file(self): 

+

63 # It's OK to try to remove a file that doesn't exist. 

+

64 file_be_gone("not_here.txt") 

+

65 

+

66 def test_remove_actual_file(self): 

+

67 # It really does remove a file that does exist. 

+

68 self.make_file("here.txt", "We are here, we are here, we are here!") 

+

69 file_be_gone("here.txt") 

+

70 self.assert_doesnt_exist("here.txt") 

+

71 

+

72 def test_actual_errors(self): 

+

73 # Errors can still happen. 

+

74 # ". is a directory" on Unix, or "Access denied" on Windows 

+

75 with pytest.raises(OSError): 

+

76 file_be_gone(".") 

+

77 

+

78 

+

79@pytest.mark.skipif(not USE_CONTRACTS, reason="Contracts are disabled, can't test them") 

+

80class ContractTest(CoverageTest): 

+

81 """Tests of our contract decorators.""" 

+

82 

+

83 run_in_temp_dir = False 

+

84 

+

85 def test_bytes(self): 

+

86 @contract(text='bytes|None') 

+

87 def need_bytes(text=None): 

+

88 return text 

+

89 

+

90 assert need_bytes(b"Hey") == b"Hey" 

+

91 assert need_bytes() is None 

+

92 with pytest.raises(Exception): 

+

93 need_bytes(u"Oops") 

+

94 

+

95 def test_unicode(self): 

+

96 @contract(text='unicode|None') 

+

97 def need_unicode(text=None): 

+

98 return text 

+

99 

+

100 assert need_unicode(u"Hey") == u"Hey" 

+

101 assert need_unicode() is None 

+

102 with pytest.raises(Exception): 

+

103 need_unicode(b"Oops") 

+

104 

+

105 def test_one_of(self): 

+

106 @one_of("a, b, c") 

+

107 def give_me_one(a=None, b=None, c=None): 

+

108 return (a, b, c) 

+

109 

+

110 assert give_me_one(a=17) == (17, None, None) 

+

111 assert give_me_one(b=set()) == (None, set(), None) 

+

112 assert give_me_one(c=17) == (None, None, 17) 

+

113 with pytest.raises(AssertionError): 

+

114 give_me_one(a=17, b=set()) 

+

115 with pytest.raises(AssertionError): 

+

116 give_me_one() 

+

117 

+

118 def test_dummy_decorator_with_args(self): 

+

119 @dummy_decorator_with_args("anything", this=17, that="is fine") 

+

120 def undecorated(a=None, b=None): 

+

121 return (a, b) 

+

122 

+

123 assert undecorated() == (None, None) 

+

124 assert undecorated(17) == (17, None) 

+

125 assert undecorated(b=23) == (None, 23) 

+

126 assert undecorated(b=42, a=3) == (3, 42) 

+

127 

+

128 

+

129VARS = { 

+

130 'FOO': 'fooey', 

+

131 'BAR': 'xyzzy', 

+

132} 

+

133 

+

134@pytest.mark.parametrize("before, after", [ 

+

135 ("Nothing to do", "Nothing to do"), 

+

136 ("Dollar: $$", "Dollar: $"), 

+

137 ("Simple: $FOO is fooey", "Simple: fooey is fooey"), 

+

138 ("Braced: X${FOO}X.", "Braced: XfooeyX."), 

+

139 ("Missing: x${NOTHING}y is xy", "Missing: xy is xy"), 

+

140 ("Multiple: $$ $FOO $BAR ${FOO}", "Multiple: $ fooey xyzzy fooey"), 

+

141 ("Ill-formed: ${%5} ${{HI}} ${", "Ill-formed: ${%5} ${{HI}} ${"), 

+

142 ("Strict: ${FOO?} is there", "Strict: fooey is there"), 

+

143 ("Defaulted: ${WUT-missing}!", "Defaulted: missing!"), 

+

144 ("Defaulted empty: ${WUT-}!", "Defaulted empty: !"), 

+

145]) 

+

146def test_substitute_variables(before, after): 

+

147 assert substitute_variables(before, VARS) == after 

+
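A couple of the table rows above, run directly, to show the shape of a substitute_variables call.

from coverage.misc import substitute_variables

variables = {'FOO': 'fooey', 'BAR': 'xyzzy'}
assert substitute_variables("Simple: $FOO is fooey", variables) == "Simple: fooey is fooey"
assert substitute_variables("Defaulted: ${WUT-missing}!", variables) == "Defaulted: missing!"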

148 

+

149@pytest.mark.parametrize("text", [ 

+

150 "Strict: ${NOTHING?} is an error", 

+

151]) 

+

152def test_substitute_variables_errors(text): 

+

153 with pytest.raises(CoverageException) as exc_info: 

+

154 substitute_variables(text, VARS) 

+

155 assert text in str(exc_info.value) 

+

156 assert "Variable NOTHING is undefined" in str(exc_info.value) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_mixins_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_mixins_py.html new file mode 100644 index 000000000..bcfaf8642 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_mixins_py.html @@ -0,0 +1,147 @@ + + + + + + Coverage for tests/test_mixins.py: 100.000% + + + + + + + + + + +

1# -*- coding: utf-8 -*- 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Tests of code in tests/mixins.py""" 

+

6 

+

7import pytest 

+

8 

+

9from coverage.backward import import_local_file 

+

10 

+

11from tests.mixins import TempDirMixin, SysPathModulesMixin 

+

12 

+

13 

+

14class TempDirMixinTest(TempDirMixin): 

+

15 """Test the methods in TempDirMixin.""" 

+

16 

+

17 def file_text(self, fname): 

+

18 """Return the text read from a file.""" 

+

19 with open(fname, "rb") as f: 

+

20 return f.read().decode('ascii') 

+

21 

+

22 def test_make_file(self): 

+

23 # A simple file. 

+

24 self.make_file("fooey.boo", "Hello there") 

+

25 assert self.file_text("fooey.boo") == "Hello there" 

+

26 # A file in a sub-directory 

+

27 self.make_file("sub/another.txt", "Another") 

+

28 assert self.file_text("sub/another.txt") == "Another" 

+

29 # A second file in that sub-directory 

+

30 self.make_file("sub/second.txt", "Second") 

+

31 assert self.file_text("sub/second.txt") == "Second" 

+

32 # A deeper directory 

+

33 self.make_file("sub/deeper/evenmore/third.txt") 

+

34 assert self.file_text("sub/deeper/evenmore/third.txt") == "" 

+

35 # Dedenting 

+

36 self.make_file("dedented.txt", """\ 

+

37 Hello 

+

38 Bye 

+

39 """) 

+

40 assert self.file_text("dedented.txt") == "Hello\nBye\n" 

+

41 

+

42 def test_make_file_newline(self): 

+

43 self.make_file("unix.txt", "Hello\n") 

+

44 assert self.file_text("unix.txt") == "Hello\n" 

+

45 self.make_file("dos.txt", "Hello\n", newline="\r\n") 

+

46 assert self.file_text("dos.txt") == "Hello\r\n" 

+

47 self.make_file("mac.txt", "Hello\n", newline="\r") 

+

48 assert self.file_text("mac.txt") == "Hello\r" 

+

49 

+

50 def test_make_file_non_ascii(self): 

+

51 self.make_file("unicode.txt", "tablo: «ταБℓσ»") 

+

52 with open("unicode.txt", "rb") as f: 

+

53 text = f.read() 

+

54 assert text == b"tablo: \xc2\xab\xcf\x84\xce\xb1\xd0\x91\xe2\x84\x93\xcf\x83\xc2\xbb" 

+

55 

+

56 def test_make_bytes_file(self): 

+

57 self.make_file("binary.dat", bytes=b"\x99\x33\x66hello\0") 

+

58 with open("binary.dat", "rb") as f: 

+

59 data = f.read() 

+

60 assert data == b"\x99\x33\x66hello\0" 

+

61 

+

62 

+

63class SysPathModulessMixinTest(TempDirMixin, SysPathModulesMixin): 

+

64 """Tests of SysPathModulesMixin.""" 

+

65 

+

66 @pytest.mark.parametrize("val", [17, 42]) 

+

67 def test_module_independence(self, val): 

+

68 self.make_file("xyzzy.py", "A = {}".format(val)) 

+

69 import xyzzy # pylint: disable=import-error 

+

70 assert xyzzy.A == val 

+

71 

+

72 def test_cleanup_and_reimport(self): 

+

73 self.make_file("xyzzy.py", "A = 17") 

+

74 xyzzy = import_local_file("xyzzy") 

+

75 assert xyzzy.A == 17 

+

76 

+

77 self.clean_local_file_imports() 

+

78 

+

79 self.make_file("xyzzy.py", "A = 42") 

+

80 xyzzy = import_local_file("xyzzy") 

+

81 assert xyzzy.A == 42 

+
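A sketch of the compatibility helper the test leans on: import a module from a .py file in the current directory (the name xyzzy.py is only an example and the file must exist).

from coverage.backward import import_local_file

xyzzy = import_local_file("xyzzy")   # loads ./xyzzy.py
print(xyzzy.A)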
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_numbits_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_numbits_py.html new file mode 100644 index 000000000..da6ffffc0 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_numbits_py.html @@ -0,0 +1,229 @@ + + + + + + Coverage for tests/test_numbits.py: 99.074% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests for coverage.numbits""" 

+

5 

+

6import json 

+

7import sqlite3 

+

8 

+

9from hypothesis import example, given, settings 

+

10from hypothesis.strategies import sets, integers 

+

11 

+

12from coverage import env 

+

13from coverage.backward import byte_to_int 

+

14from coverage.numbits import ( 

+

15 nums_to_numbits, numbits_to_nums, numbits_union, numbits_intersection, 

+

16 numbits_any_intersection, num_in_numbits, register_sqlite_functions, 

+

17 ) 

+

18 

+

19from tests.coveragetest import CoverageTest 

+

20 

+

21# Hypothesis-generated line number data 

+

22line_numbers = integers(min_value=1, max_value=9999) 

+

23line_number_sets = sets(line_numbers) 

+

24 

+

25# When coverage-testing ourselves, hypothesis complains about a test being 

+

26# flaky because the first run exceeds the deadline (and fails), and the second 

+

27# run succeeds. Disable the deadline if we are coverage-testing. 

+

28default_settings = settings() 

+

29if env.METACOV:    29 ↛ 33   line 29 didn't jump to line 33, because the condition on line 29 was never false

+

30 default_settings = settings(default_settings, deadline=None) 

+

31 

+

32 

+

33def good_numbits(numbits): 

+

34 """Assert that numbits is good.""" 

+

35 # It shouldn't end with a zero byte, that should have been trimmed off. 

+

36 assert (not numbits) or (byte_to_int(numbits[-1]) != 0) 

+

37 

+

38 

+

39class NumbitsOpTest(CoverageTest): 

+

40 """Tests of the numbits operations in numbits.py.""" 

+

41 

+

42 run_in_temp_dir = False 

+

43 

+

44 @given(line_number_sets) 

+

45 @settings(default_settings) 

+

46 def test_conversion(self, nums): 

+

47 numbits = nums_to_numbits(nums) 

+

48 good_numbits(numbits) 

+

49 nums2 = numbits_to_nums(numbits) 

+

50 assert nums == set(nums2) 

+
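A concrete round trip of the conversion being property-tested above.

from coverage.numbits import nums_to_numbits, numbits_to_nums

nb = nums_to_numbits({1, 17, 23})
assert set(numbits_to_nums(nb)) == {1, 17, 23}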

51 

+

52 @given(line_number_sets, line_number_sets) 

+

53 @settings(default_settings) 

+

54 def test_union(self, nums1, nums2): 

+

55 nb1 = nums_to_numbits(nums1) 

+

56 good_numbits(nb1) 

+

57 nb2 = nums_to_numbits(nums2) 

+

58 good_numbits(nb2) 

+

59 nbu = numbits_union(nb1, nb2) 

+

60 good_numbits(nbu) 

+

61 union = numbits_to_nums(nbu) 

+

62 assert nums1 | nums2 == set(union) 

+

63 

+

64 @given(line_number_sets, line_number_sets) 

+

65 @settings(default_settings) 

+

66 def test_intersection(self, nums1, nums2): 

+

67 nb1 = nums_to_numbits(nums1) 

+

68 good_numbits(nb1) 

+

69 nb2 = nums_to_numbits(nums2) 

+

70 good_numbits(nb2) 

+

71 nbi = numbits_intersection(nb1, nb2) 

+

72 good_numbits(nbi) 

+

73 intersection = numbits_to_nums(nbi) 

+

74 assert nums1 & nums2 == set(intersection) 

+

75 

+

76 @given(line_number_sets, line_number_sets) 

+

77 @settings(default_settings) 

+

78 def test_any_intersection(self, nums1, nums2): 

+

79 nb1 = nums_to_numbits(nums1) 

+

80 good_numbits(nb1) 

+

81 nb2 = nums_to_numbits(nums2) 

+

82 good_numbits(nb2) 

+

83 inter = numbits_any_intersection(nb1, nb2) 

+

84 expect = bool(nums1 & nums2) 

+

85 assert expect == bool(inter) 

+

86 

+

87 @given(line_numbers, line_number_sets) 

+

88 @settings(default_settings) 

+

89 @example(152, {144}) 

+

90 def test_num_in_numbits(self, num, nums): 

+

91 numbits = nums_to_numbits(nums) 

+

92 good_numbits(numbits) 

+

93 is_in = num_in_numbits(num, numbits) 

+

94 assert (num in nums) == is_in 

+

95 

+

96 

+

97class NumbitsSqliteFunctionTest(CoverageTest): 

+

98 """Tests of the SQLite integration for numbits functions.""" 

+

99 

+

100 run_in_temp_dir = False 

+

101 

+

102 def setup_test(self): 

+

103 super(NumbitsSqliteFunctionTest, self).setup_test() 

+

104 conn = sqlite3.connect(":memory:") 

+

105 register_sqlite_functions(conn) 

+

106 self.cursor = conn.cursor() 

+

107 self.cursor.execute("create table data (id int, numbits blob)") 

+

108 self.cursor.executemany( 

+

109 "insert into data (id, numbits) values (?, ?)", 

+

110 [ 

+

111 (i, nums_to_numbits(range(i, 100, i))) 

+

112 for i in range(1, 11) 

+

113 ] 

+

114 ) 

+

115 self.addCleanup(self.cursor.close) 

+
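The same SQLite integration as the fixture above, in a standalone sketch: register the numbits functions on a connection, then call one from SQL.

import sqlite3
from coverage.numbits import nums_to_numbits, register_sqlite_functions

conn = sqlite3.connect(":memory:")
register_sqlite_functions(conn)
cur = conn.cursor()
res = cur.execute("select num_in_numbits(?, ?)", (3, nums_to_numbits([1, 2, 3])))
print(res.fetchone()[0])   # 1: line 3 is in the numbits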

116 

+

117 def test_numbits_union(self): 

+

118 res = self.cursor.execute( 

+

119 "select numbits_union(" 

+

120 "(select numbits from data where id = 7)," 

+

121 "(select numbits from data where id = 9)" 

+

122 ")" 

+

123 ) 

+

124 expected = [ 

+

125 7, 9, 14, 18, 21, 27, 28, 35, 36, 42, 45, 49, 

+

126 54, 56, 63, 70, 72, 77, 81, 84, 90, 91, 98, 99, 

+

127 ] 

+

128 answer = numbits_to_nums(list(res)[0][0]) 

+

129 assert expected == answer 

+

130 

+

131 def test_numbits_intersection(self): 

+

132 res = self.cursor.execute( 

+

133 "select numbits_intersection(" 

+

134 "(select numbits from data where id = 7)," 

+

135 "(select numbits from data where id = 9)" 

+

136 ")" 

+

137 ) 

+

138 answer = numbits_to_nums(list(res)[0][0]) 

+

139 assert [63] == answer 

+

140 

+

141 def test_numbits_any_intersection(self): 

+

142 res = self.cursor.execute( 

+

143 "select numbits_any_intersection(?, ?)", 

+

144 (nums_to_numbits([1, 2, 3]), nums_to_numbits([3, 4, 5])) 

+

145 ) 

+

146 answer = [any_inter for (any_inter,) in res] 

+

147 assert [1] == answer 

+

148 

+

149 res = self.cursor.execute( 

+

150 "select numbits_any_intersection(?, ?)", 

+

151 (nums_to_numbits([1, 2, 3]), nums_to_numbits([7, 8, 9])) 

+

152 ) 

+

153 answer = [any_inter for (any_inter,) in res] 

+

154 assert [0] == answer 

+

155 

+

156 def test_num_in_numbits(self): 

+

157 res = self.cursor.execute("select id, num_in_numbits(12, numbits) from data order by id") 

+

158 answer = [is_in for (id, is_in) in res] 

+

159 assert [1, 1, 1, 1, 0, 1, 0, 0, 0, 0] == answer 

+

160 

+

161 def test_numbits_to_nums(self): 

+

162 res = self.cursor.execute("select numbits_to_nums(?)", [nums_to_numbits([1, 2, 3])]) 

+

163 assert [1, 2, 3] == json.loads(res.fetchone()[0]) 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_oddball_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_oddball_py.html new file mode 100644 index 000000000..537293d98 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_test_oddball_py.html @@ -0,0 +1,664 @@ + + + + + + Coverage for tests/test_oddball.py: 100.000% + + + + + + + + + + +

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Oddball cases for testing coverage.py""" 

+

5 

+

6import os.path 

+

7import sys 

+

8 

+

9from flaky import flaky 

+

10import pytest 

+

11 

+

12import coverage 

+

13from coverage import env 

+

14from coverage.backward import import_local_file 

+

15from coverage.files import abs_file 

+

16 

+

17from tests.coveragetest import CoverageTest 

+

18from tests import osinfo 

+

19 

+

20 

+

21class ThreadingTest(CoverageTest): 

+

22 """Tests of the threading support.""" 

+

23 

+

24 def test_threading(self): 

+

25 self.check_coverage("""\ 

+

26 import threading 

+

27 

+

28 def fromMainThread(): 

+

29 return "called from main thread" 

+

30 

+

31 def fromOtherThread(): 

+

32 return "called from other thread" 

+

33 

+

34 def neverCalled(): 

+

35 return "no one calls me" 

+

36 

+

37 other = threading.Thread(target=fromOtherThread) 

+

38 other.start() 

+

39 fromMainThread() 

+

40 other.join() 

+

41 """, 

+

42 [1, 3, 4, 6, 7, 9, 10, 12, 13, 14, 15], "10") 

+

43 

+

44 def test_thread_run(self): 

+

45 self.check_coverage("""\ 

+

46 import threading 

+

47 

+

48 class TestThread(threading.Thread): 

+

49 def run(self): 

+

50 self.a = 5 

+

51 self.do_work() 

+

52 self.a = 7 

+

53 

+

54 def do_work(self): 

+

55 self.a = 10 

+

56 

+

57 thd = TestThread() 

+

58 thd.start() 

+

59 thd.join() 

+

60 """, 

+

61 [1, 3, 4, 5, 6, 7, 9, 10, 12, 13, 14], "") 

+

62 

+

63 

+

64class RecursionTest(CoverageTest): 

+

65 """Check what happens when recursive code gets near limits.""" 

+

66 

+

67 def test_short_recursion(self): 

+

68 # We can definitely get close to 500 stack frames. 

+

69 self.check_coverage("""\ 

+

70 def recur(n): 

+

71 if n == 0: 

+

72 return 0 

+

73 else: 

+

74 return recur(n-1)+1 

+

75 

+

76 recur(495) # We can get at least this many stack frames. 

+

77 i = 8 # and this line will be traced 

+

78 """, 

+

79 [1, 2, 3, 5, 7, 8], "") 

+

80 

+

81 def test_long_recursion(self): 

+

82 # We can't finish a very deep recursion, but we don't crash. 

+

83 with pytest.raises(RuntimeError): 

+

84 self.check_coverage("""\ 

+

85 def recur(n): 

+

86 if n == 0: 

+

87 return 0 

+

88 else: 

+

89 return recur(n-1)+1 

+

90 

+

91 recur(100000) # This is definitely too many frames. 

+

92 """, 

+

93 [1, 2, 3, 5, 7], "" 

+

94 ) 

+

95 

+

96 def test_long_recursion_recovery(self): 

+

97 # Test the core of bug 93: https://github.com/nedbat/coveragepy/issues/93 

+

98 # When recovering from a stack overflow, the Python trace function is 

+

99 # disabled, but the C trace function is not. So if we're using a 

+

100 # Python trace function, we won't trace anything after the stack 

+

101 # overflow, and there should be a warning about it. If we're using 

+

102 # the C trace function, only line 3 will be missing, and all else 

+

103 # will be traced. 

+

104 

+

105 self.make_file("recur.py", """\ 

+

106 def recur(n): 

+

107 if n == 0: 

+

108 return 0 # never hit 

+

109 else: 

+

110 return recur(n-1)+1 

+

111 

+

112 try: 

+

113 recur(100000) # This is definitely too many frames. 

+

114 except RuntimeError: 

+

115 i = 10 

+

116 i = 11 

+

117 """) 

+

118 

+

119 cov = coverage.Coverage() 

+

120 self.start_import_stop(cov, "recur") 

+

121 

+

122 pytrace = (cov._collector.tracer_name() == "PyTracer") 

+

123 expected_missing = [3] 

+

124 if pytrace: # pragma: no metacov 

+

125 expected_missing += [9, 10, 11] 

+

126 

+

127 _, statements, missing, _ = cov.analysis("recur.py") 

+

128 assert statements == [1, 2, 3, 5, 7, 8, 9, 10, 11] 

+

129 assert expected_missing == missing 

+

130 

+

131 # Get a warning about the stackoverflow effect on the tracing function. 

+

132 if pytrace: # pragma: no metacov 

+

133 assert cov._warnings == ["Trace function changed, measurement is likely wrong: None"] 

+

134 else: 

+

135 assert cov._warnings == [] 

+
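A sketch of the tracer check the test makes; _collector.tracer_name() is private API, used here exactly as in the test to see whether the C or Python trace function is active.

import coverage

cov = coverage.Coverage()
cov.start()
cov.stop()
print(cov._collector.tracer_name())   # "CTracer" or "PyTracer"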

136 

+

137 

+

138class MemoryLeakTest(CoverageTest): 

+

139 """Attempt the impossible: test that memory doesn't leak. 

+

140 

+

141 Note: this test is truly unusual, and has had a colorful history. See 

+

142 for example: https://github.com/nedbat/coveragepy/issues/186 

+

143 

+

144 It may still fail occasionally, especially on PyPy. 

+

145 

+

146 """ 

+

147 @flaky 

+

148 @pytest.mark.skipif(env.JYTHON, reason="Don't bother on Jython") 

+

149 def test_for_leaks(self): 

+

150 # Our original bad memory leak only happened on line numbers > 255, so 

+

151 # make a code object with more lines than that. Ugly string mumbo 

+

152 # jumbo to get 300 blank lines at the beginning. 

+

153 code = """\ 

+

154 # blank line\n""" * 300 + """\ 

+

155 def once(x): # line 301 

+

156 if x % 100 == 0: 

+

157 raise Exception("100!") 

+

158 elif x % 2: 

+

159 return 10 

+

160 else: # line 306 

+

161 return 11 

+

162 i = 0 # Portable loop without alloc'ing memory. 

+

163 while i < ITERS: 

+

164 try: 

+

165 once(i) 

+

166 except: 

+

167 pass 

+

168 i += 1 # line 315 

+

169 """ 

+

170 lines = list(range(301, 315)) 

+

171 lines.remove(306) # Line 306 is the "else". 

+

172 

+

173 # This is a non-deterministic test, so try it a few times, and fail it 

+

174 # only if it predominantly fails. 

+

175 fails = 0 

+

176 for _ in range(10): 

+

177 ram_0 = osinfo.process_ram() 

+

178 self.check_coverage(code.replace("ITERS", "10"), lines, "") 

+

179 ram_10 = osinfo.process_ram() 

+

180 self.check_coverage(code.replace("ITERS", "10000"), lines, "") 

+

181 ram_10k = osinfo.process_ram() 

+

182 # Running the code 10k times shouldn't grow the ram much more than 

+

183 # running it 10 times. 

+

184 ram_growth = (ram_10k - ram_10) - (ram_10 - ram_0) 

+

185 if ram_growth > 100000: 

+

186 fails += 1 # pragma: only failure 

+

187 

+

188 if fails > 8: 

+

189 pytest.fail("RAM grew by %d" % (ram_growth)) # pragma: only failure 

+

190 

+

191 

+

192class MemoryFumblingTest(CoverageTest): 

+

193 """Test that we properly manage the None refcount.""" 

+

194 

+

195 @pytest.mark.skipif(not env.C_TRACER, reason="Only the C tracer has refcounting issues") 

+

196 def test_dropping_none(self): # pragma: not covered 

+

197 # TODO: Mark this so it will only be run sometimes. 

+

198 pytest.skip("This is too expensive for now (30s)") 

+

199 # Start and stop coverage thousands of times to flush out bad 

+

200 # reference counting, maybe. 

+

201 self.make_file("the_code.py", """\ 

+

202 import random 

+

203 def f(): 

+

204 if random.random() > .5: 

+

205 x = 1 

+

206 else: 

+

207 x = 2 

+

208 """) 

+

209 self.make_file("main.py", """\ 

+

210 import coverage 

+

211 import sys 

+

212 from the_code import f 

+

213 for i in range(10000): 

+

214 cov = coverage.Coverage(branch=True) 

+

215 cov.start() 

+

216 f() 

+

217 cov.stop() 

+

218 cov.erase() 

+

219 print("Final None refcount: %d" % (sys.getrefcount(None))) 

+

220 """) 

+

221 status, out = self.run_command_status("python main.py") 

+

222 assert status == 0 

+

223 assert "Final None refcount" in out 

+

224 assert "Fatal" not in out 

+

225 

+

226 

+

227@pytest.mark.skipif(env.JYTHON, reason="Pyexpat isn't a problem on Jython") 

+

228class PyexpatTest(CoverageTest): 

+

229 """Pyexpat screws up tracing. Make sure we've counter-defended properly.""" 

+

230 

+

231 def test_pyexpat(self): 

+

232 # pyexpat calls the trace function explicitly (inexplicably), and does 

+

233 # it wrong for exceptions. Parsing a DOCTYPE for some reason throws 

+

234 # an exception internally, and triggers its wrong behavior. This test 

+

235 # checks that our fake PyTrace_RETURN hack in tracer.c works. It will 

+

236 # also detect if the pyexpat bug is fixed unbeknownst to us, meaning 

+

237 # we'd see two RETURNs where there should only be one. 

+

238 

+

239 self.make_file("trydom.py", """\ 

+

240 import xml.dom.minidom 

+

241 

+

242 XML = '''\\ 

+

243 <!DOCTYPE fooey SYSTEM "http://www.example.com/example.dtd"> 

+

244 <root><child/><child/></root> 

+

245 ''' 

+

246 

+

247 def foo(): 

+

248 dom = xml.dom.minidom.parseString(XML) 

+

249 assert len(dom.getElementsByTagName('child')) == 2 

+

250 a = 11 

+

251 

+

252 foo() 

+

253 """) 

+

254 

+

255 self.make_file("outer.py", "\n"*100 + "import trydom\na = 102\n") 

+

256 

+

257 cov = coverage.Coverage() 

+

258 cov.erase() 

+

259 

+

260 # Import the Python file, executing it. 

+

261 self.start_import_stop(cov, "outer") 

+

262 

+

263 _, statements, missing, _ = cov.analysis("trydom.py") 

+

264 assert statements == [1, 3, 8, 9, 10, 11, 13] 

+

265 assert missing == [] 

+

266 

+

267 _, statements, missing, _ = cov.analysis("outer.py") 

+

268 assert statements == [101, 102] 

+

269 assert missing == [] 

+

270 

+

271 # Make sure pyexpat isn't recorded as a source file. 

+

272 # https://github.com/nedbat/coveragepy/issues/419 

+

273 files = cov.get_data().measured_files() 

+

274 msg = "Pyexpat.c is in the measured files!: %r:" % (files,) 

+

275 assert not any(f.endswith("pyexpat.c") for f in files), msg 

+
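For reference, a sketch of the two inspection calls this test relies on after measurement: analysis() for per-file statement and missing line numbers, and get_data().measured_files() for the set of files that were traced (assumes a trydom.py like the one above).

import coverage

cov = coverage.Coverage()
cov.start()
import trydom
cov.stop()

_, statements, missing, _ = cov.analysis("trydom.py")
print(statements, missing)
print(sorted(cov.get_data().measured_files()))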

276 

+

277 

+

278class ExceptionTest(CoverageTest): 

+

279 """I suspect different versions of Python deal with exceptions differently 

+

280 in the trace function. 

+

281 """ 

+

282 

+

283 def test_exception(self): 

+

284 # Python 2.3's trace function doesn't get called with "return" if the 

+

285 # scope is exiting due to an exception. This confounds our trace 

+

286 # function which relies on scope announcements to track which files to 

+

287 # trace. 

+

288 # 

+

289 # This test is designed to sniff this out. Each function in the call 

+

290 # stack is in a different file, to try to trip up the tracer. Each 

+

291 # file has active lines in a different range so we'll see if the lines 

+

292 # get attributed to the wrong file. 

+

293 

+

294 self.make_file("oops.py", """\ 

+

295 def oops(args): 

+

296 a = 2 

+

297 raise Exception("oops") 

+

298 a = 4 

+

299 """) 

+

300 

+

301 self.make_file("fly.py", "\n"*100 + """\ 

+

302 def fly(calls): 

+

303 a = 2 

+

304 calls[0](calls[1:]) 

+

305 a = 4 

+

306 """) 

+

307 

+

308 self.make_file("catch.py", "\n"*200 + """\ 

+

309 def catch(calls): 

+

310 try: 

+

311 a = 3 

+

312 calls[0](calls[1:]) 

+

313 a = 5 

+

314 except: 

+

315 a = 7 

+

316 """) 

+

317 

+

318 self.make_file("doit.py", "\n"*300 + """\ 

+

319 def doit(calls): 

+

320 try: 

+

321 calls[0](calls[1:]) 

+

322 except: 

+

323 a = 5 

+

324 """) 

+

325 

+

326 # Import all the modules before starting coverage, so the def lines 

+

327 # won't be in all the results. 

+

328 for mod in "oops fly catch doit".split(): 

+

329 import_local_file(mod) 

+

330 

+

331 # Each run nests the functions differently to get different 

+

332 # combinations of catching exceptions and letting them fly. 

+

333 runs = [ 

+

334 ("doit fly oops", { 

+

335 'doit.py': [302, 303, 304, 305], 

+

336 'fly.py': [102, 103], 

+

337 'oops.py': [2, 3], 

+

338 }), 

+

339 ("doit catch oops", { 

+

340 'doit.py': [302, 303], 

+

341 'catch.py': [202, 203, 204, 206, 207], 

+

342 'oops.py': [2, 3], 

+

343 }), 

+

344 ("doit fly catch oops", { 

+

345 'doit.py': [302, 303], 

+

346 'fly.py': [102, 103, 104], 

+

347 'catch.py': [202, 203, 204, 206, 207], 

+

348 'oops.py': [2, 3], 

+

349 }), 

+

350 ("doit catch fly oops", { 

+

351 'doit.py': [302, 303], 

+

352 'catch.py': [202, 203, 204, 206, 207], 

+

353 'fly.py': [102, 103], 

+

354 'oops.py': [2, 3], 

+

355 }), 

+

356 ] 

+

357 

+

358 for callnames, lines_expected in runs: 

+

359 

+

360 # Make the list of functions we'll call for this test. 

+

361 callnames = callnames.split() 

+

362 calls = [getattr(sys.modules[cn], cn) for cn in callnames] 

+

363 

+

364 cov = coverage.Coverage() 

+

365 cov.start() 

+

366 # Call our list of functions: invoke the first, with the rest as 

+

367 # an argument. 

+

368 calls[0](calls[1:]) # pragma: nested 

+

369 cov.stop() # pragma: nested 

+

370 

+

371 # Clean the line data and compare to expected results. 

+

372 # The file names are absolute, so keep just the base. 

+

373 clean_lines = {} 

+

374 data = cov.get_data() 

+

375 for callname in callnames: 

+

376 filename = callname + ".py" 

+

377 lines = data.lines(abs_file(filename)) 

+

378 clean_lines[filename] = sorted(lines) 

+

379 

+

380 if env.JYTHON: # pragma: only jython 

+

381 # Jython doesn't report on try or except lines, so take those 

+

382 # out of the expected lines. 

+

383 invisible = [202, 206, 302, 304] 

+

384 for lines in lines_expected.values(): 

+

385 lines[:] = [l for l in lines if l not in invisible] 

+

386 

+

387 assert clean_lines == lines_expected 

+

388 

+

389 

+

390class DoctestTest(CoverageTest): 

+

391 """Tests invoked with doctest should measure properly.""" 

+

392 

+

393 def test_doctest(self): 

+

394 self.check_coverage('''\ 

+

395 def return_arg_or_void(arg): 

+

396 """If <arg> is None, return "Void"; otherwise return <arg> 

+

397 

+

398 >>> return_arg_or_void(None) 

+

399 'Void' 

+

400 >>> return_arg_or_void("arg") 

+

401 'arg' 

+

402 >>> return_arg_or_void("None") 

+

403 'None' 

+

404 """ 

+

405 if arg is None: 

+

406 return "Void" 

+

407 else: 

+

408 return arg 

+

409 

+

410 import doctest, sys 

+

411 doctest.testmod(sys.modules[__name__]) # we're not __main__ :( 

+

412 ''', 

+

413 [1, 11, 12, 14, 16, 17], "") 

+

414 

+

415 

+

416class GettraceTest(CoverageTest): 

+

417 """Tests that we work properly with `sys.gettrace()`.""" 

+

418 def test_round_trip_in_untraced_function(self): 

+

419 # https://github.com/nedbat/coveragepy/issues/575 

+

420 self.make_file("main.py", """import sample""") 

+

421 self.make_file("sample.py", """\ 

+

422 from swap import swap_it 

+

423 def doit(): 

+

424 print(3) 

+

425 swap_it() 

+

426 print(5) 

+

427 def doit_soon(): 

+

428 print(7) 

+

429 doit() 

+

430 print(9) 

+

431 print(10) 

+

432 doit_soon() 

+

433 print(12) 

+

434 """) 

+

435 self.make_file("swap.py", """\ 

+

436 import sys 

+

437 def swap_it(): 

+

438 sys.settrace(sys.gettrace()) 

+

439 """) 

+

440 

+

441 # Use --source=sample to prevent measurement of swap.py. 

+

442 cov = coverage.Coverage(source=["sample"]) 

+

443 self.start_import_stop(cov, "main") 

+

444 

+

445 assert self.stdout() == "10\n7\n3\n5\n9\n12\n" 

+

446 

+

447 _, statements, missing, _ = cov.analysis("sample.py") 

+

448 assert statements == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] 

+

449 assert missing == [] 

+

450 

+

451 def test_setting_new_trace_function(self): 

+

452 # https://github.com/nedbat/coveragepy/issues/436 

+

453 self.check_coverage('''\ 

+

454 import sys 

+

455 

+

456 def tracer(frame, event, arg): 

+

457 print("%s: %s @ %d" % (event, frame.f_code.co_filename, frame.f_lineno)) 

+

458 return tracer 

+

459 

+

460 def begin(): 

+

461 sys.settrace(tracer) 

+

462 

+

463 def collect(): 

+

464 t = sys.gettrace() 

+

465 assert t is tracer, t 

+

466 

+

467 def test_unsets_trace(): 

+

468 begin() 

+

469 collect() 

+

470 

+

471 old = sys.gettrace() 

+

472 test_unsets_trace() 

+

473 sys.settrace(old) 

+

474 a = 21 

+

475 b = 22 

+

476 ''', 

+

477 lines=[1, 3, 4, 5, 7, 8, 10, 11, 12, 14, 15, 16, 18, 19, 20, 21, 22], 

+

478 missing="4-5, 11-12", 

+

479 ) 

+

480 

+

481 out = self.stdout().replace(self.last_module_name, "coverage_test") 

+

482 expected = ( 

+

483 "call: coverage_test.py @ 10\n" 

+

484 "line: coverage_test.py @ 11\n" 

+

485 "line: coverage_test.py @ 12\n" 

+

486 "return: coverage_test.py @ 12\n" 

+

487 ) 

+

488 assert expected == out 

+

489 

+

490 @pytest.mark.expensive 

+

491 @pytest.mark.skipif(env.METACOV, reason="Can't set trace functions during meta-coverage") 

+

492 def test_atexit_gettrace(self): 

+

493 # This is not a test of coverage at all, but of our understanding 

+

494 # of this edge-case behavior in various Pythons. 

+

495 

+

496 self.make_file("atexit_gettrace.py", """\ 

+

497 import atexit, sys 

+

498 

+

499 def trace_function(frame, event, arg): 

+

500 return trace_function 

+

501 sys.settrace(trace_function) 

+

502 

+

503 def show_trace_function(): 

+

504 tfn = sys.gettrace() 

+

505 if tfn is not None: 

+

506 tfn = tfn.__name__ 

+

507 print(tfn) 

+

508 atexit.register(show_trace_function) 

+

509 

+

510 # This will show what the trace function is at the end of the program. 

+

511 """) 

+

512 status, out = self.run_command_status("python atexit_gettrace.py") 

+

513 assert status == 0 

+

514 if env.PYPY and env.PYPYVERSION >= (5, 4): 

+

515 # Newer PyPy clears the trace function before atexit runs. 

+

516 assert out == "None\n" 

+

517 else: 

+

518 # Other Pythons leave the trace function in place. 

+

519 assert out == "trace_function\n" 

+

520 

+

521 

+

522class ExecTest(CoverageTest): 

+

523 """Tests of exec.""" 

+

524 def test_correct_filename(self): 

+

525 # https://github.com/nedbat/coveragepy/issues/380 

+

526 # Bug was that exec'd files would have their lines attributed to the 

+

527 # calling file. Make two files, both with ~30 lines, but no lines in 

+

528 # common. Line 30 in to_exec.py was recorded as line 30 in main.py, 

+

529 # but now it's fixed. :) 

+

530 self.make_file("to_exec.py", """\ 

+

531 \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n 

+

532 print("var is {}".format(var)) # line 31 

+

533 """) 

+

534 self.make_file("main.py", """\ 

+

535 namespace = {'var': 17} 

+

536 with open("to_exec.py") as to_exec_py: 

+

537 code = compile(to_exec_py.read(), 'to_exec.py', 'exec') 

+

538 exec(code, globals(), namespace) 

+

539 \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n 

+

540 print("done") # line 35 

+

541 """) 

+

542 

+

543 cov = coverage.Coverage() 

+

544 self.start_import_stop(cov, "main") 

+

545 

+

546 _, statements, missing, _ = cov.analysis("main.py") 

+

547 assert statements == [1, 2, 3, 4, 35] 

+

548 assert missing == [] 

+

549 _, statements, missing, _ = cov.analysis("to_exec.py") 

+

550 assert statements == [31] 

+

551 assert missing == [] 

+

552 

+

553 @pytest.mark.skipif(env.PY2, reason="Python 2 can't seem to compile the file.") 

+

554 def test_unencodable_filename(self): 

+

555 # https://github.com/nedbat/coveragepy/issues/891 

+

556 self.make_file("bug891.py", r"""exec(compile("pass", "\udcff.py", "exec"))""") 

+

557 cov = coverage.Coverage() 

+

558 self.start_import_stop(cov, "bug891") 

+

559 # Saving would fail trying to encode \udcff.py 

+

560 cov.save() 

+

561 files = [os.path.basename(f) for f in cov.get_data().measured_files()] 

+

562 assert "bug891.py" in files 

+

563 

+

564 

+

565class MockingProtectionTest(CoverageTest): 

+

566 """Tests about protecting ourselves from aggressive mocking. 

+

567 

+

568 https://github.com/nedbat/coveragepy/issues/416 

+

569 

+

570 """ 

+

571 def test_os_path_exists(self): 

+

572 # To see if this test still detects the problem, change isolate_module 

+

573 # in misc.py to simply return its argument. It should fail with a 

+

574 # StopIteration error. 

+

575 self.make_file("bug416.py", """\ 

+

576 import os.path 

+

577 

+

578 import mock 

+

579 

+

580 @mock.patch('os.path.exists') 

+

581 def test_path_exists(mock_exists): 

+

582 mock_exists.side_effect = [17] 

+

583 print("in test") 

+

584 import bug416a 

+

585 print(bug416a.foo) 

+

586 print(os.path.exists(".")) 

+

587 

+

588 test_path_exists() 

+

589 """) 

+

590 self.make_file("bug416a.py", """\ 

+

591 print("bug416a.py") 

+

592 foo = 23 

+

593 """) 

+

594 

+

595 import py_compile 

+

596 py_compile.compile("bug416a.py") 

+

597 out = self.run_command("coverage run bug416.py") 

+

598 assert out == "in test\nbug416a.py\n23\n17\n" 

+
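The MockingProtectionTest above guards against tests that aggressively mock.patch functions such as os.path.exists out from under coverage.py. The idea it relies on, capturing a private copy of a module's functions before any patching can happen, can be sketched as follows; this is an illustration of the general technique, not coverage.py's actual misc.isolate_module implementation.

import os.path
import types

def isolate_module(mod):
    # Copy the module's attributes onto a private module object, so that later
    # mock.patch calls against the original module can't reach this copy.
    isolated = types.ModuleType(mod.__name__ + "_isolated")
    for name in dir(mod):
        if not name.startswith("__"):
            setattr(isolated, name, getattr(mod, name))
    return isolated

os_path = isolate_module(os.path)
# Even if a test later does mock.patch('os.path.exists'), os_path.exists still
# refers to the real function captured here.
print(os_path.exists("."))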
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_parser_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_parser_py.html
new file mode 100644
index 000000000..088006178
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_parser_py.html
@@ -0,0 +1,529 @@
Coverage for tests/test_parser.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests for coverage.py's code parsing.""" 

+

5 

+

6import textwrap 

+

7 

+

8import pytest 

+

9 

+

10from coverage import env 

+

11from coverage.misc import NotPython 

+

12from coverage.parser import PythonParser 

+

13 

+

14from tests.coveragetest import CoverageTest 

+

15from tests.helpers import arcz_to_arcs 

+

16 

+

17 

+

18class PythonParserTest(CoverageTest): 

+

19 """Tests for coverage.py's Python code parsing.""" 

+

20 

+

21 run_in_temp_dir = False 

+

22 

+

23 def parse_source(self, text): 

+

24 """Parse `text` as source, and return the `PythonParser` used.""" 

+

25 if env.PY2: 

+

26 text = text.decode("ascii") 

+

27 text = textwrap.dedent(text) 

+

28 parser = PythonParser(text=text, exclude="nocover") 

+

29 parser.parse_source() 

+

30 return parser 

+

31 

+

32 def test_exit_counts(self): 

+

33 parser = self.parse_source("""\ 

+

34 # check some basic branch counting 

+

35 class Foo: 

+

36 def foo(self, a): 

+

37 if a: 

+

38 return 5 

+

39 else: 

+

40 return 7 

+

41 

+

42 class Bar: 

+

43 pass 

+

44 """) 

+

45 assert parser.exit_counts() == { 

+

46 2:1, 3:1, 4:2, 5:1, 7:1, 9:1, 10:1 

+

47 } 

+

48 

+

49 def test_generator_exit_counts(self): 

+

50 # https://github.com/nedbat/coveragepy/issues/324 

+

51 parser = self.parse_source("""\ 

+

52 def gen(input): 

+

53 for n in inp: 

+

54 yield (i * 2 for i in range(n)) 

+

55 

+

56 list(gen([1,2,3])) 

+

57 """) 

+

58 assert parser.exit_counts() == { 

+

59 1:1, # def -> list 

+

60 2:2, # for -> yield; for -> exit 

+

61 3:2, # yield -> for; genexp exit 

+

62 5:1, # list -> exit 

+

63 } 

+

64 

+

65 def test_try_except(self): 

+

66 parser = self.parse_source("""\ 

+

67 try: 

+

68 a = 2 

+

69 except ValueError: 

+

70 a = 4 

+

71 except ZeroDivideError: 

+

72 a = 6 

+

73 except: 

+

74 a = 8 

+

75 b = 9 

+

76 """) 

+

77 assert parser.exit_counts() == { 

+

78 1: 1, 2:1, 3:2, 4:1, 5:2, 6:1, 7:1, 8:1, 9:1 

+

79 } 

+

80 

+

81 def test_excluded_classes(self): 

+

82 parser = self.parse_source("""\ 

+

83 class Foo: 

+

84 def __init__(self): 

+

85 pass 

+

86 

+

87 if len([]): # nocover 

+

88 class Bar: 

+

89 pass 

+

90 """) 

+

91 assert parser.exit_counts() == { 

+

92 1:0, 2:1, 3:1 

+

93 } 

+

94 

+

95 def test_missing_branch_to_excluded_code(self): 

+

96 parser = self.parse_source("""\ 

+

97 if fooey: 

+

98 a = 2 

+

99 else: # nocover 

+

100 a = 4 

+

101 b = 5 

+

102 """) 

+

103 assert parser.exit_counts() == { 1:1, 2:1, 5:1 } 

+

104 parser = self.parse_source("""\ 

+

105 def foo(): 

+

106 if fooey: 

+

107 a = 3 

+

108 else: 

+

109 a = 5 

+

110 b = 6 

+

111 """) 

+

112 assert parser.exit_counts() == { 1:1, 2:2, 3:1, 5:1, 6:1 } 

+

113 parser = self.parse_source("""\ 

+

114 def foo(): 

+

115 if fooey: 

+

116 a = 3 

+

117 else: # nocover 

+

118 a = 5 

+

119 b = 6 

+

120 """) 

+

121 assert parser.exit_counts() == { 1:1, 2:1, 3:1, 6:1 } 

+

122 

+

123 def test_indentation_error(self): 

+

124 msg = ( 

+

125 "Couldn't parse '<code>' as Python source: " 

+

126 "'unindent does not match any outer indentation level' at line 3" 

+

127 ) 

+

128 with pytest.raises(NotPython, match=msg): 

+

129 _ = self.parse_source("""\ 

+

130 0 spaces 

+

131 2 

+

132 1 

+

133 """) 

+

134 

+

135 def test_token_error(self): 

+

136 msg = "Couldn't parse '<code>' as Python source: 'EOF in multi-line string' at line 1" 

+

137 with pytest.raises(NotPython, match=msg): 

+

138 _ = self.parse_source("""\ 

+

139 ''' 

+

140 """) 

+

141 

+

142 @pytest.mark.xfail( 

+

143 env.PYPY3 and env.PYPYVERSION == (7, 3, 0), 

+

144 reason="https://bitbucket.org/pypy/pypy/issues/3139", 

+

145 ) 

+

146 def test_decorator_pragmas(self): 

+

147 parser = self.parse_source("""\ 

+

148 # 1 

+

149 

+

150 @foo(3) # nocover 

+

151 @bar 

+

152 def func(x, y=5): 

+

153 return 6 

+

154 

+

155 class Foo: # this is the only statement. 

+

156 '''9''' 

+

157 @foo # nocover 

+

158 def __init__(self): 

+

159 '''12''' 

+

160 return 13 

+

161 

+

162 @foo( # nocover 

+

163 16, 

+

164 17, 

+

165 ) 

+

166 def meth(self): 

+

167 return 20 

+

168 

+

169 @foo( # nocover 

+

170 23 

+

171 ) 

+

172 def func(x=25): 

+

173 return 26 

+

174 """) 

+

175 raw_statements = {3, 4, 5, 6, 8, 9, 10, 13, 15, 16, 17, 20, 22, 23, 25, 26} 

+

176 if env.PYBEHAVIOR.trace_decorated_def: 

+

177 raw_statements.update([11, 19]) 

+

178 assert parser.raw_statements == raw_statements 

+

179 assert parser.statements == {8} 

+

180 

+

181 def test_class_decorator_pragmas(self): 

+

182 parser = self.parse_source("""\ 

+

183 class Foo(object): 

+

184 def __init__(self): 

+

185 self.x = 3 

+

186 

+

187 @foo # nocover 

+

188 class Bar(object): 

+

189 def __init__(self): 

+

190 self.x = 8 

+

191 """) 

+

192 assert parser.raw_statements == {1, 2, 3, 5, 6, 7, 8} 

+

193 assert parser.statements == {1, 2, 3} 

+

194 

+

195 def test_empty_decorated_function(self): 

+

196 parser = self.parse_source("""\ 

+

197 def decorator(func): 

+

198 return func 

+

199 

+

200 @decorator 

+

201 def foo(self): 

+

202 '''Docstring''' 

+

203 

+

204 @decorator 

+

205 def bar(self): 

+

206 pass 

+

207 """) 

+

208 

+

209 if env.PYBEHAVIOR.trace_decorated_def: 

+

210 expected_statements = {1, 2, 4, 5, 8, 9, 10} 

+

211 expected_arcs = set(arcz_to_arcs(".1 14 45 58 89 9. .2 2. -8A A-8")) 

+

212 expected_exits = {1: 1, 2: 1, 4: 1, 5: 1, 8: 1, 9: 1, 10: 1} 

+

213 else: 

+

214 expected_statements = {1, 2, 4, 8, 10} 

+

215 expected_arcs = set(arcz_to_arcs(".1 14 48 8. .2 2. -8A A-8")) 

+

216 expected_exits = {1: 1, 2: 1, 4: 1, 8: 1, 10: 1} 

+

217 

+

218 if env.PYBEHAVIOR.docstring_only_function: 

+

219 # 3.7 changed how functions with only docstrings are numbered. 

+

220 expected_arcs.update(set(arcz_to_arcs("-46 6-4"))) 

+

221 expected_exits.update({6: 1}) 

+

222 

+

223 assert expected_statements == parser.statements 

+

224 assert expected_arcs == parser.arcs() 

+

225 assert expected_exits == parser.exit_counts() 

+

226 

+

227 

+

228class ParserMissingArcDescriptionTest(CoverageTest): 

+

229 """Tests for PythonParser.missing_arc_description.""" 

+

230 

+

231 run_in_temp_dir = False 

+

232 

+

233 def parse_text(self, source): 

+

234 """Parse Python source, and return the parser object.""" 

+

235 parser = PythonParser(text=textwrap.dedent(source)) 

+

236 parser.parse_source() 

+

237 return parser 

+

238 

+

239 def test_missing_arc_description(self): 

+

240 # This code is never run, so the actual values don't matter. 

+

241 parser = self.parse_text(u"""\ 

+

242 if x: 

+

243 print(2) 

+

244 print(3) 

+

245 

+

246 def func5(): 

+

247 for x in range(6): 

+

248 if x == 7: 

+

249 break 

+

250 

+

251 def func10(): 

+

252 while something(11): 

+

253 thing(12) 

+

254 more_stuff(13) 

+

255 """) 

+

256 expected = "line 1 didn't jump to line 2, because the condition on line 1 was never true" 

+

257 assert expected == parser.missing_arc_description(1, 2) 

+

258 expected = "line 1 didn't jump to line 3, because the condition on line 1 was never false" 

+

259 assert expected == parser.missing_arc_description(1, 3) 

+

260 expected = ( 

+

261 "line 6 didn't return from function 'func5', " + 

+

262 "because the loop on line 6 didn't complete" 

+

263 ) 

+

264 assert expected == parser.missing_arc_description(6, -5) 

+

265 expected = "line 6 didn't jump to line 7, because the loop on line 6 never started" 

+

266 assert expected == parser.missing_arc_description(6, 7) 

+

267 expected = "line 11 didn't jump to line 12, because the condition on line 11 was never true" 

+

268 assert expected == parser.missing_arc_description(11, 12) 

+

269 expected = ( 

+

270 "line 11 didn't jump to line 13, " + 

+

271 "because the condition on line 11 was never false" 

+

272 ) 

+

273 assert expected == parser.missing_arc_description(11, 13) 

+

274 

+

275 def test_missing_arc_descriptions_for_small_callables(self): 

+

276 parser = self.parse_text(u"""\ 

+

277 callables = [ 

+

278 lambda: 2, 

+

279 (x for x in range(3)), 

+

280 {x:1 for x in range(4)}, 

+

281 {x for x in range(5)}, 

+

282 ] 

+

283 x = 7 

+

284 """) 

+

285 expected = "line 2 didn't finish the lambda on line 2" 

+

286 assert expected == parser.missing_arc_description(2, -2) 

+

287 expected = "line 3 didn't finish the generator expression on line 3" 

+

288 assert expected == parser.missing_arc_description(3, -3) 

+

289 expected = "line 4 didn't finish the dictionary comprehension on line 4" 

+

290 assert expected == parser.missing_arc_description(4, -4) 

+

291 expected = "line 5 didn't finish the set comprehension on line 5" 

+

292 assert expected == parser.missing_arc_description(5, -5) 

+

293 

+

294 def test_missing_arc_descriptions_for_exceptions(self): 

+

295 parser = self.parse_text(u"""\ 

+

296 try: 

+

297 pass 

+

298 except ZeroDivideError: 

+

299 print("whoops") 

+

300 except ValueError: 

+

301 print("yikes") 

+

302 """) 

+

303 expected = ( 

+

304 "line 3 didn't jump to line 4, " + 

+

305 "because the exception caught by line 3 didn't happen" 

+

306 ) 

+

307 assert expected == parser.missing_arc_description(3, 4) 

+

308 expected = ( 

+

309 "line 5 didn't jump to line 6, " + 

+

310 "because the exception caught by line 5 didn't happen" 

+

311 ) 

+

312 assert expected == parser.missing_arc_description(5, 6) 

+

313 

+

314 def test_missing_arc_descriptions_for_finally(self): 

+

315 parser = self.parse_text(u"""\ 

+

316 def function(): 

+

317 for i in range(2): 

+

318 try: 

+

319 if something(4): 

+

320 break 

+

321 elif something(6): 

+

322 x = 7 

+

323 else: 

+

324 if something(9): 

+

325 continue 

+

326 else: 

+

327 continue 

+

328 if also_this(13): 

+

329 return 14 

+

330 else: 

+

331 raise Exception(16) 

+

332 finally: 

+

333 this_thing(18) 

+

334 that_thing(19) 

+

335 """) 

+

336 if env.PYBEHAVIOR.finally_jumps_back: 

+

337 expected = "line 18 didn't jump to line 5, because the break on line 5 wasn't executed" 

+

338 assert expected == parser.missing_arc_description(18, 5) 

+

339 expected = "line 5 didn't jump to line 19, because the break on line 5 wasn't executed" 

+

340 assert expected == parser.missing_arc_description(5, 19) 

+

341 expected = ( 

+

342 "line 18 didn't jump to line 10, " + 

+

343 "because the continue on line 10 wasn't executed" 

+

344 ) 

+

345 assert expected == parser.missing_arc_description(18, 10) 

+

346 expected = ( 

+

347 "line 10 didn't jump to line 2, " + 

+

348 "because the continue on line 10 wasn't executed" 

+

349 ) 

+

350 assert expected == parser.missing_arc_description(10, 2) 

+

351 expected = ( 

+

352 "line 18 didn't jump to line 14, " + 

+

353 "because the return on line 14 wasn't executed" 

+

354 ) 

+

355 assert expected == parser.missing_arc_description(18, 14) 

+

356 expected = ( 

+

357 "line 14 didn't return from function 'function', " + 

+

358 "because the return on line 14 wasn't executed" 

+

359 ) 

+

360 assert expected == parser.missing_arc_description(14, -1) 

+

361 expected = ( 

+

362 "line 18 didn't except from function 'function', " + 

+

363 "because the raise on line 16 wasn't executed" 

+

364 ) 

+

365 assert expected == parser.missing_arc_description(18, -1) 

+

366 else: 

+

367 expected = ( 

+

368 "line 18 didn't jump to line 19, " + 

+

369 "because the break on line 5 wasn't executed" 

+

370 ) 

+

371 assert expected == parser.missing_arc_description(18, 19) 

+

372 expected = ( 

+

373 "line 18 didn't jump to line 2, " + 

+

374 "because the continue on line 10 wasn't executed" + 

+

375 " or " + 

+

376 "the continue on line 12 wasn't executed" 

+

377 ) 

+

378 assert expected == parser.missing_arc_description(18, 2) 

+

379 expected = ( 

+

380 "line 18 didn't except from function 'function', " + 

+

381 "because the raise on line 16 wasn't executed" + 

+

382 " or " + 

+

383 "line 18 didn't return from function 'function', " + 

+

384 "because the return on line 14 wasn't executed" 

+

385 ) 

+

386 assert expected == parser.missing_arc_description(18, -1) 

+

387 

+

388 def test_missing_arc_descriptions_bug460(self): 

+

389 parser = self.parse_text(u"""\ 

+

390 x = 1 

+

391 d = { 

+

392 3: lambda: [], 

+

393 4: lambda: [], 

+

394 } 

+

395 x = 6 

+

396 """) 

+

397 assert parser.missing_arc_description(2, -3) == "line 3 didn't finish the lambda on line 3" 

+

398 

+

399 

+

400class ParserFileTest(CoverageTest): 

+

401 """Tests for coverage.py's code parsing from files.""" 

+

402 

+

403 def parse_file(self, filename): 

+

404 """Parse `text` as source, and return the `PythonParser` used.""" 

+

405 parser = PythonParser(filename=filename, exclude="nocover") 

+

406 parser.parse_source() 

+

407 return parser 

+

408 

+

409 def test_line_endings(self): 

+

410 text = """\ 

+

411 # check some basic branch counting 

+

412 class Foo: 

+

413 def foo(self, a): 

+

414 if a: 

+

415 return 5 

+

416 else: 

+

417 return 7 

+

418 

+

419 class Bar: 

+

420 pass 

+

421 """ 

+

422 counts = { 2:1, 3:1, 4:2, 5:1, 7:1, 9:1, 10:1 } 

+

423 name_endings = (("unix", "\n"), ("dos", "\r\n"), ("mac", "\r")) 

+

424 for fname, newline in name_endings: 

+

425 fname = fname + ".py" 

+

426 self.make_file(fname, text, newline=newline) 

+

427 parser = self.parse_file(fname) 

+

428 assert parser.exit_counts() == counts, "Wrong for %r" % fname 

+

429 

+

430 def test_encoding(self): 

+

431 self.make_file("encoded.py", """\ 

+

432 coverage = "\xe7\xf6v\xear\xe3g\xe9" 

+

433 """) 

+

434 parser = self.parse_file("encoded.py") 

+

435 assert parser.exit_counts() == {1: 1} 

+

436 

+

437 def test_missing_line_ending(self): 

+

438 # Test that the set of statements is the same even if a final 

+

439 # multi-line statement has no final newline. 

+

440 # https://github.com/nedbat/coveragepy/issues/293 

+

441 

+

442 self.make_file("normal.py", """\ 

+

443 out, err = subprocess.Popen( 

+

444 [sys.executable, '-c', 'pass'], 

+

445 stdout=subprocess.PIPE, 

+

446 stderr=subprocess.PIPE).communicate() 

+

447 """) 

+

448 

+

449 parser = self.parse_file("normal.py") 

+

450 assert parser.statements == {1} 

+

451 

+

452 self.make_file("abrupt.py", """\ 

+

453 out, err = subprocess.Popen( 

+

454 [sys.executable, '-c', 'pass'], 

+

455 stdout=subprocess.PIPE, 

+

456 stderr=subprocess.PIPE).communicate()""") # no final newline. 

+

457 

+

458 # Double-check that some test helper wasn't being helpful. 

+

459 with open("abrupt.py") as f: 

+

460 assert f.read()[-1] == ")" 

+

461 

+

462 parser = self.parse_file("abrupt.py") 

+

463 assert parser.statements == {1} 

+
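For readers of this report, the parser API driven by the tests above can also be exercised directly. The following is a small usage sketch (assuming coverage.py is importable) of the same PythonParser calls used in PythonParserTest: construct the parser from text, call parse_source(), then inspect statements, exit counts, and arcs.

import textwrap
from coverage.parser import PythonParser

source = textwrap.dedent("""\
    def foo(a):
        if a:
            return 3
        return 4

    foo(1)
    """)
parser = PythonParser(text=source, exclude="nocover")
parser.parse_source()
print(parser.statements)     # executable line numbers, e.g. {1, 2, 3, 4, 6}
print(parser.exit_counts())  # how many ways each statement can be exited
print(parser.arcs())         # the possible (from_line, to_line) transitions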
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_phystokens_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_phystokens_py.html
new file mode 100644
index 000000000..907e9dd76
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_phystokens_py.html
@@ -0,0 +1,347 @@
Coverage for tests/test_phystokens.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests for coverage.py's improved tokenizer.""" 

+

5 

+

6import os.path 

+

7import re 

+

8import textwrap 

+

9 

+

10import pytest 

+

11 

+

12from coverage import env 

+

13from coverage.phystokens import source_token_lines, source_encoding 

+

14from coverage.phystokens import neuter_encoding_declaration, compile_unicode 

+

15from coverage.python import get_python_source 

+

16 

+

17from tests.coveragetest import CoverageTest, TESTS_DIR 

+

18 

+

19 

+

20# A simple program and its token stream. 

+

21SIMPLE = u"""\ 

+

22# yay! 

+

23def foo(): 

+

24 say('two = %d' % 2) 

+

25""" 

+

26 

+

27SIMPLE_TOKENS = [ 

+

28 [('com', "# yay!")], 

+

29 [('key', 'def'), ('ws', ' '), ('nam', 'foo'), ('op', '('), ('op', ')'), ('op', ':')], 

+

30 [('ws', ' '), ('nam', 'say'), ('op', '('), 

+

31 ('str', "'two = %d'"), ('ws', ' '), ('op', '%'), 

+

32 ('ws', ' '), ('num', '2'), ('op', ')')], 

+

33] 

+

34 

+

35# Mixed-whitespace program, and its token stream. 

+

36MIXED_WS = u"""\ 

+

37def hello(): 

+

38 a="Hello world!" 

+

39\tb="indented" 

+

40""" 

+

41 

+

42MIXED_WS_TOKENS = [ 

+

43 [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ('op', ')'), ('op', ':')], 

+

44 [('ws', ' '), ('nam', 'a'), ('op', '='), ('str', '"Hello world!"')], 

+

45 [('ws', ' '), ('nam', 'b'), ('op', '='), ('str', '"indented"')], 

+

46] 

+

47 

+

48# https://github.com/nedbat/coveragepy/issues/822 

+

49BUG_822 = u"""\ 

+

50print( "Message 1" ) 

+

51array = [ 1,2,3,4, # 4 numbers \\ 

+

52 5,6,7 ] # 3 numbers 

+

53print( "Message 2" ) 

+

54""" 

+

55 

+

56class PhysTokensTest(CoverageTest): 

+

57 """Tests for coverage.py's improved tokenizer.""" 

+

58 

+

59 run_in_temp_dir = False 

+

60 

+

61 def check_tokenization(self, source): 

+

62 """Tokenize `source`, then put it back together, should be the same.""" 

+

63 tokenized = "" 

+

64 for line in source_token_lines(source): 

+

65 text = "".join(t for _, t in line) 

+

66 tokenized += text + "\n" 

+

67 # source_token_lines doesn't preserve trailing spaces, so trim all that 

+

68 # before comparing. 

+

69 source = source.replace('\r\n', '\n') 

+

70 source = re.sub(r"(?m)[ \t]+$", "", source) 

+

71 tokenized = re.sub(r"(?m)[ \t]+$", "", tokenized) 

+

72 assert source == tokenized 

+

73 

+

74 def check_file_tokenization(self, fname): 

+

75 """Use the contents of `fname` for `check_tokenization`.""" 

+

76 self.check_tokenization(get_python_source(fname)) 

+

77 

+

78 def test_simple(self): 

+

79 assert list(source_token_lines(SIMPLE)) == SIMPLE_TOKENS 

+

80 self.check_tokenization(SIMPLE) 

+

81 

+

82 def test_missing_final_newline(self): 

+

83 # We can tokenize source that is missing the final newline. 

+

84 assert list(source_token_lines(SIMPLE.rstrip())) == SIMPLE_TOKENS 

+

85 

+

86 def test_tab_indentation(self): 

+

87 # Mixed tabs and spaces... 

+

88 assert list(source_token_lines(MIXED_WS)) == MIXED_WS_TOKENS 

+

89 

+

90 def test_bug_822(self): 

+

91 self.check_tokenization(BUG_822) 

+

92 

+

93 def test_tokenize_real_file(self): 

+

94 # Check the tokenization of a real file (large, btw). 

+

95 real_file = os.path.join(TESTS_DIR, "test_coverage.py") 

+

96 self.check_file_tokenization(real_file) 

+

97 

+

98 def test_stress(self): 

+

99 # Check the tokenization of a stress-test file. 

+

100 stress = os.path.join(TESTS_DIR, "stress_phystoken.tok") 

+

101 self.check_file_tokenization(stress) 

+

102 stress = os.path.join(TESTS_DIR, "stress_phystoken_dos.tok") 

+

103 self.check_file_tokenization(stress) 

+

104 

+

105 

+

106# The default encoding is different in Python 2 and Python 3. 

+

107if env.PY3: 

+

108 DEF_ENCODING = "utf-8" 

+

109else: 

+

110 DEF_ENCODING = "ascii" 

+

111 

+

112 

+

113ENCODING_DECLARATION_SOURCES = [ 

+

114 # Various forms from http://www.python.org/dev/peps/pep-0263/ 

+

115 (1, b"# coding=cp850\n\n", "cp850"), 

+

116 (1, b"# coding=latin-1\n", "iso-8859-1"), 

+

117 (1, b"# coding=iso-latin-1\n", "iso-8859-1"), 

+

118 (1, b"#!/usr/bin/python\n# -*- coding: cp850 -*-\n", "cp850"), 

+

119 (1, b"#!/usr/bin/python\n# vim: set fileencoding=cp850:\n", "cp850"), 

+

120 (1, b"# This Python file uses this encoding: cp850\n", "cp850"), 

+

121 (1, b"# This file uses a different encoding:\n# coding: cp850\n", "cp850"), 

+

122 (1, b"\n# coding=cp850\n\n", "cp850"), 

+

123 (2, b"# -*- coding:cp850 -*-\n# vim: fileencoding=cp850\n", "cp850"), 

+

124] 

+

125 

+

126class SourceEncodingTest(CoverageTest): 

+

127 """Tests of source_encoding() for detecting encodings.""" 

+

128 

+

129 run_in_temp_dir = False 

+

130 

+

131 def test_detect_source_encoding(self): 

+

132 for _, source, expected in ENCODING_DECLARATION_SOURCES: 

+

133 assert source_encoding(source) == expected, "Wrong encoding in %r" % source 

+

134 

+

135 # PyPy3 gets this case wrong. Not sure what I can do about it, so skip the test. 

+

136 @pytest.mark.skipif(env.PYPY3, reason="PyPy3 is wrong about non-comment encoding. Skip it.") 

+

137 def test_detect_source_encoding_not_in_comment(self): 

+

138 # Should not detect anything here 

+

139 source = b'def parse(src, encoding=None):\n pass' 

+

140 assert source_encoding(source) == DEF_ENCODING 

+

141 

+

142 def test_dont_detect_source_encoding_on_third_line(self): 

+

143 # A coding declaration doesn't count on the third line. 

+

144 source = b"\n\n# coding=cp850\n\n" 

+

145 assert source_encoding(source) == DEF_ENCODING 

+

146 

+

147 def test_detect_source_encoding_of_empty_file(self): 

+

148 # An important edge case. 

+

149 assert source_encoding(b"") == DEF_ENCODING 

+

150 

+

151 def test_bom(self): 

+

152 # A BOM means utf-8. 

+

153 source = b"\xEF\xBB\xBFtext = 'hello'\n" 

+

154 assert source_encoding(source) == 'utf-8-sig' 

+

155 

+

156 def test_bom_with_encoding(self): 

+

157 source = b"\xEF\xBB\xBF# coding: utf-8\ntext = 'hello'\n" 

+

158 assert source_encoding(source) == 'utf-8-sig' 

+

159 

+

160 def test_bom_is_wrong(self): 

+

161 # A BOM with an explicit non-utf8 encoding is an error. 

+

162 source = b"\xEF\xBB\xBF# coding: cp850\n" 

+

163 with pytest.raises(SyntaxError, match="encoding problem: utf-8"): 

+

164 source_encoding(source) 

+

165 

+

166 def test_unknown_encoding(self): 

+

167 source = b"# coding: klingon\n" 

+

168 with pytest.raises(SyntaxError, match="unknown encoding: klingon"): 

+

169 source_encoding(source) 

+

170 

+

171 

+

172class NeuterEncodingDeclarationTest(CoverageTest): 

+

173 """Tests of phystokens.neuter_encoding_declaration().""" 

+

174 

+

175 run_in_temp_dir = False 

+

176 

+

177 def test_neuter_encoding_declaration(self): 

+

178 for lines_diff_expected, source, _ in ENCODING_DECLARATION_SOURCES: 

+

179 neutered = neuter_encoding_declaration(source.decode("ascii")) 

+

180 neutered = neutered.encode("ascii") 

+

181 

+

182 # The neutered source should have the same number of lines. 

+

183 source_lines = source.splitlines() 

+

184 neutered_lines = neutered.splitlines() 

+

185 assert len(source_lines) == len(neutered_lines) 

+

186 

+

187 # Only one of the lines should be different. 

+

188 lines_different = sum( 

+

189 int(nline != sline) for nline, sline in zip(neutered_lines, source_lines) 

+

190 ) 

+

191 assert lines_diff_expected == lines_different 

+

192 

+

193 # The neutered source will be detected as having no encoding 

+

194 # declaration. 

+

195 assert source_encoding(neutered) == DEF_ENCODING, "Wrong encoding in %r" % neutered 

+

196 

+

197 def test_two_encoding_declarations(self): 

+

198 input_src = textwrap.dedent(u"""\ 

+

199 # -*- coding: ascii -*- 

+

200 # -*- coding: utf-8 -*- 

+

201 # -*- coding: utf-16 -*- 

+

202 """) 

+

203 expected_src = textwrap.dedent(u"""\ 

+

204 # (deleted declaration) -*- 

+

205 # (deleted declaration) -*- 

+

206 # -*- coding: utf-16 -*- 

+

207 """) 

+

208 output_src = neuter_encoding_declaration(input_src) 

+

209 assert expected_src == output_src 

+

210 

+

211 def test_one_encoding_declaration(self): 

+

212 input_src = textwrap.dedent(u"""\ 

+

213 # -*- coding: utf-16 -*- 

+

214 # Just a comment. 

+

215 # -*- coding: ascii -*- 

+

216 """) 

+

217 expected_src = textwrap.dedent(u"""\ 

+

218 # (deleted declaration) -*- 

+

219 # Just a comment. 

+

220 # -*- coding: ascii -*- 

+

221 """) 

+

222 output_src = neuter_encoding_declaration(input_src) 

+

223 assert expected_src == output_src 

+

224 

+

225 

+

226class Bug529Test(CoverageTest): 

+

227 """Test of bug 529""" 

+

228 

+

229 def test_bug_529(self): 

+

230 # Don't over-neuter coding declarations. This happened with a test 

+

231 # file which contained code in multi-line strings, all with coding 

+

232 # declarations. The neutering of the file also changed the multi-line 

+

233 # strings, which it shouldn't have. 

+

234 self.make_file("the_test.py", '''\ 

+

235 # -*- coding: utf-8 -*- 

+

236 import unittest 

+

237 class Bug529Test(unittest.TestCase): 

+

238 def test_two_strings_are_equal(self): 

+

239 src1 = u"""\\ 

+

240 # -*- coding: utf-8 -*- 

+

241 # Just a comment. 

+

242 """ 

+

243 src2 = u"""\\ 

+

244 # -*- coding: utf-8 -*- 

+

245 # Just a comment. 

+

246 """ 

+

247 self.assertEqual(src1, src2) 

+

248 

+

249 if __name__ == "__main__": 

+

250 unittest.main() 

+

251 ''') 

+

252 status, out = self.run_command_status("coverage run the_test.py") 

+

253 assert status == 0 

+

254 assert "OK" in out 

+

255 # If this test fails, the output will be super-confusing, because it 

+

256 # has a failing unit test contained within the failing unit test. 

+

257 

+

258 

+

259class CompileUnicodeTest(CoverageTest): 

+

260 """Tests of compiling Unicode strings.""" 

+

261 

+

262 run_in_temp_dir = False 

+

263 

+

264 def assert_compile_unicode(self, source): 

+

265 """Assert that `source` will compile properly with `compile_unicode`.""" 

+

266 source += u"a = 42\n" 

+

267 # This doesn't raise an exception: 

+

268 code = compile_unicode(source, "<string>", "exec") 

+

269 globs = {} 

+

270 exec(code, globs) 

+

271 assert globs['a'] == 42 

+

272 

+

273 def test_cp1252(self): 

+

274 uni = u"""# coding: cp1252\n# \u201C curly \u201D\n""" 

+

275 self.assert_compile_unicode(uni) 

+

276 

+

277 def test_double_coding_declaration(self): 

+

278 # Build this string in a weird way so that actual vim's won't try to 

+

279 # interpret it... 

+

280 uni = u"# -*- coding:utf-8 -*-\n# v" + "im: fileencoding=utf-8\n" 

+

281 self.assert_compile_unicode(uni) 

+
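Similarly, the tokenizer helpers tested above are easy to try by hand. This is a brief sketch (again assuming coverage.py is importable) of source_token_lines() and source_encoding() as exercised by PhysTokensTest and SourceEncodingTest.

from coverage.phystokens import source_encoding, source_token_lines

src = u"# yay!\ndef foo():\n    say('two = %d' % 2)\n"
for line in source_token_lines(src):
    # Each physical line becomes a list of (category, text) pairs,
    # e.g. [('key', 'def'), ('ws', ' '), ('nam', 'foo'), ...]
    print(line)

# source_encoding() applies the PEP 263 rules checked above.
print(source_encoding(b"# -*- coding: cp850 -*-\npass\n"))   # 'cp850'
print(source_encoding(b"\xEF\xBB\xBFtext = 'hello'\n"))      # 'utf-8-sig' because of the BOM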
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_plugins_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_plugins_py.html
new file mode 100644
index 000000000..513bf853b
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_plugins_py.html
@@ -0,0 +1,1214 @@
Coverage for tests/test_plugins.py: 99.760%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests for plugins.""" 

+

5 

+

6import inspect 

+

7import os.path 

+

8from xml.etree import ElementTree 

+

9 

+

10import pytest 

+

11 

+

12import coverage 

+

13from coverage import env 

+

14from coverage.backward import StringIO, import_local_file 

+

15from coverage.data import line_counts 

+

16from coverage.control import Plugins 

+

17from coverage.misc import CoverageException 

+

18 

+

19import coverage.plugin 

+

20 

+

21from tests.coveragetest import CoverageTest 

+

22from tests.helpers import CheckUniqueFilenames 

+

23 

+

24 

+

25class FakeConfig(object): 

+

26 """A fake config for use in tests.""" 

+

27 

+

28 def __init__(self, plugin, options): 

+

29 self.plugin = plugin 

+

30 self.options = options 

+

31 self.asked_for = [] 

+

32 

+

33 def get_plugin_options(self, module): 

+

34 """Just return the options for `module` if this is the right module.""" 

+

35 self.asked_for.append(module) 

+

36 if module == self.plugin: 

+

37 return self.options 

+

38 else: 

+

39 return {} 

+

40 

+

41 

+

42class LoadPluginsTest(CoverageTest): 

+

43 """Test Plugins.load_plugins directly.""" 

+

44 

+

45 def test_implicit_boolean(self): 

+

46 self.make_file("plugin1.py", """\ 

+

47 from coverage import CoveragePlugin 

+

48 

+

49 class Plugin(CoveragePlugin): 

+

50 pass 

+

51 

+

52 def coverage_init(reg, options): 

+

53 reg.add_file_tracer(Plugin()) 

+

54 """) 

+

55 

+

56 config = FakeConfig("plugin1", {}) 

+

57 plugins = Plugins.load_plugins([], config) 

+

58 assert not plugins 

+

59 

+

60 plugins = Plugins.load_plugins(["plugin1"], config) 

+

61 assert plugins 

+

62 

+

63 def test_importing_and_configuring(self): 

+

64 self.make_file("plugin1.py", """\ 

+

65 from coverage import CoveragePlugin 

+

66 

+

67 class Plugin(CoveragePlugin): 

+

68 def __init__(self, options): 

+

69 self.options = options 

+

70 self.this_is = "me" 

+

71 

+

72 def coverage_init(reg, options): 

+

73 reg.add_file_tracer(Plugin(options)) 

+

74 """) 

+

75 

+

76 config = FakeConfig("plugin1", {'a': 'hello'}) 

+

77 plugins = list(Plugins.load_plugins(["plugin1"], config)) 

+

78 

+

79 assert len(plugins) == 1 

+

80 assert plugins[0].this_is == "me" 

+

81 assert plugins[0].options == {'a': 'hello'} 

+

82 assert config.asked_for == ['plugin1'] 

+

83 

+

84 def test_importing_and_configuring_more_than_one(self): 

+

85 self.make_file("plugin1.py", """\ 

+

86 from coverage import CoveragePlugin 

+

87 

+

88 class Plugin(CoveragePlugin): 

+

89 def __init__(self, options): 

+

90 self.options = options 

+

91 self.this_is = "me" 

+

92 

+

93 def coverage_init(reg, options): 

+

94 reg.add_file_tracer(Plugin(options)) 

+

95 """) 

+

96 self.make_file("plugin2.py", """\ 

+

97 from coverage import CoveragePlugin 

+

98 

+

99 class Plugin(CoveragePlugin): 

+

100 def __init__(self, options): 

+

101 self.options = options 

+

102 

+

103 def coverage_init(reg, options): 

+

104 reg.add_file_tracer(Plugin(options)) 

+

105 """) 

+

106 

+

107 config = FakeConfig("plugin1", {'a': 'hello'}) 

+

108 plugins = list(Plugins.load_plugins(["plugin1", "plugin2"], config)) 

+

109 

+

110 assert len(plugins) == 2 

+

111 assert plugins[0].this_is == "me" 

+

112 assert plugins[0].options == {'a': 'hello'} 

+

113 assert plugins[1].options == {} 

+

114 assert config.asked_for == ['plugin1', 'plugin2'] 

+

115 

+

116 # The order matters... 

+

117 config = FakeConfig("plugin1", {'a': 'second'}) 

+

118 plugins = list(Plugins.load_plugins(["plugin2", "plugin1"], config)) 

+

119 

+

120 assert len(plugins) == 2 

+

121 assert plugins[0].options == {} 

+

122 assert plugins[1].this_is == "me" 

+

123 assert plugins[1].options == {'a': 'second'} 

+

124 

+

125 def test_cant_import(self): 

+

126 with pytest.raises(ImportError, match="No module named '?plugin_not_there'?"): 

+

127 _ = Plugins.load_plugins(["plugin_not_there"], None) 

+

128 

+

129 def test_plugin_must_define_coverage_init(self): 

+

130 self.make_file("no_plugin.py", """\ 

+

131 from coverage import CoveragePlugin 

+

132 Nothing = 0 

+

133 """) 

+

134 msg_pat = "Plugin module 'no_plugin' didn't define a coverage_init function" 

+

135 with pytest.raises(CoverageException, match=msg_pat): 

+

136 list(Plugins.load_plugins(["no_plugin"], None)) 

+

137 

+

138 

+

139class PluginTest(CoverageTest): 

+

140 """Test plugins through the Coverage class.""" 

+

141 

+

142 def test_plugin_imported(self): 

+

143 # Prove that a plugin will be imported. 

+

144 self.make_file("my_plugin.py", """\ 

+

145 from coverage import CoveragePlugin 

+

146 class Plugin(CoveragePlugin): 

+

147 pass 

+

148 def coverage_init(reg, options): 

+

149 reg.add_noop(Plugin()) 

+

150 with open("evidence.out", "w") as f: 

+

151 f.write("we are here!") 

+

152 """) 

+

153 

+

154 self.assert_doesnt_exist("evidence.out") 

+

155 cov = coverage.Coverage() 

+

156 cov.set_option("run:plugins", ["my_plugin"]) 

+

157 cov.start() 

+

158 cov.stop() # pragma: nested 

+

159 

+

160 with open("evidence.out") as f: 

+

161 assert f.read() == "we are here!" 

+

162 

+

163 def test_missing_plugin_raises_import_error(self): 

+

164 # Prove that a missing plugin will raise an ImportError. 

+

165 with pytest.raises(ImportError, match="No module named '?does_not_exist_woijwoicweo'?"): 

+

166 cov = coverage.Coverage() 

+

167 cov.set_option("run:plugins", ["does_not_exist_woijwoicweo"]) 

+

168 cov.start() 

+

169 cov.stop() 

+

170 

+

171 def test_bad_plugin_isnt_hidden(self): 

+

172 # Prove that a plugin with an error in it will raise the error. 

+

173 self.make_file("plugin_over_zero.py", "1/0") 

+

174 with pytest.raises(ZeroDivisionError): 

+

175 cov = coverage.Coverage() 

+

176 cov.set_option("run:plugins", ["plugin_over_zero"]) 

+

177 cov.start() 

+

178 cov.stop() 

+

179 

+

180 def test_plugin_sys_info(self): 

+

181 self.make_file("plugin_sys_info.py", """\ 

+

182 import coverage 

+

183 

+

184 class Plugin(coverage.CoveragePlugin): 

+

185 def sys_info(self): 

+

186 return [("hello", "world")] 

+

187 

+

188 def coverage_init(reg, options): 

+

189 reg.add_file_tracer(Plugin()) 

+

190 """) 

+

191 debug_out = StringIO() 

+

192 cov = coverage.Coverage(debug=["sys"]) 

+

193 cov._debug_file = debug_out 

+

194 cov.set_option("run:plugins", ["plugin_sys_info"]) 

+

195 cov.start() 

+

196 cov.stop() # pragma: nested 

+

197 

+

198 out_lines = [line.strip() for line in debug_out.getvalue().splitlines()] 

+

199 if env.C_TRACER: 

+

200 assert 'plugins.file_tracers: plugin_sys_info.Plugin' in out_lines 

+

201 else: 

+

202 assert 'plugins.file_tracers: plugin_sys_info.Plugin (disabled)' in out_lines 

+

203 assert 'plugins.configurers: -none-' in out_lines 

+

204 expected_end = [ 

+

205 "-- sys: plugin_sys_info.Plugin -------------------------------", 

+

206 "hello: world", 

+

207 "-- end -------------------------------------------------------", 

+

208 ] 

+

209 assert expected_end == out_lines[-len(expected_end):] 

+

210 

+

211 def test_plugin_with_no_sys_info(self): 

+

212 self.make_file("plugin_no_sys_info.py", """\ 

+

213 import coverage 

+

214 

+

215 class Plugin(coverage.CoveragePlugin): 

+

216 pass 

+

217 

+

218 def coverage_init(reg, options): 

+

219 reg.add_configurer(Plugin()) 

+

220 """) 

+

221 debug_out = StringIO() 

+

222 cov = coverage.Coverage(debug=["sys"]) 

+

223 cov._debug_file = debug_out 

+

224 cov.set_option("run:plugins", ["plugin_no_sys_info"]) 

+

225 cov.start() 

+

226 cov.stop() # pragma: nested 

+

227 

+

228 out_lines = [line.strip() for line in debug_out.getvalue().splitlines()] 

+

229 assert 'plugins.file_tracers: -none-' in out_lines 

+

230 assert 'plugins.configurers: plugin_no_sys_info.Plugin' in out_lines 

+

231 expected_end = [ 

+

232 "-- sys: plugin_no_sys_info.Plugin ----------------------------", 

+

233 "-- end -------------------------------------------------------", 

+

234 ] 

+

235 assert expected_end == out_lines[-len(expected_end):] 

+

236 

+

237 def test_local_files_are_importable(self): 

+

238 self.make_file("importing_plugin.py", """\ 

+

239 from coverage import CoveragePlugin 

+

240 import local_module 

+

241 class MyPlugin(CoveragePlugin): 

+

242 pass 

+

243 def coverage_init(reg, options): 

+

244 reg.add_noop(MyPlugin()) 

+

245 """) 

+

246 self.make_file("local_module.py", "CONST = 1") 

+

247 self.make_file(".coveragerc", """\ 

+

248 [run] 

+

249 plugins = importing_plugin 

+

250 """) 

+

251 self.make_file("main_file.py", "print('MAIN')") 

+

252 

+

253 out = self.run_command("coverage run main_file.py") 

+

254 assert out == "MAIN\n" 

+

255 out = self.run_command("coverage html") 

+

256 assert out == "" 

+

257 

+

258 

+

259@pytest.mark.skipif(env.C_TRACER, reason="This test is only about PyTracer.") 

+

260class PluginWarningOnPyTracerTest(CoverageTest): 

+

261 """Test that we get a controlled exception with plugins on PyTracer.""" 

+

262 def test_exception_if_plugins_on_pytracer(self): 

+

263 self.make_file("simple.py", "a = 1") 

+

264 

+

265 cov = coverage.Coverage() 

+

266 cov.set_option("run:plugins", ["tests.plugin1"]) 

+

267 

+

268 expected_warnings = [ 

+

269 r"Plugin file tracers \(tests.plugin1.Plugin\) aren't supported with PyTracer", 

+

270 ] 

+

271 with self.assert_warnings(cov, expected_warnings): 

+

272 self.start_import_stop(cov, "simple") 

+

273 

+

274 

+

275@pytest.mark.skipif(not env.C_TRACER, reason="Plugins are only supported with the C tracer.") 

+

276class FileTracerTest(CoverageTest): 

+

277 """Tests of plugins that implement file_tracer.""" 

+

278 

+

279 

+

280class GoodFileTracerTest(FileTracerTest): 

+

281 """Tests of file tracer plugin happy paths.""" 

+

282 

+

283 def test_plugin1(self): 

+

284 self.make_file("simple.py", """\ 

+

285 import try_xyz 

+

286 a = 1 

+

287 b = 2 

+

288 """) 

+

289 self.make_file("try_xyz.py", """\ 

+

290 c = 3 

+

291 d = 4 

+

292 """) 

+

293 

+

294 cov = coverage.Coverage() 

+

295 CheckUniqueFilenames.hook(cov, '_should_trace') 

+

296 CheckUniqueFilenames.hook(cov, '_check_include_omit_etc') 

+

297 cov.set_option("run:plugins", ["tests.plugin1"]) 

+

298 

+

299 # Import the Python file, executing it. 

+

300 self.start_import_stop(cov, "simple") 

+

301 

+

302 _, statements, missing, _ = cov.analysis("simple.py") 

+

303 assert statements == [1, 2, 3] 

+

304 assert missing == [] 

+

305 zzfile = os.path.abspath(os.path.join("/src", "try_ABC.zz")) 

+

306 _, statements, _, _ = cov.analysis(zzfile) 

+

307 assert statements == [105, 106, 107, 205, 206, 207] 

+

308 

+

309 def make_render_and_caller(self): 

+

310 """Make the render.py and caller.py files we need.""" 

+

311 # plugin2 emulates a dynamic tracing plugin: the caller's locals 

+

312 # are examined to determine the source file and line number. 

+

313 # The plugin is in tests/plugin2.py. 

+

314 self.make_file("render.py", """\ 

+

315 def render(filename, linenum): 

+

316 # This function emulates a template renderer. The plugin 

+

317 # will examine the `filename` and `linenum` locals to 

+

318 # determine the source file and line number. 

+

319 fiddle_around = 1 # not used, just chaff. 

+

320 return "[{} @ {}]".format(filename, linenum) 

+

321 

+

322 def helper(x): 

+

323 # This function is here just to show that not all code in 

+

324 # this file will be part of the dynamic tracing. 

+

325 return x+1 

+

326 """) 

+

327 self.make_file("caller.py", """\ 

+

328 import sys 

+

329 from render import helper, render 

+

330 

+

331 assert render("foo_7.html", 4) == "[foo_7.html @ 4]" 

+

332 # Render foo_7.html again to try the CheckUniqueFilenames asserts. 

+

333 render("foo_7.html", 4) 

+

334 

+

335 assert helper(42) == 43 

+

336 assert render("bar_4.html", 2) == "[bar_4.html @ 2]" 

+

337 assert helper(76) == 77 

+

338 

+

339 # quux_5.html will be omitted from the results. 

+

340 assert render("quux_5.html", 3) == "[quux_5.html @ 3]" 

+

341 

+

342 # For Python 2, make sure unicode is working. 

+

343 assert render(u"uni_3.html", 2) == "[uni_3.html @ 2]" 

+

344 """) 

+

345 

+

346 # will try to read the actual source files, so make some 

+

347 # source files. 

+

348 def lines(n): 

+

349 """Make a string with n lines of text.""" 

+

350 return "".join("line %d\n" % i for i in range(n)) 

+

351 

+

352 self.make_file("bar_4.html", lines(4)) 

+

353 self.make_file("foo_7.html", lines(7)) 

+

354 

+

355 def test_plugin2(self): 

+

356 self.make_render_and_caller() 

+

357 

+

358 cov = coverage.Coverage(omit=["*quux*"]) 

+

359 CheckUniqueFilenames.hook(cov, '_should_trace') 

+

360 CheckUniqueFilenames.hook(cov, '_check_include_omit_etc') 

+

361 cov.set_option("run:plugins", ["tests.plugin2"]) 

+

362 

+

363 self.start_import_stop(cov, "caller") 

+

364 

+

365 # The way plugin2 works, a file named foo_7.html will be claimed to 

+

366 # have 7 lines in it. If render() was called with line number 4, 

+

367 # then the plugin will claim that lines 4 and 5 were executed. 

+

368 _, statements, missing, _ = cov.analysis("foo_7.html") 

+

369 assert statements == [1, 2, 3, 4, 5, 6, 7] 

+

370 assert missing == [1, 2, 3, 6, 7] 

+

371 assert "foo_7.html" in line_counts(cov.get_data()) 

+

372 

+

373 _, statements, missing, _ = cov.analysis("bar_4.html") 

+

374 assert statements == [1, 2, 3, 4] 

+

375 assert missing == [1, 4] 

+

376 assert "bar_4.html" in line_counts(cov.get_data()) 

+

377 

+

378 assert "quux_5.html" not in line_counts(cov.get_data()) 

+

379 

+

380 _, statements, missing, _ = cov.analysis("uni_3.html") 

+

381 assert statements == [1, 2, 3] 

+

382 assert missing == [1] 

+

383 assert "uni_3.html" in line_counts(cov.get_data()) 

+

384 

+

385 def test_plugin2_with_branch(self): 

+

386 self.make_render_and_caller() 

+

387 

+

388 cov = coverage.Coverage(branch=True, omit=["*quux*"]) 

+

389 CheckUniqueFilenames.hook(cov, '_should_trace') 

+

390 CheckUniqueFilenames.hook(cov, '_check_include_omit_etc') 

+

391 cov.set_option("run:plugins", ["tests.plugin2"]) 

+

392 

+

393 self.start_import_stop(cov, "caller") 

+

394 

+

395 # The way plugin2 works, a file named foo_7.html will be claimed to 

+

396 # have 7 lines in it. If render() was called with line number 4, 

+

397 # then the plugin will claim that lines 4 and 5 were executed. 

+

398 analysis = cov._analyze("foo_7.html") 

+

399 assert analysis.statements == {1, 2, 3, 4, 5, 6, 7} 

+

400 # Plugins don't do branch coverage yet. 

+

401 assert analysis.has_arcs() is True 

+

402 assert analysis.arc_possibilities() == [] 

+

403 

+

404 assert analysis.missing == {1, 2, 3, 6, 7} 

+

405 

+

406 def test_plugin2_with_text_report(self): 

+

407 self.make_render_and_caller() 

+

408 

+

409 cov = coverage.Coverage(branch=True, omit=["*quux*"]) 

+

410 cov.set_option("run:plugins", ["tests.plugin2"]) 

+

411 

+

412 self.start_import_stop(cov, "caller") 

+

413 

+

414 repout = StringIO() 

+

415 total = cov.report(file=repout, include=["*.html"], omit=["uni*.html"], show_missing=True) 

+

416 report = repout.getvalue().splitlines() 

+

417 expected = [ 

+

418 'Name Stmts Miss Branch BrPart Cover Missing', 

+

419 '--------------------------------------------------------', 

+

420 'bar_4.html 4 2 0 0 50% 1, 4', 

+

421 'foo_7.html 7 5 0 0 29% 1-3, 6-7', 

+

422 '--------------------------------------------------------', 

+

423 'TOTAL 11 7 0 0 36%', 

+

424 ] 

+

425 assert expected == report 

+

426 assert round(abs(total-36.36), 2) == 0 

+

427 

+

428 def test_plugin2_with_html_report(self): 

+

429 self.make_render_and_caller() 

+

430 

+

431 cov = coverage.Coverage(branch=True, omit=["*quux*"]) 

+

432 cov.set_option("run:plugins", ["tests.plugin2"]) 

+

433 

+

434 self.start_import_stop(cov, "caller") 

+

435 

+

436 total = cov.html_report(include=["*.html"], omit=["uni*.html"]) 

+

437 assert round(abs(total-36.36), 2) == 0 

+

438 

+

439 self.assert_exists("htmlcov/index.html") 

+

440 self.assert_exists("htmlcov/bar_4_html.html") 

+

441 self.assert_exists("htmlcov/foo_7_html.html") 

+

442 

+

443 def test_plugin2_with_xml_report(self): 

+

444 self.make_render_and_caller() 

+

445 

+

446 cov = coverage.Coverage(branch=True, omit=["*quux*"]) 

+

447 cov.set_option("run:plugins", ["tests.plugin2"]) 

+

448 

+

449 self.start_import_stop(cov, "caller") 

+

450 

+

451 total = cov.xml_report(include=["*.html"], omit=["uni*.html"]) 

+

452 assert round(abs(total-36.36), 2) == 0 

+

453 

+

454 dom = ElementTree.parse("coverage.xml") 

+

455 classes = {} 

+

456 for elt in dom.findall(".//class"): 

+

457 classes[elt.get('name')] = elt 

+

458 

+

459 assert classes['bar_4.html'].attrib == { 

+

460 'branch-rate': '1', 

+

461 'complexity': '0', 

+

462 'filename': 'bar_4.html', 

+

463 'line-rate': '0.5', 

+

464 'name': 'bar_4.html', 

+

465 } 

+

466 assert classes['foo_7.html'].attrib == { 

+

467 'branch-rate': '1', 

+

468 'complexity': '0', 

+

469 'filename': 'foo_7.html', 

+

470 'line-rate': '0.2857', 

+

471 'name': 'foo_7.html', 

+

472 } 

+

473 

+

474 def test_defer_to_python(self): 

+

475 # A plugin that measures, but then wants built-in python reporting. 

+

476 self.make_file("fairly_odd_plugin.py", """\ 

+

477 # A plugin that claims all the odd lines are executed, and none of 

+

478 # the even lines, and then punts reporting off to the built-in 

+

479 # Python reporting. 

+

480 import coverage.plugin 

+

481 class Plugin(coverage.CoveragePlugin): 

+

482 def file_tracer(self, filename): 

+

483 return OddTracer(filename) 

+

484 def file_reporter(self, filename): 

+

485 return "python" 

+

486 

+

487 class OddTracer(coverage.plugin.FileTracer): 

+

488 def __init__(self, filename): 

+

489 self.filename = filename 

+

490 def source_filename(self): 

+

491 return self.filename 

+

492 def line_number_range(self, frame): 

+

493 lineno = frame.f_lineno 

+

494 if lineno % 2: 

+

495 return (lineno, lineno) 

+

496 else: 

+

497 return (-1, -1) 

+

498 

+

499 def coverage_init(reg, options): 

+

500 reg.add_file_tracer(Plugin()) 

+

501 """) 

+

502 self.make_file("unsuspecting.py", """\ 

+

503 a = 1 

+

504 b = 2 

+

505 c = 3 

+

506 d = 4 

+

507 e = 5 

+

508 f = 6 

+

509 """) 

+

510 cov = coverage.Coverage(include=["unsuspecting.py"]) 

+

511 cov.set_option("run:plugins", ["fairly_odd_plugin"]) 

+

512 self.start_import_stop(cov, "unsuspecting") 

+

513 

+

514 repout = StringIO() 

+

515 total = cov.report(file=repout, show_missing=True) 

+

516 report = repout.getvalue().splitlines() 

+

517 expected = [ 

+

518 'Name Stmts Miss Cover Missing', 

+

519 '-----------------------------------------------', 

+

520 'unsuspecting.py 6 3 50% 2, 4, 6', 

+

521 '-----------------------------------------------', 

+

522 'TOTAL 6 3 50%', 

+

523 ] 

+

524 assert expected == report 

+

525 assert total == 50 

+

526 

+

527 def test_find_unexecuted(self): 

+

528 self.make_file("unexecuted_plugin.py", """\ 

+

529 import os 

+

530 import coverage.plugin 

+

531 class Plugin(coverage.CoveragePlugin): 

+

532 def file_tracer(self, filename): 

+

533 if filename.endswith("foo.py"): 

+

534 return MyTracer(filename) 

+

535 def file_reporter(self, filename): 

+

536 return MyReporter(filename) 

+

537 def find_executable_files(self, src_dir): 

+

538 # Check that src_dir is the right value 

+

539 files = os.listdir(src_dir) 

+

540 assert "foo.py" in files 

+

541 assert "unexecuted_plugin.py" in files 

+

542 return ["chimera.py"] 

+

543 

+

544 class MyTracer(coverage.plugin.FileTracer): 

+

545 def __init__(self, filename): 

+

546 self.filename = filename 

+

547 def source_filename(self): 

+

548 return self.filename 

+

549 def line_number_range(self, frame): 

+

550 return (999, 999) 

+

551 

+

552 class MyReporter(coverage.FileReporter): 

+

553 def lines(self): 

+

554 return {99, 999, 9999} 

+

555 

+

556 def coverage_init(reg, options): 

+

557 reg.add_file_tracer(Plugin()) 

+

558 """) 

+

559 self.make_file("foo.py", "a = 1") 

+

560 cov = coverage.Coverage(source=['.']) 

+

561 cov.set_option("run:plugins", ["unexecuted_plugin"]) 

+

562 self.start_import_stop(cov, "foo") 

+

563 

+

564 # The file we executed claims to have run line 999. 

+

565 _, statements, missing, _ = cov.analysis("foo.py") 

+

566 assert statements == [99, 999, 9999] 

+

567 assert missing == [99, 9999] 

+

568 

+

569 # The completely missing file is in the results. 

+

570 _, statements, missing, _ = cov.analysis("chimera.py") 

+

571 assert statements == [99, 999, 9999] 

+

572 assert missing == [99, 999, 9999] 

+

573 

+

574 # But completely new filenames are not in the results. 

+

575 assert len(cov.get_data().measured_files()) == 3 

+

576 with pytest.raises(CoverageException): 

+

577 cov.analysis("fictional.py") 

+

578 

+

579 

+

580class BadFileTracerTest(FileTracerTest): 

+

581 """Test error handling around file tracer plugins.""" 

+

582 

+

583 def run_plugin(self, module_name): 

+

584 """Run a plugin with the given module_name. 

+

585 

+

586 Uses a few fixed Python files. 

+

587 

+

588 Returns the Coverage object. 

+

589 

+

590 """ 

+

591 self.make_file("simple.py", """\ 

+

592 import other, another 

+

593 a = other.f(2) 

+

594 b = other.f(3) 

+

595 c = another.g(4) 

+

596 d = another.g(5) 

+

597 """) 

+

598 # The names of these files are important: some plugins apply themselves 

+

599 # to "*other.py". 

+

600 self.make_file("other.py", """\ 

+

601 def f(x): 

+

602 return x+1 

+

603 """) 

+

604 self.make_file("another.py", """\ 

+

605 def g(x): 

+

606 return x-1 

+

607 """) 

+

608 

+

609 cov = coverage.Coverage() 

+

610 cov.set_option("run:plugins", [module_name]) 

+

611 self.start_import_stop(cov, "simple") 

+

612 cov.save() # pytest-cov does a save after stop, so we'll do it too. 

+

613 return cov 

+

614 

+

615 def run_bad_plugin(self, module_name, plugin_name, our_error=True, excmsg=None, excmsgs=None): 

+

616 """Run a file, and see that the plugin failed. 

+

617 

+

618 `module_name` and `plugin_name` is the module and name of the plugin to 

+

619 use. 

+

620 

+

621 `our_error` is True if the error reported to the user will be an 

+

622 explicit error in our test code, marked with an '# Oh noes!' comment. 

+

623 

+

624 `excmsg`, if provided, is text that must appear in the stderr. 

+

625 

+

626 `excmsgs`, if provided, is a list of messages, one of which must 

+

627 appear in the stderr. 

+

628 

+

629 The plugin will be disabled, and we check that a warning is output 

+

630 explaining why. 

+

631 

+

632 """ 

+

633 self.run_plugin(module_name) 

+

634 

+

635 stderr = self.stderr() 

+

636 

+

637 if our_error: 

+

638 errors = stderr.count("# Oh noes!") 

+

639 # The exception we're causing should only appear once. 

+

640 assert errors == 1 

+

641 

+

642 # There should be a warning explaining what's happening, but only one. 

+

643 # The message can be in two forms: 

+

644 # Disabling plug-in '...' due to previous exception 

+

645 # or: 

+

646 # Disabling plug-in '...' due to an exception: 

+

647 msg = "Disabling plug-in '%s.%s' due to " % (module_name, plugin_name) 

+

648 warnings = stderr.count(msg) 

+

649 assert warnings == 1 

+

650 

+

651 if excmsg: 

+

652 assert excmsg in stderr 

+

653 if excmsgs: 

+

654 assert any(em in stderr for em in excmsgs), "expected one of %r" % excmsgs    654 ↛ exit: line 654 didn't finish the generator expression on line 654

+

655 

+

656 def test_file_tracer_has_no_file_tracer_method(self): 

+

657 self.make_file("bad_plugin.py", """\ 

+

658 class Plugin(object): 

+

659 pass 

+

660 

+

661 def coverage_init(reg, options): 

+

662 reg.add_file_tracer(Plugin()) 

+

663 """) 

+

664 self.run_bad_plugin("bad_plugin", "Plugin", our_error=False) 

+

665 

+

666 def test_file_tracer_has_inherited_sourcefilename_method(self): 

+

667 self.make_file("bad_plugin.py", """\ 

+

668 import coverage 

+

669 class Plugin(coverage.CoveragePlugin): 

+

670 def file_tracer(self, filename): 

+

671 # Just grab everything. 

+

672 return FileTracer() 

+

673 

+

674 class FileTracer(coverage.FileTracer): 

+

675 pass 

+

676 

+

677 def coverage_init(reg, options): 

+

678 reg.add_file_tracer(Plugin()) 

+

679 """) 

+

680 self.run_bad_plugin( 

+

681 "bad_plugin", "Plugin", our_error=False, 

+

682 excmsg="Class 'bad_plugin.FileTracer' needs to implement source_filename()", 

+

683 ) 

+

684 

+

685 def test_plugin_has_inherited_filereporter_method(self): 

+

686 self.make_file("bad_plugin.py", """\ 

+

687 import coverage 

+

688 class Plugin(coverage.CoveragePlugin): 

+

689 def file_tracer(self, filename): 

+

690 # Just grab everything. 

+

691 return FileTracer() 

+

692 

+

693 class FileTracer(coverage.FileTracer): 

+

694 def source_filename(self): 

+

695 return "foo.xxx" 

+

696 

+

697 def coverage_init(reg, options): 

+

698 reg.add_file_tracer(Plugin()) 

+

699 """) 

+

700 cov = self.run_plugin("bad_plugin") 

+

701 expected_msg = "Plugin 'bad_plugin.Plugin' needs to implement file_reporter()" 

+

702 with pytest.raises(NotImplementedError, match=expected_msg): 

+

703 cov.report() 

+

704 

+

705 def test_file_tracer_fails(self): 

+

706 self.make_file("bad_plugin.py", """\ 

+

707 import coverage.plugin 

+

708 class Plugin(coverage.plugin.CoveragePlugin): 

+

709 def file_tracer(self, filename): 

+

710 17/0 # Oh noes! 

+

711 

+

712 def coverage_init(reg, options): 

+

713 reg.add_file_tracer(Plugin()) 

+

714 """) 

+

715 self.run_bad_plugin("bad_plugin", "Plugin") 

+

716 

+

717 def test_file_tracer_fails_eventually(self): 

+

718 # Django coverage plugin can report on a few files and then fail. 

+

719 # https://github.com/nedbat/coveragepy/issues/1011 

+

720 self.make_file("bad_plugin.py", """\ 

+

721 import os.path 

+

722 import coverage.plugin 

+

723 class Plugin(coverage.plugin.CoveragePlugin): 

+

724 def __init__(self): 

+

725 self.calls = 0 

+

726 

+

727 def file_tracer(self, filename): 

+

728 print(filename) 

+

729 self.calls += 1 

+

730 if self.calls <= 2: 

+

731 return FileTracer(filename) 

+

732 else: 

+

733 17/0 # Oh noes! 

+

734 

+

735 class FileTracer(coverage.FileTracer): 

+

736 def __init__(self, filename): 

+

737 self.filename = filename 

+

738 def source_filename(self): 

+

739 return os.path.basename(self.filename).replace(".py", ".foo") 

+

740 def line_number_range(self, frame): 

+

741 return -1, -1 

+

742 

+

743 def coverage_init(reg, options): 

+

744 reg.add_file_tracer(Plugin()) 

+

745 """) 

+

746 self.run_bad_plugin("bad_plugin", "Plugin") 

+

747 

+

748 def test_file_tracer_returns_wrong(self): 

+

749 self.make_file("bad_plugin.py", """\ 

+

750 import coverage.plugin 

+

751 class Plugin(coverage.plugin.CoveragePlugin): 

+

752 def file_tracer(self, filename): 

+

753 return 3.14159 

+

754 

+

755 def coverage_init(reg, options): 

+

756 reg.add_file_tracer(Plugin()) 

+

757 """) 

+

758 self.run_bad_plugin( 

+

759 "bad_plugin", "Plugin", our_error=False, excmsg="'float' object has no attribute", 

+

760 ) 

+

761 

+

762 def test_has_dynamic_source_filename_fails(self): 

+

763 self.make_file("bad_plugin.py", """\ 

+

764 import coverage.plugin 

+

765 class Plugin(coverage.plugin.CoveragePlugin): 

+

766 def file_tracer(self, filename): 

+

767 return BadFileTracer() 

+

768 

+

769 class BadFileTracer(coverage.plugin.FileTracer): 

+

770 def has_dynamic_source_filename(self): 

+

771 23/0 # Oh noes! 

+

772 

+

773 def coverage_init(reg, options): 

+

774 reg.add_file_tracer(Plugin()) 

+

775 """) 

+

776 self.run_bad_plugin("bad_plugin", "Plugin") 

+

777 

+

778 def test_source_filename_fails(self): 

+

779 self.make_file("bad_plugin.py", """\ 

+

780 import coverage.plugin 

+

781 class Plugin(coverage.plugin.CoveragePlugin): 

+

782 def file_tracer(self, filename): 

+

783 return BadFileTracer() 

+

784 

+

785 class BadFileTracer(coverage.plugin.FileTracer): 

+

786 def source_filename(self): 

+

787 42/0 # Oh noes! 

+

788 

+

789 def coverage_init(reg, options): 

+

790 reg.add_file_tracer(Plugin()) 

+

791 """) 

+

792 self.run_bad_plugin("bad_plugin", "Plugin") 

+

793 

+

794 def test_source_filename_returns_wrong(self): 

+

795 self.make_file("bad_plugin.py", """\ 

+

796 import coverage.plugin 

+

797 class Plugin(coverage.plugin.CoveragePlugin): 

+

798 def file_tracer(self, filename): 

+

799 return BadFileTracer() 

+

800 

+

801 class BadFileTracer(coverage.plugin.FileTracer): 

+

802 def source_filename(self): 

+

803 return 17.3 

+

804 

+

805 def coverage_init(reg, options): 

+

806 reg.add_file_tracer(Plugin()) 

+

807 """) 

+

808 self.run_bad_plugin( 

+

809 "bad_plugin", "Plugin", our_error=False, 

+

810 excmsgs=[ 

+

811 "expected str, bytes or os.PathLike object, not float", 

+

812 "'float' object has no attribute", 

+

813 "object of type 'float' has no len()", 

+

814 "'float' object is unsubscriptable", 

+

815 ], 

+

816 ) 

+

817 

+

818 def test_dynamic_source_filename_fails(self): 

+

819 self.make_file("bad_plugin.py", """\ 

+

820 import coverage.plugin 

+

821 class Plugin(coverage.plugin.CoveragePlugin): 

+

822 def file_tracer(self, filename): 

+

823 if filename.endswith("other.py"): 

+

824 return BadFileTracer() 

+

825 

+

826 class BadFileTracer(coverage.plugin.FileTracer): 

+

827 def has_dynamic_source_filename(self): 

+

828 return True 

+

829 def dynamic_source_filename(self, filename, frame): 

+

830 101/0 # Oh noes! 

+

831 

+

832 def coverage_init(reg, options): 

+

833 reg.add_file_tracer(Plugin()) 

+

834 """) 

+

835 self.run_bad_plugin("bad_plugin", "Plugin") 

+

836 

+

837 def test_line_number_range_raises_error(self): 

+

838 self.make_file("bad_plugin.py", """\ 

+

839 import coverage.plugin 

+

840 class Plugin(coverage.plugin.CoveragePlugin): 

+

841 def file_tracer(self, filename): 

+

842 if filename.endswith("other.py"): 

+

843 return BadFileTracer() 

+

844 

+

845 class BadFileTracer(coverage.plugin.FileTracer): 

+

846 def source_filename(self): 

+

847 return "something.foo" 

+

848 

+

849 def line_number_range(self, frame): 

+

850 raise Exception("borked!") 

+

851 

+

852 def coverage_init(reg, options): 

+

853 reg.add_file_tracer(Plugin()) 

+

854 """) 

+

855 self.run_bad_plugin( 

+

856 "bad_plugin", "Plugin", our_error=False, excmsg="borked!", 

+

857 ) 

+

858 

+

859 def test_line_number_range_returns_non_tuple(self): 

+

860 self.make_file("bad_plugin.py", """\ 

+

861 import coverage.plugin 

+

862 class Plugin(coverage.plugin.CoveragePlugin): 

+

863 def file_tracer(self, filename): 

+

864 if filename.endswith("other.py"): 

+

865 return BadFileTracer() 

+

866 

+

867 class BadFileTracer(coverage.plugin.FileTracer): 

+

868 def source_filename(self): 

+

869 return "something.foo" 

+

870 

+

871 def line_number_range(self, frame): 

+

872 return 42.23 

+

873 

+

874 def coverage_init(reg, options): 

+

875 reg.add_file_tracer(Plugin()) 

+

876 """) 

+

877 self.run_bad_plugin( 

+

878 "bad_plugin", "Plugin", our_error=False, excmsg="line_number_range must return 2-tuple", 

+

879 ) 

+

880 

+

881 def test_line_number_range_returns_triple(self): 

+

882 self.make_file("bad_plugin.py", """\ 

+

883 import coverage.plugin 

+

884 class Plugin(coverage.plugin.CoveragePlugin): 

+

885 def file_tracer(self, filename): 

+

886 if filename.endswith("other.py"): 

+

887 return BadFileTracer() 

+

888 

+

889 class BadFileTracer(coverage.plugin.FileTracer): 

+

890 def source_filename(self): 

+

891 return "something.foo" 

+

892 

+

893 def line_number_range(self, frame): 

+

894 return (1, 2, 3) 

+

895 

+

896 def coverage_init(reg, options): 

+

897 reg.add_file_tracer(Plugin()) 

+

898 """) 

+

899 self.run_bad_plugin( 

+

900 "bad_plugin", "Plugin", our_error=False, excmsg="line_number_range must return 2-tuple", 

+

901 ) 

+

902 

+

903 def test_line_number_range_returns_pair_of_strings(self): 

+

904 self.make_file("bad_plugin.py", """\ 

+

905 import coverage.plugin 

+

906 class Plugin(coverage.plugin.CoveragePlugin): 

+

907 def file_tracer(self, filename): 

+

908 if filename.endswith("other.py"): 

+

909 return BadFileTracer() 

+

910 

+

911 class BadFileTracer(coverage.plugin.FileTracer): 

+

912 def source_filename(self): 

+

913 return "something.foo" 

+

914 

+

915 def line_number_range(self, frame): 

+

916 return ("5", "7") 

+

917 

+

918 def coverage_init(reg, options): 

+

919 reg.add_file_tracer(Plugin()) 

+

920 """) 

+

921 self.run_bad_plugin( 

+

922 "bad_plugin", "Plugin", our_error=False, 

+

923 excmsgs=[ 

+

924 "an integer is required", 

+

925 "cannot be interpreted as an integer", 

+

926 ], 

+

927 ) 

+

928 

+

929 
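For contrast with the deliberately broken bad_plugin variants above, here is a minimal sketch of a file tracer plugin that satisfies what these tests check for: file_tracer(), a FileTracer with source_filename(), and file_reporter(). The ".foo" suffix and the class names are illustrative only, not taken from the test suite.

import coverage.plugin

class Plugin(coverage.plugin.CoveragePlugin):
    def file_tracer(self, filename):
        # Claim only the files this plugin knows how to trace.
        if filename.endswith(".foo"):
            return MyFileTracer(filename)
        return None

    def file_reporter(self, filename):
        # Reporting needs a FileReporter; see the subclass below.
        return MyFileReporter(filename)

class MyFileTracer(coverage.plugin.FileTracer):
    def __init__(self, filename):
        self.filename = filename

    def source_filename(self):
        # Map the executed frame back to the file to credit with coverage.
        return self.filename

class MyFileReporter(coverage.plugin.FileReporter):
    def lines(self):
        # Executable line numbers; hard-coded here to keep the sketch small.
        return {1}

def coverage_init(reg, options):
    reg.add_file_tracer(Plugin())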

+

930class ConfigurerPluginTest(CoverageTest): 

+

931 """Test configuring plugins.""" 

+

932 

+

933 run_in_temp_dir = False 

+

934 

+

935 def test_configurer_plugin(self): 

+

936 cov = coverage.Coverage() 

+

937 cov.set_option("run:plugins", ["tests.plugin_config"]) 

+

938 cov.start() 

+

939 cov.stop() # pragma: nested 

+

940 excluded = cov.get_option("report:exclude_lines") 

+

941 assert "pragma: custom" in excluded 

+

942 assert "pragma: or whatever" in excluded 

+

943 

+

944 
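A rough sketch of the kind of configurer plugin that test_configurer_plugin exercises; the real one lives in tests/plugin_config.py, which is not part of this report, so the body below is an assumption based on the options asserted above.

from coverage import CoveragePlugin

class Plugin(CoveragePlugin):
    def configure(self, config):
        # Append two custom exclusion pragmas to the reporting options.
        exclude = list(config.get_option("report:exclude_lines"))
        exclude.extend(["pragma: custom", "pragma: or whatever"])
        config.set_option("report:exclude_lines", exclude)

def coverage_init(reg, options):
    reg.add_configurer(Plugin())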

+

945class DynamicContextPluginTest(CoverageTest): 

+

946 """Tests of plugins that implement `dynamic_context`.""" 

+

947 

+

948 def make_plugin_capitalized_testnames(self, filename): 

+

949 """Create a dynamic context plugin that capitalizes the part after 'test_'.""" 

+

950 self.make_file(filename, """\ 

+

951 from coverage import CoveragePlugin 

+

952 

+

953 class Plugin(CoveragePlugin): 

+

954 def dynamic_context(self, frame): 

+

955 name = frame.f_code.co_name 

+

956 if name.startswith(("test_", "doctest_")): 

+

957 parts = name.split("_", 1) 

+

958 return "%s:%s" % (parts[0], parts[1].upper()) 

+

959 return None 

+

960 

+

961 def coverage_init(reg, options): 

+

962 reg.add_dynamic_context(Plugin()) 

+

963 """) 

+

964 

+

965 def make_plugin_track_render(self, filename): 

+

966 """Make a dynamic context plugin that tracks 'render_' functions.""" 

+

967 self.make_file(filename, """\ 

+

968 from coverage import CoveragePlugin 

+

969 

+

970 class Plugin(CoveragePlugin): 

+

971 def dynamic_context(self, frame): 

+

972 name = frame.f_code.co_name 

+

973 if name.startswith("render_"): 

+

974 return 'renderer:' + name[7:] 

+

975 return None 

+

976 

+

977 def coverage_init(reg, options): 

+

978 reg.add_dynamic_context(Plugin()) 

+

979 """) 

+

980 

+

981 def make_test_files(self): 

+

982 """Make some files to use while testing dynamic context plugins.""" 

+

983 self.make_file("rendering.py", """\ 

+

984 def html_tag(tag, content): 

+

985 return '<%s>%s</%s>' % (tag, content, tag) 

+

986 

+

987 def render_paragraph(text): 

+

988 return html_tag('p', text) 

+

989 

+

990 def render_span(text): 

+

991 return html_tag('span', text) 

+

992 

+

993 def render_bold(text): 

+

994 return html_tag('b', text) 

+

995 """) 

+

996 

+

997 self.make_file("testsuite.py", """\ 

+

998 import rendering 

+

999 

+

1000 def test_html_tag(): 

+

1001 assert rendering.html_tag('b', 'hello') == '<b>hello</b>' 

+

1002 

+

1003 def doctest_html_tag(): 

+

1004 assert eval(''' 

+

1005 rendering.html_tag('i', 'text') == '<i>text</i>' 

+

1006 '''.strip()) 

+

1007 

+

1008 def test_renderers(): 

+

1009 assert rendering.render_paragraph('hello') == '<p>hello</p>' 

+

1010 assert rendering.render_bold('wide') == '<b>wide</b>' 

+

1011 assert rendering.render_span('world') == '<span>world</span>' 

+

1012 

+

1013 def build_full_html(): 

+

1014 html = '<html><body>%s</body></html>' % ( 

+

1015 rendering.render_paragraph( 

+

1016 rendering.render_span('hello'))) 

+

1017 return html 

+

1018 """) 

+

1019 

+

1020 def run_all_functions(self, cov, suite_name): # pragma: nested 

+

1021 """Run all functions in `suite_name` under coverage.""" 

+

1022 cov.start() 

+

1023 suite = import_local_file(suite_name) 

+

1024 try: 

+

1025 # Call all functions in this module 

+

1026 for name in dir(suite): 

+

1027 variable = getattr(suite, name) 

+

1028 if inspect.isfunction(variable): 

+

1029 variable() 

+

1030 finally: 

+

1031 cov.stop() 

+

1032 

+

1033 def test_plugin_standalone(self): 

+

1034 self.make_plugin_capitalized_testnames('plugin_tests.py') 

+

1035 self.make_test_files() 

+

1036 

+

1037 # Enable dynamic context plugin 

+

1038 cov = coverage.Coverage() 

+

1039 cov.set_option("run:plugins", ['plugin_tests']) 

+

1040 

+

1041 # Run the tests 

+

1042 self.run_all_functions(cov, 'testsuite') 

+

1043 

+

1044 # Labeled coverage is collected 

+

1045 data = cov.get_data() 

+

1046 filenames = self.get_measured_filenames(data) 

+

1047 expected = ['', 'doctest:HTML_TAG', 'test:HTML_TAG', 'test:RENDERERS'] 

+

1048 assert expected == sorted(data.measured_contexts()) 

+

1049 data.set_query_context("doctest:HTML_TAG") 

+

1050 assert [2] == data.lines(filenames['rendering.py']) 

+

1051 data.set_query_context("test:HTML_TAG") 

+

1052 assert [2] == data.lines(filenames['rendering.py']) 

+

1053 data.set_query_context("test:RENDERERS") 

+

1054 assert [2, 5, 8, 11] == sorted(data.lines(filenames['rendering.py'])) 

+

1055 

+

1056 def test_static_context(self): 

+

1057 self.make_plugin_capitalized_testnames('plugin_tests.py') 

+

1058 self.make_test_files() 

+

1059 

+

1060 # Enable dynamic context plugin for coverage with named context 

+

1061 cov = coverage.Coverage(context='mytests') 

+

1062 cov.set_option("run:plugins", ['plugin_tests']) 

+

1063 

+

1064 # Run the tests 

+

1065 self.run_all_functions(cov, 'testsuite') 

+

1066 

+

1067 # Static context prefix is preserved 

+

1068 data = cov.get_data() 

+

1069 expected = [ 

+

1070 'mytests', 

+

1071 'mytests|doctest:HTML_TAG', 

+

1072 'mytests|test:HTML_TAG', 

+

1073 'mytests|test:RENDERERS', 

+

1074 ] 

+

1075 assert expected == sorted(data.measured_contexts()) 

+

1076 

+

1077 def test_plugin_with_test_function(self): 

+

1078 self.make_plugin_capitalized_testnames('plugin_tests.py') 

+

1079 self.make_test_files() 

+

1080 

+

1081 # Enable both a plugin and test_function dynamic context 

+

1082 cov = coverage.Coverage() 

+

1083 cov.set_option("run:plugins", ['plugin_tests']) 

+

1084 cov.set_option("run:dynamic_context", "test_function") 

+

1085 

+

1086 # Run the tests 

+

1087 self.run_all_functions(cov, 'testsuite') 

+

1088 

+

1089 # test_function takes precedence over plugins - only 

+

1090 # functions that are not labeled by test_function are 

+

1091 # labeled by plugin_tests. 

+

1092 data = cov.get_data() 

+

1093 filenames = self.get_measured_filenames(data) 

+

1094 expected = [ 

+

1095 '', 

+

1096 'doctest:HTML_TAG', 

+

1097 'testsuite.test_html_tag', 

+

1098 'testsuite.test_renderers', 

+

1099 ] 

+

1100 assert expected == sorted(data.measured_contexts()) 

+

1101 

+

1102 def assert_context_lines(context, lines): 

+

1103 data.set_query_context(context) 

+

1104 assert lines == sorted(data.lines(filenames['rendering.py'])) 

+

1105 

+

1106 assert_context_lines("doctest:HTML_TAG", [2]) 

+

1107 assert_context_lines("testsuite.test_html_tag", [2]) 

+

1108 assert_context_lines("testsuite.test_renderers", [2, 5, 8, 11]) 

+

1109 

+

1110 def test_multiple_plugins(self): 

+

1111 self.make_plugin_capitalized_testnames('plugin_tests.py') 

+

1112 self.make_plugin_track_render('plugin_renderers.py') 

+

1113 self.make_test_files() 

+

1114 

+

1115 # Enable two plugins 

+

1116 cov = coverage.Coverage() 

+

1117 cov.set_option("run:plugins", ['plugin_renderers', 'plugin_tests']) 

+

1118 

+

1119 self.run_all_functions(cov, 'testsuite') 

+

1120 

+

 1121 # It is important to note that line 11 (render_bold function) is never 

+

1122 # labeled as renderer:bold context, because it is only called from 

+

1123 # test_renderers function - so it already falls under test:RENDERERS 

+

1124 # context. 

+

1125 # 

+

1126 # render_paragraph and render_span (lines 5, 8) are directly called by 

+

1127 # testsuite.build_full_html, so they get labeled by renderers plugin. 

+

1128 data = cov.get_data() 

+

1129 filenames = self.get_measured_filenames(data) 

+

1130 expected = [ 

+

1131 '', 

+

1132 'doctest:HTML_TAG', 

+

1133 'renderer:paragraph', 

+

1134 'renderer:span', 

+

1135 'test:HTML_TAG', 

+

1136 'test:RENDERERS', 

+

1137 ] 

+

1138 assert expected == sorted(data.measured_contexts()) 

+

1139 

+

1140 def assert_context_lines(context, lines): 

+

1141 data.set_query_context(context) 

+

1142 assert lines == sorted(data.lines(filenames['rendering.py'])) 

+

1143 

+

1144 assert_context_lines("test:HTML_TAG", [2]) 

+

1145 assert_context_lines("test:RENDERERS", [2, 5, 8, 11]) 

+

1146 assert_context_lines("doctest:HTML_TAG", [2]) 

+

1147 assert_context_lines("renderer:paragraph", [2, 5]) 

+

1148 assert_context_lines("renderer:span", [2, 8]) 

+
+
+
+
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_process_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_process_py.html
new file mode 100644
index 000000000..229f9dc0d
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_process_py.html
@@ -0,0 +1,1708 @@
+ Coverage for tests/test_process.py: 100.000%

1# coding: utf-8 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Tests for process behavior of coverage.py.""" 

+

6 

+

7import glob 

+

8import os 

+

9import os.path 

+

10import re 

+

11import stat 

+

12import sys 

+

13import sysconfig 

+

14import textwrap 

+

15import time 

+

16from xml.etree import ElementTree 

+

17 

+

18import pytest 

+

19 

+

20import coverage 

+

21from coverage import env 

+

22from coverage.data import line_counts 

+

23from coverage.files import abs_file, python_reported_file 

+

24from coverage.misc import output_encoding 

+

25 

+

26from tests.coveragetest import CoverageTest, TESTS_DIR 

+

27from tests.helpers import re_lines 

+

28 

+

29 

+

30class ProcessTest(CoverageTest): 

+

31 """Tests of the per-process behavior of coverage.py.""" 

+

32 

+

33 def test_save_on_exit(self): 

+

34 self.make_file("mycode.py", """\ 

+

35 h = "Hello" 

+

36 w = "world" 

+

37 """) 

+

38 

+

39 self.assert_doesnt_exist(".coverage") 

+

40 self.run_command("coverage run mycode.py") 

+

41 self.assert_exists(".coverage") 

+

42 

+

43 def test_environment(self): 

+

44 # Checks that we can import modules from the tests directory at all! 

+

45 self.make_file("mycode.py", """\ 

+

46 import covmod1 

+

47 import covmodzip1 

+

48 a = 1 

+

49 print('done') 

+

50 """) 

+

51 

+

52 self.assert_doesnt_exist(".coverage") 

+

53 out = self.run_command("coverage run mycode.py") 

+

54 self.assert_exists(".coverage") 

+

55 assert out == 'done\n' 

+

56 

+

57 def make_b_or_c_py(self): 

+

58 """Create b_or_c.py, used in a few of these tests.""" 

+

59 # "b_or_c.py b" will run 6 lines. 

+

60 # "b_or_c.py c" will run 7 lines. 

+

61 # Together, they run 8 lines. 

+

62 self.make_file("b_or_c.py", """\ 

+

63 import sys 

+

64 a = 1 

+

65 if sys.argv[1] == 'b': 

+

66 b = 1 

+

67 else: 

+

68 c = 1 

+

69 c2 = 2 

+

70 d = 1 

+

71 print('done') 

+

72 """) 

+

73 

+

74 def test_combine_parallel_data(self): 

+

75 self.make_b_or_c_py() 

+

76 out = self.run_command("coverage run -p b_or_c.py b") 

+

77 assert out == 'done\n' 

+

78 self.assert_doesnt_exist(".coverage") 

+

79 self.assert_file_count(".coverage.*", 1) 

+

80 

+

81 out = self.run_command("coverage run -p b_or_c.py c") 

+

82 assert out == 'done\n' 

+

83 self.assert_doesnt_exist(".coverage") 

+

84 

+

85 # After two -p runs, there should be two .coverage.machine.123 files. 

+

86 self.assert_file_count(".coverage.*", 2) 

+

87 

+

88 # Combine the parallel coverage data files into .coverage . 

+

89 self.run_command("coverage combine") 

+

90 self.assert_exists(".coverage") 

+

91 

+

92 # After combining, there should be only the .coverage file. 

+

93 self.assert_file_count(".coverage.*", 0) 

+

94 

+

95 # Read the coverage file and see that b_or_c.py has all 8 lines 

+

96 # executed. 

+

97 data = coverage.CoverageData() 

+

98 data.read() 

+

99 assert line_counts(data)['b_or_c.py'] == 8 

+

100 

+

101 # Running combine again should fail, because there are no parallel data 

+

102 # files to combine. 

+

103 status, out = self.run_command_status("coverage combine") 

+

104 assert status == 1 

+

105 assert out == "No data to combine\n" 

+

106 

+

107 # And the originally combined data is still there. 

+

108 data = coverage.CoverageData() 

+

109 data.read() 

+

110 assert line_counts(data)['b_or_c.py'] == 8 

+

111 
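The parallel-run-then-combine flow driven through the command line above can also be done with the public API. A minimal sketch, with a made-up measured() stand-in for the program under test; data_suffix=True is the API counterpart of the -p flag.

import coverage

def measured():
    # Stand-in for the code being measured in each parallel run.
    return sum(range(10))

# Each run writes its own .coverage.<machine>.<pid>.<random> data file.
cov = coverage.Coverage(data_suffix=True)
cov.start()
measured()
cov.stop()
cov.save()

# A later step merges all .coverage.* files into a single .coverage file.
combined = coverage.Coverage()
combined.combine()
combined.save()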

+

112 def test_combine_parallel_data_with_a_corrupt_file(self): 

+

113 self.make_b_or_c_py() 

+

114 out = self.run_command("coverage run -p b_or_c.py b") 

+

115 assert out == 'done\n' 

+

116 self.assert_doesnt_exist(".coverage") 

+

117 self.assert_file_count(".coverage.*", 1) 

+

118 

+

119 out = self.run_command("coverage run -p b_or_c.py c") 

+

120 assert out == 'done\n' 

+

121 self.assert_doesnt_exist(".coverage") 

+

122 

+

123 # After two -p runs, there should be two .coverage.machine.123 files. 

+

124 self.assert_file_count(".coverage.*", 2) 

+

125 

+

126 # Make a bogus data file. 

+

127 self.make_file(".coverage.bad", "This isn't a coverage data file.") 

+

128 

+

129 # Combine the parallel coverage data files into .coverage . 

+

130 out = self.run_command("coverage combine") 

+

131 self.assert_exists(".coverage") 

+

132 self.assert_exists(".coverage.bad") 

+

133 warning_regex = ( 

+

134 r"Coverage.py warning: Couldn't use data file '.*\.coverage\.bad': " 

+

135 r"file (is encrypted or )?is not a database" 

+

136 ) 

+

137 assert re.search(warning_regex, out) 

+

138 

+

139 # After combining, those two should be the only data files. 

+

140 self.assert_file_count(".coverage.*", 1) 

+

141 

+

142 # Read the coverage file and see that b_or_c.py has all 8 lines 

+

143 # executed. 

+

144 data = coverage.CoverageData() 

+

145 data.read() 

+

146 assert line_counts(data)['b_or_c.py'] == 8 

+

147 

+

148 def test_combine_no_usable_files(self): 

+

149 # https://github.com/nedbat/coveragepy/issues/629 

+

150 self.make_b_or_c_py() 

+

151 out = self.run_command("coverage run b_or_c.py b") 

+

152 assert out == 'done\n' 

+

153 self.assert_exists(".coverage") 

+

154 self.assert_file_count(".coverage.*", 0) 

+

155 

+

156 # Make bogus data files. 

+

157 self.make_file(".coverage.bad1", "This isn't a coverage data file.") 

+

158 self.make_file(".coverage.bad2", "This isn't a coverage data file.") 

+

159 

+

160 # Combine the parallel coverage data files into .coverage, but nothing is readable. 

+

161 status, out = self.run_command_status("coverage combine") 

+

162 assert status == 1 

+

163 

+

164 for n in "12": 

+

165 self.assert_exists(".coverage.bad{}".format(n)) 

+

166 warning_regex = ( 

+

167 r"Coverage.py warning: Couldn't use data file '.*\.coverage.bad{0}': " 

+

168 r"file (is encrypted or )?is not a database" 

+

169 .format(n) 

+

170 ) 

+

171 assert re.search(warning_regex, out) 

+

172 assert re.search(r"No usable data files", out) 

+

173 

+

174 # After combining, we should have a main file and two parallel files. 

+

175 self.assert_exists(".coverage") 

+

176 self.assert_file_count(".coverage.*", 2) 

+

177 

+

178 # Read the coverage file and see that b_or_c.py has 6 lines 

+

179 # executed (we only did b, not c). 

+

180 data = coverage.CoverageData() 

+

181 data.read() 

+

182 assert line_counts(data)['b_or_c.py'] == 6 

+

183 

+

184 def test_combine_parallel_data_in_two_steps(self): 

+

185 self.make_b_or_c_py() 

+

186 

+

187 out = self.run_command("coverage run -p b_or_c.py b") 

+

188 assert out == 'done\n' 

+

189 self.assert_doesnt_exist(".coverage") 

+

190 self.assert_file_count(".coverage.*", 1) 

+

191 

+

192 # Combine the (one) parallel coverage data file into .coverage . 

+

193 self.run_command("coverage combine") 

+

194 self.assert_exists(".coverage") 

+

195 self.assert_file_count(".coverage.*", 0) 

+

196 

+

197 out = self.run_command("coverage run -p b_or_c.py c") 

+

198 assert out == 'done\n' 

+

199 self.assert_exists(".coverage") 

+

200 self.assert_file_count(".coverage.*", 1) 

+

201 

+

202 # Combine the parallel coverage data files into .coverage . 

+

203 self.run_command("coverage combine --append") 

+

204 self.assert_exists(".coverage") 

+

205 

+

206 # After combining, there should be only the .coverage file. 

+

207 self.assert_file_count(".coverage.*", 0) 

+

208 

+

209 # Read the coverage file and see that b_or_c.py has all 8 lines 

+

210 # executed. 

+

211 data = coverage.CoverageData() 

+

212 data.read() 

+

213 assert line_counts(data)['b_or_c.py'] == 8 

+

214 

+

215 def test_combine_parallel_data_no_append(self): 

+

216 self.make_b_or_c_py() 

+

217 

+

218 out = self.run_command("coverage run -p b_or_c.py b") 

+

219 assert out == 'done\n' 

+

220 self.assert_doesnt_exist(".coverage") 

+

221 self.assert_file_count(".coverage.*", 1) 

+

222 

+

223 # Combine the (one) parallel coverage data file into .coverage . 

+

224 self.run_command("coverage combine") 

+

225 self.assert_exists(".coverage") 

+

226 self.assert_file_count(".coverage.*", 0) 

+

227 

+

228 out = self.run_command("coverage run -p b_or_c.py c") 

+

229 assert out == 'done\n' 

+

230 self.assert_exists(".coverage") 

+

231 self.assert_file_count(".coverage.*", 1) 

+

232 

+

233 # Combine the parallel coverage data files into .coverage, but don't 

+

234 # use the data in .coverage already. 

+

235 self.run_command("coverage combine") 

+

236 self.assert_exists(".coverage") 

+

237 

+

238 # After combining, there should be only the .coverage file. 

+

239 self.assert_file_count(".coverage.*", 0) 

+

240 

+

241 # Read the coverage file and see that b_or_c.py has only 7 lines 

+

242 # because we didn't keep the data from running b. 

+

243 data = coverage.CoverageData() 

+

244 data.read() 

+

245 assert line_counts(data)['b_or_c.py'] == 7 

+

246 

+

247 def test_combine_parallel_data_keep(self): 

+

248 self.make_b_or_c_py() 

+

249 out = self.run_command("coverage run -p b_or_c.py b") 

+

250 assert out == 'done\n' 

+

251 self.assert_doesnt_exist(".coverage") 

+

252 self.assert_file_count(".coverage.*", 1) 

+

253 

+

254 out = self.run_command("coverage run -p b_or_c.py c") 

+

255 assert out == 'done\n' 

+

256 self.assert_doesnt_exist(".coverage") 

+

257 

+

258 # After two -p runs, there should be two .coverage.machine.123 files. 

+

259 self.assert_file_count(".coverage.*", 2) 

+

260 

+

261 # Combine the parallel coverage data files into .coverage with the keep flag. 

+

262 self.run_command("coverage combine --keep") 

+

263 

+

264 # After combining, the .coverage file & the original combined file should still be there. 

+

265 self.assert_exists(".coverage") 

+

266 self.assert_file_count(".coverage.*", 2) 

+

267 

+

268 

+

269 def test_append_data(self): 

+

270 self.make_b_or_c_py() 

+

271 

+

272 out = self.run_command("coverage run b_or_c.py b") 

+

273 assert out == 'done\n' 

+

274 self.assert_exists(".coverage") 

+

275 self.assert_file_count(".coverage.*", 0) 

+

276 

+

277 out = self.run_command("coverage run --append b_or_c.py c") 

+

278 assert out == 'done\n' 

+

279 self.assert_exists(".coverage") 

+

280 self.assert_file_count(".coverage.*", 0) 

+

281 

+

282 # Read the coverage file and see that b_or_c.py has all 8 lines 

+

283 # executed. 

+

284 data = coverage.CoverageData() 

+

285 data.read() 

+

286 assert line_counts(data)['b_or_c.py'] == 8 

+

287 

+

288 def test_append_data_with_different_file(self): 

+

289 self.make_b_or_c_py() 

+

290 

+

291 self.make_file(".coveragerc", """\ 

+

292 [run] 

+

293 data_file = .mycovdata 

+

294 """) 

+

295 

+

296 out = self.run_command("coverage run b_or_c.py b") 

+

297 assert out == 'done\n' 

+

298 self.assert_doesnt_exist(".coverage") 

+

299 self.assert_exists(".mycovdata") 

+

300 

+

301 out = self.run_command("coverage run --append b_or_c.py c") 

+

302 assert out == 'done\n' 

+

303 self.assert_doesnt_exist(".coverage") 

+

304 self.assert_exists(".mycovdata") 

+

305 

+

306 # Read the coverage file and see that b_or_c.py has all 8 lines 

+

307 # executed. 

+

308 data = coverage.CoverageData(".mycovdata") 

+

309 data.read() 

+

310 assert line_counts(data)['b_or_c.py'] == 8 

+

311 

+

312 def test_append_can_create_a_data_file(self): 

+

313 self.make_b_or_c_py() 

+

314 

+

315 out = self.run_command("coverage run --append b_or_c.py b") 

+

316 assert out == 'done\n' 

+

317 self.assert_exists(".coverage") 

+

318 self.assert_file_count(".coverage.*", 0) 

+

319 

+

320 # Read the coverage file and see that b_or_c.py has only 6 lines 

+

321 # executed. 

+

322 data = coverage.CoverageData() 

+

323 data.read() 

+

324 assert line_counts(data)['b_or_c.py'] == 6 

+

325 

+

326 def test_combine_with_rc(self): 

+

327 self.make_b_or_c_py() 

+

328 

+

329 self.make_file(".coveragerc", """\ 

+

330 [run] 

+

331 source = . 

+

332 parallel = true 

+

333 """) 

+

334 

+

335 out = self.run_command("coverage run b_or_c.py b") 

+

336 assert out == 'done\n' 

+

337 self.assert_doesnt_exist(".coverage") 

+

338 

+

339 out = self.run_command("coverage run b_or_c.py c") 

+

340 assert out == 'done\n' 

+

341 self.assert_doesnt_exist(".coverage") 

+

342 

+

343 # After two runs, there should be two .coverage.machine.123 files. 

+

344 self.assert_file_count(".coverage.*", 2) 

+

345 

+

346 # Combine the parallel coverage data files into .coverage . 

+

347 self.run_command("coverage combine") 

+

348 self.assert_exists(".coverage") 

+

349 self.assert_exists(".coveragerc") 

+

350 

+

351 # After combining, there should be only the .coverage file. 

+

352 self.assert_file_count(".coverage.*", 0) 

+

353 

+

354 # Read the coverage file and see that b_or_c.py has all 8 lines 

+

355 # executed. 

+

356 data = coverage.CoverageData() 

+

357 data.read() 

+

358 assert line_counts(data)['b_or_c.py'] == 8 

+

359 

+

360 # Reporting should still work even with the .rc file 

+

361 out = self.run_command("coverage report") 

+

362 assert out == textwrap.dedent("""\ 

+

363 Name Stmts Miss Cover 

+

364 ------------------------------- 

+

365 b_or_c.py 8 0 100% 

+

366 ------------------------------- 

+

367 TOTAL 8 0 100% 

+

368 """) 

+

369 

+

370 def test_combine_with_aliases(self): 

+

371 self.make_file("d1/x.py", """\ 

+

372 a = 1 

+

373 b = 2 

+

374 print("%s %s" % (a, b)) 

+

375 """) 

+

376 

+

377 self.make_file("d2/x.py", """\ 

+

378 # 1 

+

379 # 2 

+

380 # 3 

+

381 c = 4 

+

382 d = 5 

+

383 print("%s %s" % (c, d)) 

+

384 """) 

+

385 

+

386 self.make_file(".coveragerc", """\ 

+

387 [run] 

+

388 source = . 

+

389 parallel = True 

+

390 

+

391 [paths] 

+

392 source = 

+

393 src 

+

394 */d1 

+

395 */d2 

+

396 """) 

+

397 

+

398 out = self.run_command("coverage run " + os.path.normpath("d1/x.py")) 

+

399 assert out == '1 2\n' 

+

400 out = self.run_command("coverage run " + os.path.normpath("d2/x.py")) 

+

401 assert out == '4 5\n' 

+

402 

+

403 self.assert_file_count(".coverage.*", 2) 

+

404 

+

405 self.run_command("coverage combine") 

+

406 self.assert_exists(".coverage") 

+

407 

+

408 # After combining, there should be only the .coverage file. 

+

409 self.assert_file_count(".coverage.*", 0) 

+

410 

+

411 # Read the coverage data file and see that the two different x.py 

+

412 # files have been combined together. 

+

413 data = coverage.CoverageData() 

+

414 data.read() 

+

415 summary = line_counts(data, fullpath=True) 

+

416 assert len(summary) == 1 

+

417 actual = abs_file(list(summary.keys())[0]) 

+

418 expected = abs_file('src/x.py') 

+

419 assert expected == actual 

+

420 assert list(summary.values())[0] == 6 

+

421 

+

422 def test_erase_parallel(self): 

+

423 self.make_file(".coveragerc", """\ 

+

424 [run] 

+

425 data_file = data.dat 

+

426 parallel = True 

+

427 """) 

+

428 self.make_file("data.dat") 

+

429 self.make_file("data.dat.fooey") 

+

430 self.make_file("data.dat.gooey") 

+

431 self.make_file(".coverage") 

+

432 

+

433 self.run_command("coverage erase") 

+

434 self.assert_doesnt_exist("data.dat") 

+

435 self.assert_doesnt_exist("data.dat.fooey") 

+

436 self.assert_doesnt_exist("data.dat.gooey") 

+

437 self.assert_exists(".coverage") 

+

438 

+

439 def test_missing_source_file(self): 

+

440 # Check what happens if the source is missing when reporting happens. 

+

441 self.make_file("fleeting.py", """\ 

+

442 s = 'goodbye, cruel world!' 

+

443 """) 

+

444 

+

445 self.run_command("coverage run fleeting.py") 

+

446 os.remove("fleeting.py") 

+

447 out = self.run_command("coverage html -d htmlcov") 

+

448 assert re.search("No source for code: '.*fleeting.py'", out) 

+

449 assert "Traceback" not in out 

+

450 

+

451 # It happens that the code paths are different for *.py and other 

+

452 # files, so try again with no extension. 

+

453 self.make_file("fleeting", """\ 

+

454 s = 'goodbye, cruel world!' 

+

455 """) 

+

456 

+

457 self.run_command("coverage run fleeting") 

+

458 os.remove("fleeting") 

+

459 status, out = self.run_command_status("coverage html -d htmlcov") 

+

460 assert re.search("No source for code: '.*fleeting'", out) 

+

461 assert "Traceback" not in out 

+

462 assert status == 1 

+

463 

+

464 def test_running_missing_file(self): 

+

465 status, out = self.run_command_status("coverage run xyzzy.py") 

+

466 assert re.search("No file to run: .*xyzzy.py", out) 

+

467 assert "raceback" not in out 

+

468 assert "rror" not in out 

+

469 assert status == 1 

+

470 

+

471 def test_code_throws(self): 

+

472 self.make_file("throw.py", """\ 

+

473 def f1(): 

+

474 raise Exception("hey!") 

+

475 

+

476 def f2(): 

+

477 f1() 

+

478 

+

479 f2() 

+

480 """) 

+

481 

+

482 # The important thing is for "coverage run" and "python" to report the 

+

483 # same traceback. 

+

484 status, out = self.run_command_status("coverage run throw.py") 

+

485 out2 = self.run_command("python throw.py") 

+

486 if env.PYPY: 

+

487 # Pypy has an extra frame in the traceback for some reason 

+

488 out2 = re_lines(out2, "toplevel", match=False) 

+

489 assert out == out2 

+

490 

+

491 # But also make sure that the output is what we expect. 

+

492 path = python_reported_file('throw.py') 

+

493 msg = 'File "{}", line 5,? in f2'.format(re.escape(path)) 

+

494 assert re.search(msg, out) 

+

495 assert 'raise Exception("hey!")' in out 

+

496 assert status == 1 

+

497 

+

498 def test_code_exits(self): 

+

499 self.make_file("exit.py", """\ 

+

500 import sys 

+

501 def f1(): 

+

502 print("about to exit..") 

+

503 sys.exit(17) 

+

504 

+

505 def f2(): 

+

506 f1() 

+

507 

+

508 f2() 

+

509 """) 

+

510 

+

511 # The important thing is for "coverage run" and "python" to have the 

+

512 # same output. No traceback. 

+

513 status, out = self.run_command_status("coverage run exit.py") 

+

514 status2, out2 = self.run_command_status("python exit.py") 

+

515 assert out == out2 

+

516 assert out == "about to exit..\n" 

+

517 assert status == status2 

+

518 assert status == 17 

+

519 

+

520 def test_code_exits_no_arg(self): 

+

521 self.make_file("exit_none.py", """\ 

+

522 import sys 

+

523 def f1(): 

+

524 print("about to exit quietly..") 

+

525 sys.exit() 

+

526 

+

527 f1() 

+

528 """) 

+

529 status, out = self.run_command_status("coverage run exit_none.py") 

+

530 status2, out2 = self.run_command_status("python exit_none.py") 

+

531 assert out == out2 

+

532 assert out == "about to exit quietly..\n" 

+

533 assert status == status2 

+

534 assert status == 0 

+

535 

+

536 @pytest.mark.skipif(not hasattr(os, "fork"), reason="Can't test os.fork, it doesn't exist.") 

+

537 def test_fork(self): 

+

538 self.make_file("fork.py", """\ 

+

539 import os 

+

540 

+

541 def child(): 

+

542 print('Child!') 

+

543 

+

544 def main(): 

+

545 ret = os.fork() 

+

546 

+

547 if ret == 0: 

+

548 child() 

+

549 else: 

+

550 os.waitpid(ret, 0) 

+

551 

+

552 main() 

+

553 """) 

+

554 

+

555 out = self.run_command("coverage run -p fork.py") 

+

556 assert out == 'Child!\n' 

+

557 self.assert_doesnt_exist(".coverage") 

+

558 

+

559 # After running the forking program, there should be two 

+

560 # .coverage.machine.123 files. 

+

561 self.assert_file_count(".coverage.*", 2) 

+

562 

+

563 # The two data files should have different random numbers at the end of 

+

564 # the file name. 

+

565 data_files = glob.glob(".coverage.*") 

+

566 nums = set(name.rpartition(".")[-1] for name in data_files) 

+

567 assert len(nums) == 2, "Same random: %s" % (data_files,) 

+

568 

+

569 # Combine the parallel coverage data files into .coverage . 

+

570 self.run_command("coverage combine") 

+

571 self.assert_exists(".coverage") 

+

572 

+

573 # After combining, there should be only the .coverage file. 

+

574 self.assert_file_count(".coverage.*", 0) 

+

575 

+

576 data = coverage.CoverageData() 

+

577 data.read() 

+

578 assert line_counts(data)['fork.py'] == 9 

+

579 

+

580 def test_warnings_during_reporting(self): 

+

581 # While fixing issue #224, the warnings were being printed far too 

+

582 # often. Make sure they're not any more. 

+

583 self.make_file("hello.py", """\ 

+

584 import sys, os, the_other 

+

585 print("Hello") 

+

586 """) 

+

587 self.make_file("the_other.py", """\ 

+

588 print("What?") 

+

589 """) 

+

590 self.make_file(".coveragerc", """\ 

+

591 [run] 

+

592 source = 

+

593 . 

+

594 xyzzy 

+

595 """) 

+

596 

+

597 self.run_command("coverage run hello.py") 

+

598 out = self.run_command("coverage html") 

+

599 assert out.count("Module xyzzy was never imported.") == 0 

+

600 

+

601 def test_warns_if_never_run(self): 

+

602 # Note: the name of the function can't have "warning" in it, or the 

+

603 # absolute path of the file will have "warning" in it, and an assertion 

+

604 # will fail. 

+

605 out = self.run_command("coverage run i_dont_exist.py") 

+

606 path = python_reported_file('i_dont_exist.py') 

+

607 assert "No file to run: '{}'".format(path) in out 

+

608 assert "warning" not in out 

+

609 assert "Exception" not in out 

+

610 

+

611 out = self.run_command("coverage run -m no_such_module") 

+

612 assert ( 

+

613 ("No module named no_such_module" in out) or 

+

614 ("No module named 'no_such_module'" in out) 

+

615 ) 

+

616 assert "warning" not in out 

+

617 assert "Exception" not in out 

+

618 

+

619 @pytest.mark.skipif(env.METACOV, reason="Can't test tracers changing during metacoverage") 

+

620 def test_warnings_trace_function_changed_with_threads(self): 

+

621 # https://github.com/nedbat/coveragepy/issues/164 

+

622 

+

623 self.make_file("bug164.py", """\ 

+

624 import threading 

+

625 import time 

+

626 

+

627 class MyThread (threading.Thread): 

+

628 def run(self): 

+

629 print("Hello") 

+

630 

+

631 thr = MyThread() 

+

632 thr.start() 

+

633 thr.join() 

+

634 """) 

+

635 out = self.run_command("coverage run --timid bug164.py") 

+

636 

+

637 assert "Hello\n" in out 

+

638 assert "warning" not in out 

+

639 

+

640 def test_warning_trace_function_changed(self): 

+

641 self.make_file("settrace.py", """\ 

+

642 import sys 

+

643 print("Hello") 

+

644 sys.settrace(None) 

+

645 print("Goodbye") 

+

646 """) 

+

647 out = self.run_command("coverage run --timid settrace.py") 

+

648 assert "Hello\n" in out 

+

649 assert "Goodbye\n" in out 

+

650 

+

651 assert "Trace function changed" in out 

+

652 

+

653 # When meta-coverage testing, this test doesn't work, because it finds 

+

654 # coverage.py's own trace function. 

+

655 @pytest.mark.skipif(env.METACOV, reason="Can't test timid during coverage measurement.") 

+

656 def test_timid(self): 

+

657 # Test that the --timid command line argument properly swaps the tracer 

+

658 # function for a simpler one. 

+

659 # 

+

660 # This is complicated by the fact that the tests are run twice for each 

+

661 # version: once with a compiled C-based trace function, and once without 

+

662 # it, to also test the Python trace function. So this test has to examine 

+

663 # an environment variable set in igor.py to know whether to expect to see 

+

664 # the C trace function or not. 

+

665 

+

666 self.make_file("showtrace.py", """\ 

+

667 # Show the current frame's trace function, so that we can test what the 

+

668 # command-line options do to the trace function used. 

+

669 

+

670 import sys 

+

671 

+

672 # Show what the trace function is. If a C-based function is used, then f_trace 

+

673 # may be None. 

+

674 trace_fn = sys._getframe(0).f_trace 

+

675 if trace_fn is None: 

+

676 trace_name = "None" 

+

677 else: 

+

678 # Get the name of the tracer class. Py3k has a different way to get it. 

+

679 try: 

+

680 trace_name = trace_fn.im_class.__name__ 

+

681 except AttributeError: 

+

682 try: 

+

683 trace_name = trace_fn.__self__.__class__.__name__ 

+

684 except AttributeError: 

+

685 # A C-based function could also manifest as an f_trace value 

+

686 # which doesn't have im_class or __self__. 

+

687 trace_name = trace_fn.__class__.__name__ 

+

688 

+

689 print(trace_name) 

+

690 """) 

+

691 

+

692 # When running without coverage, no trace function 

+

693 py_out = self.run_command("python showtrace.py") 

+

694 assert py_out == "None\n" 

+

695 

+

696 cov_out = self.run_command("coverage run showtrace.py") 

+

697 if os.environ.get('COVERAGE_TEST_TRACER', 'c') == 'c': 

+

698 # If the C trace function is being tested, then regular running should have 

+

699 # the C function, which registers itself as f_trace. 

+

700 assert cov_out == "CTracer\n" 

+

701 else: 

+

702 # If the Python trace function is being tested, then regular running will 

+

703 # also show the Python function. 

+

704 assert cov_out == "PyTracer\n" 

+

705 

+

706 # When running timidly, the trace function is always Python. 

+

707 timid_out = self.run_command("coverage run --timid showtrace.py") 

+

708 assert timid_out == "PyTracer\n" 

+

709 

+

710 def test_warn_preimported(self): 

+

711 self.make_file("hello.py", """\ 

+

712 import goodbye 

+

713 import coverage 

+

714 cov = coverage.Coverage(include=["good*"], check_preimported=True) 

+

715 cov.start() 

+

716 print(goodbye.f()) 

+

717 cov.stop() 

+

718 """) 

+

719 self.make_file("goodbye.py", """\ 

+

720 def f(): 

+

721 return "Goodbye!" 

+

722 """) 

+

723 goodbye_path = os.path.abspath("goodbye.py") 

+

724 

+

725 out = self.run_command("python hello.py") 

+

726 assert "Goodbye!" in out 

+

727 

+

728 msg = ( 

+

729 "Coverage.py warning: " 

+

730 "Already imported a file that will be measured: {0} " 

+

731 "(already-imported)").format(goodbye_path) 

+

732 assert msg in out 

+

733 

+

734 @pytest.mark.expensive 

+

735 @pytest.mark.skipif(env.METACOV, reason="Can't test fullcoverage when measuring ourselves") 

+

736 @pytest.mark.skipif(env.PY2, reason="fullcoverage doesn't work on Python 2.") 

+

737 @pytest.mark.skipif(not env.C_TRACER, reason="fullcoverage only works with the C tracer.") 

+

738 def test_fullcoverage(self): 

+

739 # fullcoverage is a trick to get stdlib modules measured from 

+

740 # the very beginning of the process. Here we import os and 

+

741 # then check how many lines are measured. 

+

742 self.make_file("getenv.py", """\ 

+

743 import os 

+

744 print("FOOEY == %s" % os.getenv("FOOEY")) 

+

745 """) 

+

746 

+

747 fullcov = os.path.join( 

+

748 os.path.dirname(coverage.__file__), "fullcoverage" 

+

749 ) 

+

750 self.set_environ("FOOEY", "BOO") 

+

751 self.set_environ("PYTHONPATH", fullcov) 

+

752 out = self.run_command("python -m coverage run -L getenv.py") 

+

753 assert out == "FOOEY == BOO\n" 

+

754 data = coverage.CoverageData() 

+

755 data.read() 

+

756 # The actual number of executed lines in os.py when it's 

+

757 # imported is 120 or so. Just running os.getenv executes 

+

758 # about 5. 

+

759 assert line_counts(data)['os.py'] > 50 

+

760 

+

761 # Pypy passes locally, but fails in CI? Perhaps the version of macOS is 

+

762 # significant? https://foss.heptapod.net/pypy/pypy/-/issues/3074 

+

763 @pytest.mark.skipif(env.PYPY3, reason="Pypy is unreliable with this test") 

+

764 # Jython as of 2.7.1rc3 won't compile a filename that isn't utf8. 

+

765 @pytest.mark.skipif(env.JYTHON, reason="Jython can't handle this test") 

+

766 def test_lang_c(self): 

+

767 # LANG=C forces getfilesystemencoding on Linux to 'ascii', which causes 

+

768 # failures with non-ascii file names. We don't want to make a real file 

+

769 # with strange characters, though, because that gets the test runners 

+

770 # tangled up. This will isolate the concerns to the coverage.py code. 

+

771 # https://github.com/nedbat/coveragepy/issues/533 

+

772 self.make_file("weird_file.py", r""" 

+

773 globs = {} 

+

774 code = "a = 1\nb = 2\n" 

+

775 exec(compile(code, "wut\xe9\xea\xeb\xec\x01\x02.py", 'exec'), globs) 

+

776 print(globs['a']) 

+

777 print(globs['b']) 

+

778 """) 

+

779 self.set_environ("LANG", "C") 

+

780 out = self.run_command("coverage run weird_file.py") 

+

781 assert out == "1\n2\n" 

+

782 

+

783 def test_deprecation_warnings(self): 

+

784 # Test that coverage doesn't trigger deprecation warnings. 

+

785 # https://github.com/nedbat/coveragepy/issues/305 

+

786 self.make_file("allok.py", """\ 

+

787 import warnings 

+

788 warnings.simplefilter('default') 

+

789 import coverage 

+

790 print("No warnings!") 

+

791 """) 

+

792 

+

793 # Some of our testing infrastructure can issue warnings. 

+

794 # Turn it all off for the sub-process. 

+

795 self.del_environ("COVERAGE_TESTING") 

+

796 

+

797 out = self.run_command("python allok.py") 

+

798 assert out == "No warnings!\n" 

+

799 

+

800 def test_run_twice(self): 

+

801 # https://github.com/nedbat/coveragepy/issues/353 

+

802 self.make_file("foo.py", """\ 

+

803 def foo(): 

+

804 pass 

+

805 """) 

+

806 self.make_file("run_twice.py", """\ 

+

807 import sys 

+

808 import coverage 

+

809 

+

810 for i in [1, 2]: 

+

811 sys.stderr.write("Run %s\\n" % i) 

+

812 inst = coverage.Coverage(source=['foo']) 

+

813 inst.load() 

+

814 inst.start() 

+

815 import foo 

+

816 inst.stop() 

+

817 inst.save() 

+

818 """) 

+

819 out = self.run_command("python run_twice.py") 

+

820 expected = ( 

+

821 "Run 1\n" + 

+

822 "Run 2\n" + 

+

823 "Coverage.py warning: Module foo was previously imported, but not measured " + 

+

824 "(module-not-measured)\n" 

+

825 ) 

+

826 assert expected == out 

+

827 

+

828 def test_module_name(self): 

+

829 # https://github.com/nedbat/coveragepy/issues/478 

+

830 out = self.run_command("python -m coverage") 

+

831 assert "Use 'coverage help' for help" in out 

+

832 

+

833 

+

834TRY_EXECFILE = os.path.join(os.path.dirname(__file__), "modules/process_test/try_execfile.py") 

+

835 

+

836class EnvironmentTest(CoverageTest): 

+

837 """Tests using try_execfile.py to test the execution environment.""" 

+

838 

+

839 def assert_tryexecfile_output(self, expected, actual): 

+

840 """Assert that the output we got is a successful run of try_execfile.py. 

+

841 

+

842 `expected` and `actual` must be the same, modulo a few slight known 

+

843 platform differences. 

+

844 

+

845 """ 

+

846 # First, is this even credible try_execfile.py output? 

+

847 assert '"DATA": "xyzzy"' in actual 

+

848 

+

849 if env.JYTHON: # pragma: only jython 

+

850 # Argv0 is different for Jython, remove that from the comparison. 

+

851 expected = re_lines(expected, r'\s+"argv0":', match=False) 

+

852 actual = re_lines(actual, r'\s+"argv0":', match=False) 

+

853 

+

854 assert expected == actual 

+

855 

+

856 def test_coverage_run_is_like_python(self): 

+

857 with open(TRY_EXECFILE) as f: 

+

858 self.make_file("run_me.py", f.read()) 

+

859 expected = self.run_command("python run_me.py") 

+

860 actual = self.run_command("coverage run run_me.py") 

+

861 self.assert_tryexecfile_output(expected, actual) 

+

862 

+

863 def test_coverage_run_far_away_is_like_python(self): 

+

864 with open(TRY_EXECFILE) as f: 

+

865 self.make_file("sub/overthere/prog.py", f.read()) 

+

866 expected = self.run_command("python sub/overthere/prog.py") 

+

867 actual = self.run_command("coverage run sub/overthere/prog.py") 

+

868 self.assert_tryexecfile_output(expected, actual) 

+

869 

+

870 def test_coverage_run_dashm_is_like_python_dashm(self): 

+

871 # These -m commands assume the coverage tree is on the path. 

+

872 expected = self.run_command("python -m process_test.try_execfile") 

+

873 actual = self.run_command("coverage run -m process_test.try_execfile") 

+

874 self.assert_tryexecfile_output(expected, actual) 

+

875 

+

876 @pytest.mark.skipif(env.PYVERSION == (3, 5, 4, 'final', 0, 0), 

+

877 reason="3.5.4 broke this: https://bugs.python.org/issue32551" 

+

878 ) 

+

879 def test_coverage_run_dir_is_like_python_dir(self): 

+

880 with open(TRY_EXECFILE) as f: 

+

881 self.make_file("with_main/__main__.py", f.read()) 

+

882 

+

883 expected = self.run_command("python with_main") 

+

884 actual = self.run_command("coverage run with_main") 

+

885 

+

886 # PyPy includes the current directory in the path when running a 

+

887 # directory, while CPython and coverage.py do not. Exclude that from 

+

888 # the comparison also... 

+

889 if env.PYPY: 

+

890 ignored = re.escape(os.getcwd()) 

+

891 expected = re_lines(expected, ignored, match=False) 

+

892 actual = re_lines(actual, ignored, match=False) 

+

893 self.assert_tryexecfile_output(expected, actual) 

+

894 

+

895 def test_coverage_run_dashm_dir_no_init_is_like_python(self): 

+

896 with open(TRY_EXECFILE) as f: 

+

897 self.make_file("with_main/__main__.py", f.read()) 

+

898 

+

899 expected = self.run_command("python -m with_main") 

+

900 actual = self.run_command("coverage run -m with_main") 

+

901 if env.PY2: 

+

902 assert expected.endswith("No module named with_main\n") 

+

903 assert actual.endswith("No module named with_main\n") 

+

904 else: 

+

905 self.assert_tryexecfile_output(expected, actual) 

+

906 

+

907 @pytest.mark.skipif(env.PY2, 

+

908 reason="Python 2 runs __main__ twice, I can't be bothered to make it work." 

+

909 ) 

+

910 def test_coverage_run_dashm_dir_with_init_is_like_python(self): 

+

911 with open(TRY_EXECFILE) as f: 

+

912 self.make_file("with_main/__main__.py", f.read()) 

+

913 self.make_file("with_main/__init__.py", "") 

+

914 

+

915 expected = self.run_command("python -m with_main") 

+

916 actual = self.run_command("coverage run -m with_main") 

+

917 self.assert_tryexecfile_output(expected, actual) 

+

918 

+

919 def test_coverage_run_dashm_equal_to_doubledashsource(self): 

+

920 """regression test for #328 

+

921 

+

922 When imported by -m, a module's __name__ is __main__, but we need the 

+

923 --source machinery to know and respect the original name. 

+

924 """ 

+

925 # These -m commands assume the coverage tree is on the path. 

+

926 expected = self.run_command("python -m process_test.try_execfile") 

+

927 actual = self.run_command( 

+

928 "coverage run --source process_test.try_execfile -m process_test.try_execfile" 

+

929 ) 

+

930 self.assert_tryexecfile_output(expected, actual) 

+

931 

+

932 def test_coverage_run_dashm_superset_of_doubledashsource(self): 

+

933 """Edge case: --source foo -m foo.bar""" 

+

934 # Ugh: without this config file, we'll get a warning about 

+

935 # Coverage.py warning: Module process_test was previously imported, 

+

936 # but not measured (module-not-measured) 

+

937 # 

+

938 # This is because process_test/__init__.py is imported while looking 

+

939 # for process_test.try_execfile. That import happens while setting 

+

940 # sys.path before start() is called. 

+

941 self.make_file(".coveragerc", """\ 

+

942 [run] 

+

943 disable_warnings = module-not-measured 

+

944 """) 

+

945 # These -m commands assume the coverage tree is on the path. 

+

946 expected = self.run_command("python -m process_test.try_execfile") 

+

947 actual = self.run_command( 

+

948 "coverage run --source process_test -m process_test.try_execfile" 

+

949 ) 

+

950 self.assert_tryexecfile_output(expected, actual) 

+

951 

+

952 st, out = self.run_command_status("coverage report") 

+

953 assert st == 0 

+

954 assert self.line_count(out) == 6, out 

+

955 

+

956 def test_coverage_run_script_imports_doubledashsource(self): 

+

957 # This file imports try_execfile, which compiles it to .pyc, so the 

+

958 # first run will have __file__ == "try_execfile.py" and the second will 

+

959 # have __file__ == "try_execfile.pyc", which throws off the comparison. 

+

960 # Setting dont_write_bytecode True stops the compilation to .pyc and 

+

961 # keeps the test working. 

+

962 self.make_file("myscript", """\ 

+

963 import sys; sys.dont_write_bytecode = True 

+

964 import process_test.try_execfile 

+

965 """) 

+

966 

+

967 expected = self.run_command("python myscript") 

+

968 actual = self.run_command("coverage run --source process_test myscript") 

+

969 self.assert_tryexecfile_output(expected, actual) 

+

970 

+

971 st, out = self.run_command_status("coverage report") 

+

972 assert st == 0 

+

973 assert self.line_count(out) == 6, out 

+

974 

+

975 def test_coverage_run_dashm_is_like_python_dashm_off_path(self): 

+

976 # https://github.com/nedbat/coveragepy/issues/242 

+

977 self.make_file("sub/__init__.py", "") 

+

978 with open(TRY_EXECFILE) as f: 

+

979 self.make_file("sub/run_me.py", f.read()) 

+

980 

+

981 expected = self.run_command("python -m sub.run_me") 

+

982 actual = self.run_command("coverage run -m sub.run_me") 

+

983 self.assert_tryexecfile_output(expected, actual) 

+

984 

+

985 def test_coverage_run_dashm_is_like_python_dashm_with__main__207(self): 

+

986 # https://github.com/nedbat/coveragepy/issues/207 

+

987 self.make_file("package/__init__.py", "print('init')") 

+

988 self.make_file("package/__main__.py", "print('main')") 

+

989 expected = self.run_command("python -m package") 

+

990 actual = self.run_command("coverage run -m package") 

+

991 assert expected == actual 

+

992 

+

993 def test_coverage_zip_is_like_python(self): 

+

994 # Test running coverage from a zip file itself. Some environments 

+

995 # (windows?) zip up the coverage main to be used as the coverage 

+

996 # command. 

+

997 with open(TRY_EXECFILE) as f: 

+

998 self.make_file("run_me.py", f.read()) 

+

999 expected = self.run_command("python run_me.py") 

+

1000 cov_main = os.path.join(TESTS_DIR, "covmain.zip") 

+

1001 actual = self.run_command("python {} run run_me.py".format(cov_main)) 

+

1002 self.assert_tryexecfile_output(expected, actual) 

+

1003 

+

1004 def test_coverage_custom_script(self): 

+

1005 # https://github.com/nedbat/coveragepy/issues/678 

+

1006 # If sys.path[0] isn't the Python default, then coverage.py won't 

+

1007 # fiddle with it. 

+

1008 self.make_file("a/b/c/thing.py", """\ 

+

1009 SOMETHING = "hello-xyzzy" 

+

1010 """) 

+

1011 abc = os.path.abspath("a/b/c") 

+

1012 self.make_file("run_coverage.py", """\ 

+

1013 import sys 

+

1014 sys.path[0:0] = [ 

+

1015 r'{abc}', 

+

1016 '/Users/somebody/temp/something/eggs/something-4.5.1-py2.7-xxx-10.13-x86_64.egg', 

+

1017 ] 

+

1018 

+

1019 import coverage.cmdline 

+

1020 

+

1021 if __name__ == '__main__': 

+

1022 sys.exit(coverage.cmdline.main()) 

+

1023 """.format(abc=abc)) 

+

1024 self.make_file("how_is_it.py", """\ 

+

1025 import pprint, sys 

+

1026 pprint.pprint(sys.path) 

+

1027 import thing 

+

1028 print(thing.SOMETHING) 

+

1029 """) 

+

1030 # If this test fails, it will be with "can't import thing". 

+

1031 out = self.run_command("python run_coverage.py run how_is_it.py") 

+

1032 assert "hello-xyzzy" in out 

+

1033 

+

1034 out = self.run_command("python -m run_coverage run how_is_it.py") 

+

1035 assert "hello-xyzzy" in out 

+

1036 

+

1037 @pytest.mark.skipif(env.WINDOWS, reason="Windows can't make symlinks") 

+

1038 def test_bug_862(self): 

+

1039 # This simulates how pyenv and pyenv-virtualenv end up creating the 

+

1040 # coverage executable. 

+

1041 self.make_file("elsewhere/bin/fake-coverage", """\ 

+

1042 #!{executable} 

+

1043 import sys, pkg_resources 

+

1044 sys.exit(pkg_resources.load_entry_point('coverage', 'console_scripts', 'coverage')()) 

+

1045 """.format(executable=sys.executable)) 

+

1046 os.chmod("elsewhere/bin/fake-coverage", stat.S_IREAD | stat.S_IEXEC) 

+

1047 os.symlink("elsewhere", "somewhere") 

+

1048 self.make_file("foo.py", "print('inside foo')") 

+

1049 self.make_file("bar.py", "import foo") 

+

1050 out = self.run_command("somewhere/bin/fake-coverage run bar.py") 

+

1051 assert "inside foo\n" == out 

+

1052 

+

1053 def test_bug_909(self): 

+

1054 # https://github.com/nedbat/coveragepy/issues/909 

+

1055 # The __init__ files were being imported before measurement started, 

+

1056 # so the line in __init__.py was being marked as missed, and there were 

+

1057 # warnings about measured files being imported before start. 

+

1058 self.make_file("proj/__init__.py", "print('Init')") 

+

1059 self.make_file("proj/thecode.py", "print('The code')") 

+

1060 self.make_file("proj/tests/__init__.py", "") 

+

1061 self.make_file("proj/tests/test_it.py", "import proj.thecode") 

+

1062 

+

1063 expected = "Init\nThe code\n" 

+

1064 actual = self.run_command("coverage run --source=proj -m proj.tests.test_it") 

+

1065 assert expected == actual 

+

1066 

+

1067 report = self.run_command("coverage report -m") 

+

1068 

+

1069 # Name Stmts Miss Cover Missing 

+

1070 # ------------------------------------------------------ 

+

1071 # proj/__init__.py 1 0 100% 

+

1072 # proj/tests/__init__.py 0 0 100% 

+

1073 # proj/tests/test_it.py 1 0 100% 

+

1074 # proj/thecode.py 1 0 100% 

+

1075 # ------------------------------------------------------ 

+

1076 # TOTAL 3 0 100% 

+

1077 

+

1078 squeezed = self.squeezed_lines(report) 

+

1079 assert squeezed[2].replace("\\", "/") == "proj/__init__.py 1 0 100%" 

+

1080 

+

1081 

+

1082class ExcepthookTest(CoverageTest): 

+

1083 """Tests of sys.excepthook support.""" 

+

1084 

+

1085 def test_excepthook(self): 

+

1086 self.make_file("excepthook.py", """\ 

+

1087 import sys 

+

1088 

+

1089 def excepthook(*args): 

+

1090 print('in excepthook') 

+

1091 if maybe == 2: 

+

1092 print('definitely') 

+

1093 

+

1094 sys.excepthook = excepthook 

+

1095 

+

1096 maybe = 1 

+

1097 raise RuntimeError('Error Outside') 

+

1098 """) 

+

1099 cov_st, cov_out = self.run_command_status("coverage run excepthook.py") 

+

1100 py_st, py_out = self.run_command_status("python excepthook.py") 

+

1101 if not env.JYTHON: 

+

1102 assert cov_st == py_st 

+

1103 assert cov_st == 1 

+

1104 

+

1105 assert "in excepthook" in py_out 

+

1106 assert cov_out == py_out 

+

1107 

+

1108 # Read the coverage file and see that excepthook.py has 7 lines 

+

1109 # executed. 

+

1110 data = coverage.CoverageData() 

+

1111 data.read() 

+

1112 assert line_counts(data)['excepthook.py'] == 7 

+

1113 

+

1114 @pytest.mark.skipif(not env.CPYTHON, 

+

1115 reason="non-CPython handles excepthook exits differently, punt for now." 

+

1116 ) 

+

1117 def test_excepthook_exit(self): 

+

1118 self.make_file("excepthook_exit.py", """\ 

+

1119 import sys 

+

1120 

+

1121 def excepthook(*args): 

+

1122 print('in excepthook') 

+

1123 sys.exit(0) 

+

1124 

+

1125 sys.excepthook = excepthook 

+

1126 

+

1127 raise RuntimeError('Error Outside') 

+

1128 """) 

+

1129 cov_st, cov_out = self.run_command_status("coverage run excepthook_exit.py") 

+

1130 py_st, py_out = self.run_command_status("python excepthook_exit.py") 

+

1131 assert cov_st == py_st 

+

1132 assert cov_st == 0 

+

1133 

+

1134 assert "in excepthook" in py_out 

+

1135 assert cov_out == py_out 

+

1136 

+

1137 @pytest.mark.skipif(env.PYPY, reason="PyPy handles excepthook throws differently.") 

+

1138 def test_excepthook_throw(self): 

+

1139 self.make_file("excepthook_throw.py", """\ 

+

1140 import sys 

+

1141 

+

1142 def excepthook(*args): 

+

1143 # Write this message to stderr so that we don't have to deal 

+

1144 # with interleaved stdout/stderr comparisons in the assertions 

+

1145 # in the test. 

+

1146 sys.stderr.write('in excepthook\\n') 

+

1147 raise RuntimeError('Error Inside') 

+

1148 

+

1149 sys.excepthook = excepthook 

+

1150 

+

1151 raise RuntimeError('Error Outside') 

+

1152 """) 

+

1153 cov_st, cov_out = self.run_command_status("coverage run excepthook_throw.py") 

+

1154 py_st, py_out = self.run_command_status("python excepthook_throw.py") 

+

1155 if not env.JYTHON: 

+

1156 assert cov_st == py_st 

+

1157 assert cov_st == 1 

+

1158 

+

1159 assert "in excepthook" in py_out 

+

1160 assert cov_out == py_out 

+

1161 

+

1162 

+

1163@pytest.mark.skipif(env.JYTHON, reason="Coverage command names don't work on Jython") 

+

1164class AliasedCommandTest(CoverageTest): 

+

1165 """Tests of the version-specific command aliases.""" 

+

1166 

+

1167 run_in_temp_dir = False 

+

1168 

+

1169 def test_major_version_works(self): 

+

1170 # "coverage2" works on py2 

+

1171 cmd = "coverage%d" % sys.version_info[0] 

+

1172 out = self.run_command(cmd) 

+

1173 assert "Code coverage for Python" in out 

+

1174 

+

1175 def test_wrong_alias_doesnt_work(self): 

+

1176 # "coverage3" doesn't work on py2 

+

1177 assert sys.version_info[0] in [2, 3] # Let us know when Python 4 is out... 

+

1178 badcmd = "coverage%d" % (5 - sys.version_info[0]) 

+

1179 out = self.run_command(badcmd) 

+

1180 assert "Code coverage for Python" not in out 

+

1181 

+

1182 def test_specific_alias_works(self): 

+

1183 # "coverage-2.7" works on py2.7 

+

1184 cmd = "coverage-%d.%d" % sys.version_info[:2] 

+

1185 out = self.run_command(cmd) 

+

1186 assert "Code coverage for Python" in out 

+

1187 

+

1188 def test_aliases_used_in_messages(self): 

+

1189 cmds = [ 

+

1190 "coverage", 

+

1191 "coverage%d" % sys.version_info[0], 

+

1192 "coverage-%d.%d" % sys.version_info[:2], 

+

1193 ] 

+

1194 for cmd in cmds: 

+

1195 out = self.run_command("%s foobar" % cmd) 

+

1196 assert "Unknown command: 'foobar'" in out 

+

1197 assert "Use '%s help' for help" % cmd in out 

+

1198 

+

1199 

+

1200class PydocTest(CoverageTest): 

+

1201 """Test that pydoc can get our information.""" 

+

1202 

+

1203 run_in_temp_dir = False 

+

1204 

+

1205 def assert_pydoc_ok(self, name, thing): 

+

1206 """Check that pydoc of `name` finds the docstring from `thing`.""" 

+

1207 # Run pydoc. 

+

1208 out = self.run_command("python -m pydoc " + name) 

+

1209 # It should say "Help on..", and not have a traceback 

+

1210 assert out.startswith("Help on ") 

+

1211 assert "Traceback" not in out 

+

1212 

+

1213 # All of the lines in the docstring should be there somewhere. 

+

1214 for line in thing.__doc__.splitlines(): 

+

1215 assert line.strip() in out 

+

1216 

+

1217 def test_pydoc_coverage(self): 

+

1218 self.assert_pydoc_ok("coverage", coverage) 

+

1219 

+

1220 def test_pydoc_coverage_coverage(self): 

+

1221 self.assert_pydoc_ok("coverage.Coverage", coverage.Coverage) 

+

1222 

+

1223 

+

1224class FailUnderTest(CoverageTest): 

+

1225 """Tests of the --fail-under switch.""" 

+

1226 

+

1227 def setup_test(self): 

+

1228 super(FailUnderTest, self).setup_test() 

+

1229 self.make_file("forty_two_plus.py", """\ 

+

1230 # I have 42.857% (3/7) coverage! 

+

1231 a = 1 

+

1232 b = 2 

+

1233 if a > 3: 

+

1234 b = 4 

+

1235 c = 5 

+

1236 d = 6 

+

1237 e = 7 

+

1238 """) 

+

1239 st, _ = self.run_command_status("coverage run --source=. forty_two_plus.py") 

+

1240 assert st == 0 

+

1241 

+

1242 def test_report_43_is_ok(self): 

+

1243 st, out = self.run_command_status("coverage report --fail-under=43") 

+

1244 assert st == 0 

+

1245 assert self.last_line_squeezed(out) == "TOTAL 7 4 43%" 

+

1246 

+

1247 def test_report_43_is_not_ok(self): 

+

1248 st, out = self.run_command_status("coverage report --fail-under=44") 

+

1249 assert st == 2 

+

1250 expected = "Coverage failure: total of 43 is less than fail-under=44" 

+

1251 assert expected == self.last_line_squeezed(out) 

+

1252 

+

1253 def test_report_42p86_is_not_ok(self): 

+

1254 self.make_file(".coveragerc", "[report]\nprecision = 2") 

+

1255 st, out = self.run_command_status("coverage report --fail-under=42.88") 

+

1256 assert st == 2 

+

1257 expected = "Coverage failure: total of 42.86 is less than fail-under=42.88" 

+

1258 assert expected == self.last_line_squeezed(out) 

+

1259 
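Editorial aside (not part of the reported test file): a quick arithmetic check of the forty_two_plus.py fixture used by FailUnderTest above, showing why the default-precision report says 43% while precision = 2 reports 42.86%.

# Editorial sketch only; forty_two_plus.py has 7 statements and the block
# guarded by "if a > 3:" never runs, so 3 of 7 statements are covered.
covered, statements = 3, 7
total = 100.0 * covered / statements      # 42.857142...%
assert round(total) == 43                 # default precision: "--fail-under=43" passes
assert round(total, 2) == 42.86           # precision = 2: fails against fail-under=42.88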

+

1260 

+

1261class FailUnderNoFilesTest(CoverageTest): 

+

1262 """Test that nothing to report results in an error exit status.""" 

+

1263 def test_report(self): 

+

1264 self.make_file(".coveragerc", "[report]\nfail_under = 99\n") 

+

1265 st, out = self.run_command_status("coverage report") 

+

1266 assert 'No data to report.' in out 

+

1267 assert st == 1 

+

1268 

+

1269 

+

1270class FailUnderEmptyFilesTest(CoverageTest): 

+

1271 """Test that empty files produce the proper fail_under exit status.""" 

+

1272 def test_report(self): 

+

1273 self.make_file(".coveragerc", "[report]\nfail_under = 99\n") 

+

1274 self.make_file("empty.py", "") 

+

1275 st, _ = self.run_command_status("coverage run empty.py") 

+

1276 assert st == 0 

+

1277 st, _ = self.run_command_status("coverage report") 

+

1278 assert st == 2 

+

1279 

+

1280 

+

1281@pytest.mark.skipif(env.JYTHON, reason="Jython doesn't like accented file names") 

+

1282class UnicodeFilePathsTest(CoverageTest): 

+

1283 """Tests of using non-ascii characters in the names of files.""" 

+

1284 

+

1285 def test_accented_dot_py(self): 

+

1286 # Make a file with a non-ascii character in the filename. 

+

1287 self.make_file(u"h\xe2t.py", "print('accented')") 

+

1288 out = self.run_command(u"coverage run --source=. h\xe2t.py") 

+

1289 assert out == "accented\n" 

+

1290 

+

1291 # The HTML report uses ascii-encoded HTML entities. 

+

1292 out = self.run_command("coverage html") 

+

1293 assert out == "" 

+

1294 self.assert_exists(u"htmlcov/h\xe2t_py.html") 

+

1295 with open("htmlcov/index.html") as indexf: 

+

1296 index = indexf.read() 

+

1297 assert '<a href="h&#226;t_py.html">h&#226;t.py</a>' in index 

+

1298 

+

1299 # The XML report is always UTF8-encoded. 

+

1300 out = self.run_command("coverage xml") 

+

1301 assert out == "" 

+

1302 with open("coverage.xml", "rb") as xmlf: 

+

1303 xml = xmlf.read() 

+

1304 assert u' filename="h\xe2t.py"'.encode('utf8') in xml 

+

1305 assert u' name="h\xe2t.py"'.encode('utf8') in xml 

+

1306 

+

1307 report_expected = ( 

+

1308 u"Name Stmts Miss Cover\n" 

+

1309 u"----------------------------\n" 

+

1310 u"h\xe2t.py 1 0 100%\n" 

+

1311 u"----------------------------\n" 

+

1312 u"TOTAL 1 0 100%\n" 

+

1313 ) 

+

1314 

+

1315 if env.PY2: 

+

1316 report_expected = report_expected.encode(output_encoding()) 

+

1317 

+

1318 out = self.run_command("coverage report") 

+

1319 assert out == report_expected 

+

1320 

+

1321 def test_accented_directory(self): 

+

1322 # Make a file with a non-ascii character in the directory name. 

+

1323 self.make_file(u"\xe2/accented.py", "print('accented')") 

+

1324 out = self.run_command(u"coverage run --source=. \xe2/accented.py") 

+

1325 assert out == "accented\n" 

+

1326 

+

1327 # The HTML report uses ascii-encoded HTML entities. 

+

1328 out = self.run_command("coverage html") 

+

1329 assert out == "" 

+

1330 self.assert_exists(u"htmlcov/\xe2_accented_py.html") 

+

1331 with open("htmlcov/index.html") as indexf: 

+

1332 index = indexf.read() 

+

1333 assert '<a href="&#226;_accented_py.html">&#226;%saccented.py</a>' % os.sep in index 

+

1334 

+

1335 # The XML report is always UTF8-encoded. 

+

1336 out = self.run_command("coverage xml") 

+

1337 assert out == "" 

+

1338 with open("coverage.xml", "rb") as xmlf: 

+

1339 xml = xmlf.read() 

+

1340 assert b' filename="\xc3\xa2/accented.py"' in xml 

+

1341 assert b' name="accented.py"' in xml 

+

1342 

+

1343 dom = ElementTree.parse("coverage.xml") 

+

1344 elts = dom.findall(u".//package[@name='â']") 

+

1345 assert len(elts) == 1 

+

1346 assert elts[0].attrib == { 

+

1347 "branch-rate": u"0", 

+

1348 "complexity": u"0", 

+

1349 "line-rate": u"1", 

+

1350 "name": u"â", 

+

1351 } 

+

1352 

+

1353 report_expected = ( 

+

1354 u"Name Stmts Miss Cover\n" 

+

1355 u"-----------------------------------\n" 

+

1356 u"\xe2%saccented.py 1 0 100%%\n" 

+

1357 u"-----------------------------------\n" 

+

1358 u"TOTAL 1 0 100%%\n" 

+

1359 ) % os.sep 

+

1360 

+

1361 if env.PY2: 

+

1362 report_expected = report_expected.encode(output_encoding()) 

+

1363 

+

1364 out = self.run_command("coverage report") 

+

1365 assert out == report_expected 

+

1366 

+

1367 

+

1368@pytest.mark.skipif(env.WINDOWS, reason="Windows can't delete the directory in use.") 

+

1369class YankedDirectoryTest(CoverageTest): 

+

1370 """Tests of what happens when the current directory is deleted.""" 

+

1371 

+

1372 BUG_806 = """\ 

+

1373 import os 

+

1374 import sys 

+

1375 import tempfile 

+

1376 

+

1377 tmpdir = tempfile.mkdtemp() 

+

1378 os.chdir(tmpdir) 

+

1379 os.rmdir(tmpdir) 

+

1380 print(sys.argv[1]) 

+

1381 """ 

+

1382 

+

1383 def test_removing_directory(self): 

+

1384 self.make_file("bug806.py", self.BUG_806) 

+

1385 out = self.run_command("coverage run bug806.py noerror") 

+

1386 assert out == "noerror\n" 

+

1387 

+

1388 def test_removing_directory_with_error(self): 

+

1389 self.make_file("bug806.py", self.BUG_806) 

+

1390 out = self.run_command("coverage run bug806.py") 

+

1391 path = python_reported_file('bug806.py') 

+

1392 assert out == textwrap.dedent("""\ 

+

1393 Traceback (most recent call last): 

+

1394 File "{}", line 8, in <module> 

+

1395 print(sys.argv[1]) 

+

1396 IndexError: list index out of range 

+

1397 """.format(path)) 

+

1398 

+

1399 

+

1400def possible_pth_dirs(): 

+

1401 """Produce a sequence of directories for trying to write .pth files.""" 

+

1402 # First look through sys.path, and if we find a .pth file, then it's a good 

+

1403 # place to put ours. 

+

1404 for pth_dir in sys.path: # pragma: part covered 

+

1405 pth_files = glob.glob(os.path.join(pth_dir, "*.pth")) 

+

1406 if pth_files: 

+

1407 yield pth_dir 

+

1408 

+

1409 # If we're still looking, then try the Python library directory. 

+

1410 # https://github.com/nedbat/coveragepy/issues/339 

+

1411 yield sysconfig.get_python_lib() # pragma: cant happen 

+

1412 

+

1413 

+

1414def find_writable_pth_directory(): 

+

1415 """Find a place to write a .pth file.""" 

+

1416 for pth_dir in possible_pth_dirs(): # pragma: part covered 

+

1417 try_it = os.path.join(pth_dir, "touch_{}.it".format(WORKER)) 

+

1418 with open(try_it, "w") as f: 

+

1419 try: 

+

1420 f.write("foo") 

+

1421 except (IOError, OSError): # pragma: cant happen 

+

1422 continue 

+

1423 

+

1424 os.remove(try_it) 

+

1425 return pth_dir 

+

1426 

+

1427 return None # pragma: cant happen 

+

1428 

+

1429WORKER = os.environ.get('PYTEST_XDIST_WORKER', '') 

+

1430PTH_DIR = find_writable_pth_directory() 

+

1431 

+

1432 

+

1433def persistent_remove(path): 

+

1434 """Remove a file, and retry for a while if you can't.""" 

+

1435 tries = 100 

+

1436 while tries: # pragma: part covered 

+

1437 try: 

+

1438 os.remove(path) 

+

1439 except OSError: 

+

1440 tries -= 1 

+

1441 time.sleep(.05) 

+

1442 else: 

+

1443 return 

+

1444 raise Exception("Sorry, couldn't remove {!r}".format(path)) # pragma: cant happen 

+

1445 

+

1446 

+

1447class ProcessCoverageMixin(object): 

+

1448 """Set up a .pth file to coverage-measure all sub-processes.""" 

+

1449 

+

1450 def setup_test(self): 

+

1451 super(ProcessCoverageMixin, self).setup_test() 

+

1452 

+

1453 # Create the .pth file. 

+

1454 assert PTH_DIR 

+

1455 pth_contents = "import coverage; coverage.process_startup()\n" 

+

1456 pth_path = os.path.join(PTH_DIR, "subcover_{}.pth".format(WORKER)) 

+

1457 with open(pth_path, "w") as pth: 

+

1458 pth.write(pth_contents) 

+

1459 

+

1460 self.addCleanup(persistent_remove, pth_path) 

+

1461 

+

1462 
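Editorial aside (not part of the reported test file): the mixin above relies on coverage.py's subprocess hook, a .pth file that calls coverage.process_startup() in every new interpreter, pointed at a configuration file through the COVERAGE_PROCESS_START environment variable. Below is a minimal sketch of that setup, assuming a writable site-packages directory; the helper name enable_subprocess_coverage is hypothetical.

import os
import sysconfig

def enable_subprocess_coverage(config_path):
    """Hypothetical helper: install a .pth hook so child processes are measured."""
    # Write the one-line hook into the interpreter's site-packages directory.
    pth_dir = sysconfig.get_paths()["purelib"]
    pth_path = os.path.join(pth_dir, "subcover.pth")
    with open(pth_path, "w") as pth:
        pth.write("import coverage; coverage.process_startup()\n")
    # Tell the hook which configuration file to use when it starts measuring.
    os.environ["COVERAGE_PROCESS_START"] = config_path
    return pth_path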

+

1463@pytest.mark.skipif(env.METACOV, reason="Can't test sub-process pth file during metacoverage") 

+

1464class ProcessStartupTest(ProcessCoverageMixin, CoverageTest): 

+

1465 """Test that we can measure coverage in sub-processes.""" 

+

1466 

+

1467 def setup_test(self): 

+

1468 super(ProcessStartupTest, self).setup_test() 

+

1469 

+

1470 # Main will run sub.py 

+

1471 self.make_file("main.py", """\ 

+

1472 import os, os.path, sys 

+

1473 ex = os.path.basename(sys.executable) 

+

1474 os.system(ex + " sub.py") 

+

1475 """) 

+

1476 # sub.py will write a few lines. 

+

1477 self.make_file("sub.py", """\ 

+

1478 f = open("out.txt", "w") 

+

1479 f.write("Hello, world!\\n") 

+

1480 f.close() 

+

1481 """) 

+

1482 

+

1483 def test_subprocess_with_pth_files(self): 

+

1484 # An existing data file should not be read when a subprocess gets 

+

1485 # measured automatically. Create the data file here with bogus data in 

+

1486 # it. 

+

1487 data = coverage.CoverageData(".mycovdata") 

+

1488 data.add_lines({os.path.abspath('sub.py'): dict.fromkeys(range(100))}) 

+

1489 data.write() 

+

1490 

+

1491 self.make_file("coverage.ini", """\ 

+

1492 [run] 

+

1493 data_file = .mycovdata 

+

1494 """) 

+

1495 self.set_environ("COVERAGE_PROCESS_START", "coverage.ini") 

+

1496 import main # pylint: disable=unused-import, import-error 

+

1497 

+

1498 with open("out.txt") as f: 

+

1499 assert f.read() == "Hello, world!\n" 

+

1500 

+

1501 # Read the data from .coverage 

+

1502 self.assert_exists(".mycovdata") 

+

1503 data = coverage.CoverageData(".mycovdata") 

+

1504 data.read() 

+

1505 assert line_counts(data)['sub.py'] == 3 

+

1506 

+

1507 def test_subprocess_with_pth_files_and_parallel(self): 

+

1508 # https://github.com/nedbat/coveragepy/issues/492 

+

1509 self.make_file("coverage.ini", """\ 

+

1510 [run] 

+

1511 parallel = true 

+

1512 """) 

+

1513 

+

1514 self.set_environ("COVERAGE_PROCESS_START", "coverage.ini") 

+

1515 self.run_command("coverage run main.py") 

+

1516 

+

1517 with open("out.txt") as f: 

+

1518 assert f.read() == "Hello, world!\n" 

+

1519 

+

1520 self.run_command("coverage combine") 

+

1521 

+

1522 # assert that the combined .coverage data file is correct 

+

1523 self.assert_exists(".coverage") 

+

1524 data = coverage.CoverageData() 

+

1525 data.read() 

+

1526 assert line_counts(data)['sub.py'] == 3 

+

1527 

+

1528 # assert that there are *no* extra data files left over after a combine 

+

1529 data_files = glob.glob(os.getcwd() + '/.coverage*') 

+

1530 msg = ( 

+

1531 "Expected only .coverage after combine, looks like there are " + 

+

1532 "extra data files that were not cleaned up: %r" % data_files 

+

1533 ) 

+

1534 assert len(data_files) == 1, msg 

+

1535 

+

1536 

+

1537class ProcessStartupWithSourceTest(ProcessCoverageMixin, CoverageTest): 

+

1538 """Show that we can configure {[run]source} during process-level coverage. 

+

1539 

+

1540 There are three interesting variables, for a total of eight tests: 

+

1541 

+

1542 1. -m versus a simple script argument (for example, `python myscript`), 

+

1543 

+

1544 2. filtering for the top-level (main.py) or second-level (sub.py) 

+

1545 module, and 

+

1546 

+

1547 3. whether the files are in a package or not. 

+

1548 

+

1549 """ 

+

1550 

+

1551 def assert_pth_and_source_work_together( 

+

1552 self, dashm, package, source 

+

1553 ): 

+

1554 """Run the test for a particular combination of factors. 

+

1555 

+

1556 The arguments are all strings: 

+

1557 

+

1558 * `dashm`: Either "" (run the program as a file) or "-m" (run the 

+

1559 program as a module). 

+

1560 

+

1561 * `package`: Either "" (put the source at the top level) or a 

+

1562 package name to use to hold the source. 

+

1563 

+

1564 * `source`: Either "main" or "sub", which file to use as the 

+

1565 ``--source`` argument. 

+

1566 

+

1567 """ 

+

1568 def fullname(modname): 

+

1569 """What is the full module name for `modname` for this test?""" 

+

1570 if package and dashm: 

+

1571 return '.'.join((package, modname)) 

+

1572 else: 

+

1573 return modname 

+

1574 

+

1575 def path(basename): 

+

1576 """Where should `basename` be created for this test?""" 

+

1577 return os.path.join(package, basename) 

+

1578 

+

1579 # Main will run sub.py. 

+

1580 self.make_file(path("main.py"), """\ 

+

1581 import %s 

+

1582 a = 2 

+

1583 b = 3 

+

1584 """ % fullname('sub')) 

+

1585 if package: 

+

1586 self.make_file(path("__init__.py"), "") 

+

1587 # sub.py will write a few lines. 

+

1588 self.make_file(path("sub.py"), """\ 

+

1589 # Avoid 'with' so Jython can play along. 

+

1590 f = open("out.txt", "w") 

+

1591 f.write("Hello, world!") 

+

1592 f.close() 

+

1593 """) 

+

1594 self.make_file("coverage.ini", """\ 

+

1595 [run] 

+

1596 source = %s 

+

1597 """ % fullname(source)) 

+

1598 

+

1599 self.set_environ("COVERAGE_PROCESS_START", "coverage.ini") 

+

1600 

+

1601 if dashm: 

+

1602 cmd = "python -m %s" % fullname('main') 

+

1603 else: 

+

1604 cmd = "python %s" % path('main.py') 

+

1605 

+

1606 self.run_command(cmd) 

+

1607 

+

1608 with open("out.txt") as f: 

+

1609 assert f.read() == "Hello, world!" 

+

1610 

+

1611 # Read the data from .coverage 

+

1612 self.assert_exists(".coverage") 

+

1613 data = coverage.CoverageData() 

+

1614 data.read() 

+

1615 summary = line_counts(data) 

+

1616 print(summary) 

+

1617 assert summary[source + '.py'] == 3 

+

1618 assert len(summary) == 1 

+

1619 

+

1620 def test_dashm_main(self): 

+

1621 self.assert_pth_and_source_work_together('-m', '', 'main') 

+

1622 

+

1623 def test_script_main(self): 

+

1624 self.assert_pth_and_source_work_together('', '', 'main') 

+

1625 

+

1626 def test_dashm_sub(self): 

+

1627 self.assert_pth_and_source_work_together('-m', '', 'sub') 

+

1628 

+

1629 def test_script_sub(self): 

+

1630 self.assert_pth_and_source_work_together('', '', 'sub') 

+

1631 

+

1632 def test_dashm_pkg_main(self): 

+

1633 self.assert_pth_and_source_work_together('-m', 'pkg', 'main') 

+

1634 

+

1635 def test_script_pkg_main(self): 

+

1636 self.assert_pth_and_source_work_together('', 'pkg', 'main') 

+

1637 

+

1638 def test_dashm_pkg_sub(self): 

+

1639 self.assert_pth_and_source_work_together('-m', 'pkg', 'sub') 

+

1640 

+

1641 def test_script_pkg_sub(self): 

+

1642 self.assert_pth_and_source_work_together('', 'pkg', 'sub') 

diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_python_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_python_py.html
new file mode 100644
index 000000000..1cc79e707
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_python_py.html
@@ -0,0 +1,127 @@
Coverage for tests/test_python.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests of coverage/python.py""" 

+

5 

+

6import os 

+

7import sys 

+

8 

+

9import pytest 

+

10 

+

11from coverage import env 

+

12from coverage.python import get_zip_bytes, source_for_file 

+

13 

+

14from tests.coveragetest import CoverageTest 

+

15 

+

16 

+

17class GetZipBytesTest(CoverageTest): 

+

18 """Tests of `get_zip_bytes`.""" 

+

19 

+

20 run_in_temp_dir = False 

+

21 

+

22 def test_get_encoded_zip_files(self): 

+

23 # See igor.py, do_zipmods, for the text of these files. 

+

24 zip_file = "tests/zipmods.zip" 

+

25 sys.path.append(zip_file) # So we can import the files. 

+

26 for encoding in ["utf8", "gb2312", "hebrew", "shift_jis", "cp1252"]: 

+

27 filename = zip_file + "/encoded_" + encoding + ".py" 

+

28 filename = filename.replace("/", os.sep) 

+

29 zip_data = get_zip_bytes(filename) 

+

30 zip_text = zip_data.decode(encoding) 

+

31 assert 'All OK' in zip_text 

+

32 # Run the code to see that we really got it encoded properly. 

+

33 __import__("encoded_"+encoding) 

+

34 

+

35 

+

36def test_source_for_file(tmpdir): 

+

37 path = tmpdir.join("a.py") 

+

38 src = str(path) 

+

39 assert source_for_file(src) == src 

+

40 assert source_for_file(src + 'c') == src 

+

41 assert source_for_file(src + 'o') == src 

+

42 unknown = src + 'FOO' 

+

43 assert source_for_file(unknown) == unknown 

+

44 

+

45 

+

46@pytest.mark.skipif(not env.WINDOWS, reason="not windows") 

+

47def test_source_for_file_windows(tmpdir): 

+

48 path = tmpdir.join("a.py") 

+

49 src = str(path) 

+

50 

+

51 # On windows if a pyw exists, it is an acceptable source 

+

52 path_windows = tmpdir.ensure("a.pyw") 

+

53 assert str(path_windows) == source_for_file(src + 'c') 

+

54 

+

55 # If both pyw and py exist, py is preferred 

+

56 path.ensure(file=True) 

+

57 assert source_for_file(src + 'c') == src 

+

58 

+

59 

+

60def test_source_for_file_jython(): 

+

61 assert source_for_file("a$py.class") == "a.py" 

diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_results_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_results_py.html
new file mode 100644
index 000000000..7bccc7da2
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_results_py.html
@@ -0,0 +1,216 @@
Coverage for tests/test_results.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests for coverage.py's results analysis.""" 

+

5 

+

6import pytest 

+

7 

+

8from coverage.misc import CoverageException 

+

9from coverage.results import format_lines, Numbers, should_fail_under 

+

10 

+

11from tests.coveragetest import CoverageTest 

+

12 

+

13 

+

14class NumbersTest(CoverageTest): 

+

15 """Tests for coverage.py's numeric measurement summaries.""" 

+

16 

+

17 run_in_temp_dir = False 

+

18 

+

19 def test_basic(self): 

+

20 n1 = Numbers(n_files=1, n_statements=200, n_missing=20) 

+

21 assert n1.n_statements == 200 

+

22 assert n1.n_executed == 180 

+

23 assert n1.n_missing == 20 

+

24 assert n1.pc_covered == 90 

+

25 

+

26 def test_addition(self): 

+

27 n1 = Numbers(n_files=1, n_statements=200, n_missing=20) 

+

28 n2 = Numbers(n_files=1, n_statements=10, n_missing=8) 

+

29 n3 = n1 + n2 

+

30 assert n3.n_files == 2 

+

31 assert n3.n_statements == 210 

+

32 assert n3.n_executed == 182 

+

33 assert n3.n_missing == 28 

+

34 assert round(abs(n3.pc_covered-86.666666666), 7) == 0 

+

35 

+

36 def test_sum(self): 

+

37 n1 = Numbers(n_files=1, n_statements=200, n_missing=20) 

+

38 n2 = Numbers(n_files=1, n_statements=10, n_missing=8) 

+

39 n3 = sum([n1, n2]) 

+

40 assert n3.n_files == 2 

+

41 assert n3.n_statements == 210 

+

42 assert n3.n_executed == 182 

+

43 assert n3.n_missing == 28 

+

44 assert round(abs(n3.pc_covered-86.666666666), 7) == 0 

+

45 

+

46 def test_pc_covered_str(self): 

+

47 # Numbers._precision is a global, which is bad. 

+

48 Numbers.set_precision(0) 

+

49 n0 = Numbers(n_files=1, n_statements=1000, n_missing=0) 

+

50 n1 = Numbers(n_files=1, n_statements=1000, n_missing=1) 

+

51 n999 = Numbers(n_files=1, n_statements=1000, n_missing=999) 

+

52 n1000 = Numbers(n_files=1, n_statements=1000, n_missing=1000) 

+

53 assert n0.pc_covered_str == "100" 

+

54 assert n1.pc_covered_str == "99" 

+

55 assert n999.pc_covered_str == "1" 

+

56 assert n1000.pc_covered_str == "0" 

+

57 

+

58 def test_pc_covered_str_precision(self): 

+

59 # Numbers._precision is a global, which is bad. 

+

60 Numbers.set_precision(1) 

+

61 n0 = Numbers(n_files=1, n_statements=10000, n_missing=0) 

+

62 n1 = Numbers(n_files=1, n_statements=10000, n_missing=1) 

+

63 n9999 = Numbers(n_files=1, n_statements=10000, n_missing=9999) 

+

64 n10000 = Numbers(n_files=1, n_statements=10000, n_missing=10000) 

+

65 assert n0.pc_covered_str == "100.0" 

+

66 assert n1.pc_covered_str == "99.9" 

+

67 assert n9999.pc_covered_str == "0.1" 

+

68 assert n10000.pc_covered_str == "0.0" 

+

69 Numbers.set_precision(0) 

+

70 

+

71 def test_covered_ratio(self): 

+

72 n = Numbers(n_files=1, n_statements=200, n_missing=47) 

+

73 assert n.ratio_covered == (153, 200) 

+

74 

+

75 n = Numbers( 

+

76 n_files=1, n_statements=200, n_missing=47, 

+

77 n_branches=10, n_missing_branches=3, n_partial_branches=1000, 

+

78 ) 

+

79 assert n.ratio_covered == (160, 210) 

+

80 
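Editorial aside (not part of the reported test file): a quick check of the (160, 210) ratio asserted above, combining covered statements and covered branches.

# Editorial sketch of the arithmetic behind ratio_covered == (160, 210).
n_statements, n_missing = 200, 47
n_branches, n_missing_branches = 10, 3
numerator = (n_statements - n_missing) + (n_branches - n_missing_branches)  # 153 + 7
denominator = n_statements + n_branches                                     # 210
assert (numerator, denominator) == (160, 210)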

+

81 

+

82@pytest.mark.parametrize("total, fail_under, precision, result", [ 

+

83 # fail_under==0 means anything is fine! 

+

84 (0, 0, 0, False), 

+

85 (0.001, 0, 0, False), 

+

86 # very small fail_under is possible to fail. 

+

87 (0.001, 0.01, 0, True), 

+

88 # Rounding should work properly. 

+

89 (42.1, 42, 0, False), 

+

90 (42.1, 43, 0, True), 

+

91 (42.857, 42, 0, False), 

+

92 (42.857, 43, 0, False), 

+

93 (42.857, 44, 0, True), 

+

94 (42.857, 42.856, 3, False), 

+

95 (42.857, 42.858, 3, True), 

+

96 # If you don't specify precision, your fail-under is rounded. 

+

97 (42.857, 42.856, 0, False), 

+

98 # Values near 100 should only be treated as 100 if they are 100. 

+

99 (99.8, 100, 0, True), 

+

100 (100.0, 100, 0, False), 

+

101 (99.8, 99.7, 1, False), 

+

102 (99.88, 99.90, 2, True), 

+

103 (99.999, 100, 1, True), 

+

104 (99.999, 100, 2, True), 

+

105 (99.999, 100, 3, True), 

+

106]) 

+

107def test_should_fail_under(total, fail_under, precision, result): 

+

108 assert should_fail_under(float(total), float(fail_under), precision) == result 

+

109 
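Editorial aside (not part of the reported test file): a minimal sketch consistent with the parametrized cases above, not coverage.py's actual implementation. The measured total is compared as it would be displayed at the configured precision, and a fail-under of exactly 100 only passes when the total is exactly 100.

def sketch_should_fail_under(total, fail_under, precision):
    """Editorial sketch matching the table above; not the library's code."""
    # A fail-under of exactly 100 is only satisfied by a total of exactly 100.
    if fail_under == 100.0 and total != 100.0:
        return True
    # Otherwise compare against the total as it would be displayed.
    return round(total, precision) < fail_under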

+

110 

+

111def test_should_fail_under_invalid_value(): 

+

112 with pytest.raises(CoverageException, match=r"fail_under=101"): 

+

113 should_fail_under(100.0, 101, 0) 

+

114 

+

115 

+

116@pytest.mark.parametrize("statements, lines, result", [ 

+

117 ({1,2,3,4,5,10,11,12,13,14}, {1,2,5,10,11,13,14}, "1-2, 5-11, 13-14"), 

+

118 ([1,2,3,4,5,10,11,12,13,14,98,99], [1,2,5,10,11,13,14,99], "1-2, 5-11, 13-14, 99"), 

+

119 ([1,2,3,4,98,99,100,101,102,103,104], [1,2,99,102,103,104], "1-2, 99, 102-104"), 

+

120 ([17], [17], "17"), 

+

121 ([90,91,92,93,94,95], [90,91,92,93,94,95], "90-95"), 

+

122 ([1, 2, 3, 4, 5], [], ""), 

+

123 ([1, 2, 3, 4, 5], [4], "4"), 

+

124]) 

+

125def test_format_lines(statements, lines, result): 

+

126 assert format_lines(statements, lines) == result 

+

127 
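Editorial aside (not part of the reported test file): a sketch of the line-range collapsing the cases above describe. A run of shown lines is only broken by a line number that is itself a statement, which is why 5 and 10-11 merge into "5-11" when 6-9 are not statements. This is an illustration, not the library's implementation.

def sketch_format_lines(statements, lines):
    """Editorial sketch of the range formatting shown in the cases above."""
    shown = set(lines)
    ranges = []
    start = end = None
    for line in sorted(statements):
        if line in shown:
            if start is None:
                start = line
            end = line
        elif start is not None:      # a statement that is not shown ends the run
            ranges.append((start, end))
            start = None
    if start is not None:
        ranges.append((start, end))
    return ", ".join(str(a) if a == b else "{}-{}".format(a, b) for a, b in ranges)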

+

128 

+

129@pytest.mark.parametrize("statements, lines, arcs, result", [ 

+

130 ( 

+

131 {1,2,3,4,5,10,11,12,13,14}, 

+

132 {1,2,5,10,11,13,14}, 

+

133 (), 

+

134 "1-2, 5-11, 13-14" 

+

135 ), 

+

136 ( 

+

137 [1,2,3,4,5,10,11,12,13,14,98,99], 

+

138 [1,2,5,10,11,13,14,99], 

+

139 [(3, [4]), (5, [10, 11]), (98, [100, -1])], 

+

140 "1-2, 3->4, 5-11, 13-14, 98->100, 98->exit, 99" 

+

141 ), 

+

142 ( 

+

143 [1,2,3,4,98,99,100,101,102,103,104], 

+

144 [1,2,99,102,103,104], 

+

145 [(3, [4]), (104, [-1])], 

+

146 "1-2, 3->4, 99, 102-104" 

+

147 ), 

+

148]) 

+

149def test_format_lines_with_arcs(statements, lines, arcs, result): 

+

150 assert format_lines(statements, lines, arcs) == result 

diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_setup_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_setup_py.html
new file mode 100644
index 000000000..d0f8965fc
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_setup_py.html
@@ -0,0 +1,115 @@
Coverage for tests/test_setup.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

3 

+

4"""Tests of miscellaneous stuff.""" 

+

5 

+

6import sys 

+

7 

+

8import coverage 

+

9 

+

10from tests.coveragetest import CoverageTest 

+

11 

+

12 

+

13class SetupPyTest(CoverageTest): 

+

14 """Tests of setup.py""" 

+

15 

+

16 run_in_temp_dir = False 

+

17 

+

18 def setup_test(self): 

+

19 super(SetupPyTest, self).setup_test() 

+

20 # Force the most restrictive interpretation. 

+

21 self.set_environ('LC_ALL', 'C') 

+

22 

+

23 def test_metadata(self): 

+

24 status, output = self.run_command_status( 

+

25 "python setup.py --description --version --url --author" 

+

26 ) 

+

27 assert status == 0 

+

28 out = output.splitlines() 

+

29 assert "measurement" in out[0] 

+

30 assert coverage.__version__ == out[1] 

+

31 assert "github.com/nedbat/coveragepy" in out[2] 

+

32 assert "Ned Batchelder" in out[3] 

+

33 

+

34 def test_more_metadata(self): 

+

35 # Let's be sure we pick up our own setup.py 

+

36 # CoverageTest restores the original sys.path for us. 

+

37 sys.path.insert(0, '') 

+

38 from setup import setup_args 

+

39 

+

40 classifiers = setup_args['classifiers'] 

+

41 assert len(classifiers) > 7 

+

42 assert classifiers[-1].startswith("Development Status ::") 

+

43 assert "Programming Language :: Python :: %d" % sys.version_info[:1] in classifiers 

+

44 assert "Programming Language :: Python :: %d.%d" % sys.version_info[:2] in classifiers 

+

45 

+

46 long_description = setup_args['long_description'].splitlines() 

+

47 assert len(long_description) > 7 

+

48 assert long_description[0].strip() != "" 

+

49 assert long_description[-1].strip() != "" 

diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_summary_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_summary_py.html
new file mode 100644
index 000000000..1e6c80cb4
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_summary_py.html
@@ -0,0 +1,1007 @@
Coverage for tests/test_summary.py: 100.000%

1# coding: utf-8 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Test text-based summary reporting for coverage.py""" 

+

6 

+

7import glob 

+

8import os 

+

9import os.path 

+

10import py_compile 

+

11import re 

+

12 

+

13import pytest 

+

14 

+

15import coverage 

+

16from coverage import env 

+

17from coverage.backward import StringIO 

+

18from coverage.control import Coverage 

+

19from coverage.data import CoverageData 

+

20from coverage.misc import CoverageException, output_encoding 

+

21from coverage.summary import SummaryReporter 

+

22 

+

23from tests.coveragetest import CoverageTest, TESTS_DIR, UsingModulesMixin 

+

24 

+

25 

+

26class SummaryTest(UsingModulesMixin, CoverageTest): 

+

27 """Tests of the text summary reporting for coverage.py.""" 

+

28 

+

29 def make_mycode(self): 

+

30 """Make the mycode.py file when needed.""" 

+

31 self.make_file("mycode.py", """\ 

+

32 import covmod1 

+

33 import covmodzip1 

+

34 a = 1 

+

35 print('done') 

+

36 """) 

+

37 self.omit_site_packages() 

+

38 

+

39 def omit_site_packages(self): 

+

40 """Write a .coveragerc file that will omit site-packages from reports.""" 

+

41 self.make_file(".coveragerc", """\ 

+

42 [report] 

+

43 omit = */site-packages/* 

+

44 """) 

+

45 

+

46 def test_report(self): 

+

47 self.make_mycode() 

+

48 out = self.run_command("coverage run mycode.py") 

+

49 assert out == 'done\n' 

+

50 report = self.report_from_command("coverage report") 

+

51 

+

52 # Name Stmts Miss Cover 

+

53 # ------------------------------------------------------------------ 

+

54 # c:/ned/coverage/tests/modules/covmod1.py 2 0 100% 

+

55 # c:/ned/coverage/tests/zipmods.zip/covmodzip1.py 2 0 100% 

+

56 # mycode.py 4 0 100% 

+

57 # ------------------------------------------------------------------ 

+

58 # TOTAL 8 0 100% 

+

59 

+

60 assert "/coverage/__init__/" not in report 

+

61 assert "/tests/modules/covmod1.py " in report 

+

62 assert "/tests/zipmods.zip/covmodzip1.py " in report 

+

63 assert "mycode.py " in report 

+

64 assert self.last_line_squeezed(report) == "TOTAL 8 0 100%" 

+

65 

+

66 def test_report_just_one(self): 

+

67 # Try reporting just one module 

+

68 self.make_mycode() 

+

69 self.run_command("coverage run mycode.py") 

+

70 report = self.report_from_command("coverage report mycode.py") 

+

71 

+

72 # Name Stmts Miss Cover 

+

73 # ------------------------------- 

+

74 # mycode.py 4 0 100% 

+

75 # ------------------------------- 

+

76 # TOTAL 4 0 100% 

+

77 

+

78 assert self.line_count(report) == 5 

+

79 assert "/coverage/" not in report 

+

80 assert "/tests/modules/covmod1.py " not in report 

+

81 assert "/tests/zipmods.zip/covmodzip1.py " not in report 

+

82 assert "mycode.py " in report 

+

83 assert self.last_line_squeezed(report) == "TOTAL 4 0 100%" 

+

84 

+

85 def test_report_wildcard(self): 

+

86 # Try reporting using wildcards to get the modules. 

+

87 self.make_mycode() 

+

88 self.run_command("coverage run mycode.py") 

+

89 report = self.report_from_command("coverage report my*.py") 

+

90 

+

91 # Name Stmts Miss Cover 

+

92 # ------------------------------- 

+

93 # mycode.py 4 0 100% 

+

94 # ------------------------------- 

+

95 # TOTAL 4 0 100% 

+

96 

+

97 assert self.line_count(report) == 5 

+

98 assert "/coverage/" not in report 

+

99 assert "/tests/modules/covmod1.py " not in report 

+

100 assert "/tests/zipmods.zip/covmodzip1.py " not in report 

+

101 assert "mycode.py " in report 

+

102 assert self.last_line_squeezed(report) == "TOTAL 4 0 100%" 

+

103 

+

104 def test_report_omitting(self): 

+

105 # Try reporting while omitting some modules 

+

106 self.make_mycode() 

+

107 self.run_command("coverage run mycode.py") 

+

108 omit = '{}/*,*/site-packages/*'.format(TESTS_DIR) 

+

109 report = self.report_from_command("coverage report --omit '{}'".format(omit)) 

+

110 

+

111 # Name Stmts Miss Cover 

+

112 # ------------------------------- 

+

113 # mycode.py 4 0 100% 

+

114 # ------------------------------- 

+

115 # TOTAL 4 0 100% 

+

116 

+

117 assert self.line_count(report) == 5 

+

118 assert "/coverage/" not in report 

+

119 assert "/tests/modules/covmod1.py " not in report 

+

120 assert "/tests/zipmods.zip/covmodzip1.py " not in report 

+

121 assert "mycode.py " in report 

+

122 assert self.last_line_squeezed(report) == "TOTAL 4 0 100%" 

+

123 

+

124 def test_report_including(self): 

+

125 # Try reporting while including some modules 

+

126 self.make_mycode() 

+

127 self.run_command("coverage run mycode.py") 

+

128 report = self.report_from_command("coverage report --include=mycode*") 

+

129 

+

130 # Name Stmts Miss Cover 

+

131 # ------------------------------- 

+

132 # mycode.py 4 0 100% 

+

133 # ------------------------------- 

+

134 # TOTAL 4 0 100% 

+

135 

+

136 assert self.line_count(report) == 5 

+

137 assert "/coverage/" not in report 

+

138 assert "/tests/modules/covmod1.py " not in report 

+

139 assert "/tests/zipmods.zip/covmodzip1.py " not in report 

+

140 assert "mycode.py " in report 

+

141 assert self.last_line_squeezed(report) == "TOTAL 4 0 100%" 

+

142 

+

143 def test_run_source_vs_report_include(self): 

+

144 # https://github.com/nedbat/coveragepy/issues/621 

+

145 self.make_file(".coveragerc", """\ 

+

146 [run] 

+

147 source = . 

+

148 

+

149 [report] 

+

150 include = mod/*,tests/* 

+

151 """) 

+

152 # It should be OK to use that configuration. 

+

153 cov = coverage.Coverage() 

+

154 with self.assert_warnings(cov, []): 

+

155 cov.start() 

+

156 cov.stop() # pragma: nested 

+

157 

+

158 def test_run_omit_vs_report_omit(self): 

+

159 # https://github.com/nedbat/coveragepy/issues/622 

+

160 # report:omit shouldn't clobber run:omit. 

+

161 self.make_mycode() 

+

162 self.make_file(".coveragerc", """\ 

+

163 [run] 

+

164 omit = */covmodzip1.py 

+

165 

+

166 [report] 

+

167 omit = */covmod1.py 

+

168 """) 

+

169 self.run_command("coverage run mycode.py") 

+

170 

+

171 # Read the data written, to see that the right files have been omitted from running. 

+

172 covdata = CoverageData() 

+

173 covdata.read() 

+

174 files = [os.path.basename(p) for p in covdata.measured_files()] 

+

175 assert "covmod1.py" in files 

+

176 assert "covmodzip1.py" not in files 

+

177 

+

178 def test_report_branches(self): 

+

179 self.make_file("mybranch.py", """\ 

+

180 def branch(x): 

+

181 if x: 

+

182 print("x") 

+

183 return x 

+

184 branch(1) 

+

185 """) 

+

186 out = self.run_command("coverage run --source=. --branch mybranch.py") 

+

187 assert out == 'x\n' 

+

188 report = self.report_from_command("coverage report") 

+

189 

+

190 # Name Stmts Miss Branch BrPart Cover 

+

191 # ----------------------------------------------- 

+

192 # mybranch.py 5 0 2 1 86% 

+

193 # ----------------------------------------------- 

+

194 # TOTAL 5 0 2 1 86% 

+

195 

+

196 assert self.line_count(report) == 5 

+

197 assert "mybranch.py " in report 

+

198 assert self.last_line_squeezed(report) == "TOTAL 5 0 2 1 86%" 

+

199 

+

200 def test_report_show_missing(self): 

+

201 self.make_file("mymissing.py", """\ 

+

202 def missing(x, y): 

+

203 if x: 

+

204 print("x") 

+

205 return x 

+

206 if y: 

+

207 print("y") 

+

208 try: 

+

209 print("z") 

+

210 1/0 

+

211 print("Never!") 

+

212 except ZeroDivisionError: 

+

213 pass 

+

214 return x 

+

215 missing(0, 1) 

+

216 """) 

+

217 out = self.run_command("coverage run --source=. mymissing.py") 

+

218 assert out == 'y\nz\n' 

+

219 report = self.report_from_command("coverage report --show-missing") 

+

220 

+

221 # Name Stmts Miss Cover Missing 

+

222 # -------------------------------------------- 

+

223 # mymissing.py 14 3 79% 3-4, 10 

+

224 # -------------------------------------------- 

+

225 # TOTAL 14 3 79% 3-4, 10 

+

226 

+

227 assert self.line_count(report) == 5 

+

228 squeezed = self.squeezed_lines(report) 

+

229 assert squeezed[2] == "mymissing.py 14 3 79% 3-4, 10" 

+

230 assert squeezed[4] == "TOTAL 14 3 79%" 

+

231 

+

232 def test_report_show_missing_branches(self): 

+

233 self.make_file("mybranch.py", """\ 

+

234 def branch(x, y): 

+

235 if x: 

+

236 print("x") 

+

237 if y: 

+

238 print("y") 

+

239 branch(1, 1) 

+

240 """) 

+

241 self.omit_site_packages() 

+

242 out = self.run_command("coverage run --branch mybranch.py") 

+

243 assert out == 'x\ny\n' 

+

244 report = self.report_from_command("coverage report --show-missing") 

+

245 

+

246 # Name Stmts Miss Branch BrPart Cover Missing 

+

247 # ---------------------------------------------------------- 

+

248 # mybranch.py 6 0 4 2 80% 2->4, 4->exit 

+

249 # ---------------------------------------------------------- 

+

250 # TOTAL 6 0 4 2 80% 

+

251 

+

252 assert self.line_count(report) == 5 

+

253 squeezed = self.squeezed_lines(report) 

+

254 assert squeezed[2] == "mybranch.py 6 0 4 2 80% 2->4, 4->exit" 

+

255 assert squeezed[4] == "TOTAL 6 0 4 2 80%" 

+

256 

+

257 def test_report_show_missing_branches_and_lines(self): 

+

258 self.make_file("main.py", """\ 

+

259 import mybranch 

+

260 """) 

+

261 self.make_file("mybranch.py", """\ 

+

262 def branch(x, y, z): 

+

263 if x: 

+

264 print("x") 

+

265 if y: 

+

266 print("y") 

+

267 if z: 

+

268 if x and y: 

+

269 print("z") 

+

270 return x 

+

271 branch(1, 1, 0) 

+

272 """) 

+

273 self.omit_site_packages() 

+

274 out = self.run_command("coverage run --branch main.py") 

+

275 assert out == 'x\ny\n' 

+

276 report = self.report_from_command("coverage report --show-missing") 

+

277 report_lines = report.splitlines() 

+

278 

+

279 expected = [ 

+

280 'Name Stmts Miss Branch BrPart Cover Missing', 

+

281 '---------------------------------------------------------', 

+

282 'main.py 1 0 0 0 100%', 

+

283 'mybranch.py 10 2 8 3 61% 2->4, 4->6, 7-8', 

+

284 '---------------------------------------------------------', 

+

285 'TOTAL 11 2 8 3 63%', 

+

286 ] 

+

287 assert expected == report_lines 

+

288 

+

289 def test_report_skip_covered_no_branches(self): 

+

290 self.make_file("main.py", """ 

+

291 import not_covered 

+

292 

+

293 def normal(): 

+

294 print("z") 

+

295 normal() 

+

296 """) 

+

297 self.make_file("not_covered.py", """ 

+

298 def not_covered(): 

+

299 print("n") 

+

300 """) 

+

301 self.omit_site_packages() 

+

302 out = self.run_command("coverage run main.py") 

+

303 assert out == "z\n" 

+

304 report = self.report_from_command("coverage report --skip-covered --fail-under=70") 

+

305 

+

306 # Name Stmts Miss Cover 

+

307 # ------------------------------------ 

+

308 # not_covered.py 2 1 50% 

+

309 # ------------------------------------ 

+

310 # TOTAL 6 1 83% 

+

311 # 

+

312 # 1 file skipped due to complete coverage. 

+

313 

+

314 assert self.line_count(report) == 7, report 

+

315 squeezed = self.squeezed_lines(report) 

+

316 assert squeezed[2] == "not_covered.py 2 1 50%" 

+

317 assert squeezed[4] == "TOTAL 6 1 83%" 

+

318 assert squeezed[6] == "1 file skipped due to complete coverage." 

+

319 assert self.last_command_status == 0 

+

320 

+

321 def test_report_skip_covered_branches(self): 

+

322 self.make_file("main.py", """ 

+

323 import not_covered, covered 

+

324 

+

325 def normal(z): 

+

326 if z: 

+

327 print("z") 

+

328 normal(True) 

+

329 normal(False) 

+

330 """) 

+

331 self.make_file("not_covered.py", """ 

+

332 def not_covered(n): 

+

333 if n: 

+

334 print("n") 

+

335 not_covered(True) 

+

336 """) 

+

337 self.make_file("covered.py", """ 

+

338 def foo(): 

+

339 pass 

+

340 foo() 

+

341 """) 

+

342 self.omit_site_packages() 

+

343 out = self.run_command("coverage run --branch main.py") 

+

344 assert out == "n\nz\n" 

+

345 report = self.report_from_command("coverage report --skip-covered") 

+

346 

+

347 # Name Stmts Miss Branch BrPart Cover 

+

348 # -------------------------------------------------- 

+

349 # not_covered.py 4 0 2 1 83% 

+

350 # -------------------------------------------------- 

+

351 # TOTAL 13 0 4 1 94% 

+

352 # 

+

353 # 2 files skipped due to complete coverage. 

+

354 

+

355 assert self.line_count(report) == 7, report 

+

356 squeezed = self.squeezed_lines(report) 

+

357 assert squeezed[2] == "not_covered.py 4 0 2 1 83%" 

+

358 assert squeezed[4] == "TOTAL 13 0 4 1 94%" 

+

359 assert squeezed[6] == "2 files skipped due to complete coverage." 

+

360 

+

361 def test_report_skip_covered_branches_with_totals(self): 

+

362 self.make_file("main.py", """ 

+

363 import not_covered 

+

364 import also_not_run 

+

365 

+

366 def normal(z): 

+

367 if z: 

+

368 print("z") 

+

369 normal(True) 

+

370 normal(False) 

+

371 """) 

+

372 self.make_file("not_covered.py", """ 

+

373 def not_covered(n): 

+

374 if n: 

+

375 print("n") 

+

376 not_covered(True) 

+

377 """) 

+

378 self.make_file("also_not_run.py", """ 

+

379 def does_not_appear_in_this_film(ni): 

+

380 print("Ni!") 

+

381 """) 

+

382 self.omit_site_packages() 

+

383 out = self.run_command("coverage run --branch main.py") 

+

384 assert out == "n\nz\n" 

+

385 report = self.report_from_command("coverage report --skip-covered") 

+

386 

+

387 # Name Stmts Miss Branch BrPart Cover 

+

388 # -------------------------------------------------- 

+

389 # also_not_run.py 2 1 0 0 50% 

+

390 # not_covered.py 4 0 2 1 83% 

+

391 # -------------------------------------------------- 

+

392 # TOTAL 13 1 4 1 88% 

+

393 # 

+

394 # 1 file skipped due to complete coverage. 

+

395 

+

396 assert self.line_count(report) == 8, report 

+

397 squeezed = self.squeezed_lines(report) 

+

398 assert squeezed[2] == "also_not_run.py 2 1 0 0 50%" 

+

399 assert squeezed[3] == "not_covered.py 4 0 2 1 83%" 

+

400 assert squeezed[5] == "TOTAL 13 1 4 1 88%" 

+

401 assert squeezed[7] == "1 file skipped due to complete coverage." 

+

402 

+

403 def test_report_skip_covered_all_files_covered(self): 

+

404 self.make_file("main.py", """ 

+

405 def foo(): 

+

406 pass 

+

407 foo() 

+

408 """) 

+

409 out = self.run_command("coverage run --source=. --branch main.py") 

+

410 assert out == "" 

+

411 report = self.report_from_command("coverage report --skip-covered") 

+

412 

+

413 # Name Stmts Miss Branch BrPart Cover 

+

414 # ------------------------------------------- 

+

415 # ----------------------------------------- 

+

416 # TOTAL 3 0 0 0 100% 

+

417 # 

+

418 # 1 file skipped due to complete coverage. 

+

419 

+

420 assert self.line_count(report) == 6, report 

+

421 squeezed = self.squeezed_lines(report) 

+

422 assert squeezed[5] == "1 file skipped due to complete coverage." 

+

423 

+

424 def test_report_skip_covered_longfilename(self): 

+

425 self.make_file("long_______________filename.py", """ 

+

426 def foo(): 

+

427 pass 

+

428 foo() 

+

429 """) 

+

430 out = self.run_command("coverage run --source=. --branch long_______________filename.py") 

+

431 assert out == "" 

+

432 report = self.report_from_command("coverage report --skip-covered") 

+

433 

+

434 # Name Stmts Miss Branch BrPart Cover 

+

435 # ----------------------------------------- 

+

436 # ----------------------------------------- 

+

437 # TOTAL 3 0 0 0 100% 

+

438 # 

+

439 # 1 file skipped due to complete coverage. 

+

440 

+

441 assert self.line_count(report) == 6, report 

+

442 lines = self.report_lines(report) 

+

443 assert lines[0] == "Name Stmts Miss Branch BrPart Cover" 

+

444 squeezed = self.squeezed_lines(report) 

+

445 assert squeezed[5] == "1 file skipped due to complete coverage." 

+

446 

+

447 def test_report_skip_covered_no_data(self): 

+

448 report = self.report_from_command("coverage report --skip-covered") 

+

449 

+

450 # No data to report. 

+

451 

+

452 assert self.line_count(report) == 1, report 

+

453 squeezed = self.squeezed_lines(report) 

+

454 assert squeezed[0] == "No data to report." 

+

455 

+

456 def test_report_skip_empty(self): 

+

457 self.make_file("main.py", """ 

+

458 import submodule 

+

459 

+

460 def normal(): 

+

461 print("z") 

+

462 normal() 

+

463 """) 

+

464 self.make_file("submodule/__init__.py", "") 

+

465 self.omit_site_packages() 

+

466 out = self.run_command("coverage run main.py") 

+

467 assert out == "z\n" 

+

468 report = self.report_from_command("coverage report --skip-empty") 

+

469 

+

470 # Name Stmts Miss Cover 

+

471 # ------------------------------------ 

+

472 # main.py 4 0 100% 

+

473 # ------------------------------------ 

+

474 # TOTAL 4 0 100% 

+

475 # 

+

476 # 1 empty file skipped. 

+

477 

+

478 assert self.line_count(report) == 7, report 

+

479 squeezed = self.squeezed_lines(report) 

+

480 assert squeezed[2] == "main.py 4 0 100%" 

+

481 assert squeezed[4] == "TOTAL 4 0 100%" 

+

482 assert squeezed[6] == "1 empty file skipped." 

+

483 assert self.last_command_status == 0 

+

484 

+

485 def test_report_skip_empty_no_data(self): 

+

486 self.make_file("__init__.py", "") 

+

487 self.omit_site_packages() 

+

488 out = self.run_command("coverage run __init__.py") 

+

489 assert out == "" 

+

490 report = self.report_from_command("coverage report --skip-empty") 

+

491 

+

492 # Name Stmts Miss Cover 

+

493 # ------------------------------------ 

+

494 # 

+

495 # 1 empty file skipped. 

+

496 

+

497 assert self.line_count(report) == 6, report 

+

498 squeezed = self.squeezed_lines(report) 

+

499 assert squeezed[3] == "TOTAL 0 0 100%" 

+

500 assert squeezed[5] == "1 empty file skipped." 

+

501 

+

502 def test_report_precision(self): 

+

503 self.make_file(".coveragerc", """\ 

+

504 [report] 

+

505 precision = 3 

+

506 omit = */site-packages/* 

+

507 """) 

+

508 self.make_file("main.py", """ 

+

509 import not_covered, covered 

+

510 

+

511 def normal(z): 

+

512 if z: 

+

513 print("z") 

+

514 normal(True) 

+

515 normal(False) 

+

516 """) 

+

517 self.make_file("not_covered.py", """ 

+

518 def not_covered(n): 

+

519 if n: 

+

520 print("n") 

+

521 not_covered(True) 

+

522 """) 

+

523 self.make_file("covered.py", """ 

+

524 def foo(): 

+

525 pass 

+

526 foo() 

+

527 """) 

+

528 out = self.run_command("coverage run --branch main.py") 

+

529 assert out == "n\nz\n" 

+

530 report = self.report_from_command("coverage report") 

+

531 

+

532 # Name Stmts Miss Branch BrPart Cover 

+

533 # ------------------------------------------------------ 

+

534 # covered.py 3 0 0 0 100.000% 

+

535 # main.py 6 0 2 0 100.000% 

+

536 # not_covered.py 4 0 2 1 83.333% 

+

537 # ------------------------------------------------------ 

+

538 # TOTAL 13 0 4 1 94.118% 

+

539 

+

540 assert self.line_count(report) == 7, report 

+

541 squeezed = self.squeezed_lines(report) 

+

542 assert squeezed[2] == "covered.py 3 0 0 0 100.000%" 

+

543 assert squeezed[4] == "not_covered.py 4 0 2 1 83.333%" 

+

544 assert squeezed[6] == "TOTAL 13 0 4 1 94.118%" 

+

545 

+

546 def test_dotpy_not_python(self): 

+

547 # We run a .py file, and when reporting, we can't parse it as Python. 

+

548 # We should get an error message in the report. 

+

549 

+

550 self.make_mycode() 

+

551 self.run_command("coverage run mycode.py") 

+

552 self.make_file("mycode.py", "This isn't python at all!") 

+

553 report = self.report_from_command("coverage report mycode.py") 

+

554 

+

555 # Couldn't parse '...' as Python source: 'invalid syntax' at line 1 

+

556 # Name Stmts Miss Cover 

+

557 # ---------------------------- 

+

558 # No data to report. 

+

559 

+

560 errmsg = self.squeezed_lines(report)[0] 

+

561 # The actual file name varies run to run. 

+

562 errmsg = re.sub(r"parse '.*mycode.py", "parse 'mycode.py", errmsg) 

+

563 # The actual error message varies version to version 

+

564 errmsg = re.sub(r": '.*' at", ": 'error' at", errmsg) 

+

565 assert errmsg == "Couldn't parse 'mycode.py' as Python source: 'error' at line 1" 

+

566 

+

567 @pytest.mark.skipif(env.JYTHON, reason="Jython doesn't like accented file names") 

+

568 def test_accenteddotpy_not_python(self): 

+

569 # We run a .py file with a non-ascii name, and when reporting, we can't 

+

570 # parse it as Python. We should get an error message in the report. 

+

571 

+

572 self.make_file(u"accented\xe2.py", "print('accented')") 

+

573 self.run_command(u"coverage run accented\xe2.py") 

+

574 self.make_file(u"accented\xe2.py", "This isn't python at all!") 

+

575 report = self.report_from_command(u"coverage report accented\xe2.py") 

+

576 

+

577 # Couldn't parse '...' as Python source: 'invalid syntax' at line 1 

+

578 # Name Stmts Miss Cover 

+

579 # ---------------------------- 

+

580 # No data to report. 

+

581 

+

582 errmsg = self.squeezed_lines(report)[0] 

+

583 # The actual file name varies run to run. 

+

584 errmsg = re.sub(r"parse '.*(accented.*?\.py)", r"parse '\1", errmsg) 

+

585 # The actual error message varies version to version 

+

586 errmsg = re.sub(r": '.*' at", ": 'error' at", errmsg) 

+

587 expected = u"Couldn't parse 'accented\xe2.py' as Python source: 'error' at line 1" 

+

588 if env.PY2: 

+

589 expected = expected.encode(output_encoding()) 

+

590 assert expected == errmsg 

+

591 

+

592 def test_dotpy_not_python_ignored(self): 

+

593 # We run a .py file, and when reporting, we can't parse it as Python, 

+

594 # but we've said to ignore errors, so there's no error reported, 

+

595 # though we still get a warning. 

+

596 self.make_mycode() 

+

597 self.run_command("coverage run mycode.py") 

+

598 self.make_file("mycode.py", "This isn't python at all!") 

+

599 report = self.report_from_command("coverage report -i mycode.py") 

+

600 

+

601 # Coverage.py warning: Couldn't parse Python file blah_blah/mycode.py (couldnt-parse) 

+

602 # Name Stmts Miss Cover 

+

603 # ---------------------------- 

+

604 # No data to report. 

+

605 

+

606 assert self.line_count(report) == 4 

+

607 assert 'No data to report.' in report 

+

608 assert '(couldnt-parse)' in report 

+

609 

+

610 def test_dothtml_not_python(self): 

+

611 # We run a .html file, and when reporting, we can't parse it as 

+

612 # Python. Since it wasn't .py, no error is reported. 

+

613 

+

614 # Run an "html" file 

+

615 self.make_file("mycode.html", "a = 1") 

+

616 self.run_command("coverage run mycode.html") 

+

617 # Before reporting, change it to be an HTML file. 

+

618 self.make_file("mycode.html", "<h1>This isn't python at all!</h1>") 

+

619 report = self.report_from_command("coverage report mycode.html") 

+

620 

+

621 # Name Stmts Miss Cover 

+

622 # ---------------------------- 

+

623 # No data to report. 

+

624 

+

625 assert self.line_count(report) == 3 

+

626 assert 'No data to report.' in report 

+

627 

+

628 def test_report_no_extension(self): 

+

629 self.make_file("xxx", """\ 

+

630 # This is a python file though it doesn't look like it, like a main script. 

+

631 a = b = c = d = 0 

+

632 a = 3 

+

633 b = 4 

+

634 if not b: 

+

635 c = 6 

+

636 d = 7 

+

637 print("xxx: %r %r %r %r" % (a, b, c, d)) 

+

638 """) 

+

639 out = self.run_command("coverage run --source=. xxx") 

+

640 assert out == "xxx: 3 4 0 7\n" 

+

641 report = self.report_from_command("coverage report") 

+

642 assert self.last_line_squeezed(report) == "TOTAL 7 1 86%" 

+

643 

+

644 def test_report_with_chdir(self): 

+

645 self.make_file("chdir.py", """\ 

+

646 import os 

+

647 print("Line One") 

+

648 os.chdir("subdir") 

+

649 print("Line Two") 

+

650 print(open("something").read()) 

+

651 """) 

+

652 self.make_file("subdir/something", "hello") 

+

653 out = self.run_command("coverage run --source=. chdir.py") 

+

654 assert out == "Line One\nLine Two\nhello\n" 

+

655 report = self.report_from_command("coverage report") 

+

656 assert self.last_line_squeezed(report) == "TOTAL 5 0 100%" 

+

657 

+

658 def get_report(self, cov): 

+

659 """Get the report from `cov`, and canonicalize it.""" 

+

660 repout = StringIO() 

+

661 cov.report(file=repout, show_missing=False) 

+

662 report = repout.getvalue().replace('\\', '/') 

+

663 report = re.sub(r" +", " ", report) 

+

664 return report 

+

665 

+

666 def test_bug_156_file_not_run_should_be_zero(self): 

+

667 # https://github.com/nedbat/coveragepy/issues/156 

+

668 self.make_file("mybranch.py", """\ 

+

669 def branch(x): 

+

670 if x: 

+

671 print("x") 

+

672 return x 

+

673 branch(1) 

+

674 """) 

+

675 self.make_file("main.py", """\ 

+

676 print("y") 

+

677 """) 

+

678 cov = coverage.Coverage(branch=True, source=["."]) 

+

679 cov.start() 

+

680 import main # pragma: nested # pylint: disable=unused-import, import-error 

+

681 cov.stop() # pragma: nested 

+

682 report = self.get_report(cov).splitlines() 

+

683 assert "mybranch.py 5 5 2 0 0%" in report 

+

684 

+

685 def run_TheCode_and_report_it(self): 

+

686 """A helper for the next few tests.""" 

+

687 cov = coverage.Coverage() 

+

688 cov.start() 

+

689 import TheCode # pragma: nested # pylint: disable=import-error, unused-import 

+

690 cov.stop() # pragma: nested 

+

691 return self.get_report(cov) 

+

692 

+

693 def test_bug_203_mixed_case_listed_twice_with_rc(self): 

+

694 self.make_file("TheCode.py", "a = 1\n") 

+

695 self.make_file(".coveragerc", "[run]\nsource = .\n") 

+

696 

+

697 report = self.run_TheCode_and_report_it() 

+

698 

+

699 assert "TheCode" in report 

+

700 assert "thecode" not in report 

+

701 

+

702 def test_bug_203_mixed_case_listed_twice(self): 

+

703 self.make_file("TheCode.py", "a = 1\n") 

+

704 

+

705 report = self.run_TheCode_and_report_it() 

+

706 

+

707 assert "TheCode" in report 

+

708 assert "thecode" not in report 

+

709 

+

710 @pytest.mark.skipif(not env.WINDOWS, reason=".pyw files are only on Windows.") 

+

711 def test_pyw_files(self): 

+

712 # https://github.com/nedbat/coveragepy/issues/261 

+

713 self.make_file("start.pyw", """\ 

+

714 import mod 

+

715 print("In start.pyw") 

+

716 """) 

+

717 self.make_file("mod.pyw", """\ 

+

718 print("In mod.pyw") 

+

719 """) 

+

720 cov = coverage.Coverage() 

+

721 cov.start() 

+

722 import start # pragma: nested # pylint: disable=import-error, unused-import 

+

723 cov.stop() # pragma: nested 

+

724 

+

725 report = self.get_report(cov) 

+

726 assert "NoSource" not in report 

+

727 report = report.splitlines() 

+

728 assert "start.pyw 2 0 100%" in report 

+

729 assert "mod.pyw 1 0 100%" in report 

+

730 

+

731 def test_tracing_pyc_file(self): 

+

732 # Create two Python files. 

+

733 self.make_file("mod.py", "a = 1\n") 

+

734 self.make_file("main.py", "import mod\n") 

+

735 

+

736 # Make one into a .pyc. 

+

737 py_compile.compile("mod.py") 

+

738 

+

739 # Run the program. 

+

740 cov = coverage.Coverage() 

+

741 cov.start() 

+

742 import main # pragma: nested # pylint: disable=unused-import, import-error 

+

743 cov.stop() # pragma: nested 

+

744 

+

745 report = self.get_report(cov).splitlines() 

+

746 assert "mod.py 1 0 100%" in report 

+

747 

+

748 @pytest.mark.skipif(env.PYPY2, reason="PyPy2 doesn't run bare .pyc files") 

+

749 def test_missing_py_file_during_run(self): 

+

750 # Create two Python files. 

+

751 self.make_file("mod.py", "a = 1\n") 

+

752 self.make_file("main.py", "import mod\n") 

+

753 

+

754 # Make one into a .pyc, and remove the .py. 

+

755 py_compile.compile("mod.py") 

+

756 os.remove("mod.py") 

+

757 

+

758 # Python 3 puts the .pyc files in a __pycache__ directory, and will 

+

759 # not import from there without source. It will import a .pyc from 

+

760 # the source location though. 

+

761 if env.PY3 and not env.JYTHON: 

+

762 pycs = glob.glob("__pycache__/mod.*.pyc") 

+

763 assert len(pycs) == 1 

+

764 os.rename(pycs[0], "mod.pyc") 

+

765 

+

766 # Run the program. 

+

767 cov = coverage.Coverage() 

+

768 cov.start() 

+

769 import main # pragma: nested # pylint: disable=unused-import, import-error 

+

770 cov.stop() # pragma: nested 

+

771 

+

772 # Put back the missing Python file. 

+

773 self.make_file("mod.py", "a = 1\n") 

+

774 report = self.get_report(cov).splitlines() 

+

775 assert "mod.py 1 0 100%" in report 

+

776 

+

777 def test_empty_files(self): 

+

778 # Shows that empty files like __init__.py are listed as having zero 

+

779 # statements, not one statement. 

+

780 cov = coverage.Coverage(branch=True) 

+

781 cov.start() 

+

782 import usepkgs # pragma: nested # pylint: disable=import-error, unused-import 

+

783 cov.stop() # pragma: nested 

+

784 

+

785 repout = StringIO() 

+

786 cov.report(file=repout, show_missing=False) 

+

787 

+

788 report = repout.getvalue().replace('\\', '/') 

+

789 report = re.sub(r"\s+", " ", report) 

+

790 assert "tests/modules/pkg1/__init__.py 1 0 0 0 100%" in report 

+

791 assert "tests/modules/pkg2/__init__.py 0 0 0 0 100%" in report 

+

792 

+

793 

+

794class ReportingReturnValueTest(CoverageTest): 

+

795 """Tests of reporting functions returning values.""" 

+

796 

+

797 def run_coverage(self): 

+

798 """Run coverage on doit.py and return the coverage object.""" 

+

799 self.make_file("doit.py", """\ 

+

800 a = 1 

+

801 b = 2 

+

802 c = 3 

+

803 d = 4 

+

804 if a > 10: 

+

805 f = 6 

+

806 g = 7 

+

807 """) 

+

808 

+

809 cov = coverage.Coverage() 

+

810 self.start_import_stop(cov, "doit") 

+

811 return cov 

+

812 

+

813 def test_report(self): 

+

814 cov = self.run_coverage() 

+

815 val = cov.report(include="*/doit.py") 

+

816 assert round(abs(val-85.7), 1) == 0 

+

817 

+

818 def test_html(self): 

+

819 cov = self.run_coverage() 

+

820 val = cov.html_report(include="*/doit.py") 

+

821 assert round(abs(val-85.7), 1) == 0 

+

822 

+

823 def test_xml(self): 

+

824 cov = self.run_coverage() 

+

825 val = cov.xml_report(include="*/doit.py") 

+

826 assert round(abs(val-85.7), 1) == 0 

+

827 

+

828 

+

829class SummaryReporterConfigurationTest(CoverageTest): 

+

830 """Tests of SummaryReporter.""" 

+

831 

+

832 def make_rigged_file(self, filename, stmts, miss): 

+

833 """Create a file that will have specific results. 

+

834 

+

835 `stmts` and `miss` are ints, the number of statements, and 

+

836 missed statements that should result. 

+

837 """ 

+

838 run = stmts - miss - 1 

+

839 dont_run = miss 

+

840 source = "" 

+

841 source += "a = 1\n" * run 

+

842 source += "if a == 99:\n" 

+

843 source += " a = 2\n" * dont_run 

+

844 self.make_file(filename, source) 

+

845 

+

846 def get_summary_text(self, *options): 

+

847 """Get text output from the SummaryReporter. 

+

848 

+

849 The arguments are tuples: (name, value) for Coverage.set_option. 

+

850 """ 

+

851 self.make_rigged_file("file1.py", 339, 155) 

+

852 self.make_rigged_file("file2.py", 13, 3) 

+

853 self.make_rigged_file("file3.py", 234, 228) 

+

854 self.make_file("doit.py", "import file1, file2, file3") 

+

855 

+

856 cov = Coverage(source=["."], omit=["doit.py"]) 

+

857 cov.start() 

+

858 import doit # pragma: nested # pylint: disable=import-error, unused-import 

+

859 cov.stop() # pragma: nested 

+

860 for name, value in options: 

+

861 cov.set_option(name, value) 

+

862 printer = SummaryReporter(cov) 

+

863 destination = StringIO() 

+

864 printer.report([], destination) 

+

865 return destination.getvalue() 

+

866 

+

867 def test_test_data(self): 

+

868 # We use our own test files as test data. Check that our assumptions 

+

869 # about them are still valid. We want the three columns of numbers to 

+

870 # sort in three different orders. 

+

871 report = self.get_summary_text() 

+

872 print(report) 

+

873 # Name Stmts Miss Cover 

+

874 # ------------------------------ 

+

875 # file1.py 339 155 54% 

+

876 # file2.py 13 3 77% 

+

877 # file3.py 234 228 3% 

+

878 # ------------------------------ 

+

879 # TOTAL 586 386 34% 

+

880 

+

881 lines = report.splitlines()[2:-2] 

+

882 assert len(lines) == 3 

+

883 nums = [list(map(int, l.replace('%', '').split()[1:])) for l in lines] 

+

884 # [ 

+

885 # [339, 155, 54], 

+

886 # [ 13, 3, 77], 

+

887 # [234, 228, 3] 

+

888 # ] 

+

889 assert nums[1][0] < nums[2][0] < nums[0][0] 

+

890 assert nums[1][1] < nums[0][1] < nums[2][1] 

+

891 assert nums[2][2] < nums[0][2] < nums[1][2] 

+

892 

+

893 def test_defaults(self): 

+

894 """Run the report with no configuration options.""" 

+

895 report = self.get_summary_text() 

+

896 assert 'Missing' not in report 

+

897 assert 'Branch' not in report 

+

898 

+

899 def test_print_missing(self): 

+

900 """Run the report printing the missing lines.""" 

+

901 report = self.get_summary_text(('report:show_missing', True)) 

+

902 assert 'Missing' in report 

+

903 assert 'Branch' not in report 

+

904 

+

905 def assert_ordering(self, text, *words): 

+

906 """Assert that the `words` appear in order in `text`.""" 

+

907 indexes = list(map(text.find, words)) 

+

908 assert -1 not in indexes 

+

909 msg = "The words %r don't appear in order in %r" % (words, text) 

+

910 assert indexes == sorted(indexes), msg 

+

911 

+

912 def test_sort_report_by_stmts(self): 

+

913 # Sort the text report by the Stmts column. 

+

914 report = self.get_summary_text(('report:sort', 'Stmts')) 

+

915 self.assert_ordering(report, "file2.py", "file3.py", "file1.py") 

+

916 

+

917 def test_sort_report_by_missing(self): 

+

918 # Sort the text report by the Missing column. 

+

919 report = self.get_summary_text(('report:sort', 'Miss')) 

+

920 self.assert_ordering(report, "file2.py", "file1.py", "file3.py") 

+

921 

+

922 def test_sort_report_by_cover(self): 

+

923 # Sort the text report by the Cover column. 

+

924 report = self.get_summary_text(('report:sort', 'Cover')) 

+

925 self.assert_ordering(report, "file3.py", "file1.py", "file2.py") 

+

926 

+

927 def test_sort_report_by_cover_plus(self): 

+

928 # Sort the text report by the Cover column, including the explicit + sign. 

+

929 report = self.get_summary_text(('report:sort', '+Cover')) 

+

930 self.assert_ordering(report, "file3.py", "file1.py", "file2.py") 

+

931 

+

932 def test_sort_report_by_cover_reversed(self): 

+

933 # Sort the text report by the Cover column reversed. 

+

934 report = self.get_summary_text(('report:sort', '-Cover')) 

+

935 self.assert_ordering(report, "file2.py", "file1.py", "file3.py") 

+

936 

+

937 def test_sort_report_by_invalid_option(self): 

+

938 # Sort the text report by a nonsense column. 

+

939 msg = "Invalid sorting option: 'Xyzzy'" 

+

940 with pytest.raises(CoverageException, match=msg): 

+

941 self.get_summary_text(('report:sort', 'Xyzzy')) 

+
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_templite_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_templite_py.html
new file mode 100644
index 000000000..360d7f789
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_templite_py.html
@@ -0,0 +1,409 @@
Coverage for tests/test_templite.py: 100.000%

1# coding: utf-8 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Tests for coverage.templite.""" 

+

6 

+

7import re 

+

8 

+

9import pytest 

+

10 

+

11from coverage.templite import Templite, TempliteSyntaxError, TempliteValueError 

+

12 

+

13from tests.coveragetest import CoverageTest 

+

14 

+

15# pylint: disable=possibly-unused-variable 

+

16 

+

17class AnyOldObject(object): 

+

18 """Simple testing object. 

+

19 

+

20 Use keyword arguments in the constructor to set attributes on the object. 

+

21 

+

22 """ 

+

23 def __init__(self, **attrs): 

+

24 for n, v in attrs.items(): 

+

25 setattr(self, n, v) 

+

26 

+

27 

+

28class TempliteTest(CoverageTest): 

+

29 """Tests for Templite.""" 

+

30 

+

31 run_in_temp_dir = False 

+

32 

+

33 def try_render(self, text, ctx=None, result=None): 

+

34 """Render `text` through `ctx`, and it had better be `result`. 

+

35 

+

36 Result defaults to None so we can shorten the calls where we expect 

+

37 an exception and never get to the result comparison. 

+

38 

+

39 """ 

+

40 actual = Templite(text).render(ctx or {}) 

+

41 # If result is None, then an exception should have prevented us getting 

+

42 # to here. 

+

43 assert result is not None 

+

44 assert actual == result 

+

45 

+

46 def assertSynErr(self, msg): 

+

47 """Assert that a `TempliteSyntaxError` will happen. 

+

48 

+

49 A context manager, and the message should be `msg`. 

+

50 

+

51 """ 

+

52 pat = "^" + re.escape(msg) + "$" 

+

53 return pytest.raises(TempliteSyntaxError, match=pat) 

+

54 

+

55 def test_passthrough(self): 

+

56 # Strings without variables are passed through unchanged. 

+

57 assert Templite("Hello").render() == "Hello" 

+

58 assert Templite("Hello, 20% fun time!").render() == "Hello, 20% fun time!" 

+

59 

+

60 def test_variables(self): 

+

61 # Variables use {{var}} syntax. 

+

62 self.try_render("Hello, {{name}}!", {'name':'Ned'}, "Hello, Ned!") 

+

63 

+

64 def test_undefined_variables(self): 

+

65 # Using undefined names is an error. 

+

66 with pytest.raises(Exception, match="'name'"): 

+

67 self.try_render("Hi, {{name}}!") 

+

68 

+

69 def test_pipes(self): 

+

70 # Variables can be filtered with pipes. 

+

71 data = { 

+

72 'name': 'Ned', 

+

73 'upper': lambda x: x.upper(), 

+

74 'second': lambda x: x[1], 

+

75 } 

+

76 self.try_render("Hello, {{name|upper}}!", data, "Hello, NED!") 

+

77 

+

78 # Pipes can be concatenated. 

+

79 self.try_render("Hello, {{name|upper|second}}!", data, "Hello, E!") 

+

80 

+

81 def test_reusability(self): 

+

82 # A single Templite can be used more than once with different data. 

+

83 globs = { 

+

84 'upper': lambda x: x.upper(), 

+

85 'punct': '!', 

+

86 } 

+

87 

+

88 template = Templite("This is {{name|upper}}{{punct}}", globs) 

+

89 assert template.render({'name':'Ned'}) == "This is NED!" 

+

90 assert template.render({'name':'Ben'}) == "This is BEN!" 

+

91 

+

92 def test_attribute(self): 

+

93 # Variables' attributes can be accessed with dots. 

+

94 obj = AnyOldObject(a="Ay") 

+

95 self.try_render("{{obj.a}}", locals(), "Ay") 

+

96 

+

97 obj2 = AnyOldObject(obj=obj, b="Bee") 

+

98 self.try_render("{{obj2.obj.a}} {{obj2.b}}", locals(), "Ay Bee") 

+

99 

+

100 def test_member_function(self): 

+

101 # Variables' member functions can be used, as long as they are nullary. 

+

102 class WithMemberFns(AnyOldObject): 

+

103 """A class to try out member function access.""" 

+

104 def ditto(self): 

+

105 """Return twice the .txt attribute.""" 

+

106 return self.txt + self.txt 

+

107 obj = WithMemberFns(txt="Once") 

+

108 self.try_render("{{obj.ditto}}", locals(), "OnceOnce") 

+

109 

+

110 def test_item_access(self): 

+

111 # Variables' items can be used. 

+

112 d = {'a':17, 'b':23} 

+

113 self.try_render("{{d.a}} < {{d.b}}", locals(), "17 < 23") 

+

114 

+

115 def test_loops(self): 

+

116 # Loops work like in Django. 

+

117 nums = [1,2,3,4] 

+

118 self.try_render( 

+

119 "Look: {% for n in nums %}{{n}}, {% endfor %}done.", 

+

120 locals(), 

+

121 "Look: 1, 2, 3, 4, done." 

+

122 ) 

+

123 # Loop iterables can be filtered. 

+

124 def rev(l): 

+

125 """Return the reverse of `l`.""" 

+

126 l = l[:] 

+

127 l.reverse() 

+

128 return l 

+

129 

+

130 self.try_render( 

+

131 "Look: {% for n in nums|rev %}{{n}}, {% endfor %}done.", 

+

132 locals(), 

+

133 "Look: 4, 3, 2, 1, done." 

+

134 ) 

+

135 

+

136 def test_empty_loops(self): 

+

137 self.try_render( 

+

138 "Empty: {% for n in nums %}{{n}}, {% endfor %}done.", 

+

139 {'nums':[]}, 

+

140 "Empty: done." 

+

141 ) 

+

142 

+

143 def test_multiline_loops(self): 

+

144 self.try_render( 

+

145 "Look: \n{% for n in nums %}\n{{n}}, \n{% endfor %}done.", 

+

146 {'nums':[1,2,3]}, 

+

147 "Look: \n\n1, \n\n2, \n\n3, \ndone." 

+

148 ) 

+

149 

+

150 def test_multiple_loops(self): 

+

151 self.try_render( 

+

152 "{% for n in nums %}{{n}}{% endfor %} and " 

+

153 "{% for n in nums %}{{n}}{% endfor %}", 

+

154 {'nums': [1,2,3]}, 

+

155 "123 and 123" 

+

156 ) 

+

157 

+

158 def test_comments(self): 

+

159 # Single-line comments work: 

+

160 self.try_render( 

+

161 "Hello, {# Name goes here: #}{{name}}!", 

+

162 {'name':'Ned'}, "Hello, Ned!" 

+

163 ) 

+

164 # and so do multi-line comments: 

+

165 self.try_render( 

+

166 "Hello, {# Name\ngoes\nhere: #}{{name}}!", 

+

167 {'name':'Ned'}, "Hello, Ned!" 

+

168 ) 

+

169 

+

170 def test_if(self): 

+

171 self.try_render( 

+

172 "Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!", 

+

173 {'ned': 1, 'ben': 0}, 

+

174 "Hi, NED!" 

+

175 ) 

+

176 self.try_render( 

+

177 "Hi, {% if ned %}NED{% endif %}{% if ben %}BEN{% endif %}!", 

+

178 {'ned': 0, 'ben': 1}, 

+

179 "Hi, BEN!" 

+

180 ) 

+

181 self.try_render( 

+

182 "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!", 

+

183 {'ned': 0, 'ben': 0}, 

+

184 "Hi, !" 

+

185 ) 

+

186 self.try_render( 

+

187 "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!", 

+

188 {'ned': 1, 'ben': 0}, 

+

189 "Hi, NED!" 

+

190 ) 

+

191 self.try_render( 

+

192 "Hi, {% if ned %}NED{% if ben %}BEN{% endif %}{% endif %}!", 

+

193 {'ned': 1, 'ben': 1}, 

+

194 "Hi, NEDBEN!" 

+

195 ) 

+

196 

+

197 def test_complex_if(self): 

+

198 class Complex(AnyOldObject): 

+

199 """A class to try out complex data access.""" 

+

200 def getit(self): 

+

201 """Return it.""" 

+

202 return self.it 

+

203 obj = Complex(it={'x':"Hello", 'y': 0}) 

+

204 self.try_render( 

+

205 "@" 

+

206 "{% if obj.getit.x %}X{% endif %}" 

+

207 "{% if obj.getit.y %}Y{% endif %}" 

+

208 "{% if obj.getit.y|str %}S{% endif %}" 

+

209 "!", 

+

210 { 'obj': obj, 'str': str }, 

+

211 "@XS!" 

+

212 ) 

+

213 

+

214 def test_loop_if(self): 

+

215 self.try_render( 

+

216 "@{% for n in nums %}{% if n %}Z{% endif %}{{n}}{% endfor %}!", 

+

217 {'nums': [0,1,2]}, 

+

218 "@0Z1Z2!" 

+

219 ) 

+

220 self.try_render( 

+

221 "X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!", 

+

222 {'nums': [0,1,2]}, 

+

223 "X@012!" 

+

224 ) 

+

225 self.try_render( 

+

226 "X{%if nums%}@{% for n in nums %}{{n}}{% endfor %}{%endif%}!", 

+

227 {'nums': []}, 

+

228 "X!" 

+

229 ) 

+

230 

+

231 def test_nested_loops(self): 

+

232 self.try_render( 

+

233 "@" 

+

234 "{% for n in nums %}" 

+

235 "{% for a in abc %}{{a}}{{n}}{% endfor %}" 

+

236 "{% endfor %}" 

+

237 "!", 

+

238 {'nums': [0,1,2], 'abc': ['a', 'b', 'c']}, 

+

239 "@a0b0c0a1b1c1a2b2c2!" 

+

240 ) 

+

241 

+

242 def test_whitespace_handling(self): 

+

243 self.try_render( 

+

244 "@{% for n in nums %}\n" 

+

245 " {% for a in abc %}{{a}}{{n}}{% endfor %}\n" 

+

246 "{% endfor %}!\n", 

+

247 {'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']}, 

+

248 "@\n a0b0c0\n\n a1b1c1\n\n a2b2c2\n!\n" 

+

249 ) 

+

250 self.try_render( 

+

251 "@{% for n in nums -%}\n" 

+

252 " {% for a in abc -%}\n" 

+

253 " {# this disappears completely -#}\n" 

+

254 " {{a-}}\n" 

+

255 " {{n -}}\n" 

+

256 " {{n -}}\n" 

+

257 " {% endfor %}\n" 

+

258 "{% endfor %}!\n", 

+

259 {'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']}, 

+

260 "@a00b00c00\na11b11c11\na22b22c22\n!\n" 

+

261 ) 

+

262 self.try_render( 

+

263 "@{% for n in nums -%}\n" 

+

264 " {{n -}}\n" 

+

265 " x\n" 

+

266 "{% endfor %}!\n", 

+

267 {'nums': [0, 1, 2]}, 

+

268 "@0x\n1x\n2x\n!\n" 

+

269 ) 

+

270 self.try_render(" hello ", {}, " hello ") 

+

271 

+

272 def test_eat_whitespace(self): 

+

273 self.try_render( 

+

274 "Hey!\n" 

+

275 "{% joined %}\n" 

+

276 "@{% for n in nums %}\n" 

+

277 " {% for a in abc %}\n" 

+

278 " {# this disappears completely #}\n" 

+

279 " X\n" 

+

280 " Y\n" 

+

281 " {{a}}\n" 

+

282 " {{n }}\n" 

+

283 " {% endfor %}\n" 

+

284 "{% endfor %}!\n" 

+

285 "{% endjoined %}\n", 

+

286 {'nums': [0, 1, 2], 'abc': ['a', 'b', 'c']}, 

+

287 "Hey!\n@XYa0XYb0XYc0XYa1XYb1XYc1XYa2XYb2XYc2!\n" 

+

288 ) 

+

289 

+

290 def test_non_ascii(self): 

+

291 self.try_render( 

+

292 u"{{where}} ollǝɥ", 

+

293 { 'where': u'ǝɹǝɥʇ' }, 

+

294 u"ǝɹǝɥʇ ollǝɥ" 

+

295 ) 

+

296 

+

297 def test_exception_during_evaluation(self): 

+

298 # TypeError: Couldn't evaluate {{ foo.bar.baz }}: 

+

299 regex = "^Couldn't evaluate None.bar$" 

+

300 with pytest.raises(TempliteValueError, match=regex): 

+

301 self.try_render( 

+

302 "Hey {{foo.bar.baz}} there", {'foo': None}, "Hey ??? there" 

+

303 ) 

+

304 

+

305 def test_bad_names(self): 

+

306 with self.assertSynErr("Not a valid name: 'var%&!@'"): 

+

307 self.try_render("Wat: {{ var%&!@ }}") 

+

308 with self.assertSynErr("Not a valid name: 'filter%&!@'"): 

+

309 self.try_render("Wat: {{ foo|filter%&!@ }}") 

+

310 with self.assertSynErr("Not a valid name: '@'"): 

+

311 self.try_render("Wat: {% for @ in x %}{% endfor %}") 

+

312 

+

313 def test_bogus_tag_syntax(self): 

+

314 with self.assertSynErr("Don't understand tag: 'bogus'"): 

+

315 self.try_render("Huh: {% bogus %}!!{% endbogus %}??") 

+

316 

+

317 def test_malformed_if(self): 

+

318 with self.assertSynErr("Don't understand if: '{% if %}'"): 

+

319 self.try_render("Buh? {% if %}hi!{% endif %}") 

+

320 with self.assertSynErr("Don't understand if: '{% if this or that %}'"): 

+

321 self.try_render("Buh? {% if this or that %}hi!{% endif %}") 

+

322 

+

323 def test_malformed_for(self): 

+

324 with self.assertSynErr("Don't understand for: '{% for %}'"): 

+

325 self.try_render("Weird: {% for %}loop{% endfor %}") 

+

326 with self.assertSynErr("Don't understand for: '{% for x from y %}'"): 

+

327 self.try_render("Weird: {% for x from y %}loop{% endfor %}") 

+

328 with self.assertSynErr("Don't understand for: '{% for x, y in z %}'"): 

+

329 self.try_render("Weird: {% for x, y in z %}loop{% endfor %}") 

+

330 

+

331 def test_bad_nesting(self): 

+

332 with self.assertSynErr("Unmatched action tag: 'if'"): 

+

333 self.try_render("{% if x %}X") 

+

334 with self.assertSynErr("Mismatched end tag: 'for'"): 

+

335 self.try_render("{% if x %}X{% endfor %}") 

+

336 with self.assertSynErr("Too many ends: '{% endif %}'"): 

+

337 self.try_render("{% if x %}{% endif %}{% endif %}") 

+

338 

+

339 def test_malformed_end(self): 

+

340 with self.assertSynErr("Don't understand end: '{% end if %}'"): 

+

341 self.try_render("{% if x %}X{% end if %}") 

+

342 with self.assertSynErr("Don't understand end: '{% endif now %}'"): 

+

343 self.try_render("{% if x %}X{% endif now %}") 

+
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_testing_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_testing_py.html
new file mode 100644
index 000000000..29498d20f
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_testing_py.html
@@ -0,0 +1,452 @@
Coverage for tests/test_testing.py: 100.000%

1# -*- coding: utf-8 -*- 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Tests that our test infrastructure is really working!""" 

+

6 

+

7import datetime 

+

8import os 

+

9import re 

+

10import sys 

+

11 

+

12import pytest 

+

13 

+

14import coverage 

+

15from coverage import tomlconfig 

+

16from coverage.files import actual_path 

+

17 

+

18from tests.coveragetest import CoverageTest 

+

19from tests.helpers import ( 

+

20 arcs_to_arcz_repr, arcz_to_arcs, assert_count_equal, 

+

21 CheckUniqueFilenames, re_lines, re_line, without_module, 

+

22) 

+

23 

+

24 

+

25def test_xdist_sys_path_nuttiness_is_fixed(): 

+

26 # See conftest.py:fix_xdist_sys_path 

+

27 assert sys.path[1] != '' 

+

28 assert os.environ.get('PYTHONPATH') is None 

+

29 

+

30 

+

31def test_assert_count_equal(): 

+

32 assert_count_equal(set(), set()) 

+

33 assert_count_equal({"a": 1, "b": 2}, ["b", "a"]) 

+

34 with pytest.raises(AssertionError): 

+

35 assert_count_equal({1,2,3}, set()) 

+

36 with pytest.raises(AssertionError): 

+

37 assert_count_equal({1,2,3}, {4,5,6}) 

+

38 

+

39 

+

40class CoverageTestTest(CoverageTest): 

+

41 """Test the methods in `CoverageTest`.""" 

+

42 

+

43 def test_file_exists(self): 

+

44 self.make_file("whoville.txt", "We are here!") 

+

45 self.assert_exists("whoville.txt") 

+

46 self.assert_doesnt_exist("shadow.txt") 

+

47 msg = "File 'whoville.txt' shouldn't exist" 

+

48 with pytest.raises(AssertionError, match=msg): 

+

49 self.assert_doesnt_exist("whoville.txt") 

+

50 msg = "File 'shadow.txt' should exist" 

+

51 with pytest.raises(AssertionError, match=msg): 

+

52 self.assert_exists("shadow.txt") 

+

53 

+

54 def test_file_count(self): 

+

55 self.make_file("abcde.txt", "abcde") 

+

56 self.make_file("axczz.txt", "axczz") 

+

57 self.make_file("afile.txt", "afile") 

+

58 self.assert_file_count("a*.txt", 3) 

+

59 self.assert_file_count("*c*.txt", 2) 

+

60 self.assert_file_count("afile.*", 1) 

+

61 self.assert_file_count("*.q", 0) 

+

62 msg = re.escape( 

+

63 "There should be 13 files matching 'a*.txt', but there are these: " 

+

64 "['abcde.txt', 'afile.txt', 'axczz.txt']" 

+

65 ) 

+

66 with pytest.raises(AssertionError, match=msg): 

+

67 self.assert_file_count("a*.txt", 13) 

+

68 msg = re.escape( 

+

69 "There should be 12 files matching '*c*.txt', but there are these: " 

+

70 "['abcde.txt', 'axczz.txt']" 

+

71 ) 

+

72 with pytest.raises(AssertionError, match=msg): 

+

73 self.assert_file_count("*c*.txt", 12) 

+

74 msg = re.escape( 

+

75 "There should be 11 files matching 'afile.*', but there are these: " 

+

76 "['afile.txt']" 

+

77 ) 

+

78 with pytest.raises(AssertionError, match=msg): 

+

79 self.assert_file_count("afile.*", 11) 

+

80 msg = re.escape( 

+

81 "There should be 10 files matching '*.q', but there are these: []" 

+

82 ) 

+

83 with pytest.raises(AssertionError, match=msg): 

+

84 self.assert_file_count("*.q", 10) 

+

85 

+

86 def test_assert_recent_datetime(self): 

+

87 def now_delta(seconds): 

+

88 """Make a datetime `seconds` seconds from now.""" 

+

89 return datetime.datetime.now() + datetime.timedelta(seconds=seconds) 

+

90 

+

91 # Default delta is 10 seconds. 

+

92 self.assert_recent_datetime(now_delta(0)) 

+

93 self.assert_recent_datetime(now_delta(-9)) 

+

94 with pytest.raises(AssertionError): 

+

95 self.assert_recent_datetime(now_delta(-11)) 

+

96 with pytest.raises(AssertionError): 

+

97 self.assert_recent_datetime(now_delta(1)) 

+

98 

+

99 # Delta is settable. 

+

100 self.assert_recent_datetime(now_delta(0), seconds=120) 

+

101 self.assert_recent_datetime(now_delta(-100), seconds=120) 

+

102 with pytest.raises(AssertionError): 

+

103 self.assert_recent_datetime(now_delta(-1000), seconds=120) 

+

104 with pytest.raises(AssertionError): 

+

105 self.assert_recent_datetime(now_delta(1), seconds=120) 

+

106 

+

107 def test_assert_warnings(self): 

+

108 cov = coverage.Coverage() 

+

109 

+

110 # Make a warning, it should catch it properly. 

+

111 with self.assert_warnings(cov, ["Hello there!"]): 

+

112 cov._warn("Hello there!") 

+

113 

+

114 # The expected warnings are regexes. 

+

115 with self.assert_warnings(cov, ["Hello.*!"]): 

+

116 cov._warn("Hello there!") 

+

117 

+

118 # There can be a bunch of actual warnings. 

+

119 with self.assert_warnings(cov, ["Hello.*!"]): 

+

120 cov._warn("You there?") 

+

121 cov._warn("Hello there!") 

+

122 

+

123 # There can be a bunch of expected warnings. 

+

124 with self.assert_warnings(cov, ["Hello.*!", "You"]): 

+

125 cov._warn("You there?") 

+

126 cov._warn("Hello there!") 

+

127 

+

128 # But if there are a bunch of expected warnings, they have to all happen. 

+

129 warn_regex = r"Didn't find warning 'You' in \['Hello there!'\]" 

+

130 with pytest.raises(AssertionError, match=warn_regex): 

+

131 with self.assert_warnings(cov, ["Hello.*!", "You"]): 

+

132 cov._warn("Hello there!") 

+

133 

+

134 # Make a different warning than expected, it should raise an assertion. 

+

135 warn_regex = r"Didn't find warning 'Not me' in \['Hello there!'\]" 

+

136 with pytest.raises(AssertionError, match=warn_regex): 

+

137 with self.assert_warnings(cov, ["Not me"]): 

+

138 cov._warn("Hello there!") 

+

139 

+

140 # Try checking a warning that shouldn't appear: happy case. 

+

141 with self.assert_warnings(cov, ["Hi"], not_warnings=["Bye"]): 

+

142 cov._warn("Hi") 

+

143 

+

144 # But it should fail if the unexpected warning does appear. 

+

145 warn_regex = r"Found warning 'Bye' in \['Hi', 'Bye'\]" 

+

146 with pytest.raises(AssertionError, match=warn_regex): 

+

147 with self.assert_warnings(cov, ["Hi"], not_warnings=["Bye"]): 

+

148 cov._warn("Hi") 

+

149 cov._warn("Bye") 

+

150 

+

151 # assert_warnings shouldn't hide a real exception. 

+

152 with pytest.raises(ZeroDivisionError, match="oops"): 

+

153 with self.assert_warnings(cov, ["Hello there!"]): 

+

154 raise ZeroDivisionError("oops") 

+

155 

+

156 def test_assert_no_warnings(self): 

+

157 cov = coverage.Coverage() 

+

158 

+

159 # Happy path: no warnings. 

+

160 with self.assert_warnings(cov, []): 

+

161 pass 

+

162 

+

163 # If you said there would be no warnings, and there were, fail! 

+

164 warn_regex = r"Unexpected warnings: \['Watch out!'\]" 

+

165 with pytest.raises(AssertionError, match=warn_regex): 

+

166 with self.assert_warnings(cov, []): 

+

167 cov._warn("Watch out!") 

+

168 

+

169 def test_sub_python_is_this_python(self): 

+

170 # Try it with a Python command. 

+

171 self.set_environ('COV_FOOBAR', 'XYZZY') 

+

172 self.make_file("showme.py", """\ 

+

173 import os, sys 

+

174 print(sys.executable) 

+

175 print(os.__file__) 

+

176 print(os.environ['COV_FOOBAR']) 

+

177 """) 

+

178 out = self.run_command("python showme.py").splitlines() 

+

179 assert actual_path(out[0]) == actual_path(sys.executable) 

+

180 assert out[1] == os.__file__ 

+

181 assert out[2] == 'XYZZY' 

+

182 

+

183 # Try it with a "coverage debug sys" command. 

+

184 out = self.run_command("coverage debug sys") 

+

185 

+

186 executable = re_line(out, "executable:") 

+

187 executable = executable.split(":", 1)[1].strip() 

+

188 assert _same_python_executable(executable, sys.executable) 

+

189 

+

190 # "environment: COV_FOOBAR = XYZZY" or "COV_FOOBAR = XYZZY" 

+

191 environ = re_line(out, "COV_FOOBAR") 

+

192 _, _, environ = environ.rpartition(":") 

+

193 assert environ.strip() == "COV_FOOBAR = XYZZY" 

+

194 

+

195 def test_run_command_stdout_stderr(self): 

+

196 # run_command should give us both stdout and stderr. 

+

197 self.make_file("outputs.py", """\ 

+

198 import sys 

+

199 sys.stderr.write("StdErr\\n") 

+

200 print("StdOut") 

+

201 """) 

+

202 out = self.run_command("python outputs.py") 

+

203 assert "StdOut\n" in out 

+

204 assert "StdErr\n" in out 

+

205 

+

206 

+

207class CheckUniqueFilenamesTest(CoverageTest): 

+

208 """Tests of CheckUniqueFilenames.""" 

+

209 

+

210 run_in_temp_dir = False 

+

211 

+

212 class Stub(object): 

+

213 """A stand-in for the class we're checking.""" 

+

214 def __init__(self, x): 

+

215 self.x = x 

+

216 

+

217 def method(self, filename, a=17, b="hello"): 

+

218 """The method we'll wrap, with args to be sure args work.""" 

+

219 return (self.x, filename, a, b) 

+

220 

+

221 def test_detect_duplicate(self): 

+

222 stub = self.Stub(23) 

+

223 CheckUniqueFilenames.hook(stub, "method") 

+

224 

+

225 # Two method calls with different names are fine. 

+

226 assert stub.method("file1") == (23, "file1", 17, "hello") 

+

227 assert stub.method("file2", 1723, b="what") == (23, "file2", 1723, "what") 

+

228 

+

229 # A duplicate file name trips an assertion. 

+

230 with pytest.raises(AssertionError): 

+

231 stub.method("file1") 

+

232 

+

233 

+

234class CheckCoverageTest(CoverageTest): 

+

235 """Tests of the failure assertions in check_coverage.""" 

+

236 

+

237 CODE = """\ 

+

238 a, b = 1, 1 

+

239 def oops(x): 

+

240 if x % 2: 

+

241 raise Exception("odd") 

+

242 try: 

+

243 a = 6 

+

244 oops(1) 

+

245 a = 8 

+

246 except: 

+

247 b = 10 

+

248 assert a == 6 and b == 10 

+

249 """ 

+

250 ARCZ = ".1 12 -23 34 3-2 4-2 25 56 67 78 8B 9A AB B." 

+

251 ARCZ_MISSING = "3-2 78 8B" 

+

252 ARCZ_UNPREDICTED = "79" 

+

253 

+

254 def test_check_coverage_possible(self): 

+

255 msg = r"(?s)Possible arcs differ: .*- \(6, 3\).*\+ \(6, 7\)" 

+

256 with pytest.raises(AssertionError, match=msg): 

+

257 self.check_coverage( 

+

258 self.CODE, 

+

259 arcz=self.ARCZ.replace("7", "3"), 

+

260 arcz_missing=self.ARCZ_MISSING, 

+

261 arcz_unpredicted=self.ARCZ_UNPREDICTED, 

+

262 ) 

+

263 

+

264 def test_check_coverage_missing(self): 

+

265 msg = r"(?s)Missing arcs differ: .*- \(3, 8\).*\+ \(7, 8\)" 

+

266 with pytest.raises(AssertionError, match=msg): 

+

267 self.check_coverage( 

+

268 self.CODE, 

+

269 arcz=self.ARCZ, 

+

270 arcz_missing=self.ARCZ_MISSING.replace("7", "3"), 

+

271 arcz_unpredicted=self.ARCZ_UNPREDICTED, 

+

272 ) 

+

273 

+

274 def test_check_coverage_unpredicted(self): 

+

275 msg = r"(?s)Unpredicted arcs differ: .*- \(3, 9\).*\+ \(7, 9\)" 

+

276 with pytest.raises(AssertionError, match=msg): 

+

277 self.check_coverage( 

+

278 self.CODE, 

+

279 arcz=self.ARCZ, 

+

280 arcz_missing=self.ARCZ_MISSING, 

+

281 arcz_unpredicted=self.ARCZ_UNPREDICTED.replace("7", "3") 

+

282 ) 

+

283 

+

284 

+

285class ReLinesTest(CoverageTest): 

+

286 """Tests of `re_lines`.""" 

+

287 

+

288 run_in_temp_dir = False 

+

289 

+

290 @pytest.mark.parametrize("text, pat, result", [ 

+

291 ("line1\nline2\nline3\n", "line", "line1\nline2\nline3\n"), 

+

292 ("line1\nline2\nline3\n", "[13]", "line1\nline3\n"), 

+

293 ("line1\nline2\nline3\n", "X", ""), 

+

294 ]) 

+

295 def test_re_lines(self, text, pat, result): 

+

296 assert re_lines(text, pat) == result 

+

297 

+

298 @pytest.mark.parametrize("text, pat, result", [ 

+

299 ("line1\nline2\nline3\n", "line", ""), 

+

300 ("line1\nline2\nline3\n", "[13]", "line2\n"), 

+

301 ("line1\nline2\nline3\n", "X", "line1\nline2\nline3\n"), 

+

302 ]) 

+

303 def test_re_lines_inverted(self, text, pat, result): 

+

304 assert re_lines(text, pat, match=False) == result 

+

305 

+

306 @pytest.mark.parametrize("text, pat, result", [ 

+

307 ("line1\nline2\nline3\n", "2", "line2"), 

+

308 ]) 

+

309 def test_re_line(self, text, pat, result): 

+

310 assert re_line(text, pat) == result 

+

311 

+

312 @pytest.mark.parametrize("text, pat", [ 

+

313 ("line1\nline2\nline3\n", "line"), # too many matches 

+

314 ("line1\nline2\nline3\n", "X"), # no matches 

+

315 ]) 

+

316 def test_re_line_bad(self, text, pat): 

+

317 with pytest.raises(AssertionError): 

+

318 re_line(text, pat) 

+

319 

+

320 

+

321def _same_python_executable(e1, e2): 

+

322 """Determine if `e1` and `e2` refer to the same Python executable. 

+

323 

+

324 Either path could include symbolic links. The two paths might not refer 

+

325 to the exact same file, but if they are in the same directory and their 

+

326 numeric suffixes aren't different, they are the same executable. 

+

327 

+

328 """ 

+

329 e1 = os.path.abspath(os.path.realpath(e1)) 

+

330 e2 = os.path.abspath(os.path.realpath(e2)) 

+

331 

+

332 if os.path.dirname(e1) != os.path.dirname(e2): 

+

333 return False # pragma: only failure 

+

334 

+

335 e1 = os.path.basename(e1) 

+

336 e2 = os.path.basename(e2) 

+

337 

+

338 if e1 == "python" or e2 == "python" or e1 == e2: 

+

339 # Python and Python2.3: OK 

+

340 # Python2.3 and Python: OK 

+

341 # Python and Python: OK 

+

342 # Python2.3 and Python2.3: OK 

+

343 return True 

+

344 

+

345 return False # pragma: only failure 

+

346 

+

347 

+

348def test_without_module(): 

+

349 toml1 = tomlconfig.toml 

+

350 with without_module(tomlconfig, 'toml'): 

+

351 toml2 = tomlconfig.toml 

+

352 toml3 = tomlconfig.toml 

+

353 

+

354 assert toml1 is toml3 is not None 

+

355 assert toml2 is None 

+

356 

+

357 

+

358class ArczTest(CoverageTest): 

+

359 """Tests of arcz/arcs helpers.""" 

+

360 

+

361 run_in_temp_dir = False 

+

362 

+

363 @pytest.mark.parametrize("arcz, arcs", [ 

+

364 (".1 12 2.", [(-1, 1), (1, 2), (2, -1)]), 

+

365 ("-11 12 2-5", [(-1, 1), (1, 2), (2, -5)]), 

+

366 ("-QA CB IT Z-A", [(-26, 10), (12, 11), (18, 29), (35, -10)]), 

+

367 ]) 

+

368 def test_arcz_to_arcs(self, arcz, arcs): 

+

369 assert arcz_to_arcs(arcz) == arcs 

+

370 

+

371 @pytest.mark.parametrize("arcs, arcz_repr", [ 

+

372 ([(-1, 1), (1, 2), (2, -1)], "(-1, 1) # .1\n(1, 2) # 12\n(2, -1) # 2.\n"), 

+

373 ([(-1, 1), (1, 2), (2, -5)], "(-1, 1) # .1\n(1, 2) # 12\n(2, -5) # 2-5\n"), 

+

374 ([(-26, 10), (12, 11), (18, 29), (35, -10), (1, 33), (100, 7)], 

+

375 ( 

+

376 "(-26, 10) # -QA\n" 

+

377 "(12, 11) # CB\n" 

+

378 "(18, 29) # IT\n" 

+

379 "(35, -10) # Z-A\n" 

+

380 "(1, 33) # 1X\n" 

+

381 "(100, 7) # ?7\n" 

+

382 ) 

+

383 ), 

+

384 ]) 

+

385 def test_arcs_to_arcz_repr(self, arcs, arcz_repr): 

+

386 assert arcs_to_arcz_repr(arcs) == arcz_repr 

+
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_version_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_version_py.html
new file mode 100644
index 000000000..e56065fc1
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_version_py.html
@@ -0,0 +1,100 @@
Coverage for tests/test_version.py: 100.000%

1# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
2# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
3
4"""Tests of version.py."""
5
6import coverage
7from coverage.version import _make_url, _make_version
8
9from tests.coveragetest import CoverageTest
10
11
12class VersionTest(CoverageTest):
13 """Tests of version.py"""
14
15 run_in_temp_dir = False
16
17 def test_version_info(self):
18 # Make sure we didn't screw up the version_info tuple.
19 assert isinstance(coverage.version_info, tuple)
20 assert [type(d) for d in coverage.version_info] == [int, int, int, str, int]
21 assert coverage.version_info[3] in ['alpha', 'beta', 'candidate', 'final']
22
23 def test_make_version(self):
24 assert _make_version(4, 0, 0, 'alpha', 0) == "4.0a0"
25 assert _make_version(4, 0, 0, 'alpha', 1) == "4.0a1"
26 assert _make_version(4, 0, 0, 'final', 0) == "4.0"
27 assert _make_version(4, 1, 2, 'beta', 3) == "4.1.2b3"
28 assert _make_version(4, 1, 2, 'final', 0) == "4.1.2"
29 assert _make_version(5, 10, 2, 'candidate', 7) == "5.10.2rc7"
30
31 def test_make_url(self):
32 assert _make_url(4, 0, 0, 'final', 0) == "https://coverage.readthedocs.io"
33 expected = "https://coverage.readthedocs.io/en/coverage-4.1.2b3"
34 assert _make_url(4, 1, 2, 'beta', 3) == expected
diff --git a/reports/20210322_66173dc24d/htmlcov/tests_test_xml_py.html b/reports/20210322_66173dc24d/htmlcov/tests_test_xml_py.html
new file mode 100644
index 000000000..f4451f451
--- /dev/null
+++ b/reports/20210322_66173dc24d/htmlcov/tests_test_xml_py.html
@@ -0,0 +1,522 @@
Coverage for tests/test_xml.py: 100.000%

1# coding: utf-8 

+

2# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 

+

3# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt 

+

4 

+

5"""Tests for XML reports from coverage.py.""" 

+

6 

+

7import os 

+

8import os.path 

+

9import re 

+

10from xml.etree import ElementTree 

+

11 

+

12import pytest 

+

13 

+

14import coverage 

+

15from coverage.backward import import_local_file 

+

16from coverage.files import abs_file 

+

17 

+

18from tests.coveragetest import CoverageTest 

+

19from tests.goldtest import compare, gold_path 

+

20from tests.helpers import change_dir 

+

21 

+

22 

+

23class XmlTestHelpers(CoverageTest): 

+

24 """Methods to use from XML tests.""" 

+

25 

+

26 def run_mycode(self): 

+

27 """Run mycode.py, so we can report on it.""" 

+

28 self.make_file("mycode.py", "print('hello')\n") 

+

29 self.run_command("coverage run mycode.py") 

+

30 

+

31 def run_doit(self): 

+

32 """Construct a simple sub-package.""" 

+

33 self.make_file("sub/__init__.py") 

+

34 self.make_file("sub/doit.py", "print('doit!')") 

+

35 self.make_file("main.py", "import sub.doit") 

+

36 cov = coverage.Coverage(source=["."]) 

+

37 self.start_import_stop(cov, "main") 

+

38 return cov 

+

39 

+

40 def make_tree(self, width, depth, curdir="."): 

+

41 """Make a tree of packages. 

+

42 

+

43 Makes `width` directories, named d0 .. d{width-1}. Each directory has 

+

44 __init__.py, and `width` files, named f0.py .. f{width-1}.py. Each 

+

45 directory also has `width` sub-directories, in the same fashion, until 

+

46 a depth of `depth` is reached. 

+

47 

+

48 """ 

+

49 if depth == 0: 

+

50 return 

+

51 

+

52 def here(p): 

+

53 """A path for `p` in our currently interesting directory.""" 

+

54 return os.path.join(curdir, p) 

+

55 

+

56 for i in range(width): 

+

57 next_dir = here("d{}".format(i)) 

+

58 self.make_tree(width, depth-1, next_dir) 

+

59 if curdir != ".": 

+

60 self.make_file(here("__init__.py"), "") 

+

61 for i in range(width): 

+

62 filename = here("f{}.py".format(i)) 

+

63 self.make_file(filename, "# {}\n".format(filename)) 

+

64 

+

65 def assert_source(self, xmldom, src): 

+

66 """Assert that the XML has a <source> element with `src`.""" 

+

67 src = abs_file(src) 

+

68 elts = xmldom.findall(".//sources/source") 

+

69 assert any(elt.text == src for elt in elts) 

+

70 

+

71 

+

72class XmlTestHelpersTest(XmlTestHelpers, CoverageTest): 

+

73 """Tests of methods in XmlTestHelpers.""" 

+

74 

+

75 run_in_temp_dir = False 

+

76 

+

77 def test_assert_source(self): 

+

78 dom = ElementTree.fromstring("""\ 

+

79 <doc> 

+

80 <src>foo</src> 

+

81 <sources> 

+

82 <source>{cwd}something</source> 

+

83 <source>{cwd}another</source> 

+

84 </sources> 

+

85 </doc> 

+

86 """.format(cwd=abs_file(".")+os.sep)) 

+

87 

+

88 self.assert_source(dom, "something") 

+

89 self.assert_source(dom, "another") 

+

90 

+

91 with pytest.raises(AssertionError): 

+

92 self.assert_source(dom, "hello") 

+

93 with pytest.raises(AssertionError): 

+

94 self.assert_source(dom, "foo") 

+

95 with pytest.raises(AssertionError): 

+

96 self.assert_source(dom, "thing") 

+

97 

+

98 

+

99class XmlReportTest(XmlTestHelpers, CoverageTest): 

+

100 """Tests of the XML reports from coverage.py.""" 

+

101 

+

102 def test_default_file_placement(self): 

+

103 self.run_mycode() 

+

104 self.run_command("coverage xml") 

+

105 self.assert_exists("coverage.xml") 

+

106 

+

107 def test_argument_affects_xml_placement(self): 

+

108 self.run_mycode() 

+

109 self.run_command("coverage xml -o put_it_there.xml") 

+

110 self.assert_doesnt_exist("coverage.xml") 

+

111 self.assert_exists("put_it_there.xml") 

+

112 

+

113 def test_config_file_directory_does_not_exist(self): 

+

114 self.run_mycode() 

+

115 self.run_command("coverage xml -o nonexistent/put_it_there.xml") 

+

116 self.assert_doesnt_exist("coverage.xml") 

+

117 self.assert_doesnt_exist("put_it_there.xml") 

+

118 self.assert_exists("nonexistent/put_it_there.xml") 

+

119 

+

120 def test_config_affects_xml_placement(self): 

+

121 self.run_mycode() 

+

122 self.make_file(".coveragerc", "[xml]\noutput = xml.out\n") 

+

123 self.run_command("coverage xml") 

+

124 self.assert_doesnt_exist("coverage.xml") 

+

125 self.assert_exists("xml.out") 

+

126 

+

127 def test_no_data(self): 

+

128 # https://github.com/nedbat/coveragepy/issues/210 

+

129 self.run_command("coverage xml") 

+

130 self.assert_doesnt_exist("coverage.xml") 

+

131 

+

132 def test_no_source(self): 

+

133 # Written while investigating a bug, might as well keep it. 

+

134 # https://github.com/nedbat/coveragepy/issues/208 

+

135 self.make_file("innocuous.py", "a = 4") 

+

136 cov = coverage.Coverage() 

+

137 self.start_import_stop(cov, "innocuous") 

+

138 os.remove("innocuous.py") 

+

139 cov.xml_report(ignore_errors=True) 

+

140 self.assert_exists("coverage.xml") 

+

141 

+

142 def test_filename_format_showing_everything(self): 

+

143 cov = self.run_doit() 

+

144 cov.xml_report() 

+

145 dom = ElementTree.parse("coverage.xml") 

+

146 elts = dom.findall(".//class[@name='doit.py']") 

+

147 assert len(elts) == 1 

+

148 assert elts[0].get('filename') == "sub/doit.py" 

+

149 

+

150 def test_filename_format_including_filename(self): 

+

151 cov = self.run_doit() 

+

152 cov.xml_report(["sub/doit.py"]) 

+

153 dom = ElementTree.parse("coverage.xml") 

+

154 elts = dom.findall(".//class[@name='doit.py']") 

+

155 assert len(elts) == 1 

+

156 assert elts[0].get('filename') == "sub/doit.py" 

+

157 

+

158 def test_filename_format_including_module(self): 

+

159 cov = self.run_doit() 

+

160 import sub.doit # pylint: disable=import-error 

+

161 cov.xml_report([sub.doit]) 

+

162 dom = ElementTree.parse("coverage.xml") 

+

163 elts = dom.findall(".//class[@name='doit.py']") 

+

164 assert len(elts) == 1 

+

165 assert elts[0].get('filename') == "sub/doit.py" 

+

166 

+

167 def test_reporting_on_nothing(self): 

+

168 # Used to raise a zero division error: 

+

169 # https://github.com/nedbat/coveragepy/issues/250 

+

170 self.make_file("empty.py", "") 

+

171 cov = coverage.Coverage() 

+

172 empty = self.start_import_stop(cov, "empty") 

+

173 cov.xml_report([empty]) 

+

174 dom = ElementTree.parse("coverage.xml") 

+

175 elts = dom.findall(".//class[@name='empty.py']") 

+

176 assert len(elts) == 1 

+

177 assert elts[0].get('filename') == "empty.py" 

+

178 assert elts[0].get('line-rate') == '1' 

+

179 

+

180 def test_empty_file_is_100_not_0(self): 

+

181 # https://github.com/nedbat/coveragepy/issues/345 

+

182 cov = self.run_doit() 

+

183 cov.xml_report() 

+

184 dom = ElementTree.parse("coverage.xml") 

+

185 elts = dom.findall(".//class[@name='__init__.py']") 

+

186 assert len(elts) == 1 

+

187 assert elts[0].get('line-rate') == '1' 

+

188 

+

189 def test_empty_file_is_skipped(self): 

+

190 cov = self.run_doit() 

+

191 cov.xml_report(skip_empty=True) 

+

192 dom = ElementTree.parse("coverage.xml") 

+

193 elts = dom.findall(".//class[@name='__init__.py']") 

+

194 assert len(elts) == 0 

+

195 

+

196 def test_curdir_source(self): 

+

197 # With no source= option, the XML report should explain that the source 

+

198 # is in the current directory. 

+

199 cov = self.run_doit() 

+

200 cov.xml_report() 

+

201 dom = ElementTree.parse("coverage.xml") 

+

202 self.assert_source(dom, ".") 

+

203 sources = dom.findall(".//source") 

+

204 assert len(sources) == 1 

+

205 

+

206 def test_deep_source(self): 

+

207 # When using source=, the XML report needs to mention those directories 

+

208 # in the <source> elements. 

+

209 # https://github.com/nedbat/coveragepy/issues/439 

+

210 self.make_file("src/main/foo.py", "a = 1") 

+

211 self.make_file("also/over/there/bar.py", "b = 2") 

+

212 cov = coverage.Coverage(source=["src/main", "also/over/there", "not/really"]) 

+

213 cov.start() 

+

214 mod_foo = import_local_file("foo", "src/main/foo.py") # pragma: nested 

+

215 mod_bar = import_local_file("bar", "also/over/there/bar.py") # pragma: nested 

+

216 cov.stop() # pragma: nested 

+

217 cov.xml_report([mod_foo, mod_bar]) 

+

218 dom = ElementTree.parse("coverage.xml") 

+

219 

+

220 self.assert_source(dom, "src/main") 

+

221 self.assert_source(dom, "also/over/there") 

+

222 sources = dom.findall(".//source") 

+

223 assert len(sources) == 2 

+

224 

+

225 foo_class = dom.findall(".//class[@name='foo.py']") 

+

226 assert len(foo_class) == 1 

+

227 assert foo_class[0].attrib == { 

+

228 'branch-rate': '0', 

+

229 'complexity': '0', 

+

230 'filename': 'foo.py', 

+

231 'line-rate': '1', 

+

232 'name': 'foo.py', 

+

233 } 

+

234 

+

235 bar_class = dom.findall(".//class[@name='bar.py']") 

+

236 assert len(bar_class) == 1 

+

237 assert bar_class[0].attrib == { 

+

238 'branch-rate': '0', 

+

239 'complexity': '0', 

+

240 'filename': 'bar.py', 

+

241 'line-rate': '1', 

+

242 'name': 'bar.py', 

+

243 } 

+

244 

+

245 def test_nonascii_directory(self): 

+

246 # https://github.com/nedbat/coveragepy/issues/573 

+

247 self.make_file("테스트/program.py", "a = 1") 

+

248 with change_dir("테스트"): 

+

249 cov = coverage.Coverage() 

+

250 self.start_import_stop(cov, "program") 

+

251 cov.xml_report() 

+

252 

+

253 

+

254def unbackslash(v): 

+

255 """Find strings in `v`, and replace backslashes with slashes throughout.""" 

+

256 if isinstance(v, (tuple, list)): 

+

257 return [unbackslash(vv) for vv in v] 

+

258 elif isinstance(v, dict): 

+

259 return {k: unbackslash(vv) for k, vv in v.items()} 

+

260 else: 

+

261 assert isinstance(v, str) 

+

262 return v.replace("\\", "/") 

+

263 

+

264 

+

265class XmlPackageStructureTest(XmlTestHelpers, CoverageTest): 

+

266 """Tests about the package structure reported in the coverage.xml file.""" 

+

267 

+

268 def package_and_class_tags(self, cov): 

+

269 """Run an XML report on `cov`, and get the package and class tags.""" 

+

270 cov.xml_report() 

+

271 dom = ElementTree.parse("coverage.xml") 

+

272 for node in dom.iter(): 

+

273 if node.tag in ('package', 'class'): 

+

274 yield (node.tag, {a:v for a,v in node.items() if a in ('name', 'filename')}) 

+

275 

+

276 def assert_package_and_class_tags(self, cov, result): 

+

277 """Check the XML package and class tags from `cov` match `result`.""" 

+

278 assert unbackslash(list(self.package_and_class_tags(cov))) == unbackslash(result) 

+

279 

+

280 def test_package_names(self): 

+

281 self.make_tree(width=1, depth=3) 

+

282 self.make_file("main.py", """\ 

+

283 from d0.d0 import f0 

+

284 """) 

+

285 cov = coverage.Coverage(source=".") 

+

286 self.start_import_stop(cov, "main") 

+

287 self.assert_package_and_class_tags(cov, [ 

+

288 ('package', {'name': "."}), 

+

289 ('class', {'filename': "main.py", 'name': "main.py"}), 

+

290 ('package', {'name': "d0"}), 

+

291 ('class', {'filename': "d0/__init__.py", 'name': "__init__.py"}), 

+

292 ('class', {'filename': "d0/f0.py", 'name': "f0.py"}), 

+

293 ('package', {'name': "d0.d0"}), 

+

294 ('class', {'filename': "d0/d0/__init__.py", 'name': "__init__.py"}), 

+

295 ('class', {'filename': "d0/d0/f0.py", 'name': "f0.py"}), 

+

296 ]) 

+

297 

+

298 def test_package_depth_1(self): 

+

299 self.make_tree(width=1, depth=4) 

+

300 self.make_file("main.py", """\ 

+

301 from d0.d0 import f0 

+

302 """) 

+

303 cov = coverage.Coverage(source=".") 

+

304 self.start_import_stop(cov, "main") 

+

305 

+

306 cov.set_option("xml:package_depth", 1) 

+

307 self.assert_package_and_class_tags(cov, [ 

+

308 ('package', {'name': "."}), 

+

309 ('class', {'filename': "main.py", 'name': "main.py"}), 

+

310 ('package', {'name': "d0"}), 

+

311 ('class', {'filename': "d0/__init__.py", 'name': "__init__.py"}), 

+

312 ('class', {'filename': "d0/d0/__init__.py", 'name': "d0/__init__.py"}), 

+

313 ('class', {'filename': "d0/d0/d0/__init__.py", 'name': "d0/d0/__init__.py"}), 

+

314 ('class', {'filename': "d0/d0/d0/f0.py", 'name': "d0/d0/f0.py"}), 

+

315 ('class', {'filename': "d0/d0/f0.py", 'name': "d0/f0.py"}), 

+

316 ('class', {'filename': "d0/f0.py", 'name': "f0.py"}), 

+

317 ]) 

+

318 

+

319 def test_package_depth_2(self): 

+

320 self.make_tree(width=1, depth=4) 

+

321 self.make_file("main.py", """\ 

+

322 from d0.d0 import f0 

+

323 """) 

+

324 cov = coverage.Coverage(source=".") 

+

325 self.start_import_stop(cov, "main") 

+

326 

+

327 cov.set_option("xml:package_depth", 2) 

+

328 self.assert_package_and_class_tags(cov, [ 

+

329 ('package', {'name': "."}), 

+

330 ('class', {'filename': "main.py", 'name': "main.py"}), 

+

331 ('package', {'name': "d0"}), 

+

332 ('class', {'filename': "d0/__init__.py", 'name': "__init__.py"}), 

+

333 ('class', {'filename': "d0/f0.py", 'name': "f0.py"}), 

+

334 ('package', {'name': "d0.d0"}), 

+

335 ('class', {'filename': "d0/d0/__init__.py", 'name': "__init__.py"}), 

+

336 ('class', {'filename': "d0/d0/d0/__init__.py", 'name': "d0/__init__.py"}), 

+

337 ('class', {'filename': "d0/d0/d0/f0.py", 'name': "d0/f0.py"}), 

+

338 ('class', {'filename': "d0/d0/f0.py", 'name': "f0.py"}), 

+

339 ]) 

+

340 

+

341 def test_package_depth_3(self): 

+

342 self.make_tree(width=1, depth=4) 

+

343 self.make_file("main.py", """\ 

+

344 from d0.d0 import f0 

+

345 """) 

+

346 cov = coverage.Coverage(source=".") 

+

347 self.start_import_stop(cov, "main") 

+

348 

+

349 cov.set_option("xml:package_depth", 3) 

+

350 self.assert_package_and_class_tags(cov, [ 

+

351 ('package', {'name': "."}), 

+

352 ('class', {'filename': "main.py", 'name': "main.py"}), 

+

353 ('package', {'name': "d0"}), 

+

354 ('class', {'filename': "d0/__init__.py", 'name': "__init__.py"}), 

+

355 ('class', {'filename': "d0/f0.py", 'name': "f0.py"}), 

+

356 ('package', {'name': "d0.d0"}), 

+

357 ('class', {'filename': "d0/d0/__init__.py", 'name': "__init__.py"}), 

+

358 ('class', {'filename': "d0/d0/f0.py", 'name': "f0.py"}), 

+

359 ('package', {'name': "d0.d0.d0"}), 

+

360 ('class', {'filename': "d0/d0/d0/__init__.py", 'name': "__init__.py"}), 

+

361 ('class', {'filename': "d0/d0/d0/f0.py", 'name': "f0.py"}), 

+

362 ]) 

+

363 

+
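The three tests above vary xml:package_depth, which limits how many directory levels become separate <package> elements; modules below the cut-off stay in the nearest reported package and keep the rest of their path in the class name, as the depth-1 expectations show. A minimal sketch of setting the option through the public API, mirroring the tests (the main module being imported is illustrative):

    import coverage

    cov = coverage.Coverage(source=["."])
    cov.start()
    import main  # noqa: F401  -- hypothetical module under measurement
    cov.stop()

    # Equivalent to "[xml] package_depth = 2" in .coveragerc.
    cov.set_option("xml:package_depth", 2)
    cov.xml_report(outfile="coverage.xml")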

364 def test_source_prefix(self): 

+

365 # https://github.com/nedbat/coveragepy/issues/465 

+

366 # https://github.com/nedbat/coveragepy/issues/526 

+

367 self.make_file("src/mod.py", "print(17)") 

+

368 cov = coverage.Coverage(source=["src"]) 

+

369 self.start_import_stop(cov, "mod", modfile="src/mod.py") 

+

370 

+

371 self.assert_package_and_class_tags(cov, [ 

+

372 ('package', {'name': "."}), 

+

373 ('class', {'filename': "mod.py", 'name': "mod.py"}), 

+

374 ]) 

+

375 dom = ElementTree.parse("coverage.xml") 

+

376 self.assert_source(dom, "src") 

+

377 

+

378 def test_relative_source(self): 

+

379 self.make_file("src/mod.py", "print(17)") 

+

380 cov = coverage.Coverage(source=["src"]) 

+

381 cov.set_option("run:relative_files", True) 

+

382 self.start_import_stop(cov, "mod", modfile="src/mod.py") 

+

383 cov.xml_report() 

+

384 

+

385 with open("coverage.xml") as x: 

+

386 print(x.read()) 

+

387 dom = ElementTree.parse("coverage.xml") 

+

388 elts = dom.findall(".//sources/source") 

+

389 assert [elt.text for elt in elts] == ["src"] 

+

390 

+

391 

+
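test_relative_source checks that with run:relative_files enabled the <sources><source> element records the relative directory ("src") rather than an absolute path. A minimal sketch of the same check outside the test classes, assuming src/mod.py exists next to the script:

    import sys
    from xml.etree import ElementTree
    import coverage

    cov = coverage.Coverage(source=["src"])
    cov.set_option("run:relative_files", True)
    sys.path.insert(0, "src")
    cov.start()
    import mod  # noqa: F401  -- the module living in src/
    cov.stop()
    cov.xml_report()

    dom = ElementTree.parse("coverage.xml")
    assert [elt.text for elt in dom.findall(".//sources/source")] == ["src"]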

392def compare_xml(expected, actual, **kwargs): 

+

393 """Specialized compare function for our XML files.""" 

+

394 source_path = coverage.files.relative_directory().rstrip(r"\/") 

+

395 

+

396 scrubs = [ 

+

397 (r' timestamp="\d+"', ' timestamp="TIMESTAMP"'), 

+

398 (r' version="[-.\w]+"', ' version="VERSION"'), 

+

399 (r'<source>\s*.*?\s*</source>', '<source>%s</source>' % re.escape(source_path)), 

+

400 (r'/coverage.readthedocs.io/?[-.\w/]*', '/coverage.readthedocs.io/VER'), 

+

401 ] 

+

402 compare(expected, actual, scrubs=scrubs, **kwargs) 

+

403 

+

404 

+
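compare() itself comes from the shared gold-file helpers and is not shown in this report; the scrubs list is a sequence of (regex, replacement) pairs applied to both sides before comparison so that volatile values such as timestamps and version numbers cannot cause spurious differences. A minimal sketch of how such scrubbing might work, assuming the same (pattern, replacement) convention:

    import re

    def scrub(text, scrubs):
        # Apply each (regex, replacement) pair in turn.
        for pattern, replacement in scrubs:
            text = re.sub(pattern, replacement, text)
        return text

    line = '<coverage timestamp="1616412345" version="5.5">'
    scrubbed = scrub(line, [
        (r' timestamp="\d+"', ' timestamp="TIMESTAMP"'),
        (r' version="[-.\w]+"', ' version="VERSION"'),
    ])
    assert scrubbed == '<coverage timestamp="TIMESTAMP" version="VERSION">'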

405class XmlGoldTest(CoverageTest): 

+

406 """Tests of XML reporting that use gold files.""" 

+

407 

+

408 def test_a_xml_1(self): 

+

409 self.make_file("a.py", """\ 

+

410 if 1 < 2: 

+

411 # Needed a < to look at HTML entities. 

+

412 a = 3 

+

413 else: 

+

414 a = 4 

+

415 """) 

+

416 

+

417 cov = coverage.Coverage() 

+

418 a = self.start_import_stop(cov, "a") 

+

419 cov.xml_report(a, outfile="coverage.xml") 

+

420 compare_xml(gold_path("xml/x_xml"), ".", actual_extra=True) 

+

421 

+

422 def test_a_xml_2(self): 

+

423 self.make_file("a.py", """\ 

+

424 if 1 < 2: 

+

425 # Needed a < to look at HTML entities. 

+

426 a = 3 

+

427 else: 

+

428 a = 4 

+

429 """) 

+

430 

+

431 self.make_file("run_a_xml_2.ini", """\ 

+

432 # Put all the XML output in xml_2 

+

433 [xml] 

+

434 output = xml_2/coverage.xml 

+

435 """) 

+

436 

+

437 cov = coverage.Coverage(config_file="run_a_xml_2.ini") 

+

438 a = self.start_import_stop(cov, "a") 

+

439 cov.xml_report(a) 

+

440 compare_xml(gold_path("xml/x_xml"), "xml_2") 

+

441 

+
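test_a_xml_2 routes the report through the [xml] output setting in an .ini file; the same effect is available programmatically via set_option. A minimal sketch, assuming an a.py like the one above is importable from the current directory:

    import coverage

    cov = coverage.Coverage()
    cov.start()
    import a  # noqa: F401  -- the module under measurement
    cov.stop()

    # Equivalent to "[xml] output = xml_2/coverage.xml" in a config file.
    cov.set_option("xml:output", "xml_2/coverage.xml")
    cov.xml_report()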

442 def test_y_xml_branch(self): 

+

443 self.make_file("y.py", """\ 

+

444 def choice(x): 

+

445 if x < 2: 

+

446 return 3 

+

447 else: 

+

448 return 4 

+

449 

+

450 assert choice(1) == 3 

+

451 """) 

+

452 

+

453 cov = coverage.Coverage(branch=True) 

+

454 y = self.start_import_stop(cov, "y") 

+

455 cov.xml_report(y, outfile="y_xml_branch/coverage.xml") 

+

456 compare_xml(gold_path("xml/y_xml_branch"), "y_xml_branch") 

+
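test_y_xml_branch is the only gold test in this group that measures branch coverage; with branch=True the generated XML also carries branch rates and condition-coverage details for partially taken branches such as the `if x < 2` above. A minimal sketch of producing the same kind of report, assuming a y.py like the one in the test:

    import coverage

    cov = coverage.Coverage(branch=True)
    cov.start()
    import y  # noqa: F401  -- module with the partially covered branch
    cov.stop()
    cov.xml_report(outfile="y_xml_branch/coverage.xml")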
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_cp1252_py.html b/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_cp1252_py.html new file mode 100644 index 000000000..ff0ffeaf4 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_cp1252_py.html @@ -0,0 +1,71 @@ + + + + + + Coverage for tests/zipmods.zip/encoded_cp1252.py: 100.000% + + + + + + + + + + +

1# coding: cp1252 

+

2text = u"“hi”" 

+

3ords = [8220, 104, 105, 8221] 

+

4assert [ord(c) for c in text] == ords 

+

5print(u"All OK with cp1252") 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_gb2312_py.html b/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_gb2312_py.html new file mode 100644 index 000000000..9d8686aa9 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_gb2312_py.html @@ -0,0 +1,71 @@ + + + + + + Coverage for tests/zipmods.zip/encoded_gb2312.py: 100.000% + + + + + + + + + + +

1# coding: gb2312 

+

2text = u"你好,世界" 

+

3ords = [20320, 22909, 65292, 19990, 30028] 

+

4assert [ord(c) for c in text] == ords 

+

5print(u"All OK with gb2312") 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_hebrew_py.html b/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_hebrew_py.html new file mode 100644 index 000000000..159673942 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_hebrew_py.html @@ -0,0 +1,71 @@ + + + + + + Coverage for tests/zipmods.zip/encoded_hebrew.py: 100.000% + + + + + + + + + + +

1# coding: hebrew 

+

2text = u"שלום, עולם" 

+

3ords = [1513, 1500, 1493, 1501, 44, 32, 1506, 1493, 1500, 1501] 

+

4assert [ord(c) for c in text] == ords 

+

5print(u"All OK with hebrew") 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_shift_jis_py.html b/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_shift_jis_py.html new file mode 100644 index 000000000..f54f71f00 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_shift_jis_py.html @@ -0,0 +1,71 @@ + + + + + + Coverage for tests/zipmods.zip/encoded_shift_jis.py: 100.000% + + + + + + + + + + +

1# coding: shift_jis 

+

2text = u"こんにちは世界" 

+

3ords = [12371, 12435, 12395, 12385, 12399, 19990, 30028] 

+

4assert [ord(c) for c in text] == ords 

+

5print(u"All OK with shift_jis") 

+
+ + + diff --git a/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_utf8_py.html b/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_utf8_py.html new file mode 100644 index 000000000..43754d969 --- /dev/null +++ b/reports/20210322_66173dc24d/htmlcov/tests_zipmods_zip_encoded_utf8_py.html @@ -0,0 +1,71 @@ + + + + + + Coverage for tests/zipmods.zip/encoded_utf8.py: 100.000% + + + + + + + + + + +

1# coding: utf8 

+

2text = u"ⓗⓔⓛⓛⓞ, ⓦⓞⓡⓛⓓ" 

+

3ords = [9431, 9428, 9435, 9435, 9438, 44, 32, 9446, 9438, 9441, 9435, 9427] 

+

4assert [ord(c) for c in text] == ords 

+

5print(u"All OK with utf8") 

+
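The five zipmods modules above all follow the same pattern: a PEP 263 coding cookie on line 1, a non-ASCII literal, and an assertion on its code points, so coverage.py's source reading is exercised against several encodings. A minimal sketch of inspecting such a cookie with the standard library, assuming the file has been extracted from tests/zipmods.zip into the working directory:

    import tokenize

    # detect_encoding reads at most two lines and honours the coding cookie.
    with open("encoded_utf8.py", "rb") as f:
        encoding, first_lines = tokenize.detect_encoding(f.readline)
    print(encoding)  # the declared encoding -- "utf8" for this file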
+ + +