From 015cf8395db28ca202df029d030717ad498e7a79 Mon Sep 17 00:00:00 2001
From: Andrey Bienkowski
Date: Tue, 8 Feb 2022 18:15:36 +0300
Subject: [PATCH] pipx run black --line-length=100 .

---
 conftest.py | 6 +-
 docs/conf.py | 4 +-
 scripts/glibc_check.py | 4 +-
 scripts/py36-blake2.py | 4 +-
 setup.py | 15 +-
 setup_compress.py | 4 +-
 setup_crypto.py | 4 +-
 setup_docs.py | 51 +-
 src/borg/archive.py | 189 ++------
 src/borg/archiver.py | 412 +++++-----------
 src/borg/cache.py | 142 ++----
 src/borg/constants.py | 8 +-
 src/borg/crypto/file_integrity.py | 12 +-
 src/borg/crypto/key.py | 32 +-
 src/borg/crypto/keymanager.py | 23 +-
 src/borg/crypto/nonces.py | 12 +-
 src/borg/fuse.py | 62 +--
 src/borg/fuse_impl.py | 4 +-
 src/borg/helpers/checks.py | 5 +-
 src/borg/helpers/fs.py | 15 +-
 src/borg/helpers/manifest.py | 8 +-
 src/borg/helpers/msgpack.py | 10 +-
 src/borg/helpers/parseformat.py | 41 +-
 src/borg/helpers/process.py | 35 +-
 src/borg/helpers/progress.py | 16 +-
 src/borg/helpers/time.py | 4 +-
 src/borg/logger.py | 12 +-
 src/borg/lrucache.py | 3 +-
 src/borg/nanorst.py | 10 +-
 src/borg/patterns.py | 20 +-
 src/borg/platform/base.py | 4 +-
 src/borg/remote.py | 115 ++---
 src/borg/repository.py | 151 ++----
 src/borg/selftest.py | 4 +-
 src/borg/testsuite/__init__.py | 13 +-
 src/borg/testsuite/archive.py | 20 +-
 src/borg/testsuite/archiver.py | 677 +++++++-------------
 src/borg/testsuite/cache.py | 12 +-
 src/borg/testsuite/chunker.py | 62 +--
 src/borg/testsuite/chunker_slow.py | 9 +-
 src/borg/testsuite/compress.py | 16 +-
 src/borg/testsuite/crypto.py | 64 +--
 src/borg/testsuite/file_integrity.py | 34 +-
 src/borg/testsuite/hashindex.py | 8 +-
 src/borg/testsuite/helpers.py | 61 +--
 src/borg/testsuite/key.py | 16 +-
 src/borg/testsuite/locking.py | 10 +-
 src/borg/testsuite/nanorst.py | 9 +-
 src/borg/testsuite/nonces.py | 12 +-
 src/borg/testsuite/patterns.py | 9 +-
 src/borg/testsuite/platform.py | 16 +-
 src/borg/testsuite/remote.py | 4 +-
 src/borg/testsuite/repository.py | 85 +---
 src/borg/testsuite/xattr.py | 8 +-
 src/borg/upgrader.py | 31 +-
 src/borg/xattr.py | 12 +-
 56 files changed, 654 insertions(+), 1975 deletions(-)

diff --git a/conftest.py b/conftest.py
index c90788fbb81..f83c89e7277 100644
--- a/conftest.py
+++ b/conftest.py
@@ -37,11 +37,7 @@ def clean_env(tmpdir_factory, monkeypatch):
     monkeypatch.setenv("XDG_CONFIG_HOME", str(tmpdir_factory.mktemp("xdg-config-home")))
     monkeypatch.setenv("XDG_CACHE_HOME", str(tmpdir_factory.mktemp("xdg-cache-home")))
     # also avoid to use anything from the outside environment:
-    keys = [
-        key
-        for key in os.environ
-        if key.startswith("BORG_") and key not in ("BORG_FUSE_IMPL",)
-    ]
+    keys = [key for key in os.environ if key.startswith("BORG_") and key not in ("BORG_FUSE_IMPL",)]
     for key in keys:
         monkeypatch.delenv(key, raising=False)

diff --git a/docs/conf.py b/docs/conf.py
index 5225c5e9169..fd9f2be00c7 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -41,9 +41,7 @@
 # General information about the project.
project = "Borg - Deduplicating Archiver" -copyright = ( - "2010-2014 Jonas Borgström, 2015-2022 The Borg Collective (see AUTHORS file)" -) +copyright = "2010-2014 Jonas Borgström, 2015-2022 The Borg Collective (see AUTHORS file)" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/scripts/glibc_check.py b/scripts/glibc_check.py index 8d479a25b70..346cc166975 100755 --- a/scripts/glibc_check.py +++ b/scripts/glibc_check.py @@ -36,9 +36,7 @@ def main(): objdump % filename, shell=True, stderr=subprocess.STDOUT ) output = output.decode() - versions = set( - parse_version(match.group(1)) for match in glibc_re.finditer(output) - ) + versions = set(parse_version(match.group(1)) for match in glibc_re.finditer(output)) requires_glibc = max(versions) overall_versions.add(requires_glibc) if verbose: diff --git a/scripts/py36-blake2.py b/scripts/py36-blake2.py index a82e8817601..e3253221185 100644 --- a/scripts/py36-blake2.py +++ b/scripts/py36-blake2.py @@ -20,9 +20,7 @@ def test_b2(b2_input, b2_output): test_b2( - bytes.fromhex( - "037fb9b75b20d623f1d5a568050fccde4a1b7c5f5047432925e941a17c7a2d0d7061796c6f6164" - ), + bytes.fromhex("037fb9b75b20d623f1d5a568050fccde4a1b7c5f5047432925e941a17c7a2d0d7061796c6f6164"), bytes.fromhex("a22d4fc81bb61c3846c334a09eaf28d22dd7df08c9a7a41e713ef28d80eebd45"), ) diff --git a/setup.py b/setup.py index 3b97a49df5d..73d1ef861b9 100644 --- a/setup.py +++ b/setup.py @@ -205,9 +205,7 @@ def members_appended(*ds): checksums_ext_kwargs = members_appended( dict(sources=[checksums_source]), - setup_checksums.xxhash_ext_kwargs( - pc, prefer_system_libxxhash, system_prefix_libxxhash - ), + setup_checksums.xxhash_ext_kwargs(pc, prefer_system_libxxhash, system_prefix_libxxhash), ) ext_modules += [ @@ -220,12 +218,8 @@ def members_appended(*ds): ] posix_ext = Extension("borg.platform.posix", [platform_posix_source]) - linux_ext = Extension( - "borg.platform.linux", [platform_linux_source], libraries=["acl"] - ) - syncfilerange_ext = Extension( - "borg.platform.syncfilerange", [platform_syncfilerange_source] - ) + linux_ext = Extension("borg.platform.linux", [platform_linux_source], libraries=["acl"]) + syncfilerange_ext = Extension("borg.platform.syncfilerange", [platform_syncfilerange_source]) freebsd_ext = Extension("borg.platform.freebsd", [platform_freebsd_source]) darwin_ext = Extension("borg.platform.darwin", [platform_darwin_source]) windows_ext = Extension("borg.platform.windows", [platform_windows_source]) @@ -246,8 +240,7 @@ def members_appended(*ds): # this breaks chained commands like 'clean sdist' cythonizing = ( len(sys.argv) > 1 - and sys.argv[1] - not in (("clean", "clean2", "egg_info", "--help-commands", "--version")) + and sys.argv[1] not in (("clean", "clean2", "egg_info", "--help-commands", "--version")) and "--help" not in sys.argv[1:] ) diff --git a/setup_compress.py b/setup_compress.py index e4a6b378b65..2145ff9d488 100644 --- a/setup_compress.py +++ b/setup_compress.py @@ -72,9 +72,7 @@ def multi_join(paths, *path_segments): ] -def zstd_ext_kwargs( - pc, prefer_system, system_prefix, multithreaded=False, legacy=False -): +def zstd_ext_kwargs(pc, prefer_system, system_prefix, multithreaded=False, legacy=False): if prefer_system: if system_prefix: print("Detected and preferring libzstd [via BORG_LIBZSTD_PREFIX]") diff --git a/setup_crypto.py b/setup_crypto.py index 9b9463b0686..859d07aaf22 100644 --- a/setup_crypto.py +++ b/setup_crypto.py @@ 
-31,6 +31,4 @@ def crypto_ext_kwargs(pc, system_prefix): print("Detected OpenSSL [via pkg-config]") return pc.parse("libcrypto") - raise Exception( - "Could not find OpenSSL lib/headers, please set BORG_OPENSSL_PREFIX" - ) + raise Exception("Could not find OpenSSL lib/headers, please set BORG_OPENSSL_PREFIX") diff --git a/setup_docs.py b/setup_docs.py index e8c2451827a..83684b56aff 100644 --- a/setup_docs.py +++ b/setup_docs.py @@ -19,13 +19,11 @@ def long_desc_from_readme(): assert start >= 0 long_description = "\n" + long_description[start:] # remove badges - long_description = re.compile( - r"^\.\. start-badges.*^\.\. end-badges", re.M | re.S - ).sub("", long_description) - # remove unknown directives - long_description = re.compile(r"^\.\. highlight:: \w+$", re.M).sub( + long_description = re.compile(r"^\.\. start-badges.*^\.\. end-badges", re.M | re.S).sub( "", long_description ) + # remove unknown directives + long_description = re.compile(r"^\.\. highlight:: \w+$", re.M).sub("", long_description) return long_description @@ -38,8 +36,7 @@ def format_metavar(option): return option.metavar else: raise ValueError( - "Can't format metavar %s, unknown nargs %s!" - % (option.metavar, option.nargs) + "Can't format metavar %s, unknown nargs %s!" % (option.metavar, option.nargs) ) @@ -77,9 +74,7 @@ def generate_level(self, prefix, parser, Archiver, extra_choices=None): is_subcommand = False choices = {} for action in parser._actions: - if action.choices is not None and "SubParsersAction" in str( - action.__class__ - ): + if action.choices is not None and "SubParsersAction" in str(action.__class__): is_subcommand = True for cmd, parser in action.choices.items(): choices[prefix + cmd] = parser @@ -165,9 +160,7 @@ def html_write(s): for group in parser._action_groups: if group.title == "Common options": # (no of columns used, columns, ...) - rows.append( - (1, ".. class:: borg-common-opt-ref\n\n:ref:`common_options`") - ) + rows.append((1, ".. class:: borg-common-opt-ref\n\n:ref:`common_options`")) else: if not group._group_actions: continue @@ -177,21 +170,15 @@ def html_write(s): rows.append((1, group_header)) if is_positional_group(group): for option in group._group_actions: - rows.append( - (3, "", "``%s``" % option.metavar, option.help or "") - ) + rows.append((3, "", "``%s``" % option.metavar, option.help or "")) else: for option in group._group_actions: if option.metavar: option_fmt = "``%s " + option.metavar + "``" else: option_fmt = "``%s``" - option_str = ", ".join( - option_fmt % s for s in option.option_strings - ) - option_desc = textwrap.dedent( - (option.help or "") % option.__dict__ - ) + option_str = ", ".join(option_fmt % s for s in option.option_strings) + option_desc = textwrap.dedent((option.help or "") % option.__dict__) rows.append((3, "", option_str, option_desc)) fp.write(".. 
only:: html\n\n") @@ -393,9 +380,7 @@ def generate_level(self, prefix, parser, Archiver, extra_choices=None): is_subcommand = False choices = {} for action in parser._actions: - if action.choices is not None and "SubParsersAction" in str( - action.__class__ - ): + if action.choices is not None and "SubParsersAction" in str(action.__class__): is_subcommand = True for cmd, parser in action.choices.items(): choices[prefix + cmd] = parser @@ -428,9 +413,7 @@ def generate_level(self, prefix, parser, Archiver, extra_choices=None): ][0] for subcommand in subparsers.choices: write("| borg", "[common options]", command, subcommand, "...") - self.see_also.setdefault(command, []).append( - "%s-%s" % (command, subcommand) - ) + self.see_also.setdefault(command, []).append("%s-%s" % (command, subcommand)) else: if command == "borgfs": write(command, end="") @@ -499,9 +482,7 @@ def build_intro_page(self): with open("docs/man_intro.rst") as fd: man_intro = fd.read() - self.write_man_header( - write, man_title, "deduplicating and encrypting backup tool" - ) + self.write_man_header(write, man_title, "deduplicating and encrypting backup tool") self.gen_man_page(man_title, doc.getvalue() + man_intro) def new_doc(self): @@ -551,9 +532,7 @@ def write_examples(self, write, command): examples = examples.replace( "``docs/misc/prune-example.txt``:", "``docs/misc/prune-example.txt``." ) - examples = examples.replace( - ".. highlight:: none\n", "" - ) # we don't support highlight + examples = examples.replace(".. highlight:: none\n", "") # we don't support highlight examples = re.sub( "^(~+)$", lambda matches: "+" * len(matches.group(0)), @@ -602,9 +581,7 @@ def write_options(self, write, parser): for group in parser._action_groups: if group.title == "Common options" or not group._group_actions: continue - title = ( - "arguments" if group.title == "positional arguments" else group.title - ) + title = "arguments" if group.title == "positional arguments" else group.title self.write_heading(write, title, "+") self.write_options_group(write, group) diff --git a/src/borg/archive.py b/src/borg/archive.py index e52c46906c8..ad1ee4a0caf 100644 --- a/src/borg/archive.py +++ b/src/borg/archive.py @@ -251,9 +251,7 @@ def stat_update_check(st_old, st_curr): @contextmanager def OsOpen(*, flags, path=None, parent_fd=None, name=None, noatime=False, op="open"): with backup_io(op): - fd = os_open( - path=path, parent_fd=parent_fd, name=name, flags=flags, noatime=noatime - ) + fd = os_open(path=path, parent_fd=parent_fd, name=name, flags=flags, noatime=noatime) try: yield fd finally: @@ -333,9 +331,7 @@ def _preload(chunks): yield item def fetch_many(self, ids, is_preloaded=False): - for id_, data in zip( - ids, self.repository.get_many(ids, is_preloaded=is_preloaded) - ): + for id_, data in zip(ids, self.repository.get_many(ids, is_preloaded=is_preloaded)): yield self.key.decrypt(id_, data) @@ -387,9 +383,7 @@ def __init__(self, cache, key, stats, chunker_params=ITEMS_CHUNKER_PARAMS): self.stats = stats def write_chunk(self, chunk): - id_, _, _ = self.cache.add_chunk( - self.key.id_hash(chunk), chunk, self.stats, wait=False - ) + id_, _, _ = self.cache.add_chunk(self.key.id_hash(chunk), chunk, self.stats, wait=False) self.cache.repository.async_response(wait=False) return id_ @@ -458,7 +452,9 @@ def __init__( self.iec = iec self.show_progress = progress self.name = name # overwritten later with name from archive metadata - self.name_in_manifest = name # can differ from .name later (if borg check fixed duplicate archive names) + 
self.name_in_manifest = ( + name # can differ from .name later (if borg check fixed duplicate archive names) + ) self.comment = None self.checkpoint_interval = checkpoint_interval self.numeric_ids = numeric_ids @@ -601,14 +597,10 @@ def item_filter(self, item, filter=None): return False return filter(item) if filter else True - def iter_items( - self, filter=None, partial_extract=False, preload=False, hardlink_masters=None - ): + def iter_items(self, filter=None, partial_extract=False, preload=False, hardlink_masters=None): # note: when calling this with preload=True, later fetch_many() must be called with # is_preloaded=True or the RemoteRepository code will leak memory! - assert ( - not (filter and partial_extract and preload) or hardlink_masters is not None - ) + assert not (filter and partial_extract and preload) or hardlink_masters is not None for item in self.pipeline.unpack_many( self.metadata.items, partial_extract=partial_extract, @@ -676,9 +668,7 @@ def save( ) metadata.update(additional_metadata or {}) metadata = ArchiveItem(metadata) - data = self.key.pack_and_authenticate_metadata( - metadata.as_dict(), context=b"archive" - ) + data = self.key.pack_and_authenticate_metadata(metadata.as_dict(), context=b"archive") self.id = self.key.id_hash(data) try: self.cache.add_chunk(self.id, data, self.stats) @@ -754,9 +744,7 @@ def extract_helper( hardlink_set = False # Hard link? if "source" in item: - source = os.path.join( - dest, *item.source.split(os.sep)[stripped_components:] - ) + source = os.path.join(dest, *item.source.split(os.sep)[stripped_components:]) chunks, link_target = hardlink_masters.get(item.source, (None, source)) if link_target and has_link: # Hard link was extracted previously, just link @@ -870,9 +858,7 @@ def make_parent(path): ids = [c.id for c in item.chunks] for data in self.pipeline.fetch_many(ids, is_preloaded=True): if pi: - pi.show( - increase=len(data), info=[remove_surrogates(item.path)] - ) + pi.show(increase=len(data), info=[remove_surrogates(item.path)]) with backup_io("write"): if sparse and zeros.startswith(data): # all-zero chunk: create a hole in a sparse file @@ -983,9 +969,7 @@ def restore_attrs(self, path, item, symlink=False, fd=None): if fd: os.utime(fd, None, ns=(atime, birthtime)) else: - os.utime( - path, None, ns=(atime, birthtime), follow_symlinks=False - ) + os.utime(path, None, ns=(atime, birthtime), follow_symlinks=False) except OSError: # some systems don't support calling utime on a symlink pass @@ -1002,9 +986,7 @@ def restore_attrs(self, path, item, symlink=False, fd=None): if not self.noxattrs: # chown removes Linux capabilities, so set the extended attributes at the end, after chown, since they include # the Linux capabilities in the "security.capability" attribute. - warning = xattr.set_all( - fd or path, item.get("xattrs", {}), follow_symlinks=False - ) + warning = xattr.set_all(fd or path, item.get("xattrs", {}), follow_symlinks=False) if warning: set_ec(EXIT_WARNING) # bsdflags include the immutable flag and need to be set last: @@ -1105,15 +1087,11 @@ def chunk_decref(id, stats, part=False): # so there is nothing pending when we return and our caller wants to commit. pass if error: - logger.warning( - "forced deletion succeeded, but the deleted archive was corrupted." 
- ) + logger.warning("forced deletion succeeded, but the deleted archive was corrupted.") logger.warning("borg check --repair is required to free all space.") @staticmethod - def compare_archives_iter( - archive1, archive2, matcher=None, can_compare_chunk_ids=False - ): + def compare_archives_iter(archive1, archive2, matcher=None, can_compare_chunk_ids=False): """ Yields tuples with a path and an ItemDiff instance describing changes/indicating equality. @@ -1207,9 +1185,7 @@ def defer_if_necessary(item1, item2): class MetadataCollector: - def __init__( - self, *, noatime, noctime, nobirthtime, numeric_ids, noflags, noacls, noxattrs - ): + def __init__(self, *, noatime, noctime, nobirthtime, numeric_ids, noflags, noacls, noxattrs): self.noatime = noatime self.noctime = noctime self.numeric_ids = numeric_ids @@ -1246,11 +1222,7 @@ def stat_ext_attrs(self, st, path, fd=None): attrs = {} with backup_io("extended stat"): flags = 0 if self.noflags else get_flags(path, st, fd=fd) - xattrs = ( - {} - if self.noxattrs - else xattr.get_all(fd or path, follow_symlinks=False) - ) + xattrs = {} if self.noxattrs else xattr.get_all(fd or path, follow_symlinks=False) if not self.noacls: acl_get(path, attrs, st, self.numeric_ids, fd=fd) if xattrs: @@ -1294,9 +1266,7 @@ def cached_hash(chunk, id_hash): class ChunksProcessor: # Processes an iterator of chunks for an Item - def __init__( - self, *, key, cache, add_item, write_checkpoint, checkpoint_interval, rechunkify - ): + def __init__(self, *, key, cache, add_item, write_checkpoint, checkpoint_interval, rechunkify): self.key = key self.cache = cache self.add_item = add_item @@ -1330,9 +1300,7 @@ def maybe_checkpoint(self, item, from_chunk, part_number, forced=False): ): if sig_int_triggered: logger.info("checkpoint requested: starting checkpoint creation...") - from_chunk, part_number = self.write_part_file( - item, from_chunk, part_number - ) + from_chunk, part_number = self.write_part_file(item, from_chunk, part_number) self.last_checkpoint = time.monotonic() if sig_int_triggered: sig_int.action_completed() @@ -1498,9 +1466,7 @@ def process_dev(self, *, path, parent_fd, name, st, dev_type): with backup_io("stat"): st = stat_update_check( st, - os_stat( - path=path, parent_fd=parent_fd, name=name, follow_symlinks=False - ), + os_stat(path=path, parent_fd=parent_fd, name=name, follow_symlinks=False), ) item.rdev = st.st_rdev item.update(self.metadata_collector.stat_attrs(st, path)) @@ -1520,9 +1486,7 @@ def process_symlink(self, *, path, parent_fd, name, st): with backup_io("readlink"): source = os.readlink(fname, dir_fd=parent_fd) item.source = source - item.update( - self.metadata_collector.stat_attrs(st, path) - ) # can't use FD here? + item.update(self.metadata_collector.stat_attrs(st, path)) # can't use FD here? 
return status def process_pipe(self, *, path, cache, fd, mode, user, group): @@ -1566,9 +1530,7 @@ def process_file(self, *, path, parent_fd, name, st, cache, flags=flags_normal): hardlinked, hardlink_master, ): # no status yet - with OsOpen( - path=path, parent_fd=parent_fd, name=name, flags=flags, noatime=True - ) as fd: + with OsOpen(path=path, parent_fd=parent_fd, name=name, flags=flags, noatime=True) as fd: with backup_io("fstat"): st = stat_update_check(st, os.fstat(fd)) item.update(self.metadata_collector.stat_simple_attrs(st)) @@ -1582,9 +1544,7 @@ def process_file(self, *, path, parent_fd, name, st, cache, flags=flags_normal): if not is_special_file: hashed_path = safe_encode(os.path.join(self.cwd, path)) path_hash = self.key.id_hash(hashed_path) - known, ids = cache.file_known_and_unchanged( - hashed_path, path_hash, st - ) + known, ids = cache.file_known_and_unchanged(hashed_path, path_hash, st) else: # in --read-special mode, we may be called for special files. # there should be no information in the cache about special files processed in @@ -1599,14 +1559,10 @@ def process_file(self, *, path, parent_fd, name, st, cache, flags=flags_normal): status = "M" # cache said it is unmodified, but we lost a chunk: process file like modified break else: - chunks = [ - cache.chunk_incref(id_, self.stats) for id_ in ids - ] + chunks = [cache.chunk_incref(id_, self.stats) for id_ in ids] status = "U" # regular file, unchanged else: - status = ( - "M" if known else "A" - ) # regular file, modified or added + status = "M" if known else "A" # regular file, modified or added self.print_file_status(status, path) status = None # we already printed the status item.hardlink_master = hardlinked @@ -1631,8 +1587,7 @@ def process_file(self, *, path, parent_fd, name, st, cache, flags=flags_normal): # - fifos change naturally, because they are fed from the other side. no problem. # - blk/chr devices don't change ctime anyway. changed_while_backup = ( - not is_special_file - and st.st_ctime_ns != st2.st_ctime_ns + not is_special_file and st.st_ctime_ns != st2.st_ctime_ns ) if changed_while_backup: status = "C" # regular file changed while we backed it up, might be inconsistent/corrupt! @@ -1857,9 +1812,7 @@ def check( self.error_found = True del self.chunks[Manifest.MANIFEST_ID] self.manifest = self.rebuild_manifest() - self.rebuild_refcounts( - archive=archive, first=first, last=last, sort_by=sort_by, glob=glob - ) + self.rebuild_refcounts(archive=archive, first=first, last=last, sort_by=sort_by, glob=glob) self.orphan_chunks_check() self.finish(save_space=save_space) if self.error_found: @@ -1944,9 +1897,7 @@ def verify_data(self): defect_chunks.append(chunk_id) pi.finish() if chunks_count_index != chunks_count_segments: - logger.error( - "Repo/Chunks index object count vs. segment files object count mismatch." - ) + logger.error("Repo/Chunks index object count vs. segment files object count mismatch.") logger.error( "Repo/Chunks index: %d objects != segment files: %d objects", chunks_count_index, @@ -1971,11 +1922,7 @@ def verify_data(self): # from the underlying media. 
try: encrypted_data = self.repository.get(defect_chunk) - _chunk_id = ( - None - if defect_chunk == Manifest.MANIFEST_ID - else defect_chunk - ) + _chunk_id = None if defect_chunk == Manifest.MANIFEST_ID else defect_chunk self.key.decrypt(_chunk_id, encrypted_data) except IntegrityErrorBase: # failed twice -> get rid of this chunk @@ -2021,9 +1968,7 @@ def valid_archive(obj): # lost manifest on a older borg version than the most recent one that was ever used # within this repository (assuming that newer borg versions support more item keys). manifest = Manifest(self.key, self.repository) - archive_keys_serialized = [ - msgpack.packb(name.encode()) for name in ARCHIVE_KEYS - ] + archive_keys_serialized = [msgpack.packb(name.encode()) for name in ARCHIVE_KEYS] pi = ProgressIndicatorPercent( total=len(self.chunks), msg="Rebuilding manifest %6.2f%%", @@ -2059,9 +2004,7 @@ def valid_archive(obj): if new_name not in manifest.archives: break i += 1 - logger.warning( - "Duplicate archive name %s, storing as %s", name, new_name - ) + logger.warning("Duplicate archive name %s, storing as %s", name, new_name) name = new_name manifest.archives[name] = (chunk_id, archive.time) pi.finish() @@ -2114,9 +2057,7 @@ def replacement_chunk(size): chunks_replaced = False has_chunks_healthy = "chunks_healthy" in item chunks_current = item.chunks - chunks_healthy = ( - item.chunks_healthy if has_chunks_healthy else chunks_current - ) + chunks_healthy = item.chunks_healthy if has_chunks_healthy else chunks_current if has_chunks_healthy and len(chunks_current) != len(chunks_healthy): # should never happen, but there was issue #3218. logger.warning( @@ -2247,10 +2188,7 @@ def report(msg, chunk_id, chunk_no): def list_keys_safe(keys): return ", ".join( - ( - k.decode(errors="replace") if isinstance(k, bytes) else str(k) - for k in keys - ) + (k.decode(errors="replace") if isinstance(k, bytes) else str(k) for k in keys) ) def valid_item(obj): @@ -2312,9 +2250,7 @@ def valid_item(obj): sort_by=sort_by, glob=glob, first=first, last=last ) if glob and not archive_infos: - logger.warning( - "--glob-archives %s does not match any archives", glob - ) + logger.warning("--glob-archives %s does not match any archives", glob) if first and len(archive_infos) < first: logger.warning( "--first %d archives: only found %d archives", @@ -2348,11 +2284,7 @@ def valid_item(obj): with cache_if_remote(self.repository) as repository: for i, info in enumerate(archive_infos): pi.show(i) - logger.info( - "Analyzing archive {} ({}/{})".format( - info.name, i + 1, num_archives - ) - ) + logger.info("Analyzing archive {} ({}/{})".format(info.name, i + 1, num_archives)) archive_id = info.id if archive_id not in self.chunks: logger.error("Archive metadata block is missing!") @@ -2385,9 +2317,7 @@ def valid_item(obj): def orphan_chunks_check(self): if self.check_all: - unused = { - id_ for id_, entry in self.chunks.iteritems() if entry.refcount == 0 - } + unused = {id_ for id_, entry in self.chunks.iteritems() if entry.refcount == 0} orphaned = unused - self.possibly_superseded if orphaned: logger.error("{} orphaned objects found!".format(len(orphaned))) @@ -2563,19 +2493,10 @@ def chunk_processor(self, target, chunk): if chunk_id in self.seen_chunks: return self.cache.chunk_incref(chunk_id, target.stats) overwrite = self.recompress - if ( - self.recompress - and not self.always_recompress - and chunk_id in self.cache.chunks - ): + if self.recompress and not self.always_recompress and chunk_id in self.cache.chunks: # Check if this chunk is already 
compressed the way we want it - old_chunk = self.key.decrypt( - None, self.repository.get(chunk_id), decompress=False - ) - if ( - Compressor.detect(old_chunk).name - == self.key.compressor.decide(data).name - ): + old_chunk = self.key.decrypt(None, self.repository.get(chunk_id), decompress=False) + if Compressor.detect(old_chunk).name == self.key.compressor.decide(data).name: # Stored chunk has the same compression we wanted overwrite = False chunk_entry = self.cache.add_chunk( @@ -2586,9 +2507,7 @@ def chunk_processor(self, target, chunk): return chunk_entry def iter_chunks(self, archive, target, chunks): - chunk_iterator = archive.pipeline.fetch_many( - [chunk_id for chunk_id, _, _ in chunks] - ) + chunk_iterator = archive.pipeline.fetch_many([chunk_id for chunk_id, _, _ in chunks]) if target.recreate_rechunkify: # The target.chunker will read the file contents through ChunkIteratorFileWrapper chunk-by-chunk # (does not load the entire file into memory) @@ -2635,9 +2554,7 @@ def save(self, archive, target, comment=None, replace_original=True): if self.stats: target.start = _start target.end = datetime.utcnow() - log_multi( - DASHES, str(target), DASHES, str(target.stats), str(self.cache), DASHES - ) + log_multi(DASHES, str(target), DASHES, str(target.stats), str(self.cache), DASHES) def matcher_add_tagged_dirs(self, archive): """Add excludes to the matcher created by exclude_cache and exclude_if_present.""" @@ -2665,11 +2582,7 @@ def exclude(dir, tag_item): for item in archive.iter_items( filter=lambda item: os.path.basename(item.path) == CACHE_TAG_NAME ): - if ( - stat.S_ISREG(item.mode) - and "chunks" not in item - and "source" in item - ): + if stat.S_ISREG(item.mode) and "chunks" not in item and "source" in item: # this is a hardlink slave, referring back to its hardlink master (via item.source) cachedir_masters[ item.source @@ -2684,14 +2597,8 @@ def exclude(dir, tag_item): dir, tag_file = os.path.split(item.path) if tag_file in self.exclude_if_present: exclude(dir, item) - elif ( - self.exclude_caches - and tag_file == CACHE_TAG_NAME - and stat.S_ISREG(item.mode) - ): - content_item = ( - item if "chunks" in item else cachedir_masters[item.source] - ) + elif self.exclude_caches and tag_file == CACHE_TAG_NAME and stat.S_ISREG(item.mode): + content_item = item if "chunks" in item else cachedir_masters[item.source] file = open_item(archive, content_item) if file.read(len(CACHE_TAG_CONTENTS)) == CACHE_TAG_CONTENTS: exclude(dir, item) @@ -2704,9 +2611,7 @@ def create_target(self, archive, target_name=None): target = self.create_target_archive(target_name) # If the archives use the same chunker params, then don't rechunkify source_chunker_params = tuple(archive.metadata.get("chunker_params", [])) - if len(source_chunker_params) == 4 and isinstance( - source_chunker_params[0], int - ): + if len(source_chunker_params) == 4 and isinstance(source_chunker_params[0], int): # this is a borg < 1.2 chunker_params tuple, no chunker algo specified, but we only had buzhash: source_chunker_params = (CH_BUZHASH,) + source_chunker_params target.recreate_rechunkify = ( @@ -2744,6 +2649,4 @@ def create_target_archive(self, name): return target def open_archive(self, name, **kwargs): - return Archive( - self.repository, self.key, self.manifest, name, cache=self.cache, **kwargs - ) + return Archive(self.repository, self.key, self.manifest, name, cache=self.cache, **kwargs) diff --git a/src/borg/archiver.py b/src/borg/archiver.py index 66c4a3e69eb..531b8a9583f 100644 --- a/src/borg/archiver.py +++ 
b/src/borg/archiver.py @@ -154,13 +154,9 @@ ), "EXIT_ERROR is not 2, as expected - fix assert AND exception handler right above this line." -STATS_HEADER = ( - " Original size Compressed size Deduplicated size" -) +STATS_HEADER = " Original size Compressed size Deduplicated size" -PURE_PYTHON_MSGPACK_WARNING = ( - "Using a pure-python msgpack! This will result in lower performance." -) +PURE_PYTHON_MSGPACK_WARNING = "Using a pure-python msgpack! This will result in lower performance." def argument(args, str_or_bool): @@ -199,9 +195,7 @@ def with_repository( if not create and (manifest or cache): if compatibility is None: - raise AssertionError( - "with_repository decorator used without compatibility argument" - ) + raise AssertionError("with_repository decorator used without compatibility argument") if type(compatibility) is not tuple: raise AssertionError( "with_repository decorator compatibility argument must be of type tuple" @@ -258,9 +252,7 @@ def wrapper(self, args, **kwargs): ) with repository: if manifest or cache: - kwargs["manifest"], kwargs["key"] = Manifest.load( - repository, compatibility - ) + kwargs["manifest"], kwargs["key"] = Manifest.load(repository, compatibility) if "compression" in args: kwargs["key"].compressor = args.compression.compressor if secure: @@ -272,15 +264,11 @@ def wrapper(self, args, **kwargs): kwargs["manifest"], progress=getattr(args, "progress", False), lock_wait=self.lock_wait, - cache_mode=getattr( - args, "files_cache_mode", DEFAULT_FILES_CACHE_MODE - ), + cache_mode=getattr(args, "files_cache_mode", DEFAULT_FILES_CACHE_MODE), consider_part_files=getattr(args, "consider_part_files", False), iec=getattr(args, "iec", False), ) as cache_: - return method( - self, args, repository=repository, cache=cache_, **kwargs - ) + return method(self, args, repository=repository, cache=cache_, **kwargs) else: return method(self, args, repository=repository, **kwargs) @@ -298,8 +286,7 @@ def wrapper(self, args, repository, key, manifest, **kwargs): manifest, args.location.archive, numeric_ids=getattr(args, "numeric_ids", False), - noflags=getattr(args, "nobsdflags", False) - or getattr(args, "noflags", False), + noflags=getattr(args, "nobsdflags", False) or getattr(args, "noflags", False), noacls=getattr(args, "noacls", False), noxattrs=getattr(args, "noxattrs", False), cache=kwargs.get("cache"), @@ -308,13 +295,7 @@ def wrapper(self, args, repository, key, manifest, **kwargs): iec=args.iec, ) return method( - self, - args, - repository=repository, - manifest=manifest, - key=key, - archive=archive, - **kwargs + self, args, repository=repository, manifest=manifest, key=key, archive=archive, **kwargs ) return wrapper @@ -489,9 +470,7 @@ def do_check(self, args, repository): # we can't build a fresh repo index in memory to verify the on-disk index against it. # thus, we should not do an archives check based on a unknown-quality on-disk repo index. # also, there is no max_duration support in the archives check code anyway. - self.print_error( - "--repository-only is required for --max-duration support." 
- ) + self.print_error("--repository-only is required for --max-duration support.") return EXIT_ERROR if not args.archives_only: if not repository.check( @@ -543,9 +522,7 @@ def do_key_export(self, args, repository): else: manager.export(args.path) except IsADirectoryError: - self.print_error( - "'{}' must be a file, not a directory".format(args.path) - ) + self.print_error("'{}' must be a file, not a directory".format(args.path)) return EXIT_ERROR return EXIT_SUCCESS @@ -607,25 +584,19 @@ def measurement_run(repo, path): dt_create = t_end - t_start assert rc == 0 # now build files cache - rc1 = self.do_create( - self.parse_args(["create", compression, archive + "2", path]) - ) + rc1 = self.do_create(self.parse_args(["create", compression, archive + "2", path])) rc2 = self.do_delete(self.parse_args(["delete", archive + "2"])) assert rc1 == rc2 == 0 # measure a no-change update (archive1 is still present) t_start = time.monotonic() - rc1 = self.do_create( - self.parse_args(["create", compression, archive + "3", path]) - ) + rc1 = self.do_create(self.parse_args(["create", compression, archive + "3", path])) t_end = time.monotonic() dt_update = t_end - t_start rc2 = self.do_delete(self.parse_args(["delete", archive + "3"])) assert rc1 == rc2 == 0 # measure extraction (dry-run: without writing result to disk) t_start = time.monotonic() - rc = self.do_extract( - self.parse_args(["extract", "--dry-run", archive + "1"]) - ) + rc = self.do_extract(self.parse_args(["extract", "--dry-run", archive + "1"])) t_end = time.monotonic() dt_extract = t_end - t_start assert rc == 0 @@ -652,9 +623,7 @@ def test_files(path, count, size, random): for i in range(count): fname = os.path.join(path, "file_%d" % i) data = z_buff if not random else os.urandom(size) - with SyncFile( - fname, binary=True - ) as fd: # used for posix_fadvise's sake + with SyncFile(fname, binary=True) as fd: # used for posix_fadvise's sake fd.write(data) yield path finally: @@ -735,9 +704,7 @@ def test_files(path, count, size, random): return 0 - @with_repository( - fake="dry_run", exclusive=True, compatibility=(Manifest.Operation.WRITE,) - ) + @with_repository(fake="dry_run", exclusive=True, compatibility=(Manifest.Operation.WRITE,)) def do_create(self, args, repository, manifest=None, key=None): """Create new archive""" matcher = PatternMatcher(fallback=True) @@ -781,9 +748,7 @@ def create_inner(archive, cache, fso): ) rc = proc.wait() if rc != 0: - self.print_error( - "Command %r exited with status %d", args.paths[0], rc - ) + self.print_error("Command %r exited with status %d", args.paths[0], rc) return self.exit_code except BackupOSError as e: self.print_error("%s: %s", path, e) @@ -793,9 +758,7 @@ def create_inner(archive, cache, fso): self.print_file_status(status, path) elif args.paths_from_command or args.paths_from_stdin: paths_sep = ( - eval_escapes(args.paths_delimiter) - if args.paths_delimiter is not None - else "\n" + eval_escapes(args.paths_delimiter) if args.paths_delimiter is not None else "\n" ) if args.paths_from_command: try: @@ -830,16 +793,12 @@ def create_inner(archive, cache, fso): self.print_warning("%s: %s", path, e) status = "E" if status == "C": - self.print_warning( - "%s: file changed while we backed it up", path - ) + self.print_warning("%s: file changed while we backed it up", path) self.print_file_status(status, path) if args.paths_from_command: rc = proc.wait() if rc != 0: - self.print_error( - "Command %r exited with status %d", args.paths[0], rc - ) + self.print_error("Command %r exited with status 
%d", args.paths[0], rc) return self.exit_code else: for path in args.paths: @@ -1017,9 +976,7 @@ def create_inner(archive, cache, fso): create_inner(None, None, None) return self.exit_code - def _process_any( - self, *, path, parent_fd, name, st, fso, cache, read_special, dry_run - ): + def _process_any(self, *, path, parent_fd, name, st, fso, cache, read_special, dry_run): """ Call the right method on the given FilesystemObjectProcessor. """ @@ -1027,16 +984,12 @@ def _process_any( if dry_run: return "-" elif stat.S_ISREG(st.st_mode): - return fso.process_file( - path=path, parent_fd=parent_fd, name=name, st=st, cache=cache - ) + return fso.process_file(path=path, parent_fd=parent_fd, name=name, st=st, cache=cache) elif stat.S_ISDIR(st.st_mode): return fso.process_dir(path=path, parent_fd=parent_fd, name=name, st=st) elif stat.S_ISLNK(st.st_mode): if not read_special: - return fso.process_symlink( - path=path, parent_fd=parent_fd, name=name, st=st - ) + return fso.process_symlink(path=path, parent_fd=parent_fd, name=name, st=st) else: try: st_target = os_stat( @@ -1056,14 +1009,10 @@ def _process_any( flags=flags_special_follow, ) else: - return fso.process_symlink( - path=path, parent_fd=parent_fd, name=name, st=st - ) + return fso.process_symlink(path=path, parent_fd=parent_fd, name=name, st=st) elif stat.S_ISFIFO(st.st_mode): if not read_special: - return fso.process_fifo( - path=path, parent_fd=parent_fd, name=name, st=st - ) + return fso.process_fifo(path=path, parent_fd=parent_fd, name=name, st=st) else: return fso.process_file( path=path, @@ -1145,9 +1094,7 @@ def _rec_walk( recurse_excluded_dir = False if matcher.match(path): with backup_io("stat"): - st = os_stat( - path=path, parent_fd=parent_fd, name=name, follow_symlinks=False - ) + st = os_stat(path=path, parent_fd=parent_fd, name=name, follow_symlinks=False) else: self.print_file_status("x", path) # get out here as quickly as possible: @@ -1158,9 +1105,7 @@ def _rec_walk( return recurse_excluded_dir = True with backup_io("stat"): - st = os_stat( - path=path, parent_fd=parent_fd, name=name, follow_symlinks=False - ) + st = os_stat(path=path, parent_fd=parent_fd, name=name, follow_symlinks=False) if not stat.S_ISDIR(st.st_mode): return @@ -1205,18 +1150,14 @@ def _rec_walk( with backup_io("fstat"): st = stat_update_check(st, os.fstat(child_fd)) if recurse: - tag_names = dir_is_tagged( - path, exclude_caches, exclude_if_present - ) + tag_names = dir_is_tagged(path, exclude_caches, exclude_if_present) if tag_names: # if we are already recursing in an excluded dir, we do not need to do anything else than # returning (we do not need to archive or recurse into tagged directories), see #3991: if not recurse_excluded_dir: if keep_exclude_tags: if not dry_run: - fso.process_dir_with_fd( - path=path, fd=child_fd, st=st - ) + fso.process_dir_with_fd(path=path, fd=child_fd, st=st) for tag_name in tag_names: tag_path = os.path.join(path, tag_name) self._rec_walk( @@ -1330,19 +1271,14 @@ def peek_and_store_hardlink_masters(item, matched): # we do not extract the very first hardlink, so we need to remember the chunks # in hardlinks_master, so we can use them when we extract some 2nd+ hardlink item # that has no chunks list. 
- if ( - not has_link - or (partial_extract and not matched and hardlinkable(item.mode)) - ) and (item.get("hardlink_master", True) and "source" not in item): + if (not has_link or (partial_extract and not matched and hardlinkable(item.mode))) and ( + item.get("hardlink_master", True) and "source" not in item + ): hardlink_masters[item.get("path")] = (item.get("chunks"), None) - filter = self.build_filter( - matcher, peek_and_store_hardlink_masters, strip_components - ) + filter = self.build_filter(matcher, peek_and_store_hardlink_masters, strip_components) if progress: - pi = ProgressIndicatorPercent( - msg="%5.1f%% Extracting: %s", step=0.1, msgid="extract" - ) + pi = ProgressIndicatorPercent(msg="%5.1f%% Extracting: %s", step=0.1, msgid="extract") pi.output( "Calculating total archive size for the progress indicator (might take long for large archives)" ) @@ -1368,9 +1304,7 @@ def peek_and_store_hardlink_masters(item, matched): try: archive.extract_item(dir_item, stdout=stdout) except BackupOSError as e: - self.print_warning( - "%s: %s", remove_surrogates(dir_item.path), e - ) + self.print_warning("%s: %s", remove_surrogates(dir_item.path), e) if output_list: logging.getLogger("borg.output.list").info(remove_surrogates(item.path)) try: @@ -1471,18 +1405,14 @@ def peek_and_store_hardlink_masters(item, matched): ): hardlink_masters[item.get("path")] = (item.get("chunks"), None) - filter = self.build_filter( - matcher, peek_and_store_hardlink_masters, strip_components - ) + filter = self.build_filter(matcher, peek_and_store_hardlink_masters, strip_components) # The | (pipe) symbol instructs tarfile to use a streaming mode of operation # where it never seeks on the passed fileobj. tar = tarfile.open(fileobj=tarstream, mode="w|", format=tarfile.GNU_FORMAT) if progress: - pi = ProgressIndicatorPercent( - msg="%5.1f%% Processing: %s", step=0.1, msgid="extract" - ) + pi = ProgressIndicatorPercent(msg="%5.1f%% Processing: %s", step=0.1, msgid="extract") pi.output("Calculating size") extracted_size = sum( item.get_size(hardlink_masters) for item in archive.iter_items(filter) @@ -1547,9 +1477,7 @@ def item_to_tarinfo(item, original_path): if hardlink_masters is None: linkname = source else: - chunks, linkname = hardlink_masters.get( - item.source, (None, source) - ) + chunks, linkname = hardlink_masters.get(item.source, (None, source)) if linkname: # Master was already added to the archive, add a hardlink reference to it. 
tarinfo.type = tarfile.LNKTYPE @@ -1604,9 +1532,7 @@ def item_to_tarinfo(item, original_path): tarinfo, stream = item_to_tarinfo(item, orig_path) if tarinfo: if output_list: - logging.getLogger("borg.output.list").info( - remove_surrogates(orig_path) - ) + logging.getLogger("borg.output.list").info(remove_surrogates(orig_path)) tar.addfile(tarinfo, stream) if pi: @@ -1672,9 +1598,7 @@ def print_text_output(diff, path): return self.exit_code - @with_repository( - exclusive=True, cache=True, compatibility=(Manifest.Operation.CHECK,) - ) + @with_repository(exclusive=True, cache=True, compatibility=(Manifest.Operation.CHECK,)) @with_archive def do_rename(self, args, repository, manifest, key, cache, archive): """Rename an existing archive""" @@ -1715,9 +1639,7 @@ def _delete_archives(self, args, repository): archive_names = tuple(archives) else: args.consider_checkpoints = True - archive_names = tuple( - x.name for x in manifest.archives.list_considering(args) - ) + archive_names = tuple(x.name for x in manifest.archives.list_considering(args)) if not archive_names: return self.exit_code @@ -1730,22 +1652,16 @@ def _delete_archives(self, args, repository): except KeyError: self.exit_code = EXIT_WARNING logger.warning( - "Archive {} not found ({}/{}).".format( - archive_name, i, len(archive_names) - ) + "Archive {} not found ({}/{}).".format(archive_name, i, len(archive_names)) ) else: deleted = True if self.output_list: msg = ( - "Would delete: {} ({}/{})" - if dry_run - else "Deleted archive: {} ({}/{})" + "Would delete: {} ({}/{})" if dry_run else "Deleted archive: {} ({}/{})" ) logger_list.info( - msg.format( - format_archive(current_archive), i, len(archive_names) - ) + msg.format(format_archive(current_archive), i, len(archive_names)) ) if dry_run: logger.info("Finished dry-run.") @@ -1763,9 +1679,7 @@ def _delete_archives(self, args, repository): repository, key, manifest, progress=args.progress, lock_wait=self.lock_wait ) as cache: msg_delete = ( - "Would delete archive: {} ({}/{})" - if dry_run - else "Deleting archive: {} ({}/{})" + "Would delete archive: {} ({}/{})" if dry_run else "Deleting archive: {} ({}/{})" ) msg_not_found = "Archive {} not found ({}/{})." logger_list = logging.getLogger("borg.output.list") @@ -1774,15 +1688,11 @@ def _delete_archives(self, args, repository): try: archive_info = manifest.archives[archive_name] except KeyError: - logger.warning( - msg_not_found.format(archive_name, i, len(archive_names)) - ) + logger.warning(msg_not_found.format(archive_name, i, len(archive_names))) else: if self.output_list: logger_list.info( - msg_delete.format( - format_archive(archive_info), i, len(archive_names) - ) + msg_delete.format(format_archive(archive_info), i, len(archive_names)) ) if not dry_run: @@ -1794,9 +1704,7 @@ def _delete_archives(self, args, repository): cache=cache, consider_part_files=args.consider_part_files, ) - archive.delete( - stats, progress=args.progress, forced=args.forced - ) + archive.delete(stats, progress=args.progress, forced=args.forced) delete_count += 1 if delete_count > 0: # only write/commit if we actually changed something, see #6060. @@ -1826,9 +1734,7 @@ def _delete_repository(self, args, repository): ): # without --force, we let the user see the archives list and confirm. 
msg = [] try: - manifest, key = Manifest.load( - repository, Manifest.NO_OPERATION_CHECK - ) + manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) except NoManifestError: msg.append( "You requested to completely DELETE the repository *including* all archives it may " @@ -1871,8 +1777,7 @@ def _delete_repository(self, args, repository): else: logger.info("Would delete repository.") logger.info( - "Would %s security info." - % ("keep" if keep_security_info else "delete") + "Would %s security info." % ("keep" if keep_security_info else "delete") ) if not dry_run: Cache.destroy(repository) @@ -1889,17 +1794,14 @@ def do_mount(self, args): if llfuse is None: self.print_error( - "borg mount not available: no FUSE support, BORG_FUSE_IMPL=%s." - % BORG_FUSE_IMPL + "borg mount not available: no FUSE support, BORG_FUSE_IMPL=%s." % BORG_FUSE_IMPL ) return self.exit_code if not os.path.isdir(args.mountpoint) or not os.access( args.mountpoint, os.R_OK | os.W_OK | os.X_OK ): - self.print_error( - "%s: Mountpoint must be a writable directory" % args.mountpoint - ) + self.print_error("%s: Mountpoint must be a writable directory" % args.mountpoint) return self.exit_code return self._do_mount(args) @@ -2020,9 +1922,7 @@ def format_cmdline(cmdline): archive_names = (args.location.archive,) else: args.consider_checkpoints = True - archive_names = tuple( - x.name for x in manifest.archives.list_considering(args) - ) + archive_names = tuple(x.name for x in manifest.archives.list_considering(args)) output_data = [] @@ -2157,15 +2057,9 @@ def do_prune(self, args, repository, manifest, key): reverse=True, ) is_checkpoint = re.compile(r"(%s)\Z" % checkpoint_re).search - checkpoints = [ - arch for arch in archives_checkpoints if is_checkpoint(arch.name) - ] + checkpoints = [arch for arch in archives_checkpoints if is_checkpoint(arch.name)] # keep the latest checkpoint, if there is no later non-checkpoint archive - if ( - archives_checkpoints - and checkpoints - and archives_checkpoints[0] is checkpoints[0] - ): + if archives_checkpoints and checkpoints and archives_checkpoints[0] is checkpoints[0]: keep_checkpoints = checkpoints[:1] else: keep_checkpoints = [] @@ -2249,9 +2143,7 @@ def do_prune(self, args, repository, manifest, key): ) return self.exit_code - @with_repository( - fake=("tam", "disable_tam"), invert_fake=True, manifest=False, exclusive=True - ) + @with_repository(fake=("tam", "disable_tam"), invert_fake=True, manifest=False, exclusive=True) def do_upgrade(self, args, repository, manifest=None, key=None): """upgrade a repository from a previous version""" if args.tam: @@ -2265,9 +2157,7 @@ def do_upgrade(self, args, repository, manifest=None, key=None): print("This repository is not encrypted, cannot enable TAM.") return EXIT_ERROR - if not manifest.tam_verified or not manifest.config.get( - b"tam_required", False - ): + if not manifest.tam_verified or not manifest.config.get(b"tam_required", False): # The standard archive listing doesn't include the archive ID like in borg 1.1.x print("Manifest contents:") for archive_info in manifest.archives.list(sort_by=["ts"]): @@ -2319,9 +2209,7 @@ def do_upgrade(self, args, repository, manifest=None, key=None): print("warning: %s" % e) return self.exit_code - @with_repository( - cache=True, exclusive=True, compatibility=(Manifest.Operation.CHECK,) - ) + @with_repository(cache=True, exclusive=True, compatibility=(Manifest.Operation.CHECK,)) def do_recreate(self, args, repository, manifest, key, cache): """Re-create archives""" matcher = 
self.build_matcher(args.patterns, args.paths) @@ -2383,9 +2271,7 @@ def do_recreate(self, args, repository, manifest, key, cache): cache.commit() return self.exit_code - @with_repository( - cache=True, exclusive=True, compatibility=(Manifest.Operation.WRITE,) - ) + @with_repository(cache=True, exclusive=True, compatibility=(Manifest.Operation.WRITE,)) def do_import_tar(self, args, repository, manifest, key, cache): """Create a backup archive from a tarball""" self.output_filter = args.output_filter @@ -2452,36 +2338,26 @@ def _import_tar(self, args, repository, manifest, key, cache, tarstream): if not tarinfo: break if tarinfo.isreg(): - status = tfo.process_file( - tarinfo=tarinfo, status="A", type=stat.S_IFREG, tar=tar - ) + status = tfo.process_file(tarinfo=tarinfo, status="A", type=stat.S_IFREG, tar=tar) archive.stats.nfiles += 1 elif tarinfo.isdir(): status = tfo.process_dir(tarinfo=tarinfo, status="d", type=stat.S_IFDIR) elif tarinfo.issym(): - status = tfo.process_link( - tarinfo=tarinfo, status="s", type=stat.S_IFLNK - ) + status = tfo.process_link(tarinfo=tarinfo, status="s", type=stat.S_IFLNK) elif tarinfo.islnk(): # tar uses the same hardlink model as borg (rather vice versa); the first instance of a hardlink # is stored as a regular file, later instances are special entries referencing back to the # first instance. - status = tfo.process_link( - tarinfo=tarinfo, status="h", type=stat.S_IFREG - ) + status = tfo.process_link(tarinfo=tarinfo, status="h", type=stat.S_IFREG) elif tarinfo.isblk(): status = tfo.process_dev(tarinfo=tarinfo, status="b", type=stat.S_IFBLK) elif tarinfo.ischr(): status = tfo.process_dev(tarinfo=tarinfo, status="c", type=stat.S_IFCHR) elif tarinfo.isfifo(): - status = tfo.process_fifo( - tarinfo=tarinfo, status="f", type=stat.S_IFIFO - ) + status = tfo.process_fifo(tarinfo=tarinfo, status="f", type=stat.S_IFIFO) else: status = "E" - self.print_warning( - "%s: Unsupported tarinfo type %s", tarinfo.name, tarinfo.type - ) + self.print_warning("%s: Unsupported tarinfo type %s", tarinfo.name, tarinfo.type) self.print_file_status(status, tarinfo.name) # This does not close the fileobj (tarstream) we passed to it -- a side effect of the | mode. 
@@ -2552,9 +2428,7 @@ def do_compact(self, args, repository): data = repository.get(Manifest.MANIFEST_ID) repository.put(Manifest.MANIFEST_ID, data) threshold = args.threshold / 100 - repository.commit( - compact=True, threshold=threshold, cleanup_commits=args.cleanup_commits - ) + repository.commit(compact=True, threshold=threshold, cleanup_commits=args.cleanup_commits) return EXIT_SUCCESS @with_repository(exclusive=True, manifest=False) @@ -2591,8 +2465,7 @@ def repo_validate(section, name, value=None, check_value=True): elif name == "max_segment_size": if parse_file_size(value) >= MAX_SEGMENT_SIZE_LIMIT: raise ValueError( - "Invalid value: max_segment_size >= %d" - % MAX_SEGMENT_SIZE_LIMIT + "Invalid value: max_segment_size >= %d" % MAX_SEGMENT_SIZE_LIMIT ) elif name in [ "append_only", @@ -2606,9 +2479,7 @@ def repo_validate(section, name, value=None, check_value=True): try: bin_id = unhexlify(value) except: - raise ValueError( - "Invalid value, must be 64 hex digits" - ) from None + raise ValueError("Invalid value, must be 64 hex digits") from None if len(bin_id) != 32: raise ValueError("Invalid value, must be 64 hex digits") else: @@ -2687,9 +2558,7 @@ def list_config(config): validate = cache_validate else: config = repository.config - save = lambda: repository.save_config( - repository.path, repository.config - ) # noqa + save = lambda: repository.save_config(repository.path, repository.config) # noqa validate = repo_validate if args.delete: @@ -2750,9 +2619,7 @@ def do_debug_dump_archive(self, args, repository, manifest, key): """dump decoded archive metadata (not: data)""" try: - archive_meta_orig = manifest.archives.get_raw_dict()[ - safe_encode(args.location.archive) - ] + archive_meta_orig = manifest.archives.get_raw_dict()[safe_encode(args.location.archive)] except KeyError: raise Archive.DoesNotExist(args.location.archive) @@ -2769,9 +2636,7 @@ def output(fd): fd.write(do_indent(prepare_dump_dict(archive_meta_orig))) fd.write(",\n") - data = key.decrypt( - archive_meta_orig[b"id"], repository.get(archive_meta_orig[b"id"]) - ) + data = key.decrypt(archive_meta_orig[b"id"], repository.get(archive_meta_orig[b"id"])) archive_org_dict = msgpack.unpackb(data, object_hook=StableDict) fd.write(' "_meta":\n') @@ -2848,15 +2713,11 @@ def decrypt_dump(i, id, cdata, tag=None, segment=None, offset=None): i = 0 for id, cdata, tag, segment, offset in repository.scan_low_level(): if tag == TAG_PUT: - decrypt_dump( - i, id, cdata, tag="put", segment=segment, offset=offset - ) + decrypt_dump(i, id, cdata, tag="put", segment=segment, offset=offset) elif tag == TAG_DELETE: decrypt_dump(i, id, None, tag="del", segment=segment, offset=offset) elif tag == TAG_COMMIT: - decrypt_dump( - i, None, None, tag="commit", segment=segment, offset=offset - ) + decrypt_dump(i, None, None, tag="commit", segment=segment, offset=offset) i += 1 else: # set up the key without depending on a manifest obj @@ -2930,9 +2791,7 @@ def print_finding(info, wanted, data, offset): data = key.decrypt(give_id, cdata) # try to locate wanted sequence crossing the border of last_data and data - boundary_data = ( - last_data[-(len(wanted) - 1) :] + data[: len(wanted) - 1] - ) + boundary_data = last_data[-(len(wanted) - 1) :] + data[: len(wanted) - 1] if wanted in boundary_data: boundary_data = ( last_data[-(len(wanted) - 1 + context) :] @@ -2945,9 +2804,7 @@ def print_finding(info, wanted, data, offset): # try to locate wanted sequence in data count = data.count(wanted) if count: - offset = data.find( - wanted - ) # only 
determine first occurrence's offset + offset = data.find(wanted) # only determine first occurrence's offset info = "%d %s #%d" % (i, id.hex(), count) print_finding(info, wanted, data, offset) @@ -3027,8 +2884,7 @@ def do_debug_refcount_obj(self, args, repository, manifest, key, cache): try: refcount = cache.chunks[id][0] print( - "object %s has %d referrers [info from chunks cache]." - % (hex_id, refcount) + "object %s has %d referrers [info from chunks cache]." % (hex_id, refcount) ) except KeyError: print("object %s not found [info from chunks cache]." % hex_id) @@ -3044,9 +2900,7 @@ def do_debug_dump_hints(self, args, repository): segments=repository.segments, compact=repository.compact, storage_quota_use=repository.storage_quota_use, - shadow_index={ - hexlify(k).decode(): v for k, v in repository.shadow_index.items() - }, + shadow_index={hexlify(k).decode(): v for k, v in repository.shadow_index.items()}, ) with dash_open(args.path, "w") as fd: json.dump(hints, fd, indent=4) @@ -3059,9 +2913,7 @@ def do_debug_convert_profile(self, args): import marshal with args.output, args.input: - marshal.dump( - msgpack.unpack(args.input, use_list=False, raw=False), args.output - ) + marshal.dump(msgpack.unpack(args.input, use_list=False, raw=False), args.output) return EXIT_SUCCESS @with_repository(lock=False, manifest=False) @@ -3584,9 +3436,7 @@ def add_argument(*args, **kwargs): kwargs["default"] == [] ), "The default is explicitly constructed as an empty list in resolve()" else: - self.common_options.setdefault(suffix, set()).add( - kwargs["dest"] - ) + self.common_options.setdefault(suffix, set()).add(kwargs["dest"]) kwargs["dest"] += suffix if not provide_defaults: # Interpolate help now, in case the %(default)d (or so) is mentioned, @@ -3670,9 +3520,7 @@ def process_epilog(epilog): return epilog def define_common_options(add_common_option): - add_common_option( - "-h", "--help", action="help", help="show this help message and exit" - ) + add_common_option("-h", "--help", action="help", help="show this help message and exit") add_common_option( "--critical", dest="log_level", @@ -3833,9 +3681,7 @@ def define_common_options(add_common_option): help="Use this command to connect to the 'borg serve' process (default: 'ssh')", ) - def define_exclude_and_patterns( - add_option, *, tag_files=False, strip_components=False - ): + def define_exclude_and_patterns(add_option, *, tag_files=False, strip_components=False): add_option( "-e", "--exclude", @@ -4040,9 +3886,7 @@ def define_borg_mount(parser): version="%(prog)s " + __version__, help="show version number and exit", ) - parser.common_options.add_common_group( - parser, "_maincommand", provide_defaults=True - ) + parser.common_options.add_common_group(parser, "_maincommand", provide_defaults=True) common_parser = argparse.ArgumentParser(add_help=False, prog=self.prog) common_parser.set_defaults(paths=[], patterns=[]) @@ -4119,9 +3963,7 @@ def define_borg_mount(parser): define_borg_mount(parser) return parser - subparsers = parser.add_subparsers( - title="required arguments", metavar="" - ) + subparsers = parser.add_subparsers(title="required arguments", metavar="") # borg benchmark benchmark_epilog = process_epilog("These commands do various benchmarks.") @@ -4139,9 +3981,7 @@ def define_borg_mount(parser): benchmark_parsers = subparser.add_subparsers( title="required arguments", metavar="" ) - subparser.set_defaults( - fallback_func=functools.partial(self.do_subcommand_help, subparser) - ) + 
subparser.set_defaults(fallback_func=functools.partial(self.do_subcommand_help, subparser)) bench_crud_epilog = process_epilog( """ @@ -4485,12 +4325,8 @@ def define_borg_mount(parser): type=location_validator(archive=False, proto="file"), help="repository to configure", ) - subparser.add_argument( - "name", metavar="NAME", nargs="?", help="name of config key" - ) - subparser.add_argument( - "value", metavar="VALUE", nargs="?", help="new value for key" - ) + subparser.add_argument("name", metavar="NAME", nargs="?", help="name of config key") + subparser.add_argument("value", metavar="VALUE", nargs="?", help="new value for key") # borg create create_epilog = process_epilog( @@ -4870,8 +4706,7 @@ def define_borg_mount(parser): action=Highlander, type=FilesCacheMode, default=DEFAULT_FILES_CACHE_MODE_UI, - help="operate files cache in MODE. default: %s" - % DEFAULT_FILES_CACHE_MODE_UI, + help="operate files cache in MODE. default: %s" % DEFAULT_FILES_CACHE_MODE_UI, ) fs_group.add_argument( "--read-special", @@ -4916,8 +4751,7 @@ def define_borg_mount(parser): default=CHUNKER_PARAMS, action=Highlander, help="specify the chunker parameters (ALGO, CHUNK_MIN_EXP, CHUNK_MAX_EXP, " - "HASH_MASK_BITS, HASH_WINDOW_SIZE). default: %s,%d,%d,%d,%d" - % CHUNKER_PARAMS, + "HASH_MASK_BITS, HASH_WINDOW_SIZE). default: %s,%d,%d,%d,%d" % CHUNKER_PARAMS, ) archive_group.add_argument( "-C", @@ -4961,12 +4795,8 @@ def define_borg_mount(parser): help="debugging command (not intended for normal use)", ) - debug_parsers = subparser.add_subparsers( - title="required arguments", metavar="" - ) - subparser.set_defaults( - fallback_func=functools.partial(self.do_subcommand_help, subparser) - ) + debug_parsers = subparser.add_subparsers(title="required arguments", metavar="") + subparser.set_defaults(fallback_func=functools.partial(self.do_subcommand_help, subparser)) debug_info_epilog = process_epilog( """ @@ -5029,9 +4859,7 @@ def define_borg_mount(parser): type=location_validator(archive=True), help="archive to dump", ) - subparser.add_argument( - "path", metavar="PATH", type=str, help="file to dump data into" - ) + subparser.add_argument("path", metavar="PATH", type=str, help="file to dump data into") debug_dump_manifest_epilog = process_epilog( """ @@ -5054,9 +4882,7 @@ def define_borg_mount(parser): type=location_validator(archive=False), help="repository to dump", ) - subparser.add_argument( - "path", metavar="PATH", type=str, help="file to dump data into" - ) + subparser.add_argument("path", metavar="PATH", type=str, help="file to dump data into") debug_dump_repo_objs_epilog = process_epilog( """ @@ -5250,9 +5076,7 @@ def define_borg_mount(parser): type=location_validator(archive=False), help="repository to dump", ) - subparser.add_argument( - "path", metavar="PATH", type=str, help="file to dump data into" - ) + subparser.add_argument("path", metavar="PATH", type=str, help="file to dump data into") debug_convert_profile_epilog = process_epilog( """ @@ -5371,9 +5195,7 @@ def define_borg_mount(parser): type=location_validator(), help="repository or archive to delete", ) - subparser.add_argument( - "archives", metavar="ARCHIVE", nargs="*", help="archives to delete" - ) + subparser.add_argument("archives", metavar="ARCHIVE", nargs="*", help="archives to delete") define_archive_filters_group(subparser) # borg diff @@ -5656,9 +5478,7 @@ def define_borg_mount(parser): ) subparser.add_argument("--epilog-only", dest="epilog_only", action="store_true") subparser.add_argument("--usage-only", dest="usage_only", 
action="store_true") - subparser.set_defaults( - func=functools.partial(self.do_help, parser, subparsers.choices) - ) + subparser.set_defaults(func=functools.partial(self.do_help, parser, subparsers.choices)) subparser.add_argument( "topic", metavar="TOPIC", @@ -5705,9 +5525,7 @@ def define_borg_mount(parser): type=location_validator(), help="repository or archive to display information about", ) - subparser.add_argument( - "--json", action="store_true", help="format output as JSON" - ) + subparser.add_argument("--json", action="store_true", help="format output as JSON") define_archive_filters_group(subparser) # borg init @@ -5889,12 +5707,8 @@ def define_borg_mount(parser): help="manage repository key", ) - key_parsers = subparser.add_subparsers( - title="required arguments", metavar="" - ) - subparser.set_defaults( - fallback_func=functools.partial(self.do_subcommand_help, subparser) - ) + key_parsers = subparser.add_subparsers(title="required arguments", metavar="") + subparser.set_defaults(fallback_func=functools.partial(self.do_subcommand_help, subparser)) key_export_epilog = process_epilog( """ @@ -6810,9 +6624,7 @@ def define_borg_mount(parser): help="rewrite repository in place, with no chance of going back " "to older versions of the repository.", ) - subparser.add_argument( - "--force", dest="force", action="store_true", help="Force upgrade" - ) + subparser.add_argument("--force", dest="force", action="store_true", help="Force upgrade") subparser.add_argument( "--tam", dest="tam", @@ -6984,8 +6796,7 @@ def define_borg_mount(parser): default=CHUNKER_PARAMS, metavar="PARAMS", help="specify the chunker parameters (ALGO, CHUNK_MIN_EXP, CHUNK_MAX_EXP, " - "HASH_MASK_BITS, HASH_WINDOW_SIZE). default: %s,%d,%d,%d,%d" - % CHUNKER_PARAMS, + "HASH_MASK_BITS, HASH_WINDOW_SIZE). default: %s,%d,%d,%d,%d" % CHUNKER_PARAMS, ) archive_group.add_argument( "-C", @@ -7072,9 +6883,7 @@ def parse_args(self, args=None): elif not args.paths_from_stdin: # need at least 1 path but args.paths may also be populated from patterns parser.error("Need at least one PATH argument.") - if not getattr( - args, "lock", True - ): # Option --bypass-lock sets args.lock = False + if not getattr(args, "lock", True): # Option --bypass-lock sets args.lock = False bypass_allowed = { self.do_check, self.do_config, @@ -7087,9 +6896,7 @@ def parse_args(self, args=None): self.do_umount, } if func not in bypass_allowed: - raise Error( - "Not allowed to bypass locking mechanism for chosen command" - ) + raise Error("Not allowed to bypass locking mechanism for chosen command") if getattr(args, "timestamp", None): args.location = args.location.with_timestamp(args.timestamp) return args @@ -7137,9 +6944,7 @@ def run(self, args): if getattr(args, "stats", False) and getattr(args, "dry_run", False): # the data needed for --stats is not computed when using --dry-run, so we can't do it. # for ease of scripting, we just ignore --stats when given with --dry-run. - logger.warning( - "Ignoring --stats. It is not supported when using --dry-run." - ) + logger.warning("Ignoring --stats. 
It is not supported when using --dry-run.") args.stats = False if args.show_version: logging.getLogger("borg.output.show-version").info( @@ -7206,9 +7011,7 @@ def sig_info_handler(sig_no, stack): # pragma: no cover except Exception: pos, total = 0, 0 logger.info( - "{0} {1}/{2}".format( - path, format_file_size(pos), format_file_size(total) - ) + "{0} {1}/{2}".format(path, format_file_size(pos), format_file_size(total)) ) break if func in ("extract_item",): # extract op @@ -7223,8 +7026,7 @@ def sig_info_handler(sig_no, stack): # pragma: no cover def sig_trace_handler(sig_no, stack): # pragma: no cover print( - "\nReceived SIGUSR2 at %s, dumping trace..." - % datetime.now().replace(microsecond=0), + "\nReceived SIGUSR2 at %s, dumping trace..." % datetime.now().replace(microsecond=0), file=sys.stderr, ) faulthandler.dump_traceback() @@ -7249,11 +7051,9 @@ def main(): # pragma: no cover # Register fault handler for SIGSEGV, SIGFPE, SIGABRT, SIGBUS and SIGILL. faulthandler.enable() - with signal_handler( - "SIGINT", raising_signal_handler(KeyboardInterrupt) - ), signal_handler("SIGHUP", raising_signal_handler(SigHup)), signal_handler( - "SIGTERM", raising_signal_handler(SigTerm) - ), signal_handler( + with signal_handler("SIGINT", raising_signal_handler(KeyboardInterrupt)), signal_handler( + "SIGHUP", raising_signal_handler(SigHup) + ), signal_handler("SIGTERM", raising_signal_handler(SigTerm)), signal_handler( "SIGUSR1", sig_info_handler ), signal_handler( "SIGUSR2", sig_trace_handler diff --git a/src/borg/cache.py b/src/borg/cache.py index 07d636b300e..888a83b78e5 100644 --- a/src/borg/cache.py +++ b/src/borg/cache.py @@ -95,9 +95,7 @@ def key_matches(self, key): logger.warning("Could not read/parse key type file: %s", exc) def save(self, manifest, key): - logger.debug( - "security: saving state for %s to %s", self.repository.id_str, self.dir - ) + logger.debug("security: saving state for %s to %s", self.repository.id_str, self.dir) current_location = self.repository._location.canonical_path() logger.debug("security: current location %s", current_location) logger.debug("security: key type %s", str(key.TYPE)) @@ -116,9 +114,7 @@ def assert_location_matches(self, cache_config=None): previous_location = fd.read() logger.debug("security: read previous location %r", previous_location) except FileNotFoundError: - logger.debug( - "security: previous location file %s not found", self.location_file - ) + logger.debug("security: previous location file %s not found", self.location_file) previous_location = None except OSError as exc: logger.warning("Could not read previous location file: %s", exc) @@ -130,9 +126,7 @@ def assert_location_matches(self, cache_config=None): ): # Reconcile cache and security dir; we take the cache location. 
previous_location = cache_config.previous_location - logger.debug( - "security: using previous_location of cache: %r", previous_location - ) + logger.debug("security: using previous_location of cache: %r", previous_location) repository_location = self.repository._location.canonical_path() if previous_location and previous_location != repository_location: @@ -163,9 +157,7 @@ def assert_no_manifest_replay(self, manifest, key, cache_config=None): timestamp = fd.read() logger.debug("security: read manifest timestamp %r", timestamp) except FileNotFoundError: - logger.debug( - "security: manifest timestamp file %s not found", self.manifest_ts_file - ) + logger.debug("security: manifest timestamp file %s not found", self.manifest_ts_file) timestamp = "" except OSError as exc: logger.warning("Could not read previous location file: %s", exc) @@ -192,13 +184,7 @@ def assert_key_type(self, key, cache_config=None): raise Cache.EncryptionMethodMismatch() def assert_secure( - self, - manifest, - key, - *, - cache_config=None, - warn_if_unencrypted=True, - lock_wait=None + self, manifest, key, *, cache_config=None, warn_if_unencrypted=True, lock_wait=None ): # warn_if_unencrypted=False is only used for initializing a new repository. # Thus, avoiding asking about a repository that's currently initializing. @@ -332,14 +318,10 @@ def load(self): self.timestamp = self._config.get("cache", "timestamp", fallback=None) self.key_type = self._config.get("cache", "key_type", fallback=None) self.ignored_features = set( - parse_stringified_list( - self._config.get("cache", "ignored_features", fallback="") - ) + parse_stringified_list(self._config.get("cache", "ignored_features", fallback="")) ) self.mandatory_features = set( - parse_stringified_list( - self._config.get("cache", "mandatory_features", fallback="") - ) + parse_stringified_list(self._config.get("cache", "mandatory_features", fallback="")) ) try: self.integrity = dict(self._config.items("integrity")) @@ -359,29 +341,21 @@ def load(self): "Cache integrity: No integrity data found (files, chunks). Cache is from old version." ) self.integrity = {} - previous_location = self._config.get( - "cache", "previous_location", fallback=None - ) + previous_location = self._config.get("cache", "previous_location", fallback=None) if previous_location: self.previous_location = recanonicalize_relative_location( previous_location, self.repository ) else: self.previous_location = None - self._config.set( - "cache", "previous_location", self.repository._location.canonical_path() - ) + self._config.set("cache", "previous_location", self.repository._location.canonical_path()) def save(self, manifest=None, key=None): if manifest: self._config.set("cache", "manifest", manifest.id_str) self._config.set("cache", "timestamp", manifest.timestamp) - self._config.set( - "cache", "ignored_features", ",".join(self.ignored_features) - ) - self._config.set( - "cache", "mandatory_features", ",".join(self.mandatory_features) - ) + self._config.set("cache", "ignored_features", ",".join(self.ignored_features)) + self._config.set("cache", "mandatory_features", ",".join(self.mandatory_features)) if not self._config.has_section("integrity"): self._config.add_section("integrity") for file, integrity_data in self.integrity.items(): @@ -409,9 +383,7 @@ def _check_upgrade(self, config_path): ) except configparser.NoSectionError: self.close() - raise Exception( - "%s does not look like a Borg cache." % config_path - ) from None + raise Exception("%s does not look like a Borg cache." 
% config_path) from None class Cache: @@ -502,9 +474,7 @@ def adhoc(): # Local cache is in sync, use it logger.debug("Cache: choosing local cache (in sync)") return local() - logger.debug( - "Cache: choosing ad-hoc cache (local cache does not exist or is not in sync)" - ) + logger.debug("Cache: choosing ad-hoc cache (local cache does not exist or is not in sync)") return adhoc() @@ -624,16 +594,12 @@ def __init__( # Warn user before sending data to a never seen before unencrypted repository if not os.path.exists(self.path): - self.security_manager.assert_access_unknown( - warn_if_unencrypted, manifest, key - ) + self.security_manager.assert_access_unknown(warn_if_unencrypted, manifest, key) self.create() self.open() try: - self.security_manager.assert_secure( - manifest, key, cache_config=self.cache_config - ) + self.security_manager.assert_secure(manifest, key, cache_config=self.cache_config) if not self.check_cache_compatibility(): self.wipe_cache() @@ -710,9 +676,7 @@ def _read_files(self): for path_hash, item in u: entry = FileCacheEntry(*item) # in the end, this takes about 240 Bytes per file - self.files[path_hash] = msgpack.packb( - entry._replace(age=entry.age + 1) - ) + self.files[path_hash] = msgpack.packb(entry._replace(age=entry.age + 1)) except (TypeError, ValueError) as exc: msg = "The files cache seems invalid. [%s]" % str(exc) break @@ -724,9 +688,7 @@ def _read_files(self): logger.warning(msg) logger.warning("Continuing without files cache - expect lower performance.") self.files = {} - files_cache_logger.debug( - "FILES-CACHE-LOAD: finished, %d entries loaded.", len(self.files) - ) + files_cache_logger.debug("FILES-CACHE-LOAD: finished, %d entries loaded.", len(self.files)) def begin_txn(self): # Initialize transaction snapshot @@ -743,9 +705,7 @@ def begin_txn(self): except FileNotFoundError: with SaveFile(os.path.join(txn_dir, files_cache_name()), binary=True): pass # empty file - os.rename( - os.path.join(self.path, "txn.tmp"), os.path.join(self.path, "txn.active") - ) + os.rename(os.path.join(self.path, "txn.tmp"), os.path.join(self.path, "txn.active")) self.txn_active = True pi.finish() @@ -791,16 +751,12 @@ def commit(self): ) self.cache_config.integrity[files_cache_name()] = fd.integrity_data pi.output("Saving chunks cache") - with IntegrityCheckedFile( - path=os.path.join(self.path, "chunks"), write=True - ) as fd: + with IntegrityCheckedFile(path=os.path.join(self.path, "chunks"), write=True) as fd: self.chunks.write(fd) self.cache_config.integrity["chunks"] = fd.integrity_data pi.output("Saving cache config") self.cache_config.save(self.manifest, self.key) - os.rename( - os.path.join(self.path, "txn.active"), os.path.join(self.path, "txn.tmp") - ) + os.rename(os.path.join(self.path, "txn.active"), os.path.join(self.path, "txn.tmp")) shutil.rmtree(os.path.join(self.path, "txn.tmp")) self.txn_active = False pi.finish() @@ -853,9 +809,7 @@ def cached_archives(): # filenames with 64 hex digits == 256bit, # or compact indices which are 64 hex digits + ".compact" return set(unhexlify(fn) for fn in fns if len(fn) == 64) | set( - unhexlify(fn[:64]) - for fn in fns - if len(fn) == 72 and fn.endswith(".compact") + unhexlify(fn[:64]) for fn in fns if len(fn) == 72 and fn.endswith(".compact") ) else: return set() @@ -897,12 +851,8 @@ def fetch_missing_csize(chunk_idx): for id_ in all_missing_ids: already_fetched_entry = chunks_fetched_size_index.get(id_) if already_fetched_entry: - entry = chunk_idx[id_]._replace( - csize=already_fetched_entry.csize - ) - assert ( - 
entry.size == already_fetched_entry.size - ), "Chunk size mismatch" + entry = chunk_idx[id_]._replace(csize=already_fetched_entry.csize) + assert entry.size == already_fetched_entry.size, "Chunk size mismatch" chunk_idx[id_] = entry else: fetch_ids.append(id_) @@ -911,9 +861,7 @@ def fetch_missing_csize(chunk_idx): # This is potentially a rather expensive operation, but it's hard to tell at this point # if it's a problem in practice (hence the experimental status of --no-cache-sync). - for id_, data in zip( - fetch_ids, decrypted_repository.repository.get_many(fetch_ids) - ): + for id_, data in zip(fetch_ids, decrypted_repository.repository.get_many(fetch_ids)): entry = chunk_idx[id_]._replace(csize=len(data)) chunk_idx[id_] = entry chunks_fetched_size_index[id_] = entry @@ -989,9 +937,7 @@ def read_archive_index(archive_id, archive_name): return None # Convert to compact index. Delete the existing index first. - logger.debug( - "Found non-compact index for %s, converting to compact.", archive_name - ) + logger.debug("Found non-compact index for %s, converting to compact.", archive_name) cleanup_cached_archive(archive_id) write_archive_index(archive_id, archive_chunk_idx) return archive_chunk_idx @@ -1026,11 +972,7 @@ def create_master_idx(chunk_idx): # due to hash table "resonance". master_index_capacity = len(self.repository) if archive_ids: - chunk_idx = ( - None - if not self.do_cache - else ChunkIndex(usable=master_index_capacity) - ) + chunk_idx = None if not self.do_cache else ChunkIndex(usable=master_index_capacity) pi = ProgressIndicatorPercent( total=len(archive_ids), step=0.1, @@ -1042,9 +984,7 @@ def create_master_idx(chunk_idx): pi.show(info=[remove_surrogates(archive_name)]) if self.do_cache: if archive_id in cached_ids: - archive_chunk_idx = read_archive_index( - archive_id, archive_name - ) + archive_chunk_idx = read_archive_index(archive_id, archive_name) if archive_chunk_idx is None: cached_ids.remove(archive_id) if archive_id not in cached_ids: @@ -1055,15 +995,11 @@ def create_master_idx(chunk_idx): archive_name, ) archive_chunk_idx = ChunkIndex() - fetch_and_build_idx( - archive_id, decrypted_repository, archive_chunk_idx - ) + fetch_and_build_idx(archive_id, decrypted_repository, archive_chunk_idx) logger.info("Merging into master chunks index ...") chunk_idx.merge(archive_chunk_idx) else: - chunk_idx = chunk_idx or ChunkIndex( - usable=master_index_capacity - ) + chunk_idx = chunk_idx or ChunkIndex(usable=master_index_capacity) logger.info("Fetching archive index for %s ...", archive_name) fetch_and_build_idx(archive_id, decrypted_repository, chunk_idx) if not self.do_cache: @@ -1108,9 +1044,7 @@ def legacy_cleanup(): self.manifest.check_repository_compatibility((Manifest.Operation.READ,)) self.begin_txn() - with cache_if_remote( - self.repository, decrypted_cache=self.key - ) as decrypted_repository: + with cache_if_remote(self.repository, decrypted_cache=self.key) as decrypted_repository: legacy_cleanup() # TEMPORARY HACK: to avoid archive index caching, create a FILE named ~/.cache/borg/REPOID/chunks.archive.d - # this is only recommended if you have a fast, low latency connection to your repo (e.g. 
if repo is local disk) @@ -1222,16 +1156,12 @@ def file_known_and_unchanged(self, hashed_path, path_hash, st): return False, None entry = self.files.get(path_hash) if not entry: - files_cache_logger.debug( - "UNKNOWN: no file metadata in cache for: %r", hashed_path - ) + files_cache_logger.debug("UNKNOWN: no file metadata in cache for: %r", hashed_path) return False, None # we know the file! entry = FileCacheEntry(*msgpack.unpackb(entry)) if "s" in cache_mode and entry.size != st.st_size: - files_cache_logger.debug( - "KNOWN-CHANGED: file size has changed: %r", hashed_path - ) + files_cache_logger.debug("KNOWN-CHANGED: file size has changed: %r", hashed_path) return True, None if "i" in cache_mode and entry.inode != st.st_ino: files_cache_logger.debug( @@ -1239,14 +1169,10 @@ def file_known_and_unchanged(self, hashed_path, path_hash, st): ) return True, None if "c" in cache_mode and bigint_to_int(entry.cmtime) != st.st_ctime_ns: - files_cache_logger.debug( - "KNOWN-CHANGED: file ctime has changed: %r", hashed_path - ) + files_cache_logger.debug("KNOWN-CHANGED: file ctime has changed: %r", hashed_path) return True, None elif "m" in cache_mode and bigint_to_int(entry.cmtime) != st.st_mtime_ns: - files_cache_logger.debug( - "KNOWN-CHANGED: file mtime has changed: %r", hashed_path - ) + files_cache_logger.debug("KNOWN-CHANGED: file mtime has changed: %r", hashed_path) return True, None # we ignored the inode number in the comparison above or it is still same. # if it is still the same, replacing it in the tuple doesn't change it. diff --git a/src/borg/constants.py b/src/borg/constants.py index 65cd8e47932..24744086f59 100644 --- a/src/borg/constants.py +++ b/src/borg/constants.py @@ -146,12 +146,8 @@ CH_DATA, CH_ALLOC, CH_HOLE = 0, 1, 2 # operating mode of the files cache (for fast skipping of unchanged files) -DEFAULT_FILES_CACHE_MODE_UI = ( - "ctime,size,inode" # default for "borg create" command (CLI UI) -) -DEFAULT_FILES_CACHE_MODE = ( - "d" # most borg commands do not use the files cache at all (disable) -) +DEFAULT_FILES_CACHE_MODE_UI = "ctime,size,inode" # default for "borg create" command (CLI UI) +DEFAULT_FILES_CACHE_MODE = "d" # most borg commands do not use the files cache at all (disable) # return codes returned by borg command # when borg is killed by signal N, rc = 128 + N diff --git a/src/borg/crypto/file_integrity.py b/src/borg/crypto/file_integrity.py index 5ec2e909a29..c7acb818713 100644 --- a/src/borg/crypto/file_integrity.py +++ b/src/borg/crypto/file_integrity.py @@ -122,9 +122,7 @@ class FileIntegrityError(IntegrityError): class IntegrityCheckedFile(FileLikeWrapper): - def __init__( - self, path, write, filename=None, override_fd=None, integrity_data=None - ): + def __init__(self, path, write, filename=None, override_fd=None, integrity_data=None): self.path = path self.writing = write mode = "wb" if write else "rb" @@ -190,9 +188,7 @@ def hash_part(self, partname, is_final=False): digest = self.hasher.hexdigest() if self.writing: self.digests[partname] = digest - elif self.digests and not compare_digest( - self.digests.get(partname, ""), digest - ): + elif self.digests and not compare_digest(self.digests.get(partname, ""), digest): raise FileIntegrityError(self.path) def __exit__(self, exc_type, exc_val, exc_tb): @@ -223,9 +219,7 @@ def __init__(self, path, write, filename=None, override_fd=None): super().__init__(path, write, filename, override_fd) filename = filename or os.path.basename(path) output_dir = os.path.dirname(path) - self.output_integrity_file = 
self.integrity_file_path( - os.path.join(output_dir, filename) - ) + self.output_integrity_file = self.integrity_file_path(os.path.join(output_dir, filename)) def load_integrity_data(self, path, integrity_data): assert ( diff --git a/src/borg/crypto/key.py b/src/borg/crypto/key.py index fe14fed7a44..c9ae3bbfe4c 100644 --- a/src/borg/crypto/key.py +++ b/src/borg/crypto/key.py @@ -205,9 +205,7 @@ def assert_id(self, id, data): if id: id_computed = self.id_hash(data) if not compare_digest(id_computed, id): - raise IntegrityError( - "Chunk %s: id verification failed" % bin_to_hex(id) - ) + raise IntegrityError("Chunk %s: id verification failed" % bin_to_hex(id)) def _tam_key(self, salt, context): return hkdf_hmac_sha512( @@ -394,18 +392,14 @@ def encrypt(self, chunk): def decrypt(self, id, data, decompress=True): if not ( - data[0] == self.TYPE - or data[0] == PassphraseKey.TYPE - and isinstance(self, RepoKey) + data[0] == self.TYPE or data[0] == PassphraseKey.TYPE and isinstance(self, RepoKey) ): id_str = bin_to_hex(id) if id is not None else "(unknown)" raise IntegrityError("Chunk %s: Invalid encryption envelope" % id_str) try: payload = self.cipher.decrypt(data) except IntegrityError as e: - raise IntegrityError( - "Chunk %s: Could not decrypt [%s]" % (bin_to_hex(id), str(e)) - ) + raise IntegrityError("Chunk %s: Could not decrypt [%s]" % (bin_to_hex(id), str(e))) if not decompress: return payload data = self.decompress(payload) @@ -754,13 +748,9 @@ def sanity_check(self, filename, id): # we do the magic / id check in binary mode to avoid stumbling over # decoding errors if somebody has binary files in the keys dir for some reason. if fd.read(len(file_id)) != file_id: - raise KeyfileInvalidError( - self.repository._location.canonical_path(), filename - ) + raise KeyfileInvalidError(self.repository._location.canonical_path(), filename) if fd.read(len(repo_id)) != repo_id: - raise KeyfileMismatchError( - self.repository._location.canonical_path(), filename - ) + raise KeyfileMismatchError(self.repository._location.canonical_path(), filename) return filename def find_key(self): @@ -770,9 +760,7 @@ def find_key(self): keyfile = self._find_key_in_keys_dir() if keyfile is not None: return keyfile - raise KeyfileNotFoundError( - self.repository._location.canonical_path(), get_keys_dir() - ) + raise KeyfileNotFoundError(self.repository._location.canonical_path(), get_keys_dir()) def get_existing_or_new_target(self, args): keyfile = self._find_key_file_from_environment() @@ -860,9 +848,7 @@ def load(self, target, passphrase): # what we get in target is just a repo location, but we already have the repo obj: target = self.repository key_data = target.load_key() - key_data = key_data.decode( - "utf-8" - ) # remote repo: msgpack issue #99, getting bytes + key_data = key_data.decode("utf-8") # remote repo: msgpack issue #99, getting bytes success = self._load(key_data, passphrase) if success: self.target = target @@ -871,9 +857,7 @@ def load(self, target, passphrase): def save(self, target, passphrase, create=False): self.logically_encrypted = passphrase != "" key_data = self._save(passphrase) - key_data = key_data.encode( - "utf-8" - ) # remote repo: msgpack issue #99, giving bytes + key_data = key_data.encode("utf-8") # remote repo: msgpack issue #99, giving bytes target.save_key(key_data) self.target = target diff --git a/src/borg/crypto/keymanager.py b/src/borg/crypto/keymanager.py index ce1865eb6e2..95b54bb16e4 100644 --- a/src/borg/crypto/keymanager.py +++ b/src/borg/crypto/keymanager.py @@ -128,9 
+128,7 @@ def grouped(s): idx += 1 binline = binary[:18] checksum = sha256_truncated(idx.to_bytes(2, byteorder="big") + binline, 2) - export += "{0:2d}: {1} - {2}\n".format( - idx, grouped(bin_to_hex(binline)), checksum - ) + export += "{0:2d}: {1} - {2}\n".format(idx, grouped(bin_to_hex(binline)), checksum) binary = binary[18:] with dash_open(path, "w") as fd: @@ -207,19 +205,10 @@ def import_paperkey(self, args): try: part = unhexlify(data) except binascii.Error: - print( - "only characters 0-9 and a-f and '-' are valid, try again" - ) + print("only characters 0-9 and a-f and '-' are valid, try again") continue - if ( - sha256_truncated(idx.to_bytes(2, byteorder="big") + part, 2) - != checksum - ): - print( - "line checksum did not match, try line {0} again".format( - idx - ) - ) + if sha256_truncated(idx.to_bytes(2, byteorder="big") + part, 2) != checksum: + print("line checksum did not match, try line {0} again".format(idx)) continue result += part if idx == lines: @@ -232,9 +221,7 @@ def import_paperkey(self, args): ) continue - self.keyblob = ( - "\n".join(textwrap.wrap(b2a_base64(result).decode("ascii"))) + "\n" - ) + self.keyblob = "\n".join(textwrap.wrap(b2a_base64(result).decode("ascii"))) + "\n" self.store_keyblob(args) break diff --git a/src/borg/crypto/nonces.py b/src/borg/crypto/nonces.py index 2034c68c365..c5a7912c8a9 100644 --- a/src/borg/crypto/nonces.py +++ b/src/borg/crypto/nonces.py @@ -18,9 +18,7 @@ def __init__(self, repository, manifest_nonce): self.repository = repository self.end_of_nonce_reservation = None self.manifest_nonce = manifest_nonce - self.nonce_file = os.path.join( - get_security_dir(self.repository.id_str), "nonce" - ) + self.nonce_file = os.path.join(get_security_dir(self.repository.id_str), "nonce") def get_local_free_nonce(self): try: @@ -44,9 +42,7 @@ def get_repo_free_nonce(self): "Please upgrade to borg version 1.1+ on the server for safer AES-CTR nonce handling.\n" ) self.get_repo_free_nonce = lambda: None - self.commit_repo_nonce_reservation = ( - lambda next_unreserved, start_nonce: None - ) + self.commit_repo_nonce_reservation = lambda next_unreserved, start_nonce: None return None def commit_repo_nonce_reservation(self, next_unreserved, start_nonce): @@ -94,9 +90,7 @@ def ensure_reservation(self, nonce, nonce_space_needed): ) if x is not None ) - reservation_end = ( - free_nonce_space + nonce_space_needed + NONCE_SPACE_RESERVATION - ) + reservation_end = free_nonce_space + nonce_space_needed + NONCE_SPACE_RESERVATION assert reservation_end < MAX_REPRESENTABLE_NONCE self.commit_repo_nonce_reservation(reservation_end, repo_free_nonce) self.commit_local_nonce_reservation(reservation_end, local_free_nonce) diff --git a/src/borg/fuse.py b/src/borg/fuse.py index c82a3c58c4f..65e67689fb2 100644 --- a/src/borg/fuse.py +++ b/src/borg/fuse.py @@ -156,9 +156,7 @@ def get(self, inode): else: raise ValueError("Invalid entry type in self.meta") - def iter_archive_items( - self, archive_item_ids, filter=None, consider_part_files=False - ): + def iter_archive_items(self, archive_item_ids, filter=None, consider_part_files=False): unpacker = msgpack.Unpacker() # Current offset in the metadata stream, which consists of all metadata chunks glued together @@ -198,9 +196,7 @@ def iter_archive_items( # tell() is not helpful for the need_more_data case, but we know it is the remainder # of the data in that case. in the other case, tell() works as expected. 
length = ( - (len(data) - start) - if need_more_data - else (unpacker.tell() - stream_offset) + (len(data) - start) if need_more_data else (unpacker.tell() - stream_offset) ) msgpacked_bytes += data[start : start + length] stream_offset += length @@ -210,12 +206,7 @@ def iter_archive_items( break item = Item(internal_dict=item) - if ( - filter - and not filter(item) - or not consider_part_files - and "part" in item - ): + if filter and not filter(item) or not consider_part_files and "part" in item: msgpacked_bytes = b"" continue @@ -246,9 +237,7 @@ def iter_archive_items( if current_spans_chunks: pos = self.fd.seek(0, io.SEEK_END) self.fd.write(current_item) - meta[write_offset : write_offset + 9] = b"S" + pos.to_bytes( - 8, "little" - ) + meta[write_offset : write_offset + 9] = b"S" + pos.to_bytes(8, "little") self.direct_items += 1 else: item_offset = stream_offset - current_item_length - chunk_begin @@ -387,9 +376,7 @@ def peek_and_store_hardlink_masters(item, matched): ): hardlink_masters[item.get("path")] = (item.get("chunks"), None) - filter = Archiver.build_filter( - matcher, peek_and_store_hardlink_masters, strip_components - ) + filter = Archiver.build_filter(matcher, peek_and_store_hardlink_masters, strip_components) for item_inode, item in self.cache.iter_archive_items( archive.metadata.items, filter=filter, @@ -449,13 +436,9 @@ def _process_leaf( def file_version(item, path): if "chunks" in item: file_id = blake2b_128(path) - current_version, previous_id = self.versions_index.get( - file_id, (0, None) - ) + current_version, previous_id = self.versions_index.get(file_id, (0, None)) - contents_id = blake2b_128( - b"".join(chunk_id for chunk_id, _, _ in item.chunks) - ) + contents_id = blake2b_128(b"".join(chunk_id for chunk_id, _, _ in item.chunks)) if contents_id != previous_id: current_version += 1 @@ -482,9 +465,7 @@ def make_versioned_name(name, version, add_dir=False): if self.versions: # adjust link target name with version version = self.file_versions[link_target] - link_target = make_versioned_name( - link_target, version, add_dir=True - ) + link_target = make_versioned_name(link_target, version, add_dir=True) try: inode = self.find_inode(link_target, prefix) except KeyError: @@ -533,9 +514,7 @@ class FuseOperations(llfuse.Operations, FuseBackend): def __init__(self, key, repository, manifest, args, decrypted_repository): llfuse.Operations.__init__(self) - FuseBackend.__init__( - self, key, manifest, repository, args, decrypted_repository - ) + FuseBackend.__init__(self, key, manifest, repository, args, decrypted_repository) self.decrypted_repository = decrypted_repository data_cache_capacity = int( os.environ.get("BORG_MOUNT_DATA_CACHE_ENTRIES", os.cpu_count() or 1) @@ -552,8 +531,7 @@ def sig_info_handler(self, sig_no, stack): # getsizeof is the size of the dict itself; key and value are two small-ish integers, # which are shared due to code structure (this has been verified). 
format_file_size( - sys.getsizeof(self.parent) - + len(self.parent) * sys.getsizeof(self.inode_count) + sys.getsizeof(self.parent) + len(self.parent) * sys.getsizeof(self.inode_count) ), ) logger.debug("fuse: %d pending archives", len(self.pending_archives)) @@ -596,15 +574,11 @@ def pop_option(options, key, present, not_present, wanted_type, int_base=0): try: return int(value, base=int_base) except ValueError: - raise ValueError( - "unsupported value in option: %s" % option - ) from None + raise ValueError("unsupported value in option: %s" % option) from None try: return wanted_type(value) except ValueError: - raise ValueError( - "unsupported value in option: %s" % option - ) from None + raise ValueError("unsupported value in option: %s" % option) from None else: return not_present @@ -616,17 +590,13 @@ def pop_option(options, key, present, not_present, wanted_type, int_base=0): options = ["fsname=borgfs", "ro", "default_permissions"] if mount_options: options.extend(mount_options.split(",")) - ignore_permissions = pop_option( - options, "ignore_permissions", True, False, bool - ) + ignore_permissions = pop_option(options, "ignore_permissions", True, False, bool) if ignore_permissions: # in case users have a use-case that requires NOT giving "default_permissions", # this is enabled by the custom "ignore_permissions" mount option which just # removes "default_permissions" again: pop_option(options, "default_permissions", True, False, bool) - self.allow_damaged_files = pop_option( - options, "allow_damaged_files", True, False, bool - ) + self.allow_damaged_files = pop_option(options, "allow_damaged_files", True, False, bool) self.versions = pop_option(options, "versions", True, False, bool) self.uid_forced = pop_option(options, "uid", None, None, int) self.gid_forced = pop_option(options, "gid", None, None, int) @@ -656,9 +626,7 @@ def pop_option(options, key, present, not_present, wanted_type, int_base=0): else: with daemonizing() as (old_id, new_id): # local repo: the locking process' PID is changing, migrate it: - logger.debug( - "fuse: mount local repo, going to background: migrating lock." 
- ) + logger.debug("fuse: mount local repo, going to background: migrating lock.") self.repository_uncached.migrate_lock(old_id, new_id) # If the file system crashes, we do not want to umount because in that diff --git a/src/borg/fuse_impl.py b/src/borg/fuse_impl.py index 96f79a2ebcb..b63d9579443 100644 --- a/src/borg/fuse_impl.py +++ b/src/borg/fuse_impl.py @@ -29,9 +29,7 @@ elif FUSE_IMPL == "none": pass else: - raise RuntimeError( - "unknown fuse implementation in BORG_FUSE_IMPL: '%s'" % BORG_FUSE_IMPL - ) + raise RuntimeError("unknown fuse implementation in BORG_FUSE_IMPL: '%s'" % BORG_FUSE_IMPL) else: llfuse = None has_llfuse = False diff --git a/src/borg/helpers/checks.py b/src/borg/helpers/checks.py index 8345e8cac18..c4746f5dc01 100644 --- a/src/borg/helpers/checks.py +++ b/src/borg/helpers/checks.py @@ -36,8 +36,5 @@ def check_extension_modules(): raise ExtensionModuleError if item.API_VERSION != "1.2_01": raise ExtensionModuleError - if ( - platform.API_VERSION != platform.OS_API_VERSION - or platform.API_VERSION != "1.2_05" - ): + if platform.API_VERSION != platform.OS_API_VERSION or platform.API_VERSION != "1.2_05": raise ExtensionModuleError diff --git a/src/borg/helpers/fs.py b/src/borg/helpers/fs.py index ff9a11710a3..215206ca94a 100644 --- a/src/borg/helpers/fs.py +++ b/src/borg/helpers/fs.py @@ -179,12 +179,7 @@ def make_path_safe(path): def hardlinkable(mode): """return True if we support hardlinked items of this type""" - return ( - stat.S_ISREG(mode) - or stat.S_ISBLK(mode) - or stat.S_ISCHR(mode) - or stat.S_ISFIFO(mode) - ) + return stat.S_ISREG(mode) or stat.S_ISBLK(mode) or stat.S_ISCHR(mode) or stat.S_ISFIFO(mode) def scandir_keyfunc(dirent): @@ -255,12 +250,8 @@ def O_(*flags): flags_base = O_("BINARY", "NOCTTY", "RDONLY") -flags_special = flags_base | O_( - "NOFOLLOW" -) # BLOCK == wait when reading devices or fifos -flags_special_follow = ( - flags_base # BLOCK == wait when reading symlinked devices or fifos -) +flags_special = flags_base | O_("NOFOLLOW") # BLOCK == wait when reading devices or fifos +flags_special_follow = flags_base # BLOCK == wait when reading symlinked devices or fifos flags_normal = flags_base | O_("NONBLOCK", "NOFOLLOW") flags_noatime = flags_normal | O_("NOATIME") flags_root = O_("RDONLY") diff --git a/src/borg/helpers/manifest.py b/src/borg/helpers/manifest.py index e2dfe918a7d..56c2bd8b556 100644 --- a/src/borg/helpers/manifest.py +++ b/src/borg/helpers/manifest.py @@ -218,9 +218,7 @@ def load(cls, repository, operations, key=None, force_tam_not_required=False): manifest.timestamp = m.get("timestamp") manifest.config = m.config # valid item keys are whatever is known in the repo or every key we know - manifest.item_keys = ITEM_KEYS | frozenset( - key.decode() for key in m.get("item_keys", []) - ) + manifest.item_keys = ITEM_KEYS | frozenset(key.decode() for key in m.get("item_keys", [])) if manifest.tam_verified: manifest_required = manifest.config.get(b"tam_required", False) @@ -249,9 +247,7 @@ def check_repository_compatibility(self, operations): continue requirements = feature_flags[operation.value.encode()] if b"mandatory" in requirements: - unsupported = ( - set(requirements[b"mandatory"]) - self.SUPPORTED_REPO_FEATURES - ) + unsupported = set(requirements[b"mandatory"]) - self.SUPPORTED_REPO_FEATURES if unsupported: raise MandatoryFeatureUnsupported([f.decode() for f in unsupported]) diff --git a/src/borg/helpers/msgpack.py b/src/borg/helpers/msgpack.py index ca27377be8a..403a5647e3a 100644 --- a/src/borg/helpers/msgpack.py +++ 
b/src/borg/helpers/msgpack.py @@ -72,9 +72,7 @@ def pack(self, obj): def packb(o, *, use_bin_type=False, unicode_errors=None, **kwargs): assert unicode_errors is None try: - return mp_packb( - o, use_bin_type=use_bin_type, unicode_errors=unicode_errors, **kwargs - ) + return mp_packb(o, use_bin_type=use_bin_type, unicode_errors=unicode_errors, **kwargs) except Exception as e: raise PackException(e) @@ -83,11 +81,7 @@ def pack(o, stream, *, use_bin_type=False, unicode_errors=None, **kwargs): assert unicode_errors is None try: return mp_pack( - o, - stream, - use_bin_type=use_bin_type, - unicode_errors=unicode_errors, - **kwargs + o, stream, use_bin_type=use_bin_type, unicode_errors=unicode_errors, **kwargs ) except Exception as e: raise PackException(e) diff --git a/src/borg/helpers/parseformat.py b/src/borg/helpers/parseformat.py index 372a9cd8faf..fa1a6577f68 100644 --- a/src/borg/helpers/parseformat.py +++ b/src/borg/helpers/parseformat.py @@ -91,8 +91,7 @@ def interval(s): if hours <= 0: raise argparse.ArgumentTypeError( - 'Unexpected interval number "%s": expected an integer greater than 0' - % number + 'Unexpected interval number "%s": expected an integer greater than 0' % number ) return hours @@ -116,8 +115,7 @@ def ChunkerParams(s): raise ValueError("block_size must not be less than 64 Bytes") if block_size > MAX_DATA_SIZE or header_size > MAX_DATA_SIZE: raise ValueError( - "block_size and header_size must not exceed MAX_DATA_SIZE [%d]" - % MAX_DATA_SIZE + "block_size and header_size must not exceed MAX_DATA_SIZE [%d]" % MAX_DATA_SIZE ) return algo, block_size, header_size if algo == "default" and count == 1: # default @@ -126,9 +124,7 @@ def ChunkerParams(s): if ( algo == CH_BUZHASH and count == 5 or count == 4 ): # [buzhash, ]chunk_min, chunk_max, chunk_mask, window_size - chunk_min, chunk_max, chunk_mask, window_size = [ - int(p) for p in params[count - 4 :] - ] + chunk_min, chunk_max, chunk_mask, window_size = [int(p) for p in params[count - 4 :]] if not (chunk_min <= chunk_mask <= chunk_max): raise ValueError("required: chunk_min <= chunk_mask <= chunk_max") if chunk_min < 6: @@ -145,9 +141,7 @@ def ChunkerParams(s): def FilesCacheMode(s): - ENTRIES_MAP = dict( - ctime="c", mtime="m", size="s", inode="i", rechunk="r", disabled="d" - ) + ENTRIES_MAP = dict(ctime="c", mtime="m", size="s", inode="i", rechunk="r", disabled="d") VALID_MODES = ( "cis", "ims", @@ -161,8 +155,7 @@ def FilesCacheMode(s): entries = set(s.strip().split(",")) if not entries <= set(ENTRIES_MAP): raise ValueError( - "cache mode must be a comma-separated list of: %s" - % ",".join(sorted(ENTRIES_MAP)) + "cache mode must be a comma-separated list of: %s" % ",".join(sorted(ENTRIES_MAP)) ) short_entries = {ENTRIES_MAP[entry] for entry in entries} mode = "".join(sorted(short_entries)) @@ -292,9 +285,7 @@ def parse_file_size(s): return int(float(s) * factor) -def sizeof_fmt( - num, suffix="B", units=None, power=None, sep="", precision=2, sign=False -): +def sizeof_fmt(num, suffix="B", units=None, power=None, sep="", precision=2, sign=False): sign = "+" if sign and num > 0 else "" fmt = "{0:{1}.{2}f}{3}{4}{5}" prec = 0 @@ -597,13 +588,9 @@ def validator(text): raise argparse.ArgumentTypeError('"%s": No archive can be specified' % text) if proto is not None and loc.proto != proto: if proto == "file": - raise argparse.ArgumentTypeError( - '"%s": Repository must be local' % text - ) + raise argparse.ArgumentTypeError('"%s": Repository must be local' % text) else: - raise argparse.ArgumentTypeError( - '"%s": Repository 
must be remote' % text - ) + raise argparse.ArgumentTypeError('"%s": Repository must be remote' % text) return loc return validator @@ -887,9 +874,7 @@ def __init__(self, archive, format, *, json_lines=False): "size": self.calculate_size, "csize": self.calculate_csize, "dsize": partial(self.sum_unique_chunks_metadata, lambda chunk: chunk.size), - "dcsize": partial( - self.sum_unique_chunks_metadata, lambda chunk: chunk.csize - ), + "dcsize": partial(self.sum_unique_chunks_metadata, lambda chunk: chunk.csize), "num_chunks": self.calculate_num_chunks, "unique_chunks": partial(self.sum_unique_chunks_metadata, lambda chunk: 1), "isomtime": partial(self.format_iso_time, "mtime"), @@ -954,9 +939,7 @@ def sum_unique_chunks_metadata(self, metadata_func, item): chunks = item.get("chunks", []) chunks_counter = Counter(c.id for c in chunks) return sum( - metadata_func(c) - for c in chunks - if chunk_index[c.id].refcount == chunks_counter[c.id] + metadata_func(c) for c in chunks if chunk_index[c.id].refcount == chunks_counter[c.id] ) def calculate_num_chunks(self, item): @@ -1004,9 +987,7 @@ def file_status(mode): return "?" -def clean_lines( - lines, lstrip=None, rstrip=None, remove_empty=True, remove_comments=True -): +def clean_lines(lines, lstrip=None, rstrip=None, remove_empty=True, remove_comments=True): """ clean lines (usually read from a config file): diff --git a/src/borg/helpers/process.py b/src/borg/helpers/process.py index 07d02f878de..735dbbcbf01 100644 --- a/src/borg/helpers/process.py +++ b/src/borg/helpers/process.py @@ -32,9 +32,7 @@ def _daemonize(): except _ExitCodeException as e: exit_code = e.exit_code finally: - logger.debug( - "Daemonizing: Foreground process (%s, %s, %s) is now dying." % old_id - ) + logger.debug("Daemonizing: Foreground process (%s, %s, %s) is now dying." % old_id) os._exit(exit_code) os.setsid() pid = os.fork() @@ -106,17 +104,14 @@ def daemonizing(*, timeout=5): except KeyboardInterrupt: # Manual termination. logger.debug( - "Daemonizing: Foreground process (%s, %s, %s) received SIGINT." - % old_id + "Daemonizing: Foreground process (%s, %s, %s) received SIGINT." % old_id ) exit_code = EXIT_SIGNAL_BASE + 2 except BaseException as e: # Just in case... logger.warning( "Daemonizing: Foreground process received an exception while waiting:\n" - + "".join( - traceback.format_exception(e.__class__, e, e.__traceback__) - ) + + "".join(traceback.format_exception(e.__class__, e, e.__traceback__)) ) exit_code = EXIT_WARNING else: @@ -131,9 +126,7 @@ def daemonizing(*, timeout=5): # The background / grandchild process. sig_to_foreground = signal.SIGTERM - logger.debug( - "Daemonizing: Background process (%s, %s, %s) is starting..." % new_id - ) + logger.debug("Daemonizing: Background process (%s, %s, %s) is starting..." % new_id) try: yield old_id, new_id except BaseException as e: @@ -144,18 +137,14 @@ def daemonizing(*, timeout=5): ) raise e else: - logger.debug( - "Daemonizing: Background process (%s, %s, %s) has started." % new_id - ) + logger.debug("Daemonizing: Background process (%s, %s, %s) has started." 
% new_id) finally: try: os.kill(old_id[1], sig_to_foreground) except BaseException as e: logger.error( "Daemonizing: Trying to kill the foreground process raised an exception:\n" - + "".join( - traceback.format_exception(e.__class__, e, e.__traceback__) - ) + + "".join(traceback.format_exception(e.__class__, e, e.__traceback__)) ) @@ -288,11 +277,7 @@ def popen_with_error_handling(cmd_line: str, log_prefix="", **kwargs): def is_terminal(fd=sys.stdout): - return ( - hasattr(fd, "isatty") - and fd.isatty() - and (not is_win32 or "ANSICON" in os.environ) - ) + return hasattr(fd, "isatty") and fd.isatty() and (not is_win32 or "ANSICON" in os.environ) def prepare_subprocess_env(system, env=None): @@ -324,11 +309,7 @@ def prepare_subprocess_env(system, env=None): # in this case, we must kill LDLP. # We can recognize this via sys.frozen and sys._MEIPASS being set. lp = env.get(lp_key) - if ( - lp is not None - and getattr(sys, "frozen", False) - and hasattr(sys, "_MEIPASS") - ): + if lp is not None and getattr(sys, "frozen", False) and hasattr(sys, "_MEIPASS"): env.pop(lp_key) # security: do not give secrets to subprocess env.pop("BORG_PASSPHRASE", None) diff --git a/src/borg/helpers/progress.py b/src/borg/helpers/progress.py index 3fcce77a5fc..a4d4a2ef14b 100644 --- a/src/borg/helpers/progress.py +++ b/src/borg/helpers/progress.py @@ -122,9 +122,7 @@ def __init__(self, total=0, step=5, start=0, msg="%3.0f%%", msgid=None): """ self.counter = 0 # 0 .. (total-1) self.total = total - self.trigger_at = ( - start # output next percentage value when reaching (at least) this - ) + self.trigger_at = start # output next percentage value when reaching (at least) this self.step = step self.msg = msg @@ -155,21 +153,15 @@ def show(self, current=None, increase=1, info=None): # no need to truncate if we're not outputting to a terminal terminal_space = get_terminal_size(fallback=(-1, -1))[0] if terminal_space != -1: - space = terminal_space - len( - self.msg % tuple([pct] + info[:-1] + [""]) - ) + space = terminal_space - len(self.msg % tuple([pct] + info[:-1] + [""])) info[-1] = ellipsis_truncate(info[-1], space) - return self.output( - self.msg % tuple([pct] + info), justify=False, info=info - ) + return self.output(self.msg % tuple([pct] + info), justify=False, info=info) return self.output(self.msg % pct) def output(self, message, justify=True, info=None): if self.json: - self.output_json( - message=message, current=self.counter, total=self.total, info=info - ) + self.output_json(message=message, current=self.counter, total=self.total, info=info) else: if justify: message = justify_to_terminal_size(message) diff --git a/src/borg/helpers/time.py b/src/borg/helpers/time.py index ab211cd2ae8..b7a9165d1dd 100644 --- a/src/borg/helpers/time.py +++ b/src/borg/helpers/time.py @@ -8,9 +8,7 @@ def to_localtime(ts): """Convert datetime object from UTC to local time zone""" return datetime( - *time.localtime( - (ts - datetime(1970, 1, 1, tzinfo=timezone.utc)).total_seconds() - )[:6] + *time.localtime((ts - datetime(1970, 1, 1, tzinfo=timezone.utc)).total_seconds())[:6] ) diff --git a/src/borg/logger.py b/src/borg/logger.py index 619140e9b5f..2311f9873dd 100644 --- a/src/borg/logger.py +++ b/src/borg/logger.py @@ -89,9 +89,7 @@ def setup_logging( logger = logging.getLogger(__name__) borg_logger = logging.getLogger("borg") borg_logger.json = json - logger.debug( - 'using logging configuration read from "{0}"'.format(conf_fname) - ) + logger.debug('using logging configuration read from "{0}"'.format(conf_fname)) 
warnings.showwarning = _log_warning return None except Exception as err: # XXX be more precise @@ -119,9 +117,7 @@ def setup_logging( configured = True logger = logging.getLogger(__name__) if err_msg: - logger.warning( - 'setup_logging for "{0}" failed with "{1}".'.format(conf_fname, err_msg) - ) + logger.warning('setup_logging for "{0}" failed with "{1}".'.format(conf_fname, err_msg)) logger.debug("using builtin fallback logging configuration") warnings.showwarning = _log_warning return handler @@ -172,9 +168,7 @@ def __init__(self, name=None): def __logger(self): if self.__real_logger is None: if not configured: - raise Exception( - "tried to call a logger before setup_logging() was called" - ) + raise Exception("tried to call a logger before setup_logging() was called") self.__real_logger = logging.getLogger(self.__name) if ( self.__name.startswith("borg.debug.") diff --git a/src/borg/lrucache.py b/src/borg/lrucache.py index 0227d776700..097fe4c52f1 100644 --- a/src/borg/lrucache.py +++ b/src/borg/lrucache.py @@ -10,8 +10,7 @@ def __init__(self, capacity, dispose): def __setitem__(self, key, value): assert key not in self._cache, ( - "Unexpected attempt to replace a cached item," - " without first deleting the old item." + "Unexpected attempt to replace a cached item," " without first deleting the old item." ) self._lru.append(key) while len(self._lru) > self._capacity: diff --git a/src/borg/nanorst.py b/src/borg/nanorst.py index 0932e326804..71d9e947021 100644 --- a/src/borg/nanorst.py +++ b/src/borg/nanorst.py @@ -147,11 +147,7 @@ def rst_to_text(text, state_hook=None, references=None): state = "text" text.read(1) continue - if ( - state == "code-block" - and char == next == "\n" - and text.peek(5)[1:] != " " - ): + if state == "code-block" and char == next == "\n" and text.peek(5)[1:] != " ": # Foo:: # # *stuff* *code* *ignore .. 
all markup* @@ -163,9 +159,7 @@ def rst_to_text(text, state_hook=None, references=None): state = "text" out.write(char) - assert state == "text", ( - "Invalid final state %r (This usually indicates unmatched */**)" % state - ) + assert state == "text", "Invalid final state %r (This usually indicates unmatched */**)" % state return out.getvalue() diff --git a/src/borg/patterns.py b/src/borg/patterns.py index 57efadcfb25..6638a4c276c 100644 --- a/src/borg/patterns.py +++ b/src/borg/patterns.py @@ -218,9 +218,7 @@ class PathFullPattern(PatternBase): PREFIX = "pf" def _prepare(self, pattern): - self.pattern = os.path.normpath(pattern).lstrip( - os.path.sep - ) # sep at beginning is removed + self.pattern = os.path.normpath(pattern).lstrip(os.path.sep) # sep at beginning is removed def _match(self, path): return path == self.pattern @@ -261,10 +259,7 @@ class FnmatchPattern(PatternBase): def _prepare(self, pattern): if pattern.endswith(os.path.sep): pattern = ( - os.path.normpath(pattern).rstrip(os.path.sep) - + os.path.sep - + "*" - + os.path.sep + os.path.normpath(pattern).rstrip(os.path.sep) + os.path.sep + "*" + os.path.sep ) else: pattern = os.path.normpath(pattern) + os.path.sep + "*" @@ -290,9 +285,7 @@ def _prepare(self, pattern): sep = os.path.sep if pattern.endswith(sep): - pattern = ( - os.path.normpath(pattern).rstrip(sep) + sep + "**" + sep + "*" + sep - ) + pattern = os.path.normpath(pattern).rstrip(sep) + sep + "**" + sep + "*" + sep else: pattern = os.path.normpath(pattern) + sep + "**" + sep + "*" @@ -389,8 +382,7 @@ def parse_inclexcl_command(cmd_line_str, fallback=ShellPattern): cmd = cmd_prefix_map.get(cmd_line_str[0]) if cmd is None: raise argparse.ArgumentTypeError( - "A pattern/command must start with anyone of: %s" - % ", ".join(cmd_prefix_map) + "A pattern/command must start with anyone of: %s" % ", ".join(cmd_prefix_map) ) # remaining text on command-line following the command character @@ -406,9 +398,7 @@ def parse_inclexcl_command(cmd_line_str, fallback=ShellPattern): try: val = get_pattern_class(remainder_str) except ValueError: - raise argparse.ArgumentTypeError( - "Invalid pattern style: {}".format(remainder_str) - ) + raise argparse.ArgumentTypeError("Invalid pattern style: {}".format(remainder_str)) else: # determine recurse_dir based on command type recurse_dir = command_recurses_dir(cmd) diff --git a/src/borg/platform/base.py b/src/borg/platform/base.py index 568a3c2aa95..8e175989a6d 100644 --- a/src/borg/platform/base.py +++ b/src/borg/platform/base.py @@ -254,9 +254,7 @@ def getfqdn(name=""): if not name or name == "0.0.0.0": name = socket.gethostname() try: - addrs = socket.getaddrinfo( - name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME - ) + addrs = socket.getaddrinfo(name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) except socket.error: pass else: diff --git a/src/borg/remote.py b/src/borg/remote.py index c1e4eda70f8..0308e463eed 100644 --- a/src/borg/remote.py +++ b/src/borg/remote.py @@ -183,9 +183,7 @@ class RepositoryServer: # pragma: no cover "inject_exception", ) - def __init__( - self, restrict_to_paths, restrict_to_repositories, append_only, storage_quota - ): + def __init__(self, restrict_to_paths, restrict_to_repositories, append_only, storage_quota): self.repository = None self.restrict_to_paths = restrict_to_paths self.restrict_to_repositories = restrict_to_repositories @@ -306,9 +304,7 @@ def serve(self): MSGID: msgid, b"exception_class": e.__class__.__name__, b"exception_args": [ - x - if isinstance(x, (str, bytes, int)) - 
else None + x if isinstance(x, (str, bytes, int)) else None for x in e.args ], b"exception_full": ex_full, @@ -334,16 +330,11 @@ def serve(self): pass else: if isinstance(e, Error): - tb_log_level = ( - logging.ERROR if e.traceback else logging.DEBUG - ) + tb_log_level = logging.ERROR if e.traceback else logging.DEBUG msg = e.get_message() else: tb_log_level = logging.ERROR - msg = ( - "%s Exception in RPC call" - % e.__class__.__name__ - ) + msg = "%s Exception in RPC call" % e.__class__.__name__ tb = "%s\n%s" % (traceback.format_exc(), sysinfo()) logging.error(msg) logging.log(tb_log_level, tb) @@ -354,9 +345,7 @@ def serve(self): ) else: if dictFormat: - os_write( - stdout_fd, msgpack.packb({MSGID: msgid, RESULT: res}) - ) + os_write(stdout_fd, msgpack.packb({MSGID: msgid, RESULT: res})) else: os_write(stdout_fd, msgpack.packb((1, msgid, None, res))) if es: @@ -375,7 +364,9 @@ def negotiate(self, client_data): setup_logging(is_serve=True, json=True, level=level) logger.debug("Initialized logging system for JSON-based protocol") else: - self.client_version = BORG_VERSION # seems to be newer than current version (no known old format) + self.client_version = ( + BORG_VERSION # seems to be newer than current version (no known old format) + ) # not a known old format, send newest negotiate this version knows return {"server_version": BORG_VERSION} @@ -409,9 +400,7 @@ def open( logging.debug("Resolving repository path %r", path) path = self._resolve_path(path) logging.debug("Resolved repository path to %r", path) - path_with_sep = os.path.join( - path, "" - ) # make sure there is a trailing slash (os.sep) + path_with_sep = os.path.join(path, "") # make sure there is a trailing slash (os.sep) if self.restrict_to_paths: # if --restrict-to-path P is given, we make sure that we only operate in/below path P. # for the prefix check, it is important that the compared paths both have trailing slashes, @@ -548,10 +537,7 @@ def do_rpc(self, *args, **kwargs): for name, restriction in kwargs_decorator.items(): if restriction["since"] <= self.server_version: continue - if ( - "previously" in restriction - and named[name] == restriction["previously"] - ): + if "previously" in restriction and named[name] == restriction["previously"]: continue if restriction.get("dontcare", False): continue @@ -596,10 +582,7 @@ def exception_full(self): if b"exception_full" in self.unpacked: return b"\n".join(self.unpacked[b"exception_full"]).decode() else: - return ( - self.get_message() - + "\nRemote Exception (see remote log for the traceback)" - ) + return self.get_message() + "\nRemote Exception (see remote log for the traceback)" @property def sysinfo(self): @@ -665,9 +648,7 @@ def __init__( if not testing: borg_cmd = self.ssh_cmd(location) + borg_cmd logger.debug("SSH command line: %s", borg_cmd) - self.p = Popen( - borg_cmd, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env - ) + self.p = Popen(borg_cmd, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env) self.stdin_fd = self.p.stdin.fileno() self.stdout_fd = self.p.stdout.fileno() self.stderr_fd = self.p.stderr.fileno() @@ -688,9 +669,7 @@ def __init__( }, ) except ConnectionClosed: - raise ConnectionClosedWithHint( - "Is borg working on the server?" 
- ) from None + raise ConnectionClosedWithHint("Is borg working on the server?") from None if version == RPC_PROTOCOL_VERSION: self.dictFormat = False elif isinstance(version, dict) and b"server_version" in version: @@ -747,8 +726,7 @@ def do_open(): def __del__(self): if len(self.responses): logging.debug( - "still %d cached responses left in RemoteRepository" - % (len(self.responses),) + "still %d cached responses left in RemoteRepository" % (len(self.responses),) ) if self.p: self.close() @@ -858,9 +836,7 @@ def call_many(self, cmd, calls, wait=True, is_preloaded=False, async_wait=True): def send_buffer(): if self.to_send: try: - written = self.ratelimit.write( - self.stdin_fd, self.to_send.peek_front() - ) + written = self.ratelimit.write(self.stdin_fd, self.to_send.peek_front()) self.tx_bytes += written self.to_send.pop_front(written) except OSError as e: @@ -906,13 +882,9 @@ def handle_error(unpacked): raise Repository.ParentPathDoesNotExist(args[0].decode()) elif error == "ObjectNotFound": if old_server: - raise Repository.ObjectNotFound( - "(not available)", self.location.processed - ) + raise Repository.ObjectNotFound("(not available)", self.location.processed) else: - raise Repository.ObjectNotFound( - args[0].decode(), self.location.processed - ) + raise Repository.ObjectNotFound(args[0].decode(), self.location.processed) elif error == "InvalidRPCMethod": if old_server: raise InvalidRPCMethod("(not available)") @@ -963,9 +935,7 @@ def handle_error(unpacked): handle_error(unpacked) else: yield unpacked[RESULT] - if self.to_send or ( - (calls or self.preload_ids) and len(waiting_for) < MAX_INFLIGHT - ): + if self.to_send or ((calls or self.preload_ids) and len(waiting_for) < MAX_INFLIGHT): w_fds = [self.stdin_fd] else: w_fds = [] @@ -1029,13 +999,9 @@ def handle_error(unpacked): ): if calls: if is_preloaded: - assert ( - cmd == "get" - ), "is_preload is only supported for 'get'" + assert cmd == "get", "is_preload is only supported for 'get'" if calls[0]["id"] in self.chunkid_to_msgids: - waiting_for.append( - pop_preload_msgid(calls.pop(0)["id"]) - ) + waiting_for.append(pop_preload_msgid(calls.pop(0)["id"])) else: args = calls.pop(0) if cmd == "get" and args["id"] in self.chunkid_to_msgids: @@ -1045,9 +1011,7 @@ def handle_error(unpacked): waiting_for.append(self.msgid) if self.dictFormat: self.to_send.push_back( - msgpack.packb( - {MSGID: self.msgid, MSG: cmd, ARGS: args} - ) + msgpack.packb({MSGID: self.msgid, MSG: cmd, ARGS: args}) ) else: self.to_send.push_back( @@ -1064,14 +1028,10 @@ def handle_error(unpacked): chunk_id = self.preload_ids.pop(0) args = {"id": chunk_id} self.msgid += 1 - self.chunkid_to_msgids.setdefault(chunk_id, []).append( - self.msgid - ) + self.chunkid_to_msgids.setdefault(chunk_id, []).append(self.msgid) if self.dictFormat: self.to_send.push_back( - msgpack.packb( - {MSGID: self.msgid, MSG: "get", ARGS: args} - ) + msgpack.packb({MSGID: self.msgid, MSG: "get", ARGS: args}) ) else: self.to_send.push_back( @@ -1130,9 +1090,7 @@ def check(self, repair=False, save_space=False, max_duration=0): "dontcare": True, }, ) - def commit( - self, save_space=False, compact=True, threshold=0.1, cleanup_commits=False - ): + def commit(self, save_space=False, compact=True, threshold=0.1, cleanup_commits=False): """actual remoting is done via self.call in the @api decorator""" @api(since=parse_version("1.0.0")) @@ -1160,9 +1118,7 @@ def get(self, id): return resp def get_many(self, ids, is_preloaded=False): - for resp in self.call_many( - "get", [{"id": id} for id in 
ids], is_preloaded=is_preloaded - ): + for resp in self.call_many("get", [{"id": id} for id in ids], is_preloaded=is_preloaded): yield resp @api(since=parse_version("1.0.0")) @@ -1201,9 +1157,7 @@ def close(self): self.p = None def async_response(self, wait=True): - for resp in self.call_many( - "async_responses", calls=[], wait=True, async_wait=wait - ): + for resp in self.call_many("async_responses", calls=[], wait=True, async_wait=wait): return resp def preload(self, ids): @@ -1223,9 +1177,7 @@ def handle_remote_line(line): msg = json.loads(line) if msg["type"] not in ("progress_message", "progress_percent", "log_message"): - logger.warning( - "Dropped remote log message with unknown type %r: %s", msg["type"], line - ) + logger.warning("Dropped remote log message with unknown type %r: %s", msg["type"], line) return if msg["type"] == "log_message": @@ -1235,10 +1187,7 @@ def handle_remote_line(line): target_logger = logging.getLogger(msg["name"]) msg["message"] = "Remote: " + msg["message"] # In JSON mode, we manually check whether the log message should be propagated. - if ( - logging.getLogger("borg").json - and level >= target_logger.getEffectiveLevel() - ): + if logging.getLogger("borg").json and level >= target_logger.getEffectiveLevel(): sys.stderr.write(json.dumps(msg) + "\n") else: target_logger.log(level, "%s", msg["message"]) @@ -1444,13 +1393,7 @@ def get_many(self, keys, cache=True): def cache_if_remote( - repository, - *, - decrypted_cache=False, - pack=None, - unpack=None, - transform=None, - force_cache=False + repository, *, decrypted_cache=False, pack=None, unpack=None, transform=None, force_cache=False ): """ Return a Repository(No)Cache for *repository*. diff --git a/src/borg/repository.py b/src/borg/repository.py index c910c12c1ad..82d0e74a8cf 100644 --- a/src/borg/repository.py +++ b/src/borg/repository.py @@ -212,16 +212,12 @@ def __enter__(self): self.do_create = False self.create(self.path) self.created = True - self.open( - self.path, bool(self.exclusive), lock_wait=self.lock_wait, lock=self.do_lock - ) + self.open(self.path, bool(self.exclusive), lock_wait=self.lock_wait, lock=self.do_lock) return self def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None: - no_space_left_on_device = ( - exc_type is OSError and exc_val.errno == errno.ENOSPC - ) + no_space_left_on_device = exc_type is OSError and exc_val.errno == errno.ENOSPC # The ENOSPC could have originated somewhere else besides the Repository. The cleanup is always safe, unless # EIO or FS corruption ensues, which is why we specifically check for ENOSPC. if self._active_txn and no_space_left_on_device: @@ -248,10 +244,7 @@ def is_repository(path): readme_head = fd.read(100) # The first comparison captures our current variant (REPOSITORY_README), the second comparison # is an older variant of the README file (used by 1.0.x). - return ( - b"Borg Backup repository" in readme_head - or b"Borg repository" in readme_head - ) + return b"Borg Backup repository" in readme_head or b"Borg repository" in readme_head except OSError: # Ignore FileNotFound, PermissionError, ... 
return False @@ -324,9 +317,7 @@ def save_config(self, path, config): old_config_path = os.path.join(path, "config.old") if os.path.isfile(old_config_path): - logger.warning( - "Old config file not securely erased on previous config update" - ) + logger.warning("Old config file not securely erased on previous config update") secure_erase(old_config_path) if os.path.isfile(config_path): @@ -369,17 +360,13 @@ def save_config(self, path, config): def save_key(self, keydata): assert self.config - keydata = keydata.decode( - "utf-8" - ) # remote repo: msgpack issue #99, getting bytes + keydata = keydata.decode("utf-8") # remote repo: msgpack issue #99, getting bytes self.config.set("repository", "key", keydata) self.save_config(self.path, self.config) def load_key(self): keydata = self.config.get("repository", "key") - return keydata.encode( - "utf-8" - ) # remote repo: msgpack issue #99, returning bytes + return keydata.encode("utf-8") # remote repo: msgpack issue #99, returning bytes def get_free_nonce(self): if self.do_lock and not self.lock.got_exclusive_lock(): @@ -441,17 +428,12 @@ def check_transaction(self): # this can happen if a lot of segment files are lost, e.g. due to a # filesystem or hardware malfunction. it means we have no identifiable # valid (committed) state of the repo which we could use. - msg = ( - '%s" - although likely this is "beyond repair' % self.path - ) # dirty hack + msg = '%s" - although likely this is "beyond repair' % self.path # dirty hack raise self.CheckNeeded(msg) # Attempt to automatically rebuild index if we crashed between commit # tag write and index save if index_transaction_id != segments_transaction_id: - if ( - index_transaction_id is not None - and index_transaction_id > segments_transaction_id - ): + if index_transaction_id is not None and index_transaction_id > segments_transaction_id: replay_from = None else: replay_from = index_transaction_id @@ -478,9 +460,7 @@ def open(self, path, exclusive, lock_wait=None, lock=True): if not stat.S_ISDIR(st.st_mode): raise self.InvalidRepository(path) if lock: - self.lock = Lock( - os.path.join(path, "lock"), exclusive, timeout=lock_wait - ).acquire() + self.lock = Lock(os.path.join(path, "lock"), exclusive, timeout=lock_wait).acquire() else: self.lock = None self.config = ConfigParser(interpolation=None) @@ -496,9 +476,7 @@ def open(self, path, exclusive, lock_wait=None, lock=True): ): self.close() raise self.InvalidRepository(path) - self.max_segment_size = parse_file_size( - self.config.get("repository", "max_segment_size") - ) + self.max_segment_size = parse_file_size(self.config.get("repository", "max_segment_size")) if self.max_segment_size >= MAX_SEGMENT_SIZE_LIMIT: self.close() raise self.InvalidRepositoryConfig( @@ -523,10 +501,7 @@ def open(self, path, exclusive, lock_wait=None, lock=True): if self.check_segment_magic: # read a segment and check whether we are dealing with a non-upgraded Attic repository segment = self.io.get_latest_segment() - if ( - segment is not None - and self.io.get_segment_magic(segment) == ATTIC_MAGIC - ): + if segment is not None and self.io.get_segment_magic(segment) == ATTIC_MAGIC: self.close() raise self.AtticRepository(path) @@ -538,9 +513,7 @@ def close(self): self.lock.release() self.lock = None - def commit( - self, save_space=False, compact=True, threshold=0.1, cleanup_commits=False - ): + def commit(self, save_space=False, compact=True, threshold=0.1, cleanup_commits=False): """Commit transaction""" # save_space is not used anymore, but stays for RPC/API 
compatibility. if self.transaction_doomed: @@ -587,14 +560,10 @@ def open_index(self, transaction_id, auto_recover=True): index_path = os.path.join(self.path, "index.%d" % transaction_id) integrity_data = self._read_integrity(transaction_id, b"index") try: - with IntegrityCheckedFile( - index_path, write=False, integrity_data=integrity_data - ) as fd: + with IntegrityCheckedFile(index_path, write=False, integrity_data=integrity_data) as fd: return NSIndex.read(fd) except (ValueError, OSError, FileIntegrityError) as exc: - logger.warning( - "Repository index missing or corrupted, trying to recover from: %s", exc - ) + logger.warning("Repository index missing or corrupted, trying to recover from: %s", exc) os.unlink(index_path) if not auto_recover: raise @@ -626,15 +595,11 @@ def prepare_txn(self, transaction_id, do_cleanup=True): try: self.index = self.open_index(transaction_id, auto_recover=False) except (ValueError, OSError, FileIntegrityError) as exc: - logger.warning( - "Checking repository transaction due to previous error: %s", exc - ) + logger.warning("Checking repository transaction due to previous error: %s", exc) self.check_transaction() self.index = self.open_index(transaction_id, auto_recover=False) if transaction_id is None: - self.segments = ( - {} - ) # XXX bad name: usage_count_of_segment_x = self.segments[x] + self.segments = {} # XXX bad name: usage_count_of_segment_x = self.segments[x] self.compact = ( FreeSpace() ) # XXX bad name: freeable_space_of_segment_x = self.compact[x] @@ -726,9 +691,7 @@ def rename_tmp(file): # Write hints file hints_name = "hints.%d" % transaction_id hints_file = os.path.join(self.path, hints_name) - with IntegrityCheckedFile( - hints_file + ".tmp", filename=hints_name, write=True - ) as fd: + with IntegrityCheckedFile(hints_file + ".tmp", filename=hints_name, write=True) as fd: msgpack.pack(hints, fd) flush_and_sync(fd) integrity[b"hints"] = fd.integrity_data @@ -736,9 +699,7 @@ def rename_tmp(file): # Write repository index index_name = "index.%d" % transaction_id index_file = os.path.join(self.path, index_name) - with IntegrityCheckedFile( - index_file + ".tmp", filename=index_name, write=True - ) as fd: + with IntegrityCheckedFile(index_file + ".tmp", filename=index_name, write=True) as fd: # XXX: Consider using SyncFile for index write-outs. self.index.write(fd) flush_and_sync(fd) @@ -802,9 +763,7 @@ def check_free_space(self): except FileNotFoundError: # looks like self.compact is referring to a non-existent segment file, ignore it. pass - logger.debug( - "check_free_space: few segments, not requiring a full free segment" - ) + logger.debug("check_free_space: few segments, not requiring a full free segment") compact_working_space = min(compact_working_space, full_segment_size) logger.debug( "check_free_space: calculated working space for compact as %d bytes", @@ -818,9 +777,7 @@ def check_free_space(self): try: free_space = shutil.disk_usage(self.path).free except OSError as os_error: - logger.warning( - "Failed to check free space before committing: " + str(os_error) - ) + logger.warning("Failed to check free space before committing: " + str(os_error)) return logger.debug( "check_free_space: required bytes {}, free bytes {}".format( @@ -829,9 +786,7 @@ def check_free_space(self): ) if free_space < required_free_space: if self.created: - logger.error( - "Not enough free space to initialize repository at this location." 
- ) + logger.error("Not enough free space to initialize repository at this location.") self.destroy() else: self._rollback(cleanup=True) @@ -873,9 +828,7 @@ def complete_xfer(intermediate=True): for segment in unused: logger.debug("complete_xfer: deleting unused segment %d", segment) count = self.segments.pop(segment) - assert ( - count == 0 - ), "Corrupted segment reference count - corrupted index or hints" + assert count == 0, "Corrupted segment reference count - corrupted index or hints" self.io.delete_segment(segment) del self.compact[segment] unused = [] @@ -889,9 +842,7 @@ def complete_xfer(intermediate=True): ) for segment, freeable_space in sorted(self.compact.items()): if not self.io.segment_exists(segment): - logger.warning( - "segment %d not found, but listed in compaction data", segment - ) + logger.warning("segment %d not found, but listed in compaction data", segment) del self.compact[segment] pi.show() continue @@ -917,18 +868,14 @@ def complete_xfer(intermediate=True): freeable_ratio * 100.0, freeable_space, ) - for tag, key, offset, data in self.io.iter_objects( - segment, include_data=True - ): + for tag, key, offset, data in self.io.iter_objects(segment, include_data=True): if tag == TAG_COMMIT: continue in_index = self.index.get(key) is_index_object = in_index == (segment, offset) if tag == TAG_PUT and is_index_object: try: - new_segment, offset = self.io.write_put( - key, data, raise_full=True - ) + new_segment, offset = self.io.write_put(key, data, raise_full=True) except LoggedIO.SegmentFull: complete_xfer() new_segment, offset = self.io.write_put(key, data) @@ -994,9 +941,7 @@ def complete_xfer(intermediate=True): # Note that in this check the index state is the proxy for a "most definitely settled" repository state, # ie. the assumption is that *all* operations on segments <= index state are completed and stable. 
try: - new_segment, size = self.io.write_delete( - key, raise_full=True - ) + new_segment, size = self.io.write_delete(key, raise_full=True) except LoggedIO.SegmentFull: complete_xfer() new_segment, size = self.io.write_delete(key) @@ -1014,9 +959,7 @@ def complete_xfer(intermediate=True): pi.show() pi.finish() complete_xfer(intermediate=False) - logger.info( - "compaction freed about %s repository space.", format_file_size(freed_space) - ) + logger.info("compaction freed about %s repository space.", format_file_size(freed_space)) logger.debug("compaction completed.") def replay_segments(self, index_transaction_id, segments_transaction_id): @@ -1229,25 +1172,19 @@ def report_error(msg): for key, value in self.index.iteritems(): current_value = current_index.get(key, not_found) if current_value != value: - logger.warning( - line_format, bin_to_hex(key), value, current_value - ) + logger.warning(line_format, bin_to_hex(key), value, current_value) for key, current_value in current_index.iteritems(): if key in self.index: continue value = self.index.get(key, not_found) if current_value != value: - logger.warning( - line_format, bin_to_hex(key), value, current_value - ) + logger.warning(line_format, bin_to_hex(key), value, current_value) if repair: self.write_index() self.rollback() if error_found: if repair: - logger.info( - "Finished %s repository check, errors found and repaired.", mode - ) + logger.info("Finished %s repository check, errors found and repaired.", mode) else: logger.error("Finished %s repository check, errors found.", mode) else: @@ -1266,9 +1203,7 @@ def scan_low_level(self): """ for segment, filename in self.io.segment_iterator(): try: - for tag, key, offset, data in self.io.iter_objects( - segment, include_data=True - ): + for tag, key, offset, data in self.io.iter_objects(segment, include_data=True): yield key, data, tag, segment, offset except IntegrityError as err: logger.error( @@ -1481,13 +1416,9 @@ def segment_iterator(self, segment=None, reverse=False): start_segment_dir = segment // self.segments_per_dir dirs = os.listdir(data_path) if not reverse: - dirs = [ - dir for dir in dirs if dir.isdigit() and int(dir) >= start_segment_dir - ] + dirs = [dir for dir in dirs if dir.isdigit() and int(dir) >= start_segment_dir] else: - dirs = [ - dir for dir in dirs if dir.isdigit() and int(dir) <= start_segment_dir - ] + dirs = [dir for dir in dirs if dir.isdigit() and int(dir) <= start_segment_dir] dirs = sorted(dirs, key=int, reverse=reverse) for dir in dirs: filenames = os.listdir(os.path.join(data_path, dir)) @@ -1571,9 +1502,7 @@ def is_committed_segment(self, segment): return seen_commit def segment_filename(self, segment): - return os.path.join( - self.path, "data", str(segment // self.segments_per_dir), str(segment) - ) + return os.path.join(self.path, "data", str(segment // self.segments_per_dir), str(segment)) def get_write_fd(self, no_new=False, want_new=False, raise_full=False): if not no_new and (want_new or self.offset and self.offset > self.limit): @@ -1726,9 +1655,7 @@ def recover_segment(self, segment, filename): try: dst_fd.write(MAGIC) while len(d) >= self.header_fmt.size: - crc, size, tag = self.header_fmt.unpack( - d[: self.header_fmt.size] - ) + crc, size, tag = self.header_fmt.unpack(d[: self.header_fmt.size]) if ( size > MAX_OBJECT_SIZE or tag > MAX_TAG_ID @@ -1855,9 +1782,7 @@ def write_put(self, id, data, raise_full=False): raise IntegrityError( "More than allowed put data [{} > {}]".format(data_size, MAX_DATA_SIZE) ) - fd = self.get_write_fd( - 
want_new=(id == Manifest.MANIFEST_ID), raise_full=raise_full - ) + fd = self.get_write_fd(want_new=(id == Manifest.MANIFEST_ID), raise_full=raise_full) size = data_size + self.put_header_fmt.size offset = self.offset header = self.header_no_crc_fmt.pack(size, TAG_PUT) @@ -1867,9 +1792,7 @@ def write_put(self, id, data, raise_full=False): return self.segment, offset def write_delete(self, id, raise_full=False): - fd = self.get_write_fd( - want_new=(id == Manifest.MANIFEST_ID), raise_full=raise_full - ) + fd = self.get_write_fd(want_new=(id == Manifest.MANIFEST_ID), raise_full=raise_full) header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE) crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xFFFFFFFF) fd.write(b"".join((crc, header, id))) diff --git a/src/borg/selftest.py b/src/borg/selftest.py index 0d792d14db2..61916ab2b58 100644 --- a/src/borg/selftest.py +++ b/src/borg/selftest.py @@ -87,6 +87,4 @@ def selftest(logger): sys.exit(2) assert False, "sanity assertion failed: ran beyond sys.exit()" selftest_elapsed = time.perf_counter() - selftest_started - logger.debug( - "%d self tests completed in %.2f seconds", successful_tests, selftest_elapsed - ) + logger.debug("%d self tests completed in %.2f seconds", successful_tests, selftest_elapsed) diff --git a/src/borg/testsuite/__init__.py b/src/borg/testsuite/__init__.py index 325fd36b166..2d8eaca2d69 100644 --- a/src/borg/testsuite/__init__.py +++ b/src/borg/testsuite/__init__.py @@ -64,10 +64,7 @@ def are_symlinks_supported(): with unopened_tempfile() as filepath: try: os.symlink("somewhere", filepath) - if ( - os.stat(filepath, follow_symlinks=False) - and os.readlink(filepath) == "somewhere" - ): + if os.stat(filepath, follow_symlinks=False) and os.readlink(filepath) == "somewhere": return True except OSError: pass @@ -244,9 +241,7 @@ def _assert_dirs_equal_cmp( ) @contextmanager - def fuse_mount( - self, location, mountpoint=None, *options, fork=True, os_fork=False, **kwargs - ): + def fuse_mount(self, location, mountpoint=None, *options, fork=True, os_fork=False, **kwargs): # For a successful mount, `fork = True` is required for # the borg mount daemon to work properly or the tests # will just freeze. 
Therefore, if argument `fork` is not @@ -342,9 +337,7 @@ def read_only(self, path): cmd_immutable = 'chmod S+vimmutable "%s"' % path cmd_mutable = 'chmod S-vimmutable "%s"' % path else: - message = ( - "Testing read-only repos is not supported on platform %s" % sys.platform - ) + message = "Testing read-only repos is not supported on platform %s" % sys.platform self.skipTest(message) try: os.system('LD_PRELOAD= chmod -R ugo-w "%s"' % path) diff --git a/src/borg/testsuite/archive.py b/src/borg/testsuite/archive.py index c7cf2eae79c..8f1d8b2134c 100644 --- a/src/borg/testsuite/archive.py +++ b/src/borg/testsuite/archive.py @@ -215,17 +215,13 @@ def test_missing_chunk(self): chunks = self.split(self.make_chunks([b"foo", b"bar", b"boo", b"baz"]), 4) input = [(False, chunks[:3]), (True, chunks[4:])] result = self.process(input) - self.assert_equal( - result, [{b"path": b"foo"}, {b"path": b"boo"}, {b"path": b"baz"}] - ) + self.assert_equal(result, [{b"path": b"foo"}, {b"path": b"boo"}, {b"path": b"baz"}]) def test_corrupt_chunk(self): chunks = self.split(self.make_chunks([b"foo", b"bar", b"boo", b"baz"]), 4) input = [(False, chunks[:3]), (True, [b"gar", b"bage"] + chunks[3:])] result = self.process(input) - self.assert_equal( - result, [{b"path": b"foo"}, {b"path": b"boo"}, {b"path": b"baz"}] - ) + self.assert_equal(result, [{b"path": b"foo"}, {b"path": b"boo"}, {b"path": b"baz"}]) @pytest.fixture @@ -271,9 +267,7 @@ def test_invalid_msgpacked_item(packed, item_keys_serialized): for o in [ {b"path": b"/a/b/c"}, # small (different msgpack mapping type!) OrderedDict((k, b"") for k in IK), # as big (key count) as it gets - OrderedDict( - (k, b"x" * 1000) for k in IK - ), # as big (key count and volume) as it gets + OrderedDict((k, b"x" * 1000) for k in IK), # as big (key count and volume) as it gets ] ], ) @@ -353,9 +347,7 @@ def test_get_item_uid_gid(): assert gid == 6 # item metadata broken, has negative ids and non-existing user/group names. - item = Item( - path="filename", uid=-3, gid=-4, user="udoesnotexist", group="gdoesnotexist" - ) + item = Item(path="filename", uid=-3, gid=-4, user="udoesnotexist", group="gdoesnotexist") uid, gid = get_item_uid_gid(item, numeric=False) # use the uid/gid defaults (which both default to 0). @@ -368,9 +360,7 @@ def test_get_item_uid_gid(): assert gid == 8 # item metadata has valid uid/gid, but non-existing user/group names. - item = Item( - path="filename", uid=9, gid=10, user="udoesnotexist", group="gdoesnotexist" - ) + item = Item(path="filename", uid=9, gid=10, user="udoesnotexist", group="gdoesnotexist") uid, gid = get_item_uid_gid(item, numeric=False) # because user/group name does not exist here, use valid numeric ids from item metadata. 
diff --git a/src/borg/testsuite/archiver.py b/src/borg/testsuite/archiver.py index bc11b5d3670..45d3c9d9635 100644 --- a/src/borg/testsuite/archiver.py +++ b/src/borg/testsuite/archiver.py @@ -76,9 +76,7 @@ src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -def exec_cmd( - *args, archiver=None, fork=False, exe=None, input=b"", binary_output=False, **kw -): +def exec_cmd(*args, archiver=None, fork=False, exe=None, input=b"", binary_output=False, **kw): if fork: try: if exe is None: @@ -87,9 +85,7 @@ def exec_cmd( borg = (exe,) elif not isinstance(exe, tuple): raise ValueError("exe must be None, a tuple or a str") - output = subprocess.check_output( - borg + args, stderr=subprocess.STDOUT, input=input - ) + output = subprocess.check_output(borg + args, stderr=subprocess.STDOUT, input=input) ret = 0 except subprocess.CalledProcessError as e: output = e.output @@ -108,9 +104,7 @@ def exec_cmd( sys.stdin.buffer = BytesIO(input) output = BytesIO() # Always use utf-8 here, to simply .decode() below - output_text = sys.stdout = sys.stderr = io.TextIOWrapper( - output, encoding="utf-8" - ) + output_text = sys.stdout = sys.stderr = io.TextIOWrapper(output, encoding="utf-8") if archiver is None: archiver = Archiver() archiver.prerun_checks = lambda *args: None @@ -208,9 +202,7 @@ def test_return_codes(cmd, tmpdir): DF_MOUNT = "/tmp/borg-mount" -@pytest.mark.skipif( - not os.path.exists(DF_MOUNT), reason="needs a 16MB fs mounted on %s" % DF_MOUNT -) +@pytest.mark.skipif(not os.path.exists(DF_MOUNT), reason="needs a 16MB fs mounted on %s" % DF_MOUNT) def test_disk_full(cmd): def make_files(dir, count, size, rnd=True): shutil.rmtree(dir, ignore_errors=True) @@ -258,9 +250,7 @@ def make_files(dir, count, size, rnd=True): print("create", rc, out) finally: # make sure repo is not locked - shutil.rmtree( - os.path.join(repo, "lock.exclusive"), ignore_errors=True - ) + shutil.rmtree(os.path.join(repo, "lock.exclusive"), ignore_errors=True) os.remove(os.path.join(repo, "lock.roster")) finally: # now some error happened, likely we are out of disk space. @@ -323,23 +313,17 @@ def cmd(self, *args, **kw): binary_output = kw.get("binary_output", False) if fork is None: fork = self.FORK_DEFAULT - ret, output = exec_cmd( - *args, fork=fork, exe=self.EXE, archiver=self.archiver, **kw - ) + ret, output = exec_cmd(*args, fork=fork, exe=self.EXE, archiver=self.archiver, **kw) if ret != exit_code: print(output) self.assert_equal(ret, exit_code) # if tests are run with the pure-python msgpack, there will be warnings about # this in the output, which would make a lot of tests fail. 
pp_msg = ( - PURE_PYTHON_MSGPACK_WARNING.encode() - if binary_output - else PURE_PYTHON_MSGPACK_WARNING + PURE_PYTHON_MSGPACK_WARNING.encode() if binary_output else PURE_PYTHON_MSGPACK_WARNING ) empty = b"" if binary_output else "" - output = empty.join( - line for line in output.splitlines(keepends=True) if pp_msg not in line - ) + output = empty.join(line for line in output.splitlines(keepends=True) if pp_msg not in line) return output def create_src_archive(self, name): @@ -410,9 +394,7 @@ def create_test_files(self): if are_fifos_supported(): os.mkfifo(os.path.join(self.input_path, "fifo1")) if has_lchflags: - platform.set_flags( - os.path.join(self.input_path, "flagfile"), stat.UF_NODUMP - ) + platform.set_flags(os.path.join(self.input_path, "flagfile"), stat.UF_NODUMP) try: # Block device os.mknod("input/bdev", 0o600 | stat.S_IFBLK, os.makedev(10, 20)) @@ -423,9 +405,7 @@ def create_test_files(self): "input/dir2", 0o555 ) # if we take away write perms, we need root to remove contents # File owner - os.chown( - "input/file1", 100, 200 - ) # raises OSError invalid argument on cygwin + os.chown("input/file1", 100, 200) # raises OSError invalid argument on cygwin have_root = True # we have (fake)root except PermissionError: have_root = False @@ -461,9 +441,7 @@ def test_basic_functionality(self): ) self.assert_in("borgbackup version", output) self.assert_in("terminating with success status, rc 0", output) - self.cmd( - "create", "--exclude-nodump", self.repository_location + "::test", "input" - ) + self.cmd("create", "--exclude-nodump", self.repository_location + "::test", "input") output = self.cmd( "create", "--exclude-nodump", @@ -548,9 +526,7 @@ def test_create_duplicate_root(self): "create", self.repository_location + "::test", "input", "input" ) # give input twice! # test if created archive has 'input' contents twice: - archive_list = self.cmd( - "list", "--json-lines", self.repository_location + "::test" - ) + archive_list = self.cmd("list", "--json-lines", self.repository_location + "::test") paths = [json.loads(line)["path"] for line in archive_list.split("\n") if line] # we have all fs items exactly once! 
assert sorted(paths) == [ @@ -650,11 +626,7 @@ def test_birthtime(self): self.cmd("extract", self.repository_location + "::test") sti = os.stat("input/file1") sto = os.stat("output/input/file1") - assert ( - int(sti.st_birthtime * 1e9) - == int(sto.st_birthtime * 1e9) - == birthtime * 1e9 - ) + assert int(sti.st_birthtime * 1e9) == int(sto.st_birthtime * 1e9) == birthtime * 1e9 assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9 @pytest.mark.skipif( @@ -671,9 +643,7 @@ def test_nobirthtime(self): os.utime("input/file1", (atime, birthtime)) os.utime("input/file1", (atime, mtime)) self.cmd("init", "--encryption=repokey", self.repository_location) - self.cmd( - "create", "--nobirthtime", self.repository_location + "::test", "input" - ) + self.cmd("create", "--nobirthtime", self.repository_location + "::test", "input") with changedir("output"): self.cmd("extract", self.repository_location + "::test") sti = os.stat("input/file1") @@ -776,9 +746,7 @@ def test_repository_swap_detection(self): shutil.rmtree(self.repository_path) self.cmd("init", "--encryption=none", self.repository_location) self._set_repository_id(self.repository_path, repository_id) - self.assert_equal( - repository_id, self._extract_repository_id(self.repository_path) - ) + self.assert_equal(repository_id, self._extract_repository_id(self.repository_path)) if self.FORK_DEFAULT: self.cmd( "create", @@ -794,14 +762,10 @@ def test_repository_swap_detection2(self): self.create_test_files() self.cmd("init", "--encryption=none", self.repository_location + "_unencrypted") os.environ["BORG_PASSPHRASE"] = "passphrase" - self.cmd( - "init", "--encryption=repokey", self.repository_location + "_encrypted" - ) + self.cmd("init", "--encryption=repokey", self.repository_location + "_encrypted") self.cmd("create", self.repository_location + "_encrypted::test", "input") shutil.rmtree(self.repository_path + "_encrypted") - os.rename( - self.repository_path + "_unencrypted", self.repository_path + "_encrypted" - ) + os.rename(self.repository_path + "_unencrypted", self.repository_path + "_encrypted") if self.FORK_DEFAULT: self.cmd( "create", @@ -811,9 +775,7 @@ def test_repository_swap_detection2(self): ) else: with pytest.raises(Cache.RepositoryAccessAborted): - self.cmd( - "create", self.repository_location + "_encrypted::test.2", "input" - ) + self.cmd("create", self.repository_location + "_encrypted::test.2", "input") def test_repository_swap_detection_no_cache(self): self.create_test_files() @@ -824,9 +786,7 @@ def test_repository_swap_detection_no_cache(self): shutil.rmtree(self.repository_path) self.cmd("init", "--encryption=none", self.repository_location) self._set_repository_id(self.repository_path, repository_id) - self.assert_equal( - repository_id, self._extract_repository_id(self.repository_path) - ) + self.assert_equal(repository_id, self._extract_repository_id(self.repository_path)) self.cmd("delete", "--cache-only", self.repository_location) if self.FORK_DEFAULT: self.cmd( @@ -843,16 +803,12 @@ def test_repository_swap_detection2_no_cache(self): self.create_test_files() self.cmd("init", "--encryption=none", self.repository_location + "_unencrypted") os.environ["BORG_PASSPHRASE"] = "passphrase" - self.cmd( - "init", "--encryption=repokey", self.repository_location + "_encrypted" - ) + self.cmd("init", "--encryption=repokey", self.repository_location + "_encrypted") self.cmd("create", self.repository_location + "_encrypted::test", "input") self.cmd("delete", "--cache-only", self.repository_location + "_unencrypted") 
self.cmd("delete", "--cache-only", self.repository_location + "_encrypted") shutil.rmtree(self.repository_path + "_encrypted") - os.rename( - self.repository_path + "_unencrypted", self.repository_path + "_encrypted" - ) + os.rename(self.repository_path + "_unencrypted", self.repository_path + "_encrypted") if self.FORK_DEFAULT: self.cmd( "create", @@ -862,9 +818,7 @@ def test_repository_swap_detection2_no_cache(self): ) else: with pytest.raises(Cache.RepositoryAccessAborted): - self.cmd( - "create", self.repository_location + "_encrypted::test.2", "input" - ) + self.cmd("create", self.repository_location + "_encrypted::test.2", "input") def test_repository_swap_detection_repokey_blank_passphrase(self): # Check that a repokey repo with a blank passphrase is considered like a plaintext repo. @@ -902,9 +856,7 @@ def test_repository_move(self): self.cmd("info", self.repository_location + "_new") with open(os.path.join(security_dir, "location")) as fd: location = fd.read() - assert ( - location == Location(self.repository_location + "_new").canonical_path() - ) + assert location == Location(self.repository_location + "_new").canonical_path() # Needs no confirmation anymore self.cmd("info", self.repository_location + "_new") shutil.rmtree(self.cache_path) @@ -1016,10 +968,7 @@ def test_fuse_mount_hardlinks(self): else: ignore_perms = ["-o", "ignore_permissions"] with self.fuse_mount( - self.repository_location + "::test", - mountpoint, - "--strip-components=2", - *ignore_perms + self.repository_location + "::test", mountpoint, "--strip-components=2", *ignore_perms ), changedir(mountpoint): assert os.stat("hardlink").st_nlink == 2 assert os.stat("subdir/hardlink").st_nlink == 2 @@ -1122,9 +1071,7 @@ def test_extract_include_exclude(self): ) self.assert_equal(sorted(os.listdir("output/input")), ["file1"]) with changedir("output"): - self.cmd( - "extract", "--exclude=input/file2", self.repository_location + "::test" - ) + self.cmd("extract", "--exclude=input/file2", self.repository_location + "::test") self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file3"]) with changedir("output"): self.cmd( @@ -1158,9 +1105,7 @@ def test_extract_include_exclude_regex(self): # Extract with regular expression exclusion with changedir("output"): - self.cmd( - "extract", "--exclude=re:file3+", self.repository_location + "::test" - ) + self.cmd("extract", "--exclude=re:file3+", self.repository_location + "::test") self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2"]) shutil.rmtree("output/input") @@ -1291,9 +1236,7 @@ def test_extract_with_pattern(self): "fm:*file33*", "input/file2", ) - self.assert_equal( - sorted(os.listdir("output/input")), ["file1", "file2", "file333"] - ) + self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file333"]) def test_extract_list_output(self): self.cmd("init", "--encryption=repokey", self.repository_location) @@ -1317,9 +1260,7 @@ def test_extract_list_output(self): shutil.rmtree("output/input") with changedir("output"): - output = self.cmd( - "extract", "--list", "--info", self.repository_location + "::test" - ) + output = self.cmd("extract", "--list", "--info", self.repository_location + "::test") self.assert_in("input/file", output) def test_extract_progress(self): @@ -1328,9 +1269,7 @@ def test_extract_progress(self): self.cmd("create", self.repository_location + "::test", "input") with changedir("output"): - output = self.cmd( - "extract", self.repository_location + "::test", "--progress" - ) + output = 
self.cmd("extract", self.repository_location + "::test", "--progress") assert "Extracting:" in output def _create_test_caches(self): @@ -1339,14 +1278,10 @@ def _create_test_caches(self): self.create_regular_file( "cache1/%s" % CACHE_TAG_NAME, contents=CACHE_TAG_CONTENTS + b" extra stuff" ) - self.create_regular_file( - "cache2/%s" % CACHE_TAG_NAME, contents=b"invalid signature" - ) + self.create_regular_file("cache2/%s" % CACHE_TAG_NAME, contents=b"invalid signature") os.mkdir("input/cache3") if are_hardlinks_supported(): - os.link( - "input/cache1/%s" % CACHE_TAG_NAME, "input/cache3/%s" % CACHE_TAG_NAME - ) + os.link("input/cache1/%s" % CACHE_TAG_NAME, "input/cache3/%s" % CACHE_TAG_NAME) else: self.create_regular_file( "cache3/%s" % CACHE_TAG_NAME, @@ -1357,9 +1292,7 @@ def test_create_stdin(self): self.cmd("init", "--encryption=repokey", self.repository_location) input_data = b"\x00foo\n\nbar\n \n" self.cmd("create", self.repository_location + "::test", "-", input=input_data) - item = json.loads( - self.cmd("list", "--json-lines", self.repository_location + "::test") - ) + item = json.loads(self.cmd("list", "--json-lines", self.repository_location + "::test")) assert item["uid"] == 0 assert item["gid"] == 0 assert item["size"] == len(input_data) @@ -1386,16 +1319,12 @@ def test_create_content_from_command(self): "echo", input_data, ) - item = json.loads( - self.cmd("list", "--json-lines", self.repository_location + "::test") - ) + item = json.loads(self.cmd("list", "--json-lines", self.repository_location + "::test")) assert item["uid"] == 0 assert item["gid"] == 0 assert item["size"] == len(input_data) + 1 # `echo` adds newline assert item["path"] == name - extracted_data = self.cmd( - "extract", "--stdout", self.repository_location + "::test" - ) + extracted_data = self.cmd("extract", "--stdout", self.repository_location + "::test") assert extracted_data == input_data + "\n" def test_create_content_from_command_with_failed_command(self): @@ -1440,9 +1369,7 @@ def test_create_paths_from_stdin(self): self.repository_location + "::test", input=input_data, ) - archive_list = self.cmd( - "list", "--json-lines", self.repository_location + "::test" - ) + archive_list = self.cmd("list", "--json-lines", self.repository_location + "::test") paths = [json.loads(line)["path"] for line in archive_list.split("\n") if line] assert paths == ["input/file1", "input/dir1", "input/file4"] @@ -1462,9 +1389,7 @@ def test_create_paths_from_command(self): "echo", input_data, ) - archive_list = self.cmd( - "list", "--json-lines", self.repository_location + "::test" - ) + archive_list = self.cmd("list", "--json-lines", self.repository_location + "::test") paths = [json.loads(line)["path"] for line in archive_list.split("\n") if line] assert paths == ["input/file1", "input/file2", "input/file3"] @@ -1618,9 +1543,7 @@ def test_create_pattern_intermediate_folders_first(self): # list the archive and verify that the "intermediate" folders appear before # their contents - out = self.cmd( - "list", "--format", "{type} {path}{NL}", self.repository_location + "::test" - ) + out = self.cmd("list", "--format", "{type} {path}{NL}", self.repository_location + "::test") out_list = out.splitlines() self.assert_in("d x/a", out_list) @@ -1643,16 +1566,12 @@ def test_create_no_cache_sync(self): "--error", ) ) # ignore experimental warning - info_json = json.loads( - self.cmd("info", self.repository_location + "::test", "--json") - ) + info_json = json.loads(self.cmd("info", self.repository_location + "::test", "--json")) 
create_stats = create_json["cache"]["stats"] info_stats = info_json["cache"]["stats"] assert create_stats == info_stats self.cmd("delete", "--cache-only", self.repository_location) - self.cmd( - "create", "--no-cache-sync", self.repository_location + "::test2", "input" - ) + self.cmd("create", "--no-cache-sync", self.repository_location + "::test2", "input") self.cmd("info", self.repository_location) self.cmd("check", self.repository_location) @@ -1679,9 +1598,7 @@ def _assert_test_caches(self): def test_exclude_caches(self): self._create_test_caches() - self.cmd( - "create", "--exclude-caches", self.repository_location + "::test", "input" - ) + self.cmd("create", "--exclude-caches", self.repository_location + "::test", "input") self._assert_test_caches() def test_recreate_exclude_caches(self): @@ -1835,7 +1752,9 @@ def patched_fchown(fd, uid, gid): fchown(fd, uid, gid) # The capability descriptor used here is valid and taken from a /usr/bin/ping - capabilities = b"\x01\x00\x00\x02\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + capabilities = ( + b"\x01\x00\x00\x02\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + ) self.create_regular_file("file") xattr.setxattr(b"input/file", b"security.capability", capabilities) self.cmd("init", "--encryption=repokey", self.repository_location) @@ -1894,8 +1813,7 @@ def patched_setxattr_EACCES(*args, **kwargs): exit_code=EXIT_WARNING, ) assert ( - ": when setting extended attribute user.attribute: Permission denied\n" - in out + ": when setting extended attribute user.attribute: Permission denied\n" in out ) assert os.path.isfile(input_abspath) @@ -1917,16 +1835,12 @@ def test_exclude_normalization(self): self.create_regular_file("file1", size=1024 * 80) self.create_regular_file("file2", size=1024 * 80) with changedir("input"): - self.cmd( - "create", "--exclude=file1", self.repository_location + "::test1", "." - ) + self.cmd("create", "--exclude=file1", self.repository_location + "::test1", ".") with changedir("output"): self.cmd("extract", self.repository_location + "::test1") self.assert_equal(sorted(os.listdir("output")), ["file2"]) with changedir("input"): - self.cmd( - "create", "--exclude=./file1", self.repository_location + "::test2", "." 
- ) + self.cmd("create", "--exclude=./file1", self.repository_location + "::test2", ".") with changedir("output"): self.cmd("extract", self.repository_location + "::test2") self.assert_equal(sorted(os.listdir("output")), ["file2"]) @@ -2003,9 +1917,7 @@ def test_info_json(self): repository = info_repo["repository"] assert len(repository["id"]) == 64 assert "last_modified" in repository - assert datetime.strptime( - repository["last_modified"], ISO_FORMAT - ) # must not raise + assert datetime.strptime(repository["last_modified"], ISO_FORMAT) # must not raise assert info_repo["encryption"]["mode"] == "repokey" assert "keyfile" not in info_repo["encryption"] cache = info_repo["cache"] @@ -2023,9 +1935,7 @@ def test_info_json(self): ) ) - info_archive = json.loads( - self.cmd("info", "--json", self.repository_location + "::test") - ) + info_archive = json.loads(self.cmd("info", "--json", self.repository_location + "::test")) assert info_repo["repository"] == info_archive["repository"] assert info_repo["cache"] == info_archive["cache"] archives = info_archive["archives"] @@ -2042,13 +1952,9 @@ def test_info_json(self): def test_info_json_of_empty_archive(self): """See https://github.com/borgbackup/borg/issues/6120""" self.cmd("init", "--encryption=repokey", self.repository_location) - info_repo = json.loads( - self.cmd("info", "--json", "--first=1", self.repository_location) - ) + info_repo = json.loads(self.cmd("info", "--json", "--first=1", self.repository_location)) assert info_repo["archives"] == [] - info_repo = json.loads( - self.cmd("info", "--json", "--last=1", self.repository_location) - ) + info_repo = json.loads(self.cmd("info", "--json", "--last=1", self.repository_location)) assert info_repo["archives"] == [] def test_comment(self): @@ -2095,12 +2001,8 @@ def test_comment(self): ) self.cmd("recreate", self.repository_location + "::test3", "--comment", "") self.cmd("recreate", self.repository_location + "::test4", "12345") - assert "Comment: added comment" in self.cmd( - "info", self.repository_location + "::test1" - ) - assert "Comment: modified comment" in self.cmd( - "info", self.repository_location + "::test2" - ) + assert "Comment: added comment" in self.cmd("info", self.repository_location + "::test1") + assert "Comment: modified comment" in self.cmd("info", self.repository_location + "::test2") assert "Comment: \n" in self.cmd("info", self.repository_location + "::test3") assert "Comment: preserved comment" in self.cmd( "info", self.repository_location + "::test4" @@ -2190,26 +2092,18 @@ def test_corrupted_repository(self): self.create_src_archive("test") self.cmd("extract", "--dry-run", self.repository_location + "::test") output = self.cmd("check", "--show-version", self.repository_location) - self.assert_in( - "borgbackup version", output - ) # implied output even without --info given - self.assert_not_in( - "Starting repository check", output - ) # --info not given for root logger + self.assert_in("borgbackup version", output) # implied output even without --info given + self.assert_not_in("Starting repository check", output) # --info not given for root logger name = sorted( os.listdir(os.path.join(self.tmpdir, "repository", "data", "0")), reverse=True, )[1] - with open( - os.path.join(self.tmpdir, "repository", "data", "0", name), "r+b" - ) as fd: + with open(os.path.join(self.tmpdir, "repository", "data", "0", name), "r+b") as fd: fd.seek(100) fd.write(b"XXXX") output = self.cmd("check", "--info", self.repository_location, exit_code=1) - self.assert_in( - "Starting 
repository check", output - ) # --info given for root logger + self.assert_in("Starting repository check", output) # --info given for root logger def test_readonly_check(self): self.cmd("init", "--encryption=repokey", self.repository_location) @@ -2229,9 +2123,7 @@ def test_readonly_check(self): if isinstance(excinfo.value, RemoteRepository.RPCError): assert excinfo.value.exception_class == "LockFailed" # verify that command works with read-only repo when using --bypass-lock - self.cmd( - "check", "--verify-data", self.repository_location, "--bypass-lock" - ) + self.cmd("check", "--verify-data", self.repository_location, "--bypass-lock") def test_readonly_diff(self): self.cmd("init", "--encryption=repokey", self.repository_location) @@ -2268,9 +2160,7 @@ def test_readonly_export_tar(self): ) else: with pytest.raises((LockFailed, RemoteRepository.RPCError)) as excinfo: - self.cmd( - "export-tar", "%s::test" % self.repository_location, "test.tar" - ) + self.cmd("export-tar", "%s::test" % self.repository_location, "test.tar") if isinstance(excinfo.value, RemoteRepository.RPCError): assert excinfo.value.exception_class == "LockFailed" # verify that command works with read-only repo when using --bypass-lock @@ -2350,9 +2240,7 @@ def test_readonly_mount(self): with self.fuse_mount(self.repository_location, None, "--bypass-lock"): pass - @pytest.mark.skipif( - "BORG_TESTS_IGNORE_MODES" in os.environ, reason="modes unreliable" - ) + @pytest.mark.skipif("BORG_TESTS_IGNORE_MODES" in os.environ, reason="modes unreliable") def test_umask(self): self.create_regular_file("file1", size=1024 * 80) self.cmd("init", "--encryption=repokey", self.repository_location) @@ -2388,33 +2276,25 @@ def cmd_raises_unknown_feature(self, args): def test_unknown_feature_on_create(self): print(self.cmd("init", "--encryption=repokey", self.repository_location)) self.add_unknown_feature(Manifest.Operation.WRITE) - self.cmd_raises_unknown_feature( - ["create", self.repository_location + "::test", "input"] - ) + self.cmd_raises_unknown_feature(["create", self.repository_location + "::test", "input"]) def test_unknown_feature_on_cache_sync(self): self.cmd("init", "--encryption=repokey", self.repository_location) self.cmd("delete", "--cache-only", self.repository_location) self.add_unknown_feature(Manifest.Operation.READ) - self.cmd_raises_unknown_feature( - ["create", self.repository_location + "::test", "input"] - ) + self.cmd_raises_unknown_feature(["create", self.repository_location + "::test", "input"]) def test_unknown_feature_on_change_passphrase(self): print(self.cmd("init", "--encryption=repokey", self.repository_location)) self.add_unknown_feature(Manifest.Operation.CHECK) - self.cmd_raises_unknown_feature( - ["key", "change-passphrase", self.repository_location] - ) + self.cmd_raises_unknown_feature(["key", "change-passphrase", self.repository_location]) def test_unknown_feature_on_read(self): print(self.cmd("init", "--encryption=repokey", self.repository_location)) self.cmd("create", self.repository_location + "::test", "input") self.add_unknown_feature(Manifest.Operation.READ) with changedir("output"): - self.cmd_raises_unknown_feature( - ["extract", self.repository_location + "::test"] - ) + self.cmd_raises_unknown_feature(["extract", self.repository_location + "::test"]) self.cmd_raises_unknown_feature(["list", self.repository_location]) self.cmd_raises_unknown_feature(["info", self.repository_location + "::test"]) @@ -2423,9 +2303,7 @@ def test_unknown_feature_on_rename(self): print(self.cmd("init", 
"--encryption=repokey", self.repository_location)) self.cmd("create", self.repository_location + "::test", "input") self.add_unknown_feature(Manifest.Operation.CHECK) - self.cmd_raises_unknown_feature( - ["rename", self.repository_location + "::test", "other"] - ) + self.cmd_raises_unknown_feature(["rename", self.repository_location + "::test", "other"]) def test_unknown_feature_on_delete(self): print(self.cmd("init", "--encryption=repokey", self.repository_location)) @@ -2433,9 +2311,7 @@ def test_unknown_feature_on_delete(self): self.add_unknown_feature(Manifest.Operation.DELETE) # delete of an archive raises self.cmd_raises_unknown_feature(["delete", self.repository_location + "::test"]) - self.cmd_raises_unknown_feature( - ["prune", "--keep-daily=3", self.repository_location] - ) + self.cmd_raises_unknown_feature(["prune", "--keep-daily=3", self.repository_location]) # delete of the whole repository ignores features self.cmd("delete", self.repository_location) @@ -2447,9 +2323,7 @@ def test_unknown_feature_on_mount(self): mountpoint = os.path.join(self.tmpdir, "mountpoint") os.mkdir(mountpoint) # XXX this might hang if it doesn't raise an error - self.cmd_raises_unknown_feature( - ["mount", self.repository_location + "::test", mountpoint] - ) + self.cmd_raises_unknown_feature(["mount", self.repository_location + "::test", mountpoint]) @pytest.mark.allow_cache_wipe def test_unknown_mandatory_feature_in_cache(self): @@ -2495,9 +2369,7 @@ def wipe_wrapper(*args): def test_progress_on(self): self.create_regular_file("file1", size=1024 * 80) self.cmd("init", "--encryption=repokey", self.repository_location) - output = self.cmd( - "create", "--progress", self.repository_location + "::test4", "input" - ) + output = self.cmd("create", "--progress", self.repository_location + "::test4", "input") self.assert_in("\r", output) def test_progress_off(self): @@ -2514,15 +2386,11 @@ def test_file_status(self): time.sleep(1) # file2 must have newer timestamps than file1 self.create_regular_file("file2", size=1024 * 80) self.cmd("init", "--encryption=repokey", self.repository_location) - output = self.cmd( - "create", "--list", self.repository_location + "::test", "input" - ) + output = self.cmd("create", "--list", self.repository_location + "::test", "input") self.assert_in("A input/file1", output) self.assert_in("A input/file2", output) # should find first file as unmodified - output = self.cmd( - "create", "--list", self.repository_location + "::test1", "input" - ) + output = self.cmd("create", "--list", self.repository_location + "::test1", "input") self.assert_in("U input/file1", output) # this is expected, although surprising, for why, see: # https://borgbackup.readthedocs.org/en/latest/faq.html#i-am-seeing-a-added-status-for-a-unchanged-file @@ -2857,23 +2725,15 @@ def test_prune_repository_example(self): assert re.search(r"Would prune:\s+test24", output) # Must keep the other 21 backups # Yearly is kept as oldest archive - assert re.search( - r"Keeping archive \(rule: yearly\[oldest\] #1\):\s+test01", output - ) + assert re.search(r"Keeping archive \(rule: yearly\[oldest\] #1\):\s+test01", output) for i in range(1, 7): assert re.search( - r"Keeping archive \(rule: monthly #" - + str(i) - + r"\):\s+test" - + ("%02d" % (8 - i)), + r"Keeping archive \(rule: monthly #" + str(i) + r"\):\s+test" + ("%02d" % (8 - i)), output, ) for i in range(1, 15): assert re.search( - r"Keeping archive \(rule: daily #" - + str(i) - + r"\):\s+test" - + ("%02d" % (22 - i)), + r"Keeping archive \(rule: daily #" + str(i) 
+ r"\):\s+test" + ("%02d" % (22 - i)), output, ) output = self.cmd("list", self.repository_location) @@ -2903,15 +2763,11 @@ def test_prune_retain_and_expire_oldest(self): # Archive and prune daily for 30 days for i in range(1, 31): self._create_archive_ts("september%02d" % i, 2020, 9, i, 12) - self.cmd( - "prune", self.repository_location, "--keep-daily=7", "--keep-monthly=1" - ) + self.cmd("prune", self.repository_location, "--keep-daily=7", "--keep-monthly=1") # Archive and prune 6 days into the next month for i in range(1, 7): self._create_archive_ts("october%02d" % i, 2020, 10, i, 12) - self.cmd( - "prune", self.repository_location, "--keep-daily=7", "--keep-monthly=1" - ) + self.cmd("prune", self.repository_location, "--keep-daily=7", "--keep-monthly=1") # Oldest backup is still retained output = self.cmd( "prune", @@ -2927,9 +2783,7 @@ def test_prune_retain_and_expire_oldest(self): ) # Archive one more day and prune. self._create_archive_ts("october07", 2020, 10, 7, 12) - self.cmd( - "prune", self.repository_location, "--keep-daily=7", "--keep-monthly=1" - ) + self.cmd("prune", self.repository_location, "--keep-daily=7", "--keep-monthly=1") # Last day of previous month is retained as monthly, and oldest is expired. output = self.cmd( "prune", @@ -2973,9 +2827,7 @@ def test_prune_repository_prefix(self): "--keep-daily=1", "--prefix=foo-", ) - assert re.search( - r"Keeping archive \(rule: daily #1\):\s+foo-2015-08-12-20:00", output - ) + assert re.search(r"Keeping archive \(rule: daily #1\):\s+foo-2015-08-12-20:00", output) assert re.search(r"Would prune:\s+foo-2015-08-12-10:00", output) output = self.cmd("list", self.repository_location) self.assert_in("foo-2015-08-12-10:00", output) @@ -3003,9 +2855,7 @@ def test_prune_repository_glob(self): "--keep-daily=1", "--glob-archives=2015-*-foo", ) - assert re.search( - r"Keeping archive \(rule: daily #1\):\s+2015-08-12-20:00-foo", output - ) + assert re.search(r"Keeping archive \(rule: daily #1\):\s+2015-08-12-20:00-foo", output) assert re.search(r"Would prune:\s+2015-08-12-10:00-foo", output) output = self.cmd("list", self.repository_location) self.assert_in("2015-08-12-10:00-foo", output) @@ -3027,9 +2877,7 @@ def test_prune_repository_glob(self): def test_list_prefix(self): self.cmd("init", "--encryption=repokey", self.repository_location) self.cmd("create", self.repository_location + "::test-1", src_dir) - self.cmd( - "create", self.repository_location + "::something-else-than-test-1", src_dir - ) + self.cmd("create", self.repository_location + "::something-else-than-test-1", src_dir) self.cmd("create", self.repository_location + "::test-2", src_dir) output = self.cmd("list", "--prefix=test-", self.repository_location) self.assert_in("test-1", output) @@ -3079,9 +2927,7 @@ def test_list_repository_format(self): self.assertEqual(output_1, "test-1\ntest-2\n") output_1 = self.cmd("list", "--format", "{barchive}/", self.repository_location) self.assertEqual(output_1, "test-1/test-2/") - output_3 = self.cmd( - "list", "--format", "{name} {comment}{NL}", self.repository_location - ) + output_3 = self.cmd("list", "--format", "{name} {comment}{NL}", self.repository_location) self.assert_in("test-1 comment 1\n", output_3) self.assert_in("test-2 comment 2\n", output_3) @@ -3093,8 +2939,7 @@ def test_list_hash(self): self.cmd("create", test_archive, "input") output = self.cmd("list", "--format", "{sha256} {path}{NL}", test_archive) assert ( - "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 input/amb" - in output + 
"cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 input/amb" in output ) assert ( "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 input/empty_file" @@ -3155,17 +3000,13 @@ def test_list_json(self): list_repo = json.loads(self.cmd("list", "--json", self.repository_location)) repository = list_repo["repository"] assert len(repository["id"]) == 64 - assert datetime.strptime( - repository["last_modified"], ISO_FORMAT - ) # must not raise + assert datetime.strptime(repository["last_modified"], ISO_FORMAT) # must not raise assert list_repo["encryption"]["mode"] == "repokey" assert "keyfile" not in list_repo["encryption"] archive0 = list_repo["archives"][0] assert datetime.strptime(archive0["time"], ISO_FORMAT) # must not raise - list_archive = self.cmd( - "list", "--json-lines", self.repository_location + "::test" - ) + list_archive = self.cmd("list", "--json-lines", self.repository_location + "::test") items = [json.loads(s) for s in list_archive.splitlines()] assert len(items) == 2 file1 = items[1] @@ -3183,10 +3024,7 @@ def test_list_json(self): assert len(items) == 2 file1 = items[1] assert file1["path"] == "input/file1" - assert ( - file1["sha256"] - == "b2915eb69f260d8d3c25249195f2c8f4f716ea82ec760ae929732c0262442b2b" - ) + assert file1["sha256"] == "b2915eb69f260d8d3c25249195f2c8f4f716ea82ec760ae929732c0262442b2b" def test_list_json_args(self): self.cmd("init", "--encryption=repokey", self.repository_location) @@ -3215,9 +3053,7 @@ def test_log_json(self): log_message = messages["log_message"] assert isinstance(log_message["time"], float) - assert ( - log_message["levelname"] == "DEBUG" - ) # there should only be DEBUG messages + assert log_message["levelname"] == "DEBUG" # there should only be DEBUG messages assert isinstance(log_message["message"], str) def test_debug_profile(self): @@ -3247,9 +3083,7 @@ def test_debug_profile(self): def test_common_options(self): self.create_test_files() self.cmd("init", "--encryption=repokey", self.repository_location) - log = self.cmd( - "--debug", "create", self.repository_location + "::test", "input" - ) + log = self.cmd("--debug", "create", self.repository_location + "::test", "input") assert "security: read previous location" in log def _get_sizes(self, compression, compressible, size=10000): @@ -3336,9 +3170,7 @@ def test_help(self): assert "patterns" in self.cmd("help", "patterns") assert "Initialize" in self.cmd("help", "init") assert "positional arguments" not in self.cmd("help", "init", "--epilog-only") - assert "This command initializes" not in self.cmd( - "help", "init", "--usage-only" - ) + assert "This command initializes" not in self.cmd("help", "init", "--usage-only") @unittest.skipUnless(llfuse, "llfuse not installed") def test_fuse(self): @@ -3460,14 +3292,9 @@ def has_noatime(some_file): except OSError as e: assert e.errno == llfuse.ENOATTR else: - assert ( - False - ), "expected OSError(ENOATTR), but no error was raised" + assert False, "expected OSError(ENOATTR), but no error was raised" except OSError as err: - if ( - sys.platform.startswith(("nothing_here_now",)) - and err.errno == errno.ENOTSUP - ): + if sys.platform.startswith(("nothing_here_now",)) and err.errno == errno.ENOTSUP: # some systems have no xattr support on FUSE pass else: @@ -3487,13 +3314,9 @@ def test_fuse_versions_view(self): mountpoint = os.path.join(self.tmpdir, "mountpoint") # mount the whole repository, archive contents shall show up in versioned view: with self.fuse_mount(self.repository_location, mountpoint, "-o", 
"versions"): - path = os.path.join( - mountpoint, "input", "test" - ) # filename shows up as directory ... + path = os.path.join(mountpoint, "input", "test") # filename shows up as directory ... files = os.listdir(path) - assert all( - f.startswith("test.") for f in files - ) # ... with files test.xxxxx in there + assert all(f.startswith("test.") for f in files) # ... with files test.xxxxx in there assert {b"first", b"second"} == { open(os.path.join(path, f), "rb").read() for f in files } @@ -3557,13 +3380,9 @@ def test_fuse_mount_options(self): self.create_src_archive("arch22") mountpoint = os.path.join(self.tmpdir, "mountpoint") - with self.fuse_mount( - self.repository_location, mountpoint, "--first=2", "--sort=name" - ): + with self.fuse_mount(self.repository_location, mountpoint, "--first=2", "--sort=name"): assert sorted(os.listdir(os.path.join(mountpoint))) == ["arch11", "arch12"] - with self.fuse_mount( - self.repository_location, mountpoint, "--last=2", "--sort=name" - ): + with self.fuse_mount(self.repository_location, mountpoint, "--last=2", "--sort=name"): assert sorted(os.listdir(os.path.join(mountpoint))) == ["arch21", "arch22"] with self.fuse_mount(self.repository_location, mountpoint, "--prefix=arch1"): assert sorted(os.listdir(os.path.join(mountpoint))) == ["arch11", "arch12"] @@ -3617,9 +3436,7 @@ def wrapper(self, old_id, new_id): return migrate_lock(self, old_id, new_id) except BaseException as e: assert_data["exception"] = e - assert_data["exception.extr_tb"] = traceback.extract_tb( - e.__traceback__ - ) + assert_data["exception.extr_tb"] = traceback.extract_tb(e.__traceback__) finally: assert_data["after"].update( { @@ -3637,9 +3454,7 @@ def wrapper(self, old_id, new_id): return wrapper # Decorate - borg.locking.Lock.migrate_lock = write_assert_data( - borg.locking.Lock.migrate_lock - ) + borg.locking.Lock.migrate_lock = write_assert_data(borg.locking.Lock.migrate_lock) try: self.cmd("init", "--encryption=none", self.repository_location) self.create_src_archive("arch") @@ -3669,9 +3484,7 @@ def wrapper(self, old_id, new_id): flush=True ) - assert ( - assert_data["num_calls"] == 1 - ), "Lock.migrate_lock() must be called exactly once." + assert assert_data["num_calls"] == 1, "Lock.migrate_lock() must be called exactly once." assert exception is None, "Lock.migrate_lock() may not raise an exception." assert_data_before = assert_data["before"] @@ -3699,9 +3512,7 @@ def verify_aes_counter_uniqueness(self, method): def verify_uniqueness(): with Repository(self.repository_path) as repository: - for id, _ in repository.open_index( - repository.get_transaction_id() - ).iteritems(): + for id, _ in repository.open_index(repository.get_transaction_id()).iteritems(): data = repository.get(id) hash = sha256(data).digest() if hash not in seen: @@ -3734,9 +3545,7 @@ def test_debug_dump_archive_items(self): self.cmd("init", "--encryption=repokey", self.repository_location) self.cmd("create", self.repository_location + "::test", "input") with changedir("output"): - output = self.cmd( - "debug", "dump-archive-items", self.repository_location + "::test" - ) + output = self.cmd("debug", "dump-archive-items", self.repository_location + "::test") output_dir = sorted(os.listdir("output")) assert len(output_dir) > 0 and output_dir[0].startswith("000000_") assert "Done." 
in output @@ -3758,9 +3567,7 @@ def test_debug_put_get_delete_obj(self): self.create_regular_file("file", contents=data) output = self.cmd("debug", "put-obj", self.repository_location, "input/file") assert hexkey in output - output = self.cmd( - "debug", "get-obj", self.repository_location, hexkey, "output/file" - ) + output = self.cmd("debug", "get-obj", self.repository_location, hexkey, "output/file") assert hexkey in output with open("output/file", "rb") as f: data_read = f.read() @@ -3777,9 +3584,7 @@ def raise_eof(*args): raise EOFError with patch.object(KeyfileKeyBase, "create", raise_eof): - self.cmd( - "init", "--encryption=repokey", self.repository_location, exit_code=1 - ) + self.cmd("init", "--encryption=repokey", self.repository_location, exit_code=1) assert not os.path.exists(self.repository_location) def test_init_requires_encryption_option(self): @@ -3796,9 +3601,7 @@ def test_init_nested_repositories(self): ) else: with pytest.raises(Repository.AlreadyExists): - self.cmd( - "init", "--encryption=repokey", self.repository_location + "/nested" - ) + self.cmd("init", "--encryption=repokey", self.repository_location + "/nested") def test_init_refuse_to_overwrite_keyfile(self): """BORG_KEY_FILE=something borg init should quit if "something" already exists. @@ -3855,9 +3658,7 @@ def test_check_cache(self): def test_recreate_target_rc(self): self.cmd("init", "--encryption=repokey", self.repository_location) - output = self.cmd( - "recreate", self.repository_location, "--target=asdf", exit_code=2 - ) + output = self.cmd("recreate", self.repository_location, "--target=asdf", exit_code=2) assert "Need to specify single archive" in output def test_recreate_target(self): @@ -4075,9 +3876,7 @@ def test_recreate_list_output(self): self.assert_in("input/file1", output) self.assert_in("x input/file3", output) - output = self.cmd( - "recreate", self.repository_location + "::test", "-e", "input/file4" - ) + output = self.cmd("recreate", self.repository_location + "::test", "-e", "input/file4") self.check_cache() self.assert_not_in("input/file1", output) self.assert_not_in("x input/file4", output) @@ -4148,9 +3947,7 @@ def test_key_import_keyfile_with_borg_key_file(self): imported_key_file = os.path.join(self.output_path, "imported") with environment_variable(BORG_KEY_FILE=imported_key_file): self.cmd("key", "import", self.repository_location, exported_key_file) - assert not os.path.isfile( - key_file - ), '"borg key import" should respect BORG_KEY_FILE' + assert not os.path.isfile(key_file), '"borg key import" should respect BORG_KEY_FILE' with open(imported_key_file, "r") as fd: imported_key_contents = fd.read() @@ -4218,17 +4015,13 @@ def test_key_import_errors(self): export_file = self.output_path + "/exported" self.cmd("init", self.repository_location, "--encryption", "keyfile") - self.cmd( - "key", "import", self.repository_location, export_file, exit_code=EXIT_ERROR - ) + self.cmd("key", "import", self.repository_location, export_file, exit_code=EXIT_ERROR) with open(export_file, "w") as fd: fd.write("something not a key\n") if self.FORK_DEFAULT: - self.cmd( - "key", "import", self.repository_location, export_file, exit_code=2 - ) + self.cmd("key", "import", self.repository_location, export_file, exit_code=2) else: with pytest.raises(NotABorgKeyFile): self.cmd("key", "import", self.repository_location, export_file) @@ -4237,9 +4030,7 @@ def test_key_import_errors(self): fd.write("BORG_KEY a0a0a0\n") if self.FORK_DEFAULT: - self.cmd( - "key", "import", self.repository_location, 
export_file, exit_code=2 - ) + self.cmd("key", "import", self.repository_location, export_file, exit_code=2) else: with pytest.raises(RepoIdMismatch): self.cmd("key", "import", self.repository_location, export_file) @@ -4309,19 +4100,13 @@ def test_key_import_paperkey(self): # print(i.to_bytes(2, 'big')) # break - self.cmd( - "key", "import", "--paper", self.repository_location, input=typed_input - ) + self.cmd("key", "import", "--paper", self.repository_location, input=typed_input) # Test abort paths typed_input = b"\ny\n" - self.cmd( - "key", "import", "--paper", self.repository_location, input=typed_input - ) + self.cmd("key", "import", "--paper", self.repository_location, input=typed_input) typed_input = b"2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n\ny\n" - self.cmd( - "key", "import", "--paper", self.repository_location, input=typed_input - ) + self.cmd("key", "import", "--paper", self.repository_location, input=typed_input) def test_debug_dump_manifest(self): self.create_regular_file("file1", size=1024 * 80) @@ -4343,9 +4128,7 @@ def test_debug_dump_archive(self): self.cmd("init", "--encryption=repokey", self.repository_location) self.cmd("create", self.repository_location + "::test", "input") dump_file = self.output_path + "/dump" - output = self.cmd( - "debug", "dump-archive", self.repository_location + "::test", dump_file - ) + output = self.cmd("debug", "dump-archive", self.repository_location + "::test", dump_file) assert output == "" with open(dump_file, "r") as f: result = json.load(f) @@ -4356,9 +4139,7 @@ def test_debug_dump_archive(self): def test_debug_refcount_obj(self): self.cmd("init", "--encryption=repokey", self.repository_location) - output = self.cmd( - "debug", "refcount-obj", self.repository_location, "0" * 64 - ).strip() + output = self.cmd("debug", "refcount-obj", self.repository_location, "0" * 64).strip() assert ( output == "object 0000000000000000000000000000000000000000000000000000000000000000 not found [info from chunks cache]." @@ -4368,18 +4149,11 @@ def test_debug_refcount_obj(self): self.cmd("create", "--json", self.repository_location + "::test", "input") ) archive_id = create_json["archive"]["id"] - output = self.cmd( - "debug", "refcount-obj", self.repository_location, archive_id - ).strip() - assert ( - output - == "object " + archive_id + " has 1 referrers [info from chunks cache]." - ) + output = self.cmd("debug", "refcount-obj", self.repository_location, archive_id).strip() + assert output == "object " + archive_id + " has 1 referrers [info from chunks cache]." # Invalid IDs do not abort or return an error - output = self.cmd( - "debug", "refcount-obj", self.repository_location, "124", "xyza" - ).strip() + output = self.cmd("debug", "refcount-obj", self.repository_location, "124", "xyza").strip() assert output == "object id 124 is invalid.\nobject id xyza is invalid." def test_debug_info(self): @@ -4406,9 +4180,7 @@ def test_config(self): self.assert_in("id", output) self.assert_not_in("last_segment_checked", output) - output = self.cmd( - "config", self.repository_location, "last_segment_checked", exit_code=1 - ) + output = self.cmd("config", self.repository_location, "last_segment_checked", exit_code=1) self.assert_in("No option ", output) self.cmd("config", self.repository_location, "last_segment_checked", "123") output = self.cmd("config", self.repository_location, "last_segment_checked") @@ -4454,9 +4226,7 @@ def test_export_tar(self): ) with changedir("output"): # This probably assumes GNU tar. 
Note -p switch to extract permissions regardless of umask. - subprocess.check_call( - ["tar", "xpf", "../simple.tar", "--warning=no-timestamp"] - ) + subprocess.check_call(["tar", "xpf", "../simple.tar", "--warning=no-timestamp"]) self.assert_dirs_equal( "input", "output/input", @@ -4480,9 +4250,7 @@ def test_export_tar_gz(self): assert "input/file1\n" in list assert "input/dir2\n" in list with changedir("output"): - subprocess.check_call( - ["tar", "xpf", "../simple.tar.gz", "--warning=no-timestamp"] - ) + subprocess.check_call(["tar", "xpf", "../simple.tar.gz", "--warning=no-timestamp"]) self.assert_dirs_equal( "input", "output/input", @@ -4510,9 +4278,7 @@ def test_export_tar_strip_components(self): assert "input/file1\n" in list assert "input/dir2\n" in list with changedir("output"): - subprocess.check_call( - ["tar", "xpf", "../simple.tar", "--warning=no-timestamp"] - ) + subprocess.check_call(["tar", "xpf", "../simple.tar", "--warning=no-timestamp"]) self.assert_dirs_equal( "input", "output/", ignore_flags=True, ignore_xattrs=True, ignore_ns=True ) @@ -4528,9 +4294,7 @@ def test_export_tar_strip_components_links(self): "--strip-components=2", ) with changedir("output"): - subprocess.check_call( - ["tar", "xpf", "../output.tar", "--warning=no-timestamp"] - ) + subprocess.check_call(["tar", "xpf", "../output.tar", "--warning=no-timestamp"]) assert os.stat("hardlink").st_nlink == 2 assert os.stat("subdir/hardlink").st_nlink == 2 assert os.stat("aaaa").st_nlink == 2 @@ -4547,9 +4311,7 @@ def test_extract_hardlinks_tar(self): "input/dir1", ) with changedir("output"): - subprocess.check_call( - ["tar", "xpf", "../output.tar", "--warning=no-timestamp"] - ) + subprocess.check_call(["tar", "xpf", "../output.tar", "--warning=no-timestamp"]) assert os.stat("input/dir1/hardlink").st_nlink == 2 assert os.stat("input/dir1/subdir/hardlink").st_nlink == 2 assert os.stat("input/dir1/aaaa").st_nlink == 2 @@ -4564,9 +4326,7 @@ def test_import_tar(self): self.cmd("import-tar", self.repository_location + "::dst", "simple.tar") with changedir(self.output_path): self.cmd("extract", self.repository_location + "::dst") - self.assert_dirs_equal( - "input", "output/input", ignore_ns=True, ignore_xattrs=True - ) + self.assert_dirs_equal("input", "output/input", ignore_ns=True, ignore_xattrs=True) @requires_gzip def test_import_tar_gz(self): @@ -4580,9 +4340,7 @@ def test_import_tar_gz(self): self.cmd("import-tar", self.repository_location + "::dst", "simple.tgz") with changedir(self.output_path): self.cmd("extract", self.repository_location + "::dst") - self.assert_dirs_equal( - "input", "output/input", ignore_ns=True, ignore_xattrs=True - ) + self.assert_dirs_equal("input", "output/input", ignore_ns=True, ignore_xattrs=True) def test_detect_attic_repo(self): path = make_attic_repo(self.repository_path) @@ -4652,9 +4410,7 @@ def patched_setxattr_EACCES(*args, **kwargs): def test_do_not_mention_archive_if_you_can_not_find_repo(self): """https://github.com/borgbackup/borg/issues/6014""" - archive = ( - self.repository_location + "-this-repository-does-not-exist" + "::test" - ) + archive = self.repository_location + "-this-repository-does-not-exist" + "::test" output = self.cmd("info", archive, exit_code=2, fork=True) self.assert_in("this-repository-does-not-exist", output) self.assert_not_in("this-repository-does-not-exist::test", output) @@ -4728,9 +4484,7 @@ def test_extract_capabilities(self): def test_extract_xattrs_errors(self): pass - @unittest.skip( - "test_basic_functionality seems incompatible with 
fakeroot and/or the binary." - ) + @unittest.skip("test_basic_functionality seems incompatible with fakeroot and/or the binary.") def test_basic_functionality(self): pass @@ -4762,23 +4516,17 @@ def setUp(self): self.create_src_archive("archive2") def test_check_usage(self): - output = self.cmd( - "check", "-v", "--progress", self.repository_location, exit_code=0 - ) + output = self.cmd("check", "-v", "--progress", self.repository_location, exit_code=0) self.assert_in("Starting repository check", output) self.assert_in("Starting archive consistency check", output) self.assert_in("Checking segments", output) # reset logging to new process default to avoid need for fork=True on next check logging.getLogger("borg.output.progress").setLevel(logging.NOTSET) - output = self.cmd( - "check", "-v", "--repository-only", self.repository_location, exit_code=0 - ) + output = self.cmd("check", "-v", "--repository-only", self.repository_location, exit_code=0) self.assert_in("Starting repository check", output) self.assert_not_in("Starting archive consistency check", output) self.assert_not_in("Checking segments", output) - output = self.cmd( - "check", "-v", "--archives-only", self.repository_location, exit_code=0 - ) + output = self.cmd("check", "-v", "--archives-only", self.repository_location, exit_code=0) self.assert_not_in("Starting repository check", output) self.assert_in("Starting archive consistency check", output) output = self.cmd( @@ -4849,13 +4597,9 @@ def test_missing_file_chunk(self): with patch.object(ChunkBuffer, "BUFFER_SIZE", 10): self.create_src_archive("archive3") # check should be able to heal the file now: - output = self.cmd( - "check", "-v", "--repair", self.repository_location, exit_code=0 - ) + output = self.cmd("check", "-v", "--repair", self.repository_location, exit_code=0) self.assert_in("Healed previously missing file chunk", output) - self.assert_in( - "testsuite/archiver.py: Completely healed previously damaged file!", output - ) + self.assert_in("testsuite/archiver.py: Completely healed previously damaged file!", output) # check that the file in the old archives has the correct chunks again for archive_name in ("archive1", "archive2"): archive, repository = self.open_archive(archive_name) @@ -4899,9 +4643,7 @@ def test_missing_manifest(self): repository.delete(Manifest.MANIFEST_ID) repository.commit(compact=False) self.cmd("check", self.repository_location, exit_code=1) - output = self.cmd( - "check", "-v", "--repair", self.repository_location, exit_code=0 - ) + output = self.cmd("check", "-v", "--repair", self.repository_location, exit_code=0) self.assert_in("archive1", output) self.assert_in("archive2", output) self.cmd("check", self.repository_location, exit_code=0) @@ -4914,9 +4656,7 @@ def test_corrupted_manifest(self): repository.put(Manifest.MANIFEST_ID, corrupted_manifest) repository.commit(compact=False) self.cmd("check", self.repository_location, exit_code=1) - output = self.cmd( - "check", "-v", "--repair", self.repository_location, exit_code=0 - ) + output = self.cmd("check", "-v", "--repair", self.repository_location, exit_code=0) self.assert_in("archive1", output) self.assert_in("archive2", output) self.cmd("check", self.repository_location, exit_code=0) @@ -4933,9 +4673,7 @@ def test_manifest_rebuild_corrupted_chunk(self): repository.put(archive.id, corrupted_chunk) repository.commit(compact=False) self.cmd("check", self.repository_location, exit_code=1) - output = self.cmd( - "check", "-v", "--repair", self.repository_location, exit_code=0 - ) + output = 
self.cmd("check", "-v", "--repair", self.repository_location, exit_code=0) self.assert_in("archive2", output) self.cmd("check", self.repository_location, exit_code=0) @@ -4977,9 +4715,7 @@ def test_extra_chunks(self): self.cmd("check", self.repository_location, exit_code=1) self.cmd("check", "--repair", self.repository_location, exit_code=0) self.cmd("check", self.repository_location, exit_code=0) - self.cmd( - "extract", "--dry-run", self.repository_location + "::archive1", exit_code=0 - ) + self.cmd("extract", "--dry-run", self.repository_location + "::archive1", exit_code=0) def _test_verify_data(self, *init_args): shutil.rmtree(self.repository_path) @@ -4995,9 +4731,7 @@ def _test_verify_data(self, *init_args): break repository.commit(compact=False) self.cmd("check", self.repository_location, exit_code=0) - output = self.cmd( - "check", "--verify-data", self.repository_location, exit_code=1 - ) + output = self.cmd("check", "--verify-data", self.repository_location, exit_code=1) assert bin_to_hex(chunk.id) + ", integrity error" in output # repair (heal is tested in another test) output = self.cmd( @@ -5042,9 +4776,7 @@ def as_dict(self): with repository: manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK) with Cache(repository, key, manifest) as cache: - archive = Archive( - repository, key, manifest, "0.13", cache=cache, create=True - ) + archive = Archive(repository, key, manifest, "0.13", cache=cache, create=True) archive.items_buffer.add(Attic013Item()) archive.save() self.cmd("check", self.repository_location, exit_code=0) @@ -5063,9 +4795,9 @@ def spoof_manifest(self, repository): "version": 1, "archives": {}, "config": {}, - "timestamp": ( - datetime.utcnow() + timedelta(days=1) - ).strftime(ISO_FORMAT), + "timestamp": (datetime.utcnow() + timedelta(days=1)).strftime( + ISO_FORMAT + ), } ) ), @@ -5084,9 +4816,9 @@ def test_fresh_init_tam_required(self): { "version": 1, "archives": {}, - "timestamp": ( - datetime.utcnow() + timedelta(days=1) - ).strftime(ISO_FORMAT), + "timestamp": (datetime.utcnow() + timedelta(days=1)).strftime( + ISO_FORMAT + ), } ) ), @@ -5106,9 +4838,7 @@ def test_not_required(self): key.tam_required = False key.change_passphrase(key._passphrase) - manifest = msgpack.unpackb( - key.decrypt(None, repository.get(Manifest.MANIFEST_ID)) - ) + manifest = msgpack.unpackb(key.decrypt(None, repository.get(Manifest.MANIFEST_ID))) del manifest[b"tam"] repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb(manifest))) repository.commit(compact=False) @@ -5168,23 +4898,15 @@ def test_remote_repo_restrict_to_path(self): ["--restrict-to-path", self.repository_path], ): with pytest.raises(PathNotAllowed): - self.cmd( - "init", "--encryption=repokey", self.repository_location + "_0" - ) + self.cmd("init", "--encryption=repokey", self.repository_location + "_0") # restricted to a completely different path: - with patch.object( - RemoteRepository, "extra_test_args", ["--restrict-to-path", "/foo"] - ): + with patch.object(RemoteRepository, "extra_test_args", ["--restrict-to-path", "/foo"]): with pytest.raises(PathNotAllowed): - self.cmd( - "init", "--encryption=repokey", self.repository_location + "_1" - ) + self.cmd("init", "--encryption=repokey", self.repository_location + "_1") path_prefix = os.path.dirname(self.repository_path) # restrict to repo directory's parent directory: - with patch.object( - RemoteRepository, "extra_test_args", ["--restrict-to-path", path_prefix] - ): + with patch.object(RemoteRepository, "extra_test_args", 
["--restrict-to-path", path_prefix]): self.cmd("init", "--encryption=repokey", self.repository_location + "_2") # restrict to repo directory's parent directory and another directory: with patch.object( @@ -5275,9 +4997,9 @@ def setUp(self): super().setUp() self.create_test_files() self.cmd("init", "--encryption=repokey", self.repository_location) - self.cache_path = json.loads( - self.cmd("info", self.repository_location, "--json") - )["cache"]["path"] + self.cache_path = json.loads(self.cmd("info", self.repository_location, "--json"))["cache"][ + "path" + ] def corrupt(self, file, amount=1): with open(file, "r+b") as fd: @@ -5306,9 +5028,7 @@ def test_cache_files(self): def test_chunks_archive(self): self.cmd("create", self.repository_location + "::test1", "input") # Find ID of test1 so we can corrupt it later :) - target_id = self.cmd( - "list", self.repository_location, "--format={id}{LF}" - ).strip() + target_id = self.cmd("list", self.repository_location, "--format={id}{LF}").strip() self.cmd("create", self.repository_location + "::test2", "input") # Force cache sync, creating archive chunks of test1 and test2 in chunks.archive.d @@ -5331,9 +5051,7 @@ def test_chunks_archive(self): config.write(fd) # Cache sync notices corrupted archive chunks, but automatically recovers. - out = self.cmd( - "create", "-v", self.repository_location + "::test3", "input", exit_code=1 - ) + out = self.cmd("create", "-v", self.repository_location + "::test3", "input", exit_code=1) assert "Reading cached archive chunk index for test1" in out assert "Cached archive chunk index of test1 is corrupted" in out assert "Fetching and building archive index for test1" in out @@ -5349,10 +5067,7 @@ def test_old_version_interfered(self): config.write(fd) out = self.cmd("info", self.repository_location) - assert ( - "Cache integrity data not available: old Borg version modified the cache." - in out - ) + assert "Cache integrity data not available: old Borg version modified the cache." 
in out class DiffArchiverTestCase(ArchiverTestCaseBase): @@ -5433,9 +5148,7 @@ def do_asserts(output, can_compare_ids): # Directory replaced with a regular file if "BORG_TESTS_IGNORE_MODES" not in os.environ: - assert ( - "[drwxr-xr-x -> -rwxr-xr-x] input/dir_replaced_with_file" in output - ) + assert "[drwxr-xr-x -> -rwxr-xr-x] input/dir_replaced_with_file" in output # Basic directory cases assert "added directory input/dir_added" in output @@ -5520,24 +5233,14 @@ def get_changes(filename, data): } in get_changes("input/dir_replaced_with_file", joutput) # Basic directory cases - assert {"type": "added directory"} in get_changes( - "input/dir_added", joutput - ) - assert {"type": "removed directory"} in get_changes( - "input/dir_removed", joutput - ) + assert {"type": "added directory"} in get_changes("input/dir_added", joutput) + assert {"type": "removed directory"} in get_changes("input/dir_removed", joutput) if are_symlinks_supported(): # Basic symlink cases - assert {"type": "changed link"} in get_changes( - "input/link_changed", joutput - ) - assert {"type": "added link"} in get_changes( - "input/link_added", joutput - ) - assert {"type": "removed link"} in get_changes( - "input/link_removed", joutput - ) + assert {"type": "changed link"} in get_changes("input/link_changed", joutput) + assert {"type": "added link"} in get_changes("input/link_added", joutput) + assert {"type": "removed link"} in get_changes("input/link_removed", joutput) # Symlink replacing or being replaced assert any( @@ -5562,34 +5265,24 @@ def get_changes(filename, data): ) assert expected in get_changes("input/empty", joutput) if are_hardlinks_supported(): - assert expected in get_changes( - "input/hardlink_contents_changed", joutput - ) + assert expected in get_changes("input/hardlink_contents_changed", joutput) if are_symlinks_supported(): - assert not any( - get_changes("input/link_target_contents_changed", joutput) - ) + assert not any(get_changes("input/link_target_contents_changed", joutput)) # Added a new file and a hard link to it. Both links to the same # inode should appear as separate files. - assert {"type": "added", "size": 2048} in get_changes( - "input/file_added", joutput - ) + assert {"type": "added", "size": 2048} in get_changes("input/file_added", joutput) if are_hardlinks_supported(): assert {"type": "added", "size": 2048} in get_changes( "input/hardlink_added", joutput ) # check if a diff between non-existent and empty new file is found - assert {"type": "added", "size": 0} in get_changes( - "input/file_empty_added", joutput - ) + assert {"type": "added", "size": 0} in get_changes("input/file_empty_added", joutput) # The inode has two links and both of them are deleted. They should # appear as two deleted files. 
- assert {"type": "removed", "size": 256} in get_changes( - "input/file_removed", joutput - ) + assert {"type": "removed", "size": 256} in get_changes("input/file_removed", joutput) if are_hardlinks_supported(): assert {"type": "removed", "size": 256} in get_changes( "input/hardlink_removed", joutput @@ -5606,20 +5299,14 @@ def get_changes(filename, data): if are_hardlinks_supported(): assert not any(get_changes("input/hardlink_target_replaced", joutput)) - do_asserts( - self.cmd("diff", self.repository_location + "::test0", "test1a"), True - ) + do_asserts(self.cmd("diff", self.repository_location + "::test0", "test1a"), True) # We expect exit_code=1 due to the chunker params warning do_asserts( - self.cmd( - "diff", self.repository_location + "::test0", "test1b", exit_code=1 - ), + self.cmd("diff", self.repository_location + "::test0", "test1b", exit_code=1), False, ) do_json_asserts( - self.cmd( - "diff", self.repository_location + "::test0", "test1a", "--json-lines" - ), + self.cmd("diff", self.repository_location + "::test0", "test1a", "--json-lines"), True, ) @@ -5642,9 +5329,7 @@ def test_sort_option(self): self.create_regular_file("d_file_added", size=256) self.cmd("create", self.repository_location + "::test1", "input") - output = self.cmd( - "diff", "--sort", self.repository_location + "::test0", "test1" - ) + output = self.cmd("diff", "--sort", self.repository_location + "::test0", "test1") expected = [ "a_file_removed", "b_file_added", @@ -5792,9 +5477,7 @@ def test_strip_components(self): matcher, self.peek_and_store_hardlink_masters, strip_components=1 ) assert not filter(Item(path="shallow")) - assert not filter( - Item(path="shallow/") - ) # can this even happen? paths are normalized... + assert not filter(Item(path="shallow/")) # can this even happen? paths are normalized... 
assert filter(Item(path="deep enough/file")) assert filter(Item(path="something/dir/file")) @@ -5802,9 +5485,7 @@ def test_strip_components(self): class TestCommonOptions: @staticmethod def define_common_options(add_common_option): - add_common_option( - "-h", "--help", action="help", help="show this help message and exit" - ) + add_common_option("-h", "--help", action="help", help="show this help message and exit") add_common_option( "--critical", dest="log_level", @@ -5829,9 +5510,7 @@ def define_common_options(add_common_option): metavar="TOPIC", default=[], ) - add_common_option( - "-p", "--progress", dest="progress", action="store_true", help="foo" - ) + add_common_option("-p", "--progress", dest="progress", action="store_true", help="foo") add_common_option( "--lock-wait", dest="lock_wait", @@ -5843,9 +5522,7 @@ def define_common_options(add_common_option): @pytest.fixture def basic_parser(self): - parser = argparse.ArgumentParser( - prog="test", description="test parser", add_help=False - ) + parser = argparse.ArgumentParser(prog="test", description="test parser", add_help=False) parser.common_options = Archiver.CommonOptions( self.define_common_options, suffix_precedence=("_level0", "_level1") ) @@ -5853,15 +5530,11 @@ def basic_parser(self): @pytest.fixture def subparsers(self, basic_parser): - return basic_parser.add_subparsers( - title="required arguments", metavar="" - ) + return basic_parser.add_subparsers(title="required arguments", metavar="") @pytest.fixture def parser(self, basic_parser): - basic_parser.common_options.add_common_group( - basic_parser, "_level0", provide_defaults=True - ) + basic_parser.common_options.add_common_group(basic_parser, "_level0", provide_defaults=True) return basic_parser @pytest.fixture @@ -5971,9 +5644,7 @@ def get_all_parsers(): def discover_level(prefix, parser, Archiver, extra_choices=None): choices = {} for action in parser._actions: - if action.choices is not None and "SubParsersAction" in str( - action.__class__ - ): + if action.choices is not None and "SubParsersAction" in str(action.__class__): for cmd, parser in action.choices.items(): choices[prefix + cmd] = parser if extra_choices is not None: diff --git a/src/borg/testsuite/cache.py b/src/borg/testsuite/cache.py index b8ca85ad6b4..d1fe1f31332 100644 --- a/src/borg/testsuite/cache.py +++ b/src/borg/testsuite/cache.py @@ -26,9 +26,7 @@ def sync(self, index): return CacheSynchronizer(index) def test_no_chunks(self, index, sync): - data = packb( - {"foo": "bar", "baz": 1234, "bar": 5678, "user": "chunks", "chunks": []} - ) + data = packb({"foo": "bar", "baz": 1234, "bar": 5678, "user": "chunks", "chunks": []}) sync.feed(data) assert not len(index) @@ -237,9 +235,7 @@ class TestAdHocCache: @pytest.fixture def repository(self, tmpdir): self.repository_location = os.path.join(str(tmpdir), "repository") - with Repository( - self.repository_location, exclusive=True, create=True - ) as repository: + with Repository(self.repository_location, exclusive=True, create=True) as repository: repository.put(H(1), b"1234") repository.put(Manifest.MANIFEST_ID, b"5678") yield repository @@ -254,9 +250,7 @@ def key(self, repository, monkeypatch): @pytest.fixture def manifest(self, repository, key): Manifest(key, repository).write() - return Manifest.load( - repository, key=key, operations=Manifest.NO_OPERATION_CHECK - )[0] + return Manifest.load(repository, key=key, operations=Manifest.NO_OPERATION_CHECK)[0] @pytest.fixture def cache(self, repository, key, manifest): diff --git 
a/src/borg/testsuite/chunker.py b/src/borg/testsuite/chunker.py index cbe5eec6617..6286620face 100644 --- a/src/borg/testsuite/chunker.py +++ b/src/borg/testsuite/chunker.py @@ -86,9 +86,7 @@ def test_chunkify_header_and_blocks_fmap_zeros(self): ] parts = cf(chunker.chunkify(BytesIO(data), fmap=fmap)) # because we marked the '_' ranges as holes, we will get hole ranges instead! - self.assert_equal( - parts, [data[0:123], 4096, data[123 + 4096 : 123 + 8192], 4096] - ) + self.assert_equal(parts, [data[0:123], 4096, data[123 + 4096 : 123 + 8192], 4096]) def test_chunkify_header_and_blocks_fmap_partial(self): data = b"H" * 123 + b"_" * 4096 + b"X" * 4096 + b"_" * 4096 @@ -110,23 +108,13 @@ def test_chunkify(self): parts = cf(Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(data))) self.assert_equal(len(parts), 2) self.assert_equal(b"".join(parts), data) + self.assert_equal(cf(Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b""))), []) self.assert_equal( - cf(Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b""))), [] - ) - self.assert_equal( - cf( - Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify( - BytesIO(b"foobarboobaz" * 3) - ) - ), + cf(Chunker(0, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b"foobarboobaz" * 3))), [b"fooba", b"rboobaz", b"fooba", b"rboobaz", b"fooba", b"rboobaz"], ) self.assert_equal( - cf( - Chunker(1, 1, CHUNK_MAX_EXP, 2, 2).chunkify( - BytesIO(b"foobarboobaz" * 3) - ) - ), + cf(Chunker(1, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b"foobarboobaz" * 3))), [ b"fo", b"obarb", @@ -140,59 +128,31 @@ def test_chunkify(self): ], ) self.assert_equal( - cf( - Chunker(2, 1, CHUNK_MAX_EXP, 2, 2).chunkify( - BytesIO(b"foobarboobaz" * 3) - ) - ), + cf(Chunker(2, 1, CHUNK_MAX_EXP, 2, 2).chunkify(BytesIO(b"foobarboobaz" * 3))), [b"foob", b"ar", b"boobazfoob", b"ar", b"boobazfoob", b"ar", b"boobaz"], ) self.assert_equal( - cf( - Chunker(0, 2, CHUNK_MAX_EXP, 2, 3).chunkify( - BytesIO(b"foobarboobaz" * 3) - ) - ), + cf(Chunker(0, 2, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b"foobarboobaz" * 3))), [b"foobarboobaz" * 3], ) self.assert_equal( - cf( - Chunker(1, 2, CHUNK_MAX_EXP, 2, 3).chunkify( - BytesIO(b"foobarboobaz" * 3) - ) - ), + cf(Chunker(1, 2, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b"foobarboobaz" * 3))), [b"foobar", b"boobazfo", b"obar", b"boobazfo", b"obar", b"boobaz"], ) self.assert_equal( - cf( - Chunker(2, 2, CHUNK_MAX_EXP, 2, 3).chunkify( - BytesIO(b"foobarboobaz" * 3) - ) - ), + cf(Chunker(2, 2, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b"foobarboobaz" * 3))), [b"foob", b"arboobaz", b"foob", b"arboobaz", b"foob", b"arboobaz"], ) self.assert_equal( - cf( - Chunker(0, 3, CHUNK_MAX_EXP, 2, 3).chunkify( - BytesIO(b"foobarboobaz" * 3) - ) - ), + cf(Chunker(0, 3, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b"foobarboobaz" * 3))), [b"foobarboobaz" * 3], ) self.assert_equal( - cf( - Chunker(1, 3, CHUNK_MAX_EXP, 2, 3).chunkify( - BytesIO(b"foobarboobaz" * 3) - ) - ), + cf(Chunker(1, 3, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b"foobarboobaz" * 3))), [b"foobarbo", b"obazfoobar", b"boobazfo", b"obarboobaz"], ) self.assert_equal( - cf( - Chunker(2, 3, CHUNK_MAX_EXP, 2, 3).chunkify( - BytesIO(b"foobarboobaz" * 3) - ) - ), + cf(Chunker(2, 3, CHUNK_MAX_EXP, 2, 3).chunkify(BytesIO(b"foobarboobaz" * 3))), [b"foobarboobaz", b"foobarboobaz", b"foobarboobaz"], ) diff --git a/src/borg/testsuite/chunker_slow.py b/src/borg/testsuite/chunker_slow.py index 8c20e615bad..1de19bd0935 100644 --- a/src/borg/testsuite/chunker_slow.py +++ b/src/borg/testsuite/chunker_slow.py @@ -30,10 +30,7 @@ def twist(size): for seed 
in (1849058162, 1234567653): fh = BytesIO(data) chunker = Chunker(seed, minexp, maxexp, maskbits, winsize) - chunks = [ - blake2b_256(b"", c) - for c in cf(chunker.chunkify(fh, -1)) - ] + chunks = [blake2b_256(b"", c) for c in cf(chunker.chunkify(fh, -1))] runs.append(blake2b_256(b"", b"".join(chunks))) # The "correct" hash below matches the existing chunker behavior. @@ -41,7 +38,5 @@ def twist(size): overall_hash = blake2b_256(b"", b"".join(runs)) self.assert_equal( overall_hash, - unhexlify( - "b559b0ac8df8daaa221201d018815114241ea5c6609d98913cd2246a702af4e3" - ), + unhexlify("b559b0ac8df8daaa221201d018815114241ea5c6609d98913cd2246a702af4e3"), ) diff --git a/src/borg/testsuite/compress.py b/src/borg/testsuite/compress.py index 4bbda5bc55f..461cf5672ab 100644 --- a/src/borg/testsuite/compress.py +++ b/src/borg/testsuite/compress.py @@ -170,11 +170,7 @@ def test_obfuscate(): 0.2, 0.001, ) # estimate compression factor outer boundaries - assert ( - max_compress * len(data) + 8 - <= len(compressed) - <= min_compress * len(data) * 1001 + 8 - ) + assert max_compress * len(data) + 8 <= len(compressed) <= min_compress * len(data) * 1001 + 8 # compressing 100 times the same data should give multiple different result sizes assert len(set(len(compressor.compress(data)) for i in range(100))) > 10 @@ -189,9 +185,7 @@ def test_obfuscate(): 0.001, ) # estimate compression factor outer boundaries assert ( - max_compress * len(data) + 8 - <= len(compressed) - <= min_compress * len(data) * 10000001 + 8 + max_compress * len(data) + 8 <= len(compressed) <= min_compress * len(data) * 10000001 + 8 ) # compressing 100 times the same data should give multiple different result sizes assert len(set(len(compressor.compress(data)) for i in range(100))) > 90 @@ -206,11 +200,7 @@ def test_obfuscate(): 0.2, 0.001, ) # estimate compression factor outer boundaries - assert ( - max_compress * len(data) + 8 - <= len(compressed) - <= min_compress * len(data) * 1001 + 8 - ) + assert max_compress * len(data) + 8 <= len(compressed) <= min_compress * len(data) * 1001 + 8 # compressing 100 times the same data should give multiple different result sizes assert len(set(len(compressor.compress(data)) for i in range(100))) > 10 diff --git a/src/borg/testsuite/crypto.py b/src/borg/testsuite/crypto.py index 420166fea53..ffd88727eb3 100644 --- a/src/borg/testsuite/crypto.py +++ b/src/borg/testsuite/crypto.py @@ -64,22 +64,14 @@ def test_AES256_CTR_HMAC_SHA256(self): ) self.assert_equal(cs.next_iv(), 2) # auth-then-decrypt - cs = AES256_CTR_HMAC_SHA256( - mac_key, enc_key, header_len=len(header), aad_offset=1 - ) + cs = AES256_CTR_HMAC_SHA256(mac_key, enc_key, header_len=len(header), aad_offset=1) pdata = cs.decrypt(hdr_mac_iv_cdata) self.assert_equal(data, pdata) self.assert_equal(cs.next_iv(), 2) # auth-failure due to corruption (corrupted data) - cs = AES256_CTR_HMAC_SHA256( - mac_key, enc_key, header_len=len(header), aad_offset=1 - ) - hdr_mac_iv_cdata_corrupted = ( - hdr_mac_iv_cdata[:41] + b"\0" + hdr_mac_iv_cdata[42:] - ) - self.assert_raises( - IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted) - ) + cs = AES256_CTR_HMAC_SHA256(mac_key, enc_key, header_len=len(header), aad_offset=1) + hdr_mac_iv_cdata_corrupted = hdr_mac_iv_cdata[:41] + b"\0" + hdr_mac_iv_cdata[42:] + self.assert_raises(IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted)) def test_AES256_CTR_HMAC_SHA256_aad(self): mac_key = b"Y" * 32 @@ -106,20 +98,14 @@ def test_AES256_CTR_HMAC_SHA256_aad(self): ) self.assert_equal(cs.next_iv(), 2) # 
auth-then-decrypt - cs = AES256_CTR_HMAC_SHA256( - mac_key, enc_key, header_len=len(header), aad_offset=1 - ) + cs = AES256_CTR_HMAC_SHA256(mac_key, enc_key, header_len=len(header), aad_offset=1) pdata = cs.decrypt(hdr_mac_iv_cdata) self.assert_equal(data, pdata) self.assert_equal(cs.next_iv(), 2) # auth-failure due to corruption (corrupted aad) - cs = AES256_CTR_HMAC_SHA256( - mac_key, enc_key, header_len=len(header), aad_offset=1 - ) + cs = AES256_CTR_HMAC_SHA256(mac_key, enc_key, header_len=len(header), aad_offset=1) hdr_mac_iv_cdata_corrupted = hdr_mac_iv_cdata[:1] + b"\0" + hdr_mac_iv_cdata[2:] - self.assert_raises( - IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted) - ) + self.assert_raises(IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted)) def test_AE(self): # used in legacy-like layout (1 type byte, no aad) @@ -165,12 +151,8 @@ def test_AE(self): self.assert_equal(cs.next_iv(), 1) # auth-failure due to corruption (corrupted data) cs = cs_cls(mac_key, enc_key, header_len=len(header), aad_offset=1) - hdr_mac_iv_cdata_corrupted = ( - hdr_mac_iv_cdata[:29] + b"\0" + hdr_mac_iv_cdata[30:] - ) - self.assert_raises( - IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted) - ) + hdr_mac_iv_cdata_corrupted = hdr_mac_iv_cdata[:29] + b"\0" + hdr_mac_iv_cdata[30:] + self.assert_raises(IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted)) def test_AEAD(self): # test with aad @@ -216,39 +198,27 @@ def test_AEAD(self): self.assert_equal(cs.next_iv(), 1) # auth-failure due to corruption (corrupted aad) cs = cs_cls(mac_key, enc_key, header_len=len(header), aad_offset=1) - hdr_mac_iv_cdata_corrupted = ( - hdr_mac_iv_cdata[:1] + b"\0" + hdr_mac_iv_cdata[2:] - ) - self.assert_raises( - IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted) - ) + hdr_mac_iv_cdata_corrupted = hdr_mac_iv_cdata[:1] + b"\0" + hdr_mac_iv_cdata[2:] + self.assert_raises(IntegrityError, lambda: cs.decrypt(hdr_mac_iv_cdata_corrupted)) def test_hmac_sha256(self): # RFC 4231 test vectors key = b"\x0b" * 20 # Also test that this works with memory views data = memoryview(unhexlify("4869205468657265")) - hmac = unhexlify( - "b0344c61d8db38535ca8afceaf0bf12b" "881dc200c9833da726e9376c2e32cff7" - ) + hmac = unhexlify("b0344c61d8db38535ca8afceaf0bf12b" "881dc200c9833da726e9376c2e32cff7") assert hmac_sha256(key, data) == hmac key = unhexlify("4a656665") data = unhexlify("7768617420646f2079612077616e7420" "666f72206e6f7468696e673f") - hmac = unhexlify( - "5bdcc146bf60754e6a042426089575c7" "5a003f089d2739839dec58b964ec3843" - ) + hmac = unhexlify("5bdcc146bf60754e6a042426089575c7" "5a003f089d2739839dec58b964ec3843") assert hmac_sha256(key, data) == hmac key = b"\xaa" * 20 data = b"\xdd" * 50 - hmac = unhexlify( - "773ea91e36800e46854db8ebd09181a7" "2959098b3ef8c122d9635514ced565fe" - ) + hmac = unhexlify("773ea91e36800e46854db8ebd09181a7" "2959098b3ef8c122d9635514ced565fe") assert hmac_sha256(key, data) == hmac key = unhexlify("0102030405060708090a0b0c0d0e0f10" "111213141516171819") data = b"\xcd" * 50 - hmac = unhexlify( - "82558a389a443c0ea4cc819899f2083a" "85f0faa3e578f8077a2e3ff46729665b" - ) + hmac = unhexlify("82558a389a443c0ea4cc819899f2083a" "85f0faa3e578f8077a2e3ff46729665b") assert hmac_sha256(key, data) == hmac def test_blake2b_256(self): @@ -271,9 +241,7 @@ def test_blake2b_256(self): "bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319" ) - key = unhexlify( - "e944973af2256d4d670c12dd75304c319f58f4e40df6fb18ef996cb47e063676" - ) + key = 
unhexlify("e944973af2256d4d670c12dd75304c319f58f4e40df6fb18ef996cb47e063676") data = memoryview(b"1234567890" * 100) assert blake2b_256(key, data) == unhexlify( "97ede832378531dd0f4c668685d166e797da27b47d8cd441e885b60abd5e0cb2" diff --git a/src/borg/testsuite/file_integrity.py b/src/borg/testsuite/file_integrity.py index faee79b7e4a..16a7a671359 100644 --- a/src/borg/testsuite/file_integrity.py +++ b/src/borg/testsuite/file_integrity.py @@ -11,10 +11,7 @@ class TestReadIntegrityFile: def test_no_integrity(self, tmpdir): protected_file = tmpdir.join("file") protected_file.write("1234") - assert ( - DetachedIntegrityCheckedFile.read_integrity_file(str(protected_file)) - is None - ) + assert DetachedIntegrityCheckedFile.read_integrity_file(str(protected_file)) is None def test_truncated_integrity(self, tmpdir): protected_file = tmpdir.join("file") @@ -26,13 +23,8 @@ def test_truncated_integrity(self, tmpdir): def test_unknown_algorithm(self, tmpdir): protected_file = tmpdir.join("file") protected_file.write("1234") - tmpdir.join("file.integrity").write( - '{"algorithm": "HMAC_SERIOUSHASH", "digests": "1234"}' - ) - assert ( - DetachedIntegrityCheckedFile.read_integrity_file(str(protected_file)) - is None - ) + tmpdir.join("file.integrity").write('{"algorithm": "HMAC_SERIOUSHASH", "digests": "1234"}') + assert DetachedIntegrityCheckedFile.read_integrity_file(str(protected_file)) is None @pytest.mark.parametrize( "json", @@ -70,18 +62,14 @@ def test_corrupted_file(self, integrity_protected_file): with open(integrity_protected_file, "ab") as fd: fd.write(b" extra data") with pytest.raises(FileIntegrityError): - with DetachedIntegrityCheckedFile( - integrity_protected_file, write=False - ) as fd: + with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd: assert fd.read() == b"foo and bar extra data" def test_corrupted_file_partial_read(self, integrity_protected_file): with open(integrity_protected_file, "ab") as fd: fd.write(b" extra data") with pytest.raises(FileIntegrityError): - with DetachedIntegrityCheckedFile( - integrity_protected_file, write=False - ) as fd: + with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd: data = b"foo and bar" assert fd.read(len(data)) == data @@ -136,9 +124,7 @@ def test_wrong_part_name(self, integrity_protected_file): # Because some hash_part failed, the final digest will fail as well - again - even if we catch # the failing hash_part. This is intentional: (1) it makes the code simpler (2) it's a good fail-safe # against overly broad exception handling. - with DetachedIntegrityCheckedFile( - integrity_protected_file, write=False - ) as fd: + with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd: data1 = b"foo and bar" assert fd.read(len(data1)) == data1 with pytest.raises(FileIntegrityError): @@ -151,17 +137,13 @@ def test_part_independence(self, integrity_protected_file, partial_read): with open(integrity_protected_file, "ab") as fd: fd.write(b"some extra stuff that does not belong") with pytest.raises(FileIntegrityError): - with DetachedIntegrityCheckedFile( - integrity_protected_file, write=False - ) as fd: + with DetachedIntegrityCheckedFile(integrity_protected_file, write=False) as fd: data1 = b"foo and bar" try: assert fd.read(len(data1)) == data1 fd.hash_part("foopart") except FileIntegrityError: - assert ( - False - ), "This part must not raise, since this part is still valid." + assert False, "This part must not raise, since this part is still valid." 
if not partial_read: fd.read() # But overall it explodes with the final digest. Neat, eh? diff --git a/src/borg/testsuite/hashindex.py b/src/borg/testsuite/hashindex.py index d70694836e2..ae8731617b1 100644 --- a/src/borg/testsuite/hashindex.py +++ b/src/borg/testsuite/hashindex.py @@ -559,15 +559,11 @@ def HH(x, y): # now check if hashtable contents is as expected: - assert [idx.get(HH(0, y)) for y in range(400, 700)] == [ - (0, y) for y in range(400, 700) - ] + assert [idx.get(HH(0, y)) for y in range(400, 700)] == [(0, y) for y in range(400, 700)] assert [HH(0, y) in idx for y in range(400)] == [ False for y in range(400) ] # deleted entries # this will fail at HH(600, 259) if the bug is present. - assert [idx.get(HH(600, y)) for y in range(330)] == [ - (600, y) for y in range(330) - ] + assert [idx.get(HH(600, y)) for y in range(330)] == [(600, y) for y in range(330)] diff --git a/src/borg/testsuite/helpers.py b/src/borg/testsuite/helpers.py index a11cc233e29..3dbdf9d3027 100644 --- a/src/borg/testsuite/helpers.py +++ b/src/borg/testsuite/helpers.py @@ -53,12 +53,8 @@ def test_bigint(self): self.assert_equal(int_to_bigint(0), 0) self.assert_equal(int_to_bigint(2**63 - 1), 2**63 - 1) self.assert_equal(int_to_bigint(-(2**63) + 1), -(2**63) + 1) - self.assert_equal( - int_to_bigint(2**63), b"\x00\x00\x00\x00\x00\x00\x00\x80\x00" - ) - self.assert_equal( - int_to_bigint(-(2**63)), b"\x00\x00\x00\x00\x00\x00\x00\x80\xff" - ) + self.assert_equal(int_to_bigint(2**63), b"\x00\x00\x00\x00\x00\x00\x00\x80\x00") + self.assert_equal(int_to_bigint(-(2**63)), b"\x00\x00\x00\x00\x00\x00\x00\x80\xff") self.assert_equal(bigint_to_int(int_to_bigint(-(2**70))), -(2**70)) self.assert_equal(bigint_to_int(int_to_bigint(2**70)), 2**70) @@ -255,17 +251,13 @@ def test_abspath(self, monkeypatch, keys_dir): repr(Location("/some/absolute/path")) == "Location(proto='file', user=None, host=None, port=None, path='/some/absolute/path', archive=None)" ) - assert ( - Location("/some/absolute/path").to_key_filename() - == keys_dir + "some_absolute_path" - ) + assert Location("/some/absolute/path").to_key_filename() == keys_dir + "some_absolute_path" assert ( repr(Location("ssh://user@host/some/path")) == "Location(proto='ssh', user='user', host='host', port=None, path='/some/path', archive=None)" ) assert ( - Location("ssh://user@host/some/path").to_key_filename() - == keys_dir + "host__some_path" + Location("ssh://user@host/some/path").to_key_filename() == keys_dir + "host__some_path" ) def test_relpath(self, monkeypatch, keys_dir): @@ -278,10 +270,7 @@ def test_relpath(self, monkeypatch, keys_dir): repr(Location("some/relative/path")) == "Location(proto='file', user=None, host=None, port=None, path='some/relative/path', archive=None)" ) - assert ( - Location("some/relative/path").to_key_filename() - == keys_dir + "some_relative_path" - ) + assert Location("some/relative/path").to_key_filename() == keys_dir + "some_relative_path" assert ( repr(Location("ssh://user@host/./some/path")) == "Location(proto='ssh', user='user', host='host', port=None, path='/./some/path', archive=None)" @@ -322,8 +311,7 @@ def test_with_colons(self, monkeypatch, keys_dir): == "Location(proto='file', user=None, host=None, port=None, path='/abs/path:with:colons', archive=None)" ) assert ( - Location("/abs/path:with:colons").to_key_filename() - == keys_dir + "abs_path_with_colons" + Location("/abs/path:with:colons").to_key_filename() == keys_dir + "abs_path_with_colons" ) def test_user_parsing(self): @@ -402,9 +390,7 @@ def 
test_omit_archive(self): loc_without_archive = loc.omit_archive() assert loc_without_archive.archive is None assert loc_without_archive.raw == "ssh://user@host:1234/repos/{hostname}" - assert ( - loc_without_archive.processed == "ssh://user@host:1234/repos/%s" % hostname - ) + assert loc_without_archive.processed == "ssh://user@host:1234/repos/%s" % hostname class TestLocationWithEnv: @@ -714,9 +700,7 @@ def test_interval_number(self): interval("5") self.assert_equal( exc.value.args, - ( - "Unexpected interval time unit \"5\": expected one of ['H', 'd', 'w', 'm', 'y']", - ), + ("Unexpected interval time unit \"5\": expected one of ['H', 'd', 'w', 'm', 'y']",), ) @@ -754,9 +738,7 @@ def dotest(test_archives, within, indices): class StableDictTestCase(BaseTestCase): def test(self): d = StableDict(foo=1, bar=2, boo=3, baz=4) - self.assert_equal( - list(d.items()), [("bar", 2), ("baz", 4), ("boo", 3), ("foo", 1)] - ) + self.assert_equal(list(d.items()), [("bar", 2), ("baz", 4), ("boo", 3), ("foo", 1)]) self.assert_equal( hashlib.md5(msgpack.packb(d)).hexdigest(), "fc78df42cd60691b3ac3dd2a2b39903f", @@ -815,9 +797,7 @@ def test_get_keys_dir(monkeypatch): """test that get_keys_dir respects environment""" monkeypatch.delenv("BORG_KEYS_DIR", raising=False) monkeypatch.delenv("XDG_CONFIG_HOME", raising=False) - assert get_keys_dir() == os.path.join( - os.path.expanduser("~"), ".config", "borg", "keys" - ) + assert get_keys_dir() == os.path.join(os.path.expanduser("~"), ".config", "borg", "keys") monkeypatch.setenv("XDG_CONFIG_HOME", "/var/tmp/.config") assert get_keys_dir() == os.path.join("/var/tmp/.config", "borg", "keys") monkeypatch.setenv("BORG_KEYS_DIR", "/var/tmp") @@ -893,9 +873,7 @@ def test_file_size_iec(): def test_file_size_precision(): assert format_file_size(1234, precision=1) == "1.2 kB" # rounded down assert format_file_size(1254, precision=1) == "1.3 kB" # rounded up - assert ( - format_file_size(999990000, precision=1) == "1.0 GB" - ) # and not 999.9 MB or 1000.0 MB + assert format_file_size(999990000, precision=1) == "1.0 GB" # and not 999.9 MB or 1000.0 MB def test_file_size_sign(): @@ -1235,9 +1213,7 @@ def test_progress_endless_step(capfd): def test_partial_format(): assert partial_format("{space:10}", {"space": " "}) == " " * 10 - assert ( - partial_format("{foobar}", {"bar": "wrong", "foobar": "correct"}) == "correct" - ) + assert partial_format("{foobar}", {"bar": "wrong", "foobar": "correct"}) == "correct" assert partial_format("{unknown_key}", {}) == "{unknown_key}" assert partial_format("{key}{{escaped_key}}", {}) == "{key}{{escaped_key}}" assert partial_format("{{escaped_key}}", {"escaped_key": 1234}) == "{{escaped_key}}" @@ -1336,10 +1312,7 @@ def test_replace_placeholders(): def test_override_placeholders(): - assert ( - replace_placeholders("{uuid4}", overrides={"uuid4": "overridden"}) - == "overridden" - ) + assert replace_placeholders("{uuid4}", overrides={"uuid4": "overridden"}) == "overridden" def working_swidth(): @@ -1375,9 +1348,7 @@ def test_safe_timestamps(): with pytest.raises(OverflowError): datetime.utcfromtimestamp(beyond_y10k) assert datetime.utcfromtimestamp(safe_s(beyond_y10k)) > datetime(2038, 1, 1) - assert datetime.utcfromtimestamp(safe_ns(beyond_y10k) / 1000000000) > datetime( - 2038, 1, 1 - ) + assert datetime.utcfromtimestamp(safe_ns(beyond_y10k) / 1000000000) > datetime(2038, 1, 1) else: # ns fit into int64 assert safe_ns(2**64) <= 2**63 - 1 @@ -1390,9 +1361,7 @@ def test_safe_timestamps(): with pytest.raises(OverflowError): 
datetime.utcfromtimestamp(beyond_y10k) assert datetime.utcfromtimestamp(safe_s(beyond_y10k)) > datetime(2262, 1, 1) - assert datetime.utcfromtimestamp(safe_ns(beyond_y10k) / 1000000000) > datetime( - 2262, 1, 1 - ) + assert datetime.utcfromtimestamp(safe_ns(beyond_y10k) / 1000000000) > datetime(2262, 1, 1) class TestPopenWithErrorHandling: diff --git a/src/borg/testsuite/key.py b/src/borg/testsuite/key.py index efeb69c6eb0..3e1c767dde6 100644 --- a/src/borg/testsuite/key.py +++ b/src/borg/testsuite/key.py @@ -58,9 +58,7 @@ class MockArgs: """, ) ) - keyfile2_id = unhexlify( - "c3fbf14bc001ebcc3cd86e696c13482ed071740927cd7cbe1b01b4bfcee49314" - ) + keyfile2_id = unhexlify("c3fbf14bc001ebcc3cd86e696c13482ed071740927cd7cbe1b01b4bfcee49314") keyfile_blake2_key_file = """ BORG_KEY 0000000000000000000000000000000000000000000000000000000000000000 @@ -168,9 +166,7 @@ def test_keyfile(self, monkeypatch, keys_dir): def test_keyfile_nonce_rollback_protection(self, monkeypatch, keys_dir): monkeypatch.setenv("BORG_PASSPHRASE", "test") repository = self.MockRepository() - with open( - os.path.join(get_security_dir(repository.id_str), "nonce"), "w" - ) as fd: + with open(os.path.join(get_security_dir(repository.id_str), "nonce"), "w") as fd: fd.write("0000000000002000") key = KeyfileKey.create(repository, self.MockArgs()) data = key.encrypt(b"ABC") @@ -214,9 +210,7 @@ def test_keyfile_blake2(self, monkeypatch, keys_dir): fd.write(self.keyfile_blake2_key_file) monkeypatch.setenv("BORG_PASSPHRASE", "passphrase") key = Blake2KeyfileKey.detect(self.MockRepository(), self.keyfile_blake2_cdata) - assert ( - key.decrypt(self.keyfile_blake2_id, self.keyfile_blake2_cdata) == b"payload" - ) + assert key.decrypt(self.keyfile_blake2_id, self.keyfile_blake2_cdata) == b"payload" def test_passphrase(self, keys_dir, monkeypatch): monkeypatch.setenv("BORG_PASSPHRASE", "test") @@ -359,9 +353,7 @@ def test_passphrase_new_empty(self, capsys, monkeypatch): def test_passphrase_new_retries(self, monkeypatch): monkeypatch.delenv("BORG_PASSPHRASE", False) ascending_numbers = iter(range(20)) - monkeypatch.setattr( - getpass, "getpass", lambda prompt: str(next(ascending_numbers)) - ) + monkeypatch.setattr(getpass, "getpass", lambda prompt: str(next(ascending_numbers))) with pytest.raises(PasswordRetriesExceeded): Passphrase.new() diff --git a/src/borg/testsuite/locking.py b/src/borg/testsuite/locking.py index b8f5563c495..58095b3a58c 100644 --- a/src/borg/testsuite/locking.py +++ b/src/borg/testsuite/locking.py @@ -183,8 +183,7 @@ def acquire_release_loop( ) print_locked( - "Thread %2d: Loop timed out--terminating after %d loop cycles." - % (thread_id, cycle) + "Thread %2d: Loop timed out--terminating after %d loop cycles." % (thread_id, cycle) ) if last_thread is not None: # joining its predecessor, if any last_thread.join() @@ -213,9 +212,10 @@ def acquire_release_loop( assert ( lock_owner_counter.maxvalue() > 0 ), "Never gained the lock? Something went wrong here..." - assert lock_owner_counter.maxvalue() <= 1, ( - "Maximal number of concurrent lock holders was %d. So exclusivity is broken." - % (lock_owner_counter.maxvalue()) + assert ( + lock_owner_counter.maxvalue() <= 1 + ), "Maximal number of concurrent lock holders was %d. So exclusivity is broken." 
% ( + lock_owner_counter.maxvalue() ) assert ( exception_counter.value() == 0 diff --git a/src/borg/testsuite/nanorst.py b/src/borg/testsuite/nanorst.py index 22b3e98cee6..fad21d8428e 100644 --- a/src/borg/testsuite/nanorst.py +++ b/src/borg/testsuite/nanorst.py @@ -8,9 +8,7 @@ def test_inline(): def test_inline_spread(): - assert ( - rst_to_text("*foo and bar, thusly\nfoobar*.") == "foo and bar, thusly\nfoobar." - ) + assert rst_to_text("*foo and bar, thusly\nfoobar*.") == "foo and bar, thusly\nfoobar." def test_comment_inline(): @@ -26,10 +24,7 @@ def test_comment(): def test_directive_note(): - assert ( - rst_to_text(".. note::\n Note this and that") - == "Note:\n Note this and that" - ) + assert rst_to_text(".. note::\n Note this and that") == "Note:\n Note this and that" def test_ref(): diff --git a/src/borg/testsuite/nonces.py b/src/borg/testsuite/nonces.py index e56b7e2b422..f7227b1e5a5 100644 --- a/src/borg/testsuite/nonces.py +++ b/src/borg/testsuite/nonces.py @@ -30,23 +30,17 @@ def get_free_nonce(self): raise InvalidRPCMethod("") def commit_nonce_reservation(self, next_unreserved, start_nonce): - pytest.fail( - "commit_nonce_reservation should never be called on an old repository" - ) + pytest.fail("commit_nonce_reservation should never be called on an old repository") def setUp(self): self.repository = None def cache_nonce(self): - with open( - os.path.join(get_security_dir(self.repository.id_str), "nonce"), "r" - ) as fd: + with open(os.path.join(get_security_dir(self.repository.id_str), "nonce"), "r") as fd: return fd.read() def set_cache_nonce(self, nonce): - with open( - os.path.join(get_security_dir(self.repository.id_str), "nonce"), "w" - ) as fd: + with open(os.path.join(get_security_dir(self.repository.id_str), "nonce"), "w") as fd: assert fd.write(nonce) def test_empty_cache_and_old_server(self, monkeypatch): diff --git a/src/borg/testsuite/patterns.py b/src/borg/testsuite/patterns.py index 6e60722007c..3becd82c1ef 100644 --- a/src/borg/testsuite/patterns.py +++ b/src/borg/testsuite/patterns.py @@ -361,9 +361,7 @@ def _make_test_patterns(pattern): ] -@pytest.mark.parametrize( - "pattern", _make_test_patterns("b\N{LATIN SMALL LETTER A WITH ACUTE}") -) +@pytest.mark.parametrize("pattern", _make_test_patterns("b\N{LATIN SMALL LETTER A WITH ACUTE}")) def test_composed_unicode_pattern(pattern): assert pattern.match("b\N{LATIN SMALL LETTER A WITH ACUTE}/foo") assert pattern.match("ba\N{COMBINING ACUTE ACCENT}/foo") == use_normalized_unicode() @@ -371,10 +369,7 @@ def test_composed_unicode_pattern(pattern): @pytest.mark.parametrize("pattern", _make_test_patterns("ba\N{COMBINING ACUTE ACCENT}")) def test_decomposed_unicode_pattern(pattern): - assert ( - pattern.match("b\N{LATIN SMALL LETTER A WITH ACUTE}/foo") - == use_normalized_unicode() - ) + assert pattern.match("b\N{LATIN SMALL LETTER A WITH ACUTE}/foo") == use_normalized_unicode() assert pattern.match("ba\N{COMBINING ACUTE ACCENT}/foo") diff --git a/src/borg/testsuite/platform.py b/src/borg/testsuite/platform.py index d3f2ada66a3..ba7a5025409 100644 --- a/src/borg/testsuite/platform.py +++ b/src/borg/testsuite/platform.py @@ -105,9 +105,7 @@ def test_access_acl(self): ) self.assert_in(b"user:root:rw-:0", self.get_acl(file.name)["acl_access"]) self.assert_in(b"group:root:rw-:0", self.get_acl(file.name)["acl_access"]) - self.assert_in( - b"user:0:rw-:0", self.get_acl(file.name, numeric_ids=True)["acl_access"] - ) + self.assert_in(b"user:0:rw-:0", self.get_acl(file.name, numeric_ids=True)["acl_access"]) file2 = 
tempfile.NamedTemporaryFile() self.set_acl( file2.name, @@ -132,9 +130,7 @@ def test_non_ascii_acl(self): # but in practice they seem to be out there and must not make our code explode. file = tempfile.NamedTemporaryFile() self.assert_equal(self.get_acl(file.name), {}) - nothing_special = "user::rw-\ngroup::r--\nmask::rw-\nother::---\n".encode( - "ascii" - ) + nothing_special = "user::rw-\ngroup::r--\nmask::rw-\nother::---\n".encode("ascii") # TODO: can this be tested without having an existing system user übel with uid 666 gid 666? user_entry = "user:übel:rw-:666".encode("utf-8") user_entry_numeric = "user:666:rw-:666".encode("ascii") @@ -160,9 +156,7 @@ def test_non_ascii_acl(self): def test_utils(self): from ..platform.linux import acl_use_local_uid_gid - self.assert_equal( - acl_use_local_uid_gid(b"user:nonexistent1234:rw-:1234"), b"user:1234:rw-" - ) + self.assert_equal(acl_use_local_uid_gid(b"user:nonexistent1234:rw-:1234"), b"user:1234:rw-") self.assert_equal( acl_use_local_uid_gid(b"group:nonexistent1234:rw-:1234"), b"group:1234:rw-" ) @@ -221,9 +215,7 @@ def test_access_acl(self): ) -@unittest.skipUnless( - sys.platform.startswith(("linux", "freebsd", "darwin")), "POSIX only tests" -) +@unittest.skipUnless(sys.platform.startswith(("linux", "freebsd", "darwin")), "POSIX only tests") class PlatformPosixTestCase(BaseTestCase): def test_swidth_ascii(self): self.assert_equal(swidth("borg"), 4) diff --git a/src/borg/testsuite/remote.py b/src/borg/testsuite/remote.py index c621bcab919..f18f584b94b 100644 --- a/src/borg/testsuite/remote.py +++ b/src/borg/testsuite/remote.py @@ -73,9 +73,7 @@ class TestRepositoryCache: @pytest.fixture def repository(self, tmpdir): self.repository_location = os.path.join(str(tmpdir), "repository") - with Repository( - self.repository_location, exclusive=True, create=True - ) as repository: + with Repository(self.repository_location, exclusive=True, create=True) as repository: repository.put(H(1), b"1234") repository.put(H(2), b"5678") repository.put(H(3), bytes(100)) diff --git a/src/borg/testsuite/repository.py b/src/borg/testsuite/repository.py index cb767c0a471..44b9856a605 100644 --- a/src/borg/testsuite/repository.py +++ b/src/borg/testsuite/repository.py @@ -91,15 +91,11 @@ def test1(self): key50 = H(50) self.assert_equal(self.repository.get(key50), b"SOMEDATA") self.repository.delete(key50) - self.assert_raises( - Repository.ObjectNotFound, lambda: self.repository.get(key50) - ) + self.assert_raises(Repository.ObjectNotFound, lambda: self.repository.get(key50)) self.repository.commit(compact=False) self.repository.close() with self.open() as repository2: - self.assert_raises( - Repository.ObjectNotFound, lambda: repository2.get(key50) - ) + self.assert_raises(Repository.ObjectNotFound, lambda: repository2.get(key50)) for x in range(100): if x == 50: continue @@ -194,9 +190,7 @@ def test_max_data_size(self): max_data = b"x" * MAX_DATA_SIZE self.repository.put(H(0), max_data) self.assert_equal(self.repository.get(H(0)), max_data) - self.assert_raises( - IntegrityError, lambda: self.repository.put(H(1), max_data + b"x") - ) + self.assert_raises(IntegrityError, lambda: self.repository.put(H(1), max_data + b"x")) class LocalRepositoryTestCase(RepositoryTestCaseBase): @@ -238,9 +232,7 @@ def test_sparse_delete(self): assert self.repository.compact[0] == 41 + 41 + 4 + len(MAGIC) self.repository.commit(compact=True) - assert 0 not in [ - segment for segment, _ in self.repository.io.segment_iterator() - ] + assert 0 not in [segment for segment, _ in 
self.repository.io.segment_iterator()] def test_uncommitted_garbage(self): # uncommitted garbage should be no problem, it is cleaned up automatically. @@ -301,9 +293,7 @@ def test_replay_lock_upgrade_old(self): if name.startswith("index."): os.unlink(os.path.join(self.repository.path, name)) with patch.object(Lock, "upgrade", side_effect=LockFailed) as upgrade: - self.reopen( - exclusive=None - ) # simulate old client that always does lock upgrades + self.reopen(exclusive=None) # simulate old client that always does lock upgrades with self.repository: # the repo is only locked by a shared read lock, but to replay segments, # we need an exclusive write lock - check if the lock gets upgraded. @@ -415,9 +405,7 @@ def test_shadow_index_rollback(self): self.repository.commit(compact=True) self.repo_dump("p1 d1 cc") # note how an empty list means that nothing is shadowed for sure - assert ( - self.repository.shadow_index[H(1)] == [] - ) # because the delete is considered unstable + assert self.repository.shadow_index[H(1)] == [] # because the delete is considered unstable self.repository.put(H(1), b"1") self.repository.delete(H(1)) self.repo_dump("p1 d1") @@ -427,9 +415,7 @@ def test_shadow_index_rollback(self): self.repo_dump("r") self.repository.put(H(2), b"1") # After the rollback segment 4 shouldn't be considered anymore - assert ( - self.repository.shadow_index[H(1)] == [] - ) # because the delete is considered unstable + assert self.repository.shadow_index[H(1)] == [] # because the delete is considered unstable class RepositoryAppendOnlyTestCase(RepositoryTestCaseBase): @@ -777,9 +763,7 @@ def open_index(self): def corrupt_object(self, id_): idx = self.open_index() segment, offset = idx[H(id_)] - with open( - os.path.join(self.tmppath, "repository", "data", "0", str(segment)), "r+b" - ) as fd: + with open(os.path.join(self.tmppath, "repository", "data", "0", str(segment)), "r+b") as fd: fd.seek(offset) fd.write(b"BOOM") @@ -787,15 +771,11 @@ def delete_segment(self, segment): os.unlink(os.path.join(self.tmppath, "repository", "data", "0", str(segment))) def delete_index(self): - os.unlink( - os.path.join(self.tmppath, "repository", "index.{}".format(self.get_head())) - ) + os.unlink(os.path.join(self.tmppath, "repository", "index.{}".format(self.get_head()))) def rename_index(self, new_name): os.rename( - os.path.join( - self.tmppath, "repository", "index.{}".format(self.get_head()) - ), + os.path.join(self.tmppath, "repository", "index.{}".format(self.get_head())), os.path.join(self.tmppath, "repository", new_name), ) @@ -835,9 +815,7 @@ def test_repair_missing_commit_segment(self): def test_repair_corrupted_commit_segment(self): self.add_objects([[1, 2, 3], [4, 5, 6]]) - with open( - os.path.join(self.tmppath, "repository", "data", "0", "3"), "r+b" - ) as fd: + with open(os.path.join(self.tmppath, "repository", "data", "0", "3"), "r+b") as fd: fd.seek(-1, os.SEEK_END) fd.write(b"X") self.assert_raises(Repository.ObjectNotFound, lambda: self.get_objects(4)) @@ -847,9 +825,7 @@ def test_repair_corrupted_commit_segment(self): def test_repair_no_commits(self): self.add_objects([[1, 2, 3]]) - with open( - os.path.join(self.tmppath, "repository", "data", "0", "1"), "r+b" - ) as fd: + with open(os.path.join(self.tmppath, "repository", "data", "0", "1"), "r+b") as fd: fd.seek(-1, os.SEEK_END) fd.write(b"X") self.assert_raises(Repository.CheckNeeded, lambda: self.get_objects(4)) @@ -910,9 +886,7 @@ def test_hints_persistence(self): # check if hints persistence worked: 
self.assert_equal(shadow_index_expected, self.repository.shadow_index) self.assert_equal(compact_expected, self.repository.compact) - del self.repository.segments[ - 2 - ] # ignore the segment created by put(H(42), ...) + del self.repository.segments[2] # ignore the segment created by put(H(42), ...) self.assert_equal(segments_expected, self.repository.segments) def test_hints_behaviour(self): @@ -958,9 +932,7 @@ def __contains__(self, item): return MockArgs() def test_invalid_rpc(self): - self.assert_raises( - InvalidRPCMethod, lambda: self.repository.call("__init__", {}) - ) + self.assert_raises(InvalidRPCMethod, lambda: self.repository.call("__init__", {})) def test_rpc_exception_transport(self): s1 = "test string" @@ -1012,10 +984,7 @@ def test_rpc_exception_transport(self): self.repository.call("inject_exception", {"kind": "divide"}) except RemoteRepository.RPCError as e: assert e.unpacked - assert ( - e.get_message() - == "ZeroDivisionError: integer division or modulo by zero\n" - ) + assert e.get_message() == "ZeroDivisionError: integer division or modulo by zero\n" assert e.exception_class == "ZeroDivisionError" assert len(e.exception_full) > 0 @@ -1160,17 +1129,13 @@ def test_stderr_messages(self): handle_remote_line("unstructured stderr message\n") self.assert_equal(self.stream.getvalue(), "") # stderr messages don't get an implicit newline - self.assert_equal( - self.stderr.getvalue(), "Remote: unstructured stderr message\n" - ) + self.assert_equal(self.stderr.getvalue(), "Remote: unstructured stderr message\n") def test_stderr_progress_messages(self): handle_remote_line("unstructured stderr progress message\r") self.assert_equal(self.stream.getvalue(), "") # stderr messages don't get an implicit newline - self.assert_equal( - self.stderr.getvalue(), "Remote: unstructured stderr progress message\r" - ) + self.assert_equal(self.stderr.getvalue(), "Remote: unstructured stderr progress message\r") def test_pre11_format_messages(self): self.handler.setLevel(logging.DEBUG) @@ -1184,12 +1149,8 @@ def test_post11_format_messages(self): self.handler.setLevel(logging.DEBUG) logging.getLogger().setLevel(logging.DEBUG) - handle_remote_line( - "$LOG INFO borg.repository Remote: borg >= 1.1 format message\n" - ) - self.assert_equal( - self.stream.getvalue(), "Remote: borg >= 1.1 format message\n" - ) + handle_remote_line("$LOG INFO borg.repository Remote: borg >= 1.1 format message\n") + self.assert_equal(self.stream.getvalue(), "Remote: borg >= 1.1 format message\n") self.assert_equal(self.stderr.getvalue(), "") def test_remote_messages_screened(self): @@ -1197,9 +1158,7 @@ def test_remote_messages_screened(self): self.handler.setLevel(logging.WARNING) logging.getLogger().setLevel(logging.WARNING) - handle_remote_line( - "$LOG INFO borg.repository Remote: new format info message\n" - ) + handle_remote_line("$LOG INFO borg.repository Remote: new format info message\n") self.assert_equal(self.stream.getvalue(), "") self.assert_equal(self.stderr.getvalue(), "") @@ -1219,9 +1178,7 @@ def test_info_to_correct_local_child(self): foo_handler.setLevel(logging.INFO) logging.getLogger("borg.repository.foo").handlers[:] = [foo_handler] - handle_remote_line( - "$LOG INFO borg.repository Remote: new format child message\n" - ) + handle_remote_line("$LOG INFO borg.repository Remote: new format child message\n") self.assert_equal(foo_stream.getvalue(), "") self.assert_equal(child_stream.getvalue(), "Remote: new format child message\n") self.assert_equal(self.stream.getvalue(), "") diff --git 
a/src/borg/testsuite/xattr.py b/src/borg/testsuite/xattr.py index c22baa5b683..5405dd3114e 100644 --- a/src/borg/testsuite/xattr.py +++ b/src/borg/testsuite/xattr.py @@ -39,12 +39,8 @@ def test(self): if not is_linux: # linux does not allow setting user.* xattrs on symlinks setxattr(tmp_lfn, b"user.linkxattr", b"baz") - self.assert_equal_se( - listxattr(tmp_fn), [b"user.foo", b"user.bar", b"user.empty"] - ) - self.assert_equal_se( - listxattr(tmp_fd), [b"user.foo", b"user.bar", b"user.empty"] - ) + self.assert_equal_se(listxattr(tmp_fn), [b"user.foo", b"user.bar", b"user.empty"]) + self.assert_equal_se(listxattr(tmp_fd), [b"user.foo", b"user.bar", b"user.empty"]) self.assert_equal_se( listxattr(tmp_lfn, follow_symlinks=True), [b"user.foo", b"user.bar", b"user.empty"], diff --git a/src/borg/upgrader.py b/src/borg/upgrader.py index 0e9308e7108..bd3ed005628 100644 --- a/src/borg/upgrader.py +++ b/src/borg/upgrader.py @@ -44,9 +44,7 @@ def upgrade(self, dryrun=True, inplace=False, progress=False): shutil.copytree(self.path, backup, copy_function=os.link) logger.info("opening attic repository with borg and converting") # now lock the repo, after we have made the copy - self.lock = Lock( - os.path.join(self.path, "lock"), exclusive=True, timeout=1.0 - ).acquire() + self.lock = Lock(os.path.join(self.path, "lock"), exclusive=True, timeout=1.0).acquire() segments = [filename for i, filename in self.io.segment_iterator()] try: keyfile = self.find_attic_keyfile() @@ -59,9 +57,7 @@ def upgrade(self, dryrun=True, inplace=False, progress=False): try: self.convert_cache(dryrun) self.convert_repo_index(dryrun=dryrun, inplace=inplace) - self.convert_segments( - segments, dryrun=dryrun, inplace=inplace, progress=progress - ) + self.convert_segments(segments, dryrun=dryrun, inplace=inplace, progress=progress) self.borg_readme() finally: self.lock.release() @@ -231,10 +227,7 @@ def copy_cache_file(path): borg_file, ) else: - logger.info( - "copying attic cache file from %s to %s" - % (attic_file, borg_file) - ) + logger.info("copying attic cache file from %s to %s" % (attic_file, borg_file)) if not dryrun: shutil.copyfile(attic_file, borg_file) return borg_file @@ -257,9 +250,7 @@ def copy_cache_file(path): cache = copy_cache_file(cache) logger.info("converting cache %s" % cache) if not dryrun: - AtticRepositoryUpgrader.header_replace( - cache, b"ATTICIDX", b"BORG_IDX" - ) + AtticRepositoryUpgrader.header_replace(cache, b"ATTICIDX", b"BORG_IDX") class AtticKeyfileKey(KeyfileKey): @@ -271,9 +262,7 @@ class AtticKeyfileKey(KeyfileKey): @staticmethod def get_keys_dir(): """Determine where to repository keys and cache""" - return os.environ.get( - "ATTIC_KEYS_DIR", os.path.join(get_base_dir(), ".attic", "keys") - ) + return os.environ.get("ATTIC_KEYS_DIR", os.path.join(get_base_dir(), ".attic", "keys")) @classmethod def find_key_file(cls, repository): @@ -296,11 +285,7 @@ def find_key_file(cls, repository): filename = os.path.join(keys_dir, name) with open(filename, "r") as fd: line = fd.readline().strip() - if ( - line - and line.startswith(cls.FILE_ID) - and line[10:] == repository.id_str - ): + if line and line.startswith(cls.FILE_ID) and line[10:] == repository.id_str: return filename raise KeyfileNotFoundError(repository.path, keys_dir) @@ -335,9 +320,7 @@ class Borg0xxKeyfileKey(KeyfileKey): @staticmethod def get_keys_dir(): - return os.environ.get( - "BORG_KEYS_DIR", os.path.join(get_base_dir(), ".borg", "keys") - ) + return os.environ.get("BORG_KEYS_DIR", os.path.join(get_base_dir(), ".borg", "keys")) 
@classmethod def find_key_file(cls, repository): diff --git a/src/borg/xattr.py b/src/borg/xattr.py index 5b499b1cda7..bd473ba2ce5 100644 --- a/src/borg/xattr.py +++ b/src/borg/xattr.py @@ -29,9 +29,7 @@ if preload.startswith("libfakeroot"): env = prepare_subprocess_env(system=True) fakeroot_output = subprocess.check_output(["fakeroot", "-v"], env=env) - fakeroot_version = parse_version( - fakeroot_output.decode("ascii").split()[-1] - ) + fakeroot_version = parse_version(fakeroot_output.decode("ascii").split()[-1]) if fakeroot_version >= parse_version("1.20.2"): # 1.20.2 has been confirmed to have xattr support # 1.18.2 has been confirmed not to have xattr support @@ -79,9 +77,7 @@ def get_all(path, follow_symlinks=False): # xattr name is a bytes object, we directly use it. # if we get an empty xattr value (b''), we store None into the result dict - # borg always did it like that... - result[name] = ( - getxattr(path, name, follow_symlinks=follow_symlinks) or None - ) + result[name] = getxattr(path, name, follow_symlinks=follow_symlinks) or None except OSError as e: name_str = name.decode() if isinstance(path, int): @@ -150,7 +146,5 @@ def set_all(path, xattrs, follow_symlinks=False): # EACCES: permission denied to set this specific xattr (this may happen related to security.* keys) # EPERM: operation not permitted err_str = os.strerror(e.errno) - logger.warning( - "%s: when setting extended attribute %s: %s", path_str, k_str, err_str - ) + logger.warning("%s: when setting extended attribute %s: %s", path_str, k_str, err_str) return warning
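
Every hunk above applies the same mechanical rewrite: an expression that had been wrapped across several lines is joined back onto a single line whenever the joined form fits within the 100-column limit. A minimal sketch of reproducing one such rewrite programmatically follows; it is illustrative only, not part of the patch, and assumes the black package is installed and exposes the format_str()/Mode API of recent releases.

    import black

    # One of the wrapped statements removed above, reproduced as a source string.
    WRAPPED = (
        "assert datetime.utcfromtimestamp(safe_ns(beyond_y10k) / 1000000000) > datetime(\n"
        "    2262, 1, 1\n"
        ")\n"
    )

    # At a 100-column limit the joined statement (90 characters) fits, so black collapses
    # it onto one line; at the default 88 columns it would stay wrapped.
    print(black.format_str(WRAPPED, mode=black.Mode(line_length=100)))
    # Expected output (subject to the installed black version):
    # assert datetime.utcfromtimestamp(safe_ns(beyond_y10k) / 1000000000) > datetime(2262, 1, 1)

The names safe_ns, beyond_y10k and datetime are only parsed by black, never executed, so the snippet runs without importing them.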