From 4cca621ac4fb4347c5703604a2c2b54c7690cc70 Mon Sep 17 00:00:00 2001
From: Alan Rominger
Date: Fri, 10 Dec 2021 09:09:09 -0500
Subject: [PATCH] Add --ident option to process

This will allow users to write artifacts in a way that matches
normal, non-remote ansible-runner commands.

It makes the most sense to have the process --ident match the --ident
value passed to transmit, but this is not required.

The intention is that this will be used by AWX so that jobs run and
processed on the same node will not write artifacts twice.
---
 ansible_runner/__main__.py                  |  8 ++++
 ansible_runner/streaming.py                 | 11 ++++--
 docs/remote_jobs.rst                        | 11 ++++++
 .../test_transmit_worker_process.py         | 39 +++++++++++++++++--
 4 files changed, 62 insertions(+), 7 deletions(-)

diff --git a/ansible_runner/__main__.py b/ansible_runner/__main__.py
index f072b40bc..0413f9f50 100644
--- a/ansible_runner/__main__.py
+++ b/ansible_runner/__main__.py
@@ -656,6 +656,14 @@ def main(sys_args=None):
         help="Receive the output of remote ansible-runner work and distribute the results"
     )
     add_args_to_parser(process_subparser, DEFAULT_CLI_ARGS['positional_args'])
+    process_subparser.add_argument(
+        "-i", "--ident",
+        default=None,
+        help=(
+            "An identifier to use as a subdirectory when saving artifacts. "
+            "Generally intended to match the --ident passed to the transmit command."
+        )
+    )
 
     # generic args for all subparsers
     add_args_to_parser(run_subparser, DEFAULT_CLI_ARGS['generic_args'])
diff --git a/ansible_runner/streaming.py b/ansible_runner/streaming.py
index 475759d25..1a64be740 100644
--- a/ansible_runner/streaming.py
+++ b/ansible_runner/streaming.py
@@ -178,9 +178,14 @@ def __init__(self, _input=None, status_handler=None, event_handler=None,
             settings = {}
         self.config = MockConfig(settings)
 
-        artifact_dir = kwargs.get('artifact_dir')
-        self.artifact_dir = os.path.abspath(
-            artifact_dir or os.path.join(self.private_data_dir, 'artifacts'))
+        if kwargs.get('artifact_dir'):
+            self.artifact_dir = os.path.abspath(kwargs.get('artifact_dir'))
+        else:
+            project_artifacts = os.path.abspath(os.path.join(self.private_data_dir, 'artifacts'))
+            if kwargs.get('ident'):
+                self.artifact_dir = os.path.join(project_artifacts, "{}".format(kwargs.get('ident')))
+            else:
+                self.artifact_dir = project_artifacts
 
         self.status_handler = status_handler
         self.event_handler = event_handler
diff --git a/docs/remote_jobs.rst b/docs/remote_jobs.rst
index 7e9c26374..a4c031ece 100644
--- a/docs/remote_jobs.rst
+++ b/docs/remote_jobs.rst
@@ -55,6 +55,17 @@ There is otherwise no automatic cleanup of images used by a run,
 even if ``container_auth_data`` is used to pull from a private container registry.
 To be sure that layers are deleted as well, the ``--image-prune`` flag is necessary.
 
+Artifact Directory Specification
+--------------------------------
+
+The ``worker`` command does not write artifacts; they are streamed instead,
+and the ``process`` command is what ultimately writes the contents of the artifacts folder.
+
+By default, ``ansible-runner process ./demo`` writes artifacts to ``./demo/artifacts``.
+To better align with normal ansible-runner use, you can pass the ``--ident`` option
+to save artifacts to a subfolder, so ``ansible-runner process ./demo --ident=43``
+would extract artifacts to the folder ``./demo/artifacts/43``.
+
 Python API
 ----------
 
diff --git a/test/integration/test_transmit_worker_process.py b/test/integration/test_transmit_worker_process.py
index c325c8011..cabc3c08e 100644
--- a/test/integration/test_transmit_worker_process.py
+++ b/test/integration/test_transmit_worker_process.py
@@ -169,7 +169,7 @@ def process_method(results_sockfile_read):
         self.check_artifacts(str(process_dir), job_type)
 
 
-@pytest.fixture()
+@pytest.fixture
 def transmit_stream(project_fixtures, tmp_path):
     outgoing_buffer = tmp_path / 'buffer'
     outgoing_buffer.touch()
@@ -179,9 +179,26 @@ def transmit_stream(project_fixtures, tmp_path):
         transmitter = Transmitter(_output=f, private_data_dir=transmit_dir, playbook='debug.yml')
         status, rc = transmitter.run()
 
-        assert rc in (None, 0)
-        assert status == 'unstarted'
-        return outgoing_buffer
+    assert rc in (None, 0)
+    assert status == 'unstarted'
+    return outgoing_buffer
+
+
+@pytest.fixture
+def worker_stream(transmit_stream, tmp_path):
+    ingoing_buffer = tmp_path / 'buffer2'  # basically how some demos work
+    ingoing_buffer.touch()
+
+    worker_dir = tmp_path / 'worker_dir'
+    worker_dir.mkdir()
+    with transmit_stream.open('rb') as out:
+        with ingoing_buffer.open('wb') as f:
+            worker = Worker(_input=out, _output=f, private_data_dir=worker_dir)
+            status, rc = worker.run()
+
+    assert rc in (None, 0)
+    assert status == 'successful'
+    return ingoing_buffer
 
 
 def test_worker_without_delete_no_dir(tmp_path, cli, transmit_stream):
@@ -247,6 +264,20 @@ def test_worker_delete_dir_exists(tmp_path, cli, transmit_stream):
     assert not worker_dir.joinpath('project', 'debug.yml').exists()
 
 
+def test_process_with_custom_ident(tmp_path, cli, worker_stream):
+    process_dir = tmp_path / 'for_process'
+    process_dir.mkdir()
+
+    with open(worker_stream, 'rb') as f:
+        process_args = ['process', str(process_dir), '--ident', 'custom_ident']
+        r = cli(process_args, stdin=f)
+
+    assert 'Hello world!' in r.stdout
+    assert (process_dir / 'artifacts').exists()
+    assert (process_dir / 'artifacts' / 'custom_ident').exists()
+    assert (process_dir / 'artifacts' / 'custom_ident' / 'job_events').exists()
+
+
 def test_missing_private_dir_transmit(tmpdir):
     outgoing_buffer = io.BytesIO()
 
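The sketch below is illustrative only and not part of the patch. It mirrors the transmit -> worker -> process flow exercised by the new test, but drives it through the Python API; the paths and playbook name are placeholders, and it assumes ansible_runner.interface.run() forwards the ident keyword through to the Processor modified above.

# Illustrative sketch only (not part of the patch). Paths and the playbook
# name are placeholders; this assumes ansible_runner.interface.run()
# forwards the ``ident`` keyword through to the Processor changed above.
import io
import os

import ansible_runner

demo_dir = '/tmp/demo'          # placeholder private data dir containing project/debug.yml
worker_dir = '/tmp/worker_dir'  # scratch dir for the worker side
os.makedirs(worker_dir, exist_ok=True)

transmit_buffer = io.BytesIO()  # stands in for the transmit -> worker stream
result_buffer = io.BytesIO()    # stands in for the worker -> process stream

# Serialize the job and its inputs.
ansible_runner.interface.run(
    streamer='transmit', _output=transmit_buffer,
    private_data_dir=demo_dir, playbook='debug.yml', ident='43')

# Execute the job from the streamed payload (normally on a remote node).
transmit_buffer.seek(0)
ansible_runner.interface.run(
    streamer='worker', _input=transmit_buffer, _output=result_buffer,
    private_data_dir=worker_dir)

# Reconstruct artifacts; with this patch, passing the same ident writes
# events under /tmp/demo/artifacts/43 instead of /tmp/demo/artifacts,
# matching what a plain, non-remote run with --ident would produce.
result_buffer.seek(0)
processor = ansible_runner.interface.run(
    streamer='process', _input=result_buffer,
    private_data_dir=demo_dir, ident='43')
print(processor.status)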