diff --git a/test/conftest.py b/test/conftest.py
index 9cc5cfc97..5757d11e2 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -1,5 +1,6 @@
 import pytest
 from distutils.version import LooseVersion
+import subprocess
 import pkg_resources
 import os
 
@@ -9,22 +10,49 @@ def mock_env_user(monkeypatch):
     monkeypatch.setenv("ANSIBLE_DEVEL_WARNING", "False")
 
 
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
 def is_pre_ansible28():
     try:
-        if LooseVersion(pkg_resources.get_distribution('ansible').version) < LooseVersion('2.8'):
+        if LooseVersion(
+            pkg_resources.get_distribution("ansible").version
+        ) < LooseVersion("2.8"):
             return True
     except pkg_resources.DistributionNotFound:
         # ansible-base (e.g. ansible 2.10 and beyond) is not accessible in this way
         pass
 
 
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
+def is_pre_ansible212():
+    try:
+        base_version = (
+            subprocess.run(
+                "python -c 'import ansible; print(ansible.__version__)'",
+                capture_output=True,
+                shell=True,
+            )
+            .stdout.strip()
+            .decode()
+        )
+        if LooseVersion(base_version) < LooseVersion("2.12"):
+            return True
+    except pkg_resources.DistributionNotFound:
+        # ansible-base (e.g. ansible 2.10 and beyond) is not accessible in this way
+        pass
+
+
+@pytest.fixture(scope="session")
 def skipif_pre_ansible28(is_pre_ansible28):
     if is_pre_ansible28:
         pytest.skip("Valid only on Ansible 2.8+")
 
 
+@pytest.fixture(scope="session")
+def skipif_pre_ansible212(is_pre_ansible212):
+    if is_pre_ansible212:
+        pytest.skip("Valid only on Ansible 2.12+")
+
+
 @pytest.fixture
 def test_data_dir():
-    return os.path.join(os.path.dirname(__file__), 'data')
+    return os.path.join(os.path.dirname(__file__), "data")
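The new `is_pre_ansible212` fixture shells out to a child interpreter because ansible-core does not register a distribution that `pkg_resources.get_distribution('ansible')` can find (the same limitation noted in the `is_pre_ansible28` comment), so the version is read by printing `ansible.__version__`. One caveat: the `except pkg_resources.DistributionNotFound` guard carried over from the 2.8 fixture can never fire on a `subprocess.run()` call. A minimal alternative sketch, assuming the tests run in the same interpreter as ansible-core; the name `is_pre_ansible212_alt` is illustrative and not part of this patch:

    from distutils.version import LooseVersion

    def is_pre_ansible212_alt():
        # Sketch only: read the version via a direct import rather than a
        # subprocess round-trip; treat a missing install as "not pre-2.12".
        try:
            import ansible
            return LooseVersion(ansible.__version__) < LooseVersion("2.12")
        except ImportError:
            return False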
diff --git a/test/integration/test_display_callback.py b/test/integration/test_display_callback.py
index 3bbee1f63..bfa0c4a9b 100644
--- a/test/integration/test_display_callback.py
+++ b/test/integration/test_display_callback.py
@@ -16,39 +16,48 @@
 @pytest.fixture()
 def executor(tmpdir, request, is_pre_ansible28):
-    private_data_dir = six.text_type(tmpdir.mkdir('foo'))
+    private_data_dir = six.text_type(tmpdir.mkdir("foo"))
 
-    playbooks = request.node.callspec.params.get('playbook')
+    playbooks = request.node.callspec.params.get("playbook")
     playbook = list(playbooks.values())[0]
-    envvars = request.node.callspec.params.get('envvars')
+    envvars = request.node.callspec.params.get("envvars")
     if envvars is None:
         envvars = {}
 
     # warning messages create verbose events and interfere with assertions
     envvars["ANSIBLE_DEPRECATION_WARNINGS"] = "False"
 
     # python interpreter used is not of much interest, we really want to silence warnings
-    envvars['ANSIBLE_PYTHON_INTERPRETER'] = 'auto_silent'
+    envvars["ANSIBLE_PYTHON_INTERPRETER"] = "auto_silent"
 
     if is_pre_ansible28:
         inventory = 'localhost ansible_connection=local ansible_python_interpreter="/usr/bin/env python"'
     else:
-        inventory = 'localhost ansible_connection=local'
+        inventory = "localhost ansible_connection=local"
 
     r = init_runner(
         private_data_dir=private_data_dir,
         inventory=inventory,
         envvars=envvars,
-        playbook=yaml.safe_load(playbook)
+        playbook=yaml.safe_load(playbook),
     )
 
     return r
 
 
-@pytest.mark.parametrize('event', ['playbook_on_start',
-                                   'playbook_on_play_start',
-                                   'playbook_on_task_start', 'runner_on_ok',
-                                   'playbook_on_stats'])
-@pytest.mark.parametrize('playbook', [
-{'helloworld.yml': '''
+@pytest.mark.parametrize(
+    "event",
+    [
+        "playbook_on_start",
+        "playbook_on_play_start",
+        "playbook_on_task_start",
+        "runner_on_ok",
+        "playbook_on_stats",
+    ],
+)
+@pytest.mark.parametrize(
+    "playbook",
+    [
+        {
+            "helloworld.yml": """
 - name: Hello World Sample
   connection: local
   hosts: all
@@ -57,8 +66,10 @@ def executor(tmpdir, request, is_pre_ansible28):
     - name: Hello Message
       debug:
         msg: "Hello World!"
-'''}, # noqa
-{'results_included.yml': '''
+"""
+        },  # noqa
+        {
+            "results_included.yml": """
 - name: Run module which generates results list
   connection: local
   hosts: all
@@ -69,21 +80,30 @@ def executor(tmpdir, request, is_pre_ansible28):
     - name: Generate results list
       debug:
         var: results
-'''} # noqa
-], ids=['helloworld.yml', 'results_included.yml'])
-@pytest.mark.parametrize('envvars', [
-    {'ANSIBLE_CALLBACK_PLUGINS': os.path.join(HERE, 'callback')},
-    {'ANSIBLE_CALLBACK_PLUGINS': ''}],
-    ids=['local-callback-plugin', 'no-callback-plugin']
+"""
+        },  # noqa
+    ],
+    ids=["helloworld.yml", "results_included.yml"],
+)
+@pytest.mark.parametrize(
+    "envvars",
+    [
+        {"ANSIBLE_CALLBACK_PLUGINS": os.path.join(HERE, "callback")},
+        {"ANSIBLE_CALLBACK_PLUGINS": ""},
+    ],
+    ids=["local-callback-plugin", "no-callback-plugin"],
 )
 def test_callback_plugin_receives_events(executor, event, playbook, envvars):
     executor.run()
     assert len(list(executor.events))
-    assert event in [task['event'] for task in executor.events]
+    assert event in [task["event"] for task in executor.events]
 
 
-@pytest.mark.parametrize('playbook', [
-{'no_log_on_ok.yml': '''
+@pytest.mark.parametrize(
+    "playbook",
+    [
+        {
+            "no_log_on_ok.yml": """
 - name: args should not be logged when task-level no_log is set
   connection: local
   hosts: all
@@ -91,8 +111,10 @@ def test_callback_plugin_receives_events(executor, event, playbook, envvars):
   tasks:
     - shell: echo "SENSITIVE"
       no_log: true
-'''}, # noqa
-{'no_log_on_fail.yml': '''
+"""
+        },  # noqa
+        {
+            "no_log_on_fail.yml": """
 - name: failed args should not be logged when task-level no_log is set
   connection: local
   hosts: all
@@ -102,8 +124,10 @@ def test_callback_plugin_receives_events(executor, event, playbook, envvars):
       no_log: true
       failed_when: true
       ignore_errors: true
-'''}, # noqa
-{'no_log_on_skip.yml': '''
+"""
+        },  # noqa
+        {
+            "no_log_on_skip.yml": """
 - name: skipped task args should be suppressed with no_log
   connection: local
   hosts: all
@@ -112,8 +136,10 @@ def test_callback_plugin_receives_events(executor, event, playbook, envvars):
     - shell: echo "SENSITIVE"
       no_log: true
       when: false
-'''}, # noqa
-{'no_log_on_play.yml': '''
+"""
+        },  # noqa
+        {
+            "no_log_on_play.yml": """
 - name: args should not be logged when play-level no_log set
   connection: local
   hosts: all
@@ -121,8 +147,10 @@ def test_callback_plugin_receives_events(executor, event, playbook, envvars):
   no_log: true
   tasks:
     - shell: echo "SENSITIVE"
-'''}, # noqa
-{'async_no_log.yml': '''
+"""
+        },  # noqa
+        {
+            "async_no_log.yml": """
 - name: async task args should suppressed with no_log
   connection: local
   hosts: all
@@ -133,8 +161,10 @@ def test_callback_plugin_receives_events(executor, event, playbook, envvars):
       poll: 1
       shell: echo "SENSITIVE"
       no_log: true
-'''}, # noqa
-{'with_items.yml': '''
+"""
+        },  # noqa
+        {
+            "with_items.yml": """
 - name: with_items tasks should be suppressed with no_log
   connection: local
   hosts: all
@@ -146,8 +176,10 @@ def test_callback_plugin_receives_events(executor, event, playbook, envvars):
       when: item != "SENSITIVE-SKIPPED"
       failed_when: item == "SENSITIVE-FAILED"
      ignore_errors: yes
-'''}, # noqa, NOTE: with_items will be deprecated in 2.9
-{'loop.yml': '''
+"""
+        },  # noqa, NOTE: with_items will be deprecated in 2.9
+        {
+            "loop.yml": """
 - name: loop tasks should be suppressed with no_log
   connection: local
   hosts: all
@@ -159,16 +191,21 @@ def test_callback_plugin_receives_events(executor, event, playbook, envvars):
       when: item != "SENSITIVE-SKIPPED"
       failed_when: item == "SENSITIVE-FAILED"
       ignore_errors: yes
-'''}, # noqa
-])
+"""
+        },  # noqa
+    ],
+)
 def test_callback_plugin_no_log_filters(executor, playbook):
     executor.run()
     assert len(list(executor.events))
-    assert 'SENSITIVE' not in json.dumps(list(executor.events))
+    assert "SENSITIVE" not in json.dumps(list(executor.events))
 
 
-@pytest.mark.parametrize('playbook', [
-{'no_log_on_ok.yml': '''
+@pytest.mark.parametrize(
+    "playbook",
+    [
+        {
+            "no_log_on_ok.yml": """
 - name: args should not be logged when no_log is set at the task or module level
   connection: local
   hosts: all
@@ -178,32 +215,68 @@ def test_callback_plugin_no_log_filters(executor, playbook):
     - shell: echo "PRIVATE"
       no_log: true
     - uri: url=https://example.org url_username="PUBLIC" url_password="PRIVATE"
-'''}, # noqa
-])
+"""
+        },  # noqa
+    ],
+)
 def test_callback_plugin_task_args_leak(executor, playbook, skipif_pre_ansible28):
     executor.run()
     events = list(executor.events)
-    assert events[0]['event'] == 'playbook_on_start'
-    assert events[1]['event'] == 'playbook_on_play_start'
+    assert events[0]["event"] == "playbook_on_start"
+    assert events[1]["event"] == "playbook_on_play_start"
 
     # task 1
-    assert events[2]['event'] == 'playbook_on_task_start'
-    assert events[3]['event'] == 'runner_on_start'
-    assert events[4]['event'] == 'runner_on_ok'
+    assert events[2]["event"] == "playbook_on_task_start"
+    assert events[3]["event"] == "runner_on_start"
+    assert events[4]["event"] == "runner_on_ok"
 
     # task 2 no_log=True
-    assert events[5]['event'] == 'playbook_on_task_start'
-    assert events[6]['event'] == 'runner_on_start'
-    assert events[7]['event'] == 'runner_on_ok'
-    assert 'PUBLIC' in json.dumps(events), events
+    assert events[5]["event"] == "playbook_on_task_start"
+    assert events[6]["event"] == "runner_on_start"
+    assert events[7]["event"] == "runner_on_ok"
+    assert "PUBLIC" in json.dumps(events), events
     for event in events:
-        assert 'PRIVATE' not in json.dumps(event), event
+        assert "PRIVATE" not in json.dumps(event), event
     # make sure playbook was successful, so all tasks were hit
-    assert not events[-1]['event_data']['failures'], 'Unexpected playbook execution failure'
+    assert not events[-1]["event_data"][
+        "failures"
+    ], "Unexpected playbook execution failure"
+
+    # assert "resolved_action" in event_types
+    # if not is_pre_ansible212:
+    #     assert len("resolved_action") > 0
+
+
+@pytest.mark.parametrize(
+    "playbook",
+    [
+        {
+            "simple.yml": """
+- name: simpletask
+  connection: local
+  hosts: all
+  gather_facts: no
+  tasks:
+    - shell: echo "resolved actions test!"
+"""
+        },  # noqa
+    ],
+)
+def test_resolved_actions(executor, playbook, skipif_pre_ansible212):
+    executor.run()
+    events = list(executor.events)
-@pytest.mark.parametrize('playbook', [
-{'loop_with_no_log.yml': '''
+
+    # task 1
+    assert events[2]["event"] == "playbook_on_task_start"
+    assert "resolved_action" in events[2]["event_data"]
+    assert events[2]["event_data"]["resolved_action"] == "ansible.builtin.shell"
+
+
+@pytest.mark.parametrize(
+    "playbook",
+    [
+        {
+            "loop_with_no_log.yml": """
 - name: playbook variable should not be overwritten when using no log
   connection: local
   hosts: all
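The new `test_resolved_actions` above exercises the `resolved_action` field that runner now records on `playbook_on_task_start` events under ansible-core 2.12+, carrying the fully-qualified name of the action each task resolved to. A minimal consumer sketch, assuming a hypothetical `demo` private data dir and `site.yml` playbook:

    import ansible_runner

    r = ansible_runner.run(private_data_dir="demo", playbook="site.yml")
    # Collect the fully-qualified action behind each task,
    # e.g. "ansible.builtin.shell".
    resolved = [
        e["event_data"]["resolved_action"]
        for e in r.events
        if e["event"] == "playbook_on_task_start"
        and "resolved_action" in e.get("event_data", {})
    ]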
- debug: msg="{{ command_register.results|map(attribute='stdout')|list }}" -'''}, # noqa -]) -def test_callback_plugin_censoring_does_not_overwrite(executor, playbook, skipif_pre_ansible28): +""" + }, # noqa + ], +) +def test_callback_plugin_censoring_does_not_overwrite( + executor, playbook, skipif_pre_ansible28 +): executor.run() events = list(executor.events) - assert events[0]['event'] == 'playbook_on_start' - assert events[1]['event'] == 'playbook_on_play_start' + assert events[0]["event"] == "playbook_on_start" + assert events[1]["event"] == "playbook_on_play_start" # task 1 - assert events[2]['event'] == 'playbook_on_task_start' + assert events[2]["event"] == "playbook_on_task_start" # Ordering of task and item events may differ randomly - assert set(['runner_on_start', 'runner_item_on_ok', 'runner_on_ok']) == set([data['event'] for data in events[3:6]]) + assert set(["runner_on_start", "runner_item_on_ok", "runner_on_ok"]) == set( + [data["event"] for data in events[3:6]] + ) # task 2 no_log=True - assert events[6]['event'] == 'playbook_on_task_start' - assert events[7]['event'] == 'runner_on_start' - assert events[8]['event'] == 'runner_on_ok' - assert 'helloworld!' in events[8]['event_data']['res']['msg'] + assert events[6]["event"] == "playbook_on_task_start" + assert events[7]["event"] == "runner_on_start" + assert events[8]["event"] == "runner_on_ok" + assert "helloworld!" in events[8]["event_data"]["res"]["msg"] -@pytest.mark.parametrize('playbook', [ -{'strip_env_vars.yml': ''' +@pytest.mark.parametrize( + "playbook", + [ + { + "strip_env_vars.yml": """ - name: sensitive environment variables should be stripped from events connection: local hosts: all tasks: - shell: echo "Hello, World!" -'''}, # noqa -]) +""" + }, # noqa + ], +) def test_callback_plugin_strips_task_environ_variables(executor, playbook): executor.run() assert len(list(executor.events)) for event in list(executor.events): - assert os.environ['PATH'] not in json.dumps(event) + assert os.environ["PATH"] not in json.dumps(event) -@pytest.mark.parametrize('playbook', [ -{'custom_set_stat.yml': ''' +@pytest.mark.parametrize( + "playbook", + [ + { + "custom_set_stat.yml": """ - name: custom set_stat calls should persist to the local disk so awx can save them connection: local hosts: all @@ -260,21 +347,26 @@ def test_callback_plugin_strips_task_environ_variables(executor, playbook): - set_stats: data: foo: "bar" -'''}, # noqa -]) +""" + }, # noqa + ], +) def test_callback_plugin_saves_custom_stats(executor, playbook, skipif_pre_ansible28): executor.run() for event in executor.events: - event_data = event.get('event_data', {}) - if 'artifact_data' in event_data: - assert event_data['artifact_data'] == {'foo': 'bar'} + event_data = event.get("event_data", {}) + if "artifact_data" in event_data: + assert event_data["artifact_data"] == {"foo": "bar"} break else: - raise Exception('Did not find expected artifact data in event data') + raise Exception("Did not find expected artifact data in event data") -@pytest.mark.parametrize('playbook', [ -{'handle_playbook_on_notify.yml': ''' +@pytest.mark.parametrize( + "playbook", + [ + { + "handle_playbook_on_notify.yml": """ - name: handle playbook_on_notify events properly connection: local hosts: all @@ -286,21 +378,31 @@ def test_callback_plugin_saves_custom_stats(executor, playbook, skipif_pre_ansib changed_when: true notify: - my_handler -'''}, # noqa -]) -@pytest.mark.skipif(ANSIBLE_VERSION < '2.5', reason="v2_playbook_on_notify doesn't work before ansible 2.5") -def 
-'''}, # noqa
-])
-@pytest.mark.skipif(ANSIBLE_VERSION < '2.5', reason="v2_playbook_on_notify doesn't work before ansible 2.5")
-def test_callback_plugin_records_notify_events(executor, playbook, skipif_pre_ansible28):
+"""
+        },  # noqa
+    ],
+)
+@pytest.mark.skipif(
+    ANSIBLE_VERSION < "2.5",
+    reason="v2_playbook_on_notify doesn't work before ansible 2.5",
+)
+def test_callback_plugin_records_notify_events(
+    executor, playbook, skipif_pre_ansible28
+):
     executor.run()
     assert len(list(executor.events))
-    notify_events = [x for x in executor.events if x['event'] == 'playbook_on_notify']
+    notify_events = [x for x in executor.events if x["event"] == "playbook_on_notify"]
     assert len(notify_events) == 1
-    assert notify_events[0]['event_data']['handler'] == 'my_handler'
-    assert notify_events[0]['event_data']['host'] == 'localhost'
-    assert notify_events[0]['event_data']['task'] == 'debug'
+    assert notify_events[0]["event_data"]["handler"] == "my_handler"
+    assert notify_events[0]["event_data"]["host"] == "localhost"
+    assert notify_events[0]["event_data"]["task"] == "debug"
 
 
-@pytest.mark.parametrize('playbook', [
-{'no_log_module_with_var.yml': '''
+@pytest.mark.parametrize(
+    "playbook",
+    [
+        {
+            "no_log_module_with_var.yml": """
 - name: ensure that module-level secrets are redacted
   connection: local
   hosts: all
@@ -311,16 +413,18 @@ def test_callback_plugin_records_notify_events(executor, playbook, skipif_pre_an
       url: https://example.org
       url_username: john-jacob-jingleheimer-schmidt
       url_password: "{{ pw }}"
-'''}, # noqa
-])
+"""
+        },  # noqa
+    ],
+)
 def test_module_level_no_log(executor, playbook, skipif_pre_ansible28):
     # It's possible for `no_log=True` to be defined at the _module_ level,
     # e.g., for the URI module password parameter
     # This test ensures that we properly redact those
     executor.run()
     assert len(list(executor.events))
-    assert 'john-jacob-jingleheimer-schmidt' in json.dumps(list(executor.events))
-    assert 'SENSITIVE' not in json.dumps(list(executor.events))
+    assert "john-jacob-jingleheimer-schmidt" in json.dumps(list(executor.events))
+    assert "SENSITIVE" not in json.dumps(list(executor.events))
 
 
 def test_output_when_given_invalid_playbook(tmpdir):
@@ -339,7 +443,7 @@ def test_output_when_given_invalid_playbook(tmpdir):
         private_data_dir=private_data_dir,
         inventory="localhost ansible_connection=local",
         envvars={"ANSIBLE_DEPRECATION_WARNINGS": "False"},
-        playbook=os.path.join(private_data_dir, 'fake_playbook.yml')
+        playbook=os.path.join(private_data_dir, "fake_playbook.yml"),
     )
 
     executor.run()
@@ -361,20 +465,20 @@ def test_output_when_given_non_playbook_script(tmpdir):
     #
     # https://github.com/AlanCoding/ansible-runner-examples/tree/master/non_playbook/sleep_with_writes
     private_data_dir = str(tmpdir)
-    with open(os.path.join(private_data_dir, "args"), 'w') as args_file:
+    with open(os.path.join(private_data_dir, "args"), "w") as args_file:
         args_file.write("bash sleep_and_write.sh\n")
-    with open(os.path.join(private_data_dir, "sleep_and_write.sh"), 'w') as script_file:
+    with open(os.path.join(private_data_dir, "sleep_and_write.sh"), "w") as script_file:
         script_file.write("echo 'hi world'\nsleep 0.5\necho 'goodbye world'\n")
 
     # Update the settings to make this test a bit faster :)
     os.mkdir(os.path.join(private_data_dir, "env"))
-    with open(os.path.join(private_data_dir, "env", "settings"), 'w') as settings_file:
+    with open(os.path.join(private_data_dir, "env", "settings"), "w") as settings_file:
         settings_file.write("pexpect_timeout: 0.2")
 
     executor = init_runner(
         private_data_dir=private_data_dir,
         inventory="localhost ansible_connection=local",
-        envvars={"ANSIBLE_DEPRECATION_WARNINGS": "False"}
envvars={"ANSIBLE_DEPRECATION_WARNINGS": "False"}, ) executor.run() @@ -385,14 +489,17 @@ def test_output_when_given_non_playbook_script(tmpdir): events = list(executor.events) assert len(events) == 2 - assert events[0]['event'] == 'verbose' - assert events[0]['stdout'] == 'hi world' - assert events[1]['event'] == 'verbose' - assert events[1]['stdout'] == 'goodbye world' + assert events[0]["event"] == "verbose" + assert events[0]["stdout"] == "hi world" + assert events[1]["event"] == "verbose" + assert events[1]["stdout"] == "goodbye world" -@pytest.mark.parametrize('playbook', [ -{'listvars.yml': ''' +@pytest.mark.parametrize( + "playbook", + [ + { + "listvars.yml": """ - name: List Variables connection: local hosts: localhost @@ -401,9 +508,13 @@ def test_output_when_given_non_playbook_script(tmpdir): - name: Print a lot of lines debug: msg: "{{ ('F' * 150) | list }}" -'''}, # noqa -]) -def test_large_stdout_parsing_when_using_json_output(executor, playbook, skipif_pre_ansible28): +""" + }, # noqa + ], +) +def test_large_stdout_parsing_when_using_json_output( + executor, playbook, skipif_pre_ansible28 +): # When the json flag is used, it is possible to output more data than # pexpect's maxread default of 2000 characters. As a result, if not # handled properly, the stdout can end up being corrupted with partial @@ -413,8 +524,8 @@ def test_large_stdout_parsing_when_using_json_output(executor, playbook, skipif_ # This tests to confirm we don't polute the stdout output with non-json # lines when a single event has a lot of output. if six.PY2: - pytest.skip('Ansible in python2 uses different syntax.') - executor.config.env['ANSIBLE_NOCOLOR'] = str(True) + pytest.skip("Ansible in python2 uses different syntax.") + executor.config.env["ANSIBLE_NOCOLOR"] = str(True) executor.run() text = executor.stdout.read() assert text.count('"F"') == 150