From 05471e34ad42e862b3a134c0e8aae1c7e7952b14 Mon Sep 17 00:00:00 2001 From: Rafael Porres Molina Date: Tue, 16 Nov 2021 13:19:54 +0100 Subject: [PATCH] Add openshift-tekton-resources integration (#1892) https://issues.redhat.com/browse/APPSRE-3389 This work has been split in two * The new integration in this commit * The openshift-saas-deploy bits in Openshift-tekton-resources: the saasherder part #1998 Signed-off-by: Rafa Porres Molina --- helm/qontract-reconcile/values-internal.yaml | 13 + openshift/qontract-reconcile-internal.yaml | 217 ++++++++++ reconcile/cli.py | 25 ++ reconcile/openshift_tekton_resources.py | 374 ++++++++++++++++++ reconcile/queries.py | 26 ++ reconcile/test/fixtures.py | 4 + ...-deploy-with-unknown-task.pipeline.yaml.j2 | 51 +++ .../openshift-saas-deploy.pipeline.yaml.j2 | 51 +++ .../openshift-saas-deploy.task.yaml.j2 | 75 ++++ .../openshift_tekton_resources/provider1.json | 45 +++ .../provider2-with-resources.json | 55 +++ .../provider3-ignored.json | 26 ++ .../provider4-with-task-duplicates.json | 51 +++ .../provider5-with-unknown-task.json | 45 +++ ...sh-gateway-task-status-metric.task.yaml.j2 | 98 +++++ .../openshift_tekton_resources/saas1.json | 10 + .../saas2-with-resources.json | 19 + .../openshift_tekton_resources/saas2.json | 10 + .../saas3-ignored.json | 10 + .../openshift_tekton_resources/saas4.json | 10 + .../openshift_tekton_resources/saas5.json | 10 + .../test/test_openshift_tekton_resources.py | 274 +++++++++++++ 22 files changed, 1499 insertions(+) create mode 100644 reconcile/openshift_tekton_resources.py create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/openshift-saas-deploy-with-unknown-task.pipeline.yaml.j2 create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/openshift-saas-deploy.pipeline.yaml.j2 create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/openshift-saas-deploy.task.yaml.j2 create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/provider1.json create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/provider2-with-resources.json create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/provider3-ignored.json create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/provider4-with-task-duplicates.json create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/provider5-with-unknown-task.json create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/push-gateway-task-status-metric.task.yaml.j2 create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/saas1.json create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/saas2-with-resources.json create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/saas2.json create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/saas3-ignored.json create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/saas4.json create mode 100644 reconcile/test/fixtures/openshift_tekton_resources/saas5.json create mode 100644 reconcile/test/test_openshift_tekton_resources.py diff --git a/helm/qontract-reconcile/values-internal.yaml b/helm/qontract-reconcile/values-internal.yaml index 06034c4d2..700498c46 100644 --- a/helm/qontract-reconcile/values-internal.yaml +++ b/helm/qontract-reconcile/values-internal.yaml @@ -216,6 +216,19 @@ integrations: logs: slack: true trigger: true +- name: openshift-tekton-resources + # resources are tentative for the moment + resources: + requests: + memory: 800Mi + cpu: 400m + limits: + memory: 
1000Mi + cpu: 600m + extraArgs: --no-use-jump-host + logs: + slack: true + internalCertificates: true - name: terraform-resources resources: requests: diff --git a/openshift/qontract-reconcile-internal.yaml b/openshift/qontract-reconcile-internal.yaml index b71a9836f..94644d2e4 100644 --- a/openshift/qontract-reconcile-internal.yaml +++ b/openshift/qontract-reconcile-internal.yaml @@ -5224,6 +5224,215 @@ objects: emptyDir: {} - name: fluentd-config emptyDir: {} +- apiVersion: apps/v1 + kind: Deployment + metadata: + labels: + app: qontract-reconcile-openshift-tekton-resources + name: qontract-reconcile-openshift-tekton-resources + spec: + replicas: 1 + selector: + matchLabels: + app: qontract-reconcile-openshift-tekton-resources + template: + metadata: + labels: + app: qontract-reconcile-openshift-tekton-resources + component: qontract-reconcile + spec: + serviceAccountName: qontract-reconcile + initContainers: + - name: internal-certificates + image: ${INTERNAL_CERTIFICATES_IMAGE}:${INTERNAL_CERTIFICATES_IMAGE_TAG} + imagePullPolicy: ${INTERNAL_CERTIFICATES_IMAGE_PULL_POLICY} + command: ["/bin/sh", "-c"] + args: + - | + cp -r /etc/pki/. /tmp/etc-pki/ + volumeMounts: + - name: internal-certificates + mountPath: /tmp/etc-pki/ + - name: config + image: ${BUSYBOX_IMAGE}:${BUSYBOX_IMAGE_TAG} + imagePullPolicy: ${BUSYBOX_IMAGE_PULL_POLICY} + resources: + requests: + memory: 10Mi + cpu: 15m + limits: + memory: 20Mi + cpu: 25m + env: + - name: SLACK_WEBHOOK_URL + valueFrom: + secretKeyRef: + key: slack.webhook_url + name: app-interface + - name: SLACK_CHANNEL + value: ${SLACK_CHANNEL} + - name: SLACK_ICON_EMOJI + value: ${SLACK_ICON_EMOJI} + - name: LOG_GROUP_NAME + valueFrom: + secretKeyRef: + name: ${CLOUDWATCH_SECRET} + key: log_group_name + command: ["/bin/sh", "-c"] + args: + - | + # generate fluent.conf + cat > /fluentd/etc/fluent.conf < + @type tail + path /fluentd/log/integration.log + pos_file /fluentd/log/integration.log.pos + tag integration + + @type none + + + + + @type grep + + key message + pattern /using gql endpoint/ + + + + + @type grep + + key message + pattern /Certificate did not match expected hostname/ + + + + + @type copy + + @type slack + webhook_url ${SLACK_WEBHOOK_URL} + channel ${SLACK_CHANNEL} + icon_emoji ${SLACK_ICON_EMOJI} + username sd-app-sre-bot + flush_interval 10s + message "\`\`\`[openshift-tekton-resources] %s\`\`\`" + + + @type cloudwatch_logs + log_group_name ${LOG_GROUP_NAME} + log_stream_name openshift-tekton-resources + auto_create_stream true + + + EOF + volumeMounts: + - name: fluentd-config + mountPath: /fluentd/etc/ + containers: + - name: int + image: ${IMAGE}:${IMAGE_TAG} + ports: + - name: http + containerPort: 9090 + env: + - name: SHARDS + value: "1" + - name: SHARD_ID + value: "0" + - name: DRY_RUN + value: ${DRY_RUN} + - name: INTEGRATION_NAME + value: openshift-tekton-resources + - name: INTEGRATION_EXTRA_ARGS + value: "--no-use-jump-host" + - name: SLEEP_DURATION_SECS + value: ${SLEEP_DURATION_SECS} + - name: GITHUB_API + valueFrom: + configMapKeyRef: + name: app-interface + key: GITHUB_API + - name: SENTRY_DSN + valueFrom: + configMapKeyRef: + name: app-interface + key: SENTRY_DSN + - name: LOG_FILE + value: "${LOG_FILE}" + - name: UNLEASH_API_URL + valueFrom: + secretKeyRef: + name: unleash + key: API_URL + - name: UNLEASH_CLIENT_ACCESS_TOKEN + valueFrom: + secretKeyRef: + name: unleash + key: CLIENT_ACCESS_TOKEN + - name: SLOW_OC_RECONCILE_THRESHOLD + value: "${SLOW_OC_RECONCILE_THRESHOLD}" + - name: LOG_SLOW_OC_RECONCILE + value: 
"${LOG_SLOW_OC_RECONCILE}" + - name: USE_NATIVE_CLIENT + value: "${USE_NATIVE_CLIENT}" + resources: + limits: + cpu: ${OPENSHIFT_TEKTON_RESOURCES_CPU_LIMIT} + memory: ${OPENSHIFT_TEKTON_RESOURCES_MEMORY_LIMIT} + requests: + cpu: ${OPENSHIFT_TEKTON_RESOURCES_CPU_REQUEST} + memory: ${OPENSHIFT_TEKTON_RESOURCES_MEMORY_REQUEST} + volumeMounts: + - name: qontract-reconcile-toml + mountPath: /config + - name: internal-certificates + mountPath: /etc/pki/ + - name: logs + mountPath: /fluentd/log/ + - name: fluentd + image: ${FLUENTD_IMAGE}:${FLUENTD_IMAGE_TAG} + imagePullPolicy: ${FLUENTD_IMAGE_PULL_POLICY} + env: + - name: AWS_REGION + valueFrom: + secretKeyRef: + name: ${CLOUDWATCH_SECRET} + key: aws_region + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: ${CLOUDWATCH_SECRET} + key: aws_access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: ${CLOUDWATCH_SECRET} + key: aws_secret_access_key + resources: + requests: + memory: 30Mi + cpu: 15m + limits: + memory: 120Mi + cpu: 25m + volumeMounts: + - name: logs + mountPath: /fluentd/log/ + - name: fluentd-config + mountPath: /fluentd/etc/ + volumes: + - name: qontract-reconcile-toml + secret: + secretName: qontract-reconcile-toml + - name: logs + emptyDir: {} + - name: fluentd-config + emptyDir: {} + - name: internal-certificates + emptyDir: {} - apiVersion: apps/v1 kind: Deployment metadata: @@ -8366,6 +8575,14 @@ parameters: value: 400m - name: OPENSHIFT_SAAS_DEPLOY_TRIGGER_CLEANER_MEMORY_REQUEST value: 800Mi +- name: OPENSHIFT_TEKTON_RESOURCES_CPU_LIMIT + value: 600m +- name: OPENSHIFT_TEKTON_RESOURCES_MEMORY_LIMIT + value: 1000Mi +- name: OPENSHIFT_TEKTON_RESOURCES_CPU_REQUEST + value: 400m +- name: OPENSHIFT_TEKTON_RESOURCES_MEMORY_REQUEST + value: 800Mi - name: TERRAFORM_RESOURCES_CPU_LIMIT value: 600m - name: TERRAFORM_RESOURCES_MEMORY_LIMIT diff --git a/reconcile/cli.py b/reconcile/cli.py index 5092a7c46..a0372a0d3 100644 --- a/reconcile/cli.py +++ b/reconcile/cli.py @@ -33,6 +33,7 @@ import reconcile.openshift_saas_deploy_trigger_upstream_jobs import reconcile.openshift_saas_deploy_trigger_configs import reconcile.openshift_saas_deploy_trigger_cleaner +import reconcile.openshift_tekton_resources import reconcile.saas_file_owners import reconcile.gitlab_ci_skipper import reconcile.gitlab_labeler @@ -355,6 +356,14 @@ def gitlab_project_id(function): return function +def saas_file_name(function): + function = click.option('--saas-file-name', + help='saas-file to act on.', + default=None)(function) + + return function + + def enable_deletion(**kwargs): def f(function): opt = '--enable-deletion/--no-enable-deletion' @@ -862,6 +871,22 @@ def openshift_saas_deploy_trigger_cleaner(ctx, thread_pool_size, ctx.obj, thread_pool_size, internal, use_jump_host) +@integration.command() +@threaded() +@internal() +@use_jump_host() +@saas_file_name +@click.pass_context +def openshift_tekton_resources(ctx, thread_pool_size, + internal, use_jump_host, saas_file_name): + run_integration(reconcile.openshift_tekton_resources, + ctx.obj, + thread_pool_size, + internal, + use_jump_host, + saas_file_name) + + @integration.command() @throughput @click.argument('gitlab-project-id') diff --git a/reconcile/openshift_tekton_resources.py b/reconcile/openshift_tekton_resources.py new file mode 100644 index 000000000..c968ba69b --- /dev/null +++ b/reconcile/openshift_tekton_resources.py @@ -0,0 +1,374 @@ +import sys +import logging +import json +from typing import Any, Optional, Union + +import yaml +import jinja2 + +from reconcile 
import queries +from reconcile import openshift_base as ob +from reconcile.status import ExitCodes +from reconcile.utils import gql +from reconcile.utils.defer import defer +from reconcile.utils.openshift_resource import OpenshiftResource as OR +from reconcile.utils.semver_helper import make_semver +from reconcile.utils.saasherder import Providers + +LOG = logging.getLogger(__name__) +QONTRACT_INTEGRATION = 'openshift-tekton-resources' +QONTRACT_INTEGRATION_VERSION = make_semver(0, 1, 0) + +# it must be a single character due to resource max length +OBJECTS_PREFIX = 'o' +RESOURCE_MAX_LENGTH = 63 + +# Defaults +DEFAULT_DEPLOY_RESOURCES_STEP_NAME = 'qontract-reconcile' +DEFAULT_DEPLOY_RESOURCES = {'requests': {'cpu': '50m', + 'memory': '200Mi'}, + 'limits': {'cpu': '200m', + 'memory': '300Mi'}} +# Queries +SAAS_FILES_QUERY = ''' +{ + saas_files: saas_files_v2 { + path + name + configurableResources + pipelinesProvider { + name + provider + } + deployResources { + requests { + cpu + memory + } + limits { + cpu + memory + } + } + } +} +''' + + +class OpenshiftTektonResourcesNameTooLongError(Exception): + pass + + +class OpenshiftTektonResourcesBadConfigError(Exception): + pass + + +def fetch_saas_files(saas_file_name: Optional[str]) -> list[dict[str, Any]]: + '''Fetch saas files that can be handled by this integration: those with + configurableResources set to True''' + saas_files = [ + s for s in gql.get_api().query(SAAS_FILES_QUERY)['saas_files'] + if s.get('configurableResources')] + + if saas_file_name: + saas_file = None + for sf in saas_files: + if sf['name'] == saas_file_name: + saas_file = sf + break + + return [saas_file] if saas_file else [] + + return saas_files + + +def fetch_tkn_providers(saas_file_name: Optional[str]) -> dict[str, Any]: + '''Fetch tekton providers data for the saas files handled here''' + saas_files = fetch_saas_files(saas_file_name) + if not saas_files: + return {} + + duplicates: set[str] = set() + all_tkn_providers = {} + for pipeline_provider in queries.get_pipelines_providers(): + if pipeline_provider['provider'] != Providers.TEKTON: + continue + + if pipeline_provider['name'] in all_tkn_providers: + duplicates.add(pipeline_provider['name']) + else: + all_tkn_providers[pipeline_provider['name']] = pipeline_provider + + if duplicates: + raise OpenshiftTektonResourcesBadConfigError( + 'There are duplicates in tekton providers names: ' + f'{", ".join(duplicates)}') + + # Only get the providers that are used by the saas files + # Add the saas files belonging to it + tkn_providers = {} + for sf in saas_files: + provider_name = sf['pipelinesProvider']['name'] + if provider_name not in tkn_providers: + tkn_providers[provider_name] = all_tkn_providers[provider_name] + + if 'saas_files' not in tkn_providers[provider_name]: + tkn_providers[provider_name]['saas_files'] = [] + + tkn_providers[provider_name]['saas_files'].append(sf) + + return tkn_providers + + +def fetch_desired_resources(tkn_providers: dict[str, Any]) \ + -> list[dict[str, Union[str, OR]]]: + '''Create an array of dicts that will be used as args of ri.add_desired + This will also add resourceNames inside tkn_providers['namespace'] + while we are migrating from the current system to this integration''' + desired_resources = [] + for tknp in tkn_providers.values(): + namespace = tknp['namespace']['name'] + cluster = tknp['namespace']['cluster']['name'] + deploy_resources = tknp.get('deployResources') or \ + DEFAULT_DEPLOY_RESOURCES + + # a dict with task template names as keys and types as values + # 
we'll use it when building the pipeline object to make sure + # that all tasks referenced exist and to be able to set the + # the corresponding ['taskRef']['name'] + task_templates_types = {} + + # desired tasks. We need to keep track of the tasks added in this + # namespace, hence we will use this instead of adding data + # directly to desired_resources + desired_tasks = [] + for task_template_config in tknp['taskTemplates']: + task_templates_types[task_template_config['name']] = \ + task_template_config['type'] + + if task_template_config['type'] == 'onePerNamespace': + task = build_one_per_namespace_task(task_template_config) + desired_tasks.append( + build_desired_resource(task, + task_template_config['path'], + cluster, + namespace)) + elif task_template_config['type'] == 'onePerSaasFile': + for sf in tknp['saas_files']: + task = build_one_per_saas_file_task( + task_template_config, sf, deploy_resources) + desired_tasks.append( + build_desired_resource(task, + task_template_config['path'], + cluster, + namespace)) + else: + raise OpenshiftTektonResourcesBadConfigError( + f"Unknown type [{task_template_config['type']}] in tekton " + f"provider [{tknp['name']}]") + + if len(tknp['taskTemplates']) != len(task_templates_types.keys()): + raise OpenshiftTektonResourcesBadConfigError( + 'There are duplicates in task templates names in tekton ' + f"provider {tknp['name']}") + + # TODO: remove when tknp objects are managed with this integration + tknp['namespace']['managedResourceNames'] = [{ + 'resource': 'Task', + 'resourceNames': [t['name'] for t in desired_tasks] + }] + + desired_resources.extend(desired_tasks) + + # We only support pipelines from OpenshiftSaasDeploy + pipeline_template_config = \ + tknp['pipelineTemplates']['openshiftSaasDeploy'] + desired_pipelines = [] + for sf in tknp['saas_files']: + pipeline = build_one_per_saas_file_pipeline( + pipeline_template_config, sf, task_templates_types) + desired_pipelines.append( + build_desired_resource(pipeline, + pipeline_template_config['path'], + cluster, + namespace)) + + tknp['namespace']['managedResourceNames'].append({ + 'resource': 'Pipeline', + 'resourceNames': [p['name'] for p in desired_pipelines] + }) + + desired_resources.extend(desired_pipelines) + + return desired_resources + + +def build_one_per_namespace_task(task_template_config: dict[str, str]) \ + -> dict[str, Any]: + '''Builds onePerNamespace Task objects. The name of the task template + will be used as Task name and there won't be any resource configuration''' + variables = json.loads(task_template_config['variables']) \ + if task_template_config.get('variables') else {} + task = load_tkn_template(task_template_config['path'], variables) + task['metadata']['name'] = \ + build_one_per_namespace_tkn_object_name(task_template_config['name']) + + return task + + +def build_one_per_saas_file_task(task_template_config: dict[str, str], + saas_file: dict[str, Any], + deploy_resources: dict[str, dict[str, str]]) \ + -> dict[str, Any]: + '''Builds onePerSaasFile Task objects. The name of the Task will be set + using the template config name and the saas file name. 
The step + corresponding to the openshift-saas-deploy run will get its resources + configured using either the defaults, the provider defaults or the saas + file configuration''' + variables = json.loads(task_template_config['variables']) \ + if task_template_config.get('variables') else {} + task = load_tkn_template(task_template_config['path'], variables) + task['metadata']['name'] = \ + build_one_per_saas_file_tkn_object_name(task_template_config['name'], + saas_file['name']) + step_name = task_template_config.get('deployResourcesStepName', + DEFAULT_DEPLOY_RESOURCES_STEP_NAME) + + resources_configured = False + for step in task['spec']['steps']: + if step['name'] == step_name: + step['resources'] = saas_file.get('deployResources') or \ + deploy_resources + resources_configured = True + break + + if not resources_configured: + raise OpenshiftTektonResourcesBadConfigError( + f"Cannot find a step named {step_name} to set resources " + f"in task template {task_template_config['name']}") + + return task + + +def build_one_per_saas_file_pipeline(pipeline_template_config: dict[str, str], + saas_file: dict[str, Any], + task_templates_types: dict[str, str]) \ + -> dict[str, Any]: + '''Builds onePerSaasFile Pipeline objects. The task references names will + be set depending if the tasks are onePerNamespace or onePerSaasFile''' + variables = json.loads(pipeline_template_config['variables']) \ + if pipeline_template_config.get('variables') else {} + pipeline = load_tkn_template(pipeline_template_config['path'], variables) + pipeline['metadata']['name'] = build_one_per_saas_file_tkn_object_name( + pipeline_template_config['name'], saas_file['name']) + + for section in ['tasks', 'finally']: + for task in pipeline['spec'][section]: + if task['name'] not in task_templates_types: + raise OpenshiftTektonResourcesBadConfigError( + f"Unknown task {task['name']} in pipeline template " + f"{pipeline_template_config['name']}") + + if task_templates_types[task['name']] == "onePerNamespace": + task['taskRef']['name'] = \ + build_one_per_namespace_tkn_object_name(task['name']) + else: + task['taskRef']['name'] = \ + build_one_per_saas_file_tkn_object_name(task['name'], + saas_file['name']) + + return pipeline + + +def load_tkn_template(path: str, variables: dict[str, str]) -> dict[str, Any]: + '''Fetches a yaml resource from qontract-server and parses it''' + resource = gql.get_api().get_resource(path) + body = jinja2.Template(resource['content'], + undefined=jinja2.StrictUndefined).render(variables) + + return yaml.safe_load(body) + + +def build_desired_resource(tkn_object: dict[str, Any], path: str, cluster: str, + namespace: str) -> dict[str, Union[str, OR]]: + '''Returns a dict with ResourceInventory.add_desired args''' + openshift_resource = OR(tkn_object, + QONTRACT_INTEGRATION, + QONTRACT_INTEGRATION_VERSION, + error_details=path) + + return {'cluster': cluster, + 'namespace': namespace, + 'resource_type': openshift_resource.kind, + 'name': openshift_resource.name, + 'value': openshift_resource} + + +def check_resource_max_length(name: str) -> None: + '''Checks the resource name is not too long as it may have problems while + being applied''' + if len(name) > RESOURCE_MAX_LENGTH: + raise OpenshiftTektonResourcesNameTooLongError( + f"name {name} is longer than {RESOURCE_MAX_LENGTH} characters") + + +def build_one_per_namespace_tkn_object_name(name: str) -> str: + '''Builds a onePerNamespace object name''' + name = f'{OBJECTS_PREFIX}-{name}' + check_resource_max_length(name) + return name + + +def 
build_one_per_saas_file_tkn_object_name(template_name: str, + saas_file_name: str) -> str: + '''Builds a onePerSaasFile object name. Given a saas file name, it returns + the openshift-saas-deploy names used by Tasks and Pipelines created by this + integration''' + name = f"{OBJECTS_PREFIX}-{template_name}-{saas_file_name}" + check_resource_max_length(name) + return name + + +def run(dry_run: bool, + thread_pool_size: int = 10, + internal: Optional[bool] = None, + use_jump_host: bool = True, + saas_file_name: Optional[str] = None) -> None: + + tkn_providers = fetch_tkn_providers(saas_file_name) + + # TODO: This will need to be an error condition in the future + if not tkn_providers: + LOG.info("No saas files found to be processed") + sys.exit(0) + + # We need to start with the desired state to know the names of the + # tekton objects that will be created in the providers' namespaces. We + # need to make sure that this integration only manages its resources + # and not the tekton resources already created via openshift-resources + LOG.debug("Fetching desired resources") + desired_resources = fetch_desired_resources(tkn_providers) + + tkn_namespaces = [tknp['namespace'] for tknp in tkn_providers.values()] + LOG.debug("Fetching current resources") + ri, oc_map = ob.fetch_current_state( + namespaces=tkn_namespaces, + integration=QONTRACT_INTEGRATION, + integration_version=QONTRACT_INTEGRATION_VERSION, + override_managed_types=['Pipeline', 'Task'], + internal=internal, + use_jump_host=use_jump_host, + thread_pool_size=thread_pool_size) + defer(oc_map.cleanup) + + LOG.debug("Adding desired resources to inventory") + for desired_resource in desired_resources: + ri.add_desired(**desired_resource) + + LOG.debug("Realizing data") + ob.realize_data(dry_run, oc_map, ri, thread_pool_size) + + if ri.has_error_registered(): + sys.exit(ExitCodes.ERROR) + + sys.exit(0) diff --git a/reconcile/queries.py b/reconcile/queries.py index 47dc543b1..3b8f78e51 100644 --- a/reconcile/queries.py +++ b/reconcile/queries.py @@ -1718,6 +1718,32 @@ def get_saas_files_minimal(v1=True, v2=False): } } } + taskTemplates { + ...on PipelinesProviderTektonObjectTemplate_v1 { + name + type + path + variables + } + } + pipelineTemplates { + openshiftSaasDeploy { + name + type + path + variables + } + } + deployResources { + requests { + cpu + memory + } + limits { + cpu + memory + } + } } } } diff --git a/reconcile/test/fixtures.py b/reconcile/test/fixtures.py index a594ddb72..490261996 100644 --- a/reconcile/test/fixtures.py +++ b/reconcile/test/fixtures.py @@ -1,4 +1,5 @@ import os +import json import anymarkup @@ -21,3 +22,6 @@ def get(self, fixture): def get_anymarkup(self, fixture): return anymarkup.parse(self.get(fixture), force_types=None) + + def get_json(self, fixture): + return json.loads(self.get(fixture)) diff --git a/reconcile/test/fixtures/openshift_tekton_resources/openshift-saas-deploy-with-unknown-task.pipeline.yaml.j2 b/reconcile/test/fixtures/openshift_tekton_resources/openshift-saas-deploy-with-unknown-task.pipeline.yaml.j2 new file mode 100644 index 000000000..bad2f5b31 --- /dev/null +++ b/reconcile/test/fixtures/openshift_tekton_resources/openshift-saas-deploy-with-unknown-task.pipeline.yaml.j2 @@ -0,0 +1,51 @@ +apiVersion: tekton.dev/v1beta1 +kind: Pipeline +metadata: + name: openshift-saas-deploy +spec: + params: + - name: saas_file_name + type: string + - name: env_name + type: string + - name: tkn_cluster_console_url + type: string + - name: tkn_namespace_name + type: string + tasks: + - name: 
this-is-an-unknown-task + taskRef: + name: openshift-saas-deploy + params: + - name: saas_file_name + value: "$(params.saas_file_name)" + - name: env_name + value: "$(params.env_name)" + finally: + - name: push-gateway-openshift-saas-deploy-task-status-metric + retries: 10 + taskRef: + name: push-gateway-openshift-saas-deploy-task-status-metric + params: + - name: saas_file_name + value: "$(params.saas_file_name)" + - name: env_name + value: "$(params.env_name)" + - name: metric_name + value: app_sre_tekton_pipelinerun_task_status + - name: job_name + value: openshift-saas-deploy-push-metric + - name: task_name + value: openshift-saas-deploy + - name: task_status + value: "$(tasks.openshift-saas-deploy.status)" + - name: pipeline_name + value: openshift-saas-deploy + - name: pipelinerun_name + value: "$(context.pipelineRun.name)" + - name: retry_cooldown_seconds + value: "60" + - name: tkn_cluster_console_url + value: "$(params.tkn_cluster_console_url)" + - name: tkn_namespace_name + value: "$(params.tkn_namespace_name)" diff --git a/reconcile/test/fixtures/openshift_tekton_resources/openshift-saas-deploy.pipeline.yaml.j2 b/reconcile/test/fixtures/openshift_tekton_resources/openshift-saas-deploy.pipeline.yaml.j2 new file mode 100644 index 000000000..da1567dfc --- /dev/null +++ b/reconcile/test/fixtures/openshift_tekton_resources/openshift-saas-deploy.pipeline.yaml.j2 @@ -0,0 +1,51 @@ +apiVersion: tekton.dev/v1beta1 +kind: Pipeline +metadata: + name: openshift-saas-deploy +spec: + params: + - name: saas_file_name + type: string + - name: env_name + type: string + - name: tkn_cluster_console_url + type: string + - name: tkn_namespace_name + type: string + tasks: + - name: openshift-saas-deploy + taskRef: + name: openshift-saas-deploy + params: + - name: saas_file_name + value: "$(params.saas_file_name)" + - name: env_name + value: "$(params.env_name)" + finally: + - name: push-gateway-openshift-saas-deploy-task-status-metric + retries: 10 + taskRef: + name: push-gateway-openshift-saas-deploy-task-status-metric + params: + - name: saas_file_name + value: "$(params.saas_file_name)" + - name: env_name + value: "$(params.env_name)" + - name: metric_name + value: app_sre_tekton_pipelinerun_task_status + - name: job_name + value: openshift-saas-deploy-push-metric + - name: task_name + value: openshift-saas-deploy + - name: task_status + value: "$(tasks.openshift-saas-deploy.status)" + - name: pipeline_name + value: openshift-saas-deploy + - name: pipelinerun_name + value: "$(context.pipelineRun.name)" + - name: retry_cooldown_seconds + value: "60" + - name: tkn_cluster_console_url + value: "$(params.tkn_cluster_console_url)" + - name: tkn_namespace_name + value: "$(params.tkn_namespace_name)" diff --git a/reconcile/test/fixtures/openshift_tekton_resources/openshift-saas-deploy.task.yaml.j2 b/reconcile/test/fixtures/openshift_tekton_resources/openshift-saas-deploy.task.yaml.j2 new file mode 100644 index 000000000..00a0ff11c --- /dev/null +++ b/reconcile/test/fixtures/openshift_tekton_resources/openshift-saas-deploy.task.yaml.j2 @@ -0,0 +1,75 @@ +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: openshift-saas-deploy +spec: + params: + - name: saas_file_name + type: string + - name: env_name + type: string + steps: + - name: qontract-reconcile-prepare + image: quay.io/app-sre/qontract-reconcile:{{ qontract_reconcile_image_tag }} + command: ["/bin/sh", "-c"] + args: ['mkdir artifacts'] + resources: + requests: + cpu: 10m + memory: 20Mi + limits: + cpu: 20m + memory: 30Mi + - name: 
qontract-reconcile-config + image: quay.io/app-sre/qontract-reconcile:{{ qontract_reconcile_image_tag }} + command: ["/bin/sh", "-c"] + args: ['echo "$(CONFIG_TOML)" | base64 -d > config.toml'] + resources: + requests: + cpu: 10m + memory: 20Mi + limits: + cpu: 20m + memory: 30Mi + env: + - name: CONFIG_TOML + valueFrom: + secretKeyRef: + name: qontract-reconcile + key: config.toml.base64 + - name: qontract-reconcile + image: quay.io/app-sre/qontract-reconcile:{{ qontract_reconcile_image_tag }} + command: ["/bin/sh", "-c"] + args: ['qontract-reconcile --config config.toml openshift-saas-deploy --saas-file-name $(params.saas_file_name) --env-name $(params.env_name) --io-dir artifacts/'] + env: + - name: APP_INTERFACE_STATE_BUCKET + valueFrom: + secretKeyRef: + name: qontract-reconcile + key: app-interface.state.bucket.name + - name: APP_INTERFACE_STATE_BUCKET_ACCOUNT + valueFrom: + secretKeyRef: + name: qontract-reconcile + key: app-interface.state.bucket.account.name + - name: gitlab_pr_submitter_queue_url + valueFrom: + secretKeyRef: + name: qontract-reconcile + key: gitlab.pr-submitter.queue.url + - name: GITHUB_API + valueFrom: + secretKeyRef: + name: qontract-reconcile + key: github.api.url + - name: qontract-reconcile-publish + image: quay.io/app-sre/qontract-reconcile:{{ qontract_reconcile_image_tag }} + command: ["/bin/sh", "-c"] + args: ['for f in $(find artifacts/ -type f); do echo $f; cat $f; done'] + resources: + requests: + cpu: 10m + memory: 20Mi + limits: + cpu: 20m + memory: 30Mi diff --git a/reconcile/test/fixtures/openshift_tekton_resources/provider1.json b/reconcile/test/fixtures/openshift_tekton_resources/provider1.json new file mode 100644 index 000000000..9afd308fb --- /dev/null +++ b/reconcile/test/fixtures/openshift_tekton_resources/provider1.json @@ -0,0 +1,45 @@ +{ + "name": "provider1", + "provider": "tekton", + "retention": { + "days": 7, + "minimum": 100 + }, + "namespace": { + "name": "provider1", + "cluster": { + "name": "appsres03ue1", + "serverUrl": "", + "jumpHost": null, + "automationToken": { + "path": "", + "field": "token", + "format": null + }, + "internal": true, + "disable": null + } + }, + "taskTemplates": [ + { + "name": "openshift-saas-deploy", + "type": "onePerSaasFile", + "path": "openshift-saas-deploy.task.yaml.j2", + "variables": "{\"qontract_reconcile_image_tag\":\"latest\"}" + }, + { + "name": "push-gateway-openshift-saas-deploy-task-status-metric", + "type": "onePerNamespace", + "path": "push-gateway-task-status-metric.task.yaml.j2", + "variables": "{\"ubi8_ubi_minimal_image_tag\":\"latest\"}" + } + ], + "pipelineTemplates": { + "openshiftSaasDeploy": { + "name": "openshift-saas-deploy", + "type": "onePerSaasFile", + "path": "openshift-saas-deploy.pipeline.yaml.j2", + "variables": null + } + } +} diff --git a/reconcile/test/fixtures/openshift_tekton_resources/provider2-with-resources.json b/reconcile/test/fixtures/openshift_tekton_resources/provider2-with-resources.json new file mode 100644 index 000000000..c29cb036b --- /dev/null +++ b/reconcile/test/fixtures/openshift_tekton_resources/provider2-with-resources.json @@ -0,0 +1,55 @@ +{ + "name": "provider2-with-resources", + "provider": "tekton", + "retention": { + "days": 7, + "minimum": 100 + }, + "namespace": { + "name": "provider2-with-resources", + "cluster": { + "name": "appsres03ue1", + "serverUrl": "", + "jumpHost": null, + "automationToken": { + "path": "", + "field": "token", + "format": null + }, + "internal": true, + "disable": null + } + }, + "taskTemplates": [ + { + "name": 
"openshift-saas-deploy", + "type": "onePerSaasFile", + "path": "openshift-saas-deploy.task.yaml.j2", + "variables": "{\"qontract_reconcile_image_tag\":\"latest\"}" + }, + { + "name": "push-gateway-openshift-saas-deploy-task-status-metric", + "type": "onePerNamespace", + "path": "push-gateway-task-status-metric.task.yaml.j2", + "variables": "{\"ubi8_ubi_minimal_image_tag\":\"latest\"}" + } + ], + "pipelineTemplates": { + "openshiftSaasDeploy": { + "name": "openshift-saas-deploy", + "type": "onePerSaasFile", + "path": "openshift-saas-deploy.pipeline.yaml.j2", + "variables": null + } + }, + "deployResources": { + "requests": { + "cpu": "10m", + "memory": "1Mi" + }, + "limits": { + "cpu": "10m", + "memory": "100Mi" + } + } +} diff --git a/reconcile/test/fixtures/openshift_tekton_resources/provider3-ignored.json b/reconcile/test/fixtures/openshift_tekton_resources/provider3-ignored.json new file mode 100644 index 000000000..a3e0e54f0 --- /dev/null +++ b/reconcile/test/fixtures/openshift_tekton_resources/provider3-ignored.json @@ -0,0 +1,26 @@ +{ + "name": "provider3-ignored", + "provider": "tekton", + "retention": { + "days": 7, + "minimum": 100 + }, + "namespace": { + "name": "provider3-ignored", + "cluster": { + "name": "appsrep05ue1", + "serverUrl": "", + "jumpHost": null, + "automationToken": { + "path": "", + "field": "token", + "format": null + }, + "internal": true, + "disable": null + } + }, + "taskTemplates": null, + "pipelineTemplates": null, + "deployResources": null +} diff --git a/reconcile/test/fixtures/openshift_tekton_resources/provider4-with-task-duplicates.json b/reconcile/test/fixtures/openshift_tekton_resources/provider4-with-task-duplicates.json new file mode 100644 index 000000000..3b86a696c --- /dev/null +++ b/reconcile/test/fixtures/openshift_tekton_resources/provider4-with-task-duplicates.json @@ -0,0 +1,51 @@ +{ + "name": "provider4-with-task-duplicates", + "provider": "tekton", + "retention": { + "days": 7, + "minimum": 100 + }, + "namespace": { + "name": "provider1", + "cluster": { + "name": "appsres03ue1", + "serverUrl": "", + "jumpHost": null, + "automationToken": { + "path": "", + "field": "token", + "format": null + }, + "internal": true, + "disable": null + } + }, + "taskTemplates": [ + { + "name": "openshift-saas-deploy", + "type": "onePerSaasFile", + "path": "openshift-saas-deploy.task.yaml.j2", + "variables": "{\"qontract_reconcile_image_tag\":\"latest\"}" + }, + { + "name": "openshift-saas-deploy", + "type": "onePerSaasFile", + "path": "openshift-saas-deploy.task.yaml.j2", + "variables": "{\"qontract_reconcile_image_tag\":\"latest\"}" + }, + { + "name": "push-gateway-openshift-saas-deploy-task-status-metric", + "type": "onePerNamespace", + "path": "push-gateway-task-status-metric.task.yaml.j2", + "variables": "{\"ubi8_ubi_minimal_image_tag\":\"latest\"}" + } + ], + "pipelineTemplates": { + "openshiftSaasDeploy": { + "name": "openshift-saas-deploy", + "type": "onePerSaasFile", + "path": "openshift-saas-deploy.pipeline.yaml.j2", + "variables": null + } + } +} diff --git a/reconcile/test/fixtures/openshift_tekton_resources/provider5-with-unknown-task.json b/reconcile/test/fixtures/openshift_tekton_resources/provider5-with-unknown-task.json new file mode 100644 index 000000000..67e86f085 --- /dev/null +++ b/reconcile/test/fixtures/openshift_tekton_resources/provider5-with-unknown-task.json @@ -0,0 +1,45 @@ +{ + "name": "provider5-with-unknown-task", + "provider": "tekton", + "retention": { + "days": 7, + "minimum": 100 + }, + "namespace": { + "name": 
"provider1", + "cluster": { + "name": "appsres03ue1", + "serverUrl": "", + "jumpHost": null, + "automationToken": { + "path": "", + "field": "token", + "format": null + }, + "internal": true, + "disable": null + } + }, + "taskTemplates": [ + { + "name": "openshift-saas-deploy", + "type": "onePerSaasFile", + "path": "openshift-saas-deploy.task.yaml.j2", + "variables": "{\"qontract_reconcile_image_tag\":\"latest\"}" + }, + { + "name": "push-gateway-openshift-saas-deploy-task-status-metric", + "type": "onePerNamespace", + "path": "push-gateway-task-status-metric.task.yaml.j2", + "variables": "{\"ubi8_ubi_minimal_image_tag\":\"latest\"}" + } + ], + "pipelineTemplates": { + "openshiftSaasDeploy": { + "name": "openshift-saas-deploy", + "type": "onePerSaasFile", + "path": "openshift-saas-deploy-with-unknown-task.pipeline.yaml.j2", + "variables": null + } + } +} diff --git a/reconcile/test/fixtures/openshift_tekton_resources/push-gateway-task-status-metric.task.yaml.j2 b/reconcile/test/fixtures/openshift_tekton_resources/push-gateway-task-status-metric.task.yaml.j2 new file mode 100644 index 000000000..91e7e5d9c --- /dev/null +++ b/reconcile/test/fixtures/openshift_tekton_resources/push-gateway-task-status-metric.task.yaml.j2 @@ -0,0 +1,98 @@ +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: push-gateway-task-status-metric +spec: + params: + - name: saas_file_name + type: string + - name: env_name + type: string + - name: metric_name + type: string + - name: job_name + type: string + - name: task_name + type: string + - name: task_status + type: string + - name: pipeline_name + type: string + - name: pipelinerun_name + type: string + - name: retry_cooldown_seconds + type: string + - name: tkn_cluster_console_url + type: string + - name: tkn_namespace_name + type: string + steps: + - name: push-gateway-task-status-metric + image: quay.io/app-sre/ubi8-ubi-minimal:{{ ubi8_ubi_minimal_image_tag }} + resources: + requests: + cpu: 10m + memory: 20Mi + limits: + cpu: 20m + memory: 30Mi + env: + - name: PUSHGW_URL + valueFrom: + secretKeyRef: + name: push-gateway-basic-auth + key: server + - name: PUSHGW_CREDS_B64 + valueFrom: + secretKeyRef: + name: push-gateway-basic-auth + key: credentials_b64 + script: | + #!/usr/bin/env bash + # + # This script pushes a metric such as: + # app_sre_tekton_pipelinerun_task_status{container="pushgateway",endpoint="scrape",env_name="insights-production",job="openshift-saas-deploy-push-metric",namespace="app-sre-observability-production",pipeline_name="openshift-saas-deploy",pipelinerun_name="xjoin-operator-insights-production-20210504152500",pod="pushgateway-5-dkksf",saas_file_name="xjoin-operator",service="pushgateway-nginx-gate",task_name="openshift-saas-deploy",tkn_namespace_name="app-sre-pipelines",tkn_cluster_console_url="https://console-openshift-console.apps.appsrep05ue1.zqxk.p1.openshiftapps.com"} + # into Prometheus PushGateway + # There is an important detail here: We'll push into a group identified by the combination of saas_file_name and env_name, + # which we know it is unique. Even if we are pushing metrics with a different pipelinerun, the PushGateway will only keep + # one metric per group, the last one. 
+ + function log() { + echo "`date '+%Y-%m-%d %H:%M:%S'` -- $@" 1>&2 + } + + # From https://tekton.dev/docs/pipelines/pipelines/#adding-finally-to-the-pipeline + # Succeeded all tasks have succeeded + # Failed one or more tasks failed + # Completed all tasks completed successfully including one or more skipped tasks + # None no aggregate execution status available (i.e. none of the above), one or more tasks could be pending/running/cancelled/timedout + case "$(params.task_status)" in + 'Succeeded') STATUS=0;; + 'Failed') STATUS=1;; + 'Completed') STATUS=2;; + 'None') STATUS=3;; + *) STATUS=9;; + esac + + GROUP_URL="$PUSHGW_URL/metrics/job/$(params.job_name)/saas_file_name/$(params.saas_file_name)/env_name/$(params.env_name)" + PUSH_METRIC="$(params.metric_name){saas_file_name=\"$(params.saas_file_name)\",env_name=\"$(params.env_name)\",pipeline_name=\"$(params.pipeline_name)\",pipelinerun_name=\"$(params.pipelinerun_name)\",task_name=\"$(params.task_name)\",tkn_cluster_console_url=\"$(params.tkn_cluster_console_url)\",tkn_namespace_name=\"$(params.tkn_namespace_name)\"}" + + log "PUTting '$PUSH_METRIC $STATUS' to $GROUP_URL" + + HTTP_RESPONSE_CODE=$(curl -X PUT -m 10 -sS -w '%{http_code}' -o /dev/null -H "Authorization: Basic $PUSHGW_CREDS_B64" --data-binary @- "$GROUP_URL" < list[dict[str, Any]]: + return self._providers + + @property + def saas_files(self) -> list[dict[str, Any]]: + return self._saas_files + + @providers.setter # type: ignore[no-redef, attr-defined] + def providers(self, providers: list[dict[str, Any]]) -> None: + if not isinstance(providers, list): + raise TypeError(f'Expecting list, have {type(providers)}') + self._providers = providers + + @saas_files.setter # type: ignore[no-redef, attr-defined] + def saas_files(self, saas_files: list[dict[str, Any]]) -> None: + if not isinstance(saas_files, list): + raise TypeError(f'Expecting list, have {type(saas_files)}') + self._saas_files = saas_files + + +class TestOpenshiftTektonResources(TestCase): + + def _test_deploy_resources_in_task(self, desired_resources, task_name, + deploy_resources) -> None: + '''Helper method to test if deploy resources have been properly set''' + for dr in desired_resources: + if dr['name'] == task_name: + task = dr['value'].body + for step in task['spec']['steps']: + if step['name'] == otr.DEFAULT_DEPLOY_RESOURCES_STEP_NAME: + self.assertEqual(step['resources'], deploy_resources) + break + + def mock_gql_get_resource(self, path: str) -> dict[str, str]: + '''Mock for GqlApi.get_resources using fixtures''' + content = self.fxt.get(path) + return {'path': path, + 'content': content, + 'sha256sum': ''} # we do not need it for these tests + + def mock_gql_query(self, query: str) -> dict[str, Any]: + '''Mock for GqlApi.query using test_data set in setUp''' + if query == otr.SAAS_FILES_QUERY: + return {'saas_files': self.test_data.saas_files} + elif query == PIPELINES_PROVIDERS_QUERY: + return {'pipelines_providers': self.test_data.providers} + else: + raise TstUnsupportedGqlQueryError("Unsupported query") + + def setUp(self) -> None: + self.test_data = TstData() + + self.fxt = Fixtures('openshift_tekton_resources') + + # Common fixtures + self.saas1 = self.fxt.get_json('saas1.json') + self.saas2 = self.fxt.get_json('saas2.json') + self.saas2_wr = self.fxt.get_json('saas2-with-resources.json') + self.saas3_ignored = self.fxt.get_json('saas3-ignored.json') + self.provider1 = self.fxt.get_json('provider1.json') + self.provider2_wr = self.fxt.get_json('provider2-with-resources.json') + 
self.provider3_ignored = self.fxt.get_json('provider3-ignored.json') + + # Patcher for GqlApi methods + self.gql_patcher = patch.object(gql, 'get_api', autospec=True) + self.gql = self.gql_patcher.start() + gqlapi_mock = create_autospec(gql.GqlApi) + self.gql.return_value = gqlapi_mock + gqlapi_mock.query.side_effect = self.mock_gql_query + gqlapi_mock.get_resource.side_effect = self.mock_gql_get_resource + + def tearDown(self) -> None: + """ cleanup patches created in self.setUp""" + self.gql_patcher.stop() + + def test_get_all_saas_files(self) -> None: + self.test_data.saas_files = [self.saas1, self.saas3_ignored] + self.assertEqual(otr.fetch_saas_files(None), [self.saas1]) + + def test_get_one_saas_file(self) -> None: + self.test_data.saas_files = [self.saas1, self.saas2] + saas_files = otr.fetch_saas_files(self.saas1['name']) + self.assertEqual(saas_files, [self.saas1]) + + def test_fetch_tkn_providers(self) -> None: + self.test_data.saas_files = [ + self.saas1, self.saas2, self.saas3_ignored] + self.test_data.providers = [ + self.provider1, self.provider2_wr, self.provider3_ignored] + + tkn_providers = otr.fetch_tkn_providers(None) + keys_expected = set([self.provider1['name'], + self.provider2_wr['name']]) + self.assertEqual(tkn_providers.keys(), keys_expected) + + def test_duplicate_providers(self) -> None: + self.test_data.saas_files = [self.saas1] + self.test_data.providers = [self.provider1, self.provider1] + msg = r'There are duplicates in tekton providers names: provider1' + self.assertRaisesRegex(otr.OpenshiftTektonResourcesBadConfigError, msg, + otr.fetch_tkn_providers, None) + + def test_fetch_desired_resources(self) -> None: + self.test_data.saas_files = [self.saas1, self.saas2, self.saas2_wr] + self.test_data.providers = [self.provider1, self.provider2_wr] + + desired_resources = otr.fetch_desired_resources( + otr.fetch_tkn_providers(None)) + + # we have one task per namespace and a pipeline + task per saas file + self.assertEqual(len(desired_resources), 8) + + def test_fetch_desired_resources_names(self) -> None: + self.test_data.saas_files = [self.saas1] + self.test_data.providers = [self.provider1] + desired_resources = otr.fetch_desired_resources( + otr.fetch_tkn_providers(None)) + + expected_task_names = set([ + 'o-push-gateway-openshift-saas-deploy-task-status-metric', + 'o-openshift-saas-deploy-saas1']) + expected_pipeline_name = 'o-openshift-saas-deploy-saas1' + + task_names = set() + for dr in desired_resources: + body = dr['value'].body + if body['kind'] == 'Task': + task_names.add(body['metadata']['name']) + else: + pipeline_name = body['metadata']['name'] + + self.assertEqual(task_names, expected_task_names) + self.assertEqual(pipeline_name, expected_pipeline_name) + + # we check we have what we need in tkn_providers. 
This test should + # be removed when this integration controls all tekton resources + def test_managed_resources_from_desired_resources(self) -> None: + self.test_data.saas_files = [self.saas1, self.saas2, self.saas2_wr] + self.test_data.providers = [self.provider1, self.provider2_wr] + + tkn_providers = otr.fetch_tkn_providers(None) + _ = otr.fetch_desired_resources(tkn_providers) + p1_managed = tkn_providers[self.provider1['name']]['namespace'][ + 'managedResourceNames'] + p2_managed = tkn_providers[self.provider2_wr['name']]['namespace'][ + 'managedResourceNames'] + + self.assertEqual(len(p1_managed), 2) + self.assertEqual(len(p2_managed), 2) + + # 1 namespace task, 1 saas file task, 1 saas file pipeline + for managed in p1_managed: + if managed['resource'] == 'Task': + self.assertEqual(len(managed['resourceNames']), 2) + else: + self.assertEqual(len(managed['resourceNames']), 1) + + # 1 namespace task, 2 saas file tasks, 2 saas file pipelines + for managed in p2_managed: + if managed['resource'] == 'Task': + self.assertEqual(len(managed['resourceNames']), 3) + else: + self.assertEqual(len(managed['resourceNames']), 2) + + def test_set_deploy_resources_default(self) -> None: + self.test_data.saas_files = [self.saas1] + self.test_data.providers = [self.provider1] + desired_resources = otr.fetch_desired_resources( + otr.fetch_tkn_providers(None)) + + # we need to locate the onePerSaasFile task in the desired resources + # we could be very strict and find the onePerSaasFile task in + # self.provider1 or just use the actual structure of the fixtures + task_name = otr.build_one_per_saas_file_tkn_object_name( + template_name=self.provider1['taskTemplates'][0]['name'], + saas_file_name=self.saas1['name']) + self._test_deploy_resources_in_task(desired_resources, task_name, + otr.DEFAULT_DEPLOY_RESOURCES) + + def test_set_deploy_resources_from_provider(self) -> None: + self.test_data.saas_files = [self.saas2] + self.test_data.providers = [self.provider2_wr] + desired_resources = otr.fetch_desired_resources( + otr.fetch_tkn_providers(None)) + + task_name = otr.build_one_per_saas_file_tkn_object_name( + template_name=self.provider2_wr['taskTemplates'][0]['name'], + saas_file_name=self.saas2['name']) + self._test_deploy_resources_in_task( + desired_resources, task_name, self.provider2_wr['deployResources']) + + def test_set_deploy_resources_from_saas_file(self) -> None: + self.test_data.saas_files = [self.saas2_wr] + self.test_data.providers = [self.provider2_wr] + desired_resources = otr.fetch_desired_resources( + otr.fetch_tkn_providers(None)) + + task_name = otr.build_one_per_saas_file_tkn_object_name( + template_name=self.provider2_wr['taskTemplates'][0]['name'], + saas_file_name=self.saas2['name']) + self._test_deploy_resources_in_task( + desired_resources, task_name, self.saas2_wr['deployResources']) + + def test_task_templates_name_duplicates(self) -> None: + self.provider4_wtd = \ + self.fxt.get_json('provider4-with-task-duplicates.json') + self.saas4 = self.fxt.get_json('saas4.json') + self.test_data.saas_files = [self.saas4] + self.test_data.providers = [self.provider4_wtd] + + msg = r'There are duplicates in task templates names in tekton ' \ + r'provider provider4-with-task-duplicates' + self.assertRaisesRegex(otr.OpenshiftTektonResourcesBadConfigError, msg, + otr.fetch_desired_resources, + otr.fetch_tkn_providers(None)) + + def test_task_templates_unknown_task(self) -> None: + self.provider5_wut = \ + self.fxt.get_json('provider5-with-unknown-task.json') + self.saas5 = 
self.fxt.get_json('saas5.json') + self.test_data.saas_files = [self.saas5] + self.test_data.providers = [self.provider5_wut] + + msg = r'Unknown task this-is-an-unknown-task in pipeline template ' \ + r'openshift-saas-deploy' + self.assertRaisesRegex(otr.OpenshiftTektonResourcesBadConfigError, msg, + otr.fetch_desired_resources, + otr.fetch_tkn_providers(None)) + + @patch(f'{MODULE}.DEFAULT_DEPLOY_RESOURCES_STEP_NAME', 'unknown-step') + def test_task_templates_unknown_deploy_resources_step(self) -> None: + self.test_data.saas_files = [self.saas1] + self.test_data.providers = [self.provider1] + msg = r'Cannot find a step named unknown-step to set resources in ' \ + r'task template openshift-saas-deploy' + self.assertRaisesRegex(otr.OpenshiftTektonResourcesBadConfigError, msg, + otr.fetch_desired_resources, + otr.fetch_tkn_providers(None)) + + @patch(f'{MODULE}.RESOURCE_MAX_LENGTH', 1) + def test_task_templates_resource_too_long(self) -> None: + self.test_data.saas_files = [self.saas1] + self.test_data.providers = [self.provider1] + msg = r'name o-openshift-saas-deploy-saas1 is longer than 1 characters' + self.assertRaisesRegex(otr.OpenshiftTektonResourcesNameTooLongError, + msg, otr.fetch_desired_resources, + otr.fetch_tkn_providers(None))
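
A minimal standalone sketch (not part of the patch above) of the Tekton object
naming scheme the integration enforces: names get the single-character prefix
"o" (OBJECTS_PREFIX) so they stay under the 63-character OpenShift resource
name limit, onePerSaasFile objects also embed the saas file name, and anything
longer than RESOURCE_MAX_LENGTH is rejected. Helper and exception names here
only mirror those in reconcile/openshift_tekton_resources.py; the demo values
under __main__ are illustrative.

    OBJECTS_PREFIX = 'o'
    RESOURCE_MAX_LENGTH = 63  # OpenShift object names are limited to 63 chars


    class NameTooLongError(Exception):
        pass


    def check_resource_max_length(name: str) -> None:
        # Refuse names that would be rejected when applied to the cluster
        if len(name) > RESOURCE_MAX_LENGTH:
            raise NameTooLongError(
                f'name {name} is longer than {RESOURCE_MAX_LENGTH} characters')


    def one_per_namespace_name(template_name: str) -> str:
        # One object per namespace: prefix + task template name
        name = f'{OBJECTS_PREFIX}-{template_name}'
        check_resource_max_length(name)
        return name


    def one_per_saas_file_name(template_name: str, saas_file_name: str) -> str:
        # One object per saas file: prefix + template name + saas file name
        name = f'{OBJECTS_PREFIX}-{template_name}-{saas_file_name}'
        check_resource_max_length(name)
        return name


    if __name__ == '__main__':
        # Matches the names asserted in test_fetch_desired_resources_names
        print(one_per_namespace_name(
            'push-gateway-openshift-saas-deploy-task-status-metric'))
        print(one_per_saas_file_name('openshift-saas-deploy', 'saas1'))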