chore(eks): Integ Test for OIDCP Certificate Retrieval (#22608)
This adds an integ test that verifies that granting IAM permissions to a service account allows Kubernetes pods using that service account to perform AWS API calls. If the cluster's OIDC provider is configured with the wrong certificate thumbprint, the test fails.
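For reference, a rough sketch of the pattern this test exercises, using the `@aws-cdk/aws-eks` service-account API (the names, the granted action, and the pod manifest below are illustrative, not taken from this change):

```ts
import * as eks from '@aws-cdk/aws-eks';
import * as iam from '@aws-cdk/aws-iam';

declare const cluster: eks.Cluster;

// A service account backed by an IAM role through the cluster's OIDC provider (IRSA).
const serviceAccount = cluster.addServiceAccount('SdkCallServiceAccount', {
  name: 'sdk-call-sa', // illustrative name
});

// Grant the service account an AWS permission; pods running under it inherit it.
serviceAccount.role.addToPrincipalPolicy(new iam.PolicyStatement({
  actions: ['s3:CreateBucket'],
  resources: ['arn:aws:s3:::*'],
}));

// A pod that uses the service account can now make the granted AWS API call,
// but only if the OIDC provider's certificate thumbprint is correct, which is
// exactly what the integ test checks end to end.
cluster.addManifest('SdkCallPod', {
  apiVersion: 'v1',
  kind: 'Pod',
  metadata: { name: 'sdk-call', namespace: 'default' },
  spec: {
    serviceAccountName: serviceAccount.serviceAccountName,
    containers: [{ name: 'app', image: 'docker-image-built-from-this-pr' }],
  },
});
```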

----

### All Submissions:

* [x] Have you followed the guidelines in our [Contributing guide?](https://github.com/aws/aws-cdk/blob/main/CONTRIBUTING.md)

### Adding new Unconventional Dependencies:

* [ ] This PR adds new unconventional dependencies following the process described [here](https://github.com/aws/aws-cdk/blob/main/CONTRIBUTING.md/#adding-new-unconventional-dependencies)

### New Features

* [x] Have you added the new feature to an [integration test](https://github.com/aws/aws-cdk/blob/main/INTEGRATION_TESTS.md)?
	* [x] Did you use `yarn integ` to deploy the infrastructure and generate the snapshot (i.e. `yarn integ` without `--dry-run`)?

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*
comcalvi authored and Naumel committed Nov 4, 2022
1 parent 8733cbd commit 1123ced
Showing 62 changed files with 12,861 additions and 4 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/yarn-upgrade.yml
@@ -38,8 +38,6 @@ jobs:
      - name: Install Tools
        run: |-
          npm -g install lerna npm-check-updates@^9.0.0
      - name: Build CLI
        run: cd packages/aws-cdk && ../../scripts/buildup
      - name: Build Integ Runner
        run: cd packages/@aws-cdk/integ-runner && ../../../scripts/buildup
      - name: List Mono-Repo Packages
@@ -69,6 +67,8 @@ jobs:
          for pj in $(find packages/aws-cdk/lib/init-templates -name package.json); do
            (cd $(dirname $pj) && ncu --upgrade --reject='@types/jest,@types/node,@types/prettier,@types/fs-extra,constructs,typescript,aws-sdk,aws-sdk-mock,ts-jest,jest,${{ steps.list-packages.outputs.list }}')
          done
          # Upgrade dependencies in the aws-eks integ test docker app
          cd packages/@aws-cdk/aws-eks/test/sdk-call-integ-test-docker-app/app/ && ncu --upgrade --reject='@types/jest,@types/node,@types/prettier,@types/fs-extra,constructs,typescript,aws-sdk,aws-sdk-mock,ts-jest,jest,${{ steps.list-packages.outputs.list }}'
      # This will ensure the current lockfile is up-to-date with the dependency specifications (necessary for "yarn update" to run)
      - name: Run "yarn install"
4 changes: 3 additions & 1 deletion packages/@aws-cdk/aws-eks/package.json
@@ -102,6 +102,7 @@
"dependencies": {
"@aws-cdk/aws-autoscaling": "0.0.0",
"@aws-cdk/aws-ec2": "0.0.0",
"@aws-cdk/aws-ecr-assets": "0.0.0",
"@aws-cdk/aws-s3-assets": "0.0.0",
"@aws-cdk/aws-iam": "0.0.0",
"@aws-cdk/aws-kms": "0.0.0",
@@ -134,7 +135,8 @@
"@aws-cdk/lambda-layer-awscli": "0.0.0",
"@aws-cdk/lambda-layer-kubectl": "0.0.0",
"@aws-cdk/lambda-layer-node-proxy-agent": "0.0.0",
"constructs": "^10.0.0"
"constructs": "^10.0.0",
"@aws-cdk/aws-ecr-assets": "0.0.0"
},
"engines": {
"node": ">= 14.15.0"
51 changes: 51 additions & 0 deletions packages/@aws-cdk/aws-eks/test/bucket-pinger/bucket-pinger.ts
@@ -0,0 +1,51 @@
import * as ec2 from '@aws-cdk/aws-ec2';
import * as iam from '@aws-cdk/aws-iam';
import * as lambda from '@aws-cdk/aws-lambda';
import { CustomResource, Token, Duration } from '@aws-cdk/core';
import * as cr from '@aws-cdk/custom-resources';
import { Construct } from 'constructs';

export interface PingerProps {
  readonly securityGroup?: ec2.SecurityGroup;
  readonly vpc?: ec2.IVpc;
  readonly subnets?: ec2.ISubnet[];
}
export class BucketPinger extends Construct {

  private _resource: CustomResource;

  constructor(scope: Construct, id: string, props: PingerProps) {
    super(scope, id);

    const func = new lambda.Function(this, 'Function', {
      code: lambda.Code.fromAsset(`${__dirname}/function`),
      handler: 'index.handler',
      runtime: lambda.Runtime.PYTHON_3_9,
      vpc: props.vpc,
      vpcSubnets: props.subnets ? { subnets: props.subnets } : undefined,
      securityGroups: props.securityGroup ? [props.securityGroup] : undefined,
      timeout: Duration.minutes(1),
    });

    if (!func.role) {
      throw new Error('pinger lambda has no execution role!');
    }

    func.role.addToPrincipalPolicy(new iam.PolicyStatement({
      actions: ['s3:DeleteBucket', 's3:ListBucket'],
      resources: ['arn:aws:s3:::*'],
    }));

    const provider = new cr.Provider(this, 'Provider', {
      onEventHandler: func,
    });

    this._resource = new CustomResource(this, 'Resource', {
      serviceToken: provider.serviceToken,
    });
  }

  public get response() {
    return Token.asString(this._resource.getAtt('Value'));
  }
}
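A minimal sketch of how this construct might be wired into the integ test stack (stack name, construct id, and import path are assumed, not part of this diff):

```ts
import { App, CfnOutput, Stack } from '@aws-cdk/core';
import { BucketPinger } from './bucket-pinger/bucket-pinger';

const app = new App();
const stack = new Stack(app, 'aws-eks-service-account-sdk-call-test'); // stack name assumed

// The custom resource checks (via head_bucket) that the bucket created from
// inside the cluster actually exists, and deletes it when the stack is torn down.
const pinger = new BucketPinger(stack, 'S3BucketPinger', {});

// Surface the Lambda's response as a stack output so the result is visible.
new CfnOutput(stack, 'PingerResponse', { value: pinger.response });
```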
34 changes: 34 additions & 0 deletions packages/@aws-cdk/aws-eks/test/bucket-pinger/function/index.py
@@ -0,0 +1,34 @@
import json
import logging
import boto3

logger = logging.getLogger()
logger.setLevel(logging.INFO)

def handler(event, context):
    print(json.dumps(event))

    request_type = event['RequestType']
    props = event['ResourceProperties']

    s3_bucket_name = 'amazingly-made-sdk-call-created-eks-bucket'
    s3 = boto3.client('s3')

    if request_type in ['Create', 'Update']:
        logger.info(f'making sdk call to check if bucket with name {s3_bucket_name} exists')

        try:
            s3.head_bucket(Bucket=s3_bucket_name)
        except Exception as error:
            raise RuntimeError(f'failed to head bucket with error: {str(error)}')
        return {'Data': {'Value': f'confirmed that bucket with name {s3_bucket_name} exists' }}

    elif request_type == 'Delete':
        logger.info(f'making sdk call to delete bucket with name {s3_bucket_name}')

        try:
            s3.delete_bucket(Bucket=s3_bucket_name)
        except Exception as error:
            # If the bucket does not exist, then this error will be thrown
            raise RuntimeError(f'failed to delete bucket: {str(error)}')
        return {'Data': {'Value': f'bucket with name {s3_bucket_name} has been deleted' }}
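The bucket this function checks for is expected to be created from inside the cluster by the `sdk-call-integ-test-docker-app` referenced in the workflow change above. That app is not shown in this excerpt, so the following is only a hedged sketch of what such an app could look like:

```ts
// Hypothetical shape of the containerized app. It relies on the credentials
// injected through the pod's service account (IRSA) and creates the bucket
// that the BucketPinger custom resource above then checks for.
import * as AWS from 'aws-sdk';

const bucketName = 'amazingly-made-sdk-call-created-eks-bucket';
const s3 = new AWS.S3();

s3.createBucket({ Bucket: bucketName }).promise()
  .then(() => console.log(`created bucket ${bucketName}`))
  .catch((err) => {
    // If the OIDC thumbprint is wrong, the pod gets no valid credentials and this call fails.
    console.error('sdk call failed', err);
    process.exit(1);
  });
```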
@@ -0,0 +1,95 @@
import json
import logging
import os
import subprocess

logger = logging.getLogger()
logger.setLevel(logging.INFO)

# these are coming from the kubectl layer
os.environ['PATH'] = '/opt/kubectl:/opt/awscli:' + os.environ['PATH']

outdir = os.environ.get('TEST_OUTDIR', '/tmp')
kubeconfig = os.path.join(outdir, 'kubeconfig')


def apply_handler(event, context):
    logger.info(json.dumps(dict(event, ResponseURL='...')))

    request_type = event['RequestType']
    props = event['ResourceProperties']

    # resource properties (all required)
    cluster_name = props['ClusterName']
    manifest_text = props['Manifest']
    role_arn = props['RoleArn']
    prune_label = props.get('PruneLabel', None)
    overwrite = props.get('Overwrite', 'false').lower() == 'true'
    skip_validation = props.get('SkipValidation', 'false').lower() == 'true'

    # "log in" to the cluster
    cmd = [ 'aws', 'eks', 'update-kubeconfig',
        '--role-arn', role_arn,
        '--name', cluster_name,
        '--kubeconfig', kubeconfig
    ]
    logger.info(f'Running command: {cmd}')
    subprocess.check_call(cmd)

    if os.path.isfile(kubeconfig):
        os.chmod(kubeconfig, 0o600)

    # write resource manifests in sequence: { r1 }{ r2 }{ r3 } (this is how
    # a stream of JSON objects can be included in a k8s manifest).
    manifest_list = json.loads(manifest_text)
    manifest_file = os.path.join(outdir, 'manifest.yaml')
    with open(manifest_file, "w") as f:
        f.writelines(map(lambda obj: json.dumps(obj), manifest_list))

    logger.info("manifest written to: %s" % manifest_file)

    kubectl_opts = []
    if skip_validation:
        kubectl_opts.extend(['--validate=false'])

    if request_type == 'Create':
        # if "overwrite" is enabled, then we use "apply" for CREATE operations
        # which technically means we can determine the desired state of an
        # existing resource.
        if overwrite:
            kubectl('apply', manifest_file, *kubectl_opts)
        else:
            # --save-config will allow us to use "apply" later
            kubectl_opts.extend(['--save-config'])
            kubectl('create', manifest_file, *kubectl_opts)
    elif request_type == 'Update':
        if prune_label is not None:
            kubectl_opts.extend(['--prune', '-l', prune_label])

        kubectl('apply', manifest_file, *kubectl_opts)
    elif request_type == "Delete":
        try:
            kubectl('delete', manifest_file)
        except Exception as e:
            logger.info("delete error: %s" % e)


def kubectl(verb, file, *opts):
    maxAttempts = 3
    retry = maxAttempts
    while retry > 0:
        try:
            cmd = ['kubectl', verb, '--kubeconfig', kubeconfig, '-f', file] + list(opts)
            logger.info(f'Running command: {cmd}')
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as exc:
            output = exc.output
            if b'i/o timeout' in output and retry > 0:
                retry = retry - 1
                logger.info("kubectl timed out, retries left: %s" % retry)
            else:
                raise Exception(output)
        else:
            logger.info(output)
            return
    raise Exception(f'Operation failed after {maxAttempts} attempts: {output}')
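This handler appears to be the snapshot copy of the kubectl `apply` provider that backs `eks.KubernetesManifest`. For orientation, a hedged sketch of the construct-side call that populates the resource properties read above (the manifest contents are illustrative):

```ts
import * as eks from '@aws-cdk/aws-eks';

declare const cluster: eks.Cluster;

// The ClusterName, RoleArn, Manifest, Overwrite, SkipValidation and PruneLabel
// properties read by the handler are populated from a construct like this one.
new eks.KubernetesManifest(cluster.stack, 'ExampleManifest', {
  cluster,
  manifest: [{
    apiVersion: 'v1',
    kind: 'ConfigMap',
    metadata: { name: 'example-config', namespace: 'default' },
    data: { key: 'value' },
  }],
  overwrite: true,       // taken by the "apply on Create" branch above
  skipValidation: false, // maps to kubectl's --validate=false flag
  prune: true,           // drives the PruneLabel used on Update
});
```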
@@ -0,0 +1,88 @@
import json
import logging
import os
import subprocess
import time

logger = logging.getLogger()
logger.setLevel(logging.INFO)

# these are coming from the kubectl layer
os.environ['PATH'] = '/opt/kubectl:/opt/awscli:' + os.environ['PATH']

outdir = os.environ.get('TEST_OUTDIR', '/tmp')
kubeconfig = os.path.join(outdir, 'kubeconfig')


def get_handler(event, context):
    logger.info(json.dumps(dict(event, ResponseURL='...')))

    request_type = event['RequestType']
    props = event['ResourceProperties']

    # resource properties (all required)
    cluster_name = props['ClusterName']
    role_arn = props['RoleArn']

    # "log in" to the cluster
    subprocess.check_call([ 'aws', 'eks', 'update-kubeconfig',
        '--role-arn', role_arn,
        '--name', cluster_name,
        '--kubeconfig', kubeconfig
    ])

    if os.path.isfile(kubeconfig):
        os.chmod(kubeconfig, 0o600)

    object_type = props['ObjectType']
    object_name = props['ObjectName']
    object_namespace = props['ObjectNamespace']
    json_path = props['JsonPath']
    timeout_seconds = props['TimeoutSeconds']

    # json path should be surrounded with '{}'
    path = '{{{0}}}'.format(json_path)
    if request_type == 'Create' or request_type == 'Update':
        output = wait_for_output(['get', '-n', object_namespace, object_type, object_name, "-o=jsonpath='{{{0}}}'".format(json_path)], int(timeout_seconds))
        return {'Data': {'Value': output}}
    elif request_type == 'Delete':
        pass
    else:
        raise Exception("invalid request type %s" % request_type)

def wait_for_output(args, timeout_seconds):

    end_time = time.time() + timeout_seconds
    error = None

    while time.time() < end_time:
        try:
            # the output is surrounded with '', so we unquote
            output = kubectl(args).decode('utf-8')[1:-1]
            if output:
                return output
        except Exception as e:
            error = str(e)
            # also a recoverable error
            if 'NotFound' in error:
                pass
        time.sleep(10)

    raise RuntimeError(f'Timeout waiting for output from kubectl command: {args} (last_error={error})')

def kubectl(args):
    retry = 3
    while retry > 0:
        try:
            cmd = [ 'kubectl', '--kubeconfig', kubeconfig ] + args
            output = subprocess.check_output(cmd, stderr=subprocess.PIPE)
        except subprocess.CalledProcessError as exc:
            output = exc.output + exc.stderr
            if b'i/o timeout' in output and retry > 0:
                logger.info("kubectl timed out, retries left: %s" % retry)
                retry = retry - 1
            else:
                raise Exception(output)
        else:
            logger.info(output)
            return output
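Likewise, this `get` handler corresponds to `eks.KubernetesObjectValue`, which supplies the `ObjectType`, `ObjectName`, `ObjectNamespace`, `JsonPath` and `TimeoutSeconds` properties read above. A hedged usage sketch (the object name and JSON path are illustrative):

```ts
import * as eks from '@aws-cdk/aws-eks';
import { Duration } from '@aws-cdk/core';

declare const cluster: eks.Cluster;

// ObjectType, ObjectName, ObjectNamespace, JsonPath and TimeoutSeconds map
// directly onto the properties of this construct.
const podPhase = new eks.KubernetesObjectValue(cluster.stack, 'SdkCallPodPhase', {
  cluster,
  objectType: 'pod',
  objectName: 'sdk-call',      // illustrative object name
  objectNamespace: 'default',
  jsonPath: '.status.phase',   // wrapped in '{...}' by the handler for kubectl -o=jsonpath
  timeout: Duration.minutes(5),
});

// podPhase.value resolves to the jsonpath output returned by wait_for_output().
```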
