From 19a287f02bc427644837956466213ee65457a857 Mon Sep 17 00:00:00 2001
From: Petrovskyi Anatolii
Date: Sun, 19 Dec 2021 22:12:22 +0200
Subject: [PATCH] feat(eks): imported kubectl provider for imported clusters
 (#14689)

This resolves [issue#12107](https://github.com/aws/aws-cdk/issues/12107): it adds the ability to provide an existing kubectl provider to an imported cluster. This way we can create the k8s kubectl role once, tie its trusted entity to a single lambda, and pass that single lambda to all CDK stacks that work with imported clusters.

@iliapolo can you please take a look at this and let me know whether the approach is fine? If it is, I will add documentation and tests.

----

*By submitting this pull request, I confirm that my contribution is made under the terms of the Apache-2.0 license*
---
 packages/@aws-cdk/aws-eks/README.md               |  16 +++
 packages/@aws-cdk/aws-eks/lib/cluster.ts          |  20 +++-
 packages/@aws-cdk/aws-eks/lib/index.ts            |   1 +
 .../@aws-cdk/aws-eks/lib/kubectl-provider.ts      | 100 +++++++++++++++++-
 .../@aws-cdk/aws-eks/test/cluster.test.ts         |  83 +++++++++++++++
 5 files changed, 215 insertions(+), 5 deletions(-)

diff --git a/packages/@aws-cdk/aws-eks/README.md b/packages/@aws-cdk/aws-eks/README.md
index 1245b02812ee4..1f0840d64a851 100644
--- a/packages/@aws-cdk/aws-eks/README.md
+++ b/packages/@aws-cdk/aws-eks/README.md
@@ -640,6 +640,22 @@ const cluster = new eks.Cluster(this, 'hello-eks', {
 
 The resources are created in the cluster by running `kubectl apply` from a python lambda function.
 
+By default, CDK will create a new python lambda function to apply your k8s manifests. If you want to use an existing kubectl provider function, for example to keep the trusted entities on your IAM roles tightly scoped, you can import the existing provider and then use the imported provider when importing the cluster:
+
+```ts
+const handlerRole = iam.Role.fromRoleArn(this, 'HandlerRole', 'arn:aws:iam::123456789012:role/lambda-role');
+const kubectlProvider = eks.KubectlProvider.fromKubectlProviderAttributes(this, 'KubectlProvider', {
+  functionArn: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1',
+  kubectlRoleArn: 'arn:aws:iam::123456789012:role/kubectl-role',
+  handlerRole,
+});
+
+const cluster = eks.Cluster.fromClusterAttributes(this, 'Cluster', {
+  clusterName: 'cluster',
+  kubectlProvider,
+});
+```
+
 #### Environment
 
 You can configure the environment of this function by specifying it at cluster instantiation. For example, this can be useful in order to configure an http proxy:
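Reviewer note (not part of the patch): the `handlerRole` above is the provider lambda's execution role, and per `KubectlProviderAttributes` below it must be able to assume `kubectlRoleArn`. A minimal sketch of that trust relationship, assuming you own the kubectl role and reusing the placeholder ARN from the example; the role would additionally need to be mapped into the cluster's `aws-auth` config map for kubectl calls to be authorized:

```ts
import * as iam from '@aws-cdk/aws-iam';
import { Stack } from '@aws-cdk/core';

declare const stack: Stack;

// Hypothetical kubectl role: its only trusted entity is the execution role of
// the single kubectl handler lambda (placeholder ARN from the README example).
const kubectlRole = new iam.Role(stack, 'KubectlRole', {
  assumedBy: new iam.ArnPrincipal('arn:aws:iam::123456789012:role/lambda-role'),
});
```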
diff --git a/packages/@aws-cdk/aws-eks/lib/cluster.ts b/packages/@aws-cdk/aws-eks/lib/cluster.ts
index 5489fa9fb8249..2b917f8f93f7e 100644
--- a/packages/@aws-cdk/aws-eks/lib/cluster.ts
+++ b/packages/@aws-cdk/aws-eks/lib/cluster.ts
@@ -18,7 +18,7 @@ import { INSTANCE_TYPES } from './instance-types';
 import { KubernetesManifest, KubernetesManifestOptions } from './k8s-manifest';
 import { KubernetesObjectValue } from './k8s-object-value';
 import { KubernetesPatch } from './k8s-patch';
-import { KubectlProvider } from './kubectl-provider';
+import { IKubectlProvider, KubectlProvider } from './kubectl-provider';
 import { Nodegroup, NodegroupOptions } from './managed-nodegroup';
 import { OpenIdConnectProvider } from './oidc-provider';
 import { BottleRocketImage } from './private/bottlerocket';
@@ -134,6 +134,13 @@ export interface ICluster extends IResource, ec2.IConnectable {
    */
   readonly kubectlLayer?: lambda.ILayerVersion;
 
+  /**
+   * Kubectl Provider for issuing kubectl commands against this cluster.
+   *
+   * If not defined, a default provider will be used.
+   */
+  readonly kubectlProvider?: IKubectlProvider;
+
   /**
    * Amount of memory to allocate to the provider's lambda function.
    */
@@ -335,6 +342,13 @@ export interface ClusterAttributes {
    */
   readonly kubectlLayer?: lambda.ILayerVersion;
 
+  /**
+   * KubectlProvider for issuing kubectl commands.
+   *
+   * @default - Default CDK provider
+   */
+  readonly kubectlProvider?: IKubectlProvider;
+
   /**
    * Amount of memory to allocate to the provider's lambda function.
    *
@@ -1969,9 +1983,10 @@ class ImportedCluster extends ClusterBase {
   public readonly kubectlSecurityGroup?: ec2.ISecurityGroup | undefined;
   public readonly kubectlPrivateSubnets?: ec2.ISubnet[] | undefined;
   public readonly kubectlLayer?: lambda.ILayerVersion;
+  public readonly kubectlProvider?: IKubectlProvider;
+  public readonly onEventLayer?: lambda.ILayerVersion;
   public readonly kubectlMemory?: Size;
   public readonly clusterHandlerSecurityGroup?: ec2.ISecurityGroup | undefined;
-  public readonly onEventLayer?: lambda.ILayerVersion;
   public readonly prune: boolean;
 
   // so that `clusterSecurityGroup` on `ICluster` can be configured without optionality, avoiding users from having
@@ -1990,6 +2005,7 @@ class ImportedCluster extends ClusterBase {
     this.kubectlLayer = props.kubectlLayer;
     this.kubectlMemory = props.kubectlMemory;
     this.clusterHandlerSecurityGroup = props.clusterHandlerSecurityGroupId ? ec2.SecurityGroup.fromSecurityGroupId(this, 'ClusterHandlerSecurityGroup', props.clusterHandlerSecurityGroupId) : undefined;
+    this.kubectlProvider = props.kubectlProvider;
     this.onEventLayer = props.onEventLayer;
     this.prune = props.prune ?? true;
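Reviewer note (not part of the patch): a sketch of the two import paths that `ClusterAttributes` supports after this change, assuming we are inside some stack and the ARNs are placeholders:

```ts
import { Stack } from '@aws-cdk/core';
import * as eks from '@aws-cdk/aws-eks';

declare const stack: Stack;
declare const kubectlProvider: eks.IKubectlProvider;

// Without `kubectlProvider`: as before this change, a default provider is
// provisioned in this stack once a kubectl-backed resource is defined.
const clusterA = eks.Cluster.fromClusterAttributes(stack, 'ClusterA', {
  clusterName: 'cluster',
  kubectlRoleArn: 'arn:aws:iam::123456789012:role/kubectl-role',
});

// With `kubectlProvider`: the imported provider is reused and no new lambda is created.
const clusterB = eks.Cluster.fromClusterAttributes(stack, 'ClusterB', {
  clusterName: 'cluster',
  kubectlProvider,
});
```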
diff --git a/packages/@aws-cdk/aws-eks/lib/index.ts b/packages/@aws-cdk/aws-eks/lib/index.ts
index 286662f7581d5..34b44a7c69cba 100644
--- a/packages/@aws-cdk/aws-eks/lib/index.ts
+++ b/packages/@aws-cdk/aws-eks/lib/index.ts
@@ -7,6 +7,7 @@ export * from './helm-chart';
 export * from './k8s-patch';
 export * from './k8s-manifest';
 export * from './k8s-object-value';
+export * from './kubectl-provider';
 export * from './fargate-cluster';
 export * from './service-account';
 export * from './managed-nodegroup';
diff --git a/packages/@aws-cdk/aws-eks/lib/kubectl-provider.ts b/packages/@aws-cdk/aws-eks/lib/kubectl-provider.ts
index 0e5db3c6a51e3..161ef77cfe3f3 100644
--- a/packages/@aws-cdk/aws-eks/lib/kubectl-provider.ts
+++ b/packages/@aws-cdk/aws-eks/lib/kubectl-provider.ts
@@ -1,7 +1,7 @@
 import * as path from 'path';
 import * as iam from '@aws-cdk/aws-iam';
 import * as lambda from '@aws-cdk/aws-lambda';
-import { Duration, Stack, NestedStack, Names } from '@aws-cdk/core';
+import { Duration, Stack, NestedStack, Names, IConstruct } from '@aws-cdk/core';
 import * as cr from '@aws-cdk/custom-resources';
 import { AwsCliLayer } from '@aws-cdk/lambda-layer-awscli';
 import { KubectlLayer } from '@aws-cdk/lambda-layer-kubectl';
@@ -12,6 +12,9 @@ import { ICluster, Cluster } from './cluster';
 // eslint-disable-next-line
 import { Construct as CoreConstruct } from '@aws-cdk/core';
 
+/**
+ * Kubectl Provider Properties
+ */
 export interface KubectlProviderProps {
   /**
    * The cluster to control.
@@ -19,15 +22,69 @@
    */
   readonly cluster: ICluster;
 }
 
-export class KubectlProvider extends NestedStack {
+/**
+ * Kubectl Provider Attributes
+ */
+export interface KubectlProviderAttributes {
+  /**
+   * The kubectl provider lambda ARN.
+   */
+  readonly functionArn: string;
+
+  /**
+   * The IAM role to assume in order to perform kubectl operations against this cluster.
+   */
+  readonly kubectlRoleArn: string;
+
+  /**
+   * The IAM execution role of the handler. This role must be able to assume kubectlRoleArn.
+   */
+  readonly handlerRole: iam.IRole;
+}
+
+/**
+ * Imported KubectlProvider that can be used in place of the default one created by CDK
+ */
+export interface IKubectlProvider extends IConstruct {
+  /**
+   * The custom resource provider's service token.
+   */
+  readonly serviceToken: string;
+  /**
+   * The IAM role to assume in order to perform kubectl operations against this cluster.
+   */
+  readonly roleArn: string;
+
+  /**
+   * The IAM execution role of the handler.
+   */
+  readonly handlerRole: iam.IRole;
+}
+
+/**
+ * Implementation of the Kubectl Lambda
+ */
+export class KubectlProvider extends NestedStack implements IKubectlProvider {
+
+  /**
+   * Take an existing provider or create a new one based on the cluster.
+   *
+   * @param scope Construct
+   * @param cluster k8s cluster
+   */
   public static getOrCreate(scope: Construct, cluster: ICluster) {
     // if this is an "owned" cluster, it has a provider associated with it
     if (cluster instanceof Cluster) {
       return cluster._attachKubectlResourceScope(scope);
     }
 
-    // if this is an imported cluster, we need to provision a custom resource provider in this stack
+    // if this is an imported cluster, it may have a predefined kubectl provider
+    if (cluster.kubectlProvider) {
+      return cluster.kubectlProvider;
+    }
+
+    // if this is an imported cluster and there is no kubectl provider defined, we need to provision a custom resource provider in this stack
     // we will define one per stack for each cluster based on the cluster uniqueid
     const uid = `${Names.nodeUniqueId(cluster.node)}-KubectlProvider`;
     const stack = Stack.of(scope);
@@ -39,6 +96,17 @@ export class KubectlProvider extends NestedStack {
     return provider;
   }
 
+  /**
+   * Import an existing provider.
+   *
+   * @param scope Construct
+   * @param id the id of this construct
+   * @param attrs attributes for the provider
+   */
+  public static fromKubectlProviderAttributes(scope: Construct, id: string, attrs: KubectlProviderAttributes): IKubectlProvider {
+    return new ImportedKubectlProvider(scope, id, attrs);
+  }
+
   /**
    * The custom resource provider's service token.
    */
@@ -115,3 +183,29 @@
   }
 
 }
+
+class ImportedKubectlProvider extends CoreConstruct implements IKubectlProvider {
+
+  /**
+   * The custom resource provider's service token.
+   */
+  public readonly serviceToken: string;
+
+  /**
+   * The IAM role to assume in order to perform kubectl operations against this cluster.
+   */
+  public readonly roleArn: string;
+
+  /**
+   * The IAM execution role of the handler.
+   */
+  public readonly handlerRole: iam.IRole;
+
+  constructor(scope: Construct, id: string, props: KubectlProviderAttributes) {
+    super(scope, id);
+
+    this.serviceToken = props.functionArn;
+    this.roleArn = props.kubectlRoleArn;
+    this.handlerRole = props.handlerRole;
+  }
+}
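Reviewer note (not part of the patch): a simplified sketch of how kubectl-backed constructs consume the provider returned by `getOrCreate`; the real constructs (such as `KubernetesManifest`) pass additional properties, so treat this as illustrative only:

```ts
import { CustomResource, Stack } from '@aws-cdk/core';
import * as eks from '@aws-cdk/aws-eks';

declare const stack: Stack;
declare const cluster: eks.ICluster;

// Resolution order, mirroring getOrCreate above: an "owned" cluster returns its
// attached provider; an imported cluster with `kubectlProvider` returns the
// imported one; otherwise one default provider is created per stack and cluster.
const provider = eks.KubectlProvider.getOrCreate(stack, cluster);

new CustomResource(stack, 'ExampleManifest', {
  serviceToken: provider.serviceToken, // the kubectl handler lambda
  resourceType: 'Custom::AWSCDK-EKS-KubernetesResource',
  properties: {
    RoleArn: provider.roleArn, // assumed by the handler before running kubectl
    // ...manifest payload elided
  },
});
```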
diff --git a/packages/@aws-cdk/aws-eks/test/cluster.test.ts b/packages/@aws-cdk/aws-eks/test/cluster.test.ts
index 0cdf55100598d..5520d86d532ef 100644
--- a/packages/@aws-cdk/aws-eks/test/cluster.test.ts
+++ b/packages/@aws-cdk/aws-eks/test/cluster.test.ts
@@ -12,6 +12,7 @@ import * as cdk8s from 'cdk8s';
 import * as constructs from 'constructs';
 import * as YAML from 'yaml';
 import * as eks from '../lib';
+import { KubectlProvider } from '../lib/kubectl-provider';
 import { BottleRocketImage } from '../lib/private/bottlerocket';
 import { testFixture, testFixtureNoVpc } from './util';
 
@@ -980,6 +981,88 @@
 
   });
 
+  test('import cluster with existing kubectl provider function', () => {
+
+    const { stack } = testFixture();
+
+    const handlerRole = iam.Role.fromRoleArn(stack, 'HandlerRole', 'arn:aws:iam::123456789012:role/lambda-role');
+    const kubectlProvider = KubectlProvider.fromKubectlProviderAttributes(stack, 'KubectlProvider', {
+      functionArn: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1',
+      kubectlRoleArn: 'arn:aws:iam::123456789012:role/kubectl-role',
+      handlerRole: handlerRole,
+    });
+
+    const cluster = eks.Cluster.fromClusterAttributes(stack, 'Cluster', {
+      clusterName: 'cluster',
+      kubectlProvider: kubectlProvider,
+    });
+
+    expect(cluster.kubectlProvider).toEqual(kubectlProvider);
+  });
+
+  test('import cluster with existing kubectl provider function should work as expected with resources relying on kubectl getOrCreate', () => {
+
+    const { stack } = testFixture();
+
+    const handlerRole = iam.Role.fromRoleArn(stack, 'HandlerRole', 'arn:aws:iam::123456789012:role/lambda-role');
+    const kubectlProvider = KubectlProvider.fromKubectlProviderAttributes(stack, 'KubectlProvider', {
+      functionArn: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1',
+      kubectlRoleArn: 'arn:aws:iam::123456789012:role/kubectl-role',
+      handlerRole: handlerRole,
+    });
+
+    const cluster = eks.Cluster.fromClusterAttributes(stack, 'Cluster', {
+      clusterName: 'cluster',
+      kubectlProvider: kubectlProvider,
+    });
+
+    new eks.HelmChart(stack, 'Chart', {
+      cluster: cluster,
+      chart: 'chart',
+    });
+
+    expect(stack).toHaveResourceLike('Custom::AWSCDK-EKS-HelmChart', {
+      ServiceToken: kubectlProvider.serviceToken,
+      RoleArn: kubectlProvider.roleArn,
+    });
+
+    new eks.KubernetesPatch(stack, 'Patch', {
+      cluster: cluster,
+      applyPatch: {},
+      restorePatch: {},
+      resourceName: 'PatchResource',
+    });
+
+    expect(stack).toHaveResourceLike('Custom::AWSCDK-EKS-KubernetesPatch', {
+      ServiceToken: kubectlProvider.serviceToken,
+      RoleArn: kubectlProvider.roleArn,
+    });
+
+    new eks.KubernetesManifest(stack, 'Manifest', {
+      cluster: cluster,
+      manifest: [],
+    });
+
+    expect(stack).toHaveResourceLike('Custom::AWSCDK-EKS-KubernetesResource', {
+      ServiceToken: kubectlProvider.serviceToken,
+      RoleArn: kubectlProvider.roleArn,
+    });
+
+    new eks.KubernetesObjectValue(stack, 'ObjectValue', {
+      cluster: cluster,
+      jsonPath: '',
+      objectName: 'name',
+      objectType: 'type',
+    });
+
+    expect(stack).toHaveResourceLike('Custom::AWSCDK-EKS-KubernetesObjectValue', {
+      ServiceToken: kubectlProvider.serviceToken,
+      RoleArn: kubectlProvider.roleArn,
+    });
+
+    expect(cluster.kubectlProvider).not.toBeInstanceOf(eks.KubectlProvider);
+  });
+
   test('import cluster with new kubectl private subnets', () => {
 
     const { stack, vpc } = testFixture();
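Reviewer note (not part of the patch): a sketch of the end-to-end setup this PR enables, one pre-existing kubectl lambda shared by several stacks; all ARNs are the same placeholders used in the tests, not real resources:

```ts
import { App, Stack } from '@aws-cdk/core';
import * as iam from '@aws-cdk/aws-iam';
import * as eks from '@aws-cdk/aws-eks';

const app = new App();

// Every consumer stack imports the same kubectl lambda instead of provisioning
// its own provider, so the kubectl role only has to trust a single lambda.
for (const name of ['StackA', 'StackB']) {
  const stack = new Stack(app, name);

  const handlerRole = iam.Role.fromRoleArn(stack, 'HandlerRole', 'arn:aws:iam::123456789012:role/lambda-role');
  const kubectlProvider = eks.KubectlProvider.fromKubectlProviderAttributes(stack, 'KubectlProvider', {
    functionArn: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1',
    kubectlRoleArn: 'arn:aws:iam::123456789012:role/kubectl-role',
    handlerRole,
  });

  const cluster = eks.Cluster.fromClusterAttributes(stack, 'Cluster', {
    clusterName: 'cluster',
    kubectlProvider,
  });

  new eks.KubernetesManifest(stack, 'Manifest', {
    cluster,
    manifest: [{ apiVersion: 'v1', kind: 'ConfigMap', metadata: { name: `example-${name.toLowerCase()}` }, data: {} }],
  });
}
```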