diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/lib/cluster.ts b/packages/@aws-cdk/aws-eks-v2-alpha/lib/cluster.ts index d58fada0a1720..f9ecedb9efce4 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/lib/cluster.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/lib/cluster.ts @@ -1,7 +1,6 @@ import * as fs from 'fs'; import * as path from 'path'; import { Construct, Node } from 'constructs'; -import * as semver from 'semver'; import * as YAML from 'yaml'; import { IAccessPolicy, IAccessEntry, AccessEntry, AccessPolicy, AccessScopeType } from './access-entry'; import { IAddon, Addon } from './addon'; @@ -12,19 +11,18 @@ import { INSTANCE_TYPES } from './instance-types'; import { KubernetesManifest, KubernetesManifestOptions } from './k8s-manifest'; import { KubernetesObjectValue } from './k8s-object-value'; import { KubernetesPatch } from './k8s-patch'; -import { IKubectlProvider, KubectlProvider } from './kubectl-provider'; +import { IKubectlProvider, KubectlProvider, KubectlProviderOptions } from './kubectl-provider'; import { Nodegroup, NodegroupOptions } from './managed-nodegroup'; import { OpenIdConnectProvider } from './oidc-provider'; import { BottleRocketImage } from './private/bottlerocket'; import { ServiceAccount, ServiceAccountOptions } from './service-account'; -import { LifecycleLabel, renderAmazonLinuxUserData, renderBottlerocketUserData } from './user-data'; +import { renderAmazonLinuxUserData, renderBottlerocketUserData } from './user-data'; import * as autoscaling from 'aws-cdk-lib/aws-autoscaling'; import * as ec2 from 'aws-cdk-lib/aws-ec2'; import * as iam from 'aws-cdk-lib/aws-iam'; import * as kms from 'aws-cdk-lib/aws-kms'; -import * as lambda from 'aws-cdk-lib/aws-lambda'; import * as ssm from 'aws-cdk-lib/aws-ssm'; -import { Annotations, CfnOutput, CfnResource, IResource, Resource, Stack, Tags, Token, Duration, Size, CfnTag, ArnComponents } from 'aws-cdk-lib/core'; +import { Annotations, CfnOutput, CfnResource, IResource, Resource, Tags, Token, Duration, ArnComponents } from 'aws-cdk-lib/core'; import { CfnCluster } from 'aws-cdk-lib/aws-eks'; // defaults are based on https://eksctl.io @@ -102,50 +100,6 @@ export interface ICluster extends IResource, ec2.IConnectable { */ readonly eksPodIdentityAgent?: IAddon; - /** - * An IAM role that can perform kubectl operations against this cluster. - * - * The role should be mapped to the `system:masters` Kubernetes RBAC role. - */ - readonly kubectlRole?: iam.IRole; - - /** - * Custom environment variables when running `kubectl` against this cluster. - */ - readonly kubectlEnvironment?: { [key: string]: string }; - - /** - * A security group to use for `kubectl` execution. - * - * If this is undefined, the k8s endpoint is expected to be accessible - * publicly. - */ - readonly kubectlSecurityGroup?: ec2.ISecurityGroup; - - /** - * Subnets to host the `kubectl` compute resources. - * - * If this is undefined, the k8s endpoint is expected to be accessible - * publicly. - */ - readonly kubectlPrivateSubnets?: ec2.ISubnet[]; - - /** - * An IAM role that can perform kubectl operations against this cluster. - * - * The role should be mapped to the `system:masters` Kubernetes RBAC role. - * - * This role is directly passed to the lambda handler that sends Kube Ctl commands to the cluster. 
- */ - readonly kubectlLambdaRole?: iam.IRole; - - /** - * An AWS Lambda layer that includes `kubectl` and `helm` - * - * If not defined, a default layer will be used containing Kubectl 1.20 and Helm 3.8 - */ - readonly kubectlLayer?: lambda.ILayerVersion; - /** * Specify which IP family is used to assign Kubernetes pod and service IP addresses. * @@ -155,11 +109,12 @@ export interface ICluster extends IResource, ec2.IConnectable { readonly ipFamily?: IpFamily; /** - * An AWS Lambda layer that contains the `aws` CLI. + * Options for creating the kubectl provider - a lambda function that executes `kubectl` and `helm` + * against the cluster. If defined, `kubectlLayer` is a required property. * - * If not defined, a default layer will be used containing the AWS CLI 1.x. + * If not defined, no kubectl provider will be created. */ - readonly awscliLayer?: lambda.ILayerVersion; + readonly kubectlProviderOptions?: KubectlProviderOptions; /** * Kubectl Provider for issuing kubectl commands against it * @@ -168,18 +123,6 @@ export interface ICluster extends IResource, ec2.IConnectable { readonly kubectlProvider?: IKubectlProvider; - /** - * Amount of memory to allocate to the provider's lambda function. - */ - readonly kubectlMemory?: Size; - - /** - * An AWS Lambda layer that includes the NPM dependency `proxy-agent`. - * - * If not defined, a default layer will be used. - */ - readonly onEventLayer?: lambda.ILayerVersion; - /** * Indicates whether Kubernetes resources can be automatically pruned. When * this is enabled (default), prune labels will be allocated and injected to @@ -305,45 +248,6 @@ export interface ClusterAttributes { */ readonly securityGroupIds?: string[]; - /** - * An IAM role with cluster administrator and "system:masters" permissions. - * @default - if not specified, it not be possible to issue `kubectl` commands - * against an imported cluster. - */ - readonly kubectlRoleArn?: string; - - /** - * An IAM role that can perform kubectl operations against this cluster. - * - * The role should be mapped to the `system:masters` Kubernetes RBAC role. - * - * This role is directly passed to the lambda handler that sends Kube Ctl commands - * to the cluster. - * @default - if not specified, the default role created by a lambda function will - * be used. - */ - readonly kubectlLambdaRole?: iam.IRole; - - /** - * Environment variables to use when running `kubectl` against this cluster. - * @default - no additional variables - */ - readonly kubectlEnvironment?: { [name: string]: string }; - - /** - * A security group to use for `kubectl` execution. If not specified, the k8s - * endpoint is expected to be accessible publicly. - * @default - k8s endpoint is expected to be accessible publicly - */ - readonly kubectlSecurityGroupId?: string; - - /** - * Subnets to host the `kubectl` compute resources. If not specified, the k8s - * endpoint is expected to be accessible publicly. - * @default - k8s endpoint is expected to be accessible publicly - */ - readonly kubectlPrivateSubnetIds?: string[]; - /** * An Open ID Connect provider for this cluster that can be used to configure service accounts. * You can either import an existing provider using `iam.OpenIdConnectProvider.fromProviderArn`, * @@ -352,39 +256,6 @@ export interface ClusterAttributes { */ readonly openIdConnectProvider?: iam.IOpenIdConnectProvider; - /** - * An AWS Lambda Layer which includes `kubectl` and Helm. - * - * This layer is used by the kubectl handler to apply manifests and install - * helm charts.
You must pick an appropriate releases of one of the - * `@aws-cdk/layer-kubectl-vXX` packages, that works with the version of - * Kubernetes you have chosen. If you don't supply this value `kubectl` - * 1.20 will be used, but that version is most likely too old. - * - * The handler expects the layer to include the following executables: - * - * ``` - * /opt/helm/helm - * /opt/kubectl/kubectl - * ``` - * - * @default - a default layer with Kubectl 1.20 and helm 3.8. - */ - readonly kubectlLayer?: lambda.ILayerVersion; - - /** - * An AWS Lambda layer that contains the `aws` CLI. - * - * The handler expects the layer to include the following executables: - * - * ``` - * /opt/awscli/aws - * ``` - * - * @default - a default layer with the AWS CLI 1.x - */ - readonly awscliLayer?: lambda.ILayerVersion; - /** * KubectlProvider for issuing kubectl commands. * @@ -393,23 +264,12 @@ readonly kubectlProvider?: IKubectlProvider; /** - * Amount of memory to allocate to the provider's lambda function. + * Options for creating the kubectl provider - a lambda function that executes `kubectl` and `helm` + * against the cluster. If defined, `kubectlLayer` is a required property. * - * @default Size.gibibytes(1) + * If not defined, no kubectl provider will be created. */ - readonly kubectlMemory?: Size; - - /** - * An AWS Lambda Layer which includes the NPM dependency `proxy-agent`. This layer - * is used by the onEvent handler to route AWS SDK requests through a proxy. - * - * The handler expects the layer to include the following node_modules: - * - * proxy-agent - * - * @default - a layer bundled with this module. - */ - readonly onEventLayer?: lambda.ILayerVersion; + readonly kubectlProviderOptions?: KubectlProviderOptions; /** * Indicates whether Kubernetes resources added through `addManifest()` can be @@ -425,7 +285,7 @@ /** * Options for configuring an EKS cluster. */ -export interface CommonClusterOptions { +export interface ClusterCommonOptions { /** * The VPC in which to create the Cluster. * @@ -470,28 +330,6 @@ export interface CommonClusterOptions { */ readonly version: KubernetesVersion; - /** - * Determines whether a CloudFormation output with the name of the cluster - * will be synthesized. - * - * @default false - */ - readonly outputClusterName?: boolean; - - /** - * Determines whether a CloudFormation output with the `aws eks - * update-kubeconfig` command will be synthesized. This command will include - * the cluster name and, if applicable, the ARN of the masters IAM role. - * - * @default true - */ - readonly outputConfigCommand?: boolean; -} - -/** - * Options for EKS clusters. - */ -export interface ClusterOptions extends CommonClusterOptions { /** * An IAM role that will be added to the `system:masters` Kubernetes RBAC * group. @@ -511,14 +349,6 @@ export interface ClusterOptions extends CommonClusterOptions { */ readonly coreDnsComputeType?: CoreDnsComputeType; - /** - * Determines whether a CloudFormation output with the ARN of the "masters" - * IAM role will be synthesized (if `mastersRole` is specified). - * - * @default false - */ - readonly outputMastersRoleArn?: boolean; - /** * Configure access to the Kubernetes API server endpoint.. * @@ -528,74 +358,6 @@ export interface ClusterOptions extends CommonClusterOptions { */ readonly endpointAccess?: EndpointAccess; - /** - * Environment variables for the kubectl execution. Only relevant for kubectl enabled clusters.
- * - * @default - No environment variables. - */ - readonly kubectlEnvironment?: { [key: string]: string }; - - /** - * An AWS Lambda Layer which includes `kubectl` and Helm. - * - * This layer is used by the kubectl handler to apply manifests and install - * helm charts. You must pick an appropriate releases of one of the - * `@aws-cdk/layer-kubectl-vXX` packages, that works with the version of - * Kubernetes you have chosen. If you don't supply this value `kubectl` - * 1.20 will be used, but that version is most likely too old. - * - * The handler expects the layer to include the following executables: - * - * ``` - * /opt/helm/helm - * /opt/kubectl/kubectl - * ``` - * - * @default - a default layer with Kubectl 1.20. - */ - readonly kubectlLayer?: lambda.ILayerVersion; - - /** - * An AWS Lambda layer that contains the `aws` CLI. - * - * The handler expects the layer to include the following executables: - * - * ``` - * /opt/awscli/aws - * ``` - * - * @default - a default layer with the AWS CLI 1.x - */ - readonly awscliLayer?: lambda.ILayerVersion; - - /** - * Amount of memory to allocate to the provider's lambda function. - * - * @default Size.gibibytes(1) - */ - readonly kubectlMemory?: Size; - - /** - * An AWS Lambda Layer which includes the NPM dependency `proxy-agent`. This layer - * is used by the onEvent handler to route AWS SDK requests through a proxy. - * - * By default, the provider will use the layer included in the - * "aws-lambda-layer-node-proxy-agent" SAR application which is available in all - * commercial regions. - * - * To deploy the layer locally define it in your app as follows: - * - * ```ts - * const layer = new lambda.LayerVersion(this, 'proxy-agent-layer', { - * code: lambda.Code.fromAsset(`${__dirname}/layer.zip`), - * compatibleRuntimes: [lambda.Runtime.NODEJS_LATEST], - * }); - * ``` - * - * @default - a layer bundled with this module. - */ - readonly onEventLayer?: lambda.ILayerVersion; - /** * Indicates whether Kubernetes resources added through `addManifest()` can be * automatically pruned. When this is enabled (default), prune labels will be @@ -640,12 +402,28 @@ export interface ClusterOptions extends CommonClusterOptions { * @default - The controller is not installed. */ readonly albController?: AlbControllerOptions; + /** * The cluster log types which you want to enable. * * @default - none */ readonly clusterLogging?: ClusterLoggingTypes[]; + + /** + * The tags assigned to the EKS cluster + * + * @default - none + */ + readonly tags?: { [key: string]: string }; + + /** + * Options for creating the kubectl provider - a lambda function that executes `kubectl` and `helm` + * against the cluster. If defined, `kubectlLayer` is a required property. + * + * If not defined, no kubectl provider will be created. + */ + readonly kubectlProviderOptions?: KubectlProviderOptions; } /** @@ -741,9 +519,9 @@ export class EndpointAccess { } /** - * Common configuration props for EKS clusters. + * Properties for configuring a standard EKS cluster (non-Fargate) */ -export interface ClusterProps extends ClusterOptions { +export interface ClusterProps extends ClusterCommonOptions { /** * Number of instances to allocate as an initial capacity for this cluster. @@ -772,13 +550,6 @@ export interface ClusterProps extends ClusterOptions { */ readonly defaultCapacityType?: DefaultCapacityType; - /** - * The IAM role to pass to the Kubectl Lambda Handler.
- * - * @default - Default Lambda IAM Execution Role - */ - readonly kubectlLambdaRole?: iam.IRole; - /** * Whether or not IAM principal of the cluster creator was set as a cluster admin access entry * during cluster creation time. @@ -788,14 +559,6 @@ export interface ClusterProps extends ClusterOptions { * @default true */ readonly bootstrapClusterCreatorAdminPermissions?: boolean; - - /** - * The tags assigned to the EKS cluster - * TODO: revisit the tag type - * - * @default - none - */ - readonly tags?: CfnTag[]; } /** @@ -1007,17 +770,9 @@ abstract class ClusterBase extends Resource implements ICluster { public abstract readonly clusterSecurityGroup: ec2.ISecurityGroup; public abstract readonly clusterEncryptionConfigKeyArn: string; public abstract readonly ipFamily?: IpFamily; - public abstract readonly kubectlRole?: iam.IRole; - public abstract readonly kubectlLambdaRole?: iam.IRole; - public abstract readonly kubectlEnvironment?: { [key: string]: string }; - public abstract readonly kubectlSecurityGroup?: ec2.ISecurityGroup; - public abstract readonly kubectlPrivateSubnets?: ec2.ISubnet[]; - public abstract readonly kubectlMemory?: Size; public abstract readonly prune: boolean; public abstract readonly openIdConnectProvider: iam.IOpenIdConnectProvider; - private _spotInterruptHandler?: HelmChart; - /** * Defines a Kubernetes resource in this cluster. * @@ -1074,28 +829,6 @@ abstract class ClusterBase extends Resource implements ICluster { }); } - /** - * Installs the AWS spot instance interrupt handler on the cluster if it's not - * already added. - */ - private addSpotInterruptHandler() { - if (!this._spotInterruptHandler) { - this._spotInterruptHandler = this.addHelmChart('spot-interrupt-handler', { - chart: 'aws-node-termination-handler', - version: '0.18.0', - repository: 'https://aws.github.io/eks-charts', - namespace: 'kube-system', - values: { - nodeSelector: { - lifecycle: LifecycleLabel.SPOT, - }, - }, - }); - } - - return this._spotInterruptHandler; - } - /** * Connect capacity in the form of an existing AutoScalingGroup to the EKS cluster. * @@ -1166,12 +899,6 @@ abstract class ClusterBase extends Resource implements ICluster { value: autoScalingGroup.role.roleArn, }); - const addSpotInterruptHandler = options.spotInterruptHandler ?? true; - // if this is an ASG with spot instances, install the spot interrupt handler (only if kubectl is enabled). - if (autoScalingGroup.spotPrice && addSpotInterruptHandler) { - this.addSpotInterruptHandler(); - } - if (this instanceof Cluster && this.albController) { // the controller runs on the worker nodes so they cannot // be deleted before the controller. @@ -1300,46 +1027,6 @@ export class Cluster extends ClusterBase { */ public readonly defaultNodegroup?: Nodegroup; - /** - * An IAM role that can perform kubectl operations against this cluster. - * - * The role should be mapped to the `system:masters` Kubernetes RBAC role. - */ - public readonly kubectlRole?: iam.IRole; - - /** - * An IAM role that can perform kubectl operations against this cluster. - * - * The role should be mapped to the `system:masters` Kubernetes RBAC role. - * - * This role is directly passed to the lambda handler that sends Kube Ctl commands to the cluster. - * @default - if not specified, the default role created by a lambda function will - * be used. - */ - - public readonly kubectlLambdaRole?: iam.IRole; - - /** - * Custom environment variables when running `kubectl` against this cluster. 
- */ - public readonly kubectlEnvironment?: { [key: string]: string }; - - /** - * A security group to use for `kubectl` execution. - * - * @default - If not specified, the k8s endpoint is expected to be accessible - * publicly. - */ - public readonly kubectlSecurityGroup?: ec2.ISecurityGroup; - - /** - * Subnets to host the `kubectl` compute resources. - * - * @default - If not specified, the k8s endpoint is expected to be accessible - * publicly. - */ - public readonly kubectlPrivateSubnets?: ec2.ISubnet[]; - /** * Specify which IP family is used to assign Kubernetes pod and service IP addresses. * @@ -1348,12 +1035,6 @@ export class Cluster extends ClusterBase { */ public readonly ipFamily?: IpFamily; - /** - * An IAM role with administrative permissions to create or update the - * cluster. This role also has `systems:master` permissions. - */ - public readonly adminRole: iam.Role; - /** * If the cluster has one (or more) FargateProfiles associated, this array * will hold a reference to each. @@ -1370,31 +1051,6 @@ export class Cluster extends ClusterBase { */ private _eksPodIdentityAgent?: IAddon; - /** - * An AWS Lambda layer that includes `kubectl` and `helm` - * - * If not defined, a default layer will be used containing Kubectl 1.20 and Helm 3.8 - */ - readonly kubectlLayer?: lambda.ILayerVersion; - - /** - * An AWS Lambda layer that contains the `aws` CLI. - * - * If not defined, a default layer will be used containing the AWS CLI 1.x. - */ - readonly awscliLayer?: lambda.ILayerVersion; - - /** - * The amount of memory allocated to the kubectl provider's lambda function. - */ - public readonly kubectlMemory?: Size; - - /** - * The AWS Lambda layer that contains the NPM dependency `proxy-agent`. If - * undefined, a SAR app that contains this layer will be used. - */ - readonly onEventLayer?: lambda.ILayerVersion; - /** * Determines if Kubernetes resources can be pruned automatically. */ @@ -1433,7 +1089,9 @@ export class Cluster extends ClusterBase { */ private readonly _kubectlReadyBarrier: CfnResource; - private readonly _kubectlResourceProvider: KubectlProvider; + private readonly _kubectlProviderOptions?: KubectlProviderOptions; + + private readonly _kubectlProvider?: IKubectlProvider; /** * Initiates an EKS Cluster with the supplied arguments @@ -1447,24 +1105,11 @@ export class Cluster extends ClusterBase { physicalName: props.clusterName, }); - const stack = Stack.of(this); - this.prune = props.prune ?? true; this.vpc = props.vpc || new ec2.Vpc(this, 'DefaultVpc'); - - const kubectlVersion = new semver.SemVer(`${props.version.version}.0`); - if (semver.gte(kubectlVersion, '1.22.0') && !props.kubectlLayer) { - Annotations.of(this).addWarningV2('@aws-cdk/aws-eks:clusterKubectlLayerNotSpecified', `You created a cluster with Kubernetes Version ${props.version.version} without specifying the kubectlLayer property. This may cause failures as the kubectl version provided with aws-cdk-lib is 1.20, which is only guaranteed to be compatible with Kubernetes versions 1.19-1.21. Please provide a kubectlLayer from @aws-cdk/lambda-layer-kubectl-v${kubectlVersion.minor}.`); - } this.version = props.version; - // since this lambda role needs to be added to the trust policy of the creation role, - // we must create it in this scope (instead of the KubectlProvider nested stack) to avoid - // a circular dependency. - this.kubectlLambdaRole = props.kubectlLambdaRole ? 
props.kubectlLambdaRole : new iam.Role(this, 'KubectlHandlerRole', { - assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'), - managedPolicies: [iam.ManagedPolicy.fromAwsManagedPolicyName('service-role/AWSLambdaBasicExecutionRole')], - }); + this._kubectlProviderOptions = props.kubectlProviderOptions; this.tagSubnets(); @@ -1498,12 +1143,7 @@ export class Cluster extends ClusterBase { } : undefined; this.endpointAccess = props.endpointAccess ?? EndpointAccess.PUBLIC_AND_PRIVATE; - this.kubectlEnvironment = props.kubectlEnvironment; - this.kubectlLayer = props.kubectlLayer; - this.awscliLayer = props.awscliLayer; - this.kubectlMemory = props.kubectlMemory; this.ipFamily = props.ipFamily ?? IpFamily.IP_V4; - this.onEventLayer = props.onEventLayer; const privateSubnets = this.selectPrivateSubnets().slice(0, 16); const publicAccessDisabled = !this.endpointAccess._config.publicAccess; @@ -1554,10 +1194,12 @@ export class Cluster extends ClusterBase { ipFamily: this.ipFamily, serviceIpv4Cidr: props.serviceIpv4Cidr, }, - tags: props.tags, + tags: Object.keys(props.tags ?? {}).map(k => ({ key: k, value: props.tags![k] })), logging: this.logging, }); + let kubectlSubnets = this._kubectlProviderOptions?.privateSubnets; + if (this.endpointAccess._config.privateAccess && privateSubnets.length !== 0) { // when private access is enabled and the vpc has private subnets, lets connect @@ -1568,7 +1210,7 @@ export class Cluster extends ClusterBase { throw new Error('Private endpoint access requires the VPC to have DNS support and DNS hostnames enabled. Use `enableDnsHostnames: true` and `enableDnsSupport: true` when creating the VPC.'); } - this.kubectlPrivateSubnets = privateSubnets; + kubectlSubnets = privateSubnets; // the vpc must exist in order to properly delete the cluster (since we run `kubectl delete`). // this ensures that. @@ -1602,47 +1244,33 @@ export class Cluster extends ClusterBase { defaultPort: ec2.Port.tcp(443), // Control Plane has an HTTPS API }); - // we can use the cluster security group since its already attached to the cluster - // and configured to allow connections from itself. - this.kubectlSecurityGroup = this.clusterSecurityGroup; - - this.adminRole = new iam.Role(this, 'kubectlRole', { - assumedBy: this.kubectlLambdaRole, - }); - - this.kubectlRole = this.adminRole; - - this.grantAccess('ClusterAdminRoleAccess', this.kubectlRole.roleArn, [ - AccessPolicy.fromAccessPolicyName('AmazonEKSClusterAdminPolicy', { - accessScopeType: AccessScopeType.CLUSTER, - }), - ]); - - this._kubectlResourceProvider = this.defineKubectlProvider(); - - const updateConfigCommandPrefix = `aws eks update-kubeconfig --name ${this.clusterName}`; - const getTokenCommandPrefix = `aws eks get-token --cluster-name ${this.clusterName}`; - const commonCommandOptions = [`--region ${stack.region}`]; + if (props.kubectlProviderOptions) { + this._kubectlProvider = new KubectlProvider(this, 'KubectlProvider', { + cluster: this, + role: this._kubectlProviderOptions?.role, + awscliLayer: this._kubectlProviderOptions?.awscliLayer, + kubectlLayer: this._kubectlProviderOptions!.kubectlLayer, + environment: this._kubectlProviderOptions?.environment, + memory: this._kubectlProviderOptions?.memory, + privateSubnets: kubectlSubnets, + }); - if (props.outputClusterName) { - new CfnOutput(this, 'ClusterName', { value: this.clusterName }); + // give the handler role admin access to the cluster + // so it can deploy/query any resource. 
+ this.grantAccess('ClusterAdminRoleAccess', this._kubectlProvider?.role!.roleArn, [ + AccessPolicy.fromAccessPolicyName('AmazonEKSClusterAdminPolicy', { + accessScopeType: AccessScopeType.CLUSTER, + }), + ]); } // do not create a masters role if one is not provided. Trusting the accountRootPrincipal() is too permissive. if (props.mastersRole) { - const mastersRole = props.mastersRole; - this.grantAccess('mastersRoleAccess', props.mastersRole.roleArn, [ AccessPolicy.fromAccessPolicyName('AmazonEKSClusterAdminPolicy', { accessScopeType: AccessScopeType.CLUSTER, }), ]); - - if (props.outputMastersRoleArn) { - new CfnOutput(this, 'MastersRoleArn', { value: mastersRole.roleArn }); - } - - commonCommandOptions.push(`--role-arn ${mastersRole.roleArn}`); } if (props.albController) { @@ -1660,15 +1288,7 @@ export class Cluster extends ClusterBase { this.addNodegroupCapacity('DefaultCapacity', { instanceTypes: [instanceType], minSize: minCapacity }) : undefined; } - const outputConfigCommand = (props.outputConfigCommand ?? true) && props.mastersRole; - if (outputConfigCommand) { - const postfix = commonCommandOptions.join(' '); - new CfnOutput(this, 'ConfigCommand', { value: `${updateConfigCommandPrefix} ${postfix}` }); - new CfnOutput(this, 'GetTokenCommand', { value: `${getTokenCommandPrefix} ${postfix}` }); - } - this.defineCoreDnsComputeType(props.coreDnsComputeType ?? CoreDnsComputeType.EC2); - } /** @@ -1765,7 +1385,6 @@ export class Cluster extends ClusterBase { bootstrapOptions: options.bootstrapOptions, bootstrapEnabled: options.bootstrapEnabled, machineImageType: options.machineImageType, - spotInterruptHandler: options.spotInterruptHandler, }); if (nodeTypeForInstanceType(options.instanceType) === NodeType.INFERENTIA || @@ -1827,6 +1446,10 @@ export class Cluster extends ClusterBase { return this._openIdConnectProvider; } + public get kubectlProvider() { + return this._kubectlProvider; + } + /** * Retrieves the EKS Pod Identity Agent addon for the EKS cluster. * @@ -1878,22 +1501,6 @@ export class Cluster extends ClusterBase { return this._fargateProfiles; } - /** - * Adds a resource scope that requires `kubectl` to this cluster and returns - * the `KubectlProvider` which is the custom resource provider that should be - * used as the resource provider. - * - * Called from `HelmResource` and `KubernetesResource` - * - * @param resourceScope the construct scope in which kubectl resources are defined. - * - * @internal - */ - public _attachKubectlResourceScope(resourceScope: Construct): KubectlProvider { - Node.of(resourceScope).addDependency(this._kubectlReadyBarrier); - return this._kubectlResourceProvider; - } - /** * Adds an access entry to the cluster's access entries map. * @@ -1921,17 +1528,13 @@ export class Cluster extends ClusterBase { } } - private defineKubectlProvider() { - const uid = '@aws-cdk/aws-eks.KubectlProvider'; - - // since we can't have the provider connect to multiple networks, and we - // wanted to avoid resource tear down, we decided for now that we will only - // support a single EKS cluster per CFN stack. 
if (this.stack.node.tryFindChild(uid)) { - throw new Error('Only a single EKS cluster can be defined within a CloudFormation stack'); - } - - return new KubectlProvider(this.stack, uid, { cluster: this }); + /** + * Adds a dependency on this cluster's kubectl-ready barrier to a resource scope + * that requires `kubectl`, so its resources are only applied once the cluster is ready. + * + * @internal + */ + public _dependOnKubectlBarrier(resource: Construct) { + resource.node.addDependency(this._kubectlReadyBarrier); + } private selectPrivateSubnets(): ec2.ISubnet[] { @@ -2079,14 +1682,6 @@ export interface AutoScalingGroupCapacityOptions extends autoscaling.CommonAutoS * @default MachineImageType.AMAZON_LINUX_2 */ readonly machineImageType?: MachineImageType; - - /** - * Installs the AWS spot instance interrupt handler on the cluster if it's not - * already added. Only relevant if `spotPrice` is used. - * - * @default true - */ - readonly spotInterruptHandler?: boolean; } /** @@ -2179,14 +1774,6 @@ export interface AutoScalingGroupOptions { * @default MachineImageType.AMAZON_LINUX_2 */ readonly machineImageType?: MachineImageType; - - /** - * Installs the AWS spot instance interrupt handler on the cluster if it's not - * already added. Only relevant if `spotPrice` is configured on the auto-scaling group. - * - * @default true - */ - readonly spotInterruptHandler?: boolean; } /** @@ -2196,18 +1783,9 @@ class ImportedCluster extends ClusterBase { public readonly clusterName: string; public readonly clusterArn: string; public readonly connections = new ec2.Connections(); - public readonly kubectlRole?: iam.IRole; - public readonly kubectlLambdaRole?: iam.IRole; - public readonly kubectlEnvironment?: { [key: string]: string } | undefined; - public readonly kubectlSecurityGroup?: ec2.ISecurityGroup | undefined; - public readonly kubectlPrivateSubnets?: ec2.ISubnet[] | undefined; - public readonly kubectlLayer?: lambda.ILayerVersion; public readonly ipFamily?: IpFamily; - public readonly awscliLayer?: lambda.ILayerVersion; - public readonly kubectlProvider?: IKubectlProvider; - public readonly onEventLayer?: lambda.ILayerVersion; - public readonly kubectlMemory?: Size; public readonly prune: boolean; + public readonly kubectlProvider?: IKubectlProvider; // so that `clusterSecurityGroup` on `ICluster` can be configured without optionality, avoiding users from having // to null check on an instance of `Cluster`, which will always have this configured. @@ -2218,17 +1796,8 @@ this.clusterName = props.clusterName; this.clusterArn = this.stack.formatArn(clusterArnComponents(props.clusterName)); - this.kubectlRole = props.kubectlRoleArn ? iam.Role.fromRoleArn(this, 'KubectlRole', props.kubectlRoleArn) : undefined; - this.kubectlLambdaRole = props.kubectlLambdaRole; - this.kubectlSecurityGroup = props.kubectlSecurityGroupId ? ec2.SecurityGroup.fromSecurityGroupId(this, 'KubectlSecurityGroup', props.kubectlSecurityGroupId) : undefined; - this.kubectlEnvironment = props.kubectlEnvironment; - this.kubectlPrivateSubnets = props.kubectlPrivateSubnetIds ? props.kubectlPrivateSubnetIds.map((subnetid, index) => ec2.Subnet.fromSubnetId(this, `KubectlSubnet${index}`, subnetid)) : undefined; - this.kubectlLayer = props.kubectlLayer; this.ipFamily = props.ipFamily; - this.awscliLayer = props.awscliLayer; - this.kubectlMemory = props.kubectlMemory; this.kubectlProvider = props.kubectlProvider; - this.onEventLayer = props.onEventLayer; this.prune = props.prune ??
true; let i = 1; diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/lib/fargate-cluster.ts b/packages/@aws-cdk/aws-eks-v2-alpha/lib/fargate-cluster.ts index 022d6bc6ecdbf..cc7e6a93a7350 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/lib/fargate-cluster.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/lib/fargate-cluster.ts @@ -1,11 +1,11 @@ import { Construct } from 'constructs'; -import { Cluster, ClusterOptions, CoreDnsComputeType } from './cluster'; +import { Cluster, ClusterCommonOptions, CoreDnsComputeType } from './cluster'; import { FargateProfile, FargateProfileOptions } from './fargate-profile'; /** * Configuration props for EKS Fargate. */ -export interface FargateClusterProps extends ClusterOptions { +export interface FargateClusterProps extends ClusterCommonOptions { /** * Fargate Profile to create along with the cluster. * diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/lib/fargate-profile.ts b/packages/@aws-cdk/aws-eks-v2-alpha/lib/fargate-profile.ts index e3aec1839b7d8..51842dc2fe227 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/lib/fargate-profile.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/lib/fargate-profile.ts @@ -146,8 +146,6 @@ export class FargateProfile extends Construct implements ITaggable { managedPolicies: [iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEKSFargatePodExecutionRolePolicy')], }); - this.podExecutionRole.grantPassRole(props.cluster.adminRole); - if (props.subnetSelection && !props.vpc) { Annotations.of(this).addWarningV2('@aws-cdk/aws-eks:fargateProfileDefaultToPrivateSubnets', 'Vpc must be defined to use a custom subnet selection. All private subnets belonging to the EKS cluster will be used by default'); } diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/lib/helm-chart.ts b/packages/@aws-cdk/aws-eks-v2-alpha/lib/helm-chart.ts index fffdba99f81dd..23a8590b94eb1 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/lib/helm-chart.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/lib/helm-chart.ts @@ -133,7 +133,10 @@ export class HelmChart extends Construct { const stack = Stack.of(this); - const provider = KubectlProvider.getOrCreate(this, props.cluster); + const provider = KubectlProvider.getKubectlProvider(this, props.cluster); + if (!provider) { + throw new Error('Kubectl Provider is not defined in this cluster. Define it when creating the cluster'); + } const timeout = props.timeout?.toSeconds(); if (timeout && timeout > 900) { @@ -159,14 +162,13 @@ export class HelmChart extends Construct { // default to set atomic as false const atomic = props.atomic ?? false; - this.chartAsset?.grantRead(provider.handlerRole); + this.chartAsset?.grantRead(provider.role!); new CustomResource(this, 'Resource', { serviceToken: provider.serviceToken, resourceType: HelmChart.RESOURCE_TYPE, properties: { ClusterName: props.cluster.clusterName, - RoleArn: provider.roleArn, // TODO: bake into the provider's environment Release: props.release ?? 
Names.uniqueId(this).slice(-53).toLowerCase(), // Helm has a 53 character limit for the name Chart: this.chart, ChartAssetURL: this.chartAsset?.s3ObjectUrl, diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/lib/k8s-manifest.ts b/packages/@aws-cdk/aws-eks-v2-alpha/lib/k8s-manifest.ts index ea763bb96cc85..e0b127ac96179 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/lib/k8s-manifest.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/lib/k8s-manifest.ts @@ -124,7 +124,10 @@ export class KubernetesManifest extends Construct { super(scope, id); const stack = Stack.of(this); - const provider = KubectlProvider.getOrCreate(this, props.cluster); + const provider = KubectlProvider.getKubectlProvider(this, props.cluster); + if (!provider) { + throw new Error('Kubectl Provider is not defined in this cluster. Define it when creating the cluster'); + } const prune = props.prune ?? props.cluster.prune; const pruneLabel = prune @@ -144,7 +147,6 @@ export class KubernetesManifest extends Construct { // StepFunctions, CloudWatch Dashboards etc). Manifest: stack.toJsonString(props.manifest), ClusterName: props.cluster.clusterName, - RoleArn: provider.roleArn, // TODO: bake into provider's environment PruneLabel: pruneLabel, Overwrite: props.overwrite, SkipValidation: props.skipValidation, diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/lib/k8s-object-value.ts b/packages/@aws-cdk/aws-eks-v2-alpha/lib/k8s-object-value.ts index 9be1bd5a33293..a49a13066b239 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/lib/k8s-object-value.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/lib/k8s-object-value.ts @@ -62,14 +62,17 @@ export class KubernetesObjectValue extends Construct { constructor(scope: Construct, id: string, props: KubernetesObjectValueProps) { super(scope, id); - const provider = KubectlProvider.getOrCreate(this, props.cluster); + const provider = KubectlProvider.getKubectlProvider(this, props.cluster); + + if (!provider) { + throw new Error('Kubectl Provider is not defined in this cluster. Define it when creating the cluster'); + } this._resource = new CustomResource(this, 'Resource', { resourceType: KubernetesObjectValue.RESOURCE_TYPE, serviceToken: provider.serviceToken, properties: { ClusterName: props.cluster.clusterName, - RoleArn: provider.roleArn, ObjectType: props.objectType, ObjectName: props.objectName, ObjectNamespace: props.objectNamespace ?? 'default', diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/lib/k8s-patch.ts b/packages/@aws-cdk/aws-eks-v2-alpha/lib/k8s-patch.ts index 944b0bedd3a85..36b8f9dea5fca 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/lib/k8s-patch.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/lib/k8s-patch.ts @@ -72,7 +72,11 @@ export class KubernetesPatch extends Construct { super(scope, id); const stack = Stack.of(this); - const provider = KubectlProvider.getOrCreate(this, props.cluster); + + const provider = KubectlProvider.getKubectlProvider(this, props.cluster); + if (!provider) { + throw new Error('Kubectl Provider is not defined in this cluster. Define it when creating the cluster'); + } new CustomResource(this, 'Resource', { serviceToken: provider.serviceToken, @@ -83,7 +87,6 @@ export class KubernetesPatch extends Construct { ApplyPatchJson: stack.toJsonString(props.applyPatch), RestorePatchJson: stack.toJsonString(props.restorePatch), ClusterName: props.cluster.clusterName, - RoleArn: provider.roleArn, // TODO: bake into provider's environment PatchType: props.patchType ?? 
PatchType.STRATEGIC, }, }); diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/lib/kubectl-provider.ts b/packages/@aws-cdk/aws-eks-v2-alpha/lib/kubectl-provider.ts index 8c861ed5de2fe..78552d29c9e46 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/lib/kubectl-provider.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/lib/kubectl-provider.ts @@ -1,17 +1,67 @@ import { Construct, IConstruct } from 'constructs'; -import { ICluster, Cluster } from './cluster'; +import { Cluster, ICluster } from './cluster'; +import * as ec2 from 'aws-cdk-lib/aws-ec2'; import * as iam from 'aws-cdk-lib/aws-iam'; import * as lambda from 'aws-cdk-lib/aws-lambda'; -import { Duration, Stack, Names, CfnCondition, Fn, Aws } from 'aws-cdk-lib/core'; +import { Duration, CfnCondition, Fn, Aws, Size } from 'aws-cdk-lib/core'; import * as cr from 'aws-cdk-lib/custom-resources'; import { AwsCliLayer } from 'aws-cdk-lib/lambda-layer-awscli'; -import { KubectlLayer } from 'aws-cdk-lib/lambda-layer-kubectl'; import * as path from 'path'; +export interface KubectlProviderOptions { + /** + * An IAM role that can perform kubectl operations against this cluster. + * + * The role should be mapped to the `system:masters` Kubernetes RBAC role. + * + * This role is directly passed to the lambda handler that sends kubectl commands to the cluster. + * @default - if not specified, a default execution role will be created + * for the lambda function. + */ + readonly role?: iam.IRole; + + /** + * An AWS Lambda layer that contains the `aws` CLI. + * + * If not defined, a default layer will be used containing the AWS CLI 2.x. + */ + readonly awscliLayer?: lambda.ILayerVersion; + + /** + * Custom environment variables when running `kubectl` against this cluster. + */ + readonly environment?: { [key: string]: string }; + + /** + * A security group to use for `kubectl` execution. + * + * @default - If not specified, the k8s endpoint is expected to be accessible + * publicly. + */ + readonly securityGroup?: ec2.ISecurityGroup; + + /** + * The amount of memory allocated to the kubectl provider's lambda function. + */ + readonly memory?: Size; + + /** + * An AWS Lambda layer that includes `kubectl` and `helm`. + */ + readonly kubectlLayer: lambda.ILayerVersion; + + /** + * Subnets to host the `kubectl` compute resources. If not specified, the k8s + * endpoint is expected to be accessible publicly. + */ + readonly privateSubnets?: ec2.ISubnet[]; +} + /** * Properties for a KubectlProvider */ -export interface KubectlProviderProps { +export interface KubectlProviderProps extends KubectlProviderOptions { /** * The cluster to control. */ @@ -23,19 +73,17 @@ */ export interface KubectlProviderAttributes { /** - * The custom resource provider's service token. - */ - readonly functionArn: string; - - /** - * The IAM role to assume in order to perform kubectl operations against this cluster. + * The custom resource provider's service token (the ARN of the kubectl provider lambda function). */ - readonly kubectlRoleArn: string; + readonly serviceToken: string; /** - * The IAM execution role of the handler. This role must be able to assume kubectlRoleArn + * The role of the provider lambda function. + * Only required if you deploy helm charts using this imported provider. + * + * @default - no role. */ - readonly handlerRole: iam.IRole; + readonly role?: iam.IRole; } /** @@ -48,14 +96,10 @@ export interface IKubectlProvider extends IConstruct { readonly serviceToken: string; /** - * The IAM role to assume in order to perform kubectl operations against this cluster.
+ * The role of the provider lambda function. If undefined, + * you cannot use this provider to deploy helm charts. */ - readonly roleArn: string; - - /** - * The IAM execution role of the handler. - */ - readonly handlerRole: iam.IRole; + readonly role?: iam.IRole; } /** @@ -63,32 +107,19 @@ export class KubectlProvider extends Construct implements IKubectlProvider { /** - * Take existing provider or create new based on cluster + * Returns the kubectl provider of the cluster, if one is defined * * @param scope Construct * @param cluster k8s cluster */ - public static getOrCreate(scope: Construct, cluster: ICluster) { - // if this is an "owned" cluster, it has a provider associated with it + public static getKubectlProvider(scope: Construct, cluster: ICluster) { + // if this is an "owned" cluster, we need to wait for the kubectl barrier + // before applying any resources. if (cluster instanceof Cluster) { - return cluster._attachKubectlResourceScope(scope); - } - - // if this is an imported cluster, it maybe has a predefined kubectl provider? - if (cluster.kubectlProvider) { - return cluster.kubectlProvider; + cluster._dependOnKubectlBarrier(scope); } - // if this is an imported cluster and there is no kubectl provider defined, we need to provision a custom resource provider in this stack - // we will define one per stack for each cluster based on the cluster uniqueid - const uid = `${Names.nodeUniqueId(cluster.node)}-KubectlProvider`; - const stack = Stack.of(scope); - let provider = stack.node.tryFindChild(uid) as KubectlProvider; - if (!provider) { - provider = new KubectlProvider(stack, uid, { cluster }); - } - - return provider; + return cluster.kubectlProvider; } /** @@ -99,7 +130,11 @@ export class KubectlProvider extends Construct implements IKubectlProvider { * @param attrs attributes for the provider */ public static fromKubectlProviderAttributes(scope: Construct, id: string, attrs: KubectlProviderAttributes): IKubectlProvider { - return new ImportedKubectlProvider(scope, id, attrs); + class Import extends Construct implements IKubectlProvider { + public readonly serviceToken: string = attrs.serviceToken; + public readonly role?: iam.IRole = attrs.role; + } + return new Import(scope, id); } /** @@ -107,69 +142,59 @@ */ public readonly serviceToken: string; - /** - * The IAM role to assume in order to perform kubectl operations against this cluster. - */ - public readonly roleArn: string; - /** * The IAM execution role of the handler. */ - public readonly handlerRole: iam.IRole; + public readonly role?: iam.IRole; public constructor(scope: Construct, id: string, props: KubectlProviderProps) { super(scope, id); - const cluster = props.cluster; - - if (!cluster.kubectlRole) { - throw new Error('"kubectlRole" is not defined, cannot issue kubectl commands against this cluster'); - } - - if (cluster.kubectlPrivateSubnets && !cluster.kubectlSecurityGroup) { - throw new Error('"kubectlSecurityGroup" is required if "kubectlSubnets" is specified'); + const vpc = props.privateSubnets ? props.cluster.vpc : undefined; + let securityGroups; + if (props.privateSubnets && props.cluster.clusterSecurityGroup) { + securityGroups = [props.cluster.clusterSecurityGroup]; } - - const memorySize = cluster.kubectlMemory ? cluster.kubectlMemory.toMebibytes() : 1024; + const privateSubnets = props.privateSubnets ?
{ subnets: props.privateSubnets } : undefined; const handler = new lambda.Function(this, 'Handler', { timeout: Duration.minutes(15), description: 'onEvent handler for EKS kubectl resource provider', - memorySize, + memorySize: props.memory?.toMebibytes() ?? 1024, environment: { // required and recommended for boto3 AWS_STS_REGIONAL_ENDPOINTS: 'regional', - ...cluster.kubectlEnvironment, + ...props.environment, }, - role: cluster.kubectlLambdaRole ? cluster.kubectlLambdaRole : undefined, + role: props.role, code: lambda.Code.fromAsset(path.join(__dirname, 'kubectl-handler')), handler: 'index.handler', runtime: lambda.Runtime.PYTHON_3_11, // defined only when using private access - vpc: cluster.kubectlPrivateSubnets ? cluster.vpc : undefined, - securityGroups: cluster.kubectlPrivateSubnets && cluster.kubectlSecurityGroup ? [cluster.kubectlSecurityGroup] : undefined, - vpcSubnets: cluster.kubectlPrivateSubnets ? { subnets: cluster.kubectlPrivateSubnets } : undefined, + vpc, + securityGroups, + vpcSubnets: privateSubnets, }); // allow user to customize the layers with the tools we need - handler.addLayers(props.cluster.awscliLayer ?? new AwsCliLayer(this, 'AwsCliLayer')); - handler.addLayers(props.cluster.kubectlLayer ?? new KubectlLayer(this, 'KubectlLayer')); + handler.addLayers(props.awscliLayer ?? new AwsCliLayer(this, 'AwsCliLayer')); + handler.addLayers(props.kubectlLayer); - this.handlerRole = handler.role!; + const handlerRole = handler.role!; - this.handlerRole.addToPrincipalPolicy(new iam.PolicyStatement({ + handlerRole.addToPrincipalPolicy(new iam.PolicyStatement({ actions: ['eks:DescribeCluster'], - resources: [cluster.clusterArn], + resources: [props.cluster.clusterArn], })); // taken from the lambda default role logic. // makes it easier for roles to be passed in. if (handler.isBoundToVpc) { - handler.role?.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName('service-role/AWSLambdaVPCAccessExecutionRole')); + handlerRole.addManagedPolicy(iam.ManagedPolicy.fromAwsManagedPolicyName('service-role/AWSLambdaVPCAccessExecutionRole')); } // For OCI helm chart authorization. - this.handlerRole.addManagedPolicy( + handlerRole.addManagedPolicy( iam.ManagedPolicy.fromAwsManagedPolicyName('AmazonEC2ContainerRegistryReadOnly'), ); @@ -177,7 +202,7 @@ export class KubectlProvider extends Construct implements IKubectlProvider { * For OCI helm chart public ECR authorization. As ECR public is only available in `aws` partition, * we conditionally attach this policy when the AWS partition is `aws`. */ - const hasEcrPublicCondition = new CfnCondition(this.handlerRole.node.scope!, 'HasEcrPublic', { + const hasEcrPublicCondition = new CfnCondition(handlerRole.node.scope!, 'HasEcrPublic', { expression: Fn.conditionEquals(Aws.PARTITION, 'aws'), }); @@ -187,44 +212,16 @@ export class KubectlProvider extends Construct implements IKubectlProvider { Aws.NO_VALUE).toString(), ); - this.handlerRole.addManagedPolicy(iam.ManagedPolicy.fromManagedPolicyArn(this, 'conditionalPolicy', conditionalPolicy.managedPolicyArn)); - - // allow this handler to assume the kubectl role - cluster.kubectlRole.grant(this.handlerRole, 'sts:AssumeRole'); + handlerRole.addManagedPolicy(iam.ManagedPolicy.fromManagedPolicyArn(this, 'conditionalPolicy', conditionalPolicy.managedPolicyArn)); const provider = new cr.Provider(this, 'Provider', { onEventHandler: handler, - vpc: cluster.kubectlPrivateSubnets ? cluster.vpc : undefined, - vpcSubnets: cluster.kubectlPrivateSubnets ? 
{ subnets: cluster.kubectlPrivateSubnets } : undefined, - securityGroups: cluster.kubectlPrivateSubnets && cluster.kubectlSecurityGroup ? [cluster.kubectlSecurityGroup] : undefined, + vpc: props.cluster.vpc, + vpcSubnets: privateSubnets, + securityGroups, }); this.serviceToken = provider.serviceToken; - this.roleArn = cluster.kubectlRole.roleArn; - } -} - -class ImportedKubectlProvider extends Construct implements IKubectlProvider { - /** - * The custom resource provider's service token. - */ - public readonly serviceToken: string; - - /** - * The IAM role to assume in order to perform kubectl operations against this cluster. - */ - public readonly roleArn: string; - - /** - * The IAM execution role of the handler. - */ - public readonly handlerRole: iam.IRole; - - constructor(scope: Construct, id: string, props: KubectlProviderAttributes) { - super(scope, id); - - this.serviceToken = props.functionArn; - this.roleArn = props.kubectlRoleArn; - this.handlerRole = props.handlerRole; + this.role = handlerRole; } } diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/package.json b/packages/@aws-cdk/aws-eks-v2-alpha/package.json index 334048c6a37de..43a22b4dfb2c2 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/package.json +++ b/packages/@aws-cdk/aws-eks-v2-alpha/package.json @@ -100,7 +100,6 @@ "cdk8s-plus-27": "2.9.5" }, "dependencies": { - "semver": "^7.5.1", "yaml": "1.10.2" }, "peerDependencies": { @@ -108,7 +107,6 @@ "constructs": "^10.0.0" }, "bundledDependencies": [ - "semver", "yaml" ], "engines": { diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/test/access-entry.test.ts b/packages/@aws-cdk/aws-eks-v2-alpha/test/access-entry.test.ts index 729b170c01958..2e019f679ed5e 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/test/access-entry.test.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/test/access-entry.test.ts @@ -131,6 +131,6 @@ describe('AccessEntry', () => { expect(importedAccessEntry.accessEntryName).toEqual(importedAccessEntryName); expect(importedAccessEntry.accessEntryArn).toEqual(importedAccessEntryArn); - Template.fromStack(stack).resourceCountIs('AWS::EKS::AccessEntry', 1); + Template.fromStack(stack).resourceCountIs('AWS::EKS::AccessEntry', 0); }); }); diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/test/alb-controller.test.ts b/packages/@aws-cdk/aws-eks-v2-alpha/test/alb-controller.test.ts index ff5e21f66ddc0..ad853de629f3b 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/test/alb-controller.test.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/test/alb-controller.test.ts @@ -3,6 +3,7 @@ import * as path from 'path'; import { testFixture } from './util'; import { Template } from 'aws-cdk-lib/assertions'; import * as iam from 'aws-cdk-lib/aws-iam'; +import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31'; import { Cluster, KubernetesVersion, AlbController, AlbControllerVersion, HelmChart } from '../lib'; const versions = Object.values(AlbControllerVersion); @@ -12,6 +13,9 @@ test.each(versions)('support AlbControllerVersion (%s)', (version) => { const cluster = new Cluster(stack, 'Cluster', { version: KubernetesVersion.V1_27, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); AlbController.create(stack, { cluster, @@ -63,6 +67,9 @@ test('can configure a custom repository', () => { const cluster = new Cluster(stack, 'Cluster', { version: KubernetesVersion.V1_27, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); AlbController.create(stack, { @@ -96,6 +103,9 @@ test('throws when a policy 
is not defined for a custom version', () => { const cluster = new Cluster(stack, 'Cluster', { version: KubernetesVersion.V1_27, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); expect(() => AlbController.create(stack, { @@ -108,6 +118,9 @@ test.each(['us-gov-west-1', 'cn-north-1'])('stack does not include hard-coded pa const { stack } = testFixture(region); const cluster = new Cluster(stack, 'Cluster', { version: KubernetesVersion.V1_27, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); AlbController.create(stack, { @@ -124,6 +137,9 @@ test('correct helm chart version is set for selected alb controller version', () const cluster = new Cluster(stack, 'Cluster', { version: KubernetesVersion.V1_27, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); AlbController.create(stack, { diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/test/cluster.test.ts b/packages/@aws-cdk/aws-eks-v2-alpha/test/cluster.test.ts index 0648bf10684bb..adbc767327e7b 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/test/cluster.test.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/test/cluster.test.ts @@ -3,8 +3,9 @@ import * as path from 'path'; import * as cdk8s from 'cdk8s'; import { Construct } from 'constructs'; import * as YAML from 'yaml'; +import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31'; import { testFixture, testFixtureNoVpc } from './util'; -import { Annotations, Match, Template } from 'aws-cdk-lib/assertions'; +import { Match, Template } from 'aws-cdk-lib/assertions'; import * as asg from 'aws-cdk-lib/aws-autoscaling'; import * as ec2 from 'aws-cdk-lib/aws-ec2'; import * as iam from 'aws-cdk-lib/aws-iam'; @@ -29,6 +30,9 @@ describe('cluster', () => { albController: { version: eks.AlbControllerVersion.V2_4_1, }, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); Template.fromStack(stack).hasResourceProperties('Custom::AWSCDK-EKS-HelmChart', { @@ -216,29 +220,6 @@ describe('cluster', () => { }); }); - test('spot interrupt handler is not added if spotInterruptHandler is false when connecting self-managed nodes', () => { - // GIVEN - const { stack, vpc } = testFixture(); - const cluster = new eks.Cluster(stack, 'Cluster', { - vpc, - defaultCapacity: 0, - version: CLUSTER_VERSION, - prune: false, - }); - - const selfManaged = new asg.AutoScalingGroup(stack, 'self-managed', { - instanceType: new ec2.InstanceType('t2.medium'), - vpc: vpc, - machineImage: new ec2.AmazonLinuxImage(), - spotPrice: '0.1', - }); - - // WHEN - cluster.connectAutoScalingGroupCapacity(selfManaged, { spotInterruptHandler: false }); - - expect(cluster.node.findAll().filter(c => c.node.id === 'chart-spot-interrupt-handler').length).toEqual(0); - }); - test('throws when a non cdk8s chart construct is added as cdk8s chart', () => { const { stack } = testFixture(); @@ -259,6 +240,9 @@ describe('cluster', () => { const cluster = new eks.Cluster(stack, 'Cluster', { version: CLUSTER_VERSION, prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); const app = new cdk8s.App(); @@ -351,6 +335,9 @@ describe('cluster', () => { this.eksCluster = new eks.Cluster(this, 'Cluster', { version: CLUSTER_VERSION, prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(this, 'kubectlLayer'), + }, }); } } @@ -400,6 +387,9 @@ describe('cluster', () => { this.eksCluster = new eks.Cluster(this, 
'Cluster', { version: CLUSTER_VERSION, prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(this, 'kubectlLayer'), + }, }); } } @@ -440,6 +430,9 @@ describe('cluster', () => { this.eksCluster = new eks.Cluster(this, 'Cluster', { version: CLUSTER_VERSION, prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(this, 'kubectlLayer'), + }, }); } } @@ -471,6 +464,9 @@ describe('cluster', () => { this.eksCluster = new eks.Cluster(this, 'EKSCluster', { version: CLUSTER_VERSION, prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(this, 'kubectlLayer'), + }, }); } } @@ -760,10 +756,10 @@ describe('cluster', () => { const { stack } = testFixture(); const handlerRole = iam.Role.fromRoleArn(stack, 'HandlerRole', 'arn:aws:iam::123456789012:role/lambda-role'); + const kubectlProvider = KubectlProvider.fromKubectlProviderAttributes(stack, 'KubectlProvider', { - functionArn: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1', - kubectlRoleArn: 'arn:aws:iam::123456789012:role/kubectl-role', - handlerRole: handlerRole, + serviceToken: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1', + role: handlerRole, }); const cluster = eks.Cluster.fromClusterAttributes(stack, 'Cluster', { @@ -780,9 +776,8 @@ describe('cluster', () => { const handlerRole = iam.Role.fromRoleArn(stack, 'HandlerRole', 'arn:aws:iam::123456789012:role/lambda-role'); const kubectlProvider = KubectlProvider.fromKubectlProviderAttributes(stack, 'KubectlProvider', { - functionArn: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1', - kubectlRoleArn: 'arn:aws:iam::123456789012:role/kubectl-role', - handlerRole: handlerRole, + serviceToken: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1', + role: handlerRole, }); const cluster = eks.Cluster.fromClusterAttributes(stack, 'Cluster', { @@ -797,7 +792,6 @@ describe('cluster', () => { Template.fromStack(stack).hasResourceProperties('Custom::AWSCDK-EKS-HelmChart', { ServiceToken: kubectlProvider.serviceToken, - RoleArn: kubectlProvider.roleArn, }); }); @@ -806,9 +800,8 @@ describe('cluster', () => { const handlerRole = iam.Role.fromRoleArn(stack, 'HandlerRole', 'arn:aws:iam::123456789012:role/lambda-role'); const kubectlProvider = KubectlProvider.fromKubectlProviderAttributes(stack, 'KubectlProvider', { - functionArn: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1', - kubectlRoleArn: 'arn:aws:iam::123456789012:role/kubectl-role', - handlerRole: handlerRole, + serviceToken: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1', + role: handlerRole, }); const cluster = eks.Cluster.fromClusterAttributes(stack, 'Cluster', { @@ -830,7 +823,6 @@ describe('cluster', () => { Template.fromStack(stack).hasResourceProperties('Custom::AWSCDK-EKS-KubernetesPatch', { ServiceToken: kubectlProvider.serviceToken, - RoleArn: kubectlProvider.roleArn, }); }); @@ -839,9 +831,8 @@ describe('cluster', () => { const handlerRole = iam.Role.fromRoleArn(stack, 'HandlerRole', 'arn:aws:iam::123456789012:role/lambda-role'); const kubectlProvider = KubectlProvider.fromKubectlProviderAttributes(stack, 'KubectlProvider', { - functionArn: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1', - kubectlRoleArn: 'arn:aws:iam::123456789012:role/kubectl-role', - handlerRole: handlerRole, + serviceToken: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1', + role: handlerRole, }); const cluster = eks.Cluster.fromClusterAttributes(stack, 'Cluster', { @@ -875,32 +866,12 @@ 
describe('cluster', () => { Template.fromStack(stack).hasResourceProperties('Custom::AWSCDK-EKS-KubernetesObjectValue', { ServiceToken: kubectlProvider.serviceToken, - RoleArn: kubectlProvider.roleArn, }); expect(cluster.kubectlProvider).not.toBeInstanceOf(eks.KubectlProvider); }); }); - test('import cluster with new kubectl private subnets', () => { - const { stack, vpc } = testFixture(); - - const cluster = eks.Cluster.fromClusterAttributes(stack, 'Cluster', { - clusterName: 'cluster', - kubectlPrivateSubnetIds: vpc.privateSubnets.map(s => s.subnetId), - }); - - expect(cluster.kubectlPrivateSubnets?.map(s => stack.resolve(s.subnetId))).toEqual([ - { Ref: 'VPCPrivateSubnet1Subnet8BCA10E0' }, - { Ref: 'VPCPrivateSubnet2SubnetCFCDAA7A' }, - ]); - - expect(cluster.kubectlPrivateSubnets?.map(s => s.node.id)).toEqual([ - 'KubectlSubnet0', - 'KubectlSubnet1', - ]); - }); - test('exercise export/import', () => { // GIVEN const { stack: stack1, vpc, app } = testFixture(); @@ -962,6 +933,9 @@ describe('cluster', () => { defaultCapacity: 0, version: CLUSTER_VERSION, prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // WHEN @@ -981,7 +955,13 @@ describe('cluster', () => { test('kubectl resources can be created in a separate stack', () => { // GIVEN const { stack, app } = testFixture(); - const cluster = new eks.Cluster(stack, 'cluster', { version: CLUSTER_VERSION, prune: false }); // cluster is under stack2 + const cluster = new eks.Cluster(stack, 'cluster', { + version: CLUSTER_VERSION, + prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // cluster is under stack2 // WHEN resource is under stack2 const stack2 = new cdk.Stack(app, 'stack2', { env: { account: stack.account, region: stack.region } }); @@ -1000,11 +980,10 @@ describe('cluster', () => { Type: 'Custom::AWSCDK-EKS-KubernetesResource', Properties: { ServiceToken: { - 'Fn::ImportValue': 'Stack:ExportsOutputFnGetAttawscdkawseksKubectlProviderframeworkonEvent0A650005Arn27EC41A8', + 'Fn::ImportValue': 'Stack:ExportsOutputFnGetAttclusterKubectlProviderframeworkonEvent7E8470F1Arn6086AAA4', }, Manifest: '[{\"foo\":\"bar\"}]', ClusterName: { 'Fn::ImportValue': 'Stack:ExportsOutputRefcluster611F8AFFA07FC079' }, - RoleArn: { 'Fn::ImportValue': 'Stack:ExportsOutputFnGetAttclusterkubectlRoleC33D3B63Arn85DD3402' }, }, UpdateReplacePolicy: 'Delete', DeletionPolicy: 'Delete', @@ -1027,87 +1006,6 @@ describe('cluster', () => { expect(template.Outputs).toBeUndefined(); // no outputs }); - test('if masters role is defined, it should be included in the config command', () => { - // GIVEN - const { app, stack } = testFixtureNoVpc(); - - // WHEN - const mastersRole = new iam.Role(stack, 'masters', { assumedBy: new iam.AccountRootPrincipal() }); - new eks.Cluster(stack, 'Cluster', { - mastersRole, - version: CLUSTER_VERSION, - prune: false, - }); - - // THEN - const assembly = app.synth(); - const template = assembly.getStackByName(stack.stackName).template; - expect(template.Outputs).toEqual({ - ClusterConfigCommand43AAE40F: { Value: { 'Fn::Join': ['', ['aws eks update-kubeconfig --name ', { Ref: 'ClusterEB0386A7' }, ' --region us-east-1 --role-arn ', { 'Fn::GetAtt': ['masters0D04F23D', 'Arn'] }]] } }, - ClusterGetTokenCommand06AE992E: { Value: { 'Fn::Join': ['', ['aws eks get-token --cluster-name ', { Ref: 'ClusterEB0386A7' }, ' --region us-east-1 --role-arn ', { 'Fn::GetAtt': ['masters0D04F23D', 'Arn'] }]] } }, - }); - }); - - test('if 
`outputConfigCommand=false` will disabled the output', () => { - // GIVEN - const { app, stack } = testFixtureNoVpc(); - - // WHEN - const mastersRole = new iam.Role(stack, 'masters', { assumedBy: new iam.AccountRootPrincipal() }); - new eks.Cluster(stack, 'Cluster', { - mastersRole, - outputConfigCommand: false, - version: CLUSTER_VERSION, - prune: false, - }); - - // THEN - const assembly = app.synth(); - const template = assembly.getStackByName(stack.stackName).template; - expect(template.Outputs).toBeUndefined(); // no outputs - }); - - test('`outputClusterName` can be used to synthesize an output with the cluster name', () => { - // GIVEN - const { app, stack } = testFixtureNoVpc(); - - // WHEN - new eks.Cluster(stack, 'Cluster', { - outputConfigCommand: false, - outputClusterName: true, - version: CLUSTER_VERSION, - prune: false, - }); - - // THEN - const assembly = app.synth(); - const template = assembly.getStackByName(stack.stackName).template; - expect(template.Outputs).toEqual({ - ClusterClusterNameEB26049E: { Value: { Ref: 'ClusterEB0386A7' } }, - }); - }); - - test('`outputMastersRoleArn` can be used to synthesize an output with the arn of the masters role if defined', () => { - // GIVEN - const { app, stack } = testFixtureNoVpc(); - - // WHEN - new eks.Cluster(stack, 'Cluster', { - outputConfigCommand: false, - outputMastersRoleArn: true, - mastersRole: new iam.Role(stack, 'masters', { assumedBy: new iam.AccountRootPrincipal() }), - version: CLUSTER_VERSION, - prune: false, - }); - - // THEN - const assembly = app.synth(); - const template = assembly.getStackByName(stack.stackName).template; - expect(template.Outputs).toEqual({ - ClusterMastersRoleArnB15964B1: { Value: { 'Fn::GetAtt': ['masters0D04F23D', 'Arn'] } }, - }); - }); - describe('boostrap user-data', () => { test('rendered by default for ASGs', () => { // GIVEN @@ -1165,7 +1063,14 @@ describe('cluster', () => { test('nodes labeled an tainted accordingly', () => { // GIVEN const { app, stack } = testFixtureNoVpc(); - const cluster = new eks.Cluster(stack, 'Cluster', { defaultCapacity: 0, version: CLUSTER_VERSION, prune: false }); + const cluster = new eks.Cluster(stack, 'Cluster', { + defaultCapacity: 0, + version: CLUSTER_VERSION, + prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN cluster.addAutoScalingGroupCapacity('MyCapcity', { @@ -1178,63 +1083,6 @@ describe('cluster', () => { const userData = template.Resources.ClusterMyCapcityLaunchConfig58583345.Properties.UserData; expect(userData).toEqual({ 'Fn::Base64': { 'Fn::Join': ['', ['#!/bin/bash\nset -o xtrace\n/etc/eks/bootstrap.sh ', { Ref: 'ClusterEB0386A7' }, ' --kubelet-extra-args "--node-labels lifecycle=Ec2Spot --register-with-taints=spotInstance=true:PreferNoSchedule" --apiserver-endpoint \'', { 'Fn::GetAtt': ['ClusterEB0386A7', 'Endpoint'] }, '\' --b64-cluster-ca \'', { 'Fn::GetAtt': ['ClusterEB0386A7', 'CertificateAuthorityData'] }, '\' --use-max-pods true\n/opt/aws/bin/cfn-signal --exit-code $? 
--stack Stack --resource ClusterMyCapcityASGD4CD8B97 --region us-east-1']] } }); }); - - test('interrupt handler is added', () => { - // GIVEN - const { stack } = testFixtureNoVpc(); - const cluster = new eks.Cluster(stack, 'Cluster', { defaultCapacity: 0, version: CLUSTER_VERSION, prune: false }); - - // WHEN - cluster.addAutoScalingGroupCapacity('MyCapcity', { - instanceType: new ec2.InstanceType('m3.xlarge'), - spotPrice: '0.01', - }); - - // THEN - Template.fromStack(stack).hasResourceProperties(eks.HelmChart.RESOURCE_TYPE, { - Release: 'stackclusterchartspotinterrupthandlerdec62e07', - Chart: 'aws-node-termination-handler', - Values: '{\"nodeSelector\":{\"lifecycle\":\"Ec2Spot\"}}', - Namespace: 'kube-system', - Repository: 'https://aws.github.io/eks-charts', - }); - }); - - test('interrupt handler is not added when spotInterruptHandler is false', () => { - // GIVEN - const { stack } = testFixtureNoVpc(); - const cluster = new eks.Cluster(stack, 'Cluster', { defaultCapacity: 0, version: CLUSTER_VERSION, prune: false }); - - // WHEN - cluster.addAutoScalingGroupCapacity('MyCapcity', { - instanceType: new ec2.InstanceType('m3.xlarge'), - spotPrice: '0.01', - spotInterruptHandler: false, - }); - - // THEN - expect(cluster.node.findAll().filter(c => c.node.id === 'chart-spot-interrupt-handler').length).toEqual(0); - }); - - test('its possible to add two capacities with spot instances and only one stop handler will be installed', () => { - // GIVEN - const { stack } = testFixtureNoVpc(); - const cluster = new eks.Cluster(stack, 'Cluster', { defaultCapacity: 0, version: CLUSTER_VERSION, prune: false }); - - // WHEN - cluster.addAutoScalingGroupCapacity('Spot1', { - instanceType: new ec2.InstanceType('m3.xlarge'), - spotPrice: '0.01', - }); - - cluster.addAutoScalingGroupCapacity('Spot2', { - instanceType: new ec2.InstanceType('m4.xlarge'), - spotPrice: '0.01', - }); - - // THEN - Template.fromStack(stack).resourceCountIs(eks.HelmChart.RESOURCE_TYPE, 1); - }); }); }); @@ -1476,115 +1324,6 @@ describe('cluster', () => { )).toEqual(true); }); - test('if helm charts are used, the provider role is allowed to assume the creation role', () => { - // GIVEN - const { stack } = testFixture(); - const cluster = new eks.Cluster(stack, 'MyCluster', { - clusterName: 'my-cluster-name', - version: CLUSTER_VERSION, - prune: false, - }); - - // WHEN - cluster.addHelmChart('MyChart', { - chart: 'foo', - }); - - // THEN - Template.fromStack(stack).hasCondition('MyClusterHasEcrPublicC68AA246', { - 'Fn::Equals': [ - { - Ref: 'AWS::Partition', - }, - 'aws', - ], - }); - - Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { - PolicyDocument: { - Statement: [ - { - Action: 'eks:DescribeCluster', - Effect: 'Allow', - Resource: { - 'Fn::GetAtt': ['MyCluster4C1BA579', 'Arn'], - }, - }, - { - Action: 'sts:AssumeRole', - Effect: 'Allow', - Resource: { - 'Fn::GetAtt': ['MyClusterkubectlRole29979636', 'Arn'], - }, - }, - ], - Version: '2012-10-17', - }, - PolicyName: 'MyClusterKubectlHandlerRoleDefaultPolicy7FB0AE53', - Roles: [ - { - Ref: 'MyClusterKubectlHandlerRole42303817', - }, - ], - }); - - Template.fromStack(stack).hasResourceProperties('AWS::IAM::Role', { - AssumeRolePolicyDocument: { - Statement: [ - { - Action: 'sts:AssumeRole', - Effect: 'Allow', - Principal: { Service: 'lambda.amazonaws.com' }, - }, - ], - Version: '2012-10-17', - }, - ManagedPolicyArns: [ - { - 'Fn::Join': ['', [ - 'arn:', - { Ref: 'AWS::Partition' }, - ':iam::aws:policy/service-role/AWSLambdaBasicExecutionRole', - ]], - }, 
- { - 'Fn::Join': ['', [ - 'arn:', - { Ref: 'AWS::Partition' }, - ':iam::aws:policy/service-role/AWSLambdaVPCAccessExecutionRole', - ]], - }, - { - 'Fn::Join': ['', [ - 'arn:', - { Ref: 'AWS::Partition' }, - ':iam::aws:policy/AmazonEC2ContainerRegistryReadOnly', - ]], - }, - { - 'Fn::If': [ - 'MyClusterHasEcrPublicC68AA246', - { - 'Fn::Join': [ - '', - [ - 'arn:', - { - Ref: 'AWS::Partition', - }, - ':iam::aws:policy/AmazonElasticContainerRegistryPublicReadOnly', - ], - ], - }, - { - Ref: 'AWS::NoValue', - }, - ], - }, - ], - }); - }); - test('coreDnsComputeType will patch the coreDNS configuration to use a "fargate" compute type and restore to "ec2" upon removal', () => { // GIVEN const stack = new cdk.Stack(); @@ -1594,6 +1333,9 @@ describe('cluster', () => { coreDnsComputeType: eks.CoreDnsComputeType.FARGATE, version: CLUSTER_VERSION, prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // THEN @@ -1605,12 +1347,6 @@ describe('cluster', () => { ClusterName: { Ref: 'MyCluster4C1BA579', }, - RoleArn: { - 'Fn::GetAtt': [ - 'MyClusterkubectlRole29979636', - 'Arn', - ], - }, }); }); @@ -1645,7 +1381,14 @@ describe('cluster', () => { test('inf1 instances are supported', () => { // GIVEN const { stack } = testFixtureNoVpc(); - const cluster = new eks.Cluster(stack, 'Cluster', { defaultCapacity: 0, version: CLUSTER_VERSION, prune: false }); + const cluster = new eks.Cluster(stack, 'Cluster', { + defaultCapacity: 0, + version: CLUSTER_VERSION, + prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN cluster.addAutoScalingGroupCapacity('InferenceInstances', { @@ -1663,7 +1406,14 @@ describe('cluster', () => { test('inf2 instances are supported', () => { // GIVEN const { stack } = testFixtureNoVpc(); - const cluster = new eks.Cluster(stack, 'Cluster', { defaultCapacity: 0, version: CLUSTER_VERSION, prune: false }); + const cluster = new eks.Cluster(stack, 'Cluster', { + defaultCapacity: 0, + version: CLUSTER_VERSION, + prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN cluster.addAutoScalingGroupCapacity('InferenceInstances', { @@ -1681,7 +1431,14 @@ describe('cluster', () => { test('trn1 instances are supported', () => { // GIVEN const { stack } = testFixtureNoVpc(); - const cluster = new eks.Cluster(stack, 'Cluster', { defaultCapacity: 0, version: CLUSTER_VERSION, prune: false }); + const cluster = new eks.Cluster(stack, 'Cluster', { + defaultCapacity: 0, + version: CLUSTER_VERSION, + prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN cluster.addAutoScalingGroupCapacity('TrainiumInstances', { @@ -1699,7 +1456,14 @@ describe('cluster', () => { test('trn1n instances are supported', () => { // GIVEN const { stack } = testFixtureNoVpc(); - const cluster = new eks.Cluster(stack, 'Cluster', { defaultCapacity: 0, version: CLUSTER_VERSION, prune: false }); + const cluster = new eks.Cluster(stack, 'Cluster', { + defaultCapacity: 0, + version: CLUSTER_VERSION, + prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN cluster.addAutoScalingGroupCapacity('TrainiumInstances', { @@ -1718,7 +1482,14 @@ describe('cluster', () => { test('inf1 instances are supported in addNodegroupCapacity', () => { // GIVEN const { stack } = testFixtureNoVpc(); - const cluster = new eks.Cluster(stack, 
'Cluster', { defaultCapacity: 0, version: CLUSTER_VERSION, prune: false }); + const cluster = new eks.Cluster(stack, 'Cluster', { + defaultCapacity: 0, + version: CLUSTER_VERSION, + prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN cluster.addNodegroupCapacity('InferenceInstances', { @@ -1735,7 +1506,14 @@ describe('cluster', () => { test('inf2 instances are supported in addNodegroupCapacity', () => { // GIVEN const { stack } = testFixtureNoVpc(); - const cluster = new eks.Cluster(stack, 'Cluster', { defaultCapacity: 0, version: CLUSTER_VERSION, prune: false }); + const cluster = new eks.Cluster(stack, 'Cluster', { + defaultCapacity: 0, + version: CLUSTER_VERSION, + prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN cluster.addNodegroupCapacity('InferenceInstances', { @@ -1753,7 +1531,13 @@ describe('cluster', () => { test('kubectl resources are always created after all fargate profiles', () => { // GIVEN const { stack, app } = testFixture(); - const cluster = new eks.Cluster(stack, 'Cluster', { version: CLUSTER_VERSION, prune: false }); + const cluster = new eks.Cluster(stack, 'Cluster', { + version: CLUSTER_VERSION, + prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN cluster.addFargateProfile('profile1', { selectors: [{ namespace: 'profile1' }] }); @@ -1794,13 +1578,18 @@ describe('cluster', () => { } }); - test('kubectl provider role can assume creation role', () => { + test('kubectl provider role has the right policy', () => { // GIVEN const { stack } = testFixture(); - const c1 = new eks.Cluster(stack, 'Cluster1', { version: CLUSTER_VERSION, prune: false }); + const c1 = new eks.Cluster(stack, 'Cluster1', { + version: CLUSTER_VERSION, + prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN - // activate kubectl provider c1.addManifest('c1a', { foo: 123 }); c1.addManifest('c1b', { foo: 123 }); @@ -1819,16 +1608,6 @@ describe('cluster', () => { ], }, }, - { - Action: 'sts:AssumeRole', - Effect: 'Allow', - Resource: { - 'Fn::GetAtt': [ - 'Cluster1kubectlRole4852DA20', - 'Arn', - ], - }, - }, ], Version: '2012-10-17', }, @@ -1869,7 +1648,7 @@ describe('cluster', () => { }, { 'Fn::If': [ - 'Cluster1HasEcrPublicC08E47E3', + 'Cluster1KubectlProviderHandlerHasEcrPublic0B1C9820', { 'Fn::Join': [ '', @@ -1899,8 +1678,11 @@ describe('cluster', () => { version: CLUSTER_VERSION, prune: false, endpointAccess: eks.EndpointAccess.PRIVATE, - kubectlEnvironment: { - Foo: 'Bar', + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + environment: { + Foo: 'Bar', + }, }, }); @@ -1918,8 +1700,11 @@ describe('cluster', () => { version: CLUSTER_VERSION, prune: false, endpointAccess: eks.EndpointAccess.PRIVATE, - kubectlEnvironment: { - Foo: 'Bar', + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + environment: { + Foo: 'Bar', + }, }, }); @@ -1943,7 +1728,7 @@ describe('cluster', () => { }); }); - describe('kubectl provider passes iam role environment to kube ctl lambda', () => { + describe('kubectl provider passes iam role environment to kubectl lambda', () => { test('new cluster', () => { const { stack } = testFixture(); const kubectlRole = new iam.Role(stack, 'KubectlRole', { assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'), }); const cluster = new eks.Cluster(stack, 'Cluster1', { version: CLUSTER_VERSION, prune: false, endpointAccess: eks.EndpointAccess.PRIVATE, -
kubectlLambdaRole: kubectlRole, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + role: kubectlRole, + }, }); cluster.addManifest('resource', { @@ -1972,7 +1760,7 @@ describe('cluster', () => { Template.fromStack(stack).hasResourceProperties('AWS::Lambda::Function', { Role: { - 'Fn::GetAtt': ['awscdkawseksKubectlProviderframeworkonEventServiceRoleF4FAF053', 'Arn'], + 'Fn::GetAtt': ['Cluster1KubectlProviderframeworkonEventServiceRole67819AA9', 'Arn'], }, }); }); @@ -1980,13 +1768,17 @@ describe('cluster', () => { test('imported cluster', () => { const clusterName = 'my-cluster'; const stack = new cdk.Stack(); - const kubectlLambdaRole = new iam.Role(stack, 'KubectlLambdaRole', { - assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'), + + const handlerRole = iam.Role.fromRoleArn(stack, 'HandlerRole', 'arn:aws:iam::123456789012:role/lambda-role'); + + const kubectlProvider = KubectlProvider.fromKubectlProviderAttributes(stack, 'KubectlProvider', { + serviceToken: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1', + role: handlerRole, }); + const cluster = eks.Cluster.fromClusterAttributes(stack, 'Imported', { clusterName, - kubectlRoleArn: 'arn:aws:iam::1111111:role/iam-role-that-has-masters-access', - kubectlLambdaRole: kubectlLambdaRole, + kubectlProvider: kubectlProvider, }); const chart = 'hello-world'; @@ -1994,14 +1786,8 @@ describe('cluster', () => { chart, }); - Template.fromStack(stack).hasResourceProperties('AWS::Lambda::Function', { - Role: { - 'Fn::GetAtt': ['ImportedKubectlProviderframeworkonEventServiceRole6603B49A', 'Arn'], - }, - }); Template.fromStack(stack).hasResourceProperties(HelmChart.RESOURCE_TYPE, { ClusterName: clusterName, - RoleArn: 'arn:aws:iam::1111111:role/iam-role-that-has-masters-access', Release: 'importedcharttestchartf3acd6e5', Chart: chart, Namespace: 'default', @@ -2025,6 +1811,9 @@ describe('cluster', () => { prune: false, endpointAccess: eks.EndpointAccess.PUBLIC, vpcSubnets: [{ subnetType: ec2.SubnetType.PUBLIC }], + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // we don't attach vpc config in case endpoint is public only, regardless of whether @@ -2039,8 +1828,10 @@ describe('cluster', () => { new eks.Cluster(stack, 'Cluster', { version: CLUSTER_VERSION, - prune: false, endpointAccess: eks.EndpointAccess.PUBLIC, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // we don't attach vpc config in case endpoint is public only, regardless of whether @@ -2070,11 +1861,14 @@ describe('cluster', () => { version: CLUSTER_VERSION, prune: false, endpointAccess: eks.EndpointAccess.PRIVATE, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); const functions = Template.fromStack(stack).findResources('AWS::Lambda::Function'); - expect(functions.awscdkawseksKubectlProviderHandlerAABA4423.Properties.VpcConfig.SubnetIds.length).not.toEqual(0); - expect(functions.awscdkawseksKubectlProviderHandlerAABA4423.Properties.VpcConfig.SecurityGroupIds.length).not.toEqual(0); + expect(functions.ClusterKubectlProviderframeworkonEvent68E0CF80.Properties.VpcConfig.SubnetIds.length).not.toEqual(0); + expect(functions.ClusterKubectlProviderframeworkonEvent68E0CF80.Properties.VpcConfig.SecurityGroupIds.length).not.toEqual(0); }); test('private and non restricted public without private subnets', () => { @@ -2085,6 +1879,9 @@ describe('cluster', () => { prune: false, endpointAccess: 
eks.EndpointAccess.PUBLIC_AND_PRIVATE, vpcSubnets: [{ subnetType: ec2.SubnetType.PUBLIC }], + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // we don't have private subnets, but we don't need them since public access @@ -2101,12 +1898,15 @@ describe('cluster', () => { version: CLUSTER_VERSION, prune: false, endpointAccess: eks.EndpointAccess.PUBLIC_AND_PRIVATE, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // we have private subnets so we should use them. const functions = Template.fromStack(stack).findResources('AWS::Lambda::Function'); - expect(functions.awscdkawseksKubectlProviderHandlerAABA4423.Properties.VpcConfig.SubnetIds.length).not.toEqual(0); - expect(functions.awscdkawseksKubectlProviderHandlerAABA4423.Properties.VpcConfig.SecurityGroupIds.length).not.toEqual(0); + expect(functions.ClusterKubectlProviderframeworkonEvent68E0CF80.Properties.VpcConfig.SubnetIds.length).not.toEqual(0); + expect(functions.ClusterKubectlProviderframeworkonEvent68E0CF80.Properties.VpcConfig.SecurityGroupIds.length).not.toEqual(0); }); test('private and restricted public without private subnets', () => { @@ -2129,12 +1929,15 @@ describe('cluster', () => { version: CLUSTER_VERSION, prune: false, endpointAccess: eks.EndpointAccess.PUBLIC_AND_PRIVATE.onlyFrom('1.2.3.4/32'), + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // we have private subnets so we should use them. const functions = Template.fromStack(stack).findResources('AWS::Lambda::Function'); - expect(functions.awscdkawseksKubectlProviderHandlerAABA4423.Properties.VpcConfig.SubnetIds.length).not.toEqual(0); - expect(functions.awscdkawseksKubectlProviderHandlerAABA4423.Properties.VpcConfig.SecurityGroupIds.length).not.toEqual(0); + expect(functions.ClusterKubectlProviderframeworkonEvent68E0CF80.Properties.VpcConfig.SubnetIds.length).not.toEqual(0); + expect(functions.ClusterKubectlProviderframeworkonEvent68E0CF80.Properties.VpcConfig.SecurityGroupIds.length).not.toEqual(0); }); test('private endpoint access selects only private subnets from looked up vpc', () => { @@ -2186,6 +1989,9 @@ describe('cluster', () => { version: CLUSTER_VERSION, prune: false, endpointAccess: eks.EndpointAccess.PRIVATE, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); Template.fromStack(stack).hasResourceProperties('AWS::Lambda::Function', { @@ -2250,6 +2056,9 @@ describe('cluster', () => { ec2.Subnet.fromSubnetId(stack, 'Public', 'subnet-public-in-us-east-1c'), ], }], + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); Template.fromStack(stack).hasResourceProperties('AWS::Lambda::Function', { @@ -2274,6 +2083,9 @@ describe('cluster', () => { ec2.Subnet.fromSubnetId(stack, 'Private', 'subnet-unknown'), ], }], + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); Template.fromStack(stack).hasResourceProperties('AWS::Lambda::Function', { @@ -2299,6 +2111,9 @@ describe('cluster', () => { availabilityZone: 'us-east-1a', })], }], + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); Template.fromStack(stack).hasResourceProperties('AWS::Lambda::Function', { @@ -2340,6 +2155,9 @@ describe('cluster', () => { prune: false, endpointAccess: eks.EndpointAccess.PRIVATE, vpc, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + 
}, }); cluster.addManifest('resource', { @@ -2372,52 +2190,6 @@ describe('cluster', () => { }); }); - test('kubectl provider limits number of subnets to 16', () => { - const { stack } = testFixture(); - - const subnetConfiguration: ec2.SubnetConfiguration[] = []; - - for (let i = 0; i < 20; i++) { - subnetConfiguration.push({ - subnetType: ec2.SubnetType.PRIVATE_WITH_EGRESS, - name: `Private${i}`, - }, - ); - } - - subnetConfiguration.push({ - subnetType: ec2.SubnetType.PUBLIC, - name: 'Public1', - }); - - const vpc2 = new ec2.Vpc(stack, 'Vpc', { - maxAzs: 2, - natGateways: 1, - subnetConfiguration, - }); - - const cluster = new eks.Cluster(stack, 'Cluster1', { - version: CLUSTER_VERSION, - prune: false, - endpointAccess: eks.EndpointAccess.PRIVATE, - vpc: vpc2, - }); - - cluster.addManifest('resource', { - kind: 'ConfigMap', - apiVersion: 'v1', - data: { - hello: 'world', - }, - metadata: { - name: 'config-map', - }, - }); - - const functions = Template.fromStack(stack).findResources('AWS::Lambda::Function'); - expect(functions.awscdkawseksKubectlProviderHandlerAABA4423.Properties.VpcConfig.SubnetIds.length).toEqual(16); - }); - test('kubectl provider considers vpc subnet selection', () => { const { stack } = testFixture(); @@ -2448,6 +2220,9 @@ describe('cluster', () => { endpointAccess: eks.EndpointAccess.PRIVATE, vpc: vpc2, vpcSubnets: [{ subnetGroupName: 'Private1' }, { subnetGroupName: 'Private2' }], + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); cluster.addManifest('resource', { @@ -2523,7 +2298,13 @@ describe('cluster', () => { test('getServiceLoadBalancerAddress', () => { const { stack } = testFixture(); - const cluster = new eks.Cluster(stack, 'Cluster1', { version: CLUSTER_VERSION, prune: false }); + const cluster = new eks.Cluster(stack, 'Cluster1', { + version: CLUSTER_VERSION, + prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); const loadBalancerAddress = cluster.getServiceLoadBalancerAddress('myservice'); @@ -2540,19 +2321,13 @@ describe('cluster', () => { expect(resources[expectedKubernetesGetId].Properties).toEqual({ ServiceToken: { 'Fn::GetAtt': [ - 'awscdkawseksKubectlProviderframeworkonEvent0A650005', + 'Cluster1KubectlProviderframeworkonEventBB398CAE', 'Arn', ], }, ClusterName: { Ref: 'Cluster192CD0375', }, - RoleArn: { - 'Fn::GetAtt': [ - 'Cluster1kubectlRole4852DA20', - 'Arn', - ], - }, ObjectType: 'service', ObjectName: 'myservice', ObjectNamespace: 'default', @@ -2575,56 +2350,20 @@ describe('cluster', () => { new eks.Cluster(stack, 'Cluster1', { version: CLUSTER_VERSION, prune: false, - kubectlLayer: layer, + kubectlProviderOptions: { + kubectlLayer: layer, + }, }); // THEN Template.fromStack(stack).hasResourceProperties('AWS::Lambda::Function', { Layers: [ - { Ref: 'awscdkawseksKubectlProviderAwsCliLayerF72FE066' }, + { Ref: 'Cluster1KubectlProviderAwsCliLayer5CF50321' }, 'arn:of:layer', ], }); }); - describe('kubectlLayer annotation', () => { - function message(version: string) { - return [ - `You created a cluster with Kubernetes Version 1.${version} without specifying the kubectlLayer property.`, - 'This may cause failures as the kubectl version provided with aws-cdk-lib is 1.20, which is only guaranteed to be compatible with Kubernetes versions 1.19-1.21.', - `Please provide a kubectlLayer from @aws-cdk/lambda-layer-kubectl-v${version}. 
[ack: @aws-cdk/aws-eks:clusterKubectlLayerNotSpecified]`, - ].join(' '); - } - - test('not added when version < 1.22 and no kubectl layer provided', () => { - // GIVEN - const { stack } = testFixture(); - - // WHEN - new eks.Cluster(stack, 'Cluster1', { - version: eks.KubernetesVersion.V1_21, - prune: false, - }); - - // THEN - Annotations.fromStack(stack).hasNoWarning('/Stack/Cluster1', message('21')); - }); - - test('added when version >= 1.22 and no kubectl layer provided', () => { - // GIVEN - const { stack } = testFixture(); - - // WHEN - new eks.Cluster(stack, 'Cluster1', { - version: eks.KubernetesVersion.V1_24, - prune: false, - }); - - // THEN - Annotations.fromStack(stack).hasWarning('/Stack/Cluster1', message('24')); - }); - }); - test('custom awscli layer can be provided', () => { // GIVEN const { stack } = testFixture(); @@ -2634,14 +2373,17 @@ describe('cluster', () => { new eks.Cluster(stack, 'Cluster1', { version: CLUSTER_VERSION, prune: false, - awscliLayer: layer, + kubectlProviderOptions: { + awscliLayer: layer, + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // THEN Template.fromStack(stack).hasResourceProperties('AWS::Lambda::Function', { Layers: [ 'arn:of:layer', - { Ref: 'awscdkawseksKubectlProviderKubectlLayerA7F2FE55' }, + { Ref: 'kubectlLayer44321E08' }, ], }); }); @@ -2726,16 +2468,11 @@ describe('cluster', () => { const { stack, vpc } = testFixture(); // WHEN const mastersRole = new iam.Role(stack, 'role', { assumedBy: new iam.AccountRootPrincipal() }); - const cluster = new eks.Cluster(stack, 'Cluster', { + new eks.Cluster(stack, 'Cluster', { vpc, mastersRole, version: CLUSTER_VERSION, }); - cluster.grantAccess('mastersAccess', mastersRole.roleArn, [ - eks.AccessPolicy.fromAccessPolicyName('AmazonEKSClusterAdminPolicy', { - accessScopeType: eks.AccessScopeType.CLUSTER, - }), - ]); // THEN Template.fromStack(stack).hasResourceProperties('AWS::EKS::AccessEntry', { AccessPolicies: [ diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/test/fargate.test.ts b/packages/@aws-cdk/aws-eks-v2-alpha/test/fargate.test.ts index 22d416761834d..2ad70abe2e09b 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/test/fargate.test.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/test/fargate.test.ts @@ -3,6 +3,7 @@ import * as ec2 from 'aws-cdk-lib/aws-ec2'; import * as iam from 'aws-cdk-lib/aws-iam'; import * as kms from 'aws-cdk-lib/aws-kms'; import { Stack, Tags } from 'aws-cdk-lib/core'; +import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31'; import * as eks from '../lib'; const CLUSTER_VERSION = eks.KubernetesVersion.V1_25; @@ -11,7 +12,12 @@ describe('fargate', () => { test('can be added to a cluster', () => { // GIVEN const stack = new Stack(); - const cluster = new eks.Cluster(stack, 'MyCluster', { version: CLUSTER_VERSION }); + const cluster = new eks.Cluster(stack, 'MyCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN cluster.addFargateProfile('MyProfile', { @@ -29,7 +35,12 @@ describe('fargate', () => { test('supports specifying a profile name', () => { // GIVEN const stack = new Stack(); - const cluster = new eks.Cluster(stack, 'MyCluster', { version: CLUSTER_VERSION }); + const cluster = new eks.Cluster(stack, 'MyCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN cluster.addFargateProfile('MyProfile', { @@ -49,7 +60,12 @@ describe('fargate', () => { 
test('supports custom execution role', () => { // GIVEN const stack = new Stack(); - const cluster = new eks.Cluster(stack, 'MyCluster', { version: CLUSTER_VERSION }); + const cluster = new eks.Cluster(stack, 'MyCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); const myRole = new iam.Role(stack, 'MyRole', { assumedBy: new iam.AnyPrincipal() }); // WHEN @@ -69,7 +85,12 @@ describe('fargate', () => { test('supports tags through aspects', () => { // GIVEN const stack = new Stack(); - const cluster = new eks.Cluster(stack, 'MyCluster', { version: CLUSTER_VERSION }); + const cluster = new eks.Cluster(stack, 'MyCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN cluster.addFargateProfile('MyProfile', { @@ -100,7 +121,12 @@ describe('fargate', () => { test('supports specifying vpc', () => { // GIVEN const stack = new Stack(); - const cluster = new eks.Cluster(stack, 'MyCluster', { version: CLUSTER_VERSION }); + const cluster = new eks.Cluster(stack, 'MyCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); const vpc = ec2.Vpc.fromVpcAttributes(stack, 'MyVpc', { vpcId: 'vpc123', availabilityZones: ['az1'], @@ -125,7 +151,12 @@ describe('fargate', () => { test('fails if there are no selectors or if there are more than 5', () => { // GIVEN const stack = new Stack(); - const cluster = new eks.Cluster(stack, 'MyCluster', { version: CLUSTER_VERSION }); + const cluster = new eks.Cluster(stack, 'MyCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // THEN expect(() => cluster.addFargateProfile('MyProfile', { selectors: [] })); @@ -146,7 +177,12 @@ describe('fargate', () => { const stack = new Stack(); // WHEN - new eks.FargateCluster(stack, 'FargateCluster', { version: CLUSTER_VERSION }); + new eks.FargateCluster(stack, 'FargateCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // THEN Template.fromStack(stack).hasResourceProperties('Custom::AWSCDK-EKS-KubernetesPatch', { @@ -186,6 +222,9 @@ describe('fargate', () => { fargateProfileName: 'my-app', selectors: [{ namespace: 'foo' }, { namespace: 'bar' }], }, version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // THEN @@ -217,6 +256,9 @@ describe('fargate', () => { selectors: [{ namespace: 'foo' }, { namespace: 'bar' }], }, version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // THEN @@ -240,7 +282,12 @@ describe('fargate', () => { test('multiple Fargate profiles added to a cluster are processed sequentially', () => { // GIVEN const stack = new Stack(); - const cluster = new eks.Cluster(stack, 'MyCluster', { version: CLUSTER_VERSION }); + const cluster = new eks.Cluster(stack, 'MyCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN cluster.addFargateProfile('MyProfile1', { @@ -269,32 +316,6 @@ describe('fargate', () => { }); }); - test('allow cluster creation role to iam:PassRole on fargate pod execution role', () => { - // GIVEN - const stack = new Stack(); - - // WHEN - new 
eks.FargateCluster(stack, 'FargateCluster', { version: CLUSTER_VERSION }); - - // THEN - Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { - PolicyDocument: { - Statement: [ - { - Action: 'iam:PassRole', - Effect: 'Allow', - Resource: { - 'Fn::GetAtt': [ - 'FargateClusterfargateprofiledefaultPodExecutionRole66F2610E', - 'Arn', - ], - }, - }, - ], - }, - }); - }); - test('supports passing secretsEncryptionKey with FargateCluster', () => { // GIVEN const stack = new Stack(); @@ -304,6 +325,9 @@ describe('fargate', () => { new eks.FargateCluster(stack, 'FargateCluster', { version: CLUSTER_VERSION, secretsEncryptionKey: new kms.Key(stack, 'Key'), + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // THEN @@ -335,6 +359,9 @@ describe('fargate', () => { eks.ClusterLoggingTypes.AUTHENTICATOR, eks.ClusterLoggingTypes.SCHEDULER, ], + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // THEN diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/test/k8s-manifest.test.ts b/packages/@aws-cdk/aws-eks-v2-alpha/test/k8s-manifest.test.ts index 1c69bcdfd7a7c..7f5f7a66c7e47 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/test/k8s-manifest.test.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/test/k8s-manifest.test.ts @@ -1,7 +1,9 @@ import { testFixtureNoVpc, testFixtureCluster } from './util'; import { Template } from 'aws-cdk-lib/assertions'; import { CfnResource, Stack } from 'aws-cdk-lib/core'; -import { Cluster, KubernetesManifest, KubernetesVersion, HelmChart } from '../lib'; +import * as iam from 'aws-cdk-lib/aws-iam'; +import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31'; +import { Cluster, KubernetesManifest, KubernetesVersion, HelmChart, KubectlProvider } from '../lib'; /* eslint-disable max-len */ @@ -11,7 +13,12 @@ describe('k8s manifest', () => { test('basic usage', () => { // GIVEN const { stack } = testFixtureNoVpc(); - const cluster = new Cluster(stack, 'cluster', { version: CLUSTER_VERSION }); + const cluster = new Cluster(stack, 'cluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); const manifest = [ { @@ -79,9 +86,14 @@ describe('k8s manifest', () => { test('can be added to an imported cluster with minimal config', () => { // GIVEN const stack = new Stack(); + const handlerRole = iam.Role.fromRoleArn(stack, 'HandlerRole', 'arn:aws:iam::123456789012:role/lambda-role'); + const kubectlProvider = KubectlProvider.fromKubectlProviderAttributes(stack, 'KubectlProvider', { + serviceToken: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1', + role: handlerRole, + }); const cluster = Cluster.fromClusterAttributes(stack, 'MyCluster', { clusterName: 'my-cluster-name', - kubectlRoleArn: 'arn:aws:iam::1111111:role/iam-role-that-has-masters-access', + kubectlProvider: kubectlProvider, }); // WHEN @@ -92,12 +104,10 @@ describe('k8s manifest', () => { Template.fromStack(stack).hasResourceProperties(KubernetesManifest.RESOURCE_TYPE, { Manifest: '[{"bar":2334}]', ClusterName: 'my-cluster-name', - RoleArn: 'arn:aws:iam::1111111:role/iam-role-that-has-masters-access', }); Template.fromStack(stack).hasResourceProperties(HelmChart.RESOURCE_TYPE, { ClusterName: 'my-cluster-name', - RoleArn: 'arn:aws:iam::1111111:role/iam-role-that-has-masters-access', Release: 'myclustercharthelm78d2c26a', Chart: 'hello-world', Namespace: 'default', @@ -107,9 +117,14 @@ describe('k8s manifest', () => { test('default 
child is a CfnResource', () => { const stack = new Stack(); + const handlerRole = iam.Role.fromRoleArn(stack, 'HandlerRole', 'arn:aws:iam::123456789012:role/lambda-role'); + const kubectlProvider = KubectlProvider.fromKubectlProviderAttributes(stack, 'KubectlProvider', { + serviceToken: 'arn:aws:lambda:us-east-2:123456789012:function:my-function:1', + role: handlerRole, + }); const cluster = Cluster.fromClusterAttributes(stack, 'MyCluster', { clusterName: 'my-cluster-name', - kubectlRoleArn: 'arn:aws:iam::1111111:role/iam-role-that-has-masters-access', + kubectlProvider: kubectlProvider, }); const manifest = cluster.addManifest('foo', { bar: 2334 }); @@ -123,7 +138,10 @@ describe('k8s manifest', () => { // prune is enabled by default const cluster = new Cluster(stack, 'Cluster', { - version: KubernetesVersion.V1_16, + version: KubernetesVersion.V1_31, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); expect(cluster.prune).toEqual(true); @@ -311,8 +329,11 @@ describe('k8s manifest', () => { // GIVEN const { stack } = testFixtureNoVpc(); const cluster = new Cluster(stack, 'Cluster', { - version: KubernetesVersion.V1_16, + version: KubernetesVersion.V1_31, prune: false, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, }); // WHEN diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/test/k8s-object-value.test.ts b/packages/@aws-cdk/aws-eks-v2-alpha/test/k8s-object-value.test.ts index af23712d33d1c..920d4bf15152f 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/test/k8s-object-value.test.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/test/k8s-object-value.test.ts @@ -1,4 +1,5 @@ import { App, Stack, Duration } from 'aws-cdk-lib/core'; +import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31'; import * as eks from '../lib'; import { KubernetesObjectValue } from '../lib/k8s-object-value'; @@ -8,7 +9,12 @@ describe('k8s object value', () => { test('creates the correct custom resource with explicit values for all properties', () => { // GIVEN const stack = new Stack(); - const cluster = new eks.Cluster(stack, 'MyCluster', { version: CLUSTER_VERSION }); + const cluster = new eks.Cluster(stack, 'MyCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN const attribute = new KubernetesObjectValue(stack, 'MyAttribute', { @@ -29,12 +35,11 @@ describe('k8s object value', () => { Properties: { ServiceToken: { 'Fn::GetAtt': [ - 'awscdkawseksKubectlProviderframeworkonEvent0A650005', + 'MyClusterKubectlProviderframeworkonEvent7B04B277', 'Arn', ], }, ClusterName: { Ref: 'MyCluster4C1BA579' }, - RoleArn: { 'Fn::GetAtt': ['MyClusterkubectlRole29979636', 'Arn'] }, ObjectType: 'deployment', ObjectName: 'mydeployment', ObjectNamespace: 'mynamespace', @@ -52,7 +57,12 @@ describe('k8s object value', () => { test('creates the correct custom resource with defaults', () => { // GIVEN const stack = new Stack(); - const cluster = new eks.Cluster(stack, 'MyCluster', { version: CLUSTER_VERSION }); + const cluster = new eks.Cluster(stack, 'MyCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN const attribute = new KubernetesObjectValue(stack, 'MyAttribute', { @@ -70,12 +80,11 @@ describe('k8s object value', () => { Properties: { ServiceToken: { 'Fn::GetAtt': [ - 'awscdkawseksKubectlProviderframeworkonEvent0A650005', + 
'MyClusterKubectlProviderframeworkonEvent7B04B277', 'Arn', ], }, ClusterName: { Ref: 'MyCluster4C1BA579' }, - RoleArn: { 'Fn::GetAtt': ['MyClusterkubectlRole29979636', 'Arn'] }, ObjectType: 'deployment', ObjectName: 'mydeployment', ObjectNamespace: 'default', diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/test/k8s-patch.test.ts b/packages/@aws-cdk/aws-eks-v2-alpha/test/k8s-patch.test.ts index c76e5dcfdd9ee..8b44b71abb39b 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/test/k8s-patch.test.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/test/k8s-patch.test.ts @@ -1,5 +1,6 @@ import { Template } from 'aws-cdk-lib/assertions'; import { Names, Stack } from 'aws-cdk-lib/core'; +import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31'; import * as eks from '../lib'; import { KubernetesPatch, PatchType } from '../lib/k8s-patch'; @@ -9,7 +10,12 @@ describe('k8s patch', () => { test('applies a patch to k8s', () => { // GIVEN const stack = new Stack(); - const cluster = new eks.Cluster(stack, 'MyCluster', { version: CLUSTER_VERSION }); + const cluster = new eks.Cluster(stack, 'MyCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN const patch = new KubernetesPatch(stack, 'MyPatch', { @@ -23,7 +29,7 @@ describe('k8s patch', () => { Template.fromStack(stack).hasResourceProperties('Custom::AWSCDK-EKS-KubernetesPatch', { ServiceToken: { 'Fn::GetAtt': [ - 'awscdkawseksKubectlProviderframeworkonEvent0A650005', + 'MyClusterKubectlProviderframeworkonEvent7B04B277', 'Arn', ], }, @@ -34,12 +40,6 @@ describe('k8s patch', () => { ClusterName: { Ref: 'MyCluster4C1BA579', }, - RoleArn: { - 'Fn::GetAtt': [ - 'MyClusterkubectlRole29979636', - 'Arn', - ], - }, }); // also make sure a dependency on the barrier is added to the patch construct. 
@@ -49,7 +49,12 @@ describe('k8s patch', () => { test('defaults to "strategic" patch type if no patchType is specified', () => { // GIVEN const stack = new Stack(); - const cluster = new eks.Cluster(stack, 'MyCluster', { version: CLUSTER_VERSION }); + const cluster = new eks.Cluster(stack, 'MyCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN new KubernetesPatch(stack, 'MyPatch', { @@ -66,7 +71,12 @@ describe('k8s patch', () => { test('uses specified to patch type if specified', () => { // GIVEN const stack = new Stack(); - const cluster = new eks.Cluster(stack, 'MyCluster', { version: CLUSTER_VERSION }); + const cluster = new eks.Cluster(stack, 'MyCluster', { + version: CLUSTER_VERSION, + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, + }); // WHEN new KubernetesPatch(stack, 'jsonPatch', { diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/test/kubectl-provider.test.ts b/packages/@aws-cdk/aws-eks-v2-alpha/test/kubectl-provider.test.ts deleted file mode 100644 index 3f426c21c70b1..0000000000000 --- a/packages/@aws-cdk/aws-eks-v2-alpha/test/kubectl-provider.test.ts +++ /dev/null @@ -1,21 +0,0 @@ -import { testFixtureCluster } from './util'; -import { Template } from 'aws-cdk-lib/assertions'; -import { Stack } from 'aws-cdk-lib/core'; -import * as eks from '../lib'; - -describe('KubectlProvider', () => { - test('creates AWS::Lambda::Function onEvent handler with correct AWS_STS_REGIONAL_ENDPOINTS environment variable', () => { - const { stack } = testFixtureCluster(); - // find the KubectlProvider - const provider = stack.node.tryFindChild('@aws-cdk--aws-eks.KubectlProvider') as eks.KubectlProvider; - const providerStackTemplate = Template.fromStack(Stack.of(provider)); - providerStackTemplate.hasResourceProperties('AWS::Lambda::Function', { - Description: 'onEvent handler for EKS kubectl resource provider', - Environment: { - Variables: { - AWS_STS_REGIONAL_ENDPOINTS: 'regional', - }, - }, - }); - }); -}); diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/test/service-account.test.ts b/packages/@aws-cdk/aws-eks-v2-alpha/test/service-account.test.ts index 3ddbdabd9ea12..fb900f4e2d00b 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/test/service-account.test.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/test/service-account.test.ts @@ -18,7 +18,7 @@ describe('service account', () => { Template.fromStack(stack).hasResourceProperties(eks.KubernetesManifest.RESOURCE_TYPE, { ServiceToken: { 'Fn::GetAtt': [ - 'awscdkawseksKubectlProviderframeworkonEvent0A650005', + 'ClusterKubectlProviderframeworkonEvent68E0CF80', 'Arn', ], }, @@ -83,7 +83,7 @@ describe('service account', () => { Template.fromStack(stack).hasResourceProperties(eks.KubernetesManifest.RESOURCE_TYPE, { ServiceToken: { 'Fn::GetAtt': [ - 'awscdkawseksKubectlProviderframeworkonEvent0A650005', + 'ClusterKubectlProviderframeworkonEvent68E0CF80', 'Arn', ], }, @@ -141,7 +141,7 @@ describe('service account', () => { Template.fromStack(stack).hasResourceProperties(eks.KubernetesManifest.RESOURCE_TYPE, { ServiceToken: { 'Fn::GetAtt': [ - 'awscdkawseksKubectlProviderframeworkonEvent0A650005', + 'ClusterKubectlProviderframeworkonEvent68E0CF80', 'Arn', ], }, @@ -179,21 +179,23 @@ describe('service account', () => { const oidcProvider = new iam.OpenIdConnectProvider(stack, 'ClusterOpenIdConnectProvider', { url: 'oidc_issuer', }); + const handlerRole = iam.Role.fromRoleArn(stack, 'HandlerRole', 
'arn:aws:iam::123456789012:role/lambda-role'); + + const kubectlProvider = eks.KubectlProvider.fromKubectlProviderAttributes(stack, 'KubectlProvider', { + serviceToken: 'arn:aws:lambda:us-east-2:123456789012:function:myfunc', + role: handlerRole, + }); + const cluster = eks.Cluster.fromClusterAttributes(stack, 'Cluster', { clusterName: 'Cluster', openIdConnectProvider: oidcProvider, - kubectlRoleArn: 'arn:aws:iam::123456:role/service-role/k8sservicerole', + kubectlProvider: kubectlProvider, }); cluster.addServiceAccount('MyServiceAccount'); Template.fromStack(stack).hasResourceProperties(eks.KubernetesManifest.RESOURCE_TYPE, { - ServiceToken: { - 'Fn::GetAtt': [ - 'StackClusterF0EB02FAKubectlProviderframeworkonEvent0A3AB271', - 'Arn', - ], - }, + ServiceToken: 'arn:aws:lambda:us-east-2:123456789012:function:myfunc', PruneLabel: 'aws.cdk.eks/prune-c8d8e1722a4f3ed332f8ac74cb3d962f01fbb62291', Manifest: { 'Fn::Join': [ diff --git a/packages/@aws-cdk/aws-eks-v2-alpha/test/util.ts b/packages/@aws-cdk/aws-eks-v2-alpha/test/util.ts index 158dc12f99250..f794118d3fa14 100644 --- a/packages/@aws-cdk/aws-eks-v2-alpha/test/util.ts +++ b/packages/@aws-cdk/aws-eks-v2-alpha/test/util.ts @@ -1,5 +1,6 @@ import * as ec2 from 'aws-cdk-lib/aws-ec2'; import { App, Stack } from 'aws-cdk-lib/core'; +import { KubectlV31Layer } from '@aws-cdk/lambda-layer-kubectl-v31'; import { Cluster, FargateCluster, ClusterProps, KubernetesVersion } from '../lib'; const CLUSTER_VERSION = KubernetesVersion.V1_25; @@ -39,6 +40,9 @@ export function testFixtureCluster(props: Omit = {}, re const clusterProps = { version: CLUSTER_VERSION, prune: false, // mainly because this feature was added later and we wanted to avoid having to update all test expectations.... + kubectlProviderOptions: { + kubectlLayer: new KubectlV31Layer(stack, 'kubectlLayer'), + }, ...props, }; const cluster = options?.isFargate ? new FargateCluster(stack, 'Cluster', clusterProps) : new Cluster(stack, 'Cluster', clusterProps); diff --git a/yarn.lock b/yarn.lock index 226a3b027c280..00cebe561b187 100644 --- a/yarn.lock +++ b/yarn.lock @@ -18515,7 +18515,7 @@ semver@^6.0.0, semver@^6.1.1, semver@^6.3.0, semver@^6.3.1: resolved "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== -semver@^7.0.0, semver@^7.1.1, semver@^7.3.2, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.3.8, semver@^7.5.1, semver@^7.5.3, semver@^7.5.4, semver@^7.6.0, semver@^7.6.3: +semver@^7.0.0, semver@^7.1.1, semver@^7.3.2, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.3.8, semver@^7.5.3, semver@^7.5.4, semver@^7.6.0, semver@^7.6.3: version "7.6.3" resolved "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==