Skip to content

Commit

Permalink
AWS SDK v2 migration (awslabs#2339)
Browse files Browse the repository at this point in the history
Signed-off-by: dependabot[bot] <[email protected]>
Co-authored-by: Trianz-Akshay <[email protected]>
Co-authored-by: Jithendar Trianz <[email protected]>
Co-authored-by: VenkatasivareddyTR <[email protected]>
Co-authored-by: ejeffrli <[email protected]>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Jeffrey Lin <[email protected]>
Co-authored-by: Mario Rial <[email protected]>
Co-authored-by: AbdulRehman <[email protected]>
  • Loading branch information
10 people authored Oct 22, 2024
1 parent fb376ca commit 8fe875f
Show file tree
Hide file tree
Showing 422 changed files with 6,258 additions and 6,072 deletions.
9 changes: 9 additions & 0 deletions athena-aws-cmdb/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
FROM public.ecr.aws/lambda/java:11

# Shaded connector jar produced by the Maven build. The version appears only
# here: override at build time (docker build --build-arg JAR_NAME=...) instead
# of editing two lines on every release bump. Default matches the current release.
ARG JAR_NAME=athena-aws-cmdb-2022.47.1.jar

# Copy function code and runtime dependencies from Maven layout.
# The lambda/java base image sets WORKDIR to ${LAMBDA_TASK_ROOT}, so the
# relative "jar xf" below unpacks into the task root.
COPY target/${JAR_NAME} ${LAMBDA_TASK_ROOT}
# Unpack the jar so the Lambda runtime can load the classes directly
RUN jar xf ${JAR_NAME}

# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
CMD [ "com.amazonaws.athena.connectors.aws.cmdb.AwsCmdbCompositeHandler" ]
5 changes: 2 additions & 3 deletions athena-aws-cmdb/athena-aws-cmdb.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -52,10 +52,9 @@ Resources:
spill_bucket: !Ref SpillBucket
spill_prefix: !Ref SpillPrefix
FunctionName: !Ref AthenaCatalogName
Handler: "com.amazonaws.athena.connectors.aws.cmdb.AwsCmdbCompositeHandler"
CodeUri: "./target/athena-aws-cmdb-2022.47.1.jar"
PackageType: "Image"
ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-aws-cmdb:2022.47.1'
Description: "Enables Amazon Athena to communicate with various AWS Services, making your resource inventories accessible via SQL."
Runtime: java11
Timeout: !Ref LambdaTimeout
MemorySize: !Ref LambdaMemory
PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ]
Expand Down
24 changes: 15 additions & 9 deletions athena-aws-cmdb/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,9 @@
<classifier>withdep</classifier>
</dependency>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-ec2</artifactId>
<version>${aws-sdk.version}</version>
<groupId>software.amazon.awssdk</groupId>
<artifactId>ec2</artifactId>
<version>${aws-sdk-v2.version}</version>
<exclusions>
<!-- replaced with jcl-over-slf4j -->
<exclusion>
Expand All @@ -28,14 +28,20 @@
</exclusions>
</dependency>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-emr</artifactId>
<version>${aws-sdk.version}</version>
<groupId>software.amazon.awssdk</groupId>
<artifactId>emr</artifactId>
<version>${aws-sdk-v2.version}</version>
</dependency>
<dependency>
<groupId>com.amazonaws</groupId>
<artifactId>aws-java-sdk-rds</artifactId>
<version>${aws-sdk.version}</version>
<groupId>software.amazon.awssdk</groupId>
<artifactId>rds</artifactId>
<version>${aws-sdk-v2.version}</version>
<exclusions>
<exclusion>
<groupId>software.amazon.awssdk</groupId>
<artifactId>netty-nio-client</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,9 @@
import com.amazonaws.athena.connector.lambda.security.EncryptionKey;
import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
import com.amazonaws.services.athena.AmazonAthena;
import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.util.VisibleForTesting;
import software.amazon.awssdk.services.athena.AthenaClient;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;

import java.util.List;
import java.util.Map;
Expand Down Expand Up @@ -77,8 +77,8 @@ public AwsCmdbMetadataHandler(java.util.Map<String, String> configOptions)
protected AwsCmdbMetadataHandler(
TableProviderFactory tableProviderFactory,
EncryptionKeyFactory keyFactory,
AWSSecretsManager secretsManager,
AmazonAthena athena,
SecretsManagerClient secretsManager,
AthenaClient athena,
String spillBucket,
String spillPrefix,
java.util.Map<String, String> configOptions)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,10 @@
import com.amazonaws.athena.connector.lambda.handlers.RecordHandler;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
import com.amazonaws.services.athena.AmazonAthena;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.util.VisibleForTesting;
import software.amazon.awssdk.services.athena.AthenaClient;
import software.amazon.awssdk.services.s3.S3Client;
import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;

import java.util.Map;

Expand Down Expand Up @@ -56,7 +56,7 @@ public AwsCmdbRecordHandler(java.util.Map<String, String> configOptions)
}

@VisibleForTesting
protected AwsCmdbRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, TableProviderFactory tableProviderFactory, java.util.Map<String, String> configOptions)
protected AwsCmdbRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, TableProviderFactory tableProviderFactory, java.util.Map<String, String> configOptions)
{
super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions);
tableProviders = tableProviderFactory.getTableProviders();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,15 +32,11 @@
import com.amazonaws.athena.connectors.aws.cmdb.tables.ec2.VpcTableProvider;
import com.amazonaws.athena.connectors.aws.cmdb.tables.s3.S3BucketsTableProvider;
import com.amazonaws.athena.connectors.aws.cmdb.tables.s3.S3ObjectsTableProvider;
import com.amazonaws.services.ec2.AmazonEC2;
import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;
import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce;
import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder;
import com.amazonaws.services.rds.AmazonRDS;
import com.amazonaws.services.rds.AmazonRDSClientBuilder;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import org.apache.arrow.util.VisibleForTesting;
import software.amazon.awssdk.services.ec2.Ec2Client;
import software.amazon.awssdk.services.emr.EmrClient;
import software.amazon.awssdk.services.rds.RdsClient;
import software.amazon.awssdk.services.s3.S3Client;

import java.util.ArrayList;
import java.util.HashMap;
Expand All @@ -59,15 +55,15 @@ public class TableProviderFactory
public TableProviderFactory(java.util.Map<String, String> configOptions)
{
this(
AmazonEC2ClientBuilder.standard().build(),
AmazonElasticMapReduceClientBuilder.standard().build(),
AmazonRDSClientBuilder.standard().build(),
AmazonS3ClientBuilder.standard().build(),
Ec2Client.create(),
EmrClient.create(),
RdsClient.create(),
S3Client.create(),
configOptions);
}

@VisibleForTesting
protected TableProviderFactory(AmazonEC2 ec2, AmazonElasticMapReduce emr, AmazonRDS rds, AmazonS3 amazonS3, java.util.Map<String, String> configOptions)
protected TableProviderFactory(Ec2Client ec2, EmrClient emr, RdsClient rds, S3Client amazonS3, java.util.Map<String, String> configOptions)
{
addProvider(new Ec2TableProvider(ec2));
addProvider(new EbsTableProvider(ec2));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,15 +29,15 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableRequest;
import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce;
import com.amazonaws.services.elasticmapreduce.model.Cluster;
import com.amazonaws.services.elasticmapreduce.model.ClusterSummary;
import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest;
import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult;
import com.amazonaws.services.elasticmapreduce.model.ListClustersRequest;
import com.amazonaws.services.elasticmapreduce.model.ListClustersResult;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
import software.amazon.awssdk.services.emr.EmrClient;
import software.amazon.awssdk.services.emr.model.Cluster;
import software.amazon.awssdk.services.emr.model.ClusterSummary;
import software.amazon.awssdk.services.emr.model.DescribeClusterRequest;
import software.amazon.awssdk.services.emr.model.DescribeClusterResponse;
import software.amazon.awssdk.services.emr.model.ListClustersRequest;
import software.amazon.awssdk.services.emr.model.ListClustersResponse;

import java.util.List;
import java.util.stream.Collectors;
Expand All @@ -49,9 +49,9 @@ public class EmrClusterTableProvider
implements TableProvider
{
private static final Schema SCHEMA;
private AmazonElasticMapReduce emr;
private EmrClient emr;

public EmrClusterTableProvider(AmazonElasticMapReduce emr)
public EmrClusterTableProvider(EmrClient emr)
{
this.emr = emr;
}
Expand Down Expand Up @@ -93,23 +93,23 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest
public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker)
{
boolean done = false;
ListClustersRequest request = new ListClustersRequest();
ListClustersRequest request = ListClustersRequest.builder().build();

while (!done) {
ListClustersResult response = emr.listClusters(request);
ListClustersResponse response = emr.listClusters(request);

for (ClusterSummary next : response.getClusters()) {
for (ClusterSummary next : response.clusters()) {
Cluster cluster = null;
if (!next.getStatus().getState().toLowerCase().contains("terminated")) {
DescribeClusterResult clusterResponse = emr.describeCluster(new DescribeClusterRequest().withClusterId(next.getId()));
cluster = clusterResponse.getCluster();
if (!next.status().stateAsString().toLowerCase().contains("terminated")) {
DescribeClusterResponse clusterResponse = emr.describeCluster(DescribeClusterRequest.builder().clusterId(next.id()).build());
cluster = clusterResponse.cluster();
}
clusterToRow(next, cluster, spiller);
}

request.setMarker(response.getMarker());
request = request.toBuilder().marker(response.marker()).build();

if (response.getMarker() == null || !queryStatusChecker.isQueryRunning()) {
if (response.marker() == null || !queryStatusChecker.isQueryRunning()) {
done = true;
}
}
Expand All @@ -131,31 +131,31 @@ private void clusterToRow(ClusterSummary clusterSummary,
spiller.writeRows((Block block, int row) -> {
boolean matched = true;

matched &= block.offerValue("id", row, clusterSummary.getId());
matched &= block.offerValue("name", row, clusterSummary.getName());
matched &= block.offerValue("instance_hours", row, clusterSummary.getNormalizedInstanceHours());
matched &= block.offerValue("state", row, clusterSummary.getStatus().getState());
matched &= block.offerValue("state_code", row, clusterSummary.getStatus().getStateChangeReason().getCode());
matched &= block.offerValue("state_msg", row, clusterSummary.getStatus().getStateChangeReason().getMessage());
matched &= block.offerValue("id", row, clusterSummary.id());
matched &= block.offerValue("name", row, clusterSummary.name());
matched &= block.offerValue("instance_hours", row, clusterSummary.normalizedInstanceHours());
matched &= block.offerValue("state", row, clusterSummary.status().stateAsString());
matched &= block.offerValue("state_code", row, clusterSummary.status().stateChangeReason().codeAsString());
matched &= block.offerValue("state_msg", row, clusterSummary.status().stateChangeReason().message());

if (cluster != null) {
matched &= block.offerValue("autoscaling_role", row, cluster.getAutoScalingRole());
matched &= block.offerValue("custom_ami", row, cluster.getCustomAmiId());
matched &= block.offerValue("instance_collection_type", row, cluster.getInstanceCollectionType());
matched &= block.offerValue("log_uri", row, cluster.getLogUri());
matched &= block.offerValue("master_public_dns", row, cluster.getMasterPublicDnsName());
matched &= block.offerValue("release_label", row, cluster.getReleaseLabel());
matched &= block.offerValue("running_ami", row, cluster.getRunningAmiVersion());
matched &= block.offerValue("scale_down_behavior", row, cluster.getScaleDownBehavior());
matched &= block.offerValue("service_role", row, cluster.getServiceRole());
matched &= block.offerValue("service_role", row, cluster.getServiceRole());

List<String> applications = cluster.getApplications().stream()
.map(next -> next.getName() + ":" + next.getVersion()).collect(Collectors.toList());
matched &= block.offerValue("autoscaling_role", row, cluster.autoScalingRole());
matched &= block.offerValue("custom_ami", row, cluster.customAmiId());
matched &= block.offerValue("instance_collection_type", row, cluster.instanceCollectionTypeAsString());
matched &= block.offerValue("log_uri", row, cluster.logUri());
matched &= block.offerValue("master_public_dns", row, cluster.masterPublicDnsName());
matched &= block.offerValue("release_label", row, cluster.releaseLabel());
matched &= block.offerValue("running_ami", row, cluster.runningAmiVersion());
matched &= block.offerValue("scale_down_behavior", row, cluster.scaleDownBehaviorAsString());
matched &= block.offerValue("service_role", row, cluster.serviceRole());
matched &= block.offerValue("service_role", row, cluster.serviceRole());

List<String> applications = cluster.applications().stream()
.map(next -> next.name() + ":" + next.version()).collect(Collectors.toList());
matched &= block.offerComplexValue("applications", row, FieldResolver.DEFAULT, applications);

List<String> tags = cluster.getTags().stream()
.map(next -> next.getKey() + ":" + next.getValue()).collect(Collectors.toList());
List<String> tags = cluster.tags().stream()
.map(next -> next.key() + ":" + next.value()).collect(Collectors.toList());
matched &= block.offerComplexValue("tags", row, FieldResolver.DEFAULT, tags);
}

Expand Down
Loading

0 comments on commit 8fe875f

Please sign in to comment.