diff --git a/athena-aws-cmdb/Dockerfile b/athena-aws-cmdb/Dockerfile
new file mode 100644
index 0000000000..a599a28963
--- /dev/null
+++ b/athena-aws-cmdb/Dockerfile
@@ -0,0 +1,9 @@
+FROM public.ecr.aws/lambda/java:11
+
+# Copy function code and runtime dependencies from Maven layout
+COPY target/athena-aws-cmdb-2022.47.1.jar ${LAMBDA_TASK_ROOT}
+# Unpack the jar
+RUN jar xf athena-aws-cmdb-2022.47.1.jar
+
+# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
+CMD [ "com.amazonaws.athena.connectors.aws.cmdb.AwsCmdbCompositeHandler" ]
\ No newline at end of file
diff --git a/athena-aws-cmdb/athena-aws-cmdb.yaml b/athena-aws-cmdb/athena-aws-cmdb.yaml
index b3265cd1eb..4365e6781d 100644
--- a/athena-aws-cmdb/athena-aws-cmdb.yaml
+++ b/athena-aws-cmdb/athena-aws-cmdb.yaml
@@ -52,10 +52,9 @@ Resources:
spill_bucket: !Ref SpillBucket
spill_prefix: !Ref SpillPrefix
FunctionName: !Ref AthenaCatalogName
- Handler: "com.amazonaws.athena.connectors.aws.cmdb.AwsCmdbCompositeHandler"
- CodeUri: "./target/athena-aws-cmdb-2022.47.1.jar"
+ PackageType: "Image"
+ ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-aws-cmdb:2022.47.1'
Description: "Enables Amazon Athena to communicate with various AWS Services, making your resource inventories accessible via SQL."
- Runtime: java11
Timeout: !Ref LambdaTimeout
MemorySize: !Ref LambdaMemory
PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ]
diff --git a/athena-aws-cmdb/pom.xml b/athena-aws-cmdb/pom.xml
index 90c1ad7b59..6cc732de9c 100644
--- a/athena-aws-cmdb/pom.xml
+++ b/athena-aws-cmdb/pom.xml
@@ -16,9 +16,9 @@
withdep
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-ec2</artifactId>
-            <version>${aws-sdk.version}</version>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>ec2</artifactId>
+            <version>${aws-sdk-v2.version}</version>
@@ -28,14 +28,20 @@
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-emr</artifactId>
-            <version>${aws-sdk.version}</version>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>emr</artifactId>
+            <version>${aws-sdk-v2.version}</version>
-            <groupId>com.amazonaws</groupId>
-            <artifactId>aws-java-sdk-rds</artifactId>
-            <version>${aws-sdk.version}</version>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>rds</artifactId>
+            <version>${aws-sdk-v2.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>software.amazon.awssdk</groupId>
+            <artifactId>netty-nio-client</artifactId>
+        </dependency>
+        <dependency>
org.slf4j
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandler.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandler.java
index 4a4b61f694..f2626625ba 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandler.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandler.java
@@ -39,9 +39,9 @@
import com.amazonaws.athena.connector.lambda.security.EncryptionKey;
import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.util.VisibleForTesting;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.List;
import java.util.Map;
@@ -77,8 +77,8 @@ public AwsCmdbMetadataHandler(java.util.Map configOptions)
protected AwsCmdbMetadataHandler(
TableProviderFactory tableProviderFactory,
EncryptionKeyFactory keyFactory,
- AWSSecretsManager secretsManager,
- AmazonAthena athena,
+ SecretsManagerClient secretsManager,
+ AthenaClient athena,
String spillBucket,
String spillPrefix,
java.util.Map configOptions)
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandler.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandler.java
index 9dcfe3ffe6..dc530d3f90 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandler.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandler.java
@@ -25,10 +25,10 @@
import com.amazonaws.athena.connector.lambda.handlers.RecordHandler;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.util.VisibleForTesting;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Map;
@@ -56,7 +56,7 @@ public AwsCmdbRecordHandler(java.util.Map configOptions)
}
@VisibleForTesting
- protected AwsCmdbRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, TableProviderFactory tableProviderFactory, java.util.Map configOptions)
+ protected AwsCmdbRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, TableProviderFactory tableProviderFactory, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions);
tableProviders = tableProviderFactory.getTableProviders();
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java
index d5868d33db..cdd1743950 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactory.java
@@ -32,15 +32,11 @@
import com.amazonaws.athena.connectors.aws.cmdb.tables.ec2.VpcTableProvider;
import com.amazonaws.athena.connectors.aws.cmdb.tables.s3.S3BucketsTableProvider;
import com.amazonaws.athena.connectors.aws.cmdb.tables.s3.S3ObjectsTableProvider;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.AmazonEC2ClientBuilder;
-import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce;
-import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduceClientBuilder;
-import com.amazonaws.services.rds.AmazonRDS;
-import com.amazonaws.services.rds.AmazonRDSClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import org.apache.arrow.util.VisibleForTesting;
+import software.amazon.awssdk.services.ec2.Ec2Client;
+import software.amazon.awssdk.services.emr.EmrClient;
+import software.amazon.awssdk.services.rds.RdsClient;
+import software.amazon.awssdk.services.s3.S3Client;
import java.util.ArrayList;
import java.util.HashMap;
@@ -59,15 +55,15 @@ public class TableProviderFactory
public TableProviderFactory(java.util.Map configOptions)
{
this(
- AmazonEC2ClientBuilder.standard().build(),
- AmazonElasticMapReduceClientBuilder.standard().build(),
- AmazonRDSClientBuilder.standard().build(),
- AmazonS3ClientBuilder.standard().build(),
+ Ec2Client.create(),
+ EmrClient.create(),
+ RdsClient.create(),
+ S3Client.create(),
configOptions);
}
@VisibleForTesting
- protected TableProviderFactory(AmazonEC2 ec2, AmazonElasticMapReduce emr, AmazonRDS rds, AmazonS3 amazonS3, java.util.Map configOptions)
+ protected TableProviderFactory(Ec2Client ec2, EmrClient emr, RdsClient rds, S3Client amazonS3, java.util.Map configOptions)
{
addProvider(new Ec2TableProvider(ec2));
addProvider(new EbsTableProvider(ec2));
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProvider.java
index ee3b15da91..c3d10c7233 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProvider.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/EmrClusterTableProvider.java
@@ -29,15 +29,15 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableRequest;
import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
-import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce;
-import com.amazonaws.services.elasticmapreduce.model.Cluster;
-import com.amazonaws.services.elasticmapreduce.model.ClusterSummary;
-import com.amazonaws.services.elasticmapreduce.model.DescribeClusterRequest;
-import com.amazonaws.services.elasticmapreduce.model.DescribeClusterResult;
-import com.amazonaws.services.elasticmapreduce.model.ListClustersRequest;
-import com.amazonaws.services.elasticmapreduce.model.ListClustersResult;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
+import software.amazon.awssdk.services.emr.EmrClient;
+import software.amazon.awssdk.services.emr.model.Cluster;
+import software.amazon.awssdk.services.emr.model.ClusterSummary;
+import software.amazon.awssdk.services.emr.model.DescribeClusterRequest;
+import software.amazon.awssdk.services.emr.model.DescribeClusterResponse;
+import software.amazon.awssdk.services.emr.model.ListClustersRequest;
+import software.amazon.awssdk.services.emr.model.ListClustersResponse;
import java.util.List;
import java.util.stream.Collectors;
@@ -49,9 +49,9 @@ public class EmrClusterTableProvider
implements TableProvider
{
private static final Schema SCHEMA;
- private AmazonElasticMapReduce emr;
+ private EmrClient emr;
- public EmrClusterTableProvider(AmazonElasticMapReduce emr)
+ public EmrClusterTableProvider(EmrClient emr)
{
this.emr = emr;
}
@@ -93,23 +93,23 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest
public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker)
{
boolean done = false;
- ListClustersRequest request = new ListClustersRequest();
+ ListClustersRequest request = ListClustersRequest.builder().build();
while (!done) {
- ListClustersResult response = emr.listClusters(request);
+ ListClustersResponse response = emr.listClusters(request);
- for (ClusterSummary next : response.getClusters()) {
+ for (ClusterSummary next : response.clusters()) {
Cluster cluster = null;
- if (!next.getStatus().getState().toLowerCase().contains("terminated")) {
- DescribeClusterResult clusterResponse = emr.describeCluster(new DescribeClusterRequest().withClusterId(next.getId()));
- cluster = clusterResponse.getCluster();
+ if (!next.status().stateAsString().toLowerCase().contains("terminated")) {
+ DescribeClusterResponse clusterResponse = emr.describeCluster(DescribeClusterRequest.builder().clusterId(next.id()).build());
+ cluster = clusterResponse.cluster();
}
clusterToRow(next, cluster, spiller);
}
- request.setMarker(response.getMarker());
+ request = request.toBuilder().marker(response.marker()).build();
- if (response.getMarker() == null || !queryStatusChecker.isQueryRunning()) {
+ if (response.marker() == null || !queryStatusChecker.isQueryRunning()) {
done = true;
}
}
@@ -131,31 +131,31 @@ private void clusterToRow(ClusterSummary clusterSummary,
spiller.writeRows((Block block, int row) -> {
boolean matched = true;
- matched &= block.offerValue("id", row, clusterSummary.getId());
- matched &= block.offerValue("name", row, clusterSummary.getName());
- matched &= block.offerValue("instance_hours", row, clusterSummary.getNormalizedInstanceHours());
- matched &= block.offerValue("state", row, clusterSummary.getStatus().getState());
- matched &= block.offerValue("state_code", row, clusterSummary.getStatus().getStateChangeReason().getCode());
- matched &= block.offerValue("state_msg", row, clusterSummary.getStatus().getStateChangeReason().getMessage());
+ matched &= block.offerValue("id", row, clusterSummary.id());
+ matched &= block.offerValue("name", row, clusterSummary.name());
+ matched &= block.offerValue("instance_hours", row, clusterSummary.normalizedInstanceHours());
+ matched &= block.offerValue("state", row, clusterSummary.status().stateAsString());
+ matched &= block.offerValue("state_code", row, clusterSummary.status().stateChangeReason().codeAsString());
+ matched &= block.offerValue("state_msg", row, clusterSummary.status().stateChangeReason().message());
if (cluster != null) {
- matched &= block.offerValue("autoscaling_role", row, cluster.getAutoScalingRole());
- matched &= block.offerValue("custom_ami", row, cluster.getCustomAmiId());
- matched &= block.offerValue("instance_collection_type", row, cluster.getInstanceCollectionType());
- matched &= block.offerValue("log_uri", row, cluster.getLogUri());
- matched &= block.offerValue("master_public_dns", row, cluster.getMasterPublicDnsName());
- matched &= block.offerValue("release_label", row, cluster.getReleaseLabel());
- matched &= block.offerValue("running_ami", row, cluster.getRunningAmiVersion());
- matched &= block.offerValue("scale_down_behavior", row, cluster.getScaleDownBehavior());
- matched &= block.offerValue("service_role", row, cluster.getServiceRole());
- matched &= block.offerValue("service_role", row, cluster.getServiceRole());
-
- List applications = cluster.getApplications().stream()
- .map(next -> next.getName() + ":" + next.getVersion()).collect(Collectors.toList());
+ matched &= block.offerValue("autoscaling_role", row, cluster.autoScalingRole());
+ matched &= block.offerValue("custom_ami", row, cluster.customAmiId());
+ matched &= block.offerValue("instance_collection_type", row, cluster.instanceCollectionTypeAsString());
+ matched &= block.offerValue("log_uri", row, cluster.logUri());
+ matched &= block.offerValue("master_public_dns", row, cluster.masterPublicDnsName());
+ matched &= block.offerValue("release_label", row, cluster.releaseLabel());
+ matched &= block.offerValue("running_ami", row, cluster.runningAmiVersion());
+ matched &= block.offerValue("scale_down_behavior", row, cluster.scaleDownBehaviorAsString());
+ matched &= block.offerValue("service_role", row, cluster.serviceRole());
+ matched &= block.offerValue("service_role", row, cluster.serviceRole());
+
+ List applications = cluster.applications().stream()
+ .map(next -> next.name() + ":" + next.version()).collect(Collectors.toList());
matched &= block.offerComplexValue("applications", row, FieldResolver.DEFAULT, applications);
- List tags = cluster.getTags().stream()
- .map(next -> next.getKey() + ":" + next.getValue()).collect(Collectors.toList());
+ List tags = cluster.tags().stream()
+ .map(next -> next.key() + ":" + next.value()).collect(Collectors.toList());
matched &= block.offerComplexValue("tags", row, FieldResolver.DEFAULT, tags);
}
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProvider.java
index f3d9a18a8b..d424476646 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProvider.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/RdsTableProvider.java
@@ -30,22 +30,22 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableRequest;
import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
-import com.amazonaws.services.rds.AmazonRDS;
-import com.amazonaws.services.rds.model.DBInstance;
-import com.amazonaws.services.rds.model.DBInstanceStatusInfo;
-import com.amazonaws.services.rds.model.DBParameterGroupStatus;
-import com.amazonaws.services.rds.model.DBSecurityGroupMembership;
-import com.amazonaws.services.rds.model.DBSubnetGroup;
-import com.amazonaws.services.rds.model.DescribeDBInstancesRequest;
-import com.amazonaws.services.rds.model.DescribeDBInstancesResult;
-import com.amazonaws.services.rds.model.DomainMembership;
-import com.amazonaws.services.rds.model.Endpoint;
-import com.amazonaws.services.rds.model.Subnet;
-import com.amazonaws.services.rds.model.Tag;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.ArrowType;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.Schema;
+import software.amazon.awssdk.services.rds.RdsClient;
+import software.amazon.awssdk.services.rds.model.DBInstance;
+import software.amazon.awssdk.services.rds.model.DBInstanceStatusInfo;
+import software.amazon.awssdk.services.rds.model.DBParameterGroupStatus;
+import software.amazon.awssdk.services.rds.model.DBSecurityGroupMembership;
+import software.amazon.awssdk.services.rds.model.DBSubnetGroup;
+import software.amazon.awssdk.services.rds.model.DescribeDbInstancesRequest;
+import software.amazon.awssdk.services.rds.model.DescribeDbInstancesResponse;
+import software.amazon.awssdk.services.rds.model.DomainMembership;
+import software.amazon.awssdk.services.rds.model.Endpoint;
+import software.amazon.awssdk.services.rds.model.Subnet;
+import software.amazon.awssdk.services.rds.model.Tag;
import java.util.stream.Collectors;
@@ -56,9 +56,9 @@ public class RdsTableProvider
implements TableProvider
{
private static final Schema SCHEMA;
- private AmazonRDS rds;
+ private RdsClient rds;
- public RdsTableProvider(AmazonRDS rds)
+ public RdsTableProvider(RdsClient rds)
{
this.rds = rds;
}
@@ -99,27 +99,24 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest
@Override
public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker)
{
- boolean done = false;
- DescribeDBInstancesRequest request = new DescribeDBInstancesRequest();
+ DescribeDbInstancesRequest.Builder requestBuilder = DescribeDbInstancesRequest.builder();
ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("instance_id");
if (idConstraint != null && idConstraint.isSingleValue()) {
- request.setDBInstanceIdentifier(idConstraint.getSingleValue().toString());
+ requestBuilder.dbInstanceIdentifier(idConstraint.getSingleValue().toString());
}
- while (!done) {
- DescribeDBInstancesResult response = rds.describeDBInstances(request);
+ DescribeDbInstancesResponse response;
+ do {
+ response = rds.describeDBInstances(requestBuilder.build());
- for (DBInstance instance : response.getDBInstances()) {
+ for (DBInstance instance : response.dbInstances()) {
instanceToRow(instance, spiller);
}
- request.setMarker(response.getMarker());
-
- if (response.getMarker() == null || !queryStatusChecker.isQueryRunning()) {
- done = true;
- }
+ requestBuilder.marker(response.marker());
}
+ while (response.marker() != null && queryStatusChecker.isQueryRunning());
}
/**
@@ -136,145 +133,145 @@ private void instanceToRow(DBInstance instance,
spiller.writeRows((Block block, int row) -> {
boolean matched = true;
- matched &= block.offerValue("instance_id", row, instance.getDBInstanceIdentifier());
- matched &= block.offerValue("primary_az", row, instance.getAvailabilityZone());
- matched &= block.offerValue("storage_gb", row, instance.getAllocatedStorage());
- matched &= block.offerValue("is_encrypted", row, instance.getStorageEncrypted());
- matched &= block.offerValue("storage_type", row, instance.getStorageType());
- matched &= block.offerValue("backup_retention_days", row, instance.getBackupRetentionPeriod());
- matched &= block.offerValue("auto_upgrade", row, instance.getAutoMinorVersionUpgrade());
- matched &= block.offerValue("instance_class", row, instance.getDBInstanceClass());
- matched &= block.offerValue("port", row, instance.getDbInstancePort());
- matched &= block.offerValue("status", row, instance.getDBInstanceStatus());
- matched &= block.offerValue("dbi_resource_id", row, instance.getDbiResourceId());
- matched &= block.offerValue("name", row, instance.getDBName());
- matched &= block.offerValue("engine", row, instance.getEngine());
- matched &= block.offerValue("engine_version", row, instance.getEngineVersion());
- matched &= block.offerValue("license_model", row, instance.getLicenseModel());
- matched &= block.offerValue("secondary_az", row, instance.getSecondaryAvailabilityZone());
- matched &= block.offerValue("backup_window", row, instance.getPreferredBackupWindow());
- matched &= block.offerValue("maint_window", row, instance.getPreferredMaintenanceWindow());
- matched &= block.offerValue("read_replica_source_id", row, instance.getReadReplicaSourceDBInstanceIdentifier());
- matched &= block.offerValue("create_time", row, instance.getInstanceCreateTime());
- matched &= block.offerValue("public_access", row, instance.getPubliclyAccessible());
- matched &= block.offerValue("iops", row, instance.getIops());
- matched &= block.offerValue("is_multi_az", row, instance.getMultiAZ());
+ matched &= block.offerValue("instance_id", row, instance.dbInstanceIdentifier());
+ matched &= block.offerValue("primary_az", row, instance.availabilityZone());
+ matched &= block.offerValue("storage_gb", row, instance.allocatedStorage());
+ matched &= block.offerValue("is_encrypted", row, instance.storageEncrypted());
+ matched &= block.offerValue("storage_type", row, instance.storageType());
+ matched &= block.offerValue("backup_retention_days", row, instance.backupRetentionPeriod());
+ matched &= block.offerValue("auto_upgrade", row, instance.autoMinorVersionUpgrade());
+ matched &= block.offerValue("instance_class", row, instance.dbInstanceClass());
+ matched &= block.offerValue("port", row, instance.dbInstancePort());
+ matched &= block.offerValue("status", row, instance.dbInstanceStatus());
+ matched &= block.offerValue("dbi_resource_id", row, instance.dbiResourceId());
+ matched &= block.offerValue("name", row, instance.dbName());
+ matched &= block.offerValue("engine", row, instance.engine());
+ matched &= block.offerValue("engine_version", row, instance.engineVersion());
+ matched &= block.offerValue("license_model", row, instance.licenseModel());
+ matched &= block.offerValue("secondary_az", row, instance.secondaryAvailabilityZone());
+ matched &= block.offerValue("backup_window", row, instance.preferredBackupWindow());
+ matched &= block.offerValue("maint_window", row, instance.preferredMaintenanceWindow());
+ matched &= block.offerValue("read_replica_source_id", row, instance.readReplicaSourceDBInstanceIdentifier());
+ matched &= block.offerValue("create_time", row, instance.instanceCreateTime());
+ matched &= block.offerValue("public_access", row, instance.publiclyAccessible());
+ matched &= block.offerValue("iops", row, instance.iops());
+ matched &= block.offerValue("is_multi_az", row, instance.multiAZ());
matched &= block.offerComplexValue("domains", row, (Field field, Object val) -> {
if (field.getName().equals("domain")) {
- return ((DomainMembership) val).getDomain();
+ return ((DomainMembership) val).domain();
}
else if (field.getName().equals("fqdn")) {
- return ((DomainMembership) val).getFQDN();
+ return ((DomainMembership) val).fqdn();
}
else if (field.getName().equals("iam_role")) {
- return ((DomainMembership) val).getIAMRoleName();
+ return ((DomainMembership) val).iamRoleName();
}
else if (field.getName().equals("status")) {
- return ((DomainMembership) val).getStatus();
+ return ((DomainMembership) val).status();
}
throw new RuntimeException("Unexpected field " + field.getName());
},
- instance.getDomainMemberships());
+ instance.domainMemberships());
matched &= block.offerComplexValue("param_groups", row, (Field field, Object val) -> {
if (field.getName().equals("name")) {
- return ((DBParameterGroupStatus) val).getDBParameterGroupName();
+ return ((DBParameterGroupStatus) val).dbParameterGroupName();
}
else if (field.getName().equals("status")) {
- return ((DBParameterGroupStatus) val).getParameterApplyStatus();
+ return ((DBParameterGroupStatus) val).parameterApplyStatus();
}
throw new RuntimeException("Unexpected field " + field.getName());
},
- instance.getDBParameterGroups());
+ instance.dbParameterGroups());
matched &= block.offerComplexValue("db_security_groups",
row,
(Field field, Object val) -> {
if (field.getName().equals("name")) {
- return ((DBSecurityGroupMembership) val).getDBSecurityGroupName();
+ return ((DBSecurityGroupMembership) val).dbSecurityGroupName();
}
else if (field.getName().equals("status")) {
- return ((DBSecurityGroupMembership) val).getStatus();
+ return ((DBSecurityGroupMembership) val).status();
}
throw new RuntimeException("Unexpected field " + field.getName());
},
- instance.getDBSecurityGroups());
+ instance.dbSecurityGroups());
matched &= block.offerComplexValue("subnet_group",
row,
(Field field, Object val) -> {
if (field.getName().equals("description")) {
- return ((DBSubnetGroup) val).getDBSubnetGroupDescription();
+ return ((DBSubnetGroup) val).dbSubnetGroupDescription();
}
else if (field.getName().equals("name")) {
- return ((DBSubnetGroup) val).getDBSubnetGroupName();
+ return ((DBSubnetGroup) val).dbSubnetGroupName();
}
else if (field.getName().equals("status")) {
- return ((DBSubnetGroup) val).getSubnetGroupStatus();
+ return ((DBSubnetGroup) val).subnetGroupStatus();
}
else if (field.getName().equals("vpc")) {
- return ((DBSubnetGroup) val).getVpcId();
+ return ((DBSubnetGroup) val).vpcId();
}
else if (field.getName().equals("subnets")) {
- return ((DBSubnetGroup) val).getSubnets().stream()
- .map(next -> next.getSubnetIdentifier()).collect(Collectors.toList());
+ return ((DBSubnetGroup) val).subnets().stream()
+ .map(next -> next.subnetIdentifier()).collect(Collectors.toList());
}
else if (val instanceof Subnet) {
- return ((Subnet) val).getSubnetIdentifier();
+ return ((Subnet) val).subnetIdentifier();
}
throw new RuntimeException("Unexpected field " + field.getName());
},
- instance.getDBSubnetGroup());
+ instance.dbSubnetGroup());
matched &= block.offerComplexValue("endpoint",
row,
(Field field, Object val) -> {
if (field.getName().equals("address")) {
- return ((Endpoint) val).getAddress();
+ return ((Endpoint) val).address();
}
else if (field.getName().equals("port")) {
- return ((Endpoint) val).getPort();
+ return ((Endpoint) val).port();
}
else if (field.getName().equals("zone")) {
- return ((Endpoint) val).getHostedZoneId();
+ return ((Endpoint) val).hostedZoneId();
}
throw new RuntimeException("Unexpected field " + field.getName());
},
- instance.getEndpoint());
+ instance.endpoint());
matched &= block.offerComplexValue("status_infos",
row,
(Field field, Object val) -> {
if (field.getName().equals("message")) {
- return ((DBInstanceStatusInfo) val).getMessage();
+ return ((DBInstanceStatusInfo) val).message();
}
else if (field.getName().equals("is_normal")) {
- return ((DBInstanceStatusInfo) val).getNormal();
+ return ((DBInstanceStatusInfo) val).normal();
}
else if (field.getName().equals("status")) {
- return ((DBInstanceStatusInfo) val).getStatus();
+ return ((DBInstanceStatusInfo) val).status();
}
else if (field.getName().equals("type")) {
- return ((DBInstanceStatusInfo) val).getStatusType();
+ return ((DBInstanceStatusInfo) val).statusType();
}
throw new RuntimeException("Unexpected field " + field.getName());
},
- instance.getStatusInfos());
+ instance.statusInfos());
matched &= block.offerComplexValue("tags", row,
(Field field, Object val) -> {
if (field.getName().equals("key")) {
- return ((Tag) val).getKey();
+ return ((Tag) val).key();
}
else if (field.getName().equals("value")) {
- return ((Tag) val).getValue();
+ return ((Tag) val).value();
}
throw new RuntimeException("Unexpected field " + field.getName());
},
- instance.getTagList());
+ instance.tagList());
return matched ? 1 : 0;
});
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/EbsTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/EbsTableProvider.java
index 48b6503757..7356a34ea7 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/EbsTableProvider.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/EbsTableProvider.java
@@ -31,14 +31,14 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.model.DescribeVolumesRequest;
-import com.amazonaws.services.ec2.model.DescribeVolumesResult;
-import com.amazonaws.services.ec2.model.Volume;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.ec2.Ec2Client;
+import software.amazon.awssdk.services.ec2.model.DescribeVolumesRequest;
+import software.amazon.awssdk.services.ec2.model.DescribeVolumesResponse;
+import software.amazon.awssdk.services.ec2.model.Volume;
import java.util.Collections;
import java.util.List;
@@ -52,9 +52,9 @@ public class EbsTableProvider
{
private static final Logger logger = LoggerFactory.getLogger(EbsTableProvider.class);
private static final Schema SCHEMA;
- private AmazonEC2 ec2;
+ private Ec2Client ec2;
- public EbsTableProvider(AmazonEC2 ec2)
+ public EbsTableProvider(Ec2Client ec2)
{
this.ec2 = ec2;
}
@@ -96,24 +96,24 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest
public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker)
{
boolean done = false;
- DescribeVolumesRequest request = new DescribeVolumesRequest();
+ DescribeVolumesRequest.Builder request = DescribeVolumesRequest.builder();
ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("id");
if (idConstraint != null && idConstraint.isSingleValue()) {
- request.setVolumeIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
+ request.volumeIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
}
while (!done) {
- DescribeVolumesResult response = ec2.describeVolumes(request);
+ DescribeVolumesResponse response = ec2.describeVolumes(request.build());
- for (Volume volume : response.getVolumes()) {
+ for (Volume volume : response.volumes()) {
logger.info("readWithConstraint: {}", response);
instanceToRow(volume, spiller);
}
- request.setNextToken(response.getNextToken());
+ request.nextToken(response.nextToken());
- if (response.getNextToken() == null || !queryStatusChecker.isQueryRunning()) {
+ if (response.nextToken() == null || !queryStatusChecker.isQueryRunning()) {
done = true;
}
}
@@ -133,26 +133,26 @@ private void instanceToRow(Volume volume,
spiller.writeRows((Block block, int row) -> {
boolean matched = true;
- matched &= block.offerValue("id", row, volume.getVolumeId());
- matched &= block.offerValue("type", row, volume.getVolumeType());
- matched &= block.offerValue("availability_zone", row, volume.getAvailabilityZone());
- matched &= block.offerValue("created_time", row, volume.getCreateTime());
- matched &= block.offerValue("is_encrypted", row, volume.getEncrypted());
- matched &= block.offerValue("kms_key_id", row, volume.getKmsKeyId());
- matched &= block.offerValue("size", row, volume.getSize());
- matched &= block.offerValue("iops", row, volume.getIops());
- matched &= block.offerValue("snapshot_id", row, volume.getSnapshotId());
- matched &= block.offerValue("state", row, volume.getState());
-
- if (volume.getAttachments().size() == 1) {
- matched &= block.offerValue("target", row, volume.getAttachments().get(0).getInstanceId());
- matched &= block.offerValue("attached_device", row, volume.getAttachments().get(0).getDevice());
- matched &= block.offerValue("attachment_state", row, volume.getAttachments().get(0).getState());
- matched &= block.offerValue("attachment_time", row, volume.getAttachments().get(0).getAttachTime());
+ matched &= block.offerValue("id", row, volume.volumeId());
+ matched &= block.offerValue("type", row, volume.volumeTypeAsString());
+ matched &= block.offerValue("availability_zone", row, volume.availabilityZone());
+ matched &= block.offerValue("created_time", row, volume.createTime());
+ matched &= block.offerValue("is_encrypted", row, volume.encrypted());
+ matched &= block.offerValue("kms_key_id", row, volume.kmsKeyId());
+ matched &= block.offerValue("size", row, volume.size());
+ matched &= block.offerValue("iops", row, volume.iops());
+ matched &= block.offerValue("snapshot_id", row, volume.snapshotId());
+ matched &= block.offerValue("state", row, volume.stateAsString());
+
+ if (volume.attachments().size() == 1) {
+ matched &= block.offerValue("target", row, volume.attachments().get(0).instanceId());
+ matched &= block.offerValue("attached_device", row, volume.attachments().get(0).device());
+ matched &= block.offerValue("attachment_state", row, volume.attachments().get(0).stateAsString());
+ matched &= block.offerValue("attachment_time", row, volume.attachments().get(0).attachTime());
}
- List tags = volume.getTags().stream()
- .map(next -> next.getKey() + ":" + next.getValue()).collect(Collectors.toList());
+ List tags = volume.tags().stream()
+ .map(next -> next.key() + ":" + next.value()).collect(Collectors.toList());
matched &= block.offerComplexValue("tags", row, FieldResolver.DEFAULT, tags);
return matched ? 1 : 0;
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/Ec2TableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/Ec2TableProvider.java
index dfa8876284..6bf9dbb58d 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/Ec2TableProvider.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/Ec2TableProvider.java
@@ -32,19 +32,19 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.model.DescribeInstancesRequest;
-import com.amazonaws.services.ec2.model.DescribeInstancesResult;
-import com.amazonaws.services.ec2.model.Instance;
-import com.amazonaws.services.ec2.model.InstanceNetworkInterface;
-import com.amazonaws.services.ec2.model.InstanceState;
-import com.amazonaws.services.ec2.model.Reservation;
-import com.amazonaws.services.ec2.model.StateReason;
-import com.amazonaws.services.ec2.model.Tag;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.ArrowType;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.Schema;
+import software.amazon.awssdk.services.ec2.Ec2Client;
+import software.amazon.awssdk.services.ec2.model.DescribeInstancesRequest;
+import software.amazon.awssdk.services.ec2.model.DescribeInstancesResponse;
+import software.amazon.awssdk.services.ec2.model.Instance;
+import software.amazon.awssdk.services.ec2.model.InstanceNetworkInterface;
+import software.amazon.awssdk.services.ec2.model.InstanceState;
+import software.amazon.awssdk.services.ec2.model.Reservation;
+import software.amazon.awssdk.services.ec2.model.StateReason;
+import software.amazon.awssdk.services.ec2.model.Tag;
import java.util.Collections;
import java.util.List;
@@ -57,9 +57,9 @@ public class Ec2TableProvider
implements TableProvider
{
private static final Schema SCHEMA;
- private AmazonEC2 ec2;
+ private Ec2Client ec2;
- public Ec2TableProvider(AmazonEC2 ec2)
+ public Ec2TableProvider(Ec2Client ec2)
{
this.ec2 = ec2;
}
@@ -101,25 +101,25 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest
public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker)
{
boolean done = false;
- DescribeInstancesRequest request = new DescribeInstancesRequest();
+ DescribeInstancesRequest.Builder request = DescribeInstancesRequest.builder();
ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("instance_id");
if (idConstraint != null && idConstraint.isSingleValue()) {
- request.setInstanceIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
+ request.instanceIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
}
while (!done) {
- DescribeInstancesResult response = ec2.describeInstances(request);
+ DescribeInstancesResponse response = ec2.describeInstances(request.build());
- for (Reservation reservation : response.getReservations()) {
- for (Instance instance : reservation.getInstances()) {
+ for (Reservation reservation : response.reservations()) {
+ for (Instance instance : reservation.instances()) {
instanceToRow(instance, spiller);
}
}
- request.setNextToken(response.getNextToken());
+ request.nextToken(response.nextToken());
- if (response.getNextToken() == null || !queryStatusChecker.isQueryRunning()) {
+ if (response.nextToken() == null || !queryStatusChecker.isQueryRunning()) {
done = true;
}
}
@@ -139,106 +139,106 @@ private void instanceToRow(Instance instance,
spiller.writeRows((Block block, int row) -> {
boolean matched = true;
- matched &= block.offerValue("instance_id", row, instance.getInstanceId());
- matched &= block.offerValue("image_id", row, instance.getImageId());
- matched &= block.offerValue("instance_type", row, instance.getInstanceType());
- matched &= block.offerValue("platform", row, instance.getPlatform());
- matched &= block.offerValue("private_dns_name", row, instance.getPrivateDnsName());
- matched &= block.offerValue("private_ip_address", row, instance.getPrivateIpAddress());
- matched &= block.offerValue("public_dns_name", row, instance.getPublicDnsName());
- matched &= block.offerValue("public_ip_address", row, instance.getPublicIpAddress());
- matched &= block.offerValue("subnet_id", row, instance.getSubnetId());
- matched &= block.offerValue("vpc_id", row, instance.getVpcId());
- matched &= block.offerValue("architecture", row, instance.getArchitecture());
- matched &= block.offerValue("instance_lifecycle", row, instance.getInstanceLifecycle());
- matched &= block.offerValue("root_device_name", row, instance.getRootDeviceName());
- matched &= block.offerValue("root_device_type", row, instance.getRootDeviceType());
- matched &= block.offerValue("spot_instance_request_id", row, instance.getSpotInstanceRequestId());
- matched &= block.offerValue("virtualization_type", row, instance.getVirtualizationType());
- matched &= block.offerValue("key_name", row, instance.getKeyName());
- matched &= block.offerValue("kernel_id", row, instance.getKernelId());
- matched &= block.offerValue("capacity_reservation_id", row, instance.getCapacityReservationId());
- matched &= block.offerValue("launch_time", row, instance.getLaunchTime());
+ matched &= block.offerValue("instance_id", row, instance.instanceId());
+ matched &= block.offerValue("image_id", row, instance.imageId());
+ matched &= block.offerValue("instance_type", row, instance.instanceTypeAsString());
+ matched &= block.offerValue("platform", row, instance.platformAsString());
+ matched &= block.offerValue("private_dns_name", row, instance.privateDnsName());
+ matched &= block.offerValue("private_ip_address", row, instance.privateIpAddress());
+ matched &= block.offerValue("public_dns_name", row, instance.publicDnsName());
+ matched &= block.offerValue("public_ip_address", row, instance.publicIpAddress());
+ matched &= block.offerValue("subnet_id", row, instance.subnetId());
+ matched &= block.offerValue("vpc_id", row, instance.vpcId());
+ matched &= block.offerValue("architecture", row, instance.architectureAsString());
+ matched &= block.offerValue("instance_lifecycle", row, instance.instanceLifecycleAsString());
+ matched &= block.offerValue("root_device_name", row, instance.rootDeviceName());
+ matched &= block.offerValue("root_device_type", row, instance.rootDeviceTypeAsString());
+ matched &= block.offerValue("spot_instance_request_id", row, instance.spotInstanceRequestId());
+ matched &= block.offerValue("virtualization_type", row, instance.virtualizationTypeAsString());
+ matched &= block.offerValue("key_name", row, instance.keyName());
+ matched &= block.offerValue("kernel_id", row, instance.kernelId());
+ matched &= block.offerValue("capacity_reservation_id", row, instance.capacityReservationId());
+ matched &= block.offerValue("launch_time", row, instance.launchTime());
matched &= block.offerComplexValue("state",
row,
(Field field, Object val) -> {
if (field.getName().equals("name")) {
- return ((InstanceState) val).getName();
+ return ((InstanceState) val).nameAsString();
}
else if (field.getName().equals("code")) {
- return ((InstanceState) val).getCode();
+ return ((InstanceState) val).code();
}
throw new RuntimeException("Unknown field " + field.getName());
- }, instance.getState());
+ }, instance.state());
matched &= block.offerComplexValue("network_interfaces",
row,
(Field field, Object val) -> {
if (field.getName().equals("status")) {
- return ((InstanceNetworkInterface) val).getStatus();
+ return ((InstanceNetworkInterface) val).statusAsString();
}
else if (field.getName().equals("subnet")) {
- return ((InstanceNetworkInterface) val).getSubnetId();
+ return ((InstanceNetworkInterface) val).subnetId();
}
else if (field.getName().equals("vpc")) {
- return ((InstanceNetworkInterface) val).getVpcId();
+ return ((InstanceNetworkInterface) val).vpcId();
}
else if (field.getName().equals("mac")) {
- return ((InstanceNetworkInterface) val).getMacAddress();
+ return ((InstanceNetworkInterface) val).macAddress();
}
else if (field.getName().equals("private_dns")) {
- return ((InstanceNetworkInterface) val).getPrivateDnsName();
+ return ((InstanceNetworkInterface) val).privateDnsName();
}
else if (field.getName().equals("private_ip")) {
- return ((InstanceNetworkInterface) val).getPrivateIpAddress();
+ return ((InstanceNetworkInterface) val).privateIpAddress();
}
else if (field.getName().equals("security_groups")) {
- return ((InstanceNetworkInterface) val).getGroups().stream().map(next -> next.getGroupName() + ":" + next.getGroupId()).collect(Collectors.toList());
+ return ((InstanceNetworkInterface) val).groups().stream().map(next -> next.groupName() + ":" + next.groupId()).collect(Collectors.toList());
}
else if (field.getName().equals("interface_id")) {
- return ((InstanceNetworkInterface) val).getNetworkInterfaceId();
+ return ((InstanceNetworkInterface) val).networkInterfaceId();
}
throw new RuntimeException("Unknown field " + field.getName());
- }, instance.getNetworkInterfaces());
+ }, instance.networkInterfaces());
matched &= block.offerComplexValue("state_reason", row, (Field field, Object val) -> {
if (field.getName().equals("message")) {
- return ((StateReason) val).getMessage();
+ return ((StateReason) val).message();
}
else if (field.getName().equals("code")) {
- return ((StateReason) val).getCode();
+ return ((StateReason) val).code();
}
throw new RuntimeException("Unknown field " + field.getName());
- }, instance.getStateReason());
+ }, instance.stateReason());
- matched &= block.offerValue("ebs_optimized", row, instance.getEbsOptimized());
+ matched &= block.offerValue("ebs_optimized", row, instance.ebsOptimized());
- List securityGroups = instance.getSecurityGroups().stream()
- .map(next -> next.getGroupId()).collect(Collectors.toList());
+ List securityGroups = instance.securityGroups().stream()
+ .map(next -> next.groupId()).collect(Collectors.toList());
matched &= block.offerComplexValue("security_groups", row, FieldResolver.DEFAULT, securityGroups);
- List securityGroupNames = instance.getSecurityGroups().stream()
- .map(next -> next.getGroupName()).collect(Collectors.toList());
+ List securityGroupNames = instance.securityGroups().stream()
+ .map(next -> next.groupName()).collect(Collectors.toList());
matched &= block.offerComplexValue("security_group_names", row, FieldResolver.DEFAULT, securityGroupNames);
- List ebsVolumes = instance.getBlockDeviceMappings().stream()
- .map(next -> next.getEbs().getVolumeId()).collect(Collectors.toList());
+ List ebsVolumes = instance.blockDeviceMappings().stream()
+ .map(next -> next.ebs().volumeId()).collect(Collectors.toList());
matched &= block.offerComplexValue("ebs_volumes", row, FieldResolver.DEFAULT, ebsVolumes);
matched &= block.offerComplexValue("tags", row,
(Field field, Object val) -> {
if (field.getName().equals("key")) {
- return ((Tag) val).getKey();
+ return ((Tag) val).key();
}
else if (field.getName().equals("value")) {
- return ((Tag) val).getValue();
+ return ((Tag) val).value();
}
throw new RuntimeException("Unexpected field " + field.getName());
},
- instance.getTags());
+ instance.tags());
return matched ? 1 : 0;
});
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/ImagesTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/ImagesTableProvider.java
index 3858946948..a80ad779bf 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/ImagesTableProvider.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/ImagesTableProvider.java
@@ -31,17 +31,17 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.model.BlockDeviceMapping;
-import com.amazonaws.services.ec2.model.DescribeImagesRequest;
-import com.amazonaws.services.ec2.model.DescribeImagesResult;
-import com.amazonaws.services.ec2.model.EbsBlockDevice;
-import com.amazonaws.services.ec2.model.Image;
-import com.amazonaws.services.ec2.model.Tag;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.ArrowType;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.Schema;
+import software.amazon.awssdk.services.ec2.Ec2Client;
+import software.amazon.awssdk.services.ec2.model.BlockDeviceMapping;
+import software.amazon.awssdk.services.ec2.model.DescribeImagesRequest;
+import software.amazon.awssdk.services.ec2.model.DescribeImagesResponse;
+import software.amazon.awssdk.services.ec2.model.EbsBlockDevice;
+import software.amazon.awssdk.services.ec2.model.Image;
+import software.amazon.awssdk.services.ec2.model.Tag;
import java.util.Collections;
import java.util.List;
@@ -58,9 +58,9 @@ public class ImagesTableProvider
//query for a specific owner.
private final String defaultOwner;
private static final Schema SCHEMA;
- private AmazonEC2 ec2;
+ private Ec2Client ec2;
- public ImagesTableProvider(AmazonEC2 ec2, java.util.Map configOptions)
+ public ImagesTableProvider(Ec2Client ec2, java.util.Map configOptions)
{
this.ec2 = ec2;
this.defaultOwner = configOptions.get(DEFAULT_OWNER_ENV);
@@ -104,28 +104,28 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest
@Override
public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker)
{
- DescribeImagesRequest request = new DescribeImagesRequest();
+ DescribeImagesRequest.Builder request = DescribeImagesRequest.builder();
ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("id");
ValueSet ownerConstraint = recordsRequest.getConstraints().getSummary().get("owner");
if (idConstraint != null && idConstraint.isSingleValue()) {
- request.setImageIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
+ request.imageIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
}
else if (ownerConstraint != null && ownerConstraint.isSingleValue()) {
- request.setOwners(Collections.singletonList(ownerConstraint.getSingleValue().toString()));
+ request.owners(Collections.singletonList(ownerConstraint.getSingleValue().toString()));
}
else if (defaultOwner != null) {
- request.setOwners(Collections.singletonList(defaultOwner));
+ request.owners(Collections.singletonList(defaultOwner));
}
else {
throw new RuntimeException("A default owner account must be set or the query must have owner" +
"in the where clause with exactly 1 value otherwise results may be too big.");
}
- DescribeImagesResult response = ec2.describeImages(request);
+ DescribeImagesResponse response = ec2.describeImages(request.build());
int count = 0;
- for (Image next : response.getImages()) {
+ for (Image next : response.images()) {
if (count++ > MAX_IMAGES) {
throw new RuntimeException("Too many images returned, add an owner or id filter.");
}
@@ -147,34 +147,34 @@ private void instanceToRow(Image image,
spiller.writeRows((Block block, int row) -> {
boolean matched = true;
- matched &= block.offerValue("id", row, image.getImageId());
- matched &= block.offerValue("architecture", row, image.getArchitecture());
- matched &= block.offerValue("created", row, image.getCreationDate());
- matched &= block.offerValue("description", row, image.getDescription());
- matched &= block.offerValue("hypervisor", row, image.getHypervisor());
- matched &= block.offerValue("location", row, image.getImageLocation());
- matched &= block.offerValue("type", row, image.getImageType());
- matched &= block.offerValue("kernel", row, image.getKernelId());
- matched &= block.offerValue("name", row, image.getName());
- matched &= block.offerValue("owner", row, image.getOwnerId());
- matched &= block.offerValue("platform", row, image.getPlatform());
- matched &= block.offerValue("ramdisk", row, image.getRamdiskId());
- matched &= block.offerValue("root_device", row, image.getRootDeviceName());
- matched &= block.offerValue("root_type", row, image.getRootDeviceType());
- matched &= block.offerValue("srvio_net", row, image.getSriovNetSupport());
- matched &= block.offerValue("state", row, image.getState());
- matched &= block.offerValue("virt_type", row, image.getVirtualizationType());
- matched &= block.offerValue("is_public", row, image.getPublic());
+ matched &= block.offerValue("id", row, image.imageId());
+ matched &= block.offerValue("architecture", row, image.architectureAsString());
+ matched &= block.offerValue("created", row, image.creationDate());
+ matched &= block.offerValue("description", row, image.description());
+ matched &= block.offerValue("hypervisor", row, image.hypervisorAsString());
+ matched &= block.offerValue("location", row, image.imageLocation());
+ matched &= block.offerValue("type", row, image.imageTypeAsString());
+ matched &= block.offerValue("kernel", row, image.kernelId());
+ matched &= block.offerValue("name", row, image.name());
+ matched &= block.offerValue("owner", row, image.ownerId());
+ matched &= block.offerValue("platform", row, image.platformAsString());
+ matched &= block.offerValue("ramdisk", row, image.ramdiskId());
+ matched &= block.offerValue("root_device", row, image.rootDeviceName());
+ matched &= block.offerValue("root_type", row, image.rootDeviceTypeAsString());
+ matched &= block.offerValue("srvio_net", row, image.sriovNetSupport());
+ matched &= block.offerValue("state", row, image.stateAsString());
+ matched &= block.offerValue("virt_type", row, image.virtualizationTypeAsString());
+ matched &= block.offerValue("is_public", row, image.publicLaunchPermissions());
- List tags = image.getTags();
+ List tags = image.tags();
matched &= block.offerComplexValue("tags",
row,
(Field field, Object val) -> {
if (field.getName().equals("key")) {
- return ((Tag) val).getKey();
+ return ((Tag) val).key();
}
else if (field.getName().equals("value")) {
- return ((Tag) val).getValue();
+ return ((Tag) val).value();
}
throw new RuntimeException("Unexpected field " + field.getName());
@@ -185,33 +185,33 @@ else if (field.getName().equals("value")) {
row,
(Field field, Object val) -> {
if (field.getName().equals("dev_name")) {
- return ((BlockDeviceMapping) val).getDeviceName();
+ return ((BlockDeviceMapping) val).deviceName();
}
else if (field.getName().equals("no_device")) {
- return ((BlockDeviceMapping) val).getNoDevice();
+ return ((BlockDeviceMapping) val).noDevice();
}
else if (field.getName().equals("virt_name")) {
- return ((BlockDeviceMapping) val).getVirtualName();
+ return ((BlockDeviceMapping) val).virtualName();
}
else if (field.getName().equals("ebs")) {
- return ((BlockDeviceMapping) val).getEbs();
+ return ((BlockDeviceMapping) val).ebs();
}
else if (field.getName().equals("ebs_size")) {
- return ((EbsBlockDevice) val).getVolumeSize();
+ return ((EbsBlockDevice) val).volumeSize();
}
else if (field.getName().equals("ebs_iops")) {
- return ((EbsBlockDevice) val).getIops();
+ return ((EbsBlockDevice) val).iops();
}
else if (field.getName().equals("ebs_type")) {
- return ((EbsBlockDevice) val).getVolumeType();
+ return ((EbsBlockDevice) val).volumeTypeAsString();
}
else if (field.getName().equals("ebs_kms_key")) {
- return ((EbsBlockDevice) val).getKmsKeyId();
+ return ((EbsBlockDevice) val).kmsKeyId();
}
throw new RuntimeException("Unexpected field " + field.getName());
},
- image.getBlockDeviceMappings());
+ image.blockDeviceMappings());
return matched ? 1 : 0;
});
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/RouteTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/RouteTableProvider.java
index 24583be45e..7c71183464 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/RouteTableProvider.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/RouteTableProvider.java
@@ -31,13 +31,13 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.model.DescribeRouteTablesRequest;
-import com.amazonaws.services.ec2.model.DescribeRouteTablesResult;
-import com.amazonaws.services.ec2.model.Route;
-import com.amazonaws.services.ec2.model.RouteTable;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
+import software.amazon.awssdk.services.ec2.Ec2Client;
+import software.amazon.awssdk.services.ec2.model.DescribeRouteTablesRequest;
+import software.amazon.awssdk.services.ec2.model.DescribeRouteTablesResponse;
+import software.amazon.awssdk.services.ec2.model.Route;
+import software.amazon.awssdk.services.ec2.model.RouteTable;
import java.util.Collections;
import java.util.List;
@@ -50,9 +50,9 @@ public class RouteTableProvider
implements TableProvider
{
private static final Schema SCHEMA;
- private AmazonEC2 ec2;
+ private Ec2Client ec2;
- public RouteTableProvider(AmazonEC2 ec2)
+ public RouteTableProvider(Ec2Client ec2)
{
this.ec2 = ec2;
}
@@ -94,25 +94,25 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest
public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker)
{
boolean done = false;
- DescribeRouteTablesRequest request = new DescribeRouteTablesRequest();
+ DescribeRouteTablesRequest.Builder request = DescribeRouteTablesRequest.builder();
ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("route_table_id");
if (idConstraint != null && idConstraint.isSingleValue()) {
- request.setRouteTableIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
+ request.routeTableIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
}
while (!done) {
- DescribeRouteTablesResult response = ec2.describeRouteTables(request);
+ DescribeRouteTablesResponse response = ec2.describeRouteTables(request.build());
- for (RouteTable nextRouteTable : response.getRouteTables()) {
- for (Route route : nextRouteTable.getRoutes()) {
+ for (RouteTable nextRouteTable : response.routeTables()) {
+ for (Route route : nextRouteTable.routes()) {
instanceToRow(nextRouteTable, route, spiller);
}
}
- request.setNextToken(response.getNextToken());
+ request.nextToken(response.nextToken());
- if (response.getNextToken() == null || !queryStatusChecker.isQueryRunning()) {
+ if (response.nextToken() == null || !queryStatusChecker.isQueryRunning()) {
done = true;
}
}
@@ -134,33 +134,33 @@ private void instanceToRow(RouteTable routeTable,
spiller.writeRows((Block block, int row) -> {
boolean matched = true;
- matched &= block.offerValue("route_table_id", row, routeTable.getRouteTableId());
- matched &= block.offerValue("owner", row, routeTable.getOwnerId());
- matched &= block.offerValue("vpc", row, routeTable.getVpcId());
- matched &= block.offerValue("dst_cidr", row, route.getDestinationCidrBlock());
- matched &= block.offerValue("dst_cidr_v6", row, route.getDestinationIpv6CidrBlock());
- matched &= block.offerValue("dst_prefix_list", row, route.getDestinationPrefixListId());
- matched &= block.offerValue("egress_igw", row, route.getEgressOnlyInternetGatewayId());
- matched &= block.offerValue("gateway", row, route.getGatewayId());
- matched &= block.offerValue("instance_id", row, route.getInstanceId());
- matched &= block.offerValue("instance_owner", row, route.getInstanceOwnerId());
- matched &= block.offerValue("nat_gateway", row, route.getNatGatewayId());
- matched &= block.offerValue("interface", row, route.getNetworkInterfaceId());
- matched &= block.offerValue("origin", row, route.getOrigin());
- matched &= block.offerValue("state", row, route.getState());
- matched &= block.offerValue("transit_gateway", row, route.getTransitGatewayId());
- matched &= block.offerValue("vpc_peering_con", row, route.getVpcPeeringConnectionId());
-
- List associations = routeTable.getAssociations().stream()
- .map(next -> next.getSubnetId() + ":" + next.getRouteTableId()).collect(Collectors.toList());
+ matched &= block.offerValue("route_table_id", row, routeTable.routeTableId());
+ matched &= block.offerValue("owner", row, routeTable.ownerId());
+ matched &= block.offerValue("vpc", row, routeTable.vpcId());
+ matched &= block.offerValue("dst_cidr", row, route.destinationCidrBlock());
+ matched &= block.offerValue("dst_cidr_v6", row, route.destinationIpv6CidrBlock());
+ matched &= block.offerValue("dst_prefix_list", row, route.destinationPrefixListId());
+ matched &= block.offerValue("egress_igw", row, route.egressOnlyInternetGatewayId());
+ matched &= block.offerValue("gateway", row, route.gatewayId());
+ matched &= block.offerValue("instance_id", row, route.instanceId());
+ matched &= block.offerValue("instance_owner", row, route.instanceOwnerId());
+ matched &= block.offerValue("nat_gateway", row, route.natGatewayId());
+ matched &= block.offerValue("interface", row, route.networkInterfaceId());
+ matched &= block.offerValue("origin", row, route.originAsString());
+ matched &= block.offerValue("state", row, route.stateAsString());
+ matched &= block.offerValue("transit_gateway", row, route.transitGatewayId());
+ matched &= block.offerValue("vpc_peering_con", row, route.vpcPeeringConnectionId());
+
+ List associations = routeTable.associations().stream()
+ .map(next -> next.subnetId() + ":" + next.routeTableId()).collect(Collectors.toList());
matched &= block.offerComplexValue("associations", row, FieldResolver.DEFAULT, associations);
- List tags = routeTable.getTags().stream()
- .map(next -> next.getKey() + ":" + next.getValue()).collect(Collectors.toList());
+ List tags = routeTable.tags().stream()
+ .map(next -> next.key() + ":" + next.value()).collect(Collectors.toList());
matched &= block.offerComplexValue("tags", row, FieldResolver.DEFAULT, tags);
- List propagatingVgws = routeTable.getPropagatingVgws().stream()
- .map(next -> next.getGatewayId()).collect(Collectors.toList());
+ List propagatingVgws = routeTable.propagatingVgws().stream()
+ .map(next -> next.gatewayId()).collect(Collectors.toList());
matched &= block.offerComplexValue("propagating_vgws", row, FieldResolver.DEFAULT, propagatingVgws);
return matched ? 1 : 0;
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SecurityGroupsTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SecurityGroupsTableProvider.java
index 8f4f6dd3c3..94afbdf687 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SecurityGroupsTableProvider.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SecurityGroupsTableProvider.java
@@ -31,13 +31,13 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.model.DescribeSecurityGroupsRequest;
-import com.amazonaws.services.ec2.model.DescribeSecurityGroupsResult;
-import com.amazonaws.services.ec2.model.IpPermission;
-import com.amazonaws.services.ec2.model.SecurityGroup;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
+import software.amazon.awssdk.services.ec2.Ec2Client;
+import software.amazon.awssdk.services.ec2.model.DescribeSecurityGroupsRequest;
+import software.amazon.awssdk.services.ec2.model.DescribeSecurityGroupsResponse;
+import software.amazon.awssdk.services.ec2.model.IpPermission;
+import software.amazon.awssdk.services.ec2.model.SecurityGroup;
import java.util.Collections;
import java.util.List;
@@ -53,9 +53,9 @@ public class SecurityGroupsTableProvider
private static final String EGRESS = "egress";
private static final Schema SCHEMA;
- private AmazonEC2 ec2;
+ private Ec2Client ec2;
- public SecurityGroupsTableProvider(AmazonEC2 ec2)
+ public SecurityGroupsTableProvider(Ec2Client ec2)
{
this.ec2 = ec2;
}
@@ -97,34 +97,34 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest
public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker)
{
boolean done = false;
- DescribeSecurityGroupsRequest request = new DescribeSecurityGroupsRequest();
+ DescribeSecurityGroupsRequest.Builder request = DescribeSecurityGroupsRequest.builder();
ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("id");
if (idConstraint != null && idConstraint.isSingleValue()) {
- request.setGroupIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
+ request.groupIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
}
ValueSet nameConstraint = recordsRequest.getConstraints().getSummary().get("name");
if (nameConstraint != null && nameConstraint.isSingleValue()) {
- request.setGroupNames(Collections.singletonList(nameConstraint.getSingleValue().toString()));
+ request.groupNames(Collections.singletonList(nameConstraint.getSingleValue().toString()));
}
while (!done) {
- DescribeSecurityGroupsResult response = ec2.describeSecurityGroups(request);
+ DescribeSecurityGroupsResponse response = ec2.describeSecurityGroups(request.build());
//Each rule is mapped to a row in the response. SGs have INGRESS and EGRESS rules.
- for (SecurityGroup next : response.getSecurityGroups()) {
- for (IpPermission nextPerm : next.getIpPermissions()) {
+ for (SecurityGroup next : response.securityGroups()) {
+ for (IpPermission nextPerm : next.ipPermissions()) {
instanceToRow(next, nextPerm, INGRESS, spiller);
}
- for (IpPermission nextPerm : next.getIpPermissionsEgress()) {
+ for (IpPermission nextPerm : next.ipPermissionsEgress()) {
instanceToRow(next, nextPerm, EGRESS, spiller);
}
}
- request.setNextToken(response.getNextToken());
- if (response.getNextToken() == null || !queryStatusChecker.isQueryRunning()) {
+ request.nextToken(response.nextToken());
+ if (response.nextToken() == null || !queryStatusChecker.isQueryRunning()) {
done = true;
}
}
@@ -148,28 +148,28 @@ private void instanceToRow(SecurityGroup securityGroup,
spiller.writeRows((Block block, int row) -> {
boolean matched = true;
- matched &= block.offerValue("id", row, securityGroup.getGroupId());
- matched &= block.offerValue("name", row, securityGroup.getGroupName());
- matched &= block.offerValue("description", row, securityGroup.getDescription());
- matched &= block.offerValue("from_port", row, permission.getFromPort());
- matched &= block.offerValue("to_port", row, permission.getFromPort());
- matched &= block.offerValue("protocol", row, permission.getIpProtocol());
- matched &= block.offerValue("direction", row, permission.getIpProtocol());
+ matched &= block.offerValue("id", row, securityGroup.groupId());
+ matched &= block.offerValue("name", row, securityGroup.groupName());
+ matched &= block.offerValue("description", row, securityGroup.description());
+ matched &= block.offerValue("from_port", row, permission.fromPort());
+ matched &= block.offerValue("to_port", row, permission.toPort());
+ matched &= block.offerValue("protocol", row, permission.ipProtocol());
+ matched &= block.offerValue("direction", row, direction);
- List ipv4Ranges = permission.getIpv4Ranges().stream()
- .map(next -> next.getCidrIp() + ":" + next.getDescription()).collect(Collectors.toList());
+ List ipv4Ranges = permission.ipRanges().stream()
+ .map(next -> next.cidrIp() + ":" + next.description()).collect(Collectors.toList());
matched &= block.offerComplexValue("ipv4_ranges", row, FieldResolver.DEFAULT, ipv4Ranges);
- List ipv6Ranges = permission.getIpv6Ranges().stream()
- .map(next -> next.getCidrIpv6() + ":" + next.getDescription()).collect(Collectors.toList());
+ List ipv6Ranges = permission.ipv6Ranges().stream()
+ .map(next -> next.cidrIpv6() + ":" + next.description()).collect(Collectors.toList());
matched &= block.offerComplexValue("ipv6_ranges", row, FieldResolver.DEFAULT, ipv6Ranges);
- List prefixLists = permission.getPrefixListIds().stream()
- .map(next -> next.getPrefixListId() + ":" + next.getDescription()).collect(Collectors.toList());
+ List prefixLists = permission.prefixListIds().stream()
+ .map(next -> next.prefixListId() + ":" + next.description()).collect(Collectors.toList());
matched &= block.offerComplexValue("prefix_lists", row, FieldResolver.DEFAULT, prefixLists);
- List userIdGroups = permission.getUserIdGroupPairs().stream()
- .map(next -> next.getUserId() + ":" + next.getGroupId())
+ List userIdGroups = permission.userIdGroupPairs().stream()
+ .map(next -> next.userId() + ":" + next.groupId())
.collect(Collectors.toList());
matched &= block.offerComplexValue("user_id_groups", row, FieldResolver.DEFAULT, userIdGroups);
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SubnetTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SubnetTableProvider.java
index f64bb9bd26..444fd39510 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SubnetTableProvider.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/SubnetTableProvider.java
@@ -31,12 +31,12 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.model.DescribeSubnetsRequest;
-import com.amazonaws.services.ec2.model.DescribeSubnetsResult;
-import com.amazonaws.services.ec2.model.Subnet;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
+import software.amazon.awssdk.services.ec2.Ec2Client;
+import software.amazon.awssdk.services.ec2.model.DescribeSubnetsRequest;
+import software.amazon.awssdk.services.ec2.model.DescribeSubnetsResponse;
+import software.amazon.awssdk.services.ec2.model.Subnet;
import java.util.Collections;
import java.util.List;
@@ -49,9 +49,9 @@ public class SubnetTableProvider
implements TableProvider
{
private static final Schema SCHEMA;
- private AmazonEC2 ec2;
+ private Ec2Client ec2;
- public SubnetTableProvider(AmazonEC2 ec2)
+ public SubnetTableProvider(Ec2Client ec2)
{
this.ec2 = ec2;
}
@@ -92,15 +92,15 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest
@Override
public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker)
{
- DescribeSubnetsRequest request = new DescribeSubnetsRequest();
+ DescribeSubnetsRequest.Builder request = DescribeSubnetsRequest.builder();
ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("id");
if (idConstraint != null && idConstraint.isSingleValue()) {
- request.setSubnetIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
+ request.subnetIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
}
- DescribeSubnetsResult response = ec2.describeSubnets(request);
- for (Subnet subnet : response.getSubnets()) {
+ DescribeSubnetsResponse response = ec2.describeSubnets(request.build());
+ for (Subnet subnet : response.subnets()) {
instanceToRow(subnet, spiller);
}
}
@@ -119,19 +119,18 @@ private void instanceToRow(Subnet subnet,
spiller.writeRows((Block block, int row) -> {
boolean matched = true;
- matched &= block.offerValue("id", row, subnet.getSubnetId());
- matched &= block.offerValue("availability_zone", row, subnet.getAvailabilityZone());
- matched &= block.offerValue("available_ip_count", row, subnet.getAvailableIpAddressCount());
- matched &= block.offerValue("cidr_block", row, subnet.getCidrBlock());
- matched &= block.offerValue("default_for_az", row, subnet.getDefaultForAz());
- matched &= block.offerValue("map_public_ip", row, subnet.getMapPublicIpOnLaunch());
- matched &= block.offerValue("owner", row, subnet.getOwnerId());
- matched &= block.offerValue("state", row, subnet.getState());
- matched &= block.offerValue("vpc", row, subnet.getVpcId());
- matched &= block.offerValue("vpc", row, subnet.getVpcId());
+ matched &= block.offerValue("id", row, subnet.subnetId());
+ matched &= block.offerValue("availability_zone", row, subnet.availabilityZone());
+ matched &= block.offerValue("available_ip_count", row, subnet.availableIpAddressCount());
+ matched &= block.offerValue("cidr_block", row, subnet.cidrBlock());
+ matched &= block.offerValue("default_for_az", row, subnet.defaultForAz());
+ matched &= block.offerValue("map_public_ip", row, subnet.mapPublicIpOnLaunch());
+ matched &= block.offerValue("owner", row, subnet.ownerId());
+ matched &= block.offerValue("state", row, subnet.stateAsString());
+ matched &= block.offerValue("vpc", row, subnet.vpcId());
- List tags = subnet.getTags().stream()
- .map(next -> next.getKey() + ":" + next.getValue()).collect(Collectors.toList());
+ List tags = subnet.tags().stream()
+ .map(next -> next.key() + ":" + next.value()).collect(Collectors.toList());
matched &= block.offerComplexValue("tags", row, FieldResolver.DEFAULT, tags);
return matched ? 1 : 0;
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/VpcTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/VpcTableProvider.java
index 18087ba5e5..44adc6a846 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/VpcTableProvider.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/ec2/VpcTableProvider.java
@@ -31,12 +31,12 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.ec2.model.DescribeVpcsRequest;
-import com.amazonaws.services.ec2.model.DescribeVpcsResult;
-import com.amazonaws.services.ec2.model.Vpc;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
+import software.amazon.awssdk.services.ec2.Ec2Client;
+import software.amazon.awssdk.services.ec2.model.DescribeVpcsRequest;
+import software.amazon.awssdk.services.ec2.model.DescribeVpcsResponse;
+import software.amazon.awssdk.services.ec2.model.Vpc;
import java.util.Collections;
import java.util.List;
@@ -49,9 +49,9 @@ public class VpcTableProvider
implements TableProvider
{
private static final Schema SCHEMA;
- private AmazonEC2 ec2;
+ private Ec2Client ec2;
- public VpcTableProvider(AmazonEC2 ec2)
+ public VpcTableProvider(Ec2Client ec2)
{
this.ec2 = ec2;
}
@@ -92,15 +92,15 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest
@Override
public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker)
{
- DescribeVpcsRequest request = new DescribeVpcsRequest();
+ DescribeVpcsRequest.Builder request = DescribeVpcsRequest.builder();
ValueSet idConstraint = recordsRequest.getConstraints().getSummary().get("id");
if (idConstraint != null && idConstraint.isSingleValue()) {
- request.setVpcIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
+ request.vpcIds(Collections.singletonList(idConstraint.getSingleValue().toString()));
}
- DescribeVpcsResult response = ec2.describeVpcs(request);
- for (Vpc vpc : response.getVpcs()) {
+ DescribeVpcsResponse response = ec2.describeVpcs(request.build());
+ for (Vpc vpc : response.vpcs()) {
instanceToRow(vpc, spiller);
}
}
@@ -119,16 +119,16 @@ private void instanceToRow(Vpc vpc,
spiller.writeRows((Block block, int row) -> {
boolean matched = true;
- matched &= block.offerValue("id", row, vpc.getVpcId());
- matched &= block.offerValue("cidr_block", row, vpc.getCidrBlock());
- matched &= block.offerValue("dhcp_opts", row, vpc.getDhcpOptionsId());
- matched &= block.offerValue("tenancy", row, vpc.getInstanceTenancy());
- matched &= block.offerValue("owner", row, vpc.getOwnerId());
- matched &= block.offerValue("state", row, vpc.getState());
- matched &= block.offerValue("is_default", row, vpc.getIsDefault());
+ matched &= block.offerValue("id", row, vpc.vpcId());
+ matched &= block.offerValue("cidr_block", row, vpc.cidrBlock());
+ matched &= block.offerValue("dhcp_opts", row, vpc.dhcpOptionsId());
+ matched &= block.offerValue("tenancy", row, vpc.instanceTenancyAsString());
+ matched &= block.offerValue("owner", row, vpc.ownerId());
+ matched &= block.offerValue("state", row, vpc.stateAsString());
+ matched &= block.offerValue("is_default", row, vpc.isDefault());
- List tags = vpc.getTags().stream()
- .map(next -> next.getKey() + ":" + next.getValue()).collect(Collectors.toList());
+ List tags = vpc.tags().stream()
+ .map(next -> next.key() + ":" + next.value()).collect(Collectors.toList());
matched &= block.offerComplexValue("tags", row, FieldResolver.DEFAULT, tags);
return matched ? 1 : 0;
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3BucketsTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3BucketsTableProvider.java
index 0387ac6bf7..7ff28b61e5 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3BucketsTableProvider.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3BucketsTableProvider.java
@@ -29,10 +29,12 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.Bucket;
-import com.amazonaws.services.s3.model.Owner;
import org.apache.arrow.vector.types.pojo.Schema;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.Bucket;
+import software.amazon.awssdk.services.s3.model.GetBucketAclRequest;
+import software.amazon.awssdk.services.s3.model.GetBucketAclResponse;
+import software.amazon.awssdk.services.s3.model.Owner;
/**
-* Maps your S3 Objects to a table.
+* Maps your S3 Buckets to a table.
@@ -41,9 +43,9 @@ public class S3BucketsTableProvider
implements TableProvider
{
private static final Schema SCHEMA;
- private AmazonS3 amazonS3;
+ private S3Client amazonS3;
- public S3BucketsTableProvider(AmazonS3 amazonS3)
+ public S3BucketsTableProvider(S3Client amazonS3)
{
this.amazonS3 = amazonS3;
}
@@ -84,7 +86,7 @@ public GetTableResponse getTable(BlockAllocator blockAllocator, GetTableRequest
@Override
public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker)
{
- for (Bucket next : amazonS3.listBuckets()) {
+ for (Bucket next : amazonS3.listBuckets().buckets()) {
toRow(next, spiller);
}
}
@@ -102,13 +104,15 @@ private void toRow(Bucket bucket,
{
spiller.writeRows((Block block, int row) -> {
boolean matched = true;
- matched &= block.offerValue("bucket_name", row, bucket.getName());
- matched &= block.offerValue("create_date", row, bucket.getCreationDate());
+ matched &= block.offerValue("bucket_name", row, bucket.name());
+ matched &= block.offerValue("create_date", row, bucket.creationDate());
- Owner owner = bucket.getOwner();
+ GetBucketAclResponse response = amazonS3.getBucketAcl(GetBucketAclRequest.builder().bucket(bucket.name()).build());
+
+ Owner owner = response.owner();
if (owner != null) {
- matched &= block.offerValue("owner_name", row, bucket.getOwner().getDisplayName());
- matched &= block.offerValue("owner_id", row, bucket.getOwner().getId());
+ matched &= block.offerValue("owner_name", row, owner.displayName());
+ matched &= block.offerValue("owner_id", row, owner.id());
}
return matched ? 1 : 0;
diff --git a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3ObjectsTableProvider.java b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3ObjectsTableProvider.java
index c58315f49e..88179b9382 100644
--- a/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3ObjectsTableProvider.java
+++ b/athena-aws-cmdb/src/main/java/com/amazonaws/athena/connectors/aws/cmdb/tables/s3/S3ObjectsTableProvider.java
@@ -30,12 +30,12 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableResponse;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.ListObjectsV2Request;
-import com.amazonaws.services.s3.model.ListObjectsV2Result;
-import com.amazonaws.services.s3.model.Owner;
-import com.amazonaws.services.s3.model.S3ObjectSummary;
import org.apache.arrow.vector.types.pojo.Schema;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Response;
+import software.amazon.awssdk.services.s3.model.Owner;
+import software.amazon.awssdk.services.s3.model.S3Object;
/**
* Maps your S3 Objects to a table.
@@ -45,9 +45,9 @@ public class S3ObjectsTableProvider
{
private static final int MAX_KEYS = 1000;
private static final Schema SCHEMA;
- private AmazonS3 amazonS3;
+ private S3Client amazonS3;
- public S3ObjectsTableProvider(AmazonS3 amazonS3)
+ public S3ObjectsTableProvider(S3Client amazonS3)
{
this.amazonS3 = amazonS3;
}
@@ -98,42 +98,44 @@ public void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recordsR
"(e.g. where bucket_name='my_bucket'.");
}
- ListObjectsV2Request req = new ListObjectsV2Request().withBucketName(bucket).withMaxKeys(MAX_KEYS);
- ListObjectsV2Result result;
+ ListObjectsV2Request req = ListObjectsV2Request.builder().bucket(bucket).maxKeys(MAX_KEYS).build();
+ ListObjectsV2Response response;
do {
- result = amazonS3.listObjectsV2(req);
- for (S3ObjectSummary objectSummary : result.getObjectSummaries()) {
- toRow(objectSummary, spiller);
+ response = amazonS3.listObjectsV2(req);
+ for (S3Object s3Object : response.contents()) {
+ toRow(s3Object, spiller, bucket);
}
- req.setContinuationToken(result.getNextContinuationToken());
+ req = req.toBuilder().continuationToken(response.nextContinuationToken()).build();
}
- while (result.isTruncated() && queryStatusChecker.isQueryRunning());
+ while (response.isTruncated() && queryStatusChecker.isQueryRunning());
}
/**
-* Maps a DBInstance into a row in our Apache Arrow response block(s).
+* Maps an S3 object into a row in our Apache Arrow response block(s).
*
- * @param objectSummary The S3 ObjectSummary to map.
+ * @param s3Object The S3 object to map.
* @param spiller The BlockSpiller to use when we want to write a matching row to the response.
+ * @param bucket The name of the S3 bucket.
* @note The current implementation is rather naive in how it maps fields. It leverages a static
* list of fields that we'd like to provide and then explicitly filters and converts each field.
*/
- private void toRow(S3ObjectSummary objectSummary,
- BlockSpiller spiller)
+ private void toRow(S3Object s3Object,
+ BlockSpiller spiller,
+ String bucket)
{
spiller.writeRows((Block block, int row) -> {
boolean matched = true;
- matched &= block.offerValue("bucket_name", row, objectSummary.getBucketName());
- matched &= block.offerValue("e_tag", row, objectSummary.getETag());
- matched &= block.offerValue("key", row, objectSummary.getKey());
- matched &= block.offerValue("bytes", row, objectSummary.getSize());
- matched &= block.offerValue("storage_class", row, objectSummary.getStorageClass());
- matched &= block.offerValue("last_modified", row, objectSummary.getLastModified());
+ matched &= block.offerValue("bucket_name", row, bucket);
+ matched &= block.offerValue("e_tag", row, s3Object.eTag());
+ matched &= block.offerValue("key", row, s3Object.key());
+ matched &= block.offerValue("bytes", row, s3Object.size());
+ matched &= block.offerValue("storage_class", row, s3Object.storageClassAsString());
+ matched &= block.offerValue("last_modified", row, s3Object.lastModified());
- Owner owner = objectSummary.getOwner();
+ Owner owner = s3Object.owner();
if (owner != null) {
- matched &= block.offerValue("owner_name", row, owner.getDisplayName());
- matched &= block.offerValue("owner_id", row, owner.getId());
+ matched &= block.offerValue("owner_name", row, owner.displayName());
+ matched &= block.offerValue("owner_id", row, owner.id());
}
return matched ? 1 : 0;
diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandlerTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandlerTest.java
index ba8f6f815e..6c755e65aa 100644
--- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandlerTest.java
+++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbMetadataHandlerTest.java
@@ -38,15 +38,15 @@
import com.amazonaws.athena.connector.lambda.security.FederatedIdentity;
import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.ArrayList;
import java.util.Collections;
@@ -75,7 +75,7 @@ public class AwsCmdbMetadataHandlerTest
private FederatedIdentity identity = new FederatedIdentity("arn", "account", Collections.emptyMap(), Collections.emptyList());
@Mock
- private AmazonS3 mockS3;
+ private S3Client mockS3;
@Mock
private TableProviderFactory mockTableProviderFactory;
@@ -98,10 +98,10 @@ public class AwsCmdbMetadataHandlerTest
private Block mockBlock;
@Mock
- private AWSSecretsManager mockSecretsManager;
+ private SecretsManagerClient mockSecretsManager;
@Mock
- private AmazonAthena mockAthena;
+ private AthenaClient mockAthena;
private AwsCmdbMetadataHandler handler;
diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandlerTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandlerTest.java
index 9c78bb1ab8..09000c9e60 100644
--- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandlerTest.java
+++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/AwsCmdbRecordHandlerTest.java
@@ -32,15 +32,15 @@
import com.amazonaws.athena.connector.lambda.security.FederatedIdentity;
import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.MockitoJUnitRunner;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Collections;
import java.util.UUID;
@@ -62,7 +62,7 @@ public class AwsCmdbRecordHandlerTest
private FederatedIdentity identity = new FederatedIdentity("arn", "account", Collections.emptyMap(), Collections.emptyList());
@Mock
- private AmazonS3 mockS3;
+ private S3Client mockS3;
@Mock
private TableProviderFactory mockTableProviderFactory;
@@ -77,10 +77,10 @@ public class AwsCmdbRecordHandlerTest
private TableProvider mockTableProvider;
@Mock
- private AWSSecretsManager mockSecretsManager;
+ private SecretsManagerClient mockSecretsManager;
@Mock
- private AmazonAthena mockAthena;
+ private AthenaClient mockAthena;
@Mock
private QueryStatusChecker queryStatusChecker;
diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java
index 19a77878e4..83e2f72c3b 100644
--- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java
+++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/TableProviderFactoryTest.java
@@ -21,19 +21,19 @@
import com.amazonaws.athena.connector.lambda.domain.TableName;
import com.amazonaws.athena.connectors.aws.cmdb.tables.TableProvider;
-import com.amazonaws.services.ec2.AmazonEC2;
-import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce;
-import com.amazonaws.services.rds.AmazonRDS;
-import com.amazonaws.services.s3.AmazonS3;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;
+import software.amazon.awssdk.services.ec2.Ec2Client;
+import software.amazon.awssdk.services.emr.EmrClient;
+import software.amazon.awssdk.services.rds.RdsClient;
+import software.amazon.awssdk.services.s3.S3Client;
import java.util.List;
import java.util.Map;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
@RunWith(MockitoJUnitRunner.class)
public class TableProviderFactoryTest
@@ -42,16 +42,16 @@ public class TableProviderFactoryTest
private int expectedTables = 11;
@Mock
- private AmazonEC2 mockEc2;
+ private Ec2Client mockEc2;
@Mock
- private AmazonElasticMapReduce mockEmr;
+ private EmrClient mockEmr;
@Mock
- private AmazonRDS mockRds;
+ private RdsClient mockRds;
@Mock
- private AmazonS3 amazonS3;
+ private S3Client amazonS3;
private TableProviderFactory factory = new TableProviderFactory(mockEc2, mockEmr, mockRds, amazonS3, com.google.common.collect.ImmutableMap.of());
diff --git a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/AbstractTableProviderTest.java b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/AbstractTableProviderTest.java
index f4d6ba505a..8ab8620921 100644
--- a/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/AbstractTableProviderTest.java
+++ b/athena-aws-cmdb/src/test/java/com/amazonaws/athena/connectors/aws/cmdb/tables/AbstractTableProviderTest.java
@@ -43,11 +43,6 @@
import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory;
import com.amazonaws.athena.connector.lambda.security.FederatedIdentity;
import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.S3Object;
-import com.amazonaws.services.s3.model.S3ObjectInputStream;
import com.google.common.io.ByteStreams;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
@@ -59,8 +54,16 @@
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.junit.MockitoJUnitRunner;
+import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.core.ResponseInputStream;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectResponse;
+import software.amazon.awssdk.services.s3.model.PutObjectRequest;
+import software.amazon.awssdk.services.s3.model.PutObjectResponse;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
@@ -74,8 +77,6 @@
import static com.amazonaws.athena.connector.lambda.domain.predicate.Constraints.DEFAULT_NO_LIMIT;
import static org.junit.Assert.*;
import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.nullable;
-import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
@@ -99,7 +100,7 @@ public abstract class AbstractTableProviderTest
private final List mockS3Store = new ArrayList<>();
@Mock
- private AmazonS3 amazonS3;
+ private S3Client amazonS3;
@Mock
private QueryStatusChecker queryStatusChecker;
@@ -129,24 +130,24 @@ public void setUp()
{
allocator = new BlockAllocatorImpl();
- when(amazonS3.putObject(any()))
+ when(amazonS3.putObject(any(PutObjectRequest.class), any(RequestBody.class)))
.thenAnswer((InvocationOnMock invocationOnMock) -> {
- InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream();
+ InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream();
ByteHolder byteHolder = new ByteHolder();
byteHolder.setBytes(ByteStreams.toByteArray(inputStream));
mockS3Store.add(byteHolder);
- return mock(PutObjectResult.class);
+ return PutObjectResponse.builder().build();
});
- when(amazonS3.getObject(nullable(String.class), nullable(String.class)))
- .thenAnswer((InvocationOnMock invocationOnMock) -> {
- S3Object mockObject = mock(S3Object.class);
- ByteHolder byteHolder = mockS3Store.get(0);
- mockS3Store.remove(0);
- when(mockObject.getObjectContent()).thenReturn(
- new S3ObjectInputStream(
- new ByteArrayInputStream(byteHolder.getBytes()), null));
- return mockObject;
+ when(amazonS3.getObject(any(GetObjectRequest.class)))
+ .thenAnswer(new Answer
-
+
- com.amazonaws
- aws-java-sdk-rds
- ${aws-sdk.version}
+ software.amazon.awssdk
+ rds
+ ${aws-sdk-v2.version}
test
+
+
+ software.amazon.awssdk
+ netty-nio-client
+
+
diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandler.java
index 6f6b38b4bc..a307a63383 100644
--- a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandler.java
+++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandler.java
@@ -49,8 +49,6 @@
import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
@@ -61,6 +59,8 @@
import org.apache.arrow.vector.types.pojo.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
@@ -96,8 +96,8 @@ public HiveMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig, ja
@VisibleForTesting
protected HiveMetadataHandler(
DatabaseConnectionConfig databaseConnectionConfiguration,
- AWSSecretsManager secretManager,
- AmazonAthena athena,
+ SecretsManagerClient secretManager,
+ AthenaClient athena,
JdbcConnectionFactory jdbcConnectionFactory,
java.util.Map configOptions)
{
diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandler.java
index 6110d8cc25..b99cd881e6 100644
--- a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandler.java
+++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandler.java
@@ -25,9 +25,9 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.util.VisibleForTesting;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Map;
@@ -56,7 +56,7 @@ public HiveMuxMetadataHandler(java.util.Map configOptions)
}
@VisibleForTesting
- protected HiveMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory,
+ protected HiveMuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory,
Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions)
{
super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions);
diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandler.java
index f87ee06bef..3dd28acccc 100644
--- a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandler.java
+++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandler.java
@@ -25,10 +25,10 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.annotations.VisibleForTesting;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Map;
@@ -57,7 +57,7 @@ public HiveMuxRecordHandler(java.util.Map configOptions)
}
@VisibleForTesting
- HiveMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory,
+ HiveMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory,
DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions);
diff --git a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandler.java b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandler.java
index ed5af5284d..95ff9f6a3e 100644
--- a/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandler.java
+++ b/athena-cloudera-hive/src/main/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandler.java
@@ -28,15 +28,12 @@
import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.athena.AmazonAthenaClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder;
import com.google.common.annotations.VisibleForTesting;
import org.apache.arrow.vector.types.pojo.Schema;
import org.apache.commons.lang3.Validate;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -62,11 +59,11 @@ public HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java
}
public HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions)
{
- this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(),
+ this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(),
jdbcConnectionFactory, new HiveQueryStringBuilder(HIVE_QUOTE_CHARACTER, new HiveFederationExpressionParser(HIVE_QUOTE_CHARACTER)), configOptions);
}
@VisibleForTesting
- HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions)
+ HiveRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions);
this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null");
diff --git a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandlerTest.java b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandlerTest.java
index b1520d4a3d..abc43fc0b1 100644
--- a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandlerTest.java
+++ b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMetadataHandlerTest.java
@@ -28,10 +28,6 @@
import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueResult;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.Schema;
import org.junit.Assert;
@@ -39,7 +35,10 @@
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
-
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse;
import java.sql.*;
import java.util.*;
@@ -58,8 +57,8 @@ public class HiveMetadataHandlerTest
private JdbcConnectionFactory jdbcConnectionFactory;
private Connection connection;
private FederatedIdentity federatedIdentity;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
private BlockAllocator blockAllocator;
@BeforeClass
@@ -75,9 +74,9 @@ public void setup()
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class, Mockito.RETURNS_DEEP_STUBS);
this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS);
Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
- Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}"));
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
+ Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build());
this.hiveMetadataHandler = new HiveMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of());
this.federatedIdentity = Mockito.mock(FederatedIdentity.class);
diff --git a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandlerTest.java b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandlerTest.java
index 8f0f47fc63..344b1e4915 100644
--- a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandlerTest.java
+++ b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxMetadataHandlerTest.java
@@ -43,8 +43,8 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableRequest;
import com.amazonaws.athena.connector.lambda.metadata.ListSchemasRequest;
import com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import static org.mockito.ArgumentMatchers.nullable;
@@ -54,8 +54,8 @@ public class HiveMuxMetadataHandlerTest
private HiveMetadataHandler hiveMetadataHandler;
private JdbcMetadataHandler jdbcMetadataHandler;
private BlockAllocator allocator;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
private QueryStatusChecker queryStatusChecker;
private JdbcConnectionFactory jdbcConnectionFactory;
@BeforeClass
@@ -68,8 +68,8 @@ public void setup()
this.allocator = new BlockAllocatorImpl();
this.hiveMetadataHandler = Mockito.mock(HiveMetadataHandler.class);
this.metadataHandlerMap = Collections.singletonMap("metaHive", this.hiveMetadataHandler);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class);
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", HiveConstants.HIVE_NAME,
diff --git a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandlerTest.java b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandlerTest.java
index d3fb2d0ee3..31035ae1a8 100644
--- a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandlerTest.java
+++ b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveMuxRecordHandlerTest.java
@@ -29,15 +29,15 @@
import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.vector.types.pojo.Schema;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import org.testng.Assert;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.SQLException;
@@ -50,9 +50,9 @@ public class HiveMuxRecordHandlerTest
private Map recordHandlerMap;
private HiveRecordHandler hiveRecordHandler;
private JdbcRecordHandler jdbcRecordHandler;
- private AmazonS3 amazonS3;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private S3Client amazonS3;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
private QueryStatusChecker queryStatusChecker;
private JdbcConnectionFactory jdbcConnectionFactory;
@BeforeClass
@@ -64,9 +64,9 @@ public void setup()
{
this.hiveRecordHandler = Mockito.mock(HiveRecordHandler.class);
this.recordHandlerMap = Collections.singletonMap("recordHive", this.hiveRecordHandler);
- this.amazonS3 = Mockito.mock(AmazonS3.class);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
+ this.amazonS3 = Mockito.mock(S3Client.class);
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class);
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", HiveConstants.HIVE_NAME,
diff --git a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandlerTest.java b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandlerTest.java
index 8cfce879a6..108474f096 100644
--- a/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandlerTest.java
+++ b/athena-cloudera-hive/src/test/java/com/amazonaws/athena/connectors/cloudera/HiveRecordHandlerTest.java
@@ -32,11 +32,6 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueResult;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.arrow.vector.types.Types;
@@ -46,6 +41,12 @@
import org.junit.Test;
import org.mockito.Mockito;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse;
+
import java.sql.Connection;
import java.sql.Date;
import java.sql.PreparedStatement;
@@ -62,18 +63,18 @@ public class HiveRecordHandlerTest
private Connection connection;
private JdbcConnectionFactory jdbcConnectionFactory;
private JdbcSplitQueryBuilder jdbcSplitQueryBuilder;
- private AmazonS3 amazonS3;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private S3Client amazonS3;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
@Before
public void setup()
throws Exception
{
- this.amazonS3 = Mockito.mock(AmazonS3.class);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
- Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}"));
+ this.amazonS3 = Mockito.mock(S3Client.class);
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
+ Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build());
this.connection = Mockito.mock(Connection.class);
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection);
diff --git a/athena-cloudera-impala/Dockerfile b/athena-cloudera-impala/Dockerfile
new file mode 100644
index 0000000000..2ed43aeaa9
--- /dev/null
+++ b/athena-cloudera-impala/Dockerfile
@@ -0,0 +1,9 @@
+FROM public.ecr.aws/lambda/java:11
+
+# Copy function code and runtime dependencies from Maven layout
+COPY target/athena-cloudera-impala-2022.47.1.jar ${LAMBDA_TASK_ROOT}
+# Unpack the jar
+RUN jar xf athena-cloudera-impala-2022.47.1.jar
+
+# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
+CMD [ "com.amazonaws.athena.connectors.cloudera.ImpalaMuxCompositeHandler" ]
\ No newline at end of file
diff --git a/athena-cloudera-impala/athena-cloudera-impala.yaml b/athena-cloudera-impala/athena-cloudera-impala.yaml
index ee292cea3a..60dc37ed9e 100644
--- a/athena-cloudera-impala/athena-cloudera-impala.yaml
+++ b/athena-cloudera-impala/athena-cloudera-impala.yaml
@@ -70,10 +70,9 @@ Resources:
spill_prefix: !Ref SpillPrefix
default: !Ref DefaultConnectionString
FunctionName: !Ref LambdaFunctionName
- Handler: "com.amazonaws.athena.connectors.cloudera.ImpalaMuxCompositeHandler"
- CodeUri: "./target/athena-cloudera-impala-2022.47.1.jar"
+ PackageType: "Image"
+ ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudera-impala:2022.47.1'
Description: "Enables Amazon Athena to communicate with Cloudera Impala using JDBC"
- Runtime: java11
Timeout: !Ref LambdaTimeout
MemorySize: !Ref LambdaMemory
PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ]
diff --git a/athena-cloudera-impala/pom.xml b/athena-cloudera-impala/pom.xml
index d3b2a73d3d..cfdb74e7b3 100644
--- a/athena-cloudera-impala/pom.xml
+++ b/athena-cloudera-impala/pom.xml
@@ -48,12 +48,18 @@
${mockito.version}
test
-
+
- com.amazonaws
- aws-java-sdk-rds
- ${aws-sdk.version}
+ software.amazon.awssdk
+ rds
+ ${aws-sdk-v2.version}
test
+
+
+ software.amazon.awssdk
+ netty-nio-client
+
+
diff --git a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaFederationExpressionParser.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaFederationExpressionParser.java
index 74469e70fb..3abc39655e 100644
--- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaFederationExpressionParser.java
+++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaFederationExpressionParser.java
@@ -17,7 +17,7 @@
* limitations under the License.
* #L%
*/
-package com.amazonaws.athena.connectors.hortonworks;
+package com.amazonaws.athena.connectors.cloudera;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcFederationExpressionParser;
import com.google.common.base.Joiner;
diff --git a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandler.java
index 5d75bff3cd..609a424199 100644
--- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandler.java
+++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandler.java
@@ -49,8 +49,6 @@
import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import org.apache.arrow.vector.complex.reader.FieldReader;
@@ -60,6 +58,8 @@
import org.apache.arrow.vector.types.pojo.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
@@ -92,8 +92,8 @@ public ImpalaMetadataHandler(DatabaseConnectionConfig databaseConnectionConfig,
@VisibleForTesting
protected ImpalaMetadataHandler(
DatabaseConnectionConfig databaseConnectionConfiguration,
- AWSSecretsManager secretManager,
- AmazonAthena athena,
+ SecretsManagerClient secretManager,
+ AthenaClient athena,
JdbcConnectionFactory jdbcConnectionFactory,
java.util.Map configOptions)
{
diff --git a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandler.java
index dbe810912f..ec55031198 100644
--- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandler.java
+++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandler.java
@@ -25,9 +25,9 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.util.VisibleForTesting;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Map;
@@ -56,7 +56,7 @@ public ImpalaMuxMetadataHandler(java.util.Map configOptions)
}
@VisibleForTesting
- protected ImpalaMuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory,
+ protected ImpalaMuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory,
Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions)
{
super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions);
diff --git a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandler.java
index d1461b523e..8dbac1f9e3 100644
--- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandler.java
+++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandler.java
@@ -25,10 +25,10 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.annotations.VisibleForTesting;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Map;
@@ -57,7 +57,7 @@ public ImpalaMuxRecordHandler(java.util.Map configOptions)
}
@VisibleForTesting
- ImpalaMuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory,
+ ImpalaMuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory,
DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions);
diff --git a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandler.java b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandler.java
index 8a336a0b5f..59912af693 100644
--- a/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandler.java
+++ b/athena-cloudera-impala/src/main/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandler.java
@@ -22,22 +22,18 @@
import com.amazonaws.athena.connector.lambda.domain.Split;
import com.amazonaws.athena.connector.lambda.domain.TableName;
import com.amazonaws.athena.connector.lambda.domain.predicate.Constraints;
-import com.amazonaws.athena.connectors.hortonworks.ImpalaFederationExpressionParser;
import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig;
import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionInfo;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.athena.AmazonAthenaClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder;
import com.google.common.annotations.VisibleForTesting;
import org.apache.arrow.vector.types.pojo.Schema;
import org.apache.commons.lang3.Validate;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -63,11 +59,11 @@ public ImpalaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, ja
}
public ImpalaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, JdbcConnectionFactory jdbcConnectionFactory, java.util.Map configOptions)
{
- this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(),
+ this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(),
jdbcConnectionFactory, new ImpalaQueryStringBuilder(IMPALA_QUOTE_CHARACTER, new ImpalaFederationExpressionParser(IMPALA_QUOTE_CHARACTER)), configOptions);
}
@VisibleForTesting
- ImpalaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions)
+ ImpalaRecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions);
this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null");
diff --git a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandlerTest.java b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandlerTest.java
index 09746df6da..d87f00e757 100644
--- a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandlerTest.java
+++ b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMetadataHandlerTest.java
@@ -28,10 +28,6 @@
import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueResult;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.Schema;
import org.junit.Assert;
@@ -39,6 +35,10 @@
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse;
import java.sql.*;
import java.util.*;
@@ -58,8 +58,8 @@ public class ImpalaMetadataHandlerTest
private JdbcConnectionFactory jdbcConnectionFactory;
private Connection connection;
private FederatedIdentity federatedIdentity;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
private BlockAllocator blockAllocator;
@BeforeClass
public static void dataSetUP() {
@@ -73,9 +73,9 @@ public void setup()
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class, Mockito.RETURNS_DEEP_STUBS);
this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS);
Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
- Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}"));
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
+ Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build());
this.impalaMetadataHandler = new ImpalaMetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of());
this.federatedIdentity = Mockito.mock(FederatedIdentity.class);
diff --git a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandlerTest.java b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandlerTest.java
index 8fe338fcb8..60f6a8af9e 100644
--- a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandlerTest.java
+++ b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxMetadataHandlerTest.java
@@ -43,8 +43,8 @@
import com.amazonaws.athena.connector.lambda.metadata.GetTableRequest;
import com.amazonaws.athena.connector.lambda.metadata.ListSchemasRequest;
import com.amazonaws.athena.connector.lambda.metadata.ListTablesRequest;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import static org.mockito.ArgumentMatchers.nullable;
@@ -54,8 +54,8 @@ public class ImpalaMuxMetadataHandlerTest
private ImpalaMetadataHandler impalaMetadataHandler;
private JdbcMetadataHandler jdbcMetadataHandler;
private BlockAllocator allocator;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
private QueryStatusChecker queryStatusChecker;
private JdbcConnectionFactory jdbcConnectionFactory;
@BeforeClass
@@ -68,8 +68,8 @@ public void setup()
this.allocator = new BlockAllocatorImpl();
this.impalaMetadataHandler = Mockito.mock(ImpalaMetadataHandler.class);
this.metadataHandlerMap = Collections.singletonMap("metaImpala", this.impalaMetadataHandler);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class);
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", ImpalaConstants.IMPALA_NAME,
diff --git a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandlerTest.java b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandlerTest.java
index ec84d0ed0c..cff80beebb 100644
--- a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandlerTest.java
+++ b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaMuxRecordHandlerTest.java
@@ -29,15 +29,15 @@
import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.vector.types.pojo.Schema;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
import org.testng.Assert;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.SQLException;
@@ -50,9 +50,9 @@ public class ImpalaMuxRecordHandlerTest
private Map recordHandlerMap;
private ImpalaRecordHandler impalaRecordHandler;
private JdbcRecordHandler jdbcRecordHandler;
- private AmazonS3 amazonS3;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private S3Client amazonS3;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
private QueryStatusChecker queryStatusChecker;
private JdbcConnectionFactory jdbcConnectionFactory;
@BeforeClass
@@ -64,9 +64,9 @@ public void setup()
{
this.impalaRecordHandler = Mockito.mock(ImpalaRecordHandler.class);
this.recordHandlerMap = Collections.singletonMap("recordImpala", this.impalaRecordHandler);
- this.amazonS3 = Mockito.mock(AmazonS3.class);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
+ this.amazonS3 = Mockito.mock(S3Client.class);
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class);
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", ImpalaConstants.IMPALA_NAME,
diff --git a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaQueryStringBuilderTest.java b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaQueryStringBuilderTest.java
index d87cc871c6..0b08ecfe45 100644
--- a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaQueryStringBuilderTest.java
+++ b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaQueryStringBuilderTest.java
@@ -20,7 +20,6 @@
package com.amazonaws.athena.connectors.cloudera;
import com.amazonaws.athena.connector.lambda.domain.Split;
-import com.amazonaws.athena.connectors.hortonworks.ImpalaFederationExpressionParser;
import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
diff --git a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandlerTest.java b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandlerTest.java
index 5e222fd508..bd0909b48f 100644
--- a/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandlerTest.java
+++ b/athena-cloudera-impala/src/test/java/com/amazonaws/athena/connectors/cloudera/ImpalaRecordHandlerTest.java
@@ -28,16 +28,15 @@
import com.amazonaws.athena.connector.lambda.domain.predicate.Range;
import com.amazonaws.athena.connector.lambda.domain.predicate.SortedRangeSet;
import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet;
-import com.amazonaws.athena.connectors.hortonworks.ImpalaFederationExpressionParser;
import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueResult;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.arrow.vector.types.Types;
@@ -64,18 +63,18 @@ public class ImpalaRecordHandlerTest
private Connection connection;
private JdbcConnectionFactory jdbcConnectionFactory;
private JdbcSplitQueryBuilder jdbcSplitQueryBuilder;
- private AmazonS3 amazonS3;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private S3Client amazonS3;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
@Before
public void setup()
throws Exception
{
- this.amazonS3 = Mockito.mock(AmazonS3.class);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
- Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}"));
+ this.amazonS3 = Mockito.mock(S3Client.class);
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
+ Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"username\": \"testUser\", \"password\": \"testPassword\"}").build());
this.connection = Mockito.mock(Connection.class);
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection);
diff --git a/athena-cloudwatch-metrics/Dockerfile b/athena-cloudwatch-metrics/Dockerfile
new file mode 100644
index 0000000000..b3eafc1e38
--- /dev/null
+++ b/athena-cloudwatch-metrics/Dockerfile
@@ -0,0 +1,9 @@
+FROM public.ecr.aws/lambda/java:11
+
+# Copy function code and runtime dependencies from Maven layout
+COPY target/athena-cloudwatch-metrics-2022.47.1.jar ${LAMBDA_TASK_ROOT}
+# Unpack the jar
+RUN jar xf athena-cloudwatch-metrics-2022.47.1.jar
+
+# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
+CMD [ "com.amazonaws.athena.connectors.cloudwatch.metrics.MetricsCompositeHandler" ]
diff --git a/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml b/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml
index d1d815063c..974b979e37 100644
--- a/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml
+++ b/athena-cloudwatch-metrics/athena-cloudwatch-metrics.yaml
@@ -52,10 +52,9 @@ Resources:
spill_bucket: !Ref SpillBucket
spill_prefix: !Ref SpillPrefix
FunctionName: !Ref AthenaCatalogName
- Handler: "com.amazonaws.athena.connectors.cloudwatch.metrics.MetricsCompositeHandler"
- CodeUri: "./target/athena-cloudwatch-metrics-2022.47.1.jar"
+ PackageType: "Image"
+ ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudwatch-metrics:2022.47.1'
Description: "Enables Amazon Athena to communicate with Cloudwatch Metrics, making your metrics data accessible via SQL"
- Runtime: java11
Timeout: !Ref LambdaTimeout
MemorySize: !Ref LambdaMemory
PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ]
diff --git a/athena-cloudwatch-metrics/pom.xml b/athena-cloudwatch-metrics/pom.xml
index 6c8bff216e..b249525238 100644
--- a/athena-cloudwatch-metrics/pom.xml
+++ b/athena-cloudwatch-metrics/pom.xml
@@ -16,9 +16,9 @@
withdep
- com.amazonaws
- aws-java-sdk-cloudwatch
- ${aws-sdk.version}
+ software.amazon.awssdk
+ cloudwatch
+ ${aws-sdk-v2.version}
diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDe.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDe.java
index 44bfcef8e0..e44c66e7f1 100644
--- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDe.java
+++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDe.java
@@ -19,14 +19,14 @@
*/
package com.amazonaws.athena.connectors.cloudwatch.metrics;
-import com.amazonaws.services.cloudwatch.model.MetricStat;
-import com.fasterxml.jackson.annotation.JsonCreator;
-import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.type.CollectionType;
+import software.amazon.awssdk.services.cloudwatch.model.MetricStat;
import java.io.IOException;
import java.util.List;
+import java.util.stream.Collectors;
/**
* Used to serialize and deserialize Cloudwatch Metrics MetricStat objects. This is used
@@ -48,7 +48,7 @@ private MetricStatSerDe() {}
public static String serialize(List metricStats)
{
try {
- return mapper.writeValueAsString(new MetricStatHolder(metricStats));
+ return mapper.writeValueAsString(metricStats.stream().map(stat -> stat.toBuilder()).collect(Collectors.toList()));
}
catch (JsonProcessingException ex) {
throw new RuntimeException(ex);
@@ -64,30 +64,11 @@ public static String serialize(List metricStats)
public static List deserialize(String serializedMetricStats)
{
try {
- return mapper.readValue(serializedMetricStats, MetricStatHolder.class).getMetricStats();
+ CollectionType metricStatBuilderCollection = mapper.getTypeFactory().constructCollectionType(List.class, MetricStat.serializableBuilderClass());
+ return ((List) mapper.readValue(serializedMetricStats, metricStatBuilderCollection)).stream().map(stat -> stat.build()).collect(Collectors.toList());
}
catch (IOException ex) {
throw new RuntimeException(ex);
}
}
-
- /**
- * Helper which allows us to use Jackson's Object Mapper to serialize a List of MetricStats.
- */
- private static class MetricStatHolder
- {
- private final List metricStats;
-
- @JsonCreator
- public MetricStatHolder(@JsonProperty("metricStats") List metricStats)
- {
- this.metricStats = metricStats;
- }
-
- @JsonProperty
- public List getMetricStats()
- {
- return metricStats;
- }
- }
}
diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtils.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtils.java
index 40ebeacaeb..7c8b97aa90 100644
--- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtils.java
+++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtils.java
@@ -26,15 +26,15 @@
import com.amazonaws.athena.connector.lambda.domain.predicate.SortedRangeSet;
import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
-import com.amazonaws.services.cloudwatch.model.Dimension;
-import com.amazonaws.services.cloudwatch.model.DimensionFilter;
-import com.amazonaws.services.cloudwatch.model.GetMetricDataRequest;
-import com.amazonaws.services.cloudwatch.model.ListMetricsRequest;
-import com.amazonaws.services.cloudwatch.model.Metric;
-import com.amazonaws.services.cloudwatch.model.MetricDataQuery;
-import com.amazonaws.services.cloudwatch.model.MetricStat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.cloudwatch.model.Dimension;
+import software.amazon.awssdk.services.cloudwatch.model.DimensionFilter;
+import software.amazon.awssdk.services.cloudwatch.model.GetMetricDataRequest;
+import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest;
+import software.amazon.awssdk.services.cloudwatch.model.Metric;
+import software.amazon.awssdk.services.cloudwatch.model.MetricDataQuery;
+import software.amazon.awssdk.services.cloudwatch.model.MetricStat;
import java.util.ArrayList;
import java.util.Collections;
@@ -70,11 +70,11 @@ private MetricUtils() {}
*/
protected static boolean applyMetricConstraints(ConstraintEvaluator evaluator, Metric metric, String statistic)
{
- if (!evaluator.apply(NAMESPACE_FIELD, metric.getNamespace())) {
+ if (!evaluator.apply(NAMESPACE_FIELD, metric.namespace())) {
return false;
}
- if (!evaluator.apply(METRIC_NAME_FIELD, metric.getMetricName())) {
+ if (!evaluator.apply(METRIC_NAME_FIELD, metric.metricName())) {
return false;
}
@@ -82,13 +82,13 @@ protected static boolean applyMetricConstraints(ConstraintEvaluator evaluator, M
return false;
}
- for (Dimension next : metric.getDimensions()) {
- if (evaluator.apply(DIMENSION_NAME_FIELD, next.getName()) && evaluator.apply(DIMENSION_VALUE_FIELD, next.getValue())) {
+ for (Dimension next : metric.dimensions()) {
+ if (evaluator.apply(DIMENSION_NAME_FIELD, next.name()) && evaluator.apply(DIMENSION_VALUE_FIELD, next.value())) {
return true;
}
}
- if (metric.getDimensions().isEmpty() &&
+ if (metric.dimensions().isEmpty() &&
evaluator.apply(DIMENSION_NAME_FIELD, null) &&
evaluator.apply(DIMENSION_VALUE_FIELD, null)) {
return true;
@@ -100,28 +100,29 @@ protected static boolean applyMetricConstraints(ConstraintEvaluator evaluator, M
/**
* Attempts to push the supplied predicate constraints onto the Cloudwatch Metrics request.
*/
- protected static void pushDownPredicate(Constraints constraints, ListMetricsRequest listMetricsRequest)
+ protected static void pushDownPredicate(Constraints constraints, ListMetricsRequest.Builder listMetricsRequest)
{
Map summary = constraints.getSummary();
ValueSet namespaceConstraint = summary.get(NAMESPACE_FIELD);
if (namespaceConstraint != null && namespaceConstraint.isSingleValue()) {
- listMetricsRequest.setNamespace(namespaceConstraint.getSingleValue().toString());
+ listMetricsRequest.namespace(namespaceConstraint.getSingleValue().toString());
}
ValueSet metricConstraint = summary.get(METRIC_NAME_FIELD);
if (metricConstraint != null && metricConstraint.isSingleValue()) {
- listMetricsRequest.setMetricName(metricConstraint.getSingleValue().toString());
+ listMetricsRequest.metricName(metricConstraint.getSingleValue().toString());
}
ValueSet dimensionNameConstraint = summary.get(DIMENSION_NAME_FIELD);
ValueSet dimensionValueConstraint = summary.get(DIMENSION_VALUE_FIELD);
if (dimensionNameConstraint != null && dimensionNameConstraint.isSingleValue() &&
dimensionValueConstraint != null && dimensionValueConstraint.isSingleValue()) {
- DimensionFilter filter = new DimensionFilter()
- .withName(dimensionNameConstraint.getSingleValue().toString())
- .withValue(dimensionValueConstraint.getSingleValue().toString());
- listMetricsRequest.setDimensions(Collections.singletonList(filter));
+ DimensionFilter filter = DimensionFilter.builder()
+ .name(dimensionNameConstraint.getSingleValue().toString())
+ .value(dimensionValueConstraint.getSingleValue().toString())
+ .build();
+ listMetricsRequest.dimensions(Collections.singletonList(filter));
}
}
@@ -136,18 +137,15 @@ protected static GetMetricDataRequest makeGetMetricDataRequest(ReadRecordsReques
Split split = readRecordsRequest.getSplit();
String serializedMetricStats = split.getProperty(MetricStatSerDe.SERIALIZED_METRIC_STATS_FIELD_NAME);
List metricStats = MetricStatSerDe.deserialize(serializedMetricStats);
- GetMetricDataRequest dataRequest = new GetMetricDataRequest();
- com.amazonaws.services.cloudwatch.model.Metric metric = new com.amazonaws.services.cloudwatch.model.Metric();
- metric.setNamespace(split.getProperty(NAMESPACE_FIELD));
- metric.setMetricName(split.getProperty(METRIC_NAME_FIELD));
+ GetMetricDataRequest.Builder dataRequestBuilder = GetMetricDataRequest.builder();
List metricDataQueries = new ArrayList<>();
int metricId = 1;
for (MetricStat nextMetricStat : metricStats) {
- metricDataQueries.add(new MetricDataQuery().withMetricStat(nextMetricStat).withId("m" + metricId++));
+ metricDataQueries.add(MetricDataQuery.builder().metricStat(nextMetricStat).id("m" + metricId++).build());
}
- dataRequest.withMetricDataQueries(metricDataQueries);
+ dataRequestBuilder.metricDataQueries(metricDataQueries);
ValueSet timeConstraint = readRecordsRequest.getConstraints().getSummary().get(TIMESTAMP_FIELD);
if (timeConstraint instanceof SortedRangeSet && !timeConstraint.isNullAllowed()) {
@@ -162,30 +160,30 @@ protected static GetMetricDataRequest makeGetMetricDataRequest(ReadRecordsReques
Long lowerBound = (Long) basicPredicate.getLow().getValue();
//TODO: confirm timezone handling
logger.info("makeGetMetricsRequest: with startTime " + (lowerBound * 1000) + " " + new Date(lowerBound * 1000));
- dataRequest.withStartTime(new Date(lowerBound * 1000));
+ dataRequestBuilder.startTime(new Date(lowerBound * 1000).toInstant());
}
else {
//TODO: confirm timezone handling
- dataRequest.withStartTime(new Date(0));
+ dataRequestBuilder.startTime(new Date(0).toInstant());
}
if (!basicPredicate.getHigh().isNullValue()) {
Long upperBound = (Long) basicPredicate.getHigh().getValue();
//TODO: confirm timezone handling
logger.info("makeGetMetricsRequest: with endTime " + (upperBound * 1000) + " " + new Date(upperBound * 1000));
- dataRequest.withEndTime(new Date(upperBound * 1000));
+ dataRequestBuilder.endTime(new Date(upperBound * 1000).toInstant());
}
else {
//TODO: confirm timezone handling
- dataRequest.withEndTime(new Date(System.currentTimeMillis()));
+ dataRequestBuilder.endTime(new Date(System.currentTimeMillis()).toInstant());
}
}
else {
//TODO: confirm timezone handling
- dataRequest.withStartTime(new Date(0));
- dataRequest.withEndTime(new Date(System.currentTimeMillis()));
+ dataRequestBuilder.startTime(new Date(0).toInstant());
+ dataRequestBuilder.endTime(new Date(System.currentTimeMillis()).toInstant());
}
- return dataRequest;
+ return dataRequestBuilder.build();
}
}
diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsExceptionFilter.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsExceptionFilter.java
index 4810c6a017..1efb757f46 100644
--- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsExceptionFilter.java
+++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsExceptionFilter.java
@@ -20,8 +20,8 @@
package com.amazonaws.athena.connectors.cloudwatch.metrics;
import com.amazonaws.athena.connector.lambda.ThrottlingInvoker;
-import com.amazonaws.services.cloudwatch.model.AmazonCloudWatchException;
-import com.amazonaws.services.cloudwatch.model.LimitExceededException;
+import software.amazon.awssdk.services.cloudwatch.model.CloudWatchException;
+import software.amazon.awssdk.services.cloudwatch.model.LimitExceededException;
/**
* Used to identify Exceptions that are related to Cloudwatch Metrics throttling events.
@@ -36,11 +36,11 @@ private MetricsExceptionFilter() {}
@Override
public boolean isMatch(Exception ex)
{
- if (ex instanceof AmazonCloudWatchException && ex.getMessage().startsWith("Rate exceeded")) {
+ if (ex instanceof CloudWatchException && ex.getMessage().startsWith("Rate exceeded")) {
return true;
}
- if (ex instanceof AmazonCloudWatchException && ex.getMessage().startsWith("Request has been throttled")) {
+ if (ex instanceof CloudWatchException && ex.getMessage().startsWith("Request has been throttled")) {
return true;
}
diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java
index 866b465162..2b64e7c129 100644
--- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java
+++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandler.java
@@ -42,19 +42,18 @@
import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricSamplesTable;
import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricsTable;
import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.Table;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
-import com.amazonaws.services.cloudwatch.AmazonCloudWatchClientBuilder;
-import com.amazonaws.services.cloudwatch.model.ListMetricsRequest;
-import com.amazonaws.services.cloudwatch.model.ListMetricsResult;
-import com.amazonaws.services.cloudwatch.model.Metric;
-import com.amazonaws.services.cloudwatch.model.MetricStat;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.util.CollectionUtils;
import com.google.common.collect.Lists;
import org.apache.arrow.util.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
+import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest;
+import software.amazon.awssdk.services.cloudwatch.model.ListMetricsResponse;
+import software.amazon.awssdk.services.cloudwatch.model.Metric;
+import software.amazon.awssdk.services.cloudwatch.model.MetricStat;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
+import software.amazon.awssdk.utils.CollectionUtils;
import java.util.ArrayList;
import java.util.Collections;
@@ -107,7 +106,7 @@ public class MetricsMetadataHandler
//Used to handle throttling events by applying AIMD congestion control
private final ThrottlingInvoker invoker;
- private final AmazonCloudWatch metrics;
+ private final CloudWatchClient metrics;
static {
//The statistics supported by Cloudwatch Metrics by default
@@ -133,16 +132,16 @@ public class MetricsMetadataHandler
public MetricsMetadataHandler(java.util.Map configOptions)
{
super(SOURCE_TYPE, configOptions);
- this.metrics = AmazonCloudWatchClientBuilder.standard().build();
+ this.metrics = CloudWatchClient.create();
this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions).build();
}
@VisibleForTesting
protected MetricsMetadataHandler(
- AmazonCloudWatch metrics,
+ CloudWatchClient metrics,
EncryptionKeyFactory keyFactory,
- AWSSecretsManager secretsManager,
- AmazonAthena athena,
+ SecretsManagerClient secretsManager,
+ AthenaClient athena,
String spillBucket,
String spillPrefix,
java.util.Map configOptions)
@@ -235,33 +234,36 @@ public GetSplitsResponse doGetSplits(BlockAllocator blockAllocator, GetSplitsReq
try (ConstraintEvaluator constraintEvaluator = new ConstraintEvaluator(blockAllocator,
METRIC_DATA_TABLE.getSchema(),
getSplitsRequest.getConstraints())) {
- ListMetricsRequest listMetricsRequest = new ListMetricsRequest();
- MetricUtils.pushDownPredicate(getSplitsRequest.getConstraints(), listMetricsRequest);
- listMetricsRequest.setNextToken(getSplitsRequest.getContinuationToken());
+ ListMetricsRequest.Builder listMetricsRequestBuilder = ListMetricsRequest.builder();
+ MetricUtils.pushDownPredicate(getSplitsRequest.getConstraints(), listMetricsRequestBuilder);
+ listMetricsRequestBuilder.nextToken(getSplitsRequest.getContinuationToken());
String period = getPeriodFromConstraint(getSplitsRequest.getConstraints());
Set splits = new HashSet<>();
- ListMetricsResult result = invoker.invoke(() -> metrics.listMetrics(listMetricsRequest));
+ ListMetricsRequest listMetricsRequest = listMetricsRequestBuilder.build();
+ ListMetricsResponse result = invoker.invoke(() -> metrics.listMetrics(listMetricsRequest));
List metricStats = new ArrayList<>(100);
- for (Metric nextMetric : result.getMetrics()) {
+ for (Metric nextMetric : result.metrics()) {
for (String nextStatistic : STATISTICS) {
if (MetricUtils.applyMetricConstraints(constraintEvaluator, nextMetric, nextStatistic)) {
- metricStats.add(new MetricStat()
- .withMetric(new Metric()
- .withNamespace(nextMetric.getNamespace())
- .withMetricName(nextMetric.getMetricName())
- .withDimensions(nextMetric.getDimensions()))
- .withPeriod(Integer.valueOf(period))
- .withStat(nextStatistic));
+ metricStats.add(MetricStat.builder()
+ .metric(Metric.builder()
+ .namespace(nextMetric.namespace())
+ .metricName(nextMetric.metricName())
+ .dimensions(nextMetric.dimensions())
+ .build())
+ .period(Integer.valueOf(period))
+ .stat(nextStatistic)
+ .build());
}
}
}
String continuationToken = null;
- if (result.getNextToken() != null &&
- !result.getNextToken().equalsIgnoreCase(listMetricsRequest.getNextToken())) {
- continuationToken = result.getNextToken();
+ if (result.nextToken() != null &&
+ !result.nextToken().equalsIgnoreCase(listMetricsRequest.nextToken())) {
+ continuationToken = result.nextToken();
}
if (CollectionUtils.isNullOrEmpty(metricStats)) {
diff --git a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java
index 93b18c62d3..3ca9219f96 100644
--- a/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java
+++ b/athena-cloudwatch-metrics/src/main/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandler.java
@@ -29,29 +29,25 @@
import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricSamplesTable;
import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricsTable;
import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.Table;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.athena.AmazonAthenaClientBuilder;
-import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
-import com.amazonaws.services.cloudwatch.AmazonCloudWatchClientBuilder;
-import com.amazonaws.services.cloudwatch.model.Dimension;
-import com.amazonaws.services.cloudwatch.model.GetMetricDataRequest;
-import com.amazonaws.services.cloudwatch.model.GetMetricDataResult;
-import com.amazonaws.services.cloudwatch.model.ListMetricsRequest;
-import com.amazonaws.services.cloudwatch.model.ListMetricsResult;
-import com.amazonaws.services.cloudwatch.model.Metric;
-import com.amazonaws.services.cloudwatch.model.MetricDataQuery;
-import com.amazonaws.services.cloudwatch.model.MetricDataResult;
-import com.amazonaws.services.cloudwatch.model.MetricStat;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder;
import org.apache.arrow.util.VisibleForTesting;
import org.apache.arrow.vector.types.pojo.Field;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
+import software.amazon.awssdk.services.cloudwatch.model.Dimension;
+import software.amazon.awssdk.services.cloudwatch.model.GetMetricDataRequest;
+import software.amazon.awssdk.services.cloudwatch.model.GetMetricDataResponse;
+import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest;
+import software.amazon.awssdk.services.cloudwatch.model.ListMetricsResponse;
+import software.amazon.awssdk.services.cloudwatch.model.Metric;
+import software.amazon.awssdk.services.cloudwatch.model.MetricDataQuery;
+import software.amazon.awssdk.services.cloudwatch.model.MetricDataResult;
+import software.amazon.awssdk.services.cloudwatch.model.MetricStat;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
-import java.util.Date;
+import java.time.Instant;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -99,23 +95,23 @@ public class MetricsRecordHandler
//Used to handle throttling events by applying AIMD congestion control
private final ThrottlingInvoker invoker;
- private final AmazonS3 amazonS3;
- private final AmazonCloudWatch metrics;
+ private final S3Client amazonS3;
+ private final CloudWatchClient cloudwatchClient;
public MetricsRecordHandler(java.util.Map configOptions)
{
- this(AmazonS3ClientBuilder.defaultClient(),
- AWSSecretsManagerClientBuilder.defaultClient(),
- AmazonAthenaClientBuilder.defaultClient(),
- AmazonCloudWatchClientBuilder.standard().build(), configOptions);
+ this(S3Client.create(),
+ SecretsManagerClient.create(),
+ AthenaClient.create(),
+ CloudWatchClient.create(), configOptions);
}
@VisibleForTesting
- protected MetricsRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, AmazonCloudWatch metrics, java.util.Map configOptions)
+ protected MetricsRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, CloudWatchClient metrics, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions);
this.amazonS3 = amazonS3;
- this.metrics = metrics;
+ this.cloudwatchClient = metrics;
this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions)
.withInitialDelayMs(THROTTLING_INITIAL_DELAY)
.withIncrease(THROTTLING_INCREMENTAL_INCREASE)
@@ -146,37 +142,39 @@ else if (readRecordsRequest.getTableName().getTableName().equalsIgnoreCase(METRI
private void readMetricsWithConstraint(BlockSpiller blockSpiller, ReadRecordsRequest request, QueryStatusChecker queryStatusChecker)
throws TimeoutException
{
- ListMetricsRequest listMetricsRequest = new ListMetricsRequest();
- MetricUtils.pushDownPredicate(request.getConstraints(), listMetricsRequest);
+ ListMetricsRequest.Builder listMetricsRequestBuilder = ListMetricsRequest.builder();
+ MetricUtils.pushDownPredicate(request.getConstraints(), listMetricsRequestBuilder);
String prevToken;
+ String nextToken;
Set requiredFields = new HashSet<>();
request.getSchema().getFields().stream().forEach(next -> requiredFields.add(next.getName()));
ValueSet dimensionNameConstraint = request.getConstraints().getSummary().get(DIMENSION_NAME_FIELD);
ValueSet dimensionValueConstraint = request.getConstraints().getSummary().get(DIMENSION_VALUE_FIELD);
do {
- prevToken = listMetricsRequest.getNextToken();
- ListMetricsResult result = invoker.invoke(() -> metrics.listMetrics(listMetricsRequest));
- for (Metric nextMetric : result.getMetrics()) {
+ ListMetricsRequest listMetricsRequest = listMetricsRequestBuilder.build();
+ prevToken = listMetricsRequest.nextToken();
+ ListMetricsResponse result = invoker.invoke(() -> cloudwatchClient.listMetrics(listMetricsRequest));
+ for (Metric nextMetric : result.metrics()) {
blockSpiller.writeRows((Block block, int row) -> {
boolean matches = MetricUtils.applyMetricConstraints(blockSpiller.getConstraintEvaluator(), nextMetric, null);
if (matches) {
- matches &= block.offerValue(METRIC_NAME_FIELD, row, nextMetric.getMetricName());
- matches &= block.offerValue(NAMESPACE_FIELD, row, nextMetric.getNamespace());
+ matches &= block.offerValue(METRIC_NAME_FIELD, row, nextMetric.metricName());
+ matches &= block.offerValue(NAMESPACE_FIELD, row, nextMetric.namespace());
matches &= block.offerComplexValue(STATISTIC_FIELD, row, DEFAULT, STATISTICS);
matches &= block.offerComplexValue(DIMENSIONS_FIELD,
row,
(Field field, Object val) -> {
if (field.getName().equals(DIMENSION_NAME_FIELD)) {
- return ((Dimension) val).getName();
+ return ((Dimension) val).name();
}
else if (field.getName().equals(DIMENSION_VALUE_FIELD)) {
- return ((Dimension) val).getValue();
+ return ((Dimension) val).value();
}
throw new RuntimeException("Unexpected field " + field.getName());
},
- nextMetric.getDimensions());
+ nextMetric.dimensions());
//This field is 'faked' in that we just use it as a convenient way to filter single dimensions. As such
//we always populate it with the value of the filter if the constraint passed and the filter was singleValue
@@ -193,9 +191,10 @@ else if (field.getName().equals(DIMENSION_VALUE_FIELD)) {
return matches ? 1 : 0;
});
}
- listMetricsRequest.setNextToken(result.getNextToken());
+ nextToken = result.nextToken();
+ listMetricsRequestBuilder.nextToken(nextToken);
}
- while (listMetricsRequest.getNextToken() != null && !listMetricsRequest.getNextToken().equalsIgnoreCase(prevToken) && queryStatusChecker.isQueryRunning());
+ while (nextToken != null && !nextToken.equalsIgnoreCase(prevToken) && queryStatusChecker.isQueryRunning());
}
/**
@@ -204,46 +203,49 @@ else if (field.getName().equals(DIMENSION_VALUE_FIELD)) {
private void readMetricSamplesWithConstraint(BlockSpiller blockSpiller, ReadRecordsRequest request, QueryStatusChecker queryStatusChecker)
throws TimeoutException
{
- GetMetricDataRequest dataRequest = MetricUtils.makeGetMetricDataRequest(request);
+ GetMetricDataRequest originalDataRequest = MetricUtils.makeGetMetricDataRequest(request);
Map queries = new HashMap<>();
- for (MetricDataQuery query : dataRequest.getMetricDataQueries()) {
- queries.put(query.getId(), query);
+ for (MetricDataQuery query : originalDataRequest.metricDataQueries()) {
+ queries.put(query.id(), query);
}
+ GetMetricDataRequest.Builder dataRequestBuilder = originalDataRequest.toBuilder();
String prevToken;
+ String nextToken;
ValueSet dimensionNameConstraint = request.getConstraints().getSummary().get(DIMENSION_NAME_FIELD);
ValueSet dimensionValueConstraint = request.getConstraints().getSummary().get(DIMENSION_VALUE_FIELD);
do {
- prevToken = dataRequest.getNextToken();
- GetMetricDataResult result = invoker.invoke(() -> metrics.getMetricData(dataRequest));
- for (MetricDataResult nextMetric : result.getMetricDataResults()) {
- MetricStat metricStat = queries.get(nextMetric.getId()).getMetricStat();
- List timestamps = nextMetric.getTimestamps();
- List values = nextMetric.getValues();
- for (int i = 0; i < nextMetric.getValues().size(); i++) {
+ GetMetricDataRequest dataRequest = dataRequestBuilder.build();
+ prevToken = dataRequest.nextToken();
+ GetMetricDataResponse result = invoker.invoke(() -> cloudwatchClient.getMetricData(dataRequest));
+ for (MetricDataResult nextMetric : result.metricDataResults()) {
+ MetricStat metricStat = queries.get(nextMetric.id()).metricStat();
+ List timestamps = nextMetric.timestamps();
+ List values = nextMetric.values();
+ for (int i = 0; i < nextMetric.values().size(); i++) {
int sampleNum = i;
blockSpiller.writeRows((Block block, int row) -> {
/**
* Most constraints were already applied at split generation so we only need to apply
* a subset.
*/
- block.offerValue(METRIC_NAME_FIELD, row, metricStat.getMetric().getMetricName());
- block.offerValue(NAMESPACE_FIELD, row, metricStat.getMetric().getNamespace());
- block.offerValue(STATISTIC_FIELD, row, metricStat.getStat());
+ block.offerValue(METRIC_NAME_FIELD, row, metricStat.metric().metricName());
+ block.offerValue(NAMESPACE_FIELD, row, metricStat.metric().namespace());
+ block.offerValue(STATISTIC_FIELD, row, metricStat.stat());
block.offerComplexValue(DIMENSIONS_FIELD,
row,
(Field field, Object val) -> {
if (field.getName().equals(DIMENSION_NAME_FIELD)) {
- return ((Dimension) val).getName();
+ return ((Dimension) val).name();
}
else if (field.getName().equals(DIMENSION_VALUE_FIELD)) {
- return ((Dimension) val).getValue();
+ return ((Dimension) val).value();
}
throw new RuntimeException("Unexpected field " + field.getName());
},
- metricStat.getMetric().getDimensions());
+ metricStat.metric().dimensions());
//This field is 'faked' in that we just use it as a convenient way to filter single dimensions. As such
//we always populate it with the value of the filter if the constraint passed and the filter was singleValue
@@ -257,19 +259,20 @@ else if (field.getName().equals(DIMENSION_VALUE_FIELD)) {
? null : dimensionValueConstraint.getSingleValue().toString();
block.offerValue(DIMENSION_VALUE_FIELD, row, dimVal);
- block.offerValue(PERIOD_FIELD, row, metricStat.getPeriod());
+ block.offerValue(PERIOD_FIELD, row, metricStat.period());
boolean matches = true;
block.offerValue(VALUE_FIELD, row, values.get(sampleNum));
- long timestamp = timestamps.get(sampleNum).getTime() / 1000;
+ long timestamp = timestamps.get(sampleNum).getEpochSecond();
block.offerValue(TIMESTAMP_FIELD, row, timestamp);
return matches ? 1 : 0;
});
}
}
- dataRequest.setNextToken(result.getNextToken());
+ nextToken = result.nextToken();
+ dataRequestBuilder.nextToken(nextToken);
}
- while (dataRequest.getNextToken() != null && !dataRequest.getNextToken().equalsIgnoreCase(prevToken) && queryStatusChecker.isQueryRunning());
+ while (nextToken != null && !nextToken.equalsIgnoreCase(prevToken) && queryStatusChecker.isQueryRunning());
}
}
diff --git a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDeTest.java b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDeTest.java
index 63d15023bc..bfde6ac296 100644
--- a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDeTest.java
+++ b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricStatSerDeTest.java
@@ -19,12 +19,12 @@
*/
package com.amazonaws.athena.connectors.cloudwatch.metrics;
-import com.amazonaws.services.cloudwatch.model.Dimension;
-import com.amazonaws.services.cloudwatch.model.Metric;
-import com.amazonaws.services.cloudwatch.model.MetricStat;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.cloudwatch.model.Dimension;
+import software.amazon.awssdk.services.cloudwatch.model.Metric;
+import software.amazon.awssdk.services.cloudwatch.model.MetricStat;
import java.util.ArrayList;
import java.util.List;
@@ -34,8 +34,8 @@
public class MetricStatSerDeTest
{
private static final Logger logger = LoggerFactory.getLogger(MetricStatSerDeTest.class);
- private static final String EXPECTED_SERIALIZATION = "{\"metricStats\":[{\"metric\":{\"namespace\":\"namespace\",\"metricName\":\"metricName\",\"dimensions\":[" +
- "{\"name\":\"dim_name1\",\"value\":\"dim_value1\"},{\"name\":\"dim_name2\",\"value\":\"dim_value2\"}]},\"period\":60,\"stat\":\"p90\",\"unit\":null}]}";
+ private static final String EXPECTED_SERIALIZATION = "[{\"metric\":{\"namespace\":\"namespace\",\"metricName\":\"metricName\",\"dimensions\":[" +
+ "{\"name\":\"dim_name1\",\"value\":\"dim_value1\"},{\"name\":\"dim_name2\",\"value\":\"dim_value2\"}]},\"period\":60,\"stat\":\"p90\",\"unit\":null}]";
@Test
public void serializeTest()
@@ -48,17 +48,19 @@ public void serializeTest()
String namespace = "namespace";
List dimensions = new ArrayList<>();
- dimensions.add(new Dimension().withName("dim_name1").withValue("dim_value1"));
- dimensions.add(new Dimension().withName("dim_name2").withValue("dim_value2"));
+ dimensions.add(Dimension.builder().name("dim_name1").value("dim_value1").build());
+ dimensions.add(Dimension.builder().name("dim_name2").value("dim_value2").build());
List metricStats = new ArrayList<>();
- metricStats.add(new MetricStat()
- .withMetric(new Metric()
- .withNamespace(namespace)
- .withMetricName(metricName)
- .withDimensions(dimensions))
- .withPeriod(60)
- .withStat(statistic));
+ metricStats.add(MetricStat.builder()
+ .metric(Metric.builder()
+ .namespace(namespace)
+ .metricName(metricName)
+ .dimensions(dimensions)
+ .build())
+ .period(60)
+ .stat(statistic)
+ .build());
String actualSerialization = MetricStatSerDe.serialize(metricStats);
logger.info("serializeTest: {}", actualSerialization);
List actual = MetricStatSerDe.deserialize(actualSerialization);
diff --git a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtilsTest.java b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtilsTest.java
index 7929635f31..c32cd6cd5c 100644
--- a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtilsTest.java
+++ b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricUtilsTest.java
@@ -31,18 +31,18 @@
import com.amazonaws.athena.connector.lambda.domain.predicate.ValueSet;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connector.lambda.security.FederatedIdentity;
-import com.amazonaws.services.cloudwatch.model.Dimension;
-import com.amazonaws.services.cloudwatch.model.DimensionFilter;
-import com.amazonaws.services.cloudwatch.model.GetMetricDataRequest;
-import com.amazonaws.services.cloudwatch.model.ListMetricsRequest;
-import com.amazonaws.services.cloudwatch.model.Metric;
-import com.amazonaws.services.cloudwatch.model.MetricStat;
import org.apache.arrow.vector.types.pojo.Schema;
import com.google.common.collect.ImmutableList;
import org.apache.arrow.vector.types.Types;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
+import software.amazon.awssdk.services.cloudwatch.model.Dimension;
+import software.amazon.awssdk.services.cloudwatch.model.DimensionFilter;
+import software.amazon.awssdk.services.cloudwatch.model.GetMetricDataRequest;
+import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest;
+import software.amazon.awssdk.services.cloudwatch.model.Metric;
+import software.amazon.awssdk.services.cloudwatch.model.MetricStat;
import java.util.ArrayList;
import java.util.Collections;
@@ -100,33 +100,21 @@ public void applyMetricConstraints()
ConstraintEvaluator constraintEvaluator = new ConstraintEvaluator(allocator, schema, new Constraints(constraintsMap, Collections.emptyList(), Collections.emptyList(), DEFAULT_NO_LIMIT));
- Metric metric = new Metric()
- .withNamespace("match1")
- .withMetricName("match2")
- .withDimensions(new Dimension().withName("match4").withValue("match5"));
+ Metric metric = Metric.builder()
+ .namespace("match1")
+ .metricName("match2")
+ .dimensions(Dimension.builder().name("match4").value("match5").build())
+ .build();
String statistic = "match3";
assertTrue(MetricUtils.applyMetricConstraints(constraintEvaluator, metric, statistic));
- assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, copyMetric(metric).withNamespace("no_match"), statistic));
- assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, copyMetric(metric).withMetricName("no_match"), statistic));
+ assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, metric.toBuilder().namespace("no_match").build(), statistic));
+ assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, metric.toBuilder().metricName("no_match").build(), statistic));
assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator,
- copyMetric(metric).withDimensions(Collections.singletonList(new Dimension().withName("no_match").withValue("match5"))), statistic));
+ metric.toBuilder().dimensions(Collections.singletonList(Dimension.builder().name("no_match").value("match5").build())).build(), statistic));
assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator,
- copyMetric(metric).withDimensions(Collections.singletonList(new Dimension().withName("match4").withValue("no_match"))), statistic));
- assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, copyMetric(metric), "no_match"));
- }
-
- private Metric copyMetric(Metric metric)
- {
- Metric newMetric = new Metric()
- .withNamespace(metric.getNamespace())
- .withMetricName(metric.getMetricName());
-
- List dims = new ArrayList<>();
- for (Dimension next : metric.getDimensions()) {
- dims.add(new Dimension().withName(next.getName()).withValue(next.getValue()));
- }
- return newMetric.withDimensions(dims);
+ metric.toBuilder().dimensions(Collections.singletonList(Dimension.builder().name("match4").value("no_match").build())).build(), statistic));
+ assertFalse(MetricUtils.applyMetricConstraints(constraintEvaluator, metric, "no_match"));
}
@Test
@@ -139,13 +127,14 @@ public void pushDownPredicate()
constraintsMap.put(DIMENSION_NAME_FIELD, makeStringEquals(allocator, "match4"));
constraintsMap.put(DIMENSION_VALUE_FIELD, makeStringEquals(allocator, "match5"));
- ListMetricsRequest request = new ListMetricsRequest();
- MetricUtils.pushDownPredicate(new Constraints(constraintsMap, Collections.emptyList(), Collections.emptyList(), DEFAULT_NO_LIMIT), request);
+ ListMetricsRequest.Builder requestBuilder = ListMetricsRequest.builder();
+ MetricUtils.pushDownPredicate(new Constraints(constraintsMap, Collections.emptyList(), Collections.emptyList(), DEFAULT_NO_LIMIT), requestBuilder);
+ ListMetricsRequest request = requestBuilder.build();
- assertEquals("match1", request.getNamespace());
- assertEquals("match2", request.getMetricName());
- assertEquals(1, request.getDimensions().size());
- assertEquals(new DimensionFilter().withName("match4").withValue("match5"), request.getDimensions().get(0));
+ assertEquals("match1", request.namespace());
+ assertEquals("match2", request.metricName());
+ assertEquals(1, request.dimensions().size());
+ assertEquals(DimensionFilter.builder().name("match4").value("match5").build(), request.dimensions().get(0));
}
@Test
@@ -159,17 +148,19 @@ public void makeGetMetricDataRequest()
String namespace = "namespace";
List dimensions = new ArrayList<>();
- dimensions.add(new Dimension().withName("dim_name1").withValue("dim_value1"));
- dimensions.add(new Dimension().withName("dim_name2").withValue("dim_value2"));
+ dimensions.add(Dimension.builder().name("dim_name1").value("dim_value1").build());
+ dimensions.add(Dimension.builder().name("dim_name2").value("dim_value2").build());
List metricStats = new ArrayList<>();
- metricStats.add(new MetricStat()
- .withMetric(new Metric()
- .withNamespace(namespace)
- .withMetricName(metricName)
- .withDimensions(dimensions))
- .withPeriod(60)
- .withStat(statistic));
+ metricStats.add(MetricStat.builder()
+ .metric(Metric.builder()
+ .namespace(namespace)
+ .metricName(metricName)
+ .dimensions(dimensions)
+ .build())
+ .period(60)
+ .stat(statistic)
+ .build());
Split split = Split.newBuilder(null, null)
.add(NAMESPACE_FIELD, namespace)
@@ -198,16 +189,16 @@ public void makeGetMetricDataRequest()
);
GetMetricDataRequest actual = MetricUtils.makeGetMetricDataRequest(request);
- assertEquals(1, actual.getMetricDataQueries().size());
- assertNotNull(actual.getMetricDataQueries().get(0).getId());
- MetricStat metricStat = actual.getMetricDataQueries().get(0).getMetricStat();
+ assertEquals(1, actual.metricDataQueries().size());
+ assertNotNull(actual.metricDataQueries().get(0).id());
+ MetricStat metricStat = actual.metricDataQueries().get(0).metricStat();
assertNotNull(metricStat);
- assertEquals(metricName, metricStat.getMetric().getMetricName());
- assertEquals(namespace, metricStat.getMetric().getNamespace());
- assertEquals(statistic, metricStat.getStat());
- assertEquals(period, metricStat.getPeriod());
- assertEquals(2, metricStat.getMetric().getDimensions().size());
- assertEquals(1000L, actual.getStartTime().getTime());
- assertTrue(actual.getStartTime().getTime() <= System.currentTimeMillis() + 1_000);
+ assertEquals(metricName, metricStat.metric().metricName());
+ assertEquals(namespace, metricStat.metric().namespace());
+ assertEquals(statistic, metricStat.stat());
+ assertEquals(period, metricStat.period());
+ assertEquals(2, metricStat.metric().dimensions().size());
+ assertEquals(1000L, actual.startTime().toEpochMilli());
+ assertTrue(actual.startTime().toEpochMilli() <= System.currentTimeMillis() + 1_000);
}
}
diff --git a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandlerTest.java b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandlerTest.java
index 0dcf33d5c1..a194c74185 100644
--- a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandlerTest.java
+++ b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsMetadataHandlerTest.java
@@ -43,12 +43,6 @@
import com.amazonaws.athena.connector.lambda.metadata.MetadataResponse;
import com.amazonaws.athena.connector.lambda.security.FederatedIdentity;
import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
-import com.amazonaws.services.cloudwatch.model.ListMetricsRequest;
-import com.amazonaws.services.cloudwatch.model.ListMetricsResult;
-import com.amazonaws.services.cloudwatch.model.Metric;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
import org.junit.After;
@@ -60,6 +54,12 @@
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
+import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest;
+import software.amazon.awssdk.services.cloudwatch.model.ListMetricsResponse;
+import software.amazon.awssdk.services.cloudwatch.model.Metric;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.ArrayList;
import java.util.Collections;
@@ -92,13 +92,13 @@ public class MetricsMetadataHandlerTest
private BlockAllocator allocator;
@Mock
- private AmazonCloudWatch mockMetrics;
+ private CloudWatchClient mockMetrics;
@Mock
- private AWSSecretsManager mockSecretsManager;
+ private SecretsManagerClient mockSecretsManager;
@Mock
- private AmazonAthena mockAthena;
+ private AthenaClient mockAthena;
@Before
public void setUp()
@@ -273,17 +273,20 @@ public void doGetMetricSamplesSplits()
ListMetricsRequest request = invocation.getArgument(0, ListMetricsRequest.class);
//assert that the namespace filter was indeed pushed down
- assertEquals(namespaceFilter, request.getNamespace());
- String nextToken = (request.getNextToken() == null) ? "valid" : null;
+ assertEquals(namespaceFilter, request.namespace());
+ String nextToken = (request.nextToken() == null) ? "valid" : null;
List metrics = new ArrayList<>();
for (int i = 0; i < numMetrics; i++) {
//first page does not match constraints, but second page should
- String mockNamespace = (request.getNextToken() == null) ? "NotMyNameSpace" : namespaceFilter;
- metrics.add(new Metric().withNamespace(mockNamespace).withMetricName("metric-" + i));
+ String mockNamespace = (request.nextToken() == null) ? "NotMyNameSpace" : namespaceFilter;
+ metrics.add(Metric.builder()
+ .namespace(mockNamespace)
+ .metricName("metric-" + i)
+ .build());
}
- return new ListMetricsResult().withNextToken(nextToken).withMetrics(metrics);
+ return ListMetricsResponse.builder().nextToken(nextToken).metrics(metrics).build();
});
Schema schema = SchemaBuilder.newBuilder().addIntField("partitionId").build();
@@ -356,9 +359,12 @@ public void doGetMetricSamplesSplitsEmptyMetrics()
when(mockMetrics.listMetrics(nullable(ListMetricsRequest.class))).thenAnswer((InvocationOnMock invocation) -> {
List metrics = new ArrayList<>();
for (int i = 0; i < numMetrics; i++) {
- metrics.add(new Metric().withNamespace(namespace).withMetricName("metric-" + i));
+ metrics.add(Metric.builder()
+ .namespace(namespace)
+ .metricName("metric-" + i)
+ .build());
}
- return new ListMetricsResult().withNextToken(null).withMetrics(metrics);
+ return ListMetricsResponse.builder().nextToken(null).metrics(metrics).build();
});
Schema schema = SchemaBuilder.newBuilder().addIntField("partitionId").build();
diff --git a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java
index bf90e3134a..8b50b97881 100644
--- a/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java
+++ b/athena-cloudwatch-metrics/src/test/java/com/amazonaws/athena/connectors/cloudwatch/metrics/MetricsRecordHandlerTest.java
@@ -37,23 +37,6 @@
import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricSamplesTable;
import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.MetricsTable;
import com.amazonaws.athena.connectors.cloudwatch.metrics.tables.Table;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.cloudwatch.AmazonCloudWatch;
-import com.amazonaws.services.cloudwatch.model.Dimension;
-import com.amazonaws.services.cloudwatch.model.GetMetricDataRequest;
-import com.amazonaws.services.cloudwatch.model.GetMetricDataResult;
-import com.amazonaws.services.cloudwatch.model.ListMetricsRequest;
-import com.amazonaws.services.cloudwatch.model.ListMetricsResult;
-import com.amazonaws.services.cloudwatch.model.Metric;
-import com.amazonaws.services.cloudwatch.model.MetricDataQuery;
-import com.amazonaws.services.cloudwatch.model.MetricDataResult;
-import com.amazonaws.services.cloudwatch.model.MetricStat;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.S3Object;
-import com.amazonaws.services.s3.model.S3ObjectInputStream;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.io.ByteStreams;
import org.junit.After;
import org.junit.Before;
@@ -65,9 +48,29 @@
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.core.ResponseInputStream;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.cloudwatch.CloudWatchClient;
+import software.amazon.awssdk.services.cloudwatch.model.Dimension;
+import software.amazon.awssdk.services.cloudwatch.model.GetMetricDataRequest;
+import software.amazon.awssdk.services.cloudwatch.model.GetMetricDataResponse;
+import software.amazon.awssdk.services.cloudwatch.model.ListMetricsRequest;
+import software.amazon.awssdk.services.cloudwatch.model.ListMetricsResponse;
+import software.amazon.awssdk.services.cloudwatch.model.Metric;
+import software.amazon.awssdk.services.cloudwatch.model.MetricDataQuery;
+import software.amazon.awssdk.services.cloudwatch.model.MetricDataResult;
+import software.amazon.awssdk.services.cloudwatch.model.MetricStat;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectResponse;
+import software.amazon.awssdk.services.s3.model.PutObjectRequest;
+import software.amazon.awssdk.services.s3.model.PutObjectResponse;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
+import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
@@ -112,16 +115,16 @@ public class MetricsRecordHandlerTest
private EncryptionKeyFactory keyFactory = new LocalKeyFactory();
@Mock
- private AmazonCloudWatch mockMetrics;
+ private CloudWatchClient mockMetrics;
@Mock
- private AmazonS3 mockS3;
+ private S3Client mockS3;
@Mock
- private AWSSecretsManager mockSecretsManager;
+ private SecretsManagerClient mockSecretsManager;
@Mock
- private AmazonAthena mockAthena;
+ private AthenaClient mockAthena;
@Before
public void setUp()
@@ -132,31 +135,27 @@ public void setUp()
handler = new MetricsRecordHandler(mockS3, mockSecretsManager, mockAthena, mockMetrics, com.google.common.collect.ImmutableMap.of());
spillReader = new S3BlockSpillReader(mockS3, allocator);
- Mockito.lenient().when(mockS3.putObject(any()))
+ Mockito.lenient().when(mockS3.putObject(any(PutObjectRequest.class), any(RequestBody.class)))
.thenAnswer((InvocationOnMock invocationOnMock) -> {
- InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream();
+ InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream();
ByteHolder byteHolder = new ByteHolder();
byteHolder.setBytes(ByteStreams.toByteArray(inputStream));
synchronized (mockS3Storage) {
mockS3Storage.add(byteHolder);
logger.info("puObject: total size " + mockS3Storage.size());
}
- return mock(PutObjectResult.class);
+ return PutObjectResponse.builder().build();
});
- Mockito.lenient().when(mockS3.getObject(nullable(String.class), nullable(String.class)))
+ Mockito.lenient().when(mockS3.getObject(any(GetObjectRequest.class)))
.thenAnswer((InvocationOnMock invocationOnMock) -> {
- S3Object mockObject = mock(S3Object.class);
ByteHolder byteHolder;
synchronized (mockS3Storage) {
byteHolder = mockS3Storage.get(0);
mockS3Storage.remove(0);
logger.info("getObject: total size " + mockS3Storage.size());
}
- when(mockObject.getObjectContent()).thenReturn(
- new S3ObjectInputStream(
- new ByteArrayInputStream(byteHolder.getBytes()), null));
- return mockObject;
+ return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes()));
});
}
@@ -183,17 +182,23 @@ public void readMetricsWithConstraint()
ListMetricsRequest request = invocation.getArgument(0, ListMetricsRequest.class);
numCalls.incrementAndGet();
//assert that the namespace filter was indeed pushed down
- assertEquals(namespace, request.getNamespace());
- String nextToken = (request.getNextToken() == null) ? "valid" : null;
+ assertEquals(namespace, request.namespace());
+ String nextToken = (request.nextToken() == null) ? "valid" : null;
List metrics = new ArrayList<>();
for (int i = 0; i < numMetrics; i++) {
- metrics.add(new Metric().withNamespace(namespace).withMetricName("metric-" + i)
- .withDimensions(new Dimension().withName(dimName).withValue(dimValue)));
- metrics.add(new Metric().withNamespace(namespace + i).withMetricName("metric-" + i));
+ metrics.add(Metric.builder()
+ .namespace(namespace)
+ .metricName("metric-" + i)
+ .dimensions(Dimension.builder()
+ .name(dimName)
+ .value(dimValue)
+ .build())
+ .build());
+ metrics.add(Metric.builder().namespace(namespace + i).metricName("metric-" + i).build());
}
- return new ListMetricsResult().withNextToken(nextToken).withMetrics(metrics);
+ return ListMetricsResponse.builder().nextToken(nextToken).metrics(metrics).build();
});
Map constraintsMap = new HashMap<>();
@@ -246,7 +251,7 @@ public void readMetricSamplesWithConstraint()
String period = "60";
String dimName = "dimName";
String dimValue = "dimValue";
- List dimensions = Collections.singletonList(new Dimension().withName(dimName).withValue(dimValue));
+ List dimensions = Collections.singletonList(Dimension.builder().name(dimName).value(dimValue).build());
int numMetrics = 10;
int numSamples = 10;
@@ -270,13 +275,15 @@ public void readMetricSamplesWithConstraint()
.build();
List metricStats = new ArrayList<>();
- metricStats.add(new MetricStat()
- .withMetric(new Metric()
- .withNamespace(namespace)
- .withMetricName(metricName)
- .withDimensions(dimensions))
- .withPeriod(60)
- .withStat(statistic));
+ metricStats.add(MetricStat.builder()
+ .metric(Metric.builder()
+ .namespace(namespace)
+ .metricName(metricName)
+ .dimensions(dimensions)
+ .build())
+ .period(60)
+ .stat(statistic)
+ .build());
Split split = Split.newBuilder(spillLocation, keyFactory.create())
.add(MetricStatSerDe.SERIALIZED_METRIC_STATS_FIELD_NAME, MetricStatSerDe.serialize(metricStats))
@@ -310,40 +317,40 @@ public void readMetricSamplesWithConstraint()
logger.info("readMetricSamplesWithConstraint: exit");
}
- private GetMetricDataResult mockMetricData(InvocationOnMock invocation, int numMetrics, int numSamples)
+ private GetMetricDataResponse mockMetricData(InvocationOnMock invocation, int numMetrics, int numSamples)
{
GetMetricDataRequest request = invocation.getArgument(0, GetMetricDataRequest.class);
/**
* Confirm that all available criteria were pushed down into Cloudwatch Metrics
*/
- List queries = request.getMetricDataQueries();
+ List queries = request.metricDataQueries();
assertEquals(1, queries.size());
MetricDataQuery query = queries.get(0);
- MetricStat stat = query.getMetricStat();
- assertEquals("m1", query.getId());
- assertNotNull(stat.getPeriod());
- assertNotNull(stat.getMetric());
- assertNotNull(stat.getStat());
- assertNotNull(stat.getMetric().getMetricName());
- assertNotNull(stat.getMetric().getNamespace());
- assertNotNull(stat.getMetric().getDimensions());
- assertEquals(1, stat.getMetric().getDimensions().size());
-
- String nextToken = (request.getNextToken() == null) ? "valid" : null;
+ MetricStat stat = query.metricStat();
+ assertEquals("m1", query.id());
+ assertNotNull(stat.period());
+ assertNotNull(stat.metric());
+ assertNotNull(stat.stat());
+ assertNotNull(stat.metric().metricName());
+ assertNotNull(stat.metric().namespace());
+ assertNotNull(stat.metric().dimensions());
+ assertEquals(1, stat.metric().dimensions().size());
+
+ String nextToken = (request.nextToken() == null) ? "valid" : null;
List samples = new ArrayList<>();
for (int i = 0; i < numMetrics; i++) {
List values = new ArrayList<>();
- List timestamps = new ArrayList<>();
+ List timestamps = new ArrayList<>();
for (double j = 0; j < numSamples; j++) {
values.add(j);
- timestamps.add(new Date(System.currentTimeMillis() + (int) j));
+ timestamps.add(new Date(System.currentTimeMillis() + (int) j).toInstant());
}
- samples.add(new MetricDataResult().withValues(values).withTimestamps(timestamps).withId("m1"));
+ samples.add(MetricDataResult.builder().values(values).timestamps(timestamps).id("m1").build());
}
- return new GetMetricDataResult().withNextToken(nextToken).withMetricDataResults(samples);
+ return GetMetricDataResponse.builder().nextToken(nextToken).metricDataResults(samples).build();
}
private class ByteHolder
diff --git a/athena-cloudwatch/Dockerfile b/athena-cloudwatch/Dockerfile
new file mode 100644
index 0000000000..9859ff8b4c
--- /dev/null
+++ b/athena-cloudwatch/Dockerfile
@@ -0,0 +1,9 @@
+FROM public.ecr.aws/lambda/java:11
+
+# Copy function code and runtime dependencies from Maven layout
+COPY target/athena-cloudwatch-2022.47.1.jar ${LAMBDA_TASK_ROOT}
+# Unpack the jar
+RUN jar xf athena-cloudwatch-2022.47.1.jar
+
+# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
+CMD [ "com.amazonaws.athena.connectors.cloudwatch.CloudwatchCompositeHandler" ]
\ No newline at end of file
diff --git a/athena-cloudwatch/athena-cloudwatch.yaml b/athena-cloudwatch/athena-cloudwatch.yaml
index a5d69fbb2e..2e301dc882 100644
--- a/athena-cloudwatch/athena-cloudwatch.yaml
+++ b/athena-cloudwatch/athena-cloudwatch.yaml
@@ -66,10 +66,9 @@ Resources:
spill_prefix: !Ref SpillPrefix
kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"]
FunctionName: !Ref AthenaCatalogName
- Handler: "com.amazonaws.athena.connectors.cloudwatch.CloudwatchCompositeHandler"
- CodeUri: "./target/athena-cloudwatch-2022.47.1.jar"
+ PackageType: "Image"
+ ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-cloudwatch:2022.47.1'
Description: "Enables Amazon Athena to communicate with Cloudwatch, making your log accessible via SQL"
- Runtime: java11
Timeout: !Ref LambdaTimeout
MemorySize: !Ref LambdaMemory
Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRole]
diff --git a/athena-cloudwatch/pom.xml b/athena-cloudwatch/pom.xml
index bd2dad00d8..95a34d1d37 100644
--- a/athena-cloudwatch/pom.xml
+++ b/athena-cloudwatch/pom.xml
@@ -29,15 +29,35 @@
test
- com.amazonaws
- aws-java-sdk-logs
- ${aws-sdk.version}
+ software.amazon.awssdk
+ cloudwatchlogs
+ 2.28.2
+
+
+
+ commons-logging
+ commons-logging
+
+
+ software.amazon.awssdk
+ netty-nio-client
+
+
+
+
+ software.amazon.awssdk
+ cloudwatch
+ ${aws-sdk-v2.version}
commons-logging
commons-logging
+
+ software.amazon.awssdk
+ netty-nio-client
+
diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchExceptionFilter.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchExceptionFilter.java
index c71db552cf..093aeedd7e 100644
--- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchExceptionFilter.java
+++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchExceptionFilter.java
@@ -20,8 +20,8 @@
package com.amazonaws.athena.connectors.cloudwatch;
import com.amazonaws.athena.connector.lambda.ThrottlingInvoker;
-import com.amazonaws.services.logs.model.AWSLogsException;
-import com.amazonaws.services.logs.model.LimitExceededException;
+import software.amazon.awssdk.services.cloudwatch.model.LimitExceededException;
+import software.amazon.awssdk.services.cloudwatchlogs.model.CloudWatchLogsException;
/**
* Used to identify Exceptions that are related to Cloudwatch Logs throttling events.
@@ -36,7 +36,7 @@ private CloudwatchExceptionFilter() {}
@Override
public boolean isMatch(Exception ex)
{
- if (ex instanceof AWSLogsException && ex.getMessage().startsWith("Rate exceeded")) {
+ if (ex instanceof CloudWatchLogsException && ex.getMessage().startsWith("Rate exceeded")) {
return true;
}
diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java
index cd52e12683..e62ca50477 100644
--- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java
+++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandler.java
@@ -43,17 +43,6 @@
import com.amazonaws.athena.connector.lambda.metadata.optimizations.OptimizationSubType;
import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory;
import com.amazonaws.athena.connectors.cloudwatch.qpt.CloudwatchQueryPassthrough;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.logs.AWSLogs;
-import com.amazonaws.services.logs.AWSLogsClientBuilder;
-import com.amazonaws.services.logs.model.DescribeLogGroupsRequest;
-import com.amazonaws.services.logs.model.DescribeLogGroupsResult;
-import com.amazonaws.services.logs.model.DescribeLogStreamsRequest;
-import com.amazonaws.services.logs.model.DescribeLogStreamsResult;
-import com.amazonaws.services.logs.model.GetQueryResultsResult;
-import com.amazonaws.services.logs.model.LogStream;
-import com.amazonaws.services.logs.model.ResultField;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.collect.ImmutableMap;
import org.apache.arrow.util.VisibleForTesting;
import org.apache.arrow.vector.complex.reader.FieldReader;
@@ -62,6 +51,16 @@
import org.apache.arrow.vector.types.pojo.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
+import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsRequest;
+import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsResponse;
+import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsRequest;
+import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsResponse;
+import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsResponse;
+import software.amazon.awssdk.services.cloudwatchlogs.model.LogStream;
+import software.amazon.awssdk.services.cloudwatchlogs.model.ResultField;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.ArrayList;
import java.util.Collections;
@@ -123,7 +122,7 @@ public class CloudwatchMetadataHandler
.build();
}
- private final AWSLogs awsLogs;
+ private final CloudWatchLogsClient awsLogs;
private final ThrottlingInvoker invoker;
private final CloudwatchTableResolver tableResolver;
private final CloudwatchQueryPassthrough queryPassthrough = new CloudwatchQueryPassthrough();
@@ -131,17 +130,17 @@ public class CloudwatchMetadataHandler
public CloudwatchMetadataHandler(java.util.Map configOptions)
{
super(SOURCE_TYPE, configOptions);
- this.awsLogs = AWSLogsClientBuilder.standard().build();
+ this.awsLogs = CloudWatchLogsClient.create();
this.invoker = ThrottlingInvoker.newDefaultBuilder(EXCEPTION_FILTER, configOptions).build();
- this.tableResolver = new CloudwatchTableResolver(this.invoker, awsLogs, MAX_RESULTS, MAX_RESULTS);
+ this.tableResolver = new CloudwatchTableResolver(this.invoker, awsLogs, MAX_RESULTS, MAX_RESULTS);
}
@VisibleForTesting
protected CloudwatchMetadataHandler(
- AWSLogs awsLogs,
+ CloudWatchLogsClient awsLogs,
EncryptionKeyFactory keyFactory,
- AWSSecretsManager secretsManager,
- AmazonAthena athena,
+ SecretsManagerClient secretsManager,
+ AthenaClient athena,
String spillBucket,
String spillPrefix,
java.util.Map configOptions)
@@ -161,19 +160,19 @@ protected CloudwatchMetadataHandler(
public ListSchemasResponse doListSchemaNames(BlockAllocator blockAllocator, ListSchemasRequest listSchemasRequest)
throws TimeoutException
{
- DescribeLogGroupsRequest request = new DescribeLogGroupsRequest();
- DescribeLogGroupsResult result;
+ DescribeLogGroupsRequest.Builder requestBuilder = DescribeLogGroupsRequest.builder();
+ DescribeLogGroupsResponse response;
List schemas = new ArrayList<>();
do {
if (schemas.size() > MAX_RESULTS) {
throw new RuntimeException("Too many log groups, exceeded max metadata results for schema count.");
}
- result = invoker.invoke(() -> awsLogs.describeLogGroups(request));
- result.getLogGroups().forEach(next -> schemas.add(next.getLogGroupName()));
- request.setNextToken(result.getNextToken());
- logger.info("doListSchemaNames: Listing log groups {} {}", result.getNextToken(), schemas.size());
+ response = invoker.invoke(() -> awsLogs.describeLogGroups(requestBuilder.build()));
+ response.logGroups().forEach(next -> schemas.add(next.logGroupName()));
+ requestBuilder.nextToken(response.nextToken());
+ logger.info("doListSchemaNames: Listing log groups {} {}", response.nextToken(), schemas.size());
}
- while (result.getNextToken() != null);
+ while (response.nextToken() != null);
return new ListSchemasResponse(listSchemasRequest.getCatalogName(), schemas);
}
@@ -189,28 +188,28 @@ public ListTablesResponse doListTables(BlockAllocator blockAllocator, ListTables
{
String nextToken = null;
String logGroupName = tableResolver.validateSchema(listTablesRequest.getSchemaName());
- DescribeLogStreamsRequest request = new DescribeLogStreamsRequest(logGroupName);
- DescribeLogStreamsResult result;
+ DescribeLogStreamsRequest.Builder requestBuilder = DescribeLogStreamsRequest.builder().logGroupName(logGroupName);
+ DescribeLogStreamsResponse response;
List tables = new ArrayList<>();
if (listTablesRequest.getPageSize() == UNLIMITED_PAGE_SIZE_VALUE) {
do {
if (tables.size() > MAX_RESULTS) {
throw new RuntimeException("Too many log streams, exceeded max metadata results for table count.");
}
- result = invoker.invoke(() -> awsLogs.describeLogStreams(request));
- result.getLogStreams().forEach(next -> tables.add(toTableName(listTablesRequest, next)));
- request.setNextToken(result.getNextToken());
- logger.info("doListTables: Listing log streams with token {} and size {}", result.getNextToken(), tables.size());
+ response = invoker.invoke(() -> awsLogs.describeLogStreams(requestBuilder.build()));
+ response.logStreams().forEach(next -> tables.add(toTableName(listTablesRequest, next)));
+ requestBuilder.nextToken(response.nextToken());
+ logger.info("doListTables: Listing log streams with token {} and size {}", response.nextToken(), tables.size());
}
- while (result.getNextToken() != null);
+ while (response.nextToken() != null);
}
else {
- request.setNextToken(listTablesRequest.getNextToken());
- request.setLimit(listTablesRequest.getPageSize());
- result = invoker.invoke(() -> awsLogs.describeLogStreams(request));
- result.getLogStreams().forEach(next -> tables.add(toTableName(listTablesRequest, next)));
- nextToken = result.getNextToken();
- logger.info("doListTables: Listing log streams with token {} and size {}", result.getNextToken(), tables.size());
+ requestBuilder.nextToken(listTablesRequest.getNextToken());
+ requestBuilder.limit(listTablesRequest.getPageSize());
+ response = invoker.invoke(() -> awsLogs.describeLogStreams(requestBuilder.build()));
+ response.logStreams().forEach(next -> tables.add(toTableName(listTablesRequest, next)));
+ nextToken = response.nextToken();
+ logger.info("doListTables: Listing log streams with token {} and size {}", response.nextToken(), tables.size());
}
// Don't add the ALL_LOG_STREAMS_TABLE unless we're at the end of listing out all the tables.
@@ -276,26 +275,26 @@ public void getPartitions(BlockWriter blockWriter, GetTableLayoutRequest request
CloudwatchTableName cwTableName = tableResolver.validateTable(request.getTableName());
- DescribeLogStreamsRequest cwRequest = new DescribeLogStreamsRequest(cwTableName.getLogGroupName());
+ DescribeLogStreamsRequest.Builder cwRequestBuilder = DescribeLogStreamsRequest.builder().logGroupName(cwTableName.getLogGroupName());
if (!ALL_LOG_STREAMS_TABLE.equals(cwTableName.getLogStreamName())) {
- cwRequest.setLogStreamNamePrefix(cwTableName.getLogStreamName());
+ cwRequestBuilder.logStreamNamePrefix(cwTableName.getLogStreamName());
}
- DescribeLogStreamsResult result;
+ DescribeLogStreamsResponse response;
do {
- result = invoker.invoke(() -> awsLogs.describeLogStreams(cwRequest));
- for (LogStream next : result.getLogStreams()) {
+ response = invoker.invoke(() -> awsLogs.describeLogStreams(cwRequestBuilder.build()));
+ for (LogStream next : response.logStreams()) {
//Each log stream that matches any possible partition pruning should be added to the partition list.
blockWriter.writeRows((Block block, int rowNum) -> {
- boolean matched = block.setValue(LOG_GROUP_FIELD, rowNum, cwRequest.getLogGroupName());
- matched &= block.setValue(LOG_STREAM_FIELD, rowNum, next.getLogStreamName());
- matched &= block.setValue(LOG_STREAM_SIZE_FIELD, rowNum, next.getStoredBytes());
+ boolean matched = block.setValue(LOG_GROUP_FIELD, rowNum, cwRequestBuilder.build().logGroupName());
+ matched &= block.setValue(LOG_STREAM_FIELD, rowNum, next.logStreamName());
+ matched &= block.setValue(LOG_STREAM_SIZE_FIELD, rowNum, next.storedBytes());
return matched ? 1 : 0;
});
}
- cwRequest.setNextToken(result.getNextToken());
+ cwRequestBuilder.nextToken(response.nextToken());
}
- while (result.getNextToken() != null && queryStatusChecker.isQueryRunning());
+ while (response.nextToken() != null && queryStatusChecker.isQueryRunning());
}
/**
@@ -367,11 +366,11 @@ public GetTableResponse doGetQueryPassthroughSchema(BlockAllocator allocator, Ge
throw new IllegalArgumentException("No Query passed through [{}]" + request);
}
// to get column names with limit 1
- GetQueryResultsResult getQueryResultsResult = getResult(invoker, awsLogs, request.getQueryPassthroughArguments(), 1);
+ GetQueryResultsResponse getQueryResultsResponse = getResult(invoker, awsLogs, request.getQueryPassthroughArguments(), 1);
SchemaBuilder schemaBuilder = SchemaBuilder.newBuilder();
- if (!getQueryResultsResult.getResults().isEmpty()) {
- for (ResultField field : getQueryResultsResult.getResults().get(0)) {
- schemaBuilder.addField(field.getField(), Types.MinorType.VARCHAR.getType());
+ if (!getQueryResultsResponse.results().isEmpty()) {
+ for (ResultField field : getQueryResultsResponse.results().get(0)) {
+ schemaBuilder.addField(field.field(), Types.MinorType.VARCHAR.getType());
}
}
@@ -415,6 +414,6 @@ private String encodeContinuationToken(int partition)
*/
private TableName toTableName(ListTablesRequest request, LogStream logStream)
{
- return new TableName(request.getSchemaName(), logStream.getLogStreamName());
+ return new TableName(request.getSchemaName(), logStream.logStreamName());
}
}
diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java
index a5d29f0f9b..912b94d218 100644
--- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java
+++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandler.java
@@ -32,22 +32,18 @@
import com.amazonaws.athena.connector.lambda.handlers.RecordHandler;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.cloudwatch.qpt.CloudwatchQueryPassthrough;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.athena.AmazonAthenaClientBuilder;
-import com.amazonaws.services.logs.AWSLogs;
-import com.amazonaws.services.logs.AWSLogsClientBuilder;
-import com.amazonaws.services.logs.model.GetLogEventsRequest;
-import com.amazonaws.services.logs.model.GetLogEventsResult;
-import com.amazonaws.services.logs.model.GetQueryResultsResult;
-import com.amazonaws.services.logs.model.OutputLogEvent;
-import com.amazonaws.services.logs.model.ResultField;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder;
import org.apache.arrow.util.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
+import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsRequest;
+import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsResponse;
+import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsResponse;
+import software.amazon.awssdk.services.cloudwatchlogs.model.OutputLogEvent;
+import software.amazon.awssdk.services.cloudwatchlogs.model.ResultField;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.List;
import java.util.Map;
@@ -78,21 +74,21 @@ public class CloudwatchRecordHandler
//Used to handle Throttling events and apply AIMD congestion control
private final ThrottlingInvoker invoker;
private final AtomicLong count = new AtomicLong(0);
- private final AWSLogs awsLogs;
+ private final CloudWatchLogsClient awsLogs;
private final CloudwatchQueryPassthrough queryPassthrough = new CloudwatchQueryPassthrough();
public CloudwatchRecordHandler(java.util.Map configOptions)
{
this(
- AmazonS3ClientBuilder.defaultClient(),
- AWSSecretsManagerClientBuilder.defaultClient(),
- AmazonAthenaClientBuilder.defaultClient(),
- AWSLogsClientBuilder.defaultClient(),
+ S3Client.create(),
+ SecretsManagerClient.create(),
+ AthenaClient.create(),
+ CloudWatchLogsClient.create(),
configOptions);
}
@VisibleForTesting
- protected CloudwatchRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, AWSLogs awsLogs, java.util.Map configOptions)
+ protected CloudwatchRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, CloudWatchLogsClient awsLogs, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions);
this.awsLogs = awsLogs;
@@ -118,37 +114,38 @@ protected void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recor
invoker.setBlockSpiller(spiller);
do {
final String actualContinuationToken = continuationToken;
- GetLogEventsResult logEventsResult = invoker.invoke(() -> awsLogs.getLogEvents(
+ GetLogEventsResponse logEventsResponse = invoker.invoke(() -> awsLogs.getLogEvents(
pushDownConstraints(recordsRequest.getConstraints(),
- new GetLogEventsRequest()
- .withLogGroupName(split.getProperty(LOG_GROUP_FIELD))
+ GetLogEventsRequest.builder()
+ .logGroupName(split.getProperty(LOG_GROUP_FIELD))
//We use the property instead of the table name because of the special all_streams table
- .withLogStreamName(split.getProperty(LOG_STREAM_FIELD))
- .withNextToken(actualContinuationToken)
+ .logStreamName(split.getProperty(LOG_STREAM_FIELD))
+ .nextToken(actualContinuationToken)
// must be set to use nextToken correctly
- .withStartFromHead(true)
+ .startFromHead(true)
+ .build()
)));
- if (continuationToken == null || !continuationToken.equals(logEventsResult.getNextForwardToken())) {
- continuationToken = logEventsResult.getNextForwardToken();
+ if (continuationToken == null || !continuationToken.equals(logEventsResponse.nextForwardToken())) {
+ continuationToken = logEventsResponse.nextForwardToken();
}
else {
continuationToken = null;
}
- for (OutputLogEvent ole : logEventsResult.getEvents()) {
+ for (OutputLogEvent ole : logEventsResponse.events()) {
spiller.writeRows((Block block, int rowNum) -> {
boolean matched = true;
matched &= block.offerValue(LOG_STREAM_FIELD, rowNum, split.getProperty(LOG_STREAM_FIELD));
- matched &= block.offerValue(LOG_TIME_FIELD, rowNum, ole.getTimestamp());
- matched &= block.offerValue(LOG_MSG_FIELD, rowNum, ole.getMessage());
+ matched &= block.offerValue(LOG_TIME_FIELD, rowNum, ole.timestamp());
+ matched &= block.offerValue(LOG_MSG_FIELD, rowNum, ole.message());
return matched ? 1 : 0;
});
}
logger.info("readWithConstraint: LogGroup[{}] LogStream[{}] Continuation[{}] rows[{}]",
tableName.getSchemaName(), tableName.getTableName(), continuationToken,
- logEventsResult.getEvents().size());
+ logEventsResponse.events().size());
}
while (continuationToken != null && queryStatusChecker.isQueryRunning());
}
@@ -158,13 +155,13 @@ private void getQueryPassthreoughResults(BlockSpiller spiller, ReadRecordsReques
{
Map qptArguments = recordsRequest.getConstraints().getQueryPassthroughArguments();
queryPassthrough.verify(qptArguments);
- GetQueryResultsResult getQueryResultsResult = getResult(invoker, awsLogs, qptArguments, Integer.parseInt(qptArguments.get(CloudwatchQueryPassthrough.LIMIT)));
+ GetQueryResultsResponse getQueryResultsResponse = getResult(invoker, awsLogs, qptArguments, Integer.parseInt(qptArguments.get(CloudwatchQueryPassthrough.LIMIT)));
- for (List resultList : getQueryResultsResult.getResults()) {
+ for (List resultList : getQueryResultsResponse.results()) {
spiller.writeRows((Block block, int rowNum) -> {
for (ResultField resultField : resultList) {
boolean matched = true;
- matched &= block.offerValue(resultField.getField(), rowNum, resultField.getValue());
+ matched &= block.offerValue(resultField.field(), rowNum, resultField.value());
if (!matched) {
return 0;
}
@@ -184,6 +181,7 @@ private void getQueryPassthreoughResults(BlockSpiller spiller, ReadRecordsReques
*/
private GetLogEventsRequest pushDownConstraints(Constraints constraints, GetLogEventsRequest request)
{
+ GetLogEventsRequest.Builder requestBuilder = request.toBuilder();
ValueSet timeConstraint = constraints.getSummary().get(LOG_TIME_FIELD);
if (timeConstraint instanceof SortedRangeSet && !timeConstraint.isNullAllowed()) {
//SortedRangeSet is how >, <, between is represented which are easiest and most common when
@@ -195,15 +193,15 @@ private GetLogEventsRequest pushDownConstraints(Constraints constraints, GetLogE
if (!basicPredicate.getLow().isNullValue()) {
Long lowerBound = (Long) basicPredicate.getLow().getValue();
- request.setStartTime(lowerBound);
+ requestBuilder.startTime(lowerBound);
}
if (!basicPredicate.getHigh().isNullValue()) {
Long upperBound = (Long) basicPredicate.getHigh().getValue();
- request.setEndTime(upperBound);
+ requestBuilder.endTime(upperBound);
}
}
- return request;
+ return requestBuilder.build();
}
}
diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchTableResolver.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchTableResolver.java
index 4c7f25ec7e..d4059b0438 100644
--- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchTableResolver.java
+++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchTableResolver.java
@@ -21,18 +21,18 @@
import com.amazonaws.athena.connector.lambda.ThrottlingInvoker;
import com.amazonaws.athena.connector.lambda.domain.TableName;
-import com.amazonaws.services.logs.AWSLogs;
-import com.amazonaws.services.logs.model.DescribeLogGroupsRequest;
-import com.amazonaws.services.logs.model.DescribeLogGroupsResult;
-import com.amazonaws.services.logs.model.DescribeLogStreamsRequest;
-import com.amazonaws.services.logs.model.DescribeLogStreamsResult;
-import com.amazonaws.services.logs.model.LogGroup;
-import com.amazonaws.services.logs.model.LogStream;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
+import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsRequest;
+import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsResponse;
+import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsRequest;
+import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsResponse;
+import software.amazon.awssdk.services.cloudwatchlogs.model.LogGroup;
+import software.amazon.awssdk.services.cloudwatchlogs.model.LogStream;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
@@ -51,7 +51,7 @@ public class CloudwatchTableResolver
{
private static final Logger logger = LoggerFactory.getLogger(CloudwatchTableResolver.class);
- private AWSLogs awsLogs;
+ private CloudWatchLogsClient logsClient;
//Used to handle Throttling events using an AIMD strategy for congestion control.
private ThrottlingInvoker invoker;
//The LogStream pattern that is capitalized by LAMBDA
@@ -67,14 +67,14 @@ public class CloudwatchTableResolver
* Constructs an instance of the table resolver.
*
* @param invoker The ThrottlingInvoker to use to handle throttling events.
- * @param awsLogs The AWSLogs client to use for cache misses.
+ * @param logsClient The CloudWatchLogsClient to use for cache misses.
* @param maxSchemaCacheSize The max number of schemas to cache.
* @param maxTableCacheSize The max tables to cache.
*/
- public CloudwatchTableResolver(ThrottlingInvoker invoker, AWSLogs awsLogs, long maxSchemaCacheSize, long maxTableCacheSize)
+ public CloudwatchTableResolver(ThrottlingInvoker invoker, CloudWatchLogsClient logsClient, long maxSchemaCacheSize, long maxTableCacheSize)
{
this.invoker = invoker;
- this.awsLogs = awsLogs;
+ this.logsClient = logsClient;
this.tableCache = CacheBuilder.newBuilder()
.maximumSize(maxTableCacheSize)
.build(
@@ -119,12 +119,12 @@ private CloudwatchTableName loadLogStreams(String logGroup, String logStream)
logger.info("loadLogStreams: Did not find a match for the table, falling back to LogGroup scan for {}:{}",
logGroup, logStream);
- DescribeLogStreamsRequest validateTableRequest = new DescribeLogStreamsRequest(logGroup);
- DescribeLogStreamsResult validateTableResult;
+ DescribeLogStreamsRequest.Builder validateTableRequestBuilder = DescribeLogStreamsRequest.builder().logGroupName(logGroup);
+ DescribeLogStreamsResponse validateTableResponse;
do {
- validateTableResult = invoker.invoke(() -> awsLogs.describeLogStreams(validateTableRequest));
- for (LogStream nextStream : validateTableResult.getLogStreams()) {
- String logStreamName = nextStream.getLogStreamName();
+ validateTableResponse = invoker.invoke(() -> logsClient.describeLogStreams(validateTableRequestBuilder.build()));
+ for (LogStream nextStream : validateTableResponse.logStreams()) {
+ String logStreamName = nextStream.logStreamName();
CloudwatchTableName nextCloudwatch = new CloudwatchTableName(logGroup, logStreamName);
tableCache.put(nextCloudwatch.toTableName(), nextCloudwatch);
if (nextCloudwatch.getLogStreamName().equalsIgnoreCase(logStream)) {
@@ -134,9 +134,9 @@ private CloudwatchTableName loadLogStreams(String logGroup, String logStream)
return nextCloudwatch;
}
}
- validateTableRequest.setNextToken(validateTableResult.getNextToken());
+ validateTableRequestBuilder.nextToken(validateTableResponse.nextToken());
}
- while (validateTableResult.getNextToken() != null);
+ while (validateTableResponse.nextToken() != null);
//We could not find a match
throw new IllegalArgumentException("No such table " + logGroup + " " + logStream);
@@ -163,11 +163,11 @@ private CloudwatchTableName loadLogStream(String logGroup, String logStream)
LAMBDA_PATTERN, effectiveTableName);
effectiveTableName = effectiveTableName.replace(LAMBDA_PATTERN, LAMBDA_ACTUAL_PATTERN);
}
- DescribeLogStreamsRequest request = new DescribeLogStreamsRequest(logGroup)
- .withLogStreamNamePrefix(effectiveTableName);
- DescribeLogStreamsResult result = invoker.invoke(() -> awsLogs.describeLogStreams(request));
- for (LogStream nextStream : result.getLogStreams()) {
- String logStreamName = nextStream.getLogStreamName();
+ DescribeLogStreamsRequest request = DescribeLogStreamsRequest.builder().logGroupName(logGroup)
+ .logStreamNamePrefix(effectiveTableName).build();
+ DescribeLogStreamsResponse response = invoker.invoke(() -> logsClient.describeLogStreams(request));
+ for (LogStream nextStream : response.logStreams()) {
+ String logStreamName = nextStream.logStreamName();
CloudwatchTableName nextCloudwatch = new CloudwatchTableName(logGroup, logStreamName);
if (nextCloudwatch.getLogStreamName().equalsIgnoreCase(logStream)) {
logger.info("loadLogStream: Matched {} for {}:{}", nextCloudwatch, logGroup, logStream);
@@ -195,21 +195,21 @@ private String loadLogGroups(String schemaName)
}
logger.info("loadLogGroups: Did not find a match for the schema, falling back to LogGroup scan for {}", schemaName);
- DescribeLogGroupsRequest validateSchemaRequest = new DescribeLogGroupsRequest();
- DescribeLogGroupsResult validateSchemaResult;
+ DescribeLogGroupsRequest.Builder validateSchemaRequestBuilder = DescribeLogGroupsRequest.builder();
+ DescribeLogGroupsResponse validateSchemaResponse;
do {
- validateSchemaResult = invoker.invoke(() -> awsLogs.describeLogGroups(validateSchemaRequest));
- for (LogGroup next : validateSchemaResult.getLogGroups()) {
- String nextLogGroupName = next.getLogGroupName();
+ validateSchemaResponse = invoker.invoke(() -> logsClient.describeLogGroups(validateSchemaRequestBuilder.build()));
+ for (LogGroup next : validateSchemaResponse.logGroups()) {
+ String nextLogGroupName = next.logGroupName();
schemaCache.put(schemaName, nextLogGroupName);
if (nextLogGroupName.equalsIgnoreCase(schemaName)) {
logger.info("loadLogGroups: Matched {} for {}", nextLogGroupName, schemaName);
return nextLogGroupName;
}
}
- validateSchemaRequest.setNextToken(validateSchemaResult.getNextToken());
+ validateSchemaRequestBuilder.nextToken(validateSchemaResponse.nextToken());
}
- while (validateSchemaResult.getNextToken() != null);
+ while (validateSchemaResponse.nextToken() != null);
//We could not find a match
throw new IllegalArgumentException("No such schema " + schemaName);
@@ -224,10 +224,10 @@ private String loadLogGroups(String schemaName)
private String loadLogGroup(String schemaName)
throws TimeoutException
{
- DescribeLogGroupsRequest request = new DescribeLogGroupsRequest().withLogGroupNamePrefix(schemaName);
- DescribeLogGroupsResult result = invoker.invoke(() -> awsLogs.describeLogGroups(request));
- for (LogGroup next : result.getLogGroups()) {
- String nextLogGroupName = next.getLogGroupName();
+ DescribeLogGroupsRequest request = DescribeLogGroupsRequest.builder().logGroupNamePrefix(schemaName).build();
+ DescribeLogGroupsResponse response = invoker.invoke(() -> logsClient.describeLogGroups(request));
+ for (LogGroup next : response.logGroups()) {
+ String nextLogGroupName = next.logGroupName();
if (nextLogGroupName.equalsIgnoreCase(schemaName)) {
logger.info("loadLogGroup: Matched {} for {}", nextLogGroupName, schemaName);
return nextLogGroupName;
diff --git a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchUtils.java b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchUtils.java
index 5c19ec17ee..bb8a209d47 100644
--- a/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchUtils.java
+++ b/athena-cloudwatch/src/main/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchUtils.java
@@ -21,13 +21,14 @@
import com.amazonaws.athena.connector.lambda.ThrottlingInvoker;
import com.amazonaws.athena.connectors.cloudwatch.qpt.CloudwatchQueryPassthrough;
-import com.amazonaws.services.logs.AWSLogs;
-import com.amazonaws.services.logs.model.GetQueryResultsRequest;
-import com.amazonaws.services.logs.model.GetQueryResultsResult;
-import com.amazonaws.services.logs.model.StartQueryRequest;
-import com.amazonaws.services.logs.model.StartQueryResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
+import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsRequest;
+import software.amazon.awssdk.services.cloudwatchlogs.model.GetQueryResultsResponse;
+import software.amazon.awssdk.services.cloudwatchlogs.model.QueryStatus;
+import software.amazon.awssdk.services.cloudwatchlogs.model.StartQueryRequest;
+import software.amazon.awssdk.services.cloudwatchlogs.model.StartQueryResponse;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
@@ -41,8 +42,8 @@ public final class CloudwatchUtils
private CloudwatchUtils() {}
public static StartQueryRequest startQueryRequest(Map qptArguments)
{
- return new StartQueryRequest().withEndTime(Long.valueOf(qptArguments.get(CloudwatchQueryPassthrough.ENDTIME))).withStartTime(Long.valueOf(qptArguments.get(CloudwatchQueryPassthrough.STARTTIME)))
- .withQueryString(qptArguments.get(CloudwatchQueryPassthrough.QUERYSTRING)).withLogGroupNames(getLogGroupNames(qptArguments));
+ return StartQueryRequest.builder().endTime(Long.valueOf(qptArguments.get(CloudwatchQueryPassthrough.ENDTIME))).startTime(Long.valueOf(qptArguments.get(CloudwatchQueryPassthrough.STARTTIME)))
+ .queryString(qptArguments.get(CloudwatchQueryPassthrough.QUERYSTRING)).logGroupNames(getLogGroupNames(qptArguments)).build();
}
private static String[] getLogGroupNames(Map qptArguments)
@@ -55,25 +56,25 @@ private static String[] getLogGroupNames(Map qptArguments)
return logGroupNames;
}
- public static StartQueryResult getQueryResult(AWSLogs awsLogs, StartQueryRequest startQueryRequest)
+ public static StartQueryResponse getQueryResult(CloudWatchLogsClient awsLogs, StartQueryRequest startQueryRequest)
{
return awsLogs.startQuery(startQueryRequest);
}
- public static GetQueryResultsResult getQueryResults(AWSLogs awsLogs, StartQueryResult startQueryResult)
+ public static GetQueryResultsResponse getQueryResults(CloudWatchLogsClient awsLogs, StartQueryResponse startQueryResponse)
{
- return awsLogs.getQueryResults(new GetQueryResultsRequest().withQueryId(startQueryResult.getQueryId()));
+ return awsLogs.getQueryResults(GetQueryResultsRequest.builder().queryId(startQueryResponse.queryId()).build());
}
- public static GetQueryResultsResult getResult(ThrottlingInvoker invoker, AWSLogs awsLogs, Map qptArguments, int limit) throws TimeoutException, InterruptedException
+ public static GetQueryResultsResponse getResult(ThrottlingInvoker invoker, CloudWatchLogsClient awsLogs, Map qptArguments, int limit) throws TimeoutException, InterruptedException
{
- StartQueryResult startQueryResult = invoker.invoke(() -> getQueryResult(awsLogs, startQueryRequest(qptArguments).withLimit(limit)));
- String status = null;
- GetQueryResultsResult getQueryResultsResult;
+ StartQueryResponse startQueryResponse = invoker.invoke(() -> getQueryResult(awsLogs, startQueryRequest(qptArguments).toBuilder().limit(limit).build()));
+ QueryStatus status = null;
+ GetQueryResultsResponse getQueryResultsResponse;
Instant startTime = Instant.now(); // Record the start time
do {
- getQueryResultsResult = invoker.invoke(() -> getQueryResults(awsLogs, startQueryResult));
- status = getQueryResultsResult.getStatus();
+ getQueryResultsResponse = invoker.invoke(() -> getQueryResults(awsLogs, startQueryResponse));
+ status = getQueryResultsResponse.status();
Thread.sleep(1000);
// Check if 10 minutes have passed
@@ -82,8 +83,8 @@ public static GetQueryResultsResult getResult(ThrottlingInvoker invoker, AWSLogs
if (elapsedMinutes >= RESULT_TIMEOUT) {
throw new RuntimeException("Query execution timeout exceeded.");
}
- } while (!status.equalsIgnoreCase("Complete"));
+ } while (status != QueryStatus.COMPLETE);
- return getQueryResultsResult;
+ return getQueryResultsResponse;
}
}
diff --git a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java
index 22a876dbae..f615b3c7b1 100644
--- a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java
+++ b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchMetadataHandlerTest.java
@@ -43,15 +43,6 @@
import com.amazonaws.athena.connector.lambda.metadata.MetadataResponse;
import com.amazonaws.athena.connector.lambda.security.FederatedIdentity;
import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.logs.AWSLogs;
-import com.amazonaws.services.logs.model.DescribeLogGroupsRequest;
-import com.amazonaws.services.logs.model.DescribeLogGroupsResult;
-import com.amazonaws.services.logs.model.DescribeLogStreamsRequest;
-import com.amazonaws.services.logs.model.DescribeLogStreamsResult;
-import com.amazonaws.services.logs.model.LogGroup;
-import com.amazonaws.services.logs.model.LogStream;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.ArrowType;
import org.apache.arrow.vector.types.pojo.Schema;
@@ -65,6 +56,15 @@
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
+import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsRequest;
+import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogGroupsResponse;
+import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsRequest;
+import software.amazon.awssdk.services.cloudwatchlogs.model.DescribeLogStreamsResponse;
+import software.amazon.awssdk.services.cloudwatchlogs.model.LogGroup;
+import software.amazon.awssdk.services.cloudwatchlogs.model.LogStream;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.ArrayList;
import java.util.Collections;
@@ -92,26 +92,32 @@ public class CloudwatchMetadataHandlerTest
private BlockAllocator allocator;
@Mock
- private AWSLogs mockAwsLogs;
+ private CloudWatchLogsClient mockAwsLogs;
@Mock
- private AWSSecretsManager mockSecretsManager;
+ private SecretsManagerClient mockSecretsManager;
@Mock
- private AmazonAthena mockAthena;
+ private AthenaClient mockAthena;
@Before
public void setUp()
throws Exception
{
Mockito.lenient().when(mockAwsLogs.describeLogStreams(nullable(DescribeLogStreamsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> {
- return new DescribeLogStreamsResult().withLogStreams(new LogStream().withLogStreamName("table-9"),
- new LogStream().withLogStreamName("table-10"));
+ return DescribeLogStreamsResponse.builder()
+ .logStreams(
+ LogStream.builder().logStreamName("table-9").build(),
+ LogStream.builder().logStreamName("table-10").build())
+ .build();
});
when(mockAwsLogs.describeLogGroups(nullable(DescribeLogGroupsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> {
- return new DescribeLogGroupsResult().withLogGroups(new LogGroup().withLogGroupName("schema-1"),
- new LogGroup().withLogGroupName("schema-20"));
+ return DescribeLogGroupsResponse.builder()
+ .logGroups(
+ LogGroup.builder().logGroupName("schema-1").build(),
+ LogGroup.builder().logGroupName("schema-20").build())
+ .build();
});
handler = new CloudwatchMetadataHandler(mockAwsLogs, new LocalKeyFactory(), mockSecretsManager, mockAthena, "spillBucket", "spillPrefix", com.google.common.collect.ImmutableMap.of());
allocator = new BlockAllocatorImpl();
@@ -133,34 +139,33 @@ public void doListSchemaNames()
when(mockAwsLogs.describeLogGroups(nullable(DescribeLogGroupsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> {
DescribeLogGroupsRequest request = (DescribeLogGroupsRequest) invocationOnMock.getArguments()[0];
- DescribeLogGroupsResult result = new DescribeLogGroupsResult();
+ DescribeLogGroupsResponse.Builder responseBuilder = DescribeLogGroupsResponse.builder();
Integer nextToken;
- if (request.getNextToken() == null) {
+ if (request.nextToken() == null) {
nextToken = 1;
}
- else if (Integer.valueOf(request.getNextToken()) < 3) {
- nextToken = Integer.valueOf(request.getNextToken()) + 1;
+ else if (Integer.valueOf(request.nextToken()) < 3) {
+ nextToken = Integer.valueOf(request.nextToken()) + 1;
}
else {
nextToken = null;
}
List logGroups = new ArrayList<>();
- if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) {
+ if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) {
for (int i = 0; i < 10; i++) {
- LogGroup nextLogGroup = new LogGroup();
- nextLogGroup.setLogGroupName("schema-" + String.valueOf(i));
+ LogGroup nextLogGroup = LogGroup.builder().logGroupName("schema-" + String.valueOf(i)).build();
logGroups.add(nextLogGroup);
}
}
- result.withLogGroups(logGroups);
+ responseBuilder.logGroups(logGroups);
if (nextToken != null) {
- result.setNextToken(String.valueOf(nextToken));
+ responseBuilder.nextToken(String.valueOf(nextToken));
}
- return result;
+ return responseBuilder.build();
});
ListSchemasRequest req = new ListSchemasRequest(identity, "queryId", "default");
@@ -183,34 +188,33 @@ public void doListTables()
when(mockAwsLogs.describeLogStreams(nullable(DescribeLogStreamsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> {
DescribeLogStreamsRequest request = (DescribeLogStreamsRequest) invocationOnMock.getArguments()[0];
- DescribeLogStreamsResult result = new DescribeLogStreamsResult();
+ DescribeLogStreamsResponse.Builder responseBuilder = DescribeLogStreamsResponse.builder();
Integer nextToken;
- if (request.getNextToken() == null) {
+ if (request.nextToken() == null) {
nextToken = 1;
}
- else if (Integer.valueOf(request.getNextToken()) < 3) {
- nextToken = Integer.valueOf(request.getNextToken()) + 1;
+ else if (Integer.valueOf(request.nextToken()) < 3) {
+ nextToken = Integer.valueOf(request.nextToken()) + 1;
}
else {
nextToken = null;
}
List logStreams = new ArrayList<>();
- if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) {
+ if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) {
for (int i = 0; i < 10; i++) {
- LogStream nextLogStream = new LogStream();
- nextLogStream.setLogStreamName("table-" + String.valueOf(i));
+ LogStream nextLogStream = LogStream.builder().logStreamName("table-" + String.valueOf(i)).build();
logStreams.add(nextLogStream);
}
}
- result.withLogStreams(logStreams);
+ responseBuilder.logStreams(logStreams);
if (nextToken != null) {
- result.setNextToken(String.valueOf(nextToken));
+ responseBuilder.nextToken(String.valueOf(nextToken));
}
- return result;
+ return responseBuilder.build();
});
ListTablesRequest req = new ListTablesRequest(identity, "queryId", "default",
@@ -238,35 +242,34 @@ public void doGetTable()
when(mockAwsLogs.describeLogStreams(nullable(DescribeLogStreamsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> {
DescribeLogStreamsRequest request = (DescribeLogStreamsRequest) invocationOnMock.getArguments()[0];
- assertTrue(request.getLogGroupName().equals(expectedSchema));
- DescribeLogStreamsResult result = new DescribeLogStreamsResult();
+ assertTrue(request.logGroupName().equals(expectedSchema));
+ DescribeLogStreamsResponse.Builder responseBuilder = DescribeLogStreamsResponse.builder();
Integer nextToken;
- if (request.getNextToken() == null) {
+ if (request.nextToken() == null) {
nextToken = 1;
}
- else if (Integer.valueOf(request.getNextToken()) < 3) {
- nextToken = Integer.valueOf(request.getNextToken()) + 1;
+ else if (Integer.valueOf(request.nextToken()) < 3) {
+ nextToken = Integer.valueOf(request.nextToken()) + 1;
}
else {
nextToken = null;
}
List logStreams = new ArrayList<>();
- if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) {
+ if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) {
for (int i = 0; i < 10; i++) {
- LogStream nextLogStream = new LogStream();
- nextLogStream.setLogStreamName("table-" + String.valueOf(i));
+ LogStream nextLogStream = LogStream.builder().logStreamName("table-" + String.valueOf(i)).build();
logStreams.add(nextLogStream);
}
}
- result.withLogStreams(logStreams);
+ responseBuilder.logStreams(logStreams);
if (nextToken != null) {
- result.setNextToken(String.valueOf(nextToken));
+ responseBuilder.nextToken(String.valueOf(nextToken));
}
- return result;
+ return responseBuilder.build();
});
GetTableRequest req = new GetTableRequest(identity, "queryId", "default", new TableName(expectedSchema, "table-9"), Collections.emptyMap());
@@ -290,36 +293,37 @@ public void doGetTableLayout()
when(mockAwsLogs.describeLogStreams(nullable(DescribeLogStreamsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> {
DescribeLogStreamsRequest request = (DescribeLogStreamsRequest) invocationOnMock.getArguments()[0];
- DescribeLogStreamsResult result = new DescribeLogStreamsResult();
+ DescribeLogStreamsResponse.Builder responseBuilder = DescribeLogStreamsResponse.builder();
Integer nextToken;
- if (request.getNextToken() == null) {
+ if (request.nextToken() == null) {
nextToken = 1;
}
- else if (Integer.valueOf(request.getNextToken()) < 3) {
- nextToken = Integer.valueOf(request.getNextToken()) + 1;
+ else if (Integer.valueOf(request.nextToken()) < 3) {
+ nextToken = Integer.valueOf(request.nextToken()) + 1;
}
else {
nextToken = null;
}
List logStreams = new ArrayList<>();
- if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) {
- int continuation = request.getNextToken() == null ? 0 : Integer.valueOf(request.getNextToken());
+ if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) {
+ int continuation = request.nextToken() == null ? 0 : Integer.valueOf(request.nextToken());
for (int i = 0 + continuation * 100; i < 300; i++) {
- LogStream nextLogStream = new LogStream();
- nextLogStream.setLogStreamName("table-" + String.valueOf(i));
- nextLogStream.setStoredBytes(i * 1000L);
+ LogStream nextLogStream = LogStream.builder()
+ .logStreamName("table-" + String.valueOf(i))
+ .storedBytes(i * 1000L)
+ .build();
logStreams.add(nextLogStream);
}
}
- result.withLogStreams(logStreams);
+ responseBuilder.logStreams(logStreams);
if (nextToken != null) {
- result.setNextToken(String.valueOf(nextToken));
+ responseBuilder.nextToken(String.valueOf(nextToken));
}
- return result;
+ return responseBuilder.build();
});
Map constraintsMap = new HashMap<>();
diff --git a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java
index 6e3ec73623..f8b95fdafc 100644
--- a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java
+++ b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/CloudwatchRecordHandlerTest.java
@@ -39,17 +39,6 @@
import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory;
import com.amazonaws.athena.connector.lambda.security.FederatedIdentity;
import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.logs.AWSLogs;
-import com.amazonaws.services.logs.model.GetLogEventsRequest;
-import com.amazonaws.services.logs.model.GetLogEventsResult;
-import com.amazonaws.services.logs.model.OutputLogEvent;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.S3Object;
-import com.amazonaws.services.s3.model.S3ObjectInputStream;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.collect.ImmutableList;
import com.google.common.io.ByteStreams;
import org.apache.arrow.vector.types.Types;
@@ -63,6 +52,19 @@
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.core.ResponseInputStream;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
+import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsRequest;
+import software.amazon.awssdk.services.cloudwatchlogs.model.GetLogEventsResponse;
+import software.amazon.awssdk.services.cloudwatchlogs.model.OutputLogEvent;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectResponse;
+import software.amazon.awssdk.services.s3.model.PutObjectRequest;
+import software.amazon.awssdk.services.s3.model.PutObjectResponse;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
@@ -77,7 +79,6 @@
import static org.junit.Assert.*;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.nullable;
-import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@RunWith(MockitoJUnitRunner.class)
@@ -94,16 +95,16 @@ public class CloudwatchRecordHandlerTest
private EncryptionKeyFactory keyFactory = new LocalKeyFactory();
@Mock
- private AWSLogs mockAwsLogs;
+ private CloudWatchLogsClient mockAwsLogs;
@Mock
- private AmazonS3 mockS3;
+ private S3Client mockS3;
@Mock
- private AWSSecretsManager mockSecretsManager;
+ private SecretsManagerClient mockSecretsManager;
@Mock
- private AmazonAthena mockAthena;
+ private AthenaClient mockAthena;
@Before
public void setUp()
@@ -116,70 +117,67 @@ public void setUp()
handler = new CloudwatchRecordHandler(mockS3, mockSecretsManager, mockAthena, mockAwsLogs, com.google.common.collect.ImmutableMap.of());
spillReader = new S3BlockSpillReader(mockS3, allocator);
- when(mockS3.putObject(any()))
+ when(mockS3.putObject(any(PutObjectRequest.class), any(RequestBody.class)))
.thenAnswer((InvocationOnMock invocationOnMock) -> {
- InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream();
+ InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream();
ByteHolder byteHolder = new ByteHolder();
byteHolder.setBytes(ByteStreams.toByteArray(inputStream));
synchronized (mockS3Storage) {
mockS3Storage.add(byteHolder);
logger.info("puObject: total size " + mockS3Storage.size());
}
- return mock(PutObjectResult.class);
+ return PutObjectResponse.builder().build();
});
- when(mockS3.getObject(nullable(String.class), nullable(String.class)))
+ when(mockS3.getObject(any(GetObjectRequest.class)))
.thenAnswer((InvocationOnMock invocationOnMock) -> {
- S3Object mockObject = mock(S3Object.class);
ByteHolder byteHolder;
synchronized (mockS3Storage) {
byteHolder = mockS3Storage.get(0);
mockS3Storage.remove(0);
logger.info("getObject: total size " + mockS3Storage.size());
}
- when(mockObject.getObjectContent()).thenReturn(
- new S3ObjectInputStream(
- new ByteArrayInputStream(byteHolder.getBytes()), null));
- return mockObject;
+ return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes()));
});
when(mockAwsLogs.getLogEvents(nullable(GetLogEventsRequest.class))).thenAnswer((InvocationOnMock invocationOnMock) -> {
GetLogEventsRequest request = (GetLogEventsRequest) invocationOnMock.getArguments()[0];
//Check that predicate pushdown was propagated to cloudwatch
- assertNotNull(request.getStartTime());
- assertNotNull(request.getEndTime());
+ assertNotNull(request.startTime());
+ assertNotNull(request.endTime());
- GetLogEventsResult result = new GetLogEventsResult();
+ GetLogEventsResponse.Builder responseBuilder = GetLogEventsResponse.builder();
Integer nextToken;
- if (request.getNextToken() == null) {
+ if (request.nextToken() == null) {
nextToken = 1;
}
- else if (Integer.valueOf(request.getNextToken()) < 3) {
- nextToken = Integer.valueOf(request.getNextToken()) + 1;
+ else if (Integer.valueOf(request.nextToken()) < 3) {
+ nextToken = Integer.valueOf(request.nextToken()) + 1;
}
else {
nextToken = null;
}
List logEvents = new ArrayList<>();
- if (request.getNextToken() == null || Integer.valueOf(request.getNextToken()) < 3) {
- long continuation = request.getNextToken() == null ? 0 : Integer.valueOf(request.getNextToken());
+ if (request.nextToken() == null || Integer.valueOf(request.nextToken()) < 3) {
+ long continuation = request.nextToken() == null ? 0 : Integer.valueOf(request.nextToken());
for (int i = 0; i < 100_000; i++) {
- OutputLogEvent outputLogEvent = new OutputLogEvent();
- outputLogEvent.setMessage("message-" + (continuation * i));
- outputLogEvent.setTimestamp(i * 100L);
+ OutputLogEvent outputLogEvent = OutputLogEvent.builder()
+ .message("message-" + (continuation * i))
+ .timestamp(i * 100L)
+ .build();
logEvents.add(outputLogEvent);
}
}
- result.withEvents(logEvents);
+ responseBuilder.events(logEvents);
if (nextToken != null) {
- result.setNextForwardToken(String.valueOf(nextToken));
+ responseBuilder.nextForwardToken(String.valueOf(nextToken));
}
- return result;
+ return responseBuilder.build();
});
}
diff --git a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/integ/CloudwatchIntegTest.java b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/integ/CloudwatchIntegTest.java
index 4f38711800..c9d1dd9f73 100644
--- a/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/integ/CloudwatchIntegTest.java
+++ b/athena-cloudwatch/src/test/java/com/amazonaws/athena/connectors/cloudwatch/integ/CloudwatchIntegTest.java
@@ -20,12 +20,6 @@
package com.amazonaws.athena.connectors.cloudwatch.integ;
import com.amazonaws.athena.connector.integ.IntegrationTestBase;
-import com.amazonaws.services.athena.model.Row;
-import com.amazonaws.services.logs.AWSLogs;
-import com.amazonaws.services.logs.AWSLogsClientBuilder;
-import com.amazonaws.services.logs.model.DeleteLogGroupRequest;
-import com.amazonaws.services.logs.model.InputLogEvent;
-import com.amazonaws.services.logs.model.PutLogEventsRequest;
import com.google.common.collect.ImmutableList;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -38,6 +32,10 @@
import software.amazon.awscdk.services.iam.PolicyStatement;
import software.amazon.awscdk.services.logs.LogGroup;
import software.amazon.awscdk.services.logs.LogStream;
+import software.amazon.awssdk.services.athena.model.Row;
+import software.amazon.awssdk.services.cloudwatchlogs.CloudWatchLogsClient;
+import software.amazon.awssdk.services.cloudwatchlogs.model.InputLogEvent;
+import software.amazon.awssdk.services.cloudwatchlogs.model.PutLogEventsRequest;
import java.util.ArrayList;
import java.util.List;
@@ -134,20 +132,21 @@ protected void setUpTableData()
logger.info("Setting up Log Group: {}, Log Stream: {}", logGroupName, logStreamName);
logger.info("----------------------------------------------------");
- AWSLogs logsClient = AWSLogsClientBuilder.defaultClient();
+ CloudWatchLogsClient logsClient = CloudWatchLogsClient.create();
try {
- logsClient.putLogEvents(new PutLogEventsRequest()
- .withLogGroupName(logGroupName)
- .withLogStreamName(logStreamName)
- .withLogEvents(
- new InputLogEvent().withTimestamp(currentTimeMillis).withMessage("Space, the final frontier."),
- new InputLogEvent().withTimestamp(fromTimeMillis).withMessage(logMessage),
- new InputLogEvent().withTimestamp(toTimeMillis + 5000)
- .withMessage("To boldly go where no man has gone before!")));
+ logsClient.putLogEvents(PutLogEventsRequest.builder()
+ .logGroupName(logGroupName)
+ .logStreamName(logStreamName)
+ .logEvents(
+ InputLogEvent.builder().timestamp(currentTimeMillis).message("Space, the final frontier.").build(),
+ InputLogEvent.builder().timestamp(fromTimeMillis).message(logMessage).build(),
+ InputLogEvent.builder().timestamp(toTimeMillis + 5000)
+ .message("To boldly go where no man has gone before!").build())
+ .build());
}
finally {
- logsClient.shutdown();
+ logsClient.close();
}
}
@@ -268,13 +267,13 @@ public void selectColumnWithPredicateIntegTest()
String query = String.format("select message from %s.\"%s\".\"%s\" where time between %d and %d;",
lambdaFunctionName, logGroupName, logStreamName, fromTimeMillis, toTimeMillis);
- List rows = startQueryExecution(query).getResultSet().getRows();
+ List rows = startQueryExecution(query).resultSet().rows();
if (!rows.isEmpty()) {
// Remove the column-header row
rows.remove(0);
}
List messages = new ArrayList<>();
- rows.forEach(row -> messages.add(row.getData().get(0).getVarCharValue()));
+ rows.forEach(row -> messages.add(row.data().get(0).varCharValue()));
logger.info("Messages: {}", messages);
assertEquals("Wrong number of log messages found.", 1, messages.size());
assertTrue("Expecting log message: " + logMessage, messages.contains(logMessage));
diff --git a/athena-datalakegen2/Dockerfile b/athena-datalakegen2/Dockerfile
new file mode 100644
index 0000000000..4e1929f607
--- /dev/null
+++ b/athena-datalakegen2/Dockerfile
@@ -0,0 +1,9 @@
+FROM public.ecr.aws/lambda/java:11
+
+# Copy function code and runtime dependencies from Maven layout
+COPY target/athena-datalakegen2-2022.47.1.jar ${LAMBDA_TASK_ROOT}
+# Unpack the jar
+RUN jar xf athena-datalakegen2-2022.47.1.jar
+
+# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
+CMD [ "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2MuxCompositeHandler" ]
diff --git a/athena-datalakegen2/athena-datalakegen2.yaml b/athena-datalakegen2/athena-datalakegen2.yaml
index 32da145587..5890402513 100644
--- a/athena-datalakegen2/athena-datalakegen2.yaml
+++ b/athena-datalakegen2/athena-datalakegen2.yaml
@@ -71,10 +71,9 @@ Resources:
spill_prefix: !Ref SpillPrefix
default: !Ref DefaultConnectionString
FunctionName: !Ref LambdaFunctionName
- Handler: "com.amazonaws.athena.connectors.datalakegen2.DataLakeGen2MuxCompositeHandler"
- CodeUri: "./target/athena-datalakegen2-2022.47.1.jar"
+ PackageType: "Image"
+ ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-datalakegen2:2022.47.1'
Description: "Enables Amazon Athena to communicate with DataLake Gen2 using JDBC"
- Runtime: java11
Timeout: !Ref LambdaTimeout
MemorySize: !Ref LambdaMemory
PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ]
diff --git a/athena-datalakegen2/pom.xml b/athena-datalakegen2/pom.xml
index c72c6c4813..670a4396b9 100644
--- a/athena-datalakegen2/pom.xml
+++ b/athena-datalakegen2/pom.xml
@@ -32,12 +32,18 @@
mssql-jdbc
${mssql.jdbc.version}
-
+
- com.amazonaws
- aws-java-sdk-rds
- ${aws-sdk.version}
+ software.amazon.awssdk
+ rds
+ ${aws-sdk-v2.version}
test
+
+
+ software.amazon.awssdk
+ netty-nio-client
+
+
diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandler.java
index 53fd9386fe..14d27bba34 100644
--- a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandler.java
+++ b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandler.java
@@ -47,8 +47,6 @@
import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
@@ -58,6 +56,8 @@
import org.apache.arrow.vector.types.pojo.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
@@ -111,8 +111,8 @@ public DataLakeGen2MetadataHandler(
@VisibleForTesting
protected DataLakeGen2MetadataHandler(
DatabaseConnectionConfig databaseConnectionConfig,
- AWSSecretsManager secretsManager,
- AmazonAthena athena,
+ SecretsManagerClient secretsManager,
+ AthenaClient athena,
JdbcConnectionFactory jdbcConnectionFactory,
java.util.Map configOptions)
{
diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandler.java
index 0132af948d..577a193ec7 100644
--- a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandler.java
+++ b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandler.java
@@ -24,9 +24,9 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.util.VisibleForTesting;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Map;
@@ -55,7 +55,7 @@ public DataLakeGen2MuxMetadataHandler(java.util.Map configOption
}
@VisibleForTesting
- protected DataLakeGen2MuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory,
+ protected DataLakeGen2MuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory,
Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions)
{
super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions);
diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandler.java
index f637195150..dd7c643f82 100644
--- a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandler.java
+++ b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandler.java
@@ -24,10 +24,10 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.annotations.VisibleForTesting;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Map;
@@ -54,7 +54,7 @@ public DataLakeGen2MuxRecordHandler(java.util.Map configOptions)
}
@VisibleForTesting
- DataLakeGen2MuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory,
+ DataLakeGen2MuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory,
DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions);
diff --git a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2RecordHandler.java b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2RecordHandler.java
index 16b3e5b584..f80e8bd0c0 100644
--- a/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2RecordHandler.java
+++ b/athena-datalakegen2/src/main/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2RecordHandler.java
@@ -28,15 +28,12 @@
import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.athena.AmazonAthenaClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder;
import com.google.common.annotations.VisibleForTesting;
import org.apache.arrow.vector.types.pojo.Schema;
import org.apache.commons.lang3.Validate;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -54,12 +51,12 @@ public DataLakeGen2RecordHandler(java.util.Map configOptions)
}
public DataLakeGen2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions)
{
- this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(),
+ this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(),
new GenericJdbcConnectionFactory(databaseConnectionConfig, DataLakeGen2MetadataHandler.JDBC_PROPERTIES,
new DatabaseConnectionInfo(DataLakeGen2Constants.DRIVER_CLASS, DataLakeGen2Constants.DEFAULT_PORT)), new DataLakeGen2QueryStringBuilder(QUOTE_CHARACTER, new DataLakeGen2FederationExpressionParser(QUOTE_CHARACTER)), configOptions);
}
@VisibleForTesting
- DataLakeGen2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions)
+ DataLakeGen2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions);
this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null");
diff --git a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandlerTest.java b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandlerTest.java
index c37359bab8..c69dcf613a 100644
--- a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandlerTest.java
+++ b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MetadataHandlerTest.java
@@ -38,10 +38,6 @@
import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueResult;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.Schema;
import org.junit.Assert;
@@ -50,6 +46,10 @@
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse;
import java.sql.Connection;
import java.sql.ResultSet;
@@ -77,8 +77,8 @@ public class DataLakeGen2MetadataHandlerTest
private JdbcConnectionFactory jdbcConnectionFactory;
private Connection connection;
private FederatedIdentity federatedIdentity;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
@Before
public void setup()
@@ -89,9 +89,9 @@ public void setup()
this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS);
logger.info(" this.connection.."+ this.connection);
Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
- Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}"));
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
+ Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}").build());
this.dataLakeGen2MetadataHandler = new DataLakeGen2MetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of());
this.federatedIdentity = Mockito.mock(FederatedIdentity.class);
}
diff --git a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandlerTest.java b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandlerTest.java
index 0608abdec3..a2ffc02ec4 100644
--- a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandlerTest.java
+++ b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxMetadataHandlerTest.java
@@ -32,11 +32,11 @@
import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Collections;
import java.util.Map;
@@ -49,8 +49,8 @@ public class DataLakeGen2MuxMetadataHandlerTest
private DataLakeGen2MetadataHandler dataLakeGen2MetadataHandler;
private JdbcMetadataHandler jdbcMetadataHandler;
private BlockAllocator allocator;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
private QueryStatusChecker queryStatusChecker;
private JdbcConnectionFactory jdbcConnectionFactory;
@@ -60,8 +60,8 @@ public void setup()
this.allocator = new BlockAllocatorImpl();
this.dataLakeGen2MetadataHandler = Mockito.mock(DataLakeGen2MetadataHandler.class);
this.metadataHandlerMap = Collections.singletonMap("fakedatabase", this.dataLakeGen2MetadataHandler);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class);
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", "fakedatabase",
diff --git a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandlerTest.java b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandlerTest.java
index 6b7f491bd0..dc2fa02473 100644
--- a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandlerTest.java
+++ b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeGen2MuxRecordHandlerTest.java
@@ -28,13 +28,13 @@
import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.vector.types.pojo.Schema;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.SQLException;
@@ -46,9 +46,9 @@ public class DataLakeGen2MuxRecordHandlerTest
private Map recordHandlerMap;
private DataLakeGen2RecordHandler dataLakeGen2RecordHandler;
private JdbcRecordHandler jdbcRecordHandler;
- private AmazonS3 amazonS3;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private S3Client amazonS3;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
private QueryStatusChecker queryStatusChecker;
private JdbcConnectionFactory jdbcConnectionFactory;
@@ -57,9 +57,9 @@ public void setup()
{
this.dataLakeGen2RecordHandler = Mockito.mock(DataLakeGen2RecordHandler.class);
this.recordHandlerMap = Collections.singletonMap(DataLakeGen2Constants.NAME, this.dataLakeGen2RecordHandler);
- this.amazonS3 = Mockito.mock(AmazonS3.class);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
+ this.amazonS3 = Mockito.mock(S3Client.class);
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
this.queryStatusChecker = Mockito.mock(QueryStatusChecker.class);
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
DatabaseConnectionConfig databaseConnectionConfig = new DatabaseConnectionConfig("testCatalog", DataLakeGen2Constants.NAME,
diff --git a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeRecordHandlerTest.java b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeRecordHandlerTest.java
index 1dd198ae89..912d328fa3 100644
--- a/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeRecordHandlerTest.java
+++ b/athena-datalakegen2/src/test/java/com/amazonaws/athena/connectors/datalakegen2/DataLakeRecordHandlerTest.java
@@ -31,9 +31,6 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.collect.ImmutableMap;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
@@ -41,6 +38,9 @@
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -56,18 +56,18 @@ public class DataLakeRecordHandlerTest
private Connection connection;
private JdbcConnectionFactory jdbcConnectionFactory;
private JdbcSplitQueryBuilder jdbcSplitQueryBuilder;
- private AmazonS3 amazonS3;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private S3Client amazonS3;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
@Before
public void setup()
throws Exception
{
System.setProperty("aws.region", "us-east-1");
- this.amazonS3 = Mockito.mock(AmazonS3.class);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
+ this.amazonS3 = Mockito.mock(S3Client.class);
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
this.connection = Mockito.mock(Connection.class);
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection);
diff --git a/athena-db2-as400/Dockerfile b/athena-db2-as400/Dockerfile
new file mode 100644
index 0000000000..affd37e7bb
--- /dev/null
+++ b/athena-db2-as400/Dockerfile
@@ -0,0 +1,9 @@
+FROM public.ecr.aws/lambda/java:11
+
+# Copy function code and runtime dependencies from Maven layout
+COPY target/athena-db2-as400-2022.47.1.jar ${LAMBDA_TASK_ROOT}
+# Unpack the jar
+RUN jar xf athena-db2-as400-2022.47.1.jar
+
+# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
+CMD [ "com.amazonaws.athena.connectors.db2as400.Db2As400MuxCompositeHandler" ]
\ No newline at end of file
diff --git a/athena-db2-as400/athena-db2-as400.yaml b/athena-db2-as400/athena-db2-as400.yaml
index c84dac623e..ea0a331051 100644
--- a/athena-db2-as400/athena-db2-as400.yaml
+++ b/athena-db2-as400/athena-db2-as400.yaml
@@ -72,10 +72,9 @@ Resources:
spill_prefix: !Ref SpillPrefix
default: !Ref DefaultConnectionString
FunctionName: !Ref LambdaFunctionName
- Handler: "com.amazonaws.athena.connectors.db2as400.Db2As400MuxCompositeHandler"
- CodeUri: "./target/athena-db2-as400-2022.47.1.jar"
+ PackageType: "Image"
+ ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2-as400:2022.47.1'
Description: "Enables Amazon Athena to communicate with DB2 on iSeries (AS400) using JDBC"
- Runtime: java11
Timeout: !Ref LambdaTimeout
MemorySize: !Ref LambdaMemory
PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ]
diff --git a/athena-db2-as400/pom.xml b/athena-db2-as400/pom.xml
index 7c458b8caf..2165ff5019 100644
--- a/athena-db2-as400/pom.xml
+++ b/athena-db2-as400/pom.xml
@@ -33,12 +33,18 @@
jt400
20.0.7
-
+
- com.amazonaws
- aws-java-sdk-rds
- ${aws-sdk.version}
+ software.amazon.awssdk
+ rds
+ ${aws-sdk-v2.version}
test
+
+
+ software.amazon.awssdk
+ netty-nio-client
+
+
diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandler.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandler.java
index b083ceecb5..a589bbc33d 100644
--- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandler.java
+++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandler.java
@@ -49,8 +49,6 @@
import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler;
import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import org.apache.arrow.vector.complex.reader.FieldReader;
@@ -60,6 +58,8 @@
import org.apache.arrow.vector.types.pojo.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
@@ -121,8 +121,8 @@ public Db2As400MetadataHandler(
@VisibleForTesting
protected Db2As400MetadataHandler(
DatabaseConnectionConfig databaseConnectionConfig,
- AWSSecretsManager secretsManager,
- AmazonAthena athena,
+ SecretsManagerClient secretsManager,
+ AthenaClient athena,
JdbcConnectionFactory jdbcConnectionFactory,
java.util.Map configOptions)
{
diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxMetadataHandler.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxMetadataHandler.java
index 490a72696b..705fe5e6ff 100644
--- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxMetadataHandler.java
+++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxMetadataHandler.java
@@ -24,9 +24,9 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.util.VisibleForTesting;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Map;
@@ -55,7 +55,7 @@ public Db2As400MuxMetadataHandler(java.util.Map configOptions)
}
@VisibleForTesting
- protected Db2As400MuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory,
+ protected Db2As400MuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory,
Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions)
{
super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions);
diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxRecordHandler.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxRecordHandler.java
index c2c19cc5d5..3d4706a208 100644
--- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxRecordHandler.java
+++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400MuxRecordHandler.java
@@ -24,10 +24,10 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.annotations.VisibleForTesting;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Map;
@@ -54,7 +54,7 @@ public Db2As400MuxRecordHandler(java.util.Map configOptions)
}
@VisibleForTesting
- Db2As400MuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory,
+ Db2As400MuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory,
DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions);
diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandler.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandler.java
index e78ae1964b..69d0711852 100644
--- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandler.java
+++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandler.java
@@ -29,15 +29,12 @@
import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.athena.AmazonAthenaClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder;
import com.google.common.annotations.VisibleForTesting;
import org.apache.arrow.vector.types.pojo.Schema;
import org.apache.commons.lang3.Validate;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -60,13 +57,13 @@ public Db2As400RecordHandler(java.util.Map configOptions)
*/
public Db2As400RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions)
{
- this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(),
+ this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(),
new GenericJdbcConnectionFactory(databaseConnectionConfig, null,
new DatabaseConnectionInfo(Db2As400Constants.DRIVER_CLASS, Db2As400Constants.DEFAULT_PORT)), new Db2As400QueryStringBuilder(QUOTE_CHARACTER), configOptions);
}
@VisibleForTesting
- Db2As400RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions)
+ Db2As400RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions);
this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null");
diff --git a/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandlerTest.java b/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandlerTest.java
index 5f16236d1a..ce35bab8e8 100644
--- a/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandlerTest.java
+++ b/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400MetadataHandlerTest.java
@@ -41,10 +41,6 @@
import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueResult;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.Schema;
import org.junit.Assert;
@@ -53,6 +49,10 @@
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -80,9 +80,9 @@ public class Db2As400MetadataHandlerTest extends TestBase {
private JdbcConnectionFactory jdbcConnectionFactory;
private Connection connection;
private FederatedIdentity federatedIdentity;
- private AWSSecretsManager secretsManager;
+ private SecretsManagerClient secretsManager;
private BlockAllocator blockAllocator;
- private AmazonAthena athena;
+ private AthenaClient athena;
@Before
public void setup() throws Exception {
@@ -91,9 +91,9 @@ public void setup() throws Exception {
this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS);
logger.info(" this.connection.."+ this.connection);
Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
- Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}"));
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
+ Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}").build());
this.db2As400MetadataHandler = new Db2As400MetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of());
this.federatedIdentity = Mockito.mock(FederatedIdentity.class);
this.blockAllocator = new BlockAllocatorImpl();
diff --git a/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandlerTest.java b/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandlerTest.java
index fa2314b253..4ca5b947a8 100644
--- a/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandlerTest.java
+++ b/athena-db2-as400/src/test/java/com/amazonaws/athena/connectors/db2as400/Db2As400RecordHandlerTest.java
@@ -31,9 +31,6 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.collect.ImmutableMap;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
@@ -41,6 +38,9 @@
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -54,16 +54,16 @@ public class Db2As400RecordHandlerTest {
private Connection connection;
private JdbcConnectionFactory jdbcConnectionFactory;
private JdbcSplitQueryBuilder jdbcSplitQueryBuilder;
- private AmazonS3 amazonS3;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private S3Client amazonS3;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
@Before
public void setup() throws Exception {
System.setProperty("aws.region", "us-east-1");
- this.amazonS3 = Mockito.mock(AmazonS3.class);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
+ this.amazonS3 = Mockito.mock(S3Client.class);
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
this.connection = Mockito.mock(Connection.class);
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection);
diff --git a/athena-db2/Dockerfile b/athena-db2/Dockerfile
new file mode 100644
index 0000000000..0d8231fa29
--- /dev/null
+++ b/athena-db2/Dockerfile
@@ -0,0 +1,9 @@
+FROM public.ecr.aws/lambda/java:11
+
+# Copy function code and runtime dependencies from Maven layout
+COPY target/athena-db2-2022.47.1.jar ${LAMBDA_TASK_ROOT}
+# Unpack the jar
+RUN jar xf athena-db2-2022.47.1.jar
+
+# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
+CMD [ "com.amazonaws.athena.connectors.db2.Db2MuxCompositeHandler" ]
\ No newline at end of file
diff --git a/athena-db2/athena-db2.yaml b/athena-db2/athena-db2.yaml
index cbaaa93af9..7508f16712 100644
--- a/athena-db2/athena-db2.yaml
+++ b/athena-db2/athena-db2.yaml
@@ -72,10 +72,9 @@ Resources:
spill_prefix: !Ref SpillPrefix
default: !Ref DefaultConnectionString
FunctionName: !Ref LambdaFunctionName
- Handler: "com.amazonaws.athena.connectors.db2.Db2MuxCompositeHandler"
- CodeUri: "./target/athena-db2-2022.47.1.jar"
+ PackageType: "Image"
+ ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-db2:2022.47.1'
Description: "Enables Amazon Athena to communicate with DB2 using JDBC"
- Runtime: java11
Timeout: !Ref LambdaTimeout
MemorySize: !Ref LambdaMemory
PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ]
diff --git a/athena-db2/pom.xml b/athena-db2/pom.xml
index fbe105f1b7..e018349754 100644
--- a/athena-db2/pom.xml
+++ b/athena-db2/pom.xml
@@ -33,12 +33,18 @@
jcc
11.5.9.0
-
+
- com.amazonaws
- aws-java-sdk-rds
- ${aws-sdk.version}
+ software.amazon.awssdk
+ rds
+ ${aws-sdk-v2.version}
test
+
+
+ software.amazon.awssdk
+ netty-nio-client
+
+
diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandler.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandler.java
index d5dec08242..965197ff0a 100644
--- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandler.java
+++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandler.java
@@ -55,8 +55,6 @@
import com.amazonaws.athena.connectors.jdbc.manager.JdbcArrowTypeConverter;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler;
import com.amazonaws.athena.connectors.jdbc.manager.PreparedStatementBuilder;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
@@ -67,6 +65,8 @@
import org.apache.arrow.vector.types.pojo.Schema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
@@ -130,8 +130,8 @@ public Db2MetadataHandler(
@VisibleForTesting
protected Db2MetadataHandler(
DatabaseConnectionConfig databaseConnectionConfig,
- AWSSecretsManager secretsManager,
- AmazonAthena athena,
+ SecretsManagerClient secretsManager,
+ AthenaClient athena,
JdbcConnectionFactory jdbcConnectionFactory,
java.util.Map configOptions)
{
diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxMetadataHandler.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxMetadataHandler.java
index ab596649ab..2fd0df2842 100644
--- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxMetadataHandler.java
+++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxMetadataHandler.java
@@ -24,9 +24,9 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcMetadataHandlerFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import org.apache.arrow.util.VisibleForTesting;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Map;
@@ -55,7 +55,7 @@ public Db2MuxMetadataHandler(java.util.Map configOptions)
}
@VisibleForTesting
- protected Db2MuxMetadataHandler(AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory,
+ protected Db2MuxMetadataHandler(SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory,
Map metadataHandlerMap, DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions)
{
super(secretsManager, athena, jdbcConnectionFactory, metadataHandlerMap, databaseConnectionConfig, configOptions);
diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxRecordHandler.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxRecordHandler.java
index 1919316e39..94fbe8c395 100644
--- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxRecordHandler.java
+++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2MuxRecordHandler.java
@@ -24,10 +24,10 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandlerFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.annotations.VisibleForTesting;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Map;
@@ -54,7 +54,7 @@ public Db2MuxRecordHandler(java.util.Map configOptions)
}
@VisibleForTesting
- Db2MuxRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory,
+ Db2MuxRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory,
DatabaseConnectionConfig databaseConnectionConfig, Map recordHandlerMap, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, jdbcConnectionFactory, databaseConnectionConfig, recordHandlerMap, configOptions);
diff --git a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2RecordHandler.java b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2RecordHandler.java
index 442d19fee3..8e9941f220 100644
--- a/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2RecordHandler.java
+++ b/athena-db2/src/main/java/com/amazonaws/athena/connectors/db2/Db2RecordHandler.java
@@ -29,15 +29,12 @@
import com.amazonaws.athena.connectors.jdbc.manager.JDBCUtil;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcRecordHandler;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.athena.AmazonAthenaClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder;
import com.google.common.annotations.VisibleForTesting;
import org.apache.arrow.vector.types.pojo.Schema;
import org.apache.commons.lang3.Validate;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -61,13 +58,13 @@ public Db2RecordHandler(java.util.Map configOptions)
*/
public Db2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, java.util.Map configOptions)
{
- this(databaseConnectionConfig, AmazonS3ClientBuilder.defaultClient(), AWSSecretsManagerClientBuilder.defaultClient(), AmazonAthenaClientBuilder.defaultClient(),
+ this(databaseConnectionConfig, S3Client.create(), SecretsManagerClient.create(), AthenaClient.create(),
new GenericJdbcConnectionFactory(databaseConnectionConfig, null,
new DatabaseConnectionInfo(Db2Constants.DRIVER_CLASS, Db2Constants.DEFAULT_PORT)), new Db2QueryStringBuilder(QUOTE_CHARACTER, new Db2FederationExpressionParser(QUOTE_CHARACTER)), configOptions);
}
@VisibleForTesting
- Db2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions)
+ Db2RecordHandler(DatabaseConnectionConfig databaseConnectionConfig, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, JdbcConnectionFactory jdbcConnectionFactory, JdbcSplitQueryBuilder jdbcSplitQueryBuilder, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, databaseConnectionConfig, jdbcConnectionFactory, configOptions);
this.jdbcSplitQueryBuilder = Validate.notNull(jdbcSplitQueryBuilder, "query builder must not be null");
diff --git a/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandlerTest.java b/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandlerTest.java
index 02ff20fa93..81a1ebb474 100644
--- a/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandlerTest.java
+++ b/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2MetadataHandlerTest.java
@@ -41,10 +41,6 @@
import com.amazonaws.athena.connectors.jdbc.connection.DatabaseConnectionConfig;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest;
-import com.amazonaws.services.secretsmanager.model.GetSecretValueResult;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.Schema;
import org.junit.Assert;
@@ -53,6 +49,10 @@
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueRequest;
+import software.amazon.awssdk.services.secretsmanager.model.GetSecretValueResponse;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -81,9 +81,9 @@ public class Db2MetadataHandlerTest extends TestBase {
private JdbcConnectionFactory jdbcConnectionFactory;
private Connection connection;
private FederatedIdentity federatedIdentity;
- private AWSSecretsManager secretsManager;
+ private SecretsManagerClient secretsManager;
private BlockAllocator blockAllocator;
- private AmazonAthena athena;
+ private AthenaClient athena;
@Before
public void setup() throws Exception {
@@ -92,9 +92,9 @@ public void setup() throws Exception {
this.connection = Mockito.mock(Connection.class, Mockito.RETURNS_DEEP_STUBS);
logger.info(" this.connection.."+ this.connection);
Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
- Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(new GetSecretValueRequest().withSecretId("testSecret")))).thenReturn(new GetSecretValueResult().withSecretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}"));
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
+ Mockito.when(this.secretsManager.getSecretValue(Mockito.eq(GetSecretValueRequest.builder().secretId("testSecret").build()))).thenReturn(GetSecretValueResponse.builder().secretString("{\"user\": \"testUser\", \"password\": \"testPassword\"}").build());
this.db2MetadataHandler = new Db2MetadataHandler(databaseConnectionConfig, this.secretsManager, this.athena, this.jdbcConnectionFactory, com.google.common.collect.ImmutableMap.of());
this.federatedIdentity = Mockito.mock(FederatedIdentity.class);
this.blockAllocator = new BlockAllocatorImpl();
diff --git a/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2RecordHandlerTest.java b/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2RecordHandlerTest.java
index 801db06233..b7de058f8d 100644
--- a/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2RecordHandlerTest.java
+++ b/athena-db2/src/test/java/com/amazonaws/athena/connectors/db2/Db2RecordHandlerTest.java
@@ -31,9 +31,6 @@
import com.amazonaws.athena.connectors.jdbc.connection.JdbcConnectionFactory;
import com.amazonaws.athena.connectors.jdbc.connection.JdbcCredentialProvider;
import com.amazonaws.athena.connectors.jdbc.manager.JdbcSplitQueryBuilder;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.collect.ImmutableMap;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Schema;
@@ -41,6 +38,9 @@
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.sql.Connection;
import java.sql.PreparedStatement;
@@ -55,16 +55,16 @@ public class Db2RecordHandlerTest {
private Connection connection;
private JdbcConnectionFactory jdbcConnectionFactory;
private JdbcSplitQueryBuilder jdbcSplitQueryBuilder;
- private AmazonS3 amazonS3;
- private AWSSecretsManager secretsManager;
- private AmazonAthena athena;
+ private S3Client amazonS3;
+ private SecretsManagerClient secretsManager;
+ private AthenaClient athena;
@Before
public void setup() throws Exception {
System.setProperty("aws.region", "us-east-1");
- this.amazonS3 = Mockito.mock(AmazonS3.class);
- this.secretsManager = Mockito.mock(AWSSecretsManager.class);
- this.athena = Mockito.mock(AmazonAthena.class);
+ this.amazonS3 = Mockito.mock(S3Client.class);
+ this.secretsManager = Mockito.mock(SecretsManagerClient.class);
+ this.athena = Mockito.mock(AthenaClient.class);
this.connection = Mockito.mock(Connection.class);
this.jdbcConnectionFactory = Mockito.mock(JdbcConnectionFactory.class);
Mockito.when(this.jdbcConnectionFactory.getConnection(nullable(JdbcCredentialProvider.class))).thenReturn(this.connection);
diff --git a/athena-docdb/Dockerfile b/athena-docdb/Dockerfile
new file mode 100644
index 0000000000..06e8a5c907
--- /dev/null
+++ b/athena-docdb/Dockerfile
@@ -0,0 +1,9 @@
+FROM public.ecr.aws/lambda/java:11
+
+# Copy function code and runtime dependencies from Maven layout
+COPY target/athena-docdb-2022.47.1.jar ${LAMBDA_TASK_ROOT}
+# Unpack the jar
+RUN jar xf athena-docdb-2022.47.1.jar
+
+# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
+CMD [ "com.amazonaws.athena.connectors.docdb.DocDBCompositeHandler" ]
\ No newline at end of file
diff --git a/athena-docdb/athena-docdb.yaml b/athena-docdb/athena-docdb.yaml
index efb7fc0b2e..588b05f52e 100644
--- a/athena-docdb/athena-docdb.yaml
+++ b/athena-docdb/athena-docdb.yaml
@@ -66,10 +66,9 @@ Resources:
spill_prefix: !Ref SpillPrefix
default_docdb: !Ref DocDBConnectionString
FunctionName: !Ref AthenaCatalogName
- Handler: "com.amazonaws.athena.connectors.docdb.DocDBCompositeHandler"
- CodeUri: "./target/athena-docdb-2022.47.1.jar"
+ PackageType: "Image"
+ ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-docdb:2022.47.1'
Description: "Enables Amazon Athena to communicate with DocumentDB, making your DocumentDB data accessible via SQL."
- Runtime: java11
Timeout: !Ref LambdaTimeout
MemorySize: !Ref LambdaMemory
PermissionsBoundary: !If [ HasPermissionsBoundary, !Ref PermissionsBoundaryARN, !Ref "AWS::NoValue" ]
diff --git a/athena-docdb/pom.xml b/athena-docdb/pom.xml
index 5dd645c20a..8982ee0159 100644
--- a/athena-docdb/pom.xml
+++ b/athena-docdb/pom.xml
@@ -28,11 +28,11 @@
2022.47.1
test
-
+
- com.amazonaws
- aws-java-sdk-docdb
- ${aws-sdk.version}
+ software.amazon.awssdk
+ docdb
+ ${aws-sdk-v2.version}
test
diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java
index 5a25b6f50c..191269fbd6 100644
--- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java
+++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java
@@ -42,11 +42,6 @@
import com.amazonaws.athena.connector.lambda.metadata.optimizations.OptimizationSubType;
import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory;
import com.amazonaws.athena.connectors.docdb.qpt.DocDBQueryPassthrough;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.glue.AWSGlue;
-import com.amazonaws.services.glue.model.Database;
-import com.amazonaws.services.glue.model.Table;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.mongodb.client.MongoClient;
@@ -58,6 +53,11 @@
import org.bson.Document;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.glue.GlueClient;
+import software.amazon.awssdk.services.glue.model.Database;
+import software.amazon.awssdk.services.glue.model.Table;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.ArrayList;
import java.util.LinkedHashSet;
@@ -95,13 +95,13 @@ public class DocDBMetadataHandler
//is indeed enabled for use by this connector.
private static final String DOCDB_METADATA_FLAG = "docdb-metadata-flag";
//Used to filter out Glue tables which lack a docdb metadata flag.
- private static final TableFilter TABLE_FILTER = (Table table) -> table.getParameters().containsKey(DOCDB_METADATA_FLAG);
+ private static final TableFilter TABLE_FILTER = (Table table) -> table.parameters().containsKey(DOCDB_METADATA_FLAG);
//The number of documents to scan when attempting to infer schema from an DocDB collection.
private static final int SCHEMA_INFERRENCE_NUM_DOCS = 10;
// used to filter out Glue databases which lack the docdb-metadata-flag in the URI.
- private static final DatabaseFilter DB_FILTER = (Database database) -> (database.getLocationUri() != null && database.getLocationUri().contains(DOCDB_METADATA_FLAG));
+ private static final DatabaseFilter DB_FILTER = (Database database) -> (database.locationUri() != null && database.locationUri().contains(DOCDB_METADATA_FLAG));
- private final AWSGlue glue;
+ private final GlueClient glue;
private final DocDBConnectionFactory connectionFactory;
private final DocDBQueryPassthrough queryPassthrough = new DocDBQueryPassthrough();
@@ -114,11 +114,11 @@ public DocDBMetadataHandler(java.util.Map configOptions)
@VisibleForTesting
protected DocDBMetadataHandler(
- AWSGlue glue,
+ GlueClient glue,
DocDBConnectionFactory connectionFactory,
EncryptionKeyFactory keyFactory,
- AWSSecretsManager secretsManager,
- AmazonAthena athena,
+ SecretsManagerClient secretsManager,
+ AthenaClient athena,
String spillBucket,
String spillPrefix,
java.util.Map configOptions)
diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java
index ecba05bc18..4b0459f57e 100644
--- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java
+++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java
@@ -28,12 +28,6 @@
import com.amazonaws.athena.connector.lambda.handlers.RecordHandler;
import com.amazonaws.athena.connector.lambda.records.ReadRecordsRequest;
import com.amazonaws.athena.connectors.docdb.qpt.DocDBQueryPassthrough;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.athena.AmazonAthenaClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.services.secretsmanager.AWSSecretsManagerClientBuilder;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoCursor;
@@ -44,6 +38,9 @@
import org.bson.Document;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.Map;
import java.util.TreeMap;
@@ -81,15 +78,15 @@ public class DocDBRecordHandler
public DocDBRecordHandler(java.util.Map configOptions)
{
this(
- AmazonS3ClientBuilder.defaultClient(),
- AWSSecretsManagerClientBuilder.defaultClient(),
- AmazonAthenaClientBuilder.defaultClient(),
+ S3Client.create(),
+ SecretsManagerClient.create(),
+ AthenaClient.create(),
new DocDBConnectionFactory(),
configOptions);
}
@VisibleForTesting
- protected DocDBRecordHandler(AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, DocDBConnectionFactory connectionFactory, java.util.Map configOptions)
+ protected DocDBRecordHandler(S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, DocDBConnectionFactory connectionFactory, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, SOURCE_TYPE, configOptions);
this.connectionFactory = connectionFactory;
diff --git a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandlerTest.java b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandlerTest.java
index 866ecf164b..a69d0f4d31 100644
--- a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandlerTest.java
+++ b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandlerTest.java
@@ -39,9 +39,6 @@
import com.amazonaws.athena.connector.lambda.metadata.MetadataRequestType;
import com.amazonaws.athena.connector.lambda.metadata.MetadataResponse;
import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.glue.AWSGlue;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.collect.ImmutableList;
import com.mongodb.client.FindIterable;
import com.mongodb.client.MongoClient;
@@ -63,6 +60,9 @@
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.glue.GlueClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.ArrayList;
import java.util.Arrays;
@@ -100,13 +100,13 @@ public class DocDBMetadataHandlerTest
private MongoClient mockClient;
@Mock
- private AWSGlue awsGlue;
+ private GlueClient awsGlue;
@Mock
- private AWSSecretsManager secretsManager;
+ private SecretsManagerClient secretsManager;
@Mock
- private AmazonAthena mockAthena;
+ private AthenaClient mockAthena;
@Before
public void setUp()
diff --git a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java
index 18a1947c79..866bc1ac41 100644
--- a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java
+++ b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandlerTest.java
@@ -40,14 +40,6 @@
import com.amazonaws.athena.connector.lambda.records.RemoteReadRecordsResponse;
import com.amazonaws.athena.connector.lambda.security.EncryptionKeyFactory;
import com.amazonaws.athena.connector.lambda.security.LocalKeyFactory;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.glue.AWSGlue;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.PutObjectResult;
-import com.amazonaws.services.s3.model.S3Object;
-import com.amazonaws.services.s3.model.S3ObjectInputStream;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
import com.google.common.collect.ImmutableList;
import com.google.common.io.ByteStreams;
import com.mongodb.client.FindIterable;
@@ -71,6 +63,17 @@
import org.mockito.junit.MockitoJUnitRunner;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.athena.AthenaClient;
+import software.amazon.awssdk.services.glue.GlueClient;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
+
+import software.amazon.awssdk.core.ResponseInputStream;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectResponse;
+import software.amazon.awssdk.services.s3.model.PutObjectRequest;
+import software.amazon.awssdk.services.s3.model.PutObjectResponse;
import java.io.ByteArrayInputStream;
import java.io.InputStream;
@@ -100,7 +103,7 @@ public class DocDBRecordHandlerTest
private DocDBRecordHandler handler;
private BlockAllocator allocator;
private List mockS3Storage = new ArrayList<>();
- private AmazonS3 amazonS3;
+ private S3Client amazonS3;
private S3BlockSpillReader spillReader;
private Schema schemaForRead;
private EncryptionKeyFactory keyFactory = new LocalKeyFactory();
@@ -116,16 +119,16 @@ public class DocDBRecordHandlerTest
private MongoClient mockClient;
@Mock
- private AWSSecretsManager mockSecretsManager;
+ private SecretsManagerClient mockSecretsManager;
@Mock
- private AmazonAthena mockAthena;
+ private AthenaClient mockAthena;
@Mock
- private AWSGlue awsGlue;
+ private GlueClient awsGlue;
@Mock
- private AWSSecretsManager secretsManager;
+ private SecretsManagerClient secretsManager;
@Mock
MongoDatabase mockDatabase;
@@ -171,7 +174,7 @@ public void setUp()
allocator = new BlockAllocatorImpl();
- amazonS3 = mock(AmazonS3.class);
+ amazonS3 = mock(S3Client.class);
mockDatabase = mock(MongoDatabase.class);
mockCollection = mock(MongoCollection.class);
mockIterable = mock(FindIterable.class);
@@ -179,31 +182,27 @@ public void setUp()
when(mockClient.getDatabase(eq(DEFAULT_SCHEMA))).thenReturn(mockDatabase);
when(mockDatabase.getCollection(eq(TEST_TABLE))).thenReturn(mockCollection);
- when(amazonS3.putObject(any()))
+ when(amazonS3.putObject(any(PutObjectRequest.class), any(RequestBody.class)))
.thenAnswer((InvocationOnMock invocationOnMock) -> {
- InputStream inputStream = ((PutObjectRequest) invocationOnMock.getArguments()[0]).getInputStream();
+ InputStream inputStream = ((RequestBody) invocationOnMock.getArguments()[1]).contentStreamProvider().newStream();
ByteHolder byteHolder = new ByteHolder();
byteHolder.setBytes(ByteStreams.toByteArray(inputStream));
synchronized (mockS3Storage) {
mockS3Storage.add(byteHolder);
logger.info("puObject: total size " + mockS3Storage.size());
}
- return mock(PutObjectResult.class);
+ return PutObjectResponse.builder().build();
});
- when(amazonS3.getObject(nullable(String.class), nullable(String.class)))
+ when(amazonS3.getObject(any(GetObjectRequest.class)))
.thenAnswer((InvocationOnMock invocationOnMock) -> {
- S3Object mockObject = mock(S3Object.class);
ByteHolder byteHolder;
synchronized (mockS3Storage) {
byteHolder = mockS3Storage.get(0);
mockS3Storage.remove(0);
logger.info("getObject: total size " + mockS3Storage.size());
}
- when(mockObject.getObjectContent()).thenReturn(
- new S3ObjectInputStream(
- new ByteArrayInputStream(byteHolder.getBytes()), null));
- return mockObject;
+ return new ResponseInputStream<>(GetObjectResponse.builder().build(), new ByteArrayInputStream(byteHolder.getBytes()));
});
handler = new DocDBRecordHandler(amazonS3, mockSecretsManager, mockAthena, connectionFactory, com.google.common.collect.ImmutableMap.of());
diff --git a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java
index f20a65ceeb..bf0a314e8a 100644
--- a/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java
+++ b/athena-docdb/src/test/java/com/amazonaws/athena/connectors/docdb/integ/DocDbIntegTest.java
@@ -27,16 +27,6 @@
import com.amazonaws.athena.connector.integ.data.ConnectorVpcAttributes;
import com.amazonaws.athena.connector.integ.data.SecretsManagerCredentials;
import com.amazonaws.athena.connector.integ.providers.ConnectorPackagingAttributesProvider;
-import com.amazonaws.services.athena.model.Row;
-import com.amazonaws.services.docdb.AmazonDocDB;
-import com.amazonaws.services.docdb.AmazonDocDBClientBuilder;
-import com.amazonaws.services.docdb.model.DBCluster;
-import com.amazonaws.services.docdb.model.DescribeDBClustersRequest;
-import com.amazonaws.services.docdb.model.DescribeDBClustersResult;
-import com.amazonaws.services.lambda.AWSLambda;
-import com.amazonaws.services.lambda.AWSLambdaClientBuilder;
-import com.amazonaws.services.lambda.model.InvocationType;
-import com.amazonaws.services.lambda.model.InvokeRequest;
import com.google.common.collect.ImmutableMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -57,6 +47,14 @@
import software.amazon.awscdk.services.ec2.Vpc;
import software.amazon.awscdk.services.ec2.VpcAttributes;
import software.amazon.awscdk.services.iam.PolicyDocument;
+import software.amazon.awssdk.services.athena.model.Row;
+import software.amazon.awssdk.services.docdb.DocDbClient;
+import software.amazon.awssdk.services.docdb.model.DBCluster;
+import software.amazon.awssdk.services.docdb.model.DescribeDbClustersRequest;
+import software.amazon.awssdk.services.docdb.model.DescribeDbClustersResponse;
+import software.amazon.awssdk.services.lambda.LambdaClient;
+import software.amazon.awssdk.services.lambda.model.InvocationType;
+import software.amazon.awssdk.services.lambda.model.InvokeRequest;
import java.util.ArrayList;
import java.util.HashMap;
@@ -192,15 +190,16 @@ private Stack getDocDbStack() {
* Lambda. All exceptions thrown here will be caught in the calling function.
*/
private Endpoint getClusterData() {
- AmazonDocDB docDbClient = AmazonDocDBClientBuilder.defaultClient();
+ DocDbClient docDbClient = DocDbClient.create();
try {
- DescribeDBClustersResult dbClustersResult = docDbClient.describeDBClusters(new DescribeDBClustersRequest()
- .withDBClusterIdentifier(dbClusterName));
- DBCluster cluster = dbClustersResult.getDBClusters().get(0);
- return new Endpoint(cluster.getEndpoint(), cluster.getPort());
+ DescribeDbClustersResponse dbClustersResponse = docDbClient.describeDBClusters(DescribeDbClustersRequest.builder()
+ .dbClusterIdentifier(dbClusterName)
+ .build());
+ DBCluster cluster = dbClustersResponse.dbClusters().get(0);
+ return new Endpoint(cluster.endpoint(), cluster.port());
}
finally {
- docDbClient.shutdown();
+ docDbClient.close();
}
}
@@ -263,20 +262,21 @@ protected void setUpTableData()
logger.info("----------------------------------------------------");
String mongoLambdaName = "integ-mongodb-" + UUID.randomUUID();
- AWSLambda lambdaClient = AWSLambdaClientBuilder.defaultClient();
+ LambdaClient lambdaClient = LambdaClient.create();
CloudFormationClient cloudFormationMongoClient = new CloudFormationClient(getMongoLambdaStack(mongoLambdaName));
try {
// Create the Lambda function.
cloudFormationMongoClient.createStack();
// Invoke the Lambda function.
- lambdaClient.invoke(new InvokeRequest()
- .withFunctionName(mongoLambdaName)
- .withInvocationType(InvocationType.RequestResponse));
+ lambdaClient.invoke(InvokeRequest.builder()
+ .functionName(mongoLambdaName)
+ .invocationType(InvocationType.REQUEST_RESPONSE)
+ .build());
}
finally {
// Delete the Lambda function.
cloudFormationMongoClient.deleteStack();
- lambdaClient.shutdown();
+ lambdaClient.close();
}
}
@@ -371,13 +371,13 @@ public void selectColumnWithPredicateIntegTest()
String query = String.format("select title from %s.%s.%s where year > 2012;",
lambdaFunctionName, docdbDbName, docdbTableMovies);
- List rows = startQueryExecution(query).getResultSet().getRows();
+ List rows = startQueryExecution(query).resultSet().rows();
if (!rows.isEmpty()) {
// Remove the column-header row
rows.remove(0);
}
List titles = new ArrayList<>();
- rows.forEach(row -> titles.add(row.getData().get(0).getVarCharValue()));
+ rows.forEach(row -> titles.add(row.data().get(0).varCharValue()));
logger.info("Titles: {}", titles);
assertEquals("Wrong number of DB records found.", 1, titles.size());
assertTrue("Movie title not found: Interstellar.", titles.contains("Interstellar"));
diff --git a/athena-dynamodb/Dockerfile b/athena-dynamodb/Dockerfile
new file mode 100644
index 0000000000..868346d735
--- /dev/null
+++ b/athena-dynamodb/Dockerfile
@@ -0,0 +1,9 @@
+FROM public.ecr.aws/lambda/java:11
+
+# Copy function code and runtime dependencies from Maven layout
+COPY target/athena-dynamodb-2022.47.1.jar ${LAMBDA_TASK_ROOT}
+# Unpack the jar
+RUN jar xf athena-dynamodb-2022.47.1.jar
+
+# Set the CMD to your handler (could also be done as a parameter override outside of the Dockerfile)
+CMD [ "com.amazonaws.athena.connectors.dynamodb.DynamoDBCompositeHandler" ]
\ No newline at end of file
diff --git a/athena-dynamodb/athena-dynamodb.yaml b/athena-dynamodb/athena-dynamodb.yaml
index ae3e023f58..f44ac89665 100644
--- a/athena-dynamodb/athena-dynamodb.yaml
+++ b/athena-dynamodb/athena-dynamodb.yaml
@@ -66,10 +66,9 @@ Resources:
spill_prefix: !Ref SpillPrefix
kms_key_id: !If [HasKMSKeyId, !Ref KMSKeyId, !Ref "AWS::NoValue"]
FunctionName: !Ref AthenaCatalogName
- Handler: "com.amazonaws.athena.connectors.dynamodb.DynamoDBCompositeHandler"
- CodeUri: "./target/athena-dynamodb-2022.47.1.jar"
+ PackageType: "Image"
+ ImageUri: !Sub '292517598671.dkr.ecr.${AWS::Region}.amazonaws.com/athena-federation-repository-dynamodb:2022.47.1'
Description: "Enables Amazon Athena to communicate with DynamoDB, making your tables accessible via SQL"
- Runtime: java11
Timeout: !Ref LambdaTimeout
MemorySize: !Ref LambdaMemory
Role: !If [NotHasLambdaRole, !GetAtt FunctionRole.Arn, !Ref LambdaRole]
diff --git a/athena-dynamodb/pom.xml b/athena-dynamodb/pom.xml
index 28d8f26fdb..81b5e92c61 100644
--- a/athena-dynamodb/pom.xml
+++ b/athena-dynamodb/pom.xml
@@ -8,17 +8,6 @@
4.0.0
athena-dynamodb
2022.47.1
-
-
-
- software.amazon.awssdk
- bom
- 2.28.26
- pom
- import
-
-
-
com.amazonaws
@@ -31,20 +20,16 @@
athena-federation-integ-test
2022.47.1
test
-
-
- com.amazonaws
- aws-java-sdk-sts
-
-
software.amazon.awssdk
dynamodb
+ ${aws-sdk-v2.version}
software.amazon.awssdk
dynamodb-enhanced
+ ${aws-sdk-v2.version}
com.amazonaws
@@ -55,6 +40,7 @@
software.amazon.awssdk
url-connection-client
+ ${aws-sdk-v2.version}
test
@@ -111,13 +97,10 @@
test-jar
test
-
- software.amazon.awssdk
- sdk-core
-
software.amazon.awssdk
sts
+ ${aws-sdk-v2.version}
diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java
index f6be93e6b1..d472551f13 100644
--- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java
+++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBMetadataHandler.java
@@ -57,14 +57,7 @@
import com.amazonaws.athena.connectors.dynamodb.util.DDBTableUtils;
import com.amazonaws.athena.connectors.dynamodb.util.DDBTypeUtils;
import com.amazonaws.athena.connectors.dynamodb.util.IncrementingValueNameProducer;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.glue.AWSGlue;
-import com.amazonaws.services.glue.model.Database;
-import com.amazonaws.services.glue.model.ErrorDetails;
-import com.amazonaws.services.glue.model.FederationSourceErrorCode;
-import com.amazonaws.services.glue.model.Table;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.util.json.Jackson;
+import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import org.apache.arrow.vector.complex.reader.FieldReader;
@@ -74,10 +67,17 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.enhanced.dynamodb.document.EnhancedDocument;
+import software.amazon.awssdk.services.athena.AthenaClient;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.ExecuteStatementRequest;
import software.amazon.awssdk.services.dynamodb.model.ExecuteStatementResponse;
+import software.amazon.awssdk.services.glue.GlueClient;
+import software.amazon.awssdk.services.glue.model.Database;
+import software.amazon.awssdk.services.glue.model.ErrorDetails;
+import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode;
+import software.amazon.awssdk.services.glue.model.Table;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.util.ArrayList;
import java.util.Collections;
@@ -134,15 +134,15 @@ public class DynamoDBMetadataHandler
// defines the value that should be present in the Glue Database URI to enable the DB for DynamoDB.
static final String DYNAMO_DB_FLAG = "dynamo-db-flag";
// used to filter out Glue tables which lack indications of being used for DDB.
- private static final TableFilter TABLE_FILTER = (Table table) -> table.getStorageDescriptor().getLocation().contains(DYNAMODB)
- || (table.getParameters() != null && DYNAMODB.equals(table.getParameters().get("classification")))
- || (table.getStorageDescriptor().getParameters() != null && DYNAMODB.equals(table.getStorageDescriptor().getParameters().get("classification")));
+ private static final TableFilter TABLE_FILTER = (Table table) -> table.storageDescriptor().location().contains(DYNAMODB)
+ || (table.parameters() != null && DYNAMODB.equals(table.parameters().get("classification")))
+ || (table.storageDescriptor().parameters() != null && DYNAMODB.equals(table.storageDescriptor().parameters().get("classification")));
// used to filter out Glue databases which lack the DYNAMO_DB_FLAG in the URI.
- private static final DatabaseFilter DB_FILTER = (Database database) -> (database.getLocationUri() != null && database.getLocationUri().contains(DYNAMO_DB_FLAG));
+ private static final DatabaseFilter DB_FILTER = (Database database) -> (database.locationUri() != null && database.locationUri().contains(DYNAMO_DB_FLAG));
private final ThrottlingInvoker invoker;
private final DynamoDbClient ddbClient;
- private final AWSGlue glueClient;
+ private final GlueClient glueClient;
private final DynamoDBTableResolver tableResolver;
private final DDBQueryPassthrough queryPassthrough;
@@ -162,12 +162,12 @@ public DynamoDBMetadataHandler(java.util.Map configOptions)
@VisibleForTesting
DynamoDBMetadataHandler(
EncryptionKeyFactory keyFactory,
- AWSSecretsManager secretsManager,
- AmazonAthena athena,
+ SecretsManagerClient secretsManager,
+ AthenaClient athena,
String spillBucket,
String spillPrefix,
DynamoDbClient ddbClient,
- AWSGlue glueClient,
+ GlueClient glueClient,
java.util.Map configOptions)
{
super(glueClient, keyFactory, secretsManager, athena, SOURCE_TYPE, spillBucket, spillPrefix, configOptions);
@@ -258,7 +258,7 @@ public ListTablesResponse doListTables(BlockAllocator allocator, ListTablesReque
public GetTableResponse doGetQueryPassthroughSchema(BlockAllocator allocator, GetTableRequest request) throws Exception
{
if (!request.isQueryPassthrough()) {
- throw new AthenaConnectorException("No Query passed through [{}]" + request, new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString()).withErrorMessage("No Query passed through [{}]" + request));
+ throw new AthenaConnectorException("No Query passed through [" + request + "]", ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).errorMessage("No Query passed through [" + request + "]").build());
}
queryPassthrough.verify(request.getQueryPassthroughArguments());
@@ -327,7 +327,7 @@ public void enhancePartitionSchema(SchemaBuilder partitionSchemaBuilder, GetTabl
table = tableResolver.getTableMetadata(tableName);
}
catch (TimeoutException e) {
- throw new AthenaConnectorException(e.getMessage(), new ErrorDetails().withErrorCode(FederationSourceErrorCode.OperationTimeoutException.toString()).withErrorMessage(e.getMessage()));
+ throw new AthenaConnectorException(e.getMessage(), ErrorDetails.builder().errorCode(FederationSourceErrorCode.OPERATION_TIMEOUT_EXCEPTION.toString()).errorMessage(e.getMessage()).build());
}
// add table name so we don't have to do case insensitive resolution again
partitionSchemaBuilder.addMetadata(TABLE_METADATA, table.getName());
@@ -449,7 +449,14 @@ private void precomputeAdditionalMetadata(Set columnsToIgnore, Map partitionMetadata = partitions.getSchema().getCustomMetadata();
String partitionType = partitionMetadata.get(PARTITION_TYPE_METADATA);
if (partitionType == null) {
- throw new AthenaConnectorException(String.format("No metadata %s defined in Schema %s", PARTITION_TYPE_METADATA, partitions.getSchema()), new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString()));
+ throw new AthenaConnectorException(String.format("No metadata %s defined in Schema %s", PARTITION_TYPE_METADATA, partitions.getSchema()), ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build());
}
if (QUERY_PARTITION_TYPE.equals(partitionType)) {
String hashKeyName = partitionMetadata.get(HASH_KEY_NAME_METADATA);
@@ -530,7 +537,7 @@ else if (SCAN_PARTITION_TYPE.equals(partitionType)) {
return new GetSplitsResponse(request.getCatalogName(), splits, null);
}
else {
- throw new AthenaConnectorException("Unexpected partition type " + partitionType, new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString()));
+ throw new AthenaConnectorException("Unexpected partition type " + partitionType, ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build());
}
}
diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandler.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandler.java
index 4ecf630889..8215b578ce 100644
--- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandler.java
+++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/DynamoDBRecordHandler.java
@@ -36,13 +36,8 @@
import com.amazonaws.athena.connectors.dynamodb.util.DDBPredicateUtils;
import com.amazonaws.athena.connectors.dynamodb.util.DDBRecordMetadata;
import com.amazonaws.athena.connectors.dynamodb.util.DDBTypeUtils;
-import com.amazonaws.services.athena.AmazonAthena;
-import com.amazonaws.services.glue.model.ErrorDetails;
-import com.amazonaws.services.glue.model.FederationSourceErrorCode;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.secretsmanager.AWSSecretsManager;
-import com.amazonaws.util.json.Jackson;
import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
@@ -52,6 +47,7 @@
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.enhanced.dynamodb.document.EnhancedDocument;
+import software.amazon.awssdk.services.athena.AthenaClient;
import software.amazon.awssdk.services.dynamodb.DynamoDbClient;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.ExecuteStatementRequest;
@@ -60,6 +56,10 @@
import software.amazon.awssdk.services.dynamodb.model.QueryResponse;
import software.amazon.awssdk.services.dynamodb.model.ScanRequest;
import software.amazon.awssdk.services.dynamodb.model.ScanResponse;
+import software.amazon.awssdk.services.glue.model.ErrorDetails;
+import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.secretsmanager.SecretsManagerClient;
import java.io.IOException;
import java.util.ArrayList;
@@ -131,7 +131,7 @@ public ThrottlingInvoker load(String tableName)
}
@VisibleForTesting
- DynamoDBRecordHandler(DynamoDbClient ddbClient, AmazonS3 amazonS3, AWSSecretsManager secretsManager, AmazonAthena athena, String sourceType, java.util.Map configOptions)
+ DynamoDBRecordHandler(DynamoDbClient ddbClient, S3Client amazonS3, SecretsManagerClient secretsManager, AthenaClient athena, String sourceType, java.util.Map configOptions)
{
super(amazonS3, secretsManager, athena, sourceType, configOptions);
this.ddbClient = ddbClient;
@@ -209,7 +209,7 @@ protected void readWithConstraint(BlockSpiller spiller, ReadRecordsRequest recor
private void handleQueryPassthroughPartiQLQuery(BlockSpiller spiller, ReadRecordsRequest recordsRequest, QueryStatusChecker queryStatusChecker)
{
if (!recordsRequest.getConstraints().isQueryPassThrough()) {
- throw new AthenaConnectorException("Attempting to readConstraints with Query Passthrough without PartiQL Query", new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString()));
+ throw new AthenaConnectorException("Attempting to readConstraints with Query Passthrough without PartiQL Query", ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build());
}
queryPassthrough.verify(recordsRequest.getConstraints().getQueryPassthroughArguments());
@@ -324,11 +324,12 @@ private QueryRequest buildQueryRequest(Split split, String tableName, Schema sch
Map expressionAttributeValues = new HashMap<>();
if (rangeKeyFilter != null || nonKeyFilter != null) {
try {
- expressionAttributeNames.putAll(Jackson.getObjectMapper().readValue(split.getProperty(EXPRESSION_NAMES_METADATA), STRING_MAP_TYPE_REFERENCE));
+ ObjectMapper objectMapper = new ObjectMapper();
+ expressionAttributeNames.putAll(objectMapper.readValue(split.getProperty(EXPRESSION_NAMES_METADATA), STRING_MAP_TYPE_REFERENCE));
expressionAttributeValues.putAll(EnhancedDocument.fromJson(split.getProperty(EXPRESSION_VALUES_METADATA)).toMap());
}
catch (IOException e) {
- throw new AthenaConnectorException(e.getMessage(), new ErrorDetails().withErrorCode(FederationSourceErrorCode.InternalServiceException.toString()));
+ throw new AthenaConnectorException(e.getMessage(), ErrorDetails.builder().errorCode(FederationSourceErrorCode.INTERNAL_SERVICE_EXCEPTION.toString()).build());
}
}
@@ -391,11 +392,12 @@ private ScanRequest buildScanRequest(Split split, String tableName, Schema schem
Map expressionAttributeValues = new HashMap<>();
if (rangeKeyFilter != null || nonKeyFilter != null) {
try {
- expressionAttributeNames.putAll(Jackson.getObjectMapper().readValue(split.getProperty(EXPRESSION_NAMES_METADATA), STRING_MAP_TYPE_REFERENCE));
+ ObjectMapper objectMapper = new ObjectMapper();
+ expressionAttributeNames.putAll(objectMapper.readValue(split.getProperty(EXPRESSION_NAMES_METADATA), STRING_MAP_TYPE_REFERENCE));
expressionAttributeValues.putAll(EnhancedDocument.fromJson(split.getProperty(EXPRESSION_VALUES_METADATA)).toMap());
}
catch (IOException e) {
- throw new AthenaConnectorException(e.getMessage(), new ErrorDetails().withErrorCode(FederationSourceErrorCode.InternalServiceException.toString()));
+ throw new AthenaConnectorException(e.getMessage(), ErrorDetails.builder().errorCode(FederationSourceErrorCode.INTERNAL_SERVICE_EXCEPTION.toString()).build());
}
}
@@ -468,7 +470,7 @@ public Map next()
}
}
catch (TimeoutException | ExecutionException e) {
- throw new AthenaConnectorException(e.getMessage(), new ErrorDetails().withErrorCode(FederationSourceErrorCode.OperationTimeoutException.toString()));
+ throw new AthenaConnectorException(e.getMessage(), ErrorDetails.builder().errorCode(FederationSourceErrorCode.OPERATION_TIMEOUT_EXCEPTION.toString()).build());
}
currentPageIterator.set(iterator);
if (iterator.hasNext()) {
diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/qpt/DDBQueryPassthrough.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/qpt/DDBQueryPassthrough.java
index 09a250a1fd..68a6d70403 100644
--- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/qpt/DDBQueryPassthrough.java
+++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/qpt/DDBQueryPassthrough.java
@@ -21,11 +21,11 @@
import com.amazonaws.athena.connector.lambda.exceptions.AthenaConnectorException;
import com.amazonaws.athena.connector.lambda.metadata.optimizations.querypassthrough.QueryPassthroughSignature;
-import com.amazonaws.services.glue.model.ErrorDetails;
-import com.amazonaws.services.glue.model.FederationSourceErrorCode;
import com.google.common.collect.ImmutableSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.glue.model.ErrorDetails;
+import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode;
import java.util.Arrays;
import java.util.List;
@@ -80,7 +80,7 @@ public void customConnectorVerifications(Map engineQptArguments)
// Immediately check if the statement starts with "SELECT"
if (!upperCaseStatement.startsWith("SELECT")) {
- throw new AthenaConnectorException("Statement does not start with SELECT.", new ErrorDetails().withErrorCode(FederationSourceErrorCode.OperationNotSupportedException.toString()));
+ throw new AthenaConnectorException("Statement does not start with SELECT.", ErrorDetails.builder().errorCode(FederationSourceErrorCode.OPERATION_NOT_SUPPORTED_EXCEPTION.toString()).build());
}
// List of disallowed keywords
@@ -89,7 +89,7 @@ public void customConnectorVerifications(Map engineQptArguments)
// Check if the statement contains any disallowed keywords
for (String keyword : disallowedKeywords) {
if (upperCaseStatement.contains(keyword)) {
- throw new AthenaConnectorException("Unaccepted operation; only SELECT statements are allowed. Found: " + keyword, new ErrorDetails().withErrorCode(FederationSourceErrorCode.OperationNotSupportedException.toString()));
+ throw new AthenaConnectorException("Unaccepted operation; only SELECT statements are allowed. Found: " + keyword, ErrorDetails.builder().errorCode(FederationSourceErrorCode.OPERATION_NOT_SUPPORTED_EXCEPTION.toString()).build());
}
}
}
diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBFieldResolver.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBFieldResolver.java
index cebb175715..0a186d7763 100644
--- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBFieldResolver.java
+++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBFieldResolver.java
@@ -23,12 +23,12 @@
import com.amazonaws.athena.connector.lambda.exceptions.AthenaConnectorException;
import com.amazonaws.athena.connectors.dynamodb.util.DDBRecordMetadata;
import com.amazonaws.athena.connectors.dynamodb.util.DDBTypeUtils;
-import com.amazonaws.services.glue.model.ErrorDetails;
-import com.amazonaws.services.glue.model.FederationSourceErrorCode;
import org.apache.arrow.vector.types.Types;
import org.apache.arrow.vector.types.pojo.Field;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.services.glue.model.ErrorDetails;
+import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode;
import java.util.Map;
@@ -90,7 +90,7 @@ public Object getFieldValue(Field field, Object originalValue)
}
throw new AthenaConnectorException("Invalid field value encountered in DB record for field: " + field +
- ",value: " + fieldValue, new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString()));
+ ",value: " + fieldValue, ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build());
}
// Return the field value of a map key
diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBTableResolver.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBTableResolver.java
index 290359507b..7ae1fd436e 100644
--- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBTableResolver.java
+++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/resolver/DynamoDBTableResolver.java
@@ -24,8 +24,6 @@
import com.amazonaws.athena.connectors.dynamodb.model.DynamoDBPaginatedTables;
import com.amazonaws.athena.connectors.dynamodb.model.DynamoDBTable;
import com.amazonaws.athena.connectors.dynamodb.util.DDBTableUtils;
-import com.amazonaws.services.glue.model.ErrorDetails;
-import com.amazonaws.services.glue.model.FederationSourceErrorCode;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import org.apache.arrow.vector.types.pojo.Schema;
@@ -35,6 +33,8 @@
import software.amazon.awssdk.services.dynamodb.model.ListTablesRequest;
import software.amazon.awssdk.services.dynamodb.model.ListTablesResponse;
import software.amazon.awssdk.services.dynamodb.model.ResourceNotFoundException;
+import software.amazon.awssdk.services.glue.model.ErrorDetails;
+import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode;
import java.util.ArrayList;
import java.util.Collection;
@@ -121,7 +121,7 @@ public Schema getTableSchema(String tableName)
return DDBTableUtils.peekTableForSchema(caseInsensitiveMatch.get(), invoker, ddbClient);
}
else {
- throw new AthenaConnectorException(e.getMessage(), new ErrorDetails().withErrorCode(FederationSourceErrorCode.EntityNotFoundException.toString()));
+ throw new AthenaConnectorException(e.getMessage(), ErrorDetails.builder().errorCode(FederationSourceErrorCode.ENTITY_NOT_FOUND_EXCEPTION.toString()).build());
}
}
}
diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBPredicateUtils.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBPredicateUtils.java
index 3c38e4dec7..bf7aa0854f 100644
--- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBPredicateUtils.java
+++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBPredicateUtils.java
@@ -27,12 +27,12 @@
import com.amazonaws.athena.connector.lambda.exceptions.AthenaConnectorException;
import com.amazonaws.athena.connectors.dynamodb.model.DynamoDBIndex;
import com.amazonaws.athena.connectors.dynamodb.model.DynamoDBTable;
-import com.amazonaws.services.glue.model.ErrorDetails;
-import com.amazonaws.services.glue.model.FederationSourceErrorCode;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
import software.amazon.awssdk.services.dynamodb.model.ProjectionType;
+import software.amazon.awssdk.services.glue.model.ErrorDetails;
+import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode;
import java.util.ArrayList;
import java.util.HashSet;
@@ -192,7 +192,7 @@ private static void validateColumnRange(Range range)
case EXACTLY:
break;
case BELOW:
- throw new AthenaConnectorException("Low marker should never use BELOW bound", new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString()));
+ throw new AthenaConnectorException("Low marker should never use BELOW bound", ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build());
default:
throw new AssertionError("Unhandled lower bound: " + range.getLow().getBound());
}
@@ -200,7 +200,7 @@ private static void validateColumnRange(Range range)
if (!range.getHigh().isUpperUnbounded()) {
switch (range.getHigh().getBound()) {
case ABOVE:
- throw new AthenaConnectorException("High marker should never use ABOVE bound", new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString()));
+ throw new AthenaConnectorException("High marker should never use ABOVE bound", ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build());
case EXACTLY:
break;
case BELOW:
diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTableUtils.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTableUtils.java
index 923d03ec48..98332f78c1 100644
--- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTableUtils.java
+++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTableUtils.java
@@ -24,8 +24,6 @@
import com.amazonaws.athena.connector.lambda.exceptions.AthenaConnectorException;
import com.amazonaws.athena.connectors.dynamodb.model.DynamoDBIndex;
import com.amazonaws.athena.connectors.dynamodb.model.DynamoDBTable;
-import com.amazonaws.services.glue.model.ErrorDetails;
-import com.amazonaws.services.glue.model.FederationSourceErrorCode;
import com.google.common.collect.ImmutableList;
import org.apache.arrow.vector.types.pojo.Schema;
import org.slf4j.Logger;
@@ -44,6 +42,8 @@
import software.amazon.awssdk.services.dynamodb.model.ScanRequest;
import software.amazon.awssdk.services.dynamodb.model.ScanResponse;
import software.amazon.awssdk.services.dynamodb.model.TableDescription;
+import software.amazon.awssdk.services.glue.model.ErrorDetails;
+import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode;
import java.util.List;
import java.util.Map;
@@ -170,7 +170,7 @@ public static Schema peekTableForSchema(String tableName, ThrottlingInvoker invo
logger.warn("Failed to retrieve table schema due to KMS issue, empty schema for table: {}. Error Message: {}", tableName, runtimeException.getMessage());
}
else {
- throw new AthenaConnectorException(runtimeException.getMessage(), new ErrorDetails().withErrorCode(FederationSourceErrorCode.OperationTimeoutException.toString()));
+ throw new AthenaConnectorException(runtimeException.getMessage(), ErrorDetails.builder().errorCode(FederationSourceErrorCode.OPERATION_TIMEOUT_EXCEPTION.toString()).build());
}
}
return schemaBuilder.build();
diff --git a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java
index b5f27a434a..d1abcdefaa 100644
--- a/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java
+++ b/athena-dynamodb/src/main/java/com/amazonaws/athena/connectors/dynamodb/util/DDBTypeUtils.java
@@ -32,8 +32,6 @@
import com.amazonaws.athena.connector.lambda.domain.predicate.ConstraintProjector;
import com.amazonaws.athena.connector.lambda.exceptions.AthenaConnectorException;
import com.amazonaws.athena.connectors.dynamodb.resolver.DynamoDBFieldResolver;
-import com.amazonaws.services.glue.model.ErrorDetails;
-import com.amazonaws.services.glue.model.FederationSourceErrorCode;
import org.apache.arrow.vector.FieldVector;
import org.apache.arrow.vector.holders.NullableBitHolder;
import org.apache.arrow.vector.types.Types;
@@ -52,6 +50,8 @@
import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.EnhancedAttributeValue;
import software.amazon.awssdk.enhanced.dynamodb.internal.converter.attribute.StringAttributeConverter;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;
+import software.amazon.awssdk.services.glue.model.ErrorDetails;
+import software.amazon.awssdk.services.glue.model.FederationSourceErrorCode;
import software.amazon.awssdk.utils.ImmutableMap;
import java.math.BigDecimal;
@@ -191,7 +191,7 @@ else if (enhancedAttributeValue.isMap()) {
}
String attributeTypeName = (value == null || value.getClass() == null) ? "null" : enhancedAttributeValue.type().name();
- throw new AthenaConnectorException("Unknown Attribute Value Type[" + attributeTypeName + "] for field[" + key + "]", new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString()));
+ throw new AthenaConnectorException("Unknown Attribute Value Type[" + attributeTypeName + "] for field[" + key + "]", ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build());
}
/**
@@ -265,7 +265,7 @@ public static Field getArrowFieldFromDDBType(String attributeName, String attrib
case MAP:
return new Field(attributeName, FieldType.nullable(Types.MinorType.STRUCT.getType()), null);
default:
- throw new AthenaConnectorException("Unknown type[" + attributeType + "] for field[" + attributeName + "]", new ErrorDetails().withErrorCode(FederationSourceErrorCode.InvalidInputException.toString()));
+ throw new AthenaConnectorException("Unknown type[" + attributeType + "] for field[" + attributeName + "]", ErrorDetails.builder().errorCode(FederationSourceErrorCode.INVALID_INPUT_EXCEPTION.toString()).build());
}
}
@@ -385,7 +385,7 @@ public static List