diff --git a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400EnvironmentProperties.java b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400EnvironmentProperties.java
index 46dcde3b14..e4473f833d 100644
--- a/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400EnvironmentProperties.java
+++ b/athena-db2-as400/src/main/java/com/amazonaws/athena/connectors/db2as400/Db2As400EnvironmentProperties.java
@@ -37,14 +37,14 @@ public Map connectionPropertiesToEnvironment(Map
         HashMap environment = new HashMap<>();
 
         // now construct jdbc string
-        String connectionString = "db2as400://jdbc:as400://" + connectionProperties.get(HOST)
-                + ";" + connectionProperties.getOrDefault(JDBC_PARAMS, "");
+        String connectionString = String.join("", "db2as400://jdbc:as400://", connectionProperties.get(HOST),
+                ";", connectionProperties.getOrDefault(JDBC_PARAMS, ""));
         if (connectionProperties.containsKey(SECRET_NAME)) {
             if (connectionProperties.containsKey(JDBC_PARAMS)) { // need to add delimiter
                 connectionString = connectionString + ";";
             }
-            connectionString = connectionString + ":${" + connectionProperties.get(SECRET_NAME) + "}";
+            connectionString = String.join("", connectionString, ":${", connectionProperties.get(SECRET_NAME), "}");
         }
 
         logger.debug("Constructed connection string: {}", connectionString);
diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java
index 5b21336bab..8b57dd6685 100644
--- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java
+++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBMetadataHandler.java
@@ -96,6 +96,8 @@ public class DocDBMetadataHandler
     //The Glue table property that indicates that a table matching the name of an DocDB table
     //is indeed enabled for use by this connector.
     private static final String DOCDB_METADATA_FLAG = "docdb-metadata-flag";
+    //The prefix of a connection string
+    protected static final String DOCDB_CONN_STRING_PREFIX = "mongodb://";
     //Used to filter out Glue tables which lack a docdb metadata flag.
     private static final TableFilter TABLE_FILTER = (Table table) -> table.parameters().containsKey(DOCDB_METADATA_FLAG);
     //The number of documents to scan when attempting to infer schema from an DocDB collection.
@@ -134,7 +136,7 @@ private MongoClient getOrCreateConn(MetadataRequest request)
     {
         String connStr = getConnStr(request);
         if (configOptions.containsKey(SECRET_NAME) && !hasEmbeddedSecret(connStr)) {
-            connStr = connStr.substring(0, 10) + "${" + configOptions.get(SECRET_NAME) + "}@" + connStr.substring(10);
+            connStr = String.join("", connStr.substring(0, DOCDB_CONN_STRING_PREFIX.length()), "${", configOptions.get(SECRET_NAME), "}@", connStr.substring(DOCDB_CONN_STRING_PREFIX.length()));
         }
         String endpoint = resolveSecrets(connStr);
         return connectionFactory.getOrCreateConn(endpoint);
diff --git a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java
index d1d536ee73..446578361f 100644
--- a/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java
+++ b/athena-docdb/src/main/java/com/amazonaws/athena/connectors/docdb/DocDBRecordHandler.java
@@ -49,6 +49,7 @@
 import static com.amazonaws.athena.connector.lambda.handlers.GlueMetadataHandler.SOURCE_TABLE_PROPERTY;
 import static com.amazonaws.athena.connectors.docdb.DocDBFieldResolver.DEFAULT_FIELD_RESOLVER;
 import static com.amazonaws.athena.connectors.docdb.DocDBMetadataHandler.DOCDB_CONN_STR;
+import static com.amazonaws.athena.connectors.docdb.DocDBMetadataHandler.DOCDB_CONN_STRING_PREFIX;
 
 /**
  * Handles data read record requests for the Athena DocumentDB Connector.
@@ -110,7 +111,7 @@ private MongoClient getOrCreateConn(Split split)
             throw new RuntimeException(DOCDB_CONN_STR + " Split property is null! Unable to create connection.");
         }
         if (configOptions.containsKey(SECRET_NAME) && !hasEmbeddedSecret(connStr)) {
-            connStr = connStr.substring(0, 10) + "${" + configOptions.get(SECRET_NAME) + "}@" + connStr.substring(10);
+            connStr = String.join("", connStr.substring(0, DOCDB_CONN_STRING_PREFIX.length()), "${", configOptions.get(SECRET_NAME), "}@", connStr.substring(DOCDB_CONN_STRING_PREFIX.length()));
         }
         String endpoint = resolveSecrets(connStr);
         return connectionFactory.getOrCreateConn(endpoint);
diff --git a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/JdbcEnvironmentProperties.java b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/JdbcEnvironmentProperties.java
index db4a349155..e25b14b60d 100644
--- a/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/JdbcEnvironmentProperties.java
+++ b/athena-jdbc/src/main/java/com/amazonaws/athena/connectors/jdbc/JdbcEnvironmentProperties.java
@@ -39,8 +39,8 @@ public Map connectionPropertiesToEnvironment(Map
         HashMap environment = new HashMap<>();
 
         // now construct jdbc string
-        String connectionString = getConnectionStringPrefix(connectionProperties) + connectionProperties.get(HOST)
-                + ":" + connectionProperties.get(PORT) + getDatabase(connectionProperties) + getJdbcParameters(connectionProperties);
+        String connectionString = String.join("", getConnectionStringPrefix(connectionProperties), connectionProperties.get(HOST),
+                ":", connectionProperties.get(PORT), getDatabase(connectionProperties), getJdbcParameters(connectionProperties));
 
         environment.put(DEFAULT, connectionString);
         return environment;
diff --git a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java
index 334c3ea139..1979c3596e 100644
--- a/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java
+++ b/athena-oracle/src/main/java/com/amazonaws/athena/connectors/oracle/OracleMetadataHandler.java
@@ -161,13 +161,11 @@ public void getPartitions(final BlockWriter blockWriter, final GetTableLayoutReq
         LOGGER.debug("{}: Schema {}, table {}", getTableLayoutRequest.getQueryId(), transformString(getTableLayoutRequest.getTableName().getSchemaName(), true),
                 transformString(getTableLayoutRequest.getTableName().getTableName(), true));
         try (Connection connection = getJdbcConnectionFactory().getConnection(getCredentialProvider())) {
-            List parameters = Arrays.asList(transformString(getTableLayoutRequest.getTableName().getTableName(), true));
-            //try (Statement statement = connection.createStatement(); ResultSet resultSet = statement.executeQuery(GET_PARTITIONS_QUERY + ))
+            List parameters = Arrays.asList(transformString(getTableLayoutRequest.getTableName().getTableName(), true));
             try (PreparedStatement preparedStatement = new PreparedStatementBuilder().withConnection(connection).withQuery(GET_PARTITIONS_QUERY).withParameters(parameters).build();
                     ResultSet resultSet = preparedStatement.executeQuery()) {
                 // Return a single partition if no partitions defined
                 if (!resultSet.next()) {
-                    LOGGER.debug("here");
                     blockWriter.writeRows((Block block, int rowNum) -> {
                         LOGGER.debug("Parameters: " + BLOCK_PARTITION_COLUMN_NAME + " " + rowNum + " " + ALL_PARTITIONS);
                         block.setValue(BLOCK_PARTITION_COLUMN_NAME, rowNum, ALL_PARTITIONS);
diff --git a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java
index 9f292c12aa..37421c2a02 100644
--- a/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java
+++ b/athena-snowflake/src/test/java/com/amazonaws/athena/connectors/snowflake/SnowflakeMetadataHandlerTest.java
@@ -116,7 +116,6 @@ public void doGetTableLayout()
         PreparedStatement countsPreparedStatement = Mockito.mock(PreparedStatement.class);
         String GET_PKEY_COUNTS_QUERY = "SELECT \"pkey\", count(*) as COUNTS FROM \"testSchema\".\"testTable\" GROUP BY \"pkey\" ORDER BY COUNTS DESC";
-        System.err.println("test:" + GET_PKEY_COUNTS_QUERY);
         String[] countsColumns = new String[] {"pkey", SnowflakeMetadataHandler.COUNTS_COLUMN_NAME};
         Object[][] countsValues = {{"a", 1}};
         ResultSet countsResultSet = mockResultSet(countsColumns, countsValues, new AtomicInteger(-1));
@@ -181,7 +180,6 @@ public void doGetTableLayoutSinglePartition()
         PreparedStatement countsPreparedStatement = Mockito.mock(PreparedStatement.class);
         String GET_PKEY_COUNTS_QUERY = "SELECT \"pkey\", count(*) as COUNTS FROM \"testSchema\".\"testTable\" GROUP BY \"pkey\" ORDER BY COUNTS DESC";
-        System.err.println("test:" + GET_PKEY_COUNTS_QUERY);
         String[] countsColumns = new String[] {"pkey", SnowflakeMetadataHandler.COUNTS_COLUMN_NAME};
         Object[][] countsValues = {{"a", 1}};
         ResultSet countsResultSet = mockResultSet(countsColumns, countsValues, new AtomicInteger(-1));
@@ -225,7 +223,6 @@ public void doGetTableLayoutMaxPartition()
         long pageCount = (long) (Math.ceil(totalActualRecordCount / MAX_PARTITION_COUNT));
         long partitionActualRecordCount = (totalActualRecordCount <= 10000) ? (long) totalActualRecordCount : pageCount;
         double limit = (int) Math.ceil(totalActualRecordCount / partitionActualRecordCount);
-//        double limit = 1;
         long offset = 0;
         String[] columns = {"partition"};
         int[] types = {Types.VARCHAR};