[iceberg] Reformat incorrectly formatted files
As part of the project, we maintain a set of checkstyle guidelines.
We generally recommend that developers use IntelliJ for Java
development. If you format with the officially provided style rules
in IntelliJ, a number of files in the Iceberg connector turn out not
to follow the official formatting rules, and Maven checkstyle isn't
capable of finding them all automatically.

The changes in this commit are the result of running IntelliJ's
reformat procedure on all files in the Iceberg connector using
Presto's officially provided style rules.
ZacBlanco committed Feb 7, 2025
1 parent 8789cd9 commit 8eefeff
Showing 22 changed files with 108 additions and 93 deletions.
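
For context, the hunks below mainly apply three conventions from the provided style rules: throws clauses move from the method signature line to their own continuation line, wrapped parameter lists use a fixed continuation indent instead of aligning under the opening parenthesis, and anonymous class bodies put the opening brace on its own line. The following sketch shows the target style on a hypothetical class that is not part of the commit; the exact indent widths are an assumption based on the style rules.

import java.io.IOException;
import java.io.InputStream;

// Hypothetical illustration only; not a file touched by this commit.
public class ExampleDelegatingStream
        extends InputStream
{
    private final InputStream delegate;
    private final String name;

    // Wrapped parameters sit on a fixed continuation indent instead of
    // aligning under the opening parenthesis.
    public ExampleDelegatingStream(InputStream delegate,
            String name)
    {
        this.delegate = delegate;
        this.name = name;
    }

    // The throws clause moves from the signature line to its own
    // continuation line.
    @Override
    public int read()
            throws IOException
    {
        return delegate.read();
    }

    @Override
    public void close()
            throws IOException
    {
        // Anonymous classes place the opening brace on its own line,
        // matching the CacheLoader hunk below.
        Runnable cleanup = new Runnable()
        {
            @Override
            public void run()
            {
                // nothing to release in this sketch
            }
        };
        cleanup.run();
        delegate.close();
    }
}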
@@ -83,7 +83,8 @@ public HdfsInputStream(SeekableInputStream delegate)
}

@Override
public int read() throws IOException
public int read()
throws IOException
{
return delegate.read();
}
@@ -96,19 +97,22 @@ public int read(byte[] b, int off, int len)
}

@Override
public long getPos() throws IOException
public long getPos()
throws IOException
{
return delegate.getPos();
}

@Override
public void seek(long newPos) throws IOException
public void seek(long newPos)
throws IOException
{
delegate.seek(newPos);
}

@Override
public void close() throws IOException
public void close()
throws IOException
{
delegate.close();
}
@@ -181,15 +181,16 @@ private static synchronized void initTableLevelLockCache(long evictionTimeout)
{
if (commitLockCache == null) {
commitLockCache = CacheBuilder.newBuilder()
.expireAfterAccess(evictionTimeout, TimeUnit.MILLISECONDS)
.build(
new CacheLoader<String, ReentrantLock>() {
@Override
public ReentrantLock load(String fullName)
{
return new ReentrantLock();
}
});
.expireAfterAccess(evictionTimeout, TimeUnit.MILLISECONDS)
.build(
new CacheLoader<String, ReentrantLock>()
{
@Override
public ReentrantLock load(String fullName)
{
return new ReentrantLock();
}
});
}
}

@@ -304,11 +305,11 @@ public void commit(@Nullable TableMetadata base, TableMetadata metadata)
PrestoPrincipal owner = new PrestoPrincipal(USER, table.getOwner());
PrincipalPrivileges privileges = new PrincipalPrivileges(
ImmutableMultimap.<String, HivePrivilegeInfo>builder()
.put(table.getOwner(), new HivePrivilegeInfo(SELECT, true, owner, owner))
.put(table.getOwner(), new HivePrivilegeInfo(INSERT, true, owner, owner))
.put(table.getOwner(), new HivePrivilegeInfo(UPDATE, true, owner, owner))
.put(table.getOwner(), new HivePrivilegeInfo(DELETE, true, owner, owner))
.build(),
.put(table.getOwner(), new HivePrivilegeInfo(SELECT, true, owner, owner))
.put(table.getOwner(), new HivePrivilegeInfo(INSERT, true, owner, owner))
.put(table.getOwner(), new HivePrivilegeInfo(UPDATE, true, owner, owner))
.put(table.getOwner(), new HivePrivilegeInfo(DELETE, true, owner, owner))
.build(),
ImmutableMultimap.of());
if (base == null) {
metastore.createTable(metastoreContext, table, privileges, emptyList());
@@ -457,8 +457,8 @@ private static class PagePartitioner
private Page transformedPage;

public PagePartitioner(PageIndexerFactory pageIndexerFactory,
List<PartitionColumn> columns,
ConnectorSession session)
List<PartitionColumn> columns,
ConnectorSession session)
{
this.pageIndexer = pageIndexerFactory.createPageIndexer(columns.stream()
.map(PartitionColumn::getResultType)
@@ -130,7 +130,6 @@
import static com.facebook.presto.iceberg.ExpressionConverter.toIcebergExpression;
import static com.facebook.presto.iceberg.FileContent.POSITION_DELETES;
import static com.facebook.presto.iceberg.FileContent.fromIcebergFileContent;
import static com.facebook.presto.iceberg.FileFormat.PARQUET;
import static com.facebook.presto.iceberg.IcebergColumnHandle.DATA_SEQUENCE_NUMBER_COLUMN_HANDLE;
import static com.facebook.presto.iceberg.IcebergColumnHandle.PATH_COLUMN_HANDLE;
import static com.facebook.presto.iceberg.IcebergErrorCode.ICEBERG_INVALID_FORMAT_VERSION;
@@ -856,10 +855,10 @@ public static long getDataSequenceNumber(ContentFile<?> file)
* @param requestedSchema If provided, only delete files with this schema will be provided
*/
public static CloseableIterable<DeleteFile> getDeleteFiles(Table table,
long snapshot,
TupleDomain<IcebergColumnHandle> filter,
Optional<Set<Integer>> requestedPartitionSpec,
Optional<Set<Integer>> requestedSchema)
long snapshot,
TupleDomain<IcebergColumnHandle> filter,
Optional<Set<Integer>> requestedPartitionSpec,
Optional<Set<Integer>> requestedSchema)
{
Expression filterExpression = toIcebergExpression(filter);
CloseableIterable<FileScanTask> fileTasks = table.newScan().useSnapshot(snapshot).filter(filterExpression).planFiles();
@@ -1035,9 +1034,9 @@ private static class DeleteFilesIterator
private DeleteFile currentFile;

private DeleteFilesIterator(Map<Integer, PartitionSpec> partitionSpecsById,
CloseableIterator<FileScanTask> fileTasks,
Optional<Set<Integer>> requestedPartitionSpec,
Optional<Set<Integer>> requestedSchema)
CloseableIterator<FileScanTask> fileTasks,
Optional<Set<Integer>> requestedPartitionSpec,
Optional<Set<Integer>> requestedSchema)
{
this.partitionSpecsById = partitionSpecsById;
this.fileTasks = fileTasks;
@@ -1221,8 +1220,8 @@ public static Optional<PartitionData> partitionDataFromStructLike(PartitionSpec

/**
* Get the metadata location for target {@link Table},
* considering iceberg table properties {@code WRITE_METADATA_LOCATION}
* */
* considering iceberg table properties {@code WRITE_METADATA_LOCATION}
*/
public static String metadataLocation(Table icebergTable)
{
String metadataLocation = icebergTable.properties().get(TableProperties.WRITE_METADATA_LOCATION);
@@ -1237,8 +1236,8 @@ public static String metadataLocation(Table icebergTable)

/**
* Get the data location for target {@link Table},
* considering iceberg table properties {@code WRITE_DATA_LOCATION}, {@code OBJECT_STORE_PATH} and {@code WRITE_FOLDER_STORAGE_LOCATION}
* */
* considering iceberg table properties {@code WRITE_DATA_LOCATION}, {@code OBJECT_STORE_PATH} and {@code WRITE_FOLDER_STORAGE_LOCATION}
*/
public static String dataLocation(Table icebergTable)
{
Map<String, String> properties = icebergTable.properties();
@@ -141,10 +141,10 @@ private List<ColumnMetadata> getPartitionColumnsMetadata(List<PartitionField> fi
private List<ColumnMetadata> getColumnMetadata(List<Types.NestedField> columns)
{
return columns.stream().map(column -> new ColumnMetadata(column.name(),
RowType.from(ImmutableList.of(
new RowType.Field(Optional.of("min"), toPrestoType(column.type(), typeManager)),
new RowType.Field(Optional.of("max"), toPrestoType(column.type(), typeManager)),
new RowType.Field(Optional.of("null_count"), BIGINT)))))
RowType.from(ImmutableList.of(
new RowType.Field(Optional.of("min"), toPrestoType(column.type(), typeManager)),
new RowType.Field(Optional.of("max"), toPrestoType(column.type(), typeManager)),
new RowType.Field(Optional.of("null_count"), BIGINT)))))
.collect(toImmutableList());
}

@@ -61,6 +61,7 @@ public final class PartitionTransforms
private static final DateTimeField MONTH_OF_YEAR_UTC = getInstanceUTC().monthOfYear();
public static final int MILLISECONDS_PER_HOUR = 60 * 60 * 1000;
public static final int MILLISECONDS_PER_DAY = MILLISECONDS_PER_HOUR * 24;

private PartitionTransforms() {}

/**
@@ -550,9 +551,9 @@ public static class ColumnTransform
private final ValueTransform valueTransform;

public ColumnTransform(String transformName,
Type type,
Function<Block, Block> transform,
ValueTransform valueTransform)
Type type,
Function<Block, Block> transform,
ValueTransform valueTransform)
{
this.transformName = requireNonNull(transformName, "transformName is null");
this.type = requireNonNull(type, "resultType is null");
@@ -50,7 +50,7 @@

/**
* This is a dummy class required for {@link org.apache.iceberg.MetricsConfig#forTable}
* */
*/
public class PrestoIcebergTableForMetricsConfig
implements Table
{
@@ -153,7 +153,7 @@ protected ConnectorPushdownFilterResult getConnectorPushdownFilterResult(
Optional<Set<IcebergColumnHandle>> requestedColumns = currentLayoutHandle.map(layout -> ((IcebergTableLayoutHandle) layout).getRequestedColumns()).orElse(Optional.empty());

TupleDomain<ColumnHandle> partitionColumnPredicate = TupleDomain.withColumnDomains(Maps.filterKeys(
constraint.getSummary().getDomains().get(), Predicates.in(partitionColumns)));
constraint.getSummary().getDomains().get(), Predicates.in(partitionColumns)));

List<HivePartition> partitions = getPartitions(
typeManager,
@@ -90,10 +90,10 @@ public class IcebergMetadataOptimizer
private final StandardFunctionResolution functionResolution;

public IcebergMetadataOptimizer(FunctionMetadataManager functionMetadataManager,
TypeManager typeManager,
IcebergTransactionManager icebergTransactionManager,
RowExpressionService rowExpressionService,
StandardFunctionResolution functionResolution)
TypeManager typeManager,
IcebergTransactionManager icebergTransactionManager,
RowExpressionService rowExpressionService,
StandardFunctionResolution functionResolution)
{
this.functionMetadataManager = requireNonNull(functionMetadataManager, "functionMetadataManager is null");
this.typeManager = requireNonNull(typeManager, "typeManager is null");
@@ -131,13 +131,13 @@ private static class Optimizer
private final List<Predicate<FunctionHandle>> allowedFunctionsPredicates;

private Optimizer(ConnectorSession connectorSession,
PlanNodeIdAllocator idAllocator,
FunctionMetadataManager functionMetadataManager,
TypeManager typeManager,
IcebergTransactionManager icebergTransactionManager,
RowExpressionService rowExpressionService,
StandardFunctionResolution functionResolution,
int rowsForMetadataOptimizationThreshold)
PlanNodeIdAllocator idAllocator,
FunctionMetadataManager functionMetadataManager,
TypeManager typeManager,
IcebergTransactionManager icebergTransactionManager,
RowExpressionService rowExpressionService,
StandardFunctionResolution functionResolution,
int rowsForMetadataOptimizationThreshold)
{
checkArgument(rowsForMetadataOptimizationThreshold >= 0, "The value of `rowsForMetadataOptimizationThreshold` should not less than 0");
this.connectorSession = connectorSession;
@@ -87,9 +87,9 @@ public class IcebergPlanOptimizer
private final IcebergTransactionManager transactionManager;

IcebergPlanOptimizer(StandardFunctionResolution functionResolution,
RowExpressionService rowExpressionService,
FunctionMetadataManager functionMetadataManager,
IcebergTransactionManager transactionManager)
RowExpressionService rowExpressionService,
FunctionMetadataManager functionMetadataManager,
IcebergTransactionManager transactionManager)
{
this.functionResolution = requireNonNull(functionResolution, "functionResolution is null");
this.rowExpressionService = requireNonNull(rowExpressionService, "rowExpressionService is null");
@@ -190,10 +190,10 @@ public PlanNode visitFilter(FilterNode filter, RewriteContext<Void> context)

// Get predicate expression on entire columns that could not be enforced by iceberg table
TupleDomain<RowExpression> nonPartitionColumnPredicate = TupleDomain.withColumnDomains(
Maps.filterKeys(
entireColumnDomain.transform(icebergColumnHandle -> (ColumnHandle) icebergColumnHandle)
.getDomains().get(),
Predicates.not(Predicates.in(enforcedColumns))))
Maps.filterKeys(
entireColumnDomain.transform(icebergColumnHandle -> (ColumnHandle) icebergColumnHandle)
.getDomains().get(),
Predicates.not(Predicates.in(enforcedColumns))))
.transform(columnHandle -> new Subfield(columnHandleToNameMapping.get(columnHandle), ImmutableList.of()))
.transform(subfield -> subfieldExtractor.toRowExpression(subfield, columnTypes.get(subfield.getRootName())));
RowExpression nonPartitionColumn = rowExpressionService.getDomainTranslator().toPredicate(nonPartitionColumnPredicate);
@@ -374,13 +374,13 @@ private static boolean yieldSamePartitioningValue(
!field.transform().isIdentity()) {
TimestampType timestampType = (TimestampType) sourceType;
first = adjustTimestampForPartitionTransform(
session.getSqlFunctionProperties(),
timestampType,
first);
session.getSqlFunctionProperties(),
timestampType,
first);
second = adjustTimestampForPartitionTransform(
session.getSqlFunctionProperties(),
timestampType,
second);
session.getSqlFunctionProperties(),
timestampType,
second);
}
Object firstTransformed = transform.getValueTransform().apply(nativeValueToBlock(sourceType, first), 0);
Object secondTransformed = transform.getValueTransform().apply(nativeValueToBlock(sourceType, second), 0);
@@ -82,7 +82,7 @@ public class RemoveOrphanFiles

@Inject
public RemoveOrphanFiles(IcebergMetadataFactory metadataFactory,
HdfsEnvironment hdfsEnvironment)
HdfsEnvironment hdfsEnvironment)
{
this.metadataFactory = requireNonNull(metadataFactory, "metadataFactory is null");
this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null");
@@ -23,8 +23,8 @@ private IcebergLibUtils()

/**
* Call the method in Iceberg lib's protected class to set explicitly
* whether to use incremental cleanup when expiring snapshots
* */
* whether to use incremental cleanup when expiring snapshots
*/
public static ExpireSnapshots withIncrementalCleanup(ExpireSnapshots expireSnapshots, boolean incrementalCleanup)
{
requireNonNull(expireSnapshots, "expireSnapshots is null");
@@ -123,6 +123,7 @@ public void testDescribeTable()
MaterializedResult actualColumns = computeActual("DESCRIBE orders");
Assert.assertEquals(actualColumns, expectedColumns);
}

@Test
public void testShowCreateTable()
{
@@ -771,15 +772,15 @@ private void testCreateTableLike()

assertUpdate(session, "CREATE TABLE test_create_table_like_copy4 (LIKE test_create_table_like_original INCLUDING PROPERTIES) WITH (format = 'ORC')");
assertEquals(getTablePropertiesString("test_create_table_like_copy4"), format("WITH (\n" +
" delete_mode = 'merge-on-read',\n" +
" format = 'ORC',\n" +
" format_version = '2',\n" +
" location = '%s',\n" +
" metadata_delete_after_commit = false,\n" +
" metadata_previous_versions_max = 100,\n" +
" metrics_max_inferred_column = 100,\n" +
" partitioning = ARRAY['adate']\n" +
")",
" delete_mode = 'merge-on-read',\n" +
" format = 'ORC',\n" +
" format_version = '2',\n" +
" location = '%s',\n" +
" metadata_delete_after_commit = false,\n" +
" metadata_previous_versions_max = 100,\n" +
" metrics_max_inferred_column = 100,\n" +
" partitioning = ARRAY['adate']\n" +
")",
getLocation(schemaName, "test_create_table_like_original")));
dropTable(session, "test_create_table_like_copy4");
}
@@ -54,7 +54,8 @@ protected TestIcebergDistributedQueries(CatalogType catalogType)
}

@Override
protected QueryRunner createQueryRunner() throws Exception
protected QueryRunner createQueryRunner()
throws Exception
{
return IcebergQueryRunner.createIcebergQueryRunner(ImmutableMap.of(), catalogType, extraConnectorProperties);
}
@@ -76,7 +76,8 @@ public class TestIcebergFileWriter
private ConnectorSession connectorSession;

@BeforeClass
public void setup() throws Exception
public void setup()
throws Exception
{
ConnectorId connectorId = new ConnectorId("iceberg");
SessionPropertyManager sessionPropertyManager = createTestingSessionPropertyManager();
@@ -119,7 +120,8 @@ public void setup() throws Exception
}

@Test
public void testWriteParquetFileWithLogicalTypes() throws Exception
public void testWriteParquetFileWithLogicalTypes()
throws Exception
{
Path path = new Path(createTempDir().getAbsolutePath() + "/test.parquet");
Schema icebergSchema = toIcebergSchema(ImmutableList.of(