
Commit

Remove deprecated precomputed hash optimizer
martint committed Feb 1, 2025
1 parent f799e9e commit 60cc9df
Showing 16 changed files with 11 additions and 1,226 deletions.
SystemSessionProperties.java
@@ -60,7 +60,6 @@
 public final class SystemSessionProperties
         implements SystemSessionPropertiesProvider
 {
-    public static final String OPTIMIZE_HASH_GENERATION = "optimize_hash_generation";
     public static final String JOIN_DISTRIBUTION_TYPE = "join_distribution_type";
     public static final String JOIN_MAX_BROADCAST_TABLE_SIZE = "join_max_broadcast_table_size";
     public static final String JOIN_MULTI_CLAUSE_INDEPENDENCE_FACTOR = "join_multi_clause_independence_factor";
@@ -253,11 +252,6 @@ public SystemSessionProperties(
                         "Policy used for scheduling query tasks",
                         queryManagerConfig.getQueryExecutionPolicy(),
                         false),
-                booleanProperty(
-                        OPTIMIZE_HASH_GENERATION,
-                        "Compute hash codes for distribution, joins, and aggregations early in query plan",
-                        optimizerConfig.isOptimizeHashGeneration(),
-                        false),
                 enumProperty(
                         JOIN_DISTRIBUTION_TYPE,
                         "Join distribution type",
@@ -1148,11 +1142,6 @@ public static String getExecutionPolicy(Session session)
         return session.getSystemProperty(EXECUTION_POLICY, String.class);
     }
 
-    public static boolean isOptimizeHashGenerationEnabled(Session session)
-    {
-        return session.getSystemProperty(OPTIMIZE_HASH_GENERATION, Boolean.class);
-    }
-
     public static JoinDistributionType getJoinDistributionType(Session session)
     {
         return session.getSystemProperty(JOIN_DISTRIBUTION_TYPE, JoinDistributionType.class);
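For reference, the removed code followed the usual three-part session-property pattern visible in this file: a name constant, a PropertyMetadata registration, and a static accessor that reads the value from the Session. A minimal sketch of that pattern follows; the class and property names are illustrative and not part of this commit, while booleanProperty and getSystemProperty are the helpers visible in the diff.

import io.trino.Session;
import io.trino.spi.session.PropertyMetadata;

import static io.trino.spi.session.PropertyMetadata.booleanProperty;

final class ExampleSessionProperty
{
    // 1. Property name constant (illustrative)
    static final String EXAMPLE_FLAG = "example_flag";

    // 2. Registration, normally listed alongside the other properties
    //    in the SystemSessionProperties constructor
    static PropertyMetadata<Boolean> exampleFlagProperty(boolean defaultValue)
    {
        return booleanProperty(
                EXAMPLE_FLAG,
                "Illustrative boolean session property",
                defaultValue,
                false);
    }

    // 3. Typed accessor used by the planner/engine
    static boolean isExampleFlagEnabled(Session session)
    {
        return session.getSystemProperty(EXAMPLE_FLAG, Boolean.class);
    }
}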
@@ -71,6 +71,7 @@
 import static io.airlift.bytecode.expression.BytecodeExpressions.not;
 import static io.airlift.bytecode.expression.BytecodeExpressions.notEqual;
 import static io.trino.cache.SafeCaches.buildNonEvictableCache;
+import static io.trino.operator.HashGenerator.INITIAL_HASH_VALUE;
 import static io.trino.spi.function.InvocationConvention.InvocationArgumentConvention.BLOCK_POSITION_NOT_NULL;
 import static io.trino.spi.function.InvocationConvention.InvocationArgumentConvention.FLAT;
 import static io.trino.spi.function.InvocationConvention.InvocationReturnConvention.BLOCK_BUILDER;
@@ -81,7 +82,6 @@
 import static io.trino.sql.gen.Bootstrap.BOOTSTRAP_METHOD;
 import static io.trino.sql.gen.BytecodeUtils.loadConstant;
 import static io.trino.sql.gen.SqlTypeBytecodeExpression.constantType;
-import static io.trino.sql.planner.optimizations.HashGenerationOptimizer.INITIAL_HASH_VALUE;
 import static io.trino.util.CompilerUtils.defineClass;
 import static io.trino.util.CompilerUtils.makeClassName;

@@ -41,9 +41,9 @@
 import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkState;
 import static io.airlift.units.DataSize.Unit.MEGABYTE;
+import static io.trino.operator.HashGenerator.INITIAL_HASH_VALUE;
 import static io.trino.operator.aggregation.builder.InMemoryHashAggregationBuilder.toTypes;
 import static io.trino.spi.type.BigintType.BIGINT;
-import static io.trino.sql.planner.optimizations.HashGenerationOptimizer.INITIAL_HASH_VALUE;
 import static io.trino.type.TypeUtils.NULL_HASH_CODE;
 import static java.util.Objects.requireNonNull;

HashGenerator.java
@@ -17,6 +17,8 @@
 
 public interface HashGenerator
 {
+    int INITIAL_HASH_VALUE = 0;
+
     long hashPosition(int position, Page page);
 
     default int getPartition(int partitionCount, int position, Page page)
InterpretedHashGenerator.java
@@ -20,7 +20,6 @@
 import io.trino.spi.block.Block;
 import io.trino.spi.type.Type;
 import io.trino.spi.type.TypeOperators;
-import io.trino.sql.planner.optimizations.HashGenerationOptimizer;
 import jakarta.annotation.Nullable;
 
 import java.lang.invoke.MethodHandle;
@@ -79,7 +78,7 @@ private InterpretedHashGenerator(List<Type> hashChannelTypes, @Nullable int[] ha
     public long hashPosition(int position, Page page)
     {
         // Note: this code is duplicated for performance but must logically match hashPosition(position, IntFunction<Block> blockProvider)
-        long result = HashGenerationOptimizer.INITIAL_HASH_VALUE;
+        long result = INITIAL_HASH_VALUE;
         for (int i = 0; i < hashCodeOperators.length; i++) {
             Block block = page.getBlock(hashChannels == null ? i : hashChannels[i]);
             result = CombineHashFunction.getHash(result, nullSafeHash(i, block, position));
@@ -90,7 +89,7 @@ public long hashPosition(int position, Page page)
     public long hashPosition(int position, IntFunction<Block> blockProvider)
     {
         // Note: this code is duplicated for performance but must logically match hashPosition(position, Page page)
-        long result = HashGenerationOptimizer.INITIAL_HASH_VALUE;
+        long result = INITIAL_HASH_VALUE;
         for (int i = 0; i < hashCodeOperators.length; i++) {
             Block block = blockProvider.apply(hashChannels == null ? i : hashChannels[i]);
             result = CombineHashFunction.getHash(result, nullSafeHash(i, block, position));
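The hunks above move the row-hashing constant next to its users in the operator layer: a per-row hash starts at INITIAL_HASH_VALUE (0) and folds in one hash per channel, nulls included. A dependency-free sketch of that folding scheme follows; the 31 * h + v combine step and the null constant are stand-ins chosen for illustration, not the exact operators Trino resolves through TypeOperators and CombineHashFunction.

import java.util.List;

final class SimpleRowHasher
{
    // Same starting value as HashGenerator.INITIAL_HASH_VALUE in the diff above.
    static final int INITIAL_HASH_VALUE = 0;
    // Nulls hash to a fixed constant; 0 is used here purely for illustration.
    static final long NULL_HASH_CODE = 0;

    // Each "channel" is a column of boxed Long values; null means SQL NULL.
    static long hashPosition(int position, List<Long[]> channels)
    {
        long result = INITIAL_HASH_VALUE;
        for (Long[] channel : channels) {
            Long value = channel[position];
            long valueHash = (value == null) ? NULL_HASH_CODE : Long.hashCode(value);
            result = combine(result, valueHash);
        }
        return result;
    }

    // Illustrative combine step in the spirit of a 31 * h + v fold.
    static long combine(long previousHash, long valueHash)
    {
        return 31 * previousHash + valueHash;
    }

    public static void main(String[] args)
    {
        Long[] column0 = {1L, 2L, null};
        Long[] column1 = {10L, 20L, 30L};
        for (int position = 0; position < 3; position++) {
            System.out.println(hashPosition(position, List.of(column0, column1)));
        }
    }
}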
OptimizerConfig.java
@@ -35,6 +35,7 @@
         "preferred-write-partitioning-min-number-of-partitions",
         "optimizer.use-mark-distinct",
         "optimizer.optimize-mixed-distinct-aggregations",
+        "optimizer.optimize-hash-generation",
 })
 public class OptimizerConfig
 {
@@ -67,7 +68,6 @@ public class OptimizerConfig
     private Duration iterativeOptimizerTimeout = new Duration(3, MINUTES); // by default let optimizer wait a long time in case it retrieves some data from ConnectorMetadata
 
     private boolean optimizeMetadataQueries;
-    private boolean optimizeHashGeneration;
     private boolean pushTableWriteThroughUnion = true;
     private boolean dictionaryAggregation;
     private MarkDistinctStrategy markDistinctStrategy;
@@ -544,18 +544,6 @@ public OptimizerConfig setOptimizeTopNRanking(boolean optimizeTopNRanking)
         return this;
     }
 
-    public boolean isOptimizeHashGeneration()
-    {
-        return optimizeHashGeneration;
-    }
-
-    @Config("optimizer.optimize-hash-generation")
-    public OptimizerConfig setOptimizeHashGeneration(boolean optimizeHashGeneration)
-    {
-        this.optimizeHashGeneration = optimizeHashGeneration;
-        return this;
-    }
-
     public boolean isPushTableWriteThroughUnion()
     {
         return pushTableWriteThroughUnion;
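With the optimizer removed, optimizer.optimize-hash-generation joins the list of retired names at the top of OptimizerConfig (presumably the class's @DefunctConfig annotation from airlift's configuration framework), so a deployment that still sets it fails configuration validation with a clear error rather than having the value silently ignored. A sketch of that retirement pattern follows, using an illustrative config class and property rather than Trino's own.

import io.airlift.configuration.Config;
import io.airlift.configuration.DefunctConfig;

// Sketch only: property names listed in @DefunctConfig are rejected at
// startup if they still appear in a deployment's configuration files.
@DefunctConfig({
        "optimizer.optimize-hash-generation",   // retired by this commit
})
public class ExampleOptimizerConfig
{
    private boolean exampleFlag = true;

    public boolean isExampleFlag()
    {
        return exampleFlag;
    }

    // An ordinary, still-supported property for contrast (illustrative name).
    @Config("optimizer.example-flag")
    public ExampleOptimizerConfig setExampleFlag(boolean exampleFlag)
    {
        this.exampleFlag = exampleFlag;
        return this;
    }
}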
PlanOptimizers.java
@@ -253,7 +253,6 @@
 import io.trino.sql.planner.optimizations.BeginTableWrite;
 import io.trino.sql.planner.optimizations.CheckSubqueryNodesAreRewritten;
 import io.trino.sql.planner.optimizations.DeterminePartitionCount;
-import io.trino.sql.planner.optimizations.HashGenerationOptimizer;
 import io.trino.sql.planner.optimizations.IndexJoinOptimizer;
 import io.trino.sql.planner.optimizations.LimitPushDown;
 import io.trino.sql.planner.optimizations.MetadataQueryOptimizer;
@@ -1023,9 +1022,6 @@ public PlanOptimizers(
                         new RemoveRedundantIdentityProjections())));
         // DO NOT add optimizers that change the plan shape (computations) after this point
 
-        // Precomputed hashes - this assumes that partitioning will not change
-        builder.add(new HashGenerationOptimizer(metadata));
-
         builder.add(new IterativeOptimizer(
                 plannerContext,
                 ruleStats,