From 880e7f1ddc3d8125547329718ba7cb7390dd943c Mon Sep 17 00:00:00 2001
From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com>
Date: Tue, 4 Feb 2025 17:54:52 +0100
Subject: [PATCH] more fixes

---
 tsl/src/nodes/vector_agg/exec.c                     |  2 +-
 .../vector_agg/hashing/hash_strategy_serialized.c   |  3 +--
 .../vector_agg/hashing/hash_strategy_single_text.c  | 14 +++++++-------
 3 files changed, 9 insertions(+), 10 deletions(-)

diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c
index 8d5485ee878..7cd3888417f 100644
--- a/tsl/src/nodes/vector_agg/exec.c
+++ b/tsl/src/nodes/vector_agg/exec.c
@@ -78,7 +78,7 @@ grouping_column_comparator(const void *a_ptr, const void *b_ptr)
 
 static void
 get_column_storage_properties(const CustomScanState *state, int input_offset,
-							  GroupingColum *result)
+							  GroupingColumn *result)
 {
 	const DecompressChunkState *decompress_state = (DecompressChunkState *) state;
 	const DecompressContext *dcontext = &decompress_state->decompress_context;
diff --git a/tsl/src/nodes/vector_agg/hashing/hash_strategy_serialized.c b/tsl/src/nodes/vector_agg/hashing/hash_strategy_serialized.c
index 95512ce8057..c47df3537de 100644
--- a/tsl/src/nodes/vector_agg/hashing/hash_strategy_serialized.c
+++ b/tsl/src/nodes/vector_agg/hashing/hash_strategy_serialized.c
@@ -34,8 +34,7 @@ serialized_key_hashing_init(HashingStrategy *hashing)
 }
 
 static void
-serialized_key_hashing_prepare_for_batch(GroupingPolicyHash *policy,
-										 DecompressBatchState *batch_state)
+serialized_key_hashing_prepare_for_batch(GroupingPolicyHash *policy, TupleTableSlot *vector_slot)
 {
 }
 
diff --git a/tsl/src/nodes/vector_agg/hashing/hash_strategy_single_text.c b/tsl/src/nodes/vector_agg/hashing/hash_strategy_single_text.c
index e6dc5e161fd..fe8333fbc38 100644
--- a/tsl/src/nodes/vector_agg/hashing/hash_strategy_single_text.c
+++ b/tsl/src/nodes/vector_agg/hashing/hash_strategy_single_text.c
@@ -145,15 +145,17 @@ single_text_key_hashing_prepare_for_batch(GroupingPolicyHash *policy, TupleTable
 	 */
 	policy->use_key_index_for_dict = false;
 
-	BatchHashingParams params = build_batch_hashing_params(policy, batch_state);
+	BatchHashingParams params = build_batch_hashing_params(policy, vector_slot);
 	if (params.single_grouping_column.decompression_type != DT_ArrowTextDict)
 	{
 		return;
 	}
 
+	uint16 batch_rows;
+	const uint64 *row_filter = vector_slot_get_qual_result(vector_slot, &batch_rows);
+
 	const int dict_rows = params.single_grouping_column.arrow->dictionary->length;
-	if ((size_t) dict_rows >
-		arrow_num_valid(batch_state->vector_qual_result, batch_state->total_batch_rows))
+	if ((size_t) dict_rows > arrow_num_valid(row_filter, batch_rows))
 	{
 		return;
 	}
@@ -185,9 +185,7 @@ single_text_key_hashing_prepare_for_batch(GroupingPolicyHash *policy, TupleTable
 	 * We shouldn't add the dictionary entries that are not used by any matching
 	 * rows. Translate the batch filter bitmap to dictionary rows.
 	 */
-	const int batch_rows = batch_state->total_batch_rows;
-	const uint64 *row_filter = batch_state->vector_qual_result;
-	if (batch_state->vector_qual_result != NULL)
+	if (row_filter != NULL)
 	{
 		uint64 *restrict dict_filter = policy->tmp_filter;
 		const size_t dict_words = (dict_rows + 63) / 64;
@@ -249,7 +249,7 @@ single_text_key_hashing_prepare_for_batch(GroupingPolicyHash *policy, TupleTable
 	 * batch filter.
 	 */
 	bool have_null_key = false;
-	if (batch_state->vector_qual_result != NULL)
+	if (row_filter != NULL)
 	{
 		if (params.single_grouping_column.arrow->null_count > 0)
 		{
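
Note for reviewers (not part of the patch): the hash_strategy_single_text.c hunks
reroute the filter lookup through vector_slot_get_qual_result() and then reuse the
resulting row_filter/batch_rows pair in the later hunks, instead of reading
batch_state directly. For the gist of the "translate the batch filter bitmap to
dictionary rows" step that this filter feeds into, here is a minimal
self-contained C sketch; the function and variable names
(translate_row_filter_to_dict_filter, indices) are illustrative stand-ins, not
the actual code in hash_strategy_single_text.c:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/*
 * Sketch: given the per-row qual result bitmap of a batch and the Arrow
 * dictionary indices of the grouping column, build a per-dictionary-entry
 * bitmap that marks the entries referenced by at least one passing row.
 */
static void
translate_row_filter_to_dict_filter(const int16_t *indices, const uint64_t *row_filter,
				    uint16_t batch_rows, int dict_rows,
				    uint64_t *dict_filter)
{
	/* Start with no dictionary entries marked as used. */
	const size_t dict_words = (dict_rows + 63) / 64;
	memset(dict_filter, 0, dict_words * sizeof(uint64_t));

	for (uint16_t row = 0; row < batch_rows; row++)
	{
		/* Test this row's bit in the batch filter bitmap. */
		const bool passes = (row_filter[row / 64] >> (row % 64)) & 1;
		if (passes)
		{
			/* Mark the dictionary entry this row references. */
			const int16_t index = indices[row];
			dict_filter[index / 64] |= UINT64_C(1) << (index % 64);
		}
	}
}

The real code writes into the policy's tmp_filter sized at
dict_words = (dict_rows + 63) / 64 words, as visible in the hunk above; the
per-row loop here just spells out the bit translation.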