Commit 880e7f1
more fixes
akuzm committed Feb 4, 2025
1 parent 8243f8a
Showing 3 changed files with 9 additions and 10 deletions.

tsl/src/nodes/vector_agg/exec.c (2 changes: 1 addition & 1 deletion)
@@ -78,7 +78,7 @@ grouping_column_comparator(const void *a_ptr, const void *b_ptr)
 
 static void
 get_column_storage_properties(const CustomScanState *state, int input_offset,
-                              GroupingColum *result)
+                              GroupingColumn *result)
 {
     const DecompressChunkState *decompress_state = (DecompressChunkState *) state;
     const DecompressContext *dcontext = &decompress_state->decompress_context;

tsl/src/nodes/vector_agg/hashing/hash_strategy_serialized.c (3 changes: 1 addition & 2 deletions)
@@ -34,8 +34,7 @@ serialized_key_hashing_init(HashingStrategy *hashing)
 }
 
 static void
-serialized_key_hashing_prepare_for_batch(GroupingPolicyHash *policy,
-                                         DecompressBatchState *batch_state)
+serialized_key_hashing_prepare_for_batch(GroupingPolicyHash *policy, TupleTableSlot *vector_slot)
 {
 }

tsl/src/nodes/vector_agg/hashing/hash_strategy_single_text.c (14 changes: 7 additions & 7 deletions)
@@ -145,15 +145,17 @@ single_text_key_hashing_prepare_for_batch(GroupingPolicyHash *policy, TupleTable
      */
     policy->use_key_index_for_dict = false;
 
-    BatchHashingParams params = build_batch_hashing_params(policy, batch_state);
+    BatchHashingParams params = build_batch_hashing_params(policy, vector_slot);
     if (params.single_grouping_column.decompression_type != DT_ArrowTextDict)
     {
         return;
     }
 
+    uint16 batch_rows;
+    const uint64 *row_filter = vector_slot_get_qual_result(vector_slot, &batch_rows);
+
     const int dict_rows = params.single_grouping_column.arrow->dictionary->length;
-    if ((size_t) dict_rows >
-        arrow_num_valid(batch_state->vector_qual_result, batch_state->total_batch_rows))
+    if ((size_t) dict_rows > arrow_num_valid(row_filter, batch_rows))
     {
         return;
     }
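
Note on this hunk: the qualifying-rows bitmap and the batch row count now come from the vector_slot_get_qual_result() accessor instead of direct DecompressBatchState field reads, and the dictionary path is skipped when the dictionary has more entries than there are rows passing the filter. For context, a minimal sketch of what a bitmap-cardinality helper like arrow_num_valid() plausibly computes; the popcount loop and the name sketch_num_valid are assumptions, not the repository's code:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: count rows passing the filter. A NULL bitmap is taken to mean
     * "no filter", so every row counts. Assumes the unused tail bits of the
     * last word are zero. __builtin_popcountll is GCC/Clang-specific. */
    static size_t
    sketch_num_valid(const uint64_t *filter, uint16_t total_rows)
    {
        if (filter == NULL)
        {
            return total_rows;
        }

        size_t valid = 0;
        const size_t words = (total_rows + 63) / 64;
        for (size_t word = 0; word < words; word++)
        {
            valid += __builtin_popcountll(filter[word]);
        }
        return valid;
    }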

@@ -185,9 +187,7 @@ single_text_key_hashing_prepare_for_batch(GroupingPolicyHash *policy, TupleTable
      * We shouldn't add the dictionary entries that are not used by any matching
      * rows. Translate the batch filter bitmap to dictionary rows.
      */
-    const int batch_rows = batch_state->total_batch_rows;
-    const uint64 *row_filter = batch_state->vector_qual_result;
-    if (batch_state->vector_qual_result != NULL)
+    if (row_filter != NULL)
    {
         uint64 *restrict dict_filter = policy->tmp_filter;
         const size_t dict_words = (dict_rows + 63) / 64;
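
The comment in this hunk states the underlying idea: a dictionary entry should survive the filter only if at least one row passing the batch filter references it. A minimal sketch of that bitmap translation, assuming int16 Arrow dictionary indexes and a zero-initialized dict_filter sized for dict_rows; the names and the omitted null-row handling are illustrative, not the repository's code:

    #include <stdbool.h>
    #include <stdint.h>

    /* Set the dictionary-filter bit for every dictionary index referenced by
     * a row that passes the row filter. */
    static void
    sketch_translate_filter(const uint64_t *row_filter, uint16_t n_rows,
                            const int16_t *dict_indexes, uint64_t *dict_filter)
    {
        for (uint16_t row = 0; row < n_rows; row++)
        {
            const bool row_passes = (row_filter[row / 64] >> (row % 64)) & 1;
            if (row_passes)
            {
                const int16_t index = dict_indexes[row];
                dict_filter[index / 64] |= UINT64_C(1) << (index % 64);
            }
        }
    }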

@@ -249,7 +249,7 @@ single_text_key_hashing_prepare_for_batch(GroupingPolicyHash *policy, TupleTable
      * batch filter.
      */
     bool have_null_key = false;
-    if (batch_state->vector_qual_result != NULL)
+    if (row_filter != NULL)
     {
         if (params.single_grouping_column.arrow->null_count > 0)
         {
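
The same renaming applies to the null-key check. As far as this hunk shows, the predicate being computed is: a null grouping key exists only when some row passing the filter is null in the Arrow validity bitmap. A minimal sketch of that predicate, assuming the usual Arrow convention that a set validity bit means non-null; the function name is illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    /* True if any row passes the filter but has no value (validity bit clear). */
    static bool
    sketch_have_null_key(const uint64_t *row_filter, const uint64_t *validity,
                         uint16_t n_rows)
    {
        for (uint16_t row = 0; row < n_rows; row++)
        {
            const bool passes = (row_filter[row / 64] >> (row % 64)) & 1;
            const bool is_null = !((validity[row / 64] >> (row % 64)) & 1);
            if (passes && is_null)
            {
                return true;
            }
        }
        return false;
    }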
