Merge remote-tracking branch 'origin/main' into HEAD
akuzm committed Jan 10, 2025
2 parents ac00ac4 + 3170014 commit 9b47614
Showing 62 changed files with 3,479 additions and 3,031 deletions.
4 changes: 0 additions & 4 deletions .github/workflows/linux-32bit-build-and-test-ignored.yaml
@@ -22,7 +22,6 @@ jobs:
    runs-on: ubuntu-latest
    outputs:
      pg_latest: ${{ steps.setter.outputs.PG_LATEST }}
-     pg17_latest: ${{ steps.setter.outputs.PG17_LATEST }}
    steps:
      - name: Checkout source code
        uses: actions/checkout@v4
@@ -38,9 +37,6 @@ jobs:
      matrix:
        pg: ${{ fromJson(needs.config.outputs.pg_latest) }}
        build_type: [ Debug ]
-       include:
-         - pg: ${{ fromJson(needs.config.outputs.pg17_latest) }}
-           build_type: Debug
    steps:
      - run: |
          echo "No build required"
4 changes: 0 additions & 4 deletions .github/workflows/linux-32bit-build-and-test.yaml
@@ -25,7 +25,6 @@ jobs:
    runs-on: ubuntu-latest
    outputs:
      pg_latest: ${{ steps.setter.outputs.PG_LATEST }}
-     pg17_latest: ${{ steps.setter.outputs.PG17_LATEST }}
    steps:
      - name: Checkout source code
        uses: actions/checkout@v4
@@ -56,9 +55,6 @@ jobs:
      matrix:
        pg: ${{ fromJson(needs.config.outputs.pg_latest) }}
        build_type: [ Debug ]
-       include:
-         - pg: ${{ fromJson(needs.config.outputs.pg17_latest) }}
-           build_type: Debug
      fail-fast: false

    steps:
6 changes: 3 additions & 3 deletions .github/workflows/pgspot.yaml
@@ -39,17 +39,17 @@ jobs:

    steps:

-     - name: Setup python 3.12
+     - name: Setup python 3.13
        uses: actions/setup-python@v5
        with:
-         python-version: '3.12'
+         python-version: '3.13'

      - name: Checkout timescaledb
        uses: actions/checkout@v4

      - name: Install pgspot
        run: |
-         python -m pip install pgspot==0.8.1
+         python -m pip install pgspot==0.9.0
      - name: Build timescaledb sqlfiles
        run: |
3 changes: 1 addition & 2 deletions .github/workflows/pr-handling.yaml
@@ -15,10 +15,9 @@ name: Assign PR to author and reviewers
    types: [ opened, reopened, ready_for_review ]

jobs:

  assign-pr:
    name: Assign PR to author
    permissions:
      contents: write
    runs-on: ubuntu-latest
    steps:
      - uses: toshimaru/[email protected]
2 changes: 1 addition & 1 deletion .unreleased/pr_6901
@@ -1 +1 @@
-Implements: #6901 Add hypertable support for transition tables
+Implements: #6901: Add hypertable support for transition tables.
2 changes: 1 addition & 1 deletion .unreleased/pr_7104
@@ -1 +1 @@
-Implements: #7104 Hypercore table access method
+Implements: #7104: Hypercore table access method.
2 changes: 1 addition & 1 deletion .unreleased/pr_7271
@@ -1 +1 @@
-Implements: #7271 Push down ORDER BY in real time continuous aggregate queries
+Implements: #7271: Push down `order by` in real-time continuous aggregate queries.
2 changes: 1 addition & 1 deletion .unreleased/pr_7295
@@ -1 +1 @@
-Implements: #7295: Support ALTER TABLE SET ACCESS METHOD on hypertable.
+Implements: #7295: Support `alter table set access method` on hypertable.
4 changes: 2 additions & 2 deletions .unreleased/pr_7378
@@ -1,2 +1,2 @@
-Fixes: #7378 Remove obsolete job referencing policy_job_error_retention
-Thanks: @pgloader for reporting an issue an internal background job
+Fixes: #7378: Remove obsolete job referencing `policy_job_error_retention`.
+Thanks: @pgloader for reporting the issue in an internal background job.
2 changes: 1 addition & 1 deletion .unreleased/pr_7390
@@ -1 +1 @@
-Implements: #7390 Disable custom hashagg planner code
+Implements: #7390: Disable custom `hashagg` planner code.
2 changes: 1 addition & 1 deletion .unreleased/pr_7409
@@ -1 +1 @@
-Fixes: #7409 Update bgw job table when altering procedure
+Fixes: #7409: Update `bgw_job` table when altering procedure.
2 changes: 1 addition & 1 deletion .unreleased/pr_7411
@@ -1 +1 @@
-Implements: #7411 Change parameter name to enable Hypercore TAM
+Implements: #7411: Change parameter name to enable hypercore table access method.
2 changes: 1 addition & 1 deletion .unreleased/pr_7412
@@ -1 +1 @@
-Implements: #7412 Add GUC for hypercore_use_access_method default
+Implements: #7412: Add GUC for `hypercore_use_access_method` default.
2 changes: 1 addition & 1 deletion .unreleased/pr_7426
@@ -1 +1 @@
-Fixes: #7426 Fix datetime parsing error in chunk constraint creation
+Fixes: #7426: Fix `datetime` parsing error in chunk constraint creation.
2 changes: 1 addition & 1 deletion .unreleased/pr_7432
@@ -1 +1 @@
-Fixes: #7432 Verify that heap tuple is valid before using
+Fixes: #7432: Verify that the heap tuple is valid before using.
4 changes: 2 additions & 2 deletions .unreleased/pr_7434
@@ -1,2 +1,2 @@
-Fixes: #7434 Fixes segfault when internally set the replica identity for a given chunk
-Thanks: @bharrisau for reporting the segfault when creating chunks
+Fixes: #7434: Fixes the segfault when internally setting the replica identity for a given chunk.
+Thanks: @bharrisau for reporting the segfault when creating chunks.
2 changes: 1 addition & 1 deletion .unreleased/pr_7443
@@ -1 +1 @@
-Implements: #7443 Add Hypercore function and view aliases
+Implements: #7443: Add hypercore function and view aliases.
2 changes: 1 addition & 1 deletion .unreleased/pr_7455
@@ -1 +1 @@
-Implements: #7455: Support DROP NOT NULL on compressed hypertables
+Implements: #7455: Support `drop not null` on compressed hypertables.
2 changes: 1 addition & 1 deletion .unreleased/pr_7482
@@ -1 +1 @@
-Implements: #7482 Optimize recompression of partially compressed chunks
+Implements: #7482: Optimize recompression of partially compressed chunks.
2 changes: 1 addition & 1 deletion .unreleased/pr_7486
@@ -1 +1 @@
-Implements: #7486 Prevent building against postgres versions with broken ABI
+Implements: #7486: Prevent building against postgres versions with broken ABI.
2 changes: 1 addition & 1 deletion .unreleased/pr_7488
@@ -1 +1 @@
-Fixes: #7488 Emit error for transition table trigger on chunks
+Fixes: #7488: Emit error for transition table trigger on chunks.
2 changes: 1 addition & 1 deletion .unreleased/pr_7514
@@ -1 +1 @@
-Fixes: #7514 Fix error: invalid child of chunk append
+Fixes: #7514: Fix the error: `invalid child of chunk append`.
1 change: 1 addition & 0 deletions .unreleased/pr_7517
@@ -0,0 +1 @@
+Fixes: #7517 Fixes performance regression on `cagg_migrate` procedure
2 changes: 1 addition & 1 deletion .unreleased/pr_7557
@@ -1 +1 @@
-Fixes: #7557 Fix NULL handling for in-memory tuple filtering
+Fixes: #7557: Fix null handling for in-memory tuple filtering.
2 changes: 2 additions & 0 deletions .unreleased/pr_7565
@@ -0,0 +1,2 @@
+Implements: #7565 Add hint when hypertable creation fails
+Thanks: @k-rus for suggesting the improvement
1 change: 1 addition & 0 deletions .unreleased/pr_7584
@@ -0,0 +1 @@
+Fixes: #7584 Fix NaN-handling for vectorized aggregation
4 changes: 2 additions & 2 deletions .unreleased/resolve-vars
@@ -1,2 +1,2 @@
-Fixes: #7410 "aggregated compressed column not found" error on aggregation query.
-Thanks: @uasiddiqi for reporting the "aggregated compressed column not found" error.
+Fixes: #7410: Fix the `aggregated compressed column not found` error on aggregation query.
+Thanks: @uasiddiqi for reporting the `aggregated compressed column not found` error.
2 changes: 1 addition & 1 deletion .unreleased/vectorized-agg-filter
@@ -1 +1 @@
-Implements: #7458 Support vecorized aggregation with aggregate FILTER clauses that are also vectorizable
+Implements: #7458: Support vectorized aggregation with aggregate `filter` clauses that are also vectorizable.
2 changes: 1 addition & 1 deletion .unreleased/vectorized-grouping-one-fixed
@@ -1 +1 @@
-Implements: #7341 Vectorized aggregation with grouping by one fixed-size by-value compressed column (such as arithmetic types).
+Implements: #7341: Vectorized aggregation with grouping by one fixed-size by-value compressed column (such as arithmetic types).
109 changes: 91 additions & 18 deletions sql/cagg_migrate.sql
@@ -83,6 +83,7 @@ DECLARE
    _bucket_column_type TEXT;
    _interval_type TEXT;
    _interval_value TEXT;
+   _nbuckets INTEGER := 10; -- number of buckets per transaction
BEGIN
    IF _timescaledb_functions.cagg_migrate_plan_exists(_cagg_data.mat_hypertable_id) IS TRUE THEN
        RAISE EXCEPTION 'plan already exists for materialized hypertable %', _cagg_data.mat_hypertable_id;
@@ -118,8 +119,12 @@ BEGIN
    AND hypertable_name = _matht.table_name
    AND dimension_type = 'Time';

+   -- Get the current cagg bucket width
+   SELECT bucket_width
+   INTO _interval_value
+   FROM _timescaledb_functions.cagg_get_bucket_function_info(_cagg_data.mat_hypertable_id);
+
    IF _integer_interval IS NOT NULL THEN
-       _interval_value := _integer_interval::TEXT;
        _interval_type := _bucket_column_type;
        IF _bucket_column_type = 'bigint' THEN
            _watermark := COALESCE(_timescaledb_functions.cagg_watermark(_cagg_data.mat_hypertable_id)::bigint, '-9223372036854775808'::bigint)::TEXT;
@@ -129,7 +134,6 @@
            _watermark := COALESCE(_timescaledb_functions.cagg_watermark(_cagg_data.mat_hypertable_id)::smallint, '-32768'::smallint)::TEXT;
        END IF;
    ELSE
-       _interval_value := _time_interval::TEXT;
        _interval_type := 'interval';

        -- We expect an ISO date later in parsing (i.e., min value has to be '4714-11-24 00:53:28+00:53:28 BC')
@@ -177,16 +181,17 @@ BEGIN
            'COPY DATA',
            jsonb_build_object (
                'start_ts', start::text,
-               'end_ts', (start + CAST(%8$L AS %9$s))::text,
+               'end_ts', (start + (CAST(%8$L AS %9$s) * %10$s))::text,
                'bucket_column_name', bucket_column_name,
                'bucket_column_type', bucket_column_type,
                'cagg_name_new', cagg_name_new
            )
        FROM boundaries,
-       LATERAL generate_series(min, max, CAST(%8$L AS %9$s)) AS start;
+       LATERAL generate_series(min, max, (CAST(%8$L AS %9$s) * %10$s)) AS start;
        $$,
-       _bucket_column_name, _bucket_column_type, _cagg_name_new, _cagg_data.user_view_schema,
-       _cagg_data.user_view_name, _watermark, _cagg_data.mat_hypertable_id, _interval_value, _interval_type
+       _bucket_column_name, _bucket_column_type, _cagg_name_new, _matht.schema_name,
+       _matht.table_name, _watermark, _cagg_data.mat_hypertable_id, _interval_value,
+       _interval_type, _nbuckets
    );

    EXECUTE _sql;
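The `%10$s` placeholder threaded through the statement above is the new `_nbuckets` argument: each COPY DATA step now spans ten buckets per transaction instead of one. A minimal sketch of the resulting windows (the 1-hour bucket width and the timestamps are hypothetical; the real query is assembled by the format() call above):

    -- Hypothetical: with a 1 hour bucket width and _nbuckets = 10,
    -- generate_series() advances in 10-hour steps, one COPY DATA window each.
    SELECT start AS start_ts,
           start + (CAST('1 hour' AS interval) * 10) AS end_ts
    FROM generate_series('2025-01-01 00:00+00'::timestamptz,
                         '2025-01-02 00:00+00'::timestamptz,
                         CAST('1 hour' AS interval) * 10) AS start;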
@@ -355,6 +360,14 @@ BEGIN
END;
$BODY$ SET search_path TO pg_catalog, pg_temp;

+CREATE OR REPLACE PROCEDURE _timescaledb_functions.cagg_migrate_update_watermark(_mat_hypertable_id INTEGER)
+LANGUAGE sql AS
+$BODY$
+    INSERT INTO _timescaledb_catalog.continuous_aggs_watermark
+    VALUES (_mat_hypertable_id, _timescaledb_functions.cagg_watermark_materialized(_mat_hypertable_id))
+    ON CONFLICT (mat_hypertable_id) DO UPDATE SET watermark = excluded.watermark;
+$BODY$ SECURITY DEFINER SET search_path TO pg_catalog, pg_temp;
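A minimal usage sketch of the procedure just added (the id 42 is hypothetical; during the migration the real id is looked up from _timescaledb_catalog.continuous_agg.mat_hypertable_id):

    -- Upsert the stored watermark for materialization hypertable 42;
    -- repeated calls simply overwrite it via ON CONFLICT ... DO UPDATE.
    CALL _timescaledb_functions.cagg_migrate_update_watermark(42);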

-- Refresh new cagg created by the migration
CREATE OR REPLACE PROCEDURE _timescaledb_functions.cagg_migrate_execute_refresh_new_cagg (
    _cagg_data _timescaledb_catalog.continuous_agg,
@@ -365,6 +378,7 @@ $BODY$
DECLARE
    _cagg_name TEXT;
    _override BOOLEAN;
+   _mat_hypertable_id INTEGER;
BEGIN
    SELECT (config->>'override')::BOOLEAN
    INTO _override
@@ -378,6 +392,18 @@ BEGIN
        _cagg_name = _cagg_data.user_view_name;
    END IF;

+   --
+   -- Update new cagg watermark
+   --
+   SELECT h.id
+   INTO _mat_hypertable_id
+   FROM _timescaledb_catalog.continuous_agg ca
+   JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
+   WHERE user_view_schema = _cagg_data.user_view_schema
+   AND user_view_name = _plan_step.config->>'cagg_name_new';
+
+   CALL _timescaledb_functions.cagg_migrate_update_watermark(_mat_hypertable_id);
+
    --
    -- Since we're still having problems with the `refresh_continuous_aggregate` executed inside procedures
    -- and the issue isn't easy/trivial to fix we decided to skip this step here WARNING users to do it
@@ -407,6 +433,11 @@
    _stmt TEXT;
    _mat_schema_name TEXT;
    _mat_table_name TEXT;
+   _mat_schema_name_old TEXT;
+   _mat_table_name_old TEXT;
+   _query TEXT;
+   _select_columns TEXT;
+   _groupby_columns TEXT;
BEGIN
    SELECT h.schema_name, h.table_name
    INTO _mat_schema_name, _mat_table_name
@@ -415,17 +446,59 @@ BEGIN
    WHERE user_view_schema = _cagg_data.user_view_schema
    AND user_view_name = _plan_step.config->>'cagg_name_new';

-   _stmt := format(
-       'INSERT INTO %I.%I SELECT * FROM %I.%I WHERE %I >= %L AND %I < %L',
-       _mat_schema_name,
-       _mat_table_name,
-       _cagg_data.user_view_schema,
-       _cagg_data.user_view_name,
-       _plan_step.config->>'bucket_column_name',
-       _plan_step.config->>'start_ts',
-       _plan_step.config->>'bucket_column_name',
-       _plan_step.config->>'end_ts'
-   );
+   -- For realtime CAggs we need to read direct from the materialization hypertable
+   IF _cagg_data.materialized_only IS FALSE THEN
+       SELECT h.schema_name, h.table_name
+       INTO _mat_schema_name_old, _mat_table_name_old
+       FROM _timescaledb_catalog.continuous_agg ca
+       JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
+       WHERE user_view_schema = _cagg_data.user_view_schema
+       AND user_view_name = _cagg_data.user_view_name;
+
+       _query :=
+           split_part(
+               pg_get_viewdef(format('%I.%I', _cagg_data.user_view_schema, _cagg_data.user_view_name)),
+               'UNION ALL',
+               1);
+
+       _groupby_columns :=
+           split_part(
+               _query,
+               'GROUP BY ',
+               2);
+
+       _select_columns :=
+           split_part(
+               _query,
+               format('FROM %I.%I', _mat_schema_name_old, _mat_table_name_old),
+               1);
+
+       _stmt := format(
+           'INSERT INTO %I.%I %s FROM %I.%I WHERE %I >= %L AND %I < %L GROUP BY %s',
+           _mat_schema_name,
+           _mat_table_name,
+           _select_columns,
+           _mat_schema_name_old,
+           _mat_table_name_old,
+           _plan_step.config->>'bucket_column_name',
+           _plan_step.config->>'start_ts',
+           _plan_step.config->>'bucket_column_name',
+           _plan_step.config->>'end_ts',
+           _groupby_columns
+       );
+   ELSE
+       _stmt := format(
+           'INSERT INTO %I.%I SELECT * FROM %I.%I WHERE %I >= %L AND %I < %L',
+           _mat_schema_name,
+           _mat_table_name,
+           _mat_schema_name_old,
+           _mat_table_name_old,
+           _plan_step.config->>'bucket_column_name',
+           _plan_step.config->>'start_ts',
+           _plan_step.config->>'bucket_column_name',
+           _plan_step.config->>'end_ts'
+       );
+   END IF;

    EXECUTE _stmt;
END;
@@ -600,4 +673,4 @@ $BODY$;
-- Migrate a CAgg which is using the experimental time_bucket_ng function
-- into a CAgg using the regular time_bucket function
CREATE OR REPLACE PROCEDURE _timescaledb_functions.cagg_migrate_to_time_bucket(cagg REGCLASS)
-AS '@MODULE_PATHNAME@', 'ts_continuous_agg_migrate_to_time_bucket' LANGUAGE C;
+AS '@MODULE_PATHNAME@', 'ts_continuous_agg_migrate_to_time_bucket' LANGUAGE C;
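For context on the split_part() calls in the hunk above: pg_get_viewdef() on a realtime CAgg returns the materialized branch, then `UNION ALL`, then the not-yet-materialized branch, so the text before the first `UNION ALL` is exactly the query over the materialization hypertable. A sketch of the shape being carved up (schema, table, and column names are hypothetical):

    -- Hypothetical pg_get_viewdef() output for a realtime CAgg:
    SELECT bucket, avg(temp) AS avg_temp                   -- _select_columns: text before FROM
    FROM _timescaledb_internal._materialized_hypertable_42
    GROUP BY bucket                                        -- _groupby_columns: text after 'GROUP BY '
    UNION ALL                                              -- split_part(..., 'UNION ALL', 1) cuts here
    SELECT time_bucket('1 hour', "time") AS bucket, avg(temp) AS avg_temp
    FROM conditions
    WHERE "time" >= _timescaledb_functions.cagg_watermark(42)
    GROUP BY 1;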
1 change: 1 addition & 0 deletions sql/updates/reverse-dev.sql
@@ -57,3 +57,4 @@ ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.chunk_columnstore_
DROP VIEW timescaledb_information.hypertable_columnstore_settings;
DROP VIEW timescaledb_information.chunk_columnstore_settings;

+DROP PROCEDURE IF EXISTS _timescaledb_functions.cagg_migrate_update_watermark(INTEGER);
2 changes: 1 addition & 1 deletion src/bgw/job_stat_history.c
@@ -270,7 +270,7 @@ ts_bgw_job_stat_history_mark_end(BgwJob *job, JobResult result, Jsonb *edata)
                                  bgw_job_stat_history_tuple_mark_end,
                                  NULL,
                                  &context,
-                                 ShareRowExclusiveLock))
+                                 RowExclusiveLock))
        ereport(ERROR,
                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                 errmsg("unable to find job history " INT64_FORMAT, new_job->job_history.id)));
5 changes: 4 additions & 1 deletion src/indexing.c
@@ -96,7 +96,10 @@ ts_indexing_verify_columns(const Hyperspace *hs, const List *indexelems)
                (errcode(ERRCODE_TS_BAD_HYPERTABLE_INDEX_DEFINITION),
                 errmsg("cannot create a unique index without the column \"%s\" (used in "
                        "partitioning)",
-                       NameStr(dim->fd.column_name))));
+                       NameStr(dim->fd.column_name)),
+                errhint(
+                    "If you're creating a hypertable on a table with a primary key, ensure "
+                    "the partitioning column is part of the primary or composite key.")));
    }
}
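The new hint targets a common failure when converting a table with a primary key: the unique index behind the key must include the partitioning column. A sketch of the failing and working definitions (the table and column names are hypothetical):

    -- Fails with the error above: the primary key does not cover the time column.
    CREATE TABLE conditions (id bigint PRIMARY KEY, "time" timestamptz NOT NULL, temp float8);
    SELECT create_hypertable('conditions', 'time');

    -- Works: make the partitioning column part of a composite primary key.
    CREATE TABLE conditions (
        id bigint,
        "time" timestamptz NOT NULL,
        temp float8,
        PRIMARY KEY (id, "time")
    );
    SELECT create_hypertable('conditions', 'time');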
(Diff truncated: the remaining changed files are not rendered.)