Commit 7a23931
Merge remote-tracking branch 'origin/main' into HEAD
akuzm committed Feb 19, 2025
2 parents 26b3bcb + f7e626b commit 7a23931
Showing 100 changed files with 3,911 additions and 657 deletions.
8 changes: 4 additions & 4 deletions .github/ci_settings.py
@@ -12,19 +12,19 @@
#

PG14_EARLIEST = "14.0"
PG14_LATEST = "14.15"
PG14_LATEST = "14.16"
PG14_ABI_MIN = "14.0"

PG15_EARLIEST = "15.0"
PG15_LATEST = "15.10"
PG15_LATEST = "15.11"
PG15_ABI_MIN = "15.0"

PG16_EARLIEST = "16.0"
PG16_LATEST = "16.6"
PG16_LATEST = "16.7"
PG16_ABI_MIN = "16.0"

PG17_EARLIEST = "17.0"
PG17_LATEST = "17.2"
PG17_LATEST = "17.3"
PG17_ABI_MIN = "17.0"

PG_LATEST = [PG14_LATEST, PG15_LATEST, PG16_LATEST, PG17_LATEST]
15 changes: 15 additions & 0 deletions .github/gh_matrix_builder.py
@@ -176,6 +176,21 @@ def macos_config(overrides):

m["include"].append(build_debug_config({"pg": PG17_LATEST}))

# Also test on ARM. See the available runners here:
# https://github.com/timescale/timescaledb/actions/runners
m["include"].append(
build_debug_config(
{
"pg": PG17_LATEST,
"os": "Ubuntu22.04-2Core",
# We need to enable ARM crypto extensions to build the vectorized grouping
# code. The actual architecture for our ARM CI runner is reported as:
# -imultiarch aarch64-linux-gnu - -mlittle-endian -mabi=lp64 -march=armv8.2-a+crypto+fp16+rcpc+dotprod
"pg_extra_args": "--enable-debug --enable-cassert --without-llvm CFLAGS=-march=armv8.2-a+crypto",
}
)
)

# test timescaledb with release config on latest postgres release on macOS
m["include"].append(build_release_config(macos_config({"pg": PG17_LATEST})))

24 changes: 24 additions & 0 deletions .github/workflows/backport-trigger.yaml
@@ -0,0 +1,24 @@
# A helper workflow to trigger the run of the backport workflow on the main
# branch when a release branch or the main branch is changed.
name: Trigger the Backport Workflow
"on":
push:
branches:
- main
- ?.*.x
pull_request:
paths: .github/workflows/backport-trigger.yaml

jobs:
backport_trigger:
runs-on: ubuntu-latest
steps:
- name: Checkout TimescaleDB
uses: actions/checkout@v4

- name: Trigger the Backport Workflow
env:
GH_TOKEN: ${{ secrets.ORG_AUTOMATION_TOKEN }}
run: |
gh workflow run backport.yaml --ref main
6 changes: 2 additions & 4 deletions .github/workflows/backport.yaml
@@ -7,11 +7,9 @@ on:
- cron: '0 12 * * 1-5'
workflow_dispatch:
push:
# This is also triggered from backport-trigger.yaml when the release branches
# are updated.
branches:
# Ideally we want to create a backport PR as soon as the fix is merged
# into the main branch
- main

# You can run and debug new versions of the backport script by pushing it
# to this branch. workflow_dispatch can only be run through the GitHub CLI for
# branches that are not main, so it's inconvenient.
2 changes: 1 addition & 1 deletion .github/workflows/linux-32bit-build-and-test.yaml
@@ -54,7 +54,7 @@ jobs:
append-* transparent_decompression-*
transparent_decompress_chunk-* pg_dump telemetry bgw_db_scheduler*
hypercore_vacuum vectorized_aggregation vector_agg_text
vector_agg_groupagg
vector_agg_groupagg hypercore_parallel hypercore_vectoragg
SKIPS: chunk_adaptive histogram_test-*
EXTENSIONS: "postgres_fdw test_decoding pageinspect pgstattuple"
strategy:
3 changes: 3 additions & 0 deletions .github/workflows/linux-build-and-test.yaml
@@ -158,6 +158,9 @@ jobs:
- name: Build TimescaleDB
run: |
# Show the actual architecture this CI runner has
"$CC" -march=native -E -v - </dev/null 2>&1 | grep cc1
./bootstrap -DCMAKE_BUILD_TYPE="${{ matrix.build_type }}" \
-DPG_SOURCE_DIR=~/$PG_SRC_DIR -DPG_PATH=~/$PG_INSTALL_DIR \
${{ matrix.tsdb_build_args }} -DCODECOVERAGE=${{ matrix.coverage }} \
2 changes: 2 additions & 0 deletions .github/workflows/minor_release.yaml
@@ -10,6 +10,8 @@ jobs:
minor-release-feature-freeze:
name: Minor Release - Feature Freeze
runs-on: ubuntu-latest
environment:
name: Release Ceremonies

steps:
- name: Install Linux Dependencies
4 changes: 2 additions & 2 deletions .github/workflows/prerelease-tests-on-release-branch.yaml
@@ -51,7 +51,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.ORG_AUTOMATION_TOKEN }}
run: |
git push upstream HEAD:prerelease_test --force
git push origin HEAD:prerelease_test --force
# Coverity Testing
Expand All @@ -74,4 +74,4 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.ORG_AUTOMATION_TOKEN }}
run: |
git push upstream HEAD:coverity_scan --force
git push origin HEAD:coverity_scan --force
1 change: 0 additions & 1 deletion .unreleased/hash-groupagg-bug

This file was deleted.

1 change: 1 addition & 0 deletions .unreleased/pr_7632
@@ -0,0 +1 @@
Implements: #7632 Optimize recompression for chunks without segmentby
1 change: 1 addition & 0 deletions .unreleased/pr_7655
@@ -0,0 +1 @@
Implements: #7655 Support vectorized aggregation on Hypercore TAM
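
Vectorized aggregation applies to chunks stored via the hypercore table access method. A minimal sketch of how a chunk ends up on that path, with hypothetical chunk and table names:

```sql
-- Convert a chunk to the hypercore table access method (names are illustrative)
ALTER TABLE _timescaledb_internal._hyper_1_1_chunk SET ACCESS METHOD hypercore;

-- Aggregates over such chunks can now take the vectorized plan
EXPLAIN (COSTS OFF) SELECT count(*) FROM conditions;
```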
1 change: 0 additions & 1 deletion .unreleased/pr_7694

This file was deleted.

1 change: 1 addition & 0 deletions .unreleased/pr_7701
@@ -0,0 +1 @@
Implements: #7701 Implement a custom compression algorithm for bool columns. It is experimental and can undergo backwards-incompatible changes. For testing, enable it using timescaledb.enable_bool_compression = on.
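
As the entry itself notes, the experimental codec is gated behind a GUC; enabling it for the current session looks like this:

```sql
-- Experimental: opt in to the bool compression algorithm (off by default)
SET timescaledb.enable_bool_compression = on;
```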
1 change: 1 addition & 0 deletions .unreleased/pr_7707
@@ -0,0 +1 @@
Implements: #7707 Support ALTER COLUMN SET NOT NULL on compressed chunks
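
A minimal illustration of what this change permits, assuming a hypothetical hypertable named conditions that already has compressed chunks:

```sql
-- Now allowed even when some chunks of the hypertable are compressed
ALTER TABLE conditions ALTER COLUMN device_id SET NOT NULL;
```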
17 changes: 17 additions & 0 deletions CHANGELOG.md
@@ -4,6 +4,23 @@
`psql` with the `-X` flag to prevent any `.psqlrc` commands from
accidentally triggering the load of a previous DB version.**

## 2.18.2 (2025-02-19)

This release contains performance improvements and bug fixes since
the 2.18.1 release. We recommend that you upgrade at the next
available opportunity.

**Bugfixes**
* [#7686](https://github.com/timescale/timescaledb/pull/7686) Potential wrong aggregation result when using vectorized aggregation with hash grouping in reverse order
* [#7694](https://github.com/timescale/timescaledb/pull/7694) Fix ExplainHook breaking call chain
* [#7695](https://github.com/timescale/timescaledb/pull/7695) Block dropping internal compressed chunks with `drop_chunk()`
* [#7711](https://github.com/timescale/timescaledb/pull/7711) License error when using hypercore handler
* [#7712](https://github.com/timescale/timescaledb/pull/7712) Respect other extensions' ExecutorStart hooks

**Thanks**
* @davidmehren and @jflambert for reporting an issue with extension hooks
* @jflambert for reporting a bug with license errors shown in autovacuum

## 2.18.1 (2025-02-10)

This release contains performance improvements and bug fixes since
12 changes: 7 additions & 5 deletions README.md
@@ -36,7 +36,7 @@ See [other installation options](https://docs.timescale.com/self-hosted/latest/i

## Create a hypertable

You create a regular table and then convert it into a hypertable. A hypertable automatically partitions data into chunks based on your configuration.
You create a regular table and then convert it into a hypertable. A hypertable automatically partitions data into chunks to accelerate your queries.

```sql
-- Create timescaledb extension
@@ -61,7 +61,7 @@ See more:
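
The README's full snippet is collapsed in this hunk; a minimal sketch of the create-then-convert flow it describes, with illustrative names:

```sql
-- Create a regular table...
CREATE TABLE conditions (
    time        TIMESTAMPTZ NOT NULL,
    device_id   TEXT,
    temperature DOUBLE PRECISION
);

-- ...then convert it into a hypertable partitioned by time
SELECT create_hypertable('conditions', 'time');
```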

## Enable columnstore

TimescaleDB's hypercore is a hybrid row-columnar store that boosts analytical query performance on your time-series and event data, while reducing data size by more than 90%. This keeps your queries operating at lightning speed and ensures low storage costs as you scale. Data is inserted in row format in the rowstore and converted to columnar format in the columnstore based on your configuration.
TimescaleDB's hypercore is a hybrid row-columnar store that boosts analytical query performance on your time-series and event data, while reducing data size by more than 90%. This keeps your analytics operating at lightning speed and ensures low storage costs as you scale. Data is inserted in row format in the rowstore and converted to columnar format in the columnstore based on your configuration.
- Configure the columnstore on a hypertable:
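
A hedged sketch of that configuration step, using the established compression settings and the illustrative conditions hypertable from above:

```sql
-- Enable the columnstore and choose how batches are segmented and ordered
ALTER TABLE conditions SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = 'device_id',
    timescaledb.compress_orderby   = 'time DESC'
);
```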
@@ -141,7 +141,7 @@ See more:
## Create continuous aggregates
Continuous aggregates are designed to make queries on very large datasets run faster. They continuously and incrementally refresh a query in the background, so that when you run such a query, only the data that has changed needs to be computed, not the entire dataset. This is what makes them different from regular PostgreSQL [materialized views](https://www.postgresql.org/docs/current/rules-materializedviews.html), which cannot be incrementally materialized and have to be rebuilt from scratch every time you want to refresh them.
Continuous aggregates make real-time analytics run faster on very large datasets. They continuously and incrementally refresh a query in the background, so that when you run such a query, only the data that has changed needs to be computed, not the entire dataset. This is what makes them different from regular PostgreSQL [materialized views](https://www.postgresql.org/docs/current/rules-materializedviews.html), which cannot be incrementally materialized and have to be rebuilt from scratch every time you want to refresh them.
For example, create a continuous aggregate view for daily weather data in two simple steps:
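
The full example is collapsed in this hunk; a minimal sketch of the two steps, reusing the illustrative conditions hypertable:

```sql
-- Step 1: define the continuous aggregate
CREATE MATERIALIZED VIEW conditions_daily
WITH (timescaledb.continuous) AS
SELECT time_bucket('1 day', time) AS day,
       device_id,
       avg(temperature) AS avg_temp
FROM conditions
GROUP BY day, device_id;

-- Step 2: keep it refreshed automatically in the background
SELECT add_continuous_aggregate_policy('conditions_daily',
    start_offset      => INTERVAL '3 days',
    end_offset        => INTERVAL '1 hour',
    schedule_interval => INTERVAL '1 hour');
```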
@@ -181,9 +181,11 @@ See more:
## Want TimescaleDB hosted and managed for you? Try Timescale Cloud
[Timescale Cloud](https://docs.timescale.com/getting-started/latest/) is a cloud-based PostgreSQL platform for resource-intensive workloads. We help you build faster, scale further, and stay under budget. A Timescale Cloud service is a single optimized 100% PostgreSQL database instance that you use as is, or extend with capabilities specific to your business needs. The available capabilities are:
[Timescale Cloud](https://docs.timescale.com/getting-started/latest/) is the modern PostgreSQL data platform for all your applications. It enhances PostgreSQL to handle time series, events, real-time analytics, and vector search—all in a single database alongside transactional workloads. You get one system that handles live data ingestion, late and out-of-order updates, and low-latency queries, with the performance, reliability, and scalability your app needs. Ideal for IoT, crypto, finance, SaaS, and myriad other domains, Timescale Cloud allows you to build data-heavy, mission-critical apps while retaining the familiarity and reliability of PostgreSQL.
- **Time-series and analytics**: PostgreSQL with TimescaleDB. The PostgreSQL you know and love, supercharged with functionality for storing and querying time-series data at scale for analytics and other use cases. Get faster time-based queries with hypertables, continuous aggregates, and columnar storage. Save on storage with native compression, data retention policies, and bottomless data tiering to Amazon S3.
A Timescale Cloud service is a single optimized 100% PostgreSQL database instance that you use as is, or extend with capabilities specific to your business needs. The available capabilities are:
- **Time-series and analytics**: PostgreSQL with TimescaleDB. The PostgreSQL you know and love, supercharged with functionality for storing and querying time-series data at scale for real-time analytics and other use cases. Get faster time-based queries with hypertables, continuous aggregates, and columnar storage. Save on storage with native compression, data retention policies, and bottomless data tiering to Amazon S3.
- **AI and vector**: PostgreSQL with vector extensions. Use PostgreSQL as a vector database with purpose built extensions for building AI applications from start to scale. Get fast and accurate similarity search with the pgvector and pgvectorscale extensions. Create vector embeddings and perform LLM reasoning on your data with the pgai extension.
- **PostgreSQL**: the trusted industry-standard RDBMS. Ideal for applications requiring strong data consistency, complex relationships, and advanced querying capabilities. Get ACID compliance, extensive SQL support, JSON handling, and extensibility through custom functions, data types, and extensions.
All services include all the cloud tooling you'd expect for production use: [automatic backups](https://docs.timescale.com/use-timescale/latest/backup-restore/backup-restore-cloud/), [high availability](https://docs.timescale.com/use-timescale/latest/ha-replicas/), [read replicas](https://docs.timescale.com/use-timescale/latest/ha-replicas/read-scaling/), [data forking](https://docs.timescale.com/use-timescale/latest/services/service-management/#fork-a-service), [connection pooling](https://docs.timescale.com/use-timescale/latest/services/connection-pooling/), [tiered storage](https://docs.timescale.com/use-timescale/latest/data-tiering/), [usage-based storage](https://docs.timescale.com/about/latest/pricing-and-account-management/), and much more.
3 changes: 2 additions & 1 deletion scripts/check_sql_script.py
@@ -90,7 +90,8 @@ def visit_ViewStmt(self, _ancestors, node):

def visit_CreateFunctionStmt(self, _ancestors, node):
if not node.replace:
self.error(node, "Consider using CREATE OR REPLACE FUNCTION")
fn_str = ("FUNCTION", "PROCEDURE")[node.is_procedure is True]
self.error(node, f"Consider using CREATE OR REPLACE {fn_str}")

return Skip

2 changes: 1 addition & 1 deletion scripts/merge_changelogs.sh
@@ -12,7 +12,7 @@ echo_changelog() {
echo "${1}"
# skip the template and release notes files
grep -i "${2}" .unreleased/* | \
cut -d: -f3- | sort | uniq | sed -e 's/^[[:space:]]*//' -e 's/^/* /'
cut -d: -f3- | sort | uniq | sed -e 's/^[[:space:]]*//' -e 's/^/* /' -e 's!#\([0-9]\+\)![#\1](https://github.com/timescale/timescaledb/pull/\1)!'
echo
}

6 changes: 4 additions & 2 deletions sql/CMakeLists.txt
@@ -47,7 +47,8 @@ set(MOD_FILES
updates/2.17.0--2.17.1.sql
updates/2.17.1--2.17.2.sql
updates/2.17.2--2.18.0.sql
updates/2.18.0--2.18.1.sql)
updates/2.18.0--2.18.1.sql
updates/2.18.1--2.18.2.sql)

# The downgrade file to generate a downgrade script for the current version, as
# specified in version.config
@@ -94,7 +95,8 @@ set(OLD_REV_FILES
2.17.1--2.17.0.sql
2.17.2--2.17.1.sql
2.18.0--2.17.2.sql
2.18.1--2.18.0.sql)
2.18.1--2.18.0.sql
2.18.2--2.18.1.sql)

set(MODULE_PATHNAME "$libdir/timescaledb-${PROJECT_VERSION_MOD}")
set(LOADER_PATHNAME "$libdir/timescaledb")
5 changes: 5 additions & 0 deletions sql/pre_install/types.functions.sql
@@ -39,6 +39,11 @@ CREATE OR REPLACE FUNCTION _timescaledb_functions.compressed_data_info(_timescal
LANGUAGE C STRICT IMMUTABLE
AS '@MODULE_PATHNAME@', 'ts_compressed_data_info';

CREATE OR REPLACE FUNCTION _timescaledb_functions.compressed_data_has_nulls(_timescaledb_internal.compressed_data)
RETURNS BOOL
LANGUAGE C STRICT IMMUTABLE
AS '@MODULE_PATHNAME@', 'ts_compressed_data_has_nulls';

CREATE OR REPLACE FUNCTION _timescaledb_functions.dimension_info_in(cstring)
RETURNS _timescaledb_internal.dimension_info
LANGUAGE C STRICT IMMUTABLE
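A hedged usage sketch for the new introspection function; the compressed chunk and column names are hypothetical, and internal functions like this are debugging aids rather than stable API:

```sql
-- Does any compressed batch of the temperature column contain NULLs?
SELECT _timescaledb_functions.compressed_data_has_nulls(temperature)
FROM _timescaledb_internal.compress_hyper_2_4_chunk
LIMIT 5;
```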
3 changes: 3 additions & 0 deletions sql/updates/2.18.1--2.18.2.sql
@@ -0,0 +1,3 @@
ALTER TABLE _timescaledb_internal.bgw_job_stat_history
ALTER COLUMN succeeded DROP NOT NULL,
ALTER COLUMN succeeded DROP DEFAULT;
5 changes: 5 additions & 0 deletions sql/updates/2.18.2--2.18.1.sql
@@ -0,0 +1,5 @@
UPDATE _timescaledb_internal.bgw_job_stat_history SET succeeded = FALSE WHERE succeeded IS NULL;

ALTER TABLE _timescaledb_internal.bgw_job_stat_history
ALTER COLUMN succeeded SET NOT NULL,
ALTER COLUMN succeeded SET DEFAULT FALSE;
10 changes: 7 additions & 3 deletions sql/updates/latest-dev.sql
@@ -1,3 +1,7 @@
ALTER TABLE _timescaledb_internal.bgw_job_stat_history
ALTER COLUMN succeeded DROP NOT NULL,
ALTER COLUMN succeeded DROP DEFAULT;
CREATE FUNCTION _timescaledb_functions.compressed_data_has_nulls(_timescaledb_internal.compressed_data)
RETURNS BOOL
LANGUAGE C STRICT IMMUTABLE
AS '@MODULE_PATHNAME@', 'ts_update_placeholder';

INSERT INTO _timescaledb_catalog.compression_algorithm( id, version, name, description) values
( 5, 1, 'COMPRESSION_ALGORITHM_BOOL', 'bool');
6 changes: 2 additions & 4 deletions sql/updates/reverse-dev.sql
@@ -1,5 +1,3 @@
UPDATE _timescaledb_internal.bgw_job_stat_history SET succeeded = FALSE WHERE succeeded IS NULL;
DROP FUNCTION IF EXISTS _timescaledb_functions.compressed_data_has_nulls(_timescaledb_internal.compressed_data);

ALTER TABLE _timescaledb_internal.bgw_job_stat_history
ALTER COLUMN succeeded SET NOT NULL,
ALTER COLUMN succeeded SET DEFAULT FALSE;
DELETE FROM _timescaledb_catalog.compression_algorithm WHERE id = 5 AND version = 1 AND name = 'COMPRESSION_ALGORITHM_BOOL';
8 changes: 8 additions & 0 deletions src/chunk.c
@@ -4044,6 +4044,14 @@ ts_chunk_drop_single_chunk(PG_FUNCTION_ARGS)
true);
Assert(ch != NULL);
ts_chunk_validate_chunk_status_for_operation(ch, CHUNK_DROP, true /*throw_error */);

if (ts_chunk_contains_compressed_data(ch))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("dropping compressed chunks not supported"),
errhint("Please drop the corresponding chunk on the uncompressed hypertable "
"instead.")));

/* do not drop any chunk dependencies */
ts_chunk_drop(ch, DROP_RESTRICT, LOG);
PG_RETURN_BOOL(true);
34 changes: 31 additions & 3 deletions src/cross_module_fn.c
@@ -68,6 +68,7 @@ CROSSMODULE_WRAPPER(compressed_data_recv);
CROSSMODULE_WRAPPER(compressed_data_in);
CROSSMODULE_WRAPPER(compressed_data_out);
CROSSMODULE_WRAPPER(compressed_data_info);
CROSSMODULE_WRAPPER(compressed_data_has_nulls);
CROSSMODULE_WRAPPER(deltadelta_compressor_append);
CROSSMODULE_WRAPPER(deltadelta_compressor_finish);
CROSSMODULE_WRAPPER(gorilla_compressor_append);
@@ -76,6 +77,8 @@ CROSSMODULE_WRAPPER(dictionary_compressor_append);
CROSSMODULE_WRAPPER(dictionary_compressor_finish);
CROSSMODULE_WRAPPER(array_compressor_append);
CROSSMODULE_WRAPPER(array_compressor_finish);
CROSSMODULE_WRAPPER(bool_compressor_append);
CROSSMODULE_WRAPPER(bool_compressor_finish);
CROSSMODULE_WRAPPER(create_compressed_chunk);
CROSSMODULE_WRAPPER(compress_chunk);
CROSSMODULE_WRAPPER(decompress_chunk);
@@ -134,8 +137,13 @@
* parsing index options instead.
*/
static Datum
error_pg_community_hypercore_proxy_handler(PG_FUNCTION_ARGS)
process_hypercore_proxy_handler(PG_FUNCTION_ARGS)
{
ts_license_enable_module_loading();

if (ts_cm_functions->hypercore_proxy_handler != process_hypercore_proxy_handler)
return ts_cm_functions->hypercore_proxy_handler(fcinfo);

IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

amroutine->amstrategies = 0;
@@ -247,6 +255,24 @@
pg_unreachable();
}

/*
* Ensure that the TSL library is loaded before trying to use the handler.
*
* As for the functions above, the TSL library might not be loaded when this
* function is called, so we try to load the TSL module first, but fall back
* on the Apache error message if that is not possible.
*/
static Datum
process_hypercore_handler(PG_FUNCTION_ARGS)
{
ts_license_enable_module_loading();
if (ts_cm_functions->hypercore_handler != process_hypercore_handler)
return ts_cm_functions->hypercore_handler(fcinfo);

error_no_default_fn_pg_community(fcinfo);
pg_unreachable();
}

static DDLResult
process_cagg_viewstmt_default(Node *stmt, const char *query_string, void *pstmt,
WithClauseResult *with_clause_options)
@@ -395,8 +421,10 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = {
.dictionary_compressor_finish = error_no_default_fn_pg_community,
.array_compressor_append = error_no_default_fn_pg_community,
.array_compressor_finish = error_no_default_fn_pg_community,
.hypercore_handler = error_no_default_fn_pg_community,
.hypercore_proxy_handler = error_pg_community_hypercore_proxy_handler,
.bool_compressor_append = error_no_default_fn_pg_community,
.bool_compressor_finish = error_no_default_fn_pg_community,
.hypercore_handler = process_hypercore_handler,
.hypercore_proxy_handler = process_hypercore_proxy_handler,
.is_compressed_tid = error_no_default_fn_pg_community,

.show_chunk = error_no_default_fn_pg_community,
3 changes: 3 additions & 0 deletions src/cross_module_fn.h
@@ -122,6 +122,7 @@ typedef struct CrossModuleFunctions
PGFunction compressed_data_in;
PGFunction compressed_data_out;
PGFunction compressed_data_info;
PGFunction compressed_data_has_nulls;
bool (*process_compress_table)(AlterTableCmd *cmd, Hypertable *ht,
WithClauseResult *with_clause_options);
void (*process_altertable_cmd)(Hypertable *ht, const AlterTableCmd *cmd);
@@ -149,6 +150,8 @@ typedef struct CrossModuleFunctions
PGFunction dictionary_compressor_finish;
PGFunction array_compressor_append;
PGFunction array_compressor_finish;
PGFunction bool_compressor_append;
PGFunction bool_compressor_finish;
PGFunction hypercore_handler;
PGFunction hypercore_proxy_handler;
PGFunction is_compressed_tid;