diff --git a/src/process_utility.c b/src/process_utility.c
index 9db6d417ce1..7cddcff235f 100644
--- a/src/process_utility.c
+++ b/src/process_utility.c
@@ -281,6 +281,7 @@ check_alter_table_allowed_on_ht_with_compression(Hypertable *ht, AlterTableStmt
 		case AT_ReAddStatistics:
 		case AT_SetCompression:
 		case AT_DropNotNull:
+		case AT_SetNotNull:
 #if PG15_GE
 		case AT_SetAccessMethod:
 #endif
@@ -2563,11 +2564,50 @@ process_altertable_validate_constraint_end(Hypertable *ht, AlterTableCmd *cmd)
 	foreach_chunk(ht, validate_hypertable_constraint, cmd);
 }
 
+/*
+ * Validate that SET NOT NULL is OK for this chunk.
+ *
+ * Throws an error if SET NOT NULL is not allowed on this chunk. Right now,
+ * SET NOT NULL is allowed on chunks that are either fully decompressed or
+ * are using the Hypercore table access method.
+ */
+static void
+validate_set_not_null(Hypertable *ht, Oid chunk_relid, void *arg)
+{
+	Chunk *chunk = ts_chunk_get_by_relid(chunk_relid, true);
+	if (ts_chunk_is_compressed(chunk) && !ts_is_hypercore_am(chunk->amoid))
+	{
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("operation not supported on compressed chunks not using the "
+						"\"hypercore\" table access method"),
+				 errdetail("Chunk %s.%s is using the heap table access method and has compressed "
+						   "data.",
+						   NameStr(chunk->fd.schema_name),
+						   NameStr(chunk->fd.table_name)),
+				 errhint("Either decompress all chunks of the hypertable or use \"ALTER TABLE "
+						 "%s.%s SET ACCESS METHOD hypercore\" on all chunks to change access "
+						 "method.",
+						 NameStr(chunk->fd.schema_name),
+						 NameStr(chunk->fd.table_name))));
+	}
+}
+
+/*
+ * Check that we are not dropping NOT NULL from partitioning (dimension)
+ * columns and that all chunks support the modification.
+ */
 static void
-process_altertable_drop_not_null(Hypertable *ht, AlterTableCmd *cmd)
+process_altertable_alter_not_null_start(Hypertable *ht, AlterTableCmd *cmd)
 {
 	int i;
 
+	if (cmd->subtype == AT_SetNotNull)
+		foreach_chunk(ht, validate_set_not_null, cmd);
+
+	if (cmd->subtype != AT_DropNotNull)
+		return;
+
 	for (i = 0; i < ht->space->num_dimensions; i++)
 	{
 		Dimension *dim = &ht->space->dimensions[i];
@@ -3803,9 +3843,10 @@ process_altertable_start_table(ProcessUtilityArgs *args)
 					verify_constraint_hypertable(ht, cmd->def);
 			}
 			break;
+		case AT_SetNotNull:
 		case AT_DropNotNull:
 			if (ht != NULL)
-				process_altertable_drop_not_null(ht, cmd);
+				process_altertable_alter_not_null_start(ht, cmd);
 			break;
 		case AT_AddColumn:
 #if PG16_LT
@@ -4187,6 +4228,8 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
 		case AT_DropCluster:
 			foreach_chunk(ht, process_altertable_chunk, cmd);
 			break;
+		case AT_SetNotNull:
+		case AT_DropNotNull:
 		case AT_SetRelOptions:
 		case AT_ResetRelOptions:
 		case AT_ReplaceRelOptions:
@@ -4213,8 +4256,6 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
 		case AT_SetStorage:
 		case AT_ColumnDefault:
 		case AT_CookedColumnDefault:
-		case AT_SetNotNull:
-		case AT_DropNotNull:
 		case AT_AddOf:
 		case AT_DropOf:
 		case AT_AddIdentity:
@@ -4494,8 +4535,8 @@ process_reassign_owned_start(ProcessUtilityArgs *args)
 			Oid newrole_oid = get_rolespec_oid(stmt->newrole, false);
 			HeapTuple tuple = ts_scanner_fetch_heap_tuple(ti, false, &should_free);
 
-			/* We do not need to check privileges here since ReassignOwnedObjects() will check the
-			 * privileges and error out if they are not correct. */
+			/* We do not need to check privileges here since ReassignOwnedObjects() will check
+			 * the privileges and error out if they are not correct. */
 			ts_bgw_job_update_owner(ti->scanrel, tuple, ts_scanner_get_tupledesc(ti), newrole_oid);
 
 			if (should_free)
@@ -4631,8 +4672,8 @@ process_create_stmt(ProcessUtilityArgs *args)
 				errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 				errmsg("hypercore access method not supported on \"%s\"", stmt->relation->relname),
 				errdetail("The hypercore access method is only supported for hypertables."),
-				errhint("It does not make sense to set the default access method for all tables "
-						"to \"%s\" since it is only supported for hypertables.",
+				errhint("It does not make sense to set the default access method for all "
+						"tables to \"%s\" since it is only supported for hypertables.",
 						TS_HYPERCORE_TAM_NAME));
 
 	return DDL_CONTINUE;
diff --git a/tsl/src/hypercore/utils.c b/tsl/src/hypercore/utils.c
index 4e8060a6130..5e6de7b81fc 100644
--- a/tsl/src/hypercore/utils.c
+++ b/tsl/src/hypercore/utils.c
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
diff --git a/tsl/test/expected/compression_ddl.out b/tsl/test/expected/compression_ddl.out
index 7caf0be32b1..ecbbdc8828b 100644
--- a/tsl/test/expected/compression_ddl.out
+++ b/tsl/test/expected/compression_ddl.out
@@ -2591,4 +2591,28 @@ SELECT count(*) FROM test2 WHERE i IS NULL;
      1
 (1 row)
 
-SET client_min_messages = NOTICE;
+SELECT count(compress_chunk(ch)) FROM show_chunks('test2') ch;
+ count
+-------
+    28
+(1 row)
+
+SELECT count(*) FROM test2 WHERE i IS NULL;
+ count
+-------
+     1
+(1 row)
+
+\set ON_ERROR_STOP 0
+ALTER TABLE test2 ALTER COLUMN i SET NOT NULL;
+ERROR: operation not supported on compressed chunks not using the "hypercore" table access method
+DELETE FROM test2 WHERE i IS NULL;
+SELECT count(*) FROM test2 WHERE i IS NULL;
+ count
+-------
+     0
+(1 row)
+
+ALTER TABLE test2 ALTER COLUMN i SET NOT NULL;
+ERROR: operation not supported on compressed chunks not using the "hypercore" table access method
+\set ON_ERROR_STOP 1
diff --git a/tsl/test/expected/compression_errors-14.out b/tsl/test/expected/compression_errors-14.out
index ab0c3d32f65..0f363c03fa5 100644
--- a/tsl/test/expected/compression_errors-14.out
+++ b/tsl/test/expected/compression_errors-14.out
@@ -239,7 +239,7 @@ DETAIL: Cannot drop column that is a hypertable partitioning (space or time) di
 ALTER TABLE foo DROP COLUMN b;
 ERROR: cannot drop orderby or segmentby column from a hypertable with compression enabled
 ALTER TABLE foo ALTER COLUMN t SET NOT NULL;
-ERROR: operation not supported on hypertables that have compression enabled
+ERROR: column "t" of relation "_hyper_10_2_chunk" contains null values
 ALTER TABLE foo RESET (timescaledb.compress);
 ERROR: compression options cannot be reset
 --can add constraints as long as no data is compressed
diff --git a/tsl/test/expected/compression_errors-15.out b/tsl/test/expected/compression_errors-15.out
index ab0c3d32f65..0f363c03fa5 100644
--- a/tsl/test/expected/compression_errors-15.out
+++ b/tsl/test/expected/compression_errors-15.out
@@ -239,7 +239,7 @@ DETAIL: Cannot drop column that is a hypertable partitioning (space or time) di
 ALTER TABLE foo DROP COLUMN b;
 ERROR: cannot drop orderby or segmentby column from a hypertable with compression enabled
 ALTER TABLE foo ALTER COLUMN t SET NOT NULL;
-ERROR: operation not supported on hypertables that have compression enabled
+ERROR: column "t" of relation "_hyper_10_2_chunk" contains null values
 ALTER TABLE foo RESET (timescaledb.compress);
 ERROR: compression options cannot be reset
 --can add constraints as long as no data is compressed
diff --git a/tsl/test/expected/compression_errors-16.out b/tsl/test/expected/compression_errors-16.out
index c40f4f7f125..9265ddd655b 100644
--- a/tsl/test/expected/compression_errors-16.out
+++ b/tsl/test/expected/compression_errors-16.out
@@ -239,7 +239,7 @@ DETAIL: Cannot drop column that is a hypertable partitioning (space or time) di
 ALTER TABLE foo DROP COLUMN b;
 ERROR: cannot drop orderby or segmentby column from a hypertable with compression enabled
 ALTER TABLE foo ALTER COLUMN t SET NOT NULL;
-ERROR: operation not supported on hypertables that have compression enabled
+ERROR: column "t" of relation "_hyper_10_2_chunk" contains null values
 ALTER TABLE foo RESET (timescaledb.compress);
 ERROR: compression options cannot be reset
 --can add constraints as long as no data is compressed
diff --git a/tsl/test/expected/compression_errors-17.out b/tsl/test/expected/compression_errors-17.out
index c40f4f7f125..9265ddd655b 100644
--- a/tsl/test/expected/compression_errors-17.out
+++ b/tsl/test/expected/compression_errors-17.out
@@ -239,7 +239,7 @@ DETAIL: Cannot drop column that is a hypertable partitioning (space or time) di
 ALTER TABLE foo DROP COLUMN b;
 ERROR: cannot drop orderby or segmentby column from a hypertable with compression enabled
 ALTER TABLE foo ALTER COLUMN t SET NOT NULL;
-ERROR: operation not supported on hypertables that have compression enabled
+ERROR: column "t" of relation "_hyper_10_2_chunk" contains null values
 ALTER TABLE foo RESET (timescaledb.compress);
 ERROR: compression options cannot be reset
 --can add constraints as long as no data is compressed
diff --git a/tsl/test/expected/hypercore_ddl.out b/tsl/test/expected/hypercore_ddl.out
index c6e23a0c920..51e65e8616f 100644
--- a/tsl/test/expected/hypercore_ddl.out
+++ b/tsl/test/expected/hypercore_ddl.out
@@ -39,10 +39,6 @@ select compress_chunk(show_chunks('readings'), hypercore_use_access_method => tr
  _timescaledb_internal._hyper_1_4_chunk
 (4 rows)
 
--- Insert some extra data to get some non-compressed data as well.
-insert into readings (time, location, device, temp, humidity, jdata)
-select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
-from generate_series('2022-06-01 00:01:00'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
 select chunk, amname from chunk_info where hypertable = 'readings'::regclass;
                   chunk                  |  amname
 ----------------------------------------+-----------
  _timescaledb_internal._hyper_1_1_chunk | hypercore
@@ -52,10 +48,163 @@ select chunk, amname from chunk_info where hypertable = 'readings'::regclass;
  _timescaledb_internal._hyper_1_4_chunk | hypercore
 (4 rows)
 
--- Pick a chunk to truncate that is not the first chunk. This is
+-- Pick a chunk to play with that is not the first chunk. This is
 -- mostly a precaution to make sure that there is no bias towards the
 -- first chunk and we could just as well pick the first chunk.
 select chunk from show_chunks('readings') x(chunk) limit 1 offset 3 \gset
+----------------------------------------------------------------
+-- Test ALTER TABLE .... ALTER COLUMN commands
+-- This should fail since "location" is NOT NULL
+\set ON_ERROR_STOP 0
+insert into readings(time,device,temp,humidity,jdata)
+values ('2024-01-01 00:00:10', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
+ERROR: null value in column "location" of relation "_hyper_1_9_chunk" violates not-null constraint
+\set ON_ERROR_STOP 1
+-- Test altering column definitions to drop NOT NULL and check that it
+-- propagates to the chunks. We just pick one chunk here and check
+-- that the setting propagates.
+alter table readings alter column location drop not null;
+\d readings
+                     Table "public.readings"
+  Column  |           Type           | Collation | Nullable | Default
+----------+--------------------------+-----------+----------+---------
+ time     | timestamp with time zone |           | not null |
+ location | integer                  |           |          |
+ device   | integer                  |           | not null |
+ temp     | numeric(4,1)             |           |          |
+ humidity | double precision         |           |          |
+ jdata    | jsonb                    |           |          |
+Indexes:
+    "readings_time_key" UNIQUE CONSTRAINT, btree ("time")
+Triggers:
+    ts_insert_blocker BEFORE INSERT ON readings FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
+Number of child tables: 4 (Use \d+ to list them.)
+
+\d :chunk
+         Table "_timescaledb_internal._hyper_1_4_chunk"
+  Column  |           Type           | Collation | Nullable | Default
+----------+--------------------------+-----------+----------+---------
+ time     | timestamp with time zone |           | not null |
+ location | integer                  |           |          |
+ device   | integer                  |           | not null |
+ temp     | numeric(4,1)             |           |          |
+ humidity | double precision         |           |          |
+ jdata    | jsonb                    |           |          |
+Indexes:
+    "4_4_readings_time_key" UNIQUE CONSTRAINT, btree ("time")
+Check constraints:
+    "constraint_4" CHECK ("time" >= 'Fri Jun 03 17:00:00 2022 PDT'::timestamp with time zone AND "time" < 'Sat Jun 04 17:00:00 2022 PDT'::timestamp with time zone)
+Inherits: readings
+
+-- This should now work since we allow NULL values
+insert into readings(time,device,temp,humidity,jdata)
+values ('2024-01-01 00:00:10', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
+select count(*) from readings where location is null;
+ count
+-------
+     1
+(1 row)
+
+select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
+NOTICE: chunk "_hyper_1_1_chunk" is already compressed
+NOTICE: chunk "_hyper_1_2_chunk" is already compressed
+NOTICE: chunk "_hyper_1_3_chunk" is already compressed
+NOTICE: chunk "_hyper_1_4_chunk" is already compressed
+             compress_chunk
+-----------------------------------------
+ _timescaledb_internal._hyper_1_1_chunk
+ _timescaledb_internal._hyper_1_2_chunk
+ _timescaledb_internal._hyper_1_3_chunk
+ _timescaledb_internal._hyper_1_4_chunk
+ _timescaledb_internal._hyper_1_10_chunk
+(5 rows)
+
+select count(*) from readings where location is null;
+ count
+-------
+     1
+(1 row)
+
+-- We insert another row with nulls that will end up in the
+-- non-compressed region.
+insert into readings(time,device,temp,humidity,jdata)
+values ('2024-01-02 00:00:10', 1, 66.0, 66.0, '{"magic": "more"}'::jsonb);
+-- We should not be able to set the not null before we have removed
+-- the null rows in the table. This works for hypercore-compressed
+-- chunks but not for heap-compressed chunks.
+\set ON_ERROR_STOP 0
+alter table readings alter column location set not null;
+ERROR: column "location" of relation "_hyper_1_10_chunk" contains null values
+\set ON_ERROR_STOP 1
+delete from readings where location is null;
+-- Compress the data to make sure that we are not working on
+-- non-compressed data.
+select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
+             compress_chunk
+-----------------------------------------
+ _timescaledb_internal._hyper_1_1_chunk
+ _timescaledb_internal._hyper_1_2_chunk
+ _timescaledb_internal._hyper_1_3_chunk
+ _timescaledb_internal._hyper_1_4_chunk
+ _timescaledb_internal._hyper_1_10_chunk
+ _timescaledb_internal._hyper_1_12_chunk
+(6 rows)
+
+select count(*) from readings where location is null;
+ count
+-------
+     0
+(1 row)
+
+alter table readings alter column location set not null;
+\d readings
+                     Table "public.readings"
+  Column  |           Type           | Collation | Nullable | Default
+----------+--------------------------+-----------+----------+---------
+ time     | timestamp with time zone |           | not null |
+ location | integer                  |           | not null |
+ device   | integer                  |           | not null |
+ temp     | numeric(4,1)             |           |          |
+ humidity | double precision         |           |          |
+ jdata    | jsonb                    |           |          |
+Indexes:
+    "readings_time_key" UNIQUE CONSTRAINT, btree ("time")
+Triggers:
+    ts_insert_blocker BEFORE INSERT ON readings FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
+Number of child tables: 6 (Use \d+ to list them.)
+
+\d :chunk
+         Table "_timescaledb_internal._hyper_1_4_chunk"
+  Column  |           Type           | Collation | Nullable | Default
+----------+--------------------------+-----------+----------+---------
+ time     | timestamp with time zone |           | not null |
+ location | integer                  |           | not null |
+ device   | integer                  |           | not null |
+ temp     | numeric(4,1)             |           |          |
+ humidity | double precision         |           |          |
+ jdata    | jsonb                    |           |          |
+Indexes:
+    "4_4_readings_time_key" UNIQUE CONSTRAINT, btree ("time")
+Check constraints:
+    "constraint_4" CHECK ("time" >= 'Fri Jun 03 17:00:00 2022 PDT'::timestamp with time zone AND "time" < 'Sat Jun 04 17:00:00 2022 PDT'::timestamp with time zone)
+Inherits: readings
+
+select count(*) from readings where location is null;
+ count
+-------
+     0
+(1 row)
+
+----------------------------------------------------------------
+-- TRUNCATE test
+-- We keep the truncate test last in the file to avoid having to
+-- re-populate it.
+-- Insert some extra data to get some non-compressed data as
+-- well. This checks that truncate will deal with write-store
+-- (WS) and read-store (RS)
+insert into readings (time, location, device, temp, humidity, jdata)
+select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
+from generate_series('2022-06-01 00:01:00'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
 -- Check that the number of bytes in the table before and after the
 -- truncate.
 --
@@ -68,7 +217,7 @@ select pg_table_size(chunk) as chunk_size,
        where chunk = :'chunk'::regclass;
  chunk_size | compressed_chunk_size
 ------------+-----------------------
-      40960 |                 57344
+      49152 |                 73728
 (1 row)
 
 truncate :chunk;
@@ -88,7 +237,7 @@ select (select count(*) from readings) tuples,
        (select count(*) from show_chunks('readings')) chunks;
  tuples | chunks
 --------+--------
-   1560 |      4
+   1560 |      6
 (1 row)
 
 truncate readings;
@@ -99,32 +248,3 @@ select (select count(*) from readings) tuples,
      0 |      0
 (1 row)
 
-\set ON_ERROR_STOP 0
-insert into readings(time,device,temp,humidity,jdata)
-values ('2024-01-01 00:00:00', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
-ERROR: null value in column "location" of relation "_hyper_1_9_chunk" violates not-null constraint
-\set ON_ERROR_STOP 1
--- Test altering column definitions
-alter table readings
-    alter column location drop not null;
--- This should now work.
-insert into readings(time,device,temp,humidity,jdata)
-values ('2024-01-01 00:00:00', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
-select count(*) from readings where location is null;
- count
--------
-     1
-(1 row)
-
-select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
-             compress_chunk
------------------------------------------
- _timescaledb_internal._hyper_1_10_chunk
-(1 row)
-
-select count(*) from readings where location is null;
- count
--------
-     1
-(1 row)
-
diff --git a/tsl/test/sql/compression_ddl.sql b/tsl/test/sql/compression_ddl.sql
index d06c0b73eb9..35ab26d35f6 100644
--- a/tsl/test/sql/compression_ddl.sql
+++ b/tsl/test/sql/compression_ddl.sql
@@ -1069,4 +1069,12 @@ SELECT count(compress_chunk(ch)) FROM show_chunks('test2') ch;
 SELECT count(*) FROM test2 WHERE i IS NULL;
 SELECT count(decompress_chunk(ch)) FROM show_chunks('test2') ch;
 SELECT count(*) FROM test2 WHERE i IS NULL;
-SET client_min_messages = NOTICE;
+SELECT count(compress_chunk(ch)) FROM show_chunks('test2') ch;
+SELECT count(*) FROM test2 WHERE i IS NULL;
+
+\set ON_ERROR_STOP 0
+ALTER TABLE test2 ALTER COLUMN i SET NOT NULL;
+DELETE FROM test2 WHERE i IS NULL;
+SELECT count(*) FROM test2 WHERE i IS NULL;
+ALTER TABLE test2 ALTER COLUMN i SET NOT NULL;
+\set ON_ERROR_STOP 1
diff --git a/tsl/test/sql/hypercore_ddl.sql b/tsl/test/sql/hypercore_ddl.sql
index 0fa6b145b02..73aef56d10d 100644
--- a/tsl/test/sql/hypercore_ddl.sql
+++ b/tsl/test/sql/hypercore_ddl.sql
@@ -48,18 +48,70 @@ from generate_series('2022-06-01'::timestamptz,
                      '2022-06-04'::timestamptz, '5m')
 select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
 
--- Insert some extra data to get some non-compressed data as well.
-insert into readings (time, location, device, temp, humidity, jdata)
-select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
-from generate_series('2022-06-01 00:01:00'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
-
 select chunk, amname from chunk_info where hypertable = 'readings'::regclass;
 
--- Pick a chunk to truncate that is not the first chunk. This is
+-- Pick a chunk to play with that is not the first chunk. This is
 -- mostly a precaution to make sure that there is no bias towards the
 -- first chunk and we could just as well pick the first chunk.
 select chunk from show_chunks('readings') x(chunk) limit 1 offset 3 \gset
+----------------------------------------------------------------
+-- Test ALTER TABLE .... ALTER COLUMN commands
+
+-- This should fail since "location" is NOT NULL
+\set ON_ERROR_STOP 0
+insert into readings(time,device,temp,humidity,jdata)
+values ('2024-01-01 00:00:10', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
+\set ON_ERROR_STOP 1
+
+-- Test altering column definitions to drop NOT NULL and check that it
+-- propagates to the chunks. We just pick one chunk here and check
+-- that the setting propagates.
+alter table readings alter column location drop not null;
+\d readings
+\d :chunk
+
+-- This should now work since we allow NULL values
+insert into readings(time,device,temp,humidity,jdata)
+values ('2024-01-01 00:00:10', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
+
+select count(*) from readings where location is null;
+select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
+select count(*) from readings where location is null;
+
+-- We insert another row with nulls that will end up in the
+-- non-compressed region.
+insert into readings(time,device,temp,humidity,jdata)
+values ('2024-01-02 00:00:10', 1, 66.0, 66.0, '{"magic": "more"}'::jsonb);
+
+-- We should not be able to set the not null before we have removed
+-- the null rows in the table. This works for hypercore-compressed
+-- chunks but not for heap-compressed chunks.
+\set ON_ERROR_STOP 0
+alter table readings alter column location set not null;
+\set ON_ERROR_STOP 1
+delete from readings where location is null;
+-- Compress the data to make sure that we are not working on
+-- non-compressed data.
+select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
+select count(*) from readings where location is null;
+alter table readings alter column location set not null;
+\d readings
+\d :chunk
+select count(*) from readings where location is null;
+
+----------------------------------------------------------------
+-- TRUNCATE test
+-- We keep the truncate test last in the file to avoid having to
+-- re-populate it.
+
+-- Insert some extra data to get some non-compressed data as
+-- well. This checks that truncate will deal with write-store
+-- (WS) and read-store (RS)
+insert into readings (time, location, device, temp, humidity, jdata)
+select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
+from generate_series('2022-06-01 00:01:00'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
+
 -- Check that the number of bytes in the table before and after the
 -- truncate.
 --
@@ -84,21 +136,3 @@ select (select count(*) from readings) tuples,
 truncate readings;
 select (select count(*) from readings) tuples,
        (select count(*) from show_chunks('readings')) chunks;
-
-
-\set ON_ERROR_STOP 0
-insert into readings(time,device,temp,humidity,jdata)
-values ('2024-01-01 00:00:00', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
-\set ON_ERROR_STOP 1
-
--- Test altering column definitions
-alter table readings
-    alter column location drop not null;
-
--- This should now work.
-insert into readings(time,device,temp,humidity,jdata)
-values ('2024-01-01 00:00:00', 1, 99.0, 99.0, '{"magic": "yes"}'::jsonb);
-
-select count(*) from readings where location is null;
-select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
-select count(*) from readings where location is null;
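
For context, a minimal SQL sketch of the behavior this change targets, outside the regression tests above. The hypertable and column names (metrics, device) are illustrative only and are not taken from the patch; the sequence assumes a standard TimescaleDB setup with compression enabled.

-- Illustrative schema; names are placeholders, not from this patch.
CREATE TABLE metrics (time timestamptz NOT NULL, device int, value float);
SELECT create_hypertable('metrics', 'time');
ALTER TABLE metrics SET (timescaledb.compress);
INSERT INTO metrics VALUES ('2024-01-01', 1, 1.0);

-- Heap-compressed chunks: SET NOT NULL is rejected per chunk with the new
-- "hypercore" error instead of the old blanket hypertable-level error.
SELECT compress_chunk(show_chunks('metrics'));
ALTER TABLE metrics ALTER COLUMN device SET NOT NULL;  -- ERROR

-- Chunks using the Hypercore table access method: SET NOT NULL is validated
-- against the chunk data and succeeds when no NULL values remain.
SELECT decompress_chunk(show_chunks('metrics'));
SELECT compress_chunk(show_chunks('metrics'), hypercore_use_access_method => true);
ALTER TABLE metrics ALTER COLUMN device SET NOT NULL;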