diff --git a/.unreleased/pr_6696 b/.unreleased/pr_6696 new file mode 100644 index 00000000000..c52ca5f2d35 --- /dev/null +++ b/.unreleased/pr_6696 @@ -0,0 +1 @@ +Implements: #6696 Improve defaults for compression segment_by and order_by diff --git a/sql/compression_defaults.sql b/sql/compression_defaults.sql index cd1db71bc00..d3f0aa7cbfd 100644 --- a/sql/compression_defaults.sql +++ b/sql/compression_defaults.sql @@ -45,10 +45,10 @@ BEGIN INNER JOIN pg_attribute a on (a.attnum = i.attnum AND a.attrelid = relation) --right now stats are from the hypertable itself. Use chunks in the future. - INNER JOIN pg_statistic s ON (s.staattnum = a.attnum and s.starelid = relation) + INNER JOIN pg_stats s ON (s.attname = a.attname and s.schemaname = _schema_name and s.tablename = _table_name) WHERE a.attname NOT IN (SELECT column_name FROM _timescaledb_catalog.dimension d WHERE d.hypertable_id = _hypertable_row.id) - AND s.stadistinct > 1 + AND s.n_distinct > 1 ORDER BY i.pos LIMIT 1; @@ -75,10 +75,10 @@ BEGIN INNER JOIN pg_attribute a on (a.attnum = i.attnum AND a.attrelid = relation) --right now stats are from the hypertable itself. Use chunks in the future. - INNER JOIN pg_statistic s ON (s.staattnum = a.attnum and s.starelid = relation) + INNER JOIN pg_stats s ON (s.attname = a.attname and s.schemaname = _schema_name and s.tablename = _table_name) WHERE a.attname NOT IN (SELECT column_name FROM _timescaledb_catalog.dimension d WHERE d.hypertable_id = _hypertable_row.id) - AND s.stadistinct > 1 + AND s.n_distinct > 1 ORDER BY i.pos LIMIT 1; @@ -106,10 +106,10 @@ BEGIN LEFT JOIN pg_catalog.pg_attrdef ad ON (ad.adrelid = relation AND ad.adnum = a.attnum) LEFT JOIN - pg_statistic s ON (s.staattnum = a.attnum and s.starelid = relation) + pg_stats s ON (s.attname = a.attname and s.schemaname = _schema_name and s.tablename = _table_name) WHERE a.attname NOT IN (SELECT column_name FROM _timescaledb_catalog.dimension d WHERE d.hypertable_id = _hypertable_row.id) - AND s.stadistinct is null + AND s.n_distinct is null AND a.attidentity = '' AND (ad.adbin IS NULL OR pg_get_expr(adbin, adrelid) not like 'nextval%') ORDER BY i.pos LIMIT 1; @@ -141,10 +141,10 @@ BEGIN LEFT JOIN pg_catalog.pg_attrdef ad ON (ad.adrelid = relation AND ad.adnum = a.attnum) LEFT JOIN - pg_statistic s ON (s.staattnum = a.attnum and s.starelid = relation) + pg_stats s ON (s.attname = a.attname and s.schemaname = _schema_name and s.tablename = _table_name) WHERE a.attname NOT IN (SELECT column_name FROM _timescaledb_catalog.dimension d WHERE d.hypertable_id = _hypertable_row.id) - AND s.stadistinct is null + AND s.n_distinct is null AND a.attidentity = '' AND (ad.adbin IS NULL OR pg_get_expr(adbin, adrelid) not like 'nextval%') ORDER BY i.pos LIMIT 1; @@ -285,12 +285,12 @@ BEGIN --add DESC to any dimensions SELECT - array_agg( + coalesce(array_agg( CASE WHEN d.column_name IS NULL THEN - a.colname + format('%I', a.colname) ELSE - a.colname || ' DESC' - END ORDER BY pos) INTO STRICT _orderby_clauses + format('%I DESC', a.colname) + END ORDER BY pos), array[]::text[]) INTO STRICT _orderby_clauses FROM unnest(_orderby_names) WITH ORDINALITY as a(colname, pos) LEFT JOIN _timescaledb_catalog.dimension d ON (d.column_name = a.colname AND d.hypertable_id = _hypertable_row.id); diff --git a/src/compression_with_clause.c b/src/compression_with_clause.c index 3ac89137cae..02d377e202b 100644 --- a/src/compression_with_clause.c +++ b/src/compression_with_clause.c @@ -176,8 +176,8 @@ throw_order_by_error(char *order_by) } /* compress_orderby is 
parsed same as order by in select queries */
-static OrderBySettings
-parse_order_collist(char *inpstr, Hypertable *hypertable)
+OrderBySettings
+ts_compress_parse_order_collist(char *inpstr, Hypertable *hypertable)
 {
 	StringInfoData buf;
 	List *parsed;
@@ -322,13 +322,9 @@ ts_compress_hypertable_parse_segment_by(WithClauseResult *parsed_options, Hypert
 OrderBySettings
 ts_compress_hypertable_parse_order_by(WithClauseResult *parsed_options, Hypertable *hypertable)
 {
-	if (parsed_options[CompressOrderBy].is_default == false)
-	{
-		Datum textarg = parsed_options[CompressOrderBy].parsed;
-		return parse_order_collist(TextDatumGetCString(textarg), hypertable);
-	}
-	else
-		return (OrderBySettings){ 0 };
+	Ensure(parsed_options[CompressOrderBy].is_default == false, "order_by with clause is the default");
+	Datum textarg = parsed_options[CompressOrderBy].parsed;
+	return ts_compress_parse_order_collist(TextDatumGetCString(textarg), hypertable);
 }
 
 /* returns List of CompressedParsedCol
diff --git a/src/compression_with_clause.h b/src/compression_with_clause.h
index 1510b2e7b77..e03fb4a8dc1 100644
--- a/src/compression_with_clause.h
+++ b/src/compression_with_clause.h
@@ -44,3 +44,5 @@ ts_compress_hypertable_parse_order_by(WithClauseResult *parsed_options, Hypertab
 extern TSDLLEXPORT Interval *
 ts_compress_hypertable_parse_chunk_time_interval(WithClauseResult *parsed_options,
 												 Hypertable *hypertable);
+extern TSDLLEXPORT OrderBySettings ts_compress_parse_order_collist(char *inpstr,
+																   Hypertable *hypertable);
diff --git a/src/guc.c b/src/guc.c
index d634945e6fe..da93a0f52b2 100644
--- a/src/guc.c
+++ b/src/guc.c
@@ -5,9 +5,13 @@
  */
 #include
 #include
+#include <catalog/pg_type.h>
+#include <parser/parse_func.h>
+#include <utils/regproc.h>
 #include
 
 #include "guc.h"
+#include "extension.h"
 #include "license_guc.h"
 #include "config.h"
 #include "hypertable_cache.h"
@@ -76,6 +80,8 @@ TSDLLEXPORT bool ts_guc_enable_bulk_decompression = true;
 TSDLLEXPORT bool ts_guc_auto_sparse_indexes = true;
 TSDLLEXPORT int ts_guc_bgw_log_level = WARNING;
 TSDLLEXPORT bool ts_guc_enable_skip_scan = true;
+static char *ts_guc_default_segmentby_fn = NULL;
+static char *ts_guc_default_orderby_fn = NULL;
 /* default value of ts_guc_max_open_chunks_per_insert and ts_guc_max_cached_chunks_per_hypertable
  * will be set as their respective boot-value when the GUC mechanism starts up */
 int ts_guc_max_open_chunks_per_insert;
@@ -218,6 +224,90 @@ assign_max_open_chunks_per_insert_hook(int newval, void *extra)
 	validate_chunk_cache_sizes(ts_guc_max_cached_chunks_per_hypertable, newval);
 }
 
+static Oid
+get_segmentby_func(char *input_name)
+{
+	List *namelist = NIL;
+
+	if (strlen(input_name) == 0)
+	{
+		return InvalidOid;
+	}
+
+#if PG16_LT
+	namelist = stringToQualifiedNameList(input_name);
+#else
+	namelist = stringToQualifiedNameList(input_name, NULL);
+#endif
+	Oid argtyp[] = { REGCLASSOID };
+	return LookupFuncName(namelist, lengthof(argtyp), argtyp, true);
+}
+
+static bool
+check_segmentby_func(char **newval, void **extra, GucSource source)
+{
+	/* If the extension is not loaded we cannot validate the function, so take it on faith */
+	if (ts_extension_is_loaded())
+	{
+		Oid segment_func_oid = get_segmentby_func(*newval);
+
+		if (strlen(*newval) > 0 && !OidIsValid(segment_func_oid))
+		{
+			GUC_check_errdetail("Function \"%s\" does not exist.", *newval);
+			return false;
+		}
+	}
+	return true;
+}
+
+Oid
+ts_guc_default_segmentby_fn_oid(void)
+{
+	return get_segmentby_func(ts_guc_default_segmentby_fn);
+}
+
+static Oid
+get_orderby_func(char *input_name)
+{
+	List *namelist = NIL;
+
+	if (strlen(input_name) == 0)
+	{
+		return InvalidOid;
+	}
+
+#if PG16_LT
+	namelist = stringToQualifiedNameList(input_name);
+#else
+	namelist = stringToQualifiedNameList(input_name, NULL);
+#endif
+	Oid argtyp[] = { REGCLASSOID, TEXTARRAYOID };
+	return LookupFuncName(namelist, lengthof(argtyp), argtyp, true);
+}
+
+static bool
+check_orderby_func(char **newval, void **extra, GucSource source)
+{
+	/* If the extension is not loaded we cannot validate the function, so take it on faith */
+	if (ts_extension_is_loaded())
+	{
+		Oid func_oid = get_orderby_func(*newval);
+
+		if (strlen(*newval) > 0 && !OidIsValid(func_oid))
+		{
+			GUC_check_errdetail("Function \"%s\" does not exist.", *newval);
+			return false;
+		}
+	}
+	return true;
+}
+
+Oid
+ts_guc_default_orderby_fn_oid(void)
+{
+	return get_orderby_func(ts_guc_default_orderby_fn);
+}
+
 void
 _guc_init(void)
 {
@@ -570,6 +660,32 @@
 							 NULL);
 #endif
 
+	DefineCustomStringVariable(/* name= */ MAKE_EXTOPTION("compression_segmentby_default_function"),
+							   /* short_desc= */ "Function that computes default segment_by",
+							   /* long_desc= */
+							   "Function to use for calculating default segment_by setting for "
+							   "compression",
+							   /* valueAddr= */ &ts_guc_default_segmentby_fn,
+							   /* bootValue= */ "_timescaledb_functions.get_segmentby_defaults",
+							   /* context= */ PGC_USERSET,
+							   /* flags= */ 0,
+							   /* check_hook= */ check_segmentby_func,
+							   /* assign_hook= */ NULL,
+							   /* show_hook= */ NULL);
+
+	DefineCustomStringVariable(/* name= */ MAKE_EXTOPTION("compression_orderby_default_function"),
+							   /* short_desc= */ "Function that computes default order_by",
+							   /* long_desc= */
+							   "Function to use for calculating default order_by setting for "
+							   "compression",
+							   /* valueAddr= */ &ts_guc_default_orderby_fn,
+							   /* bootValue= */ "_timescaledb_functions.get_orderby_defaults",
+							   /* context= */ PGC_USERSET,
+							   /* flags= */ 0,
+							   /* check_hook= */ check_orderby_func,
+							   /* assign_hook= */ NULL,
+							   /* show_hook= */ NULL);
+
 	DefineCustomStringVariable(/* name= */ MAKE_EXTOPTION("license"),
 							   /* short_desc= */ "TimescaleDB license type",
 							   /* long_desc= */ "Determines which features are enabled",
diff --git a/src/guc.h b/src/guc.h
index a2a17dfc787..180ba766fdf 100644
--- a/src/guc.h
+++ b/src/guc.h
@@ -94,3 +94,5 @@ typedef enum
 } FeatureFlagType;
 
 extern TSDLLEXPORT void ts_feature_flag_check(FeatureFlagType);
+extern TSDLLEXPORT Oid ts_guc_default_segmentby_fn_oid(void);
+extern TSDLLEXPORT Oid ts_guc_default_orderby_fn_oid(void);
diff --git a/src/ts_catalog/array_utils.c b/src/ts_catalog/array_utils.c
index 99d83e809a4..ec78d899b73 100644
--- a/src/ts_catalog/array_utils.c
+++ b/src/ts_catalog/array_utils.c
@@ -89,6 +89,41 @@ ts_array_is_member(ArrayType *arr, const char *name)
 	return ret;
 }
 
+extern TSDLLEXPORT void
+ts_array_append_stringinfo(ArrayType *arr, StringInfo info)
+{
+	bool first = true;
+	Datum datum;
+	bool null;
+
+	if (!arr)
+		return;
+
+	Assert(ARR_NDIM(arr) <= 1);
+	Assert(arr->elemtype == TEXTOID);
+
+	ArrayIterator it = array_create_iterator(arr, 0, NULL);
+	while (array_iterate(it, &datum, &null))
+	{
+		/*
+		 * Our internal catalog arrays should either be NULL or
+		 * have non-NULL members; during normal operation they should
+		 * never have NULL members. If we have NULL members either
+		 * the catalog is corrupted or some catalog tampering has
+		 * happened.
+		 */
+		Ensure(!null, "array element was NULL");
+		if (!first)
+			appendStringInfoString(info, ", ");
+		else
+			first = false;
+
+		appendStringInfo(info, "%s", TextDatumGetCString(datum));
+	}
+
+	array_free_iterator(it);
+}
+
 extern TSDLLEXPORT int
 ts_array_position(ArrayType *arr, const char *name)
 {
diff --git a/src/ts_catalog/array_utils.h b/src/ts_catalog/array_utils.h
index 5aa8d62c4f9..2366057a516 100644
--- a/src/ts_catalog/array_utils.h
+++ b/src/ts_catalog/array_utils.h
@@ -20,6 +20,7 @@ extern TSDLLEXPORT int ts_array_length(ArrayType *arr);
 extern TSDLLEXPORT bool ts_array_equal(ArrayType *left, ArrayType *right);
 extern TSDLLEXPORT bool ts_array_is_member(ArrayType *arr, const char *name);
+extern TSDLLEXPORT void ts_array_append_stringinfo(ArrayType *arr, StringInfo info);
 extern TSDLLEXPORT int ts_array_position(ArrayType *arr, const char *name);
 extern TSDLLEXPORT bool ts_array_get_element_bool(ArrayType *arr, int position);
diff --git a/tsl/src/compression/create.c b/tsl/src/compression/create.c
index bba3e7c735a..ae0c1367382 100644
--- a/tsl/src/compression/create.c
+++ b/tsl/src/compression/create.c
@@ -29,6 +29,7 @@
 #include
 #include
 #include
+#include <executor/spi.h>
 #include
 #include
 
@@ -48,6 +49,7 @@
 #include "trigger.h"
 #include "utils.h"
 #include "guc.h"
+#include <ts_catalog/array_utils.h>
 
 static const char *sparse_index_types[] = { "min", "max" };
@@ -888,6 +890,238 @@ validate_hypertable_for_compression(Hypertable *ht)
 	}
 }
 
+/*
+ * Get the default segment_by value for a hypertable.
+ */
+static ArrayType *
+compression_setting_segmentby_get_default(const Hypertable *ht)
+{
+	StringInfoData command;
+	StringInfoData result;
+	int res;
+	ArrayType *column_res = NULL;
+	Datum datum;
+	text *message;
+	char *original_search_path = pstrdup(GetConfigOption("search_path", false, true));
+	bool isnull;
+	MemoryContext upper = CurrentMemoryContext;
+	MemoryContext old;
+	int32 confidence = -1;
+	Oid default_segmentby_fn = ts_guc_default_segmentby_fn_oid();
+
+	if (!OidIsValid(default_segmentby_fn))
+	{
+		elog(LOG_SERVER_ONLY,
+			 "segment_by default: hypertable=\"%s\" columns=\"\" function=\"\" confidence=-1",
+			 get_rel_name(ht->main_table_relid));
+		return NULL;
+	}
+
+	initStringInfo(&command);
+	appendStringInfo(&command,
+					 "SELECT "
+					 " (SELECT array_agg(x) "
+					 "  FROM jsonb_array_elements_text(seg_by->'columns') t(x))::text[], "
+					 " seg_by->>'message', "
+					 " (seg_by->>'confidence')::int "
+					 "FROM %s.%s(%u) seg_by",
+					 quote_identifier(get_namespace_name(get_func_namespace(default_segmentby_fn))),
+					 quote_identifier(get_func_name(default_segmentby_fn)),
+					 ht->main_table_relid);
+
+	if (SPI_connect() != SPI_OK_CONNECT)
+		elog(ERROR, "could not connect to SPI");
+
+	/* Lock down search_path */
+	res = SPI_exec("SET LOCAL search_path TO pg_catalog, pg_temp", 0);
+	if (res < 0)
+		ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), (errmsg("could not set search_path"))));
+
+	res = SPI_execute(command.data, true /* read_only */, 0 /* count */);
+
+	if (res < 0)
+		ereport(ERROR,
+				(errcode(ERRCODE_INTERNAL_ERROR),
+				 (errmsg("could not get the default segment by for hypertable \"%s\"",
+						 get_rel_name(ht->main_table_relid)))));
+
+	old = MemoryContextSwitchTo(upper);
+	datum = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull);
+	if (!isnull)
+		column_res = DatumGetArrayTypePCopy(datum);
+	MemoryContextSwitchTo(old);
+
+	datum = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 2, &isnull);
+
+	if (!isnull)
+	{
+		message = DatumGetTextPP(datum);
+		elog(WARNING,
+			 "there was some uncertainty picking the default segment by for the hypertable: %s",
+			 text_to_cstring(message));
+	}
+
+	datum = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 3, &isnull);
+	if (!isnull)
+	{
+		confidence = DatumGetInt32(datum);
+	}
+
+	/* Reset search_path since this can be executed as part of a larger transaction */
+	resetStringInfo(&command);
+	appendStringInfo(&command, "SET LOCAL search_path TO %s", original_search_path);
+	res = SPI_exec(command.data, 0);
+	if (res < 0)
+		ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), (errmsg("could not reset search_path"))));
+	pfree(original_search_path);
+
+	pfree(command.data);
+
+	res = SPI_finish();
+	if (res != SPI_OK_FINISH)
+		elog(ERROR, "SPI_finish failed: %s", SPI_result_code_string(res));
+
+	initStringInfo(&result);
+	ts_array_append_stringinfo(column_res, &result);
+	elog(NOTICE,
+		 "default segment by for hypertable \"%s\" is set to \"%s\"",
+		 get_rel_name(ht->main_table_relid),
+		 result.data);
+
+	elog(LOG_SERVER_ONLY,
+		 "segment_by default: hypertable=\"%s\" columns=\"%s\" function=\"%s.%s\" confidence=%d",
+		 get_rel_name(ht->main_table_relid),
+		 result.data,
+		 get_namespace_name(get_func_namespace(default_segmentby_fn)),
+		 get_func_name(default_segmentby_fn),
+		 confidence);
+	pfree(result.data);
+	return column_res;
+}
+
+/*
+ * Get the default order_by value for a hypertable.
+ */
+static OrderBySettings
+compression_setting_orderby_get_default(Hypertable *ht, ArrayType *segmentby)
+{
+	StringInfoData command;
+	int res;
+	text *column_res = NULL;
+	Datum datum;
+	text *message;
+	bool isnull;
+	MemoryContext upper = CurrentMemoryContext;
+	MemoryContext old;
+	char *orderby;
+	char *original_search_path = pstrdup(GetConfigOption("search_path", false, true));
+	int32 confidence = -1;
+
+	Oid types[] = { TEXTARRAYOID };
+	Datum values[] = { PointerGetDatum(segmentby) };
+	char nulls[] = { segmentby == NULL ? 'n' : 'v' };
+	Oid orderby_fn = ts_guc_default_orderby_fn_oid();
+
+	if (!OidIsValid(orderby_fn))
+	{
+		/* fall back to the original logic */
+		OrderBySettings obs = (OrderBySettings){ 0 };
+		obs = add_time_to_order_by_if_not_included(obs, segmentby, ht);
+		elog(LOG_SERVER_ONLY,
+			 "order_by default: hypertable=\"%s\" function=\"\" confidence=-1",
+			 get_rel_name(ht->main_table_relid));
+		return obs;
+	}
+
+	initStringInfo(&command);
+	appendStringInfo(&command,
+					 "SELECT "
+					 " (SELECT string_agg(x, ', ') FROM "
+					 "jsonb_array_elements_text(ord_by->'clauses') "
+					 "t(x))::text, "
+					 " ord_by->>'message', "
+					 " (ord_by->>'confidence')::int "
+					 "FROM %s.%s(%u, coalesce($1, array[]::text[])) ord_by",
+					 quote_identifier(get_namespace_name(get_func_namespace(orderby_fn))),
+					 quote_identifier(get_func_name(orderby_fn)),
+					 ht->main_table_relid);
+
+	if (SPI_connect() != SPI_OK_CONNECT)
+		elog(ERROR, "could not connect to SPI");
+
+	/* Lock down search_path */
+	res = SPI_exec("SET LOCAL search_path TO pg_catalog, pg_temp", 0);
+	if (res < 0)
+		ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), (errmsg("could not set search_path"))));
+
+	res = SPI_execute_with_args(command.data,
+								1,
+								types,
+								values,
+								nulls,
+								true /* read_only */,
+								0 /* count */);
+	if (res < 0)
+		ereport(ERROR,
+				(errcode(ERRCODE_INTERNAL_ERROR),
+				 (errmsg("could not get the default order by for hypertable \"%s\"",
+						 get_rel_name(ht->main_table_relid)))));
+
+	old = MemoryContextSwitchTo(upper);
+	datum = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 1, &isnull);
+
+	if (!isnull)
+		column_res = DatumGetTextPCopy(datum);
+	MemoryContextSwitchTo(old);
+
+	datum = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 2, &isnull);
+
+	if (!isnull)
+	{
+		message = DatumGetTextPP(datum);
+		elog(WARNING,
+			 "there was some uncertainty picking the default order by for the hypertable: %s",
+			 text_to_cstring(message));
+	}
+	datum = SPI_getbinval(SPI_tuptable->vals[0], SPI_tuptable->tupdesc, 3, &isnull);
+	if (!isnull)
+	{
+		confidence = DatumGetInt32(datum);
+	}
+
+	/* Reset search_path since this can be executed as part of a larger transaction */
+	resetStringInfo(&command);
+	appendStringInfo(&command, "SET LOCAL search_path TO %s", original_search_path);
+	res = SPI_exec(command.data, 0);
+	if (res < 0)
+		ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), (errmsg("could not reset search_path"))));
+	pfree(original_search_path);
+	pfree(command.data);
+
+	res = SPI_finish();
+	if (res != SPI_OK_FINISH)
+		elog(ERROR, "SPI_finish failed: %s", SPI_result_code_string(res));
+
+	if (column_res != NULL)
+		orderby = TextDatumGetCString(PointerGetDatum(column_res));
+	else
+		orderby = "";
+
+	elog(NOTICE,
+		 "default order by for hypertable \"%s\" is set to \"%s\"",
+		 get_rel_name(ht->main_table_relid),
+		 orderby);
+
+	elog(LOG_SERVER_ONLY,
+		 "order_by default: hypertable=\"%s\" clauses=\"%s\" function=\"%s.%s\" confidence=%d",
+		 get_rel_name(ht->main_table_relid),
+		 orderby,
+		 get_namespace_name(get_func_namespace(orderby_fn)),
+		 get_func_name(orderby_fn),
+		 confidence);
+	return ts_compress_parse_order_collist(orderby, ht);
+}
+
 static void
 compression_settings_update(Hypertable *ht, CompressionSettings *settings,
 							WithClauseResult *with_clause_options)
@@ -906,11 +1140,23 @@
 	{
 		settings->fd.segmentby = ts_compress_hypertable_parse_segment_by(with_clause_options, ht);
 	}
+	else if (!settings->fd.segmentby)
+	{
+		settings->fd.segmentby =
compression_setting_segmentby_get_default(ht); + } if (!with_clause_options[CompressOrderBy].is_default || !settings->fd.orderby) { - OrderBySettings obs = ts_compress_hypertable_parse_order_by(with_clause_options, ht); - obs = add_time_to_order_by_if_not_included(obs, settings->fd.segmentby, ht); + OrderBySettings obs; + if (with_clause_options[CompressOrderBy].is_default) + { + obs = compression_setting_orderby_get_default(ht, settings->fd.segmentby); + } + else + { + obs = ts_compress_hypertable_parse_order_by(with_clause_options, ht); + obs = add_time_to_order_by_if_not_included(obs, settings->fd.segmentby, ht); + } settings->fd.orderby = obs.orderby; settings->fd.orderby_desc = obs.orderby_desc; settings->fd.orderby_nullsfirst = obs.orderby_nullsfirst; diff --git a/tsl/test/expected/bgw_custom-13.out b/tsl/test/expected/bgw_custom-13.out index ace41468c1f..13076e04a5a 100644 --- a/tsl/test/expected/bgw_custom-13.out +++ b/tsl/test/expected/bgw_custom-13.out @@ -913,6 +913,8 @@ INSERT INTO sensor_data time; -- enable compression ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_orderby = 'time DESC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "sensor_data" is set to "" -- create new chunks INSERT INTO sensor_data SELECT diff --git a/tsl/test/expected/bgw_custom-14.out b/tsl/test/expected/bgw_custom-14.out index 0ea73953d04..c13889328de 100644 --- a/tsl/test/expected/bgw_custom-14.out +++ b/tsl/test/expected/bgw_custom-14.out @@ -913,6 +913,8 @@ INSERT INTO sensor_data time; -- enable compression ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_orderby = 'time DESC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "sensor_data" is set to "" -- create new chunks INSERT INTO sensor_data SELECT diff --git a/tsl/test/expected/bgw_custom-15.out b/tsl/test/expected/bgw_custom-15.out index 0ea73953d04..c13889328de 100644 --- a/tsl/test/expected/bgw_custom-15.out +++ b/tsl/test/expected/bgw_custom-15.out @@ -913,6 +913,8 @@ INSERT INTO sensor_data time; -- enable compression ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_orderby = 'time DESC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "sensor_data" is set to "" -- create new chunks INSERT INTO sensor_data SELECT diff --git a/tsl/test/expected/bgw_custom-16.out b/tsl/test/expected/bgw_custom-16.out index 0ea73953d04..c13889328de 100644 --- a/tsl/test/expected/bgw_custom-16.out +++ b/tsl/test/expected/bgw_custom-16.out @@ -913,6 +913,8 @@ INSERT INTO sensor_data time; -- enable compression ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_orderby = 'time DESC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "sensor_data" is set to "" -- create new chunks INSERT INTO sensor_data SELECT diff --git a/tsl/test/expected/bgw_db_scheduler_fixed.out b/tsl/test/expected/bgw_db_scheduler_fixed.out index 4c93bcbdd87..ef5749e3e51 100644 --- a/tsl/test/expected/bgw_db_scheduler_fixed.out +++ b/tsl/test/expected/bgw_db_scheduler_fixed.out @@ -1655,6 +1655,8 @@ select show_chunks('test_table_scheduler'); (8 rows) alter table test_table_scheduler set (timescaledb.compress, timescaledb.compress_orderby = 'time DESC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_scheduler" is set to "" select add_retention_policy('test_table_scheduler', interval '2 year', initial_start => :'init'::timestamptz, timezone => 'Europe/Berlin'); add_retention_policy ---------------------- diff --git a/tsl/test/expected/bgw_policy.out b/tsl/test/expected/bgw_policy.out index b72cb018988..48876cf0ce1 100644 --- a/tsl/test/expected/bgw_policy.out +++ b/tsl/test/expected/bgw_policy.out @@ -661,6 +661,9 @@ select add_retention_policy('test_strict', interval '2 days', schedule_interval -- test compression with null arguments alter table test_strict set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_strict" is set to "" +NOTICE: default order by for hypertable "test_strict" is set to ""time" DESC" select add_compression_policy(NULL, compress_after => NULL); add_compression_policy ------------------------ @@ -691,6 +694,9 @@ select create_hypertable('test_missing_schedint', 'time', chunk_time_interval=> select add_retention_policy('test_missing_schedint', interval '2 weeks') as retenion_id_missing_schedint \gset -- we expect schedule_interval to be chunk_time_interval/2 for timestamptz time alter table test_missing_schedint set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_missing_schedint" is set to "" +NOTICE: default order by for hypertable "test_missing_schedint" is set to ""time" DESC" select add_compression_policy('test_missing_schedint', interval '60 days') as compression_id_missing_schedint \gset -- we expect schedule_interval to be 1 day for int time create table test_missing_schedint_integer (time int not null, a int, b int); @@ -702,6 +708,9 @@ select create_hypertable('test_missing_schedint_integer', 'time', chunk_time_int (1 row) alter table test_missing_schedint_integer set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_missing_schedint_integer" is set to "" +NOTICE: default order by for hypertable "test_missing_schedint_integer" is set to ""time" DESC" select add_compression_policy('test_missing_schedint_integer', BIGINT '600000') as compression_id_integer \gset select * from _timescaledb_config.bgw_job where id in (:retenion_id_missing_schedint, :compression_id_missing_schedint, :compression_id_integer); id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone diff --git a/tsl/test/expected/bgw_reorder_drop_chunks.out b/tsl/test/expected/bgw_reorder_drop_chunks.out index 88b2875e3d4..22d9ab2a669 100644 --- a/tsl/test/expected/bgw_reorder_drop_chunks.out +++ b/tsl/test/expected/bgw_reorder_drop_chunks.out @@ -699,6 +699,9 @@ select total_runs, total_successes, total_failures from timescaledb_information. -- test the compression policy alter table test_schedint set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_schedint" is set to "" +NOTICE: default order by for hypertable "test_schedint" is set to ""time" DESC" select add_compression_policy('test_schedint', interval '3 weeks', schedule_interval => '40 seconds') as polcomp_schedint \gset select ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish(1000); ts_bgw_db_scheduler_test_run_and_wait_for_scheduler_finish diff --git a/tsl/test/expected/cagg_ddl-13.out b/tsl/test/expected/cagg_ddl-13.out index c127cdaf5ca..898baae4784 100644 --- a/tsl/test/expected/cagg_ddl-13.out +++ b/tsl/test/expected/cagg_ddl-13.out @@ -1508,6 +1508,8 @@ INSERT INTO test_setting VALUES( '2020-11-01', 20); --try out 2 settings here -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_40" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1526,6 +1528,8 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; --now set it back to false -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_40" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1593,6 +1597,8 @@ INSERT INTO test_setting VALUES( '2020-11-01', 20); --try out 2 settings here -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_42" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1611,6 +1617,8 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; --now set it back to false -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_42" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; diff --git a/tsl/test/expected/cagg_ddl-14.out b/tsl/test/expected/cagg_ddl-14.out index c127cdaf5ca..898baae4784 100644 --- a/tsl/test/expected/cagg_ddl-14.out +++ b/tsl/test/expected/cagg_ddl-14.out @@ -1508,6 +1508,8 @@ INSERT INTO test_setting VALUES( '2020-11-01', 20); --try out 2 settings here -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_40" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1526,6 +1528,8 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; --now set it back to false -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_40" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1593,6 +1597,8 @@ INSERT INTO test_setting VALUES( '2020-11-01', 20); --try out 2 settings here -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_42" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1611,6 +1617,8 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; --now set it back to false -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_42" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; diff --git a/tsl/test/expected/cagg_ddl-15.out b/tsl/test/expected/cagg_ddl-15.out index c127cdaf5ca..898baae4784 100644 --- a/tsl/test/expected/cagg_ddl-15.out +++ b/tsl/test/expected/cagg_ddl-15.out @@ -1508,6 +1508,8 @@ INSERT INTO test_setting VALUES( '2020-11-01', 20); --try out 2 settings here -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_40" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1526,6 +1528,8 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; --now set it back to false -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_40" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1593,6 +1597,8 @@ INSERT INTO test_setting VALUES( '2020-11-01', 20); --try out 2 settings here -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_42" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1611,6 +1617,8 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; --now set it back to false -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_42" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; diff --git a/tsl/test/expected/cagg_ddl-16.out b/tsl/test/expected/cagg_ddl-16.out index 56bf85cc6e5..8949425ee99 100644 --- a/tsl/test/expected/cagg_ddl-16.out +++ b/tsl/test/expected/cagg_ddl-16.out @@ -1508,6 +1508,8 @@ INSERT INTO test_setting VALUES( '2020-11-01', 20); --try out 2 settings here -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_40" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1526,6 +1528,8 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; --now set it back to false -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_40" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1593,6 +1597,8 @@ INSERT INTO test_setting VALUES( '2020-11-01', 20); --try out 2 settings here -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_42" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; @@ -1611,6 +1617,8 @@ SELECT count(*) from test_setting_cagg ORDER BY 1; --now set it back to false -- ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_42" is set to "" SELECT view_name, compression_enabled, materialized_only FROM timescaledb_information.continuous_aggregates where view_name = 'test_setting_cagg'; diff --git a/tsl/test/expected/cagg_errors.out b/tsl/test/expected/cagg_errors.out index 5050a1c067b..8c2e8b32c19 100644 --- a/tsl/test/expected/cagg_errors.out +++ b/tsl/test/expected/cagg_errors.out @@ -560,9 +560,13 @@ NOTICE: defaulting compress_orderby to bucket ERROR: cannot use column "bucket" for both ordering and segmenting ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, timescaledb.compress_orderby = 'bucket'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_14" is set to "" --enable compression and test re-enabling compression ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress); NOTICE: defaulting compress_orderby to bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_14" is set to "" insert into i2980 select now(); call refresh_continuous_aggregate('i2980_cagg2', NULL, NULL); SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; @@ -575,6 +579,8 @@ ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'false'); ERROR: cannot disable compression on hypertable with compressed chunks ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'true'); NOTICE: defaulting compress_orderby to bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_14" is set to "" ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, timescaledb.compress_segmentby = 'bucket'); NOTICE: defaulting compress_orderby to bucket ERROR: cannot use column "bucket" for both ordering and segmenting @@ -589,6 +595,8 @@ SELECT add_compression_policy('i2980_cagg', '8 day'::interval); ERROR: compression not enabled on continuous aggregate "i2980_cagg" ALTER MATERIALIZED VIEW i2980_cagg SET ( timescaledb.compress ); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_13" is set to "" SELECT add_compression_policy('i2980_cagg', '8 day'::interval); ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg SELECT add_continuous_aggregate_policy('i2980_cagg2', '10 day'::interval, '6 day'::interval); @@ -626,6 +634,9 @@ SELECT table_name FROM create_hypertable('comp_ht_test','time'); (1 row) ALTER TABLE comp_ht_test SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "comp_ht_test" is set to "" +NOTICE: default order by for hypertable "comp_ht_test" is set to ""time" DESC" SELECT format('%I.%I', ht.schema_name, ht.table_name) AS "INTERNALTABLE" FROM diff --git a/tsl/test/expected/cagg_migrate.out b/tsl/test/expected/cagg_migrate.out index 91cd7866ffe..63ca4456442 100644 --- a/tsl/test/expected/cagg_migrate.out +++ b/tsl/test/expected/cagg_migrate.out @@ -342,6 +342,8 @@ psql:include/cagg_migrate_common.sql:104: ERROR: plan already exists for contin -- policies for test ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true); psql:include/cagg_migrate_common.sql:108: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:108: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +psql:include/cagg_migrate_common.sql:108: NOTICE: default segment by for hypertable "_materialized_hypertable_3" is set to "" \if :IS_TIME_DIMENSION SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval); SELECT add_continuous_aggregate_policy('conditions_summary_daily', '30 days'::interval, '1 day'::interval, '1 hour'::interval); @@ -385,6 +387,8 @@ TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCA psql:include/cagg_migrate_common.sql:128: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" CALL cagg_migrate('conditions_summary_daily'); psql:include/cagg_migrate_common.sql:129: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:129: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +psql:include/cagg_migrate_common.sql:129: NOTICE: default segment by for hypertable "_materialized_hypertable_7" is set to "" psql:include/cagg_migrate_common.sql:129: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);" SELECT ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID", @@ -530,6 +534,8 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d CALL cagg_migrate('conditions_summary_daily', override => TRUE); psql:include/cagg_migrate_common.sql:181: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:181: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +psql:include/cagg_migrate_common.sql:181: NOTICE: default segment by for hypertable "_materialized_hypertable_9" is set to "" psql:include/cagg_migrate_common.sql:181: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);" -- cagg with the new format because it was overriden \d+ conditions_summary_daily @@ -634,6 +640,8 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE); psql:include/cagg_migrate_common.sql:203: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:203: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +psql:include/cagg_migrate_common.sql:203: NOTICE: default segment by for hypertable "_materialized_hypertable_11" is set to "" psql:include/cagg_migrate_common.sql:203: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);" psql:include/cagg_migrate_common.sql:203: NOTICE: drop cascades to 10 other objects psql:include/cagg_migrate_common.sql:203: NOTICE: job 1002 not found, skipping @@ -1195,6 +1203,8 @@ psql:include/cagg_migrate_common.sql:104: ERROR: plan already exists for contin -- policies for test ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true); psql:include/cagg_migrate_common.sql:108: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:108: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +psql:include/cagg_migrate_common.sql:108: NOTICE: default segment by for hypertable "_materialized_hypertable_7" is set to "" \if :IS_TIME_DIMENSION SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval); add_retention_policy @@ -1238,6 +1248,8 @@ TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCA psql:include/cagg_migrate_common.sql:128: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" CALL cagg_migrate('conditions_summary_daily'); psql:include/cagg_migrate_common.sql:129: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:129: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +psql:include/cagg_migrate_common.sql:129: NOTICE: default segment by for hypertable "_materialized_hypertable_11" is set to "" psql:include/cagg_migrate_common.sql:129: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('2023-01-01 00:00:00' AS timestamp without time zone), NULL);" SELECT ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID", @@ -1371,6 +1383,8 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d CALL cagg_migrate('conditions_summary_daily', override => TRUE); psql:include/cagg_migrate_common.sql:181: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:181: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +psql:include/cagg_migrate_common.sql:181: NOTICE: default segment by for hypertable "_materialized_hypertable_13" is set to "" psql:include/cagg_migrate_common.sql:181: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('2023-01-01 00:00:00' AS timestamp without time zone), NULL);" -- cagg with the new format because it was overriden \d+ conditions_summary_daily @@ -1475,6 +1489,8 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE); psql:include/cagg_migrate_common.sql:203: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:203: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +psql:include/cagg_migrate_common.sql:203: NOTICE: default segment by for hypertable "_materialized_hypertable_15" is set to "" psql:include/cagg_migrate_common.sql:203: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('2023-01-01 00:00:00' AS timestamp without time zone), NULL);" psql:include/cagg_migrate_common.sql:203: NOTICE: drop cascades to 6 other objects psql:include/cagg_migrate_common.sql:203: NOTICE: job 1014 not found, skipping @@ -2027,6 +2043,8 @@ psql:include/cagg_migrate_common.sql:104: ERROR: plan already exists for contin -- policies for test ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true); psql:include/cagg_migrate_common.sql:108: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:108: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +psql:include/cagg_migrate_common.sql:108: NOTICE: default segment by for hypertable "_materialized_hypertable_11" is set to "" \if :IS_TIME_DIMENSION SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval); add_retention_policy @@ -2070,6 +2088,8 @@ TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCA psql:include/cagg_migrate_common.sql:128: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step" CALL cagg_migrate('conditions_summary_daily'); psql:include/cagg_migrate_common.sql:129: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:129: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +psql:include/cagg_migrate_common.sql:129: NOTICE: default segment by for hypertable "_materialized_hypertable_21" is set to "" psql:include/cagg_migrate_common.sql:129: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('2022-12-31 16:00:00-08' AS timestamp with time zone), NULL);" SELECT ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID", @@ -2203,6 +2223,8 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d CALL cagg_migrate('conditions_summary_daily', override => TRUE); psql:include/cagg_migrate_common.sql:181: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:181: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +psql:include/cagg_migrate_common.sql:181: NOTICE: default segment by for hypertable "_materialized_hypertable_23" is set to "" psql:include/cagg_migrate_common.sql:181: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('2022-12-31 16:00:00-08' AS timestamp with time zone), NULL);" -- cagg with the new format because it was overriden \d+ conditions_summary_daily @@ -2307,6 +2329,8 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE); psql:include/cagg_migrate_common.sql:203: NOTICE: defaulting compress_orderby to bucket +psql:include/cagg_migrate_common.sql:203: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +psql:include/cagg_migrate_common.sql:203: NOTICE: default segment by for hypertable "_materialized_hypertable_25" is set to "" psql:include/cagg_migrate_common.sql:203: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('2022-12-31 16:00:00-08' AS timestamp with time zone), NULL);" psql:include/cagg_migrate_common.sql:203: NOTICE: drop cascades to 6 other objects psql:include/cagg_migrate_common.sql:203: NOTICE: job 1026 not found, skipping diff --git a/tsl/test/expected/cagg_policy.out b/tsl/test/expected/cagg_policy.out index d795c9df0b9..50515aaccad 100644 --- a/tsl/test/expected/cagg_policy.out +++ b/tsl/test/expected/cagg_policy.out @@ -384,6 +384,8 @@ CREATE MATERIALIZED VIEW max_mat_view_date -- Test 1 step policy for timestamp type buckets ALTER materialized view max_mat_view_date set (timescaledb.compress = true); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_6" is set to "" -- Only works for cagg SELECT timescaledb_experimental.add_policies('continuous_agg_max_mat_date', refresh_start_offset => '1 day'::interval, refresh_end_offset => '2 day'::interval, compress_after => '20 days'::interval, drop_after => '25 days'::interval); ERROR: "continuous_agg_max_mat_date" is not a continuous aggregate @@ -868,6 +870,8 @@ GROUP BY 1 WITH NO DATA; -- Test 1 step policy for smallint type buckets ALTER materialized view mat_smallint set (timescaledb.compress = true); NOTICE: defaulting compress_orderby to a +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_11" is set to "" -- All policies are added in one step SELECT timescaledb_experimental.add_policies('mat_smallint', refresh_start_offset => 10::smallint, refresh_end_offset => 1::smallint, compress_after => 11::smallint, drop_after => 20::smallint); add_policies @@ -1071,6 +1075,8 @@ GROUP BY 1 WITH NO DATA; -- Test 1 step policy for bigint type buckets ALTER materialized view mat_bigint set (timescaledb.compress = true); NOTICE: defaulting compress_orderby to a +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_14" is set to "" -- All policies are added in one step SELECT timescaledb_experimental.add_policies('mat_bigint', refresh_start_offset => 10::bigint, refresh_end_offset => 1::bigint, compress_after => 11::bigint, drop_after => 20::bigint); add_policies @@ -1142,8 +1148,12 @@ SELECT * FROM mat_bigint WHERE a>100 ORDER BY 1; ALTER MATERIALIZED VIEW mat_bigint SET (timescaledb.compress); NOTICE: defaulting compress_orderby to a +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_14" is set to "" ALTER MATERIALIZED VIEW mat_smallint SET (timescaledb.compress); NOTICE: defaulting compress_orderby to a +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_11" is set to "" \set ON_ERROR_STOP 0 SELECT add_compression_policy('mat_smallint', 0::smallint); ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for mat_smallint @@ -1192,6 +1202,9 @@ SELECT time, FROM generate_series('2000-01-01 0:00:00+0'::timestamptz, '2000-01-02 23:55:00+0', '20m') gtime (time), generate_series(1, 2, 1) gdevice (device_id); ALTER TABLE metrics SET ( timescaledb.compress ); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "metrics" is set to "" +NOTICE: default order by for hypertable "metrics" is set to ""time" DESC" SELECT compress_chunk(ch) FROM show_chunks('metrics') ch; compress_chunk ------------------------------------------ diff --git a/tsl/test/expected/chunk_utils_compression.out b/tsl/test/expected/chunk_utils_compression.out index c561a01f979..44309ecdff2 100644 --- a/tsl/test/expected/chunk_utils_compression.out +++ b/tsl/test/expected/chunk_utils_compression.out @@ -29,6 +29,7 @@ SELECT create_hypertable('public.table_to_compress', 'time', chunk_time_interval (1 row) ALTER TABLE public.table_to_compress SET (timescaledb.compress, timescaledb.compress_segmentby = 'acq_id'); +NOTICE: default order by for hypertable "table_to_compress" is set to ""time" DESC" INSERT INTO public.table_to_compress VALUES ('2020-01-01', 1234567, 777888); INSERT INTO public.table_to_compress VALUES ('2020-02-01', 567567, 890890); INSERT INTO public.table_to_compress VALUES ('2020-02-10', 1234, 5678); diff --git a/tsl/test/expected/chunk_utils_internal.out b/tsl/test/expected/chunk_utils_internal.out index 6536bd44ec7..f741753adb2 100644 --- a/tsl/test/expected/chunk_utils_internal.out +++ b/tsl/test/expected/chunk_utils_internal.out @@ -250,6 +250,7 @@ SELECT create_hypertable('public.table_to_compress', 'time', chunk_time_interval (1 row) ALTER TABLE public.table_to_compress SET (timescaledb.compress, timescaledb.compress_segmentby = 'acq_id'); +NOTICE: default order by for hypertable "table_to_compress" is set to ""time" DESC" INSERT INTO public.table_to_compress VALUES ('2020-01-01', 1234567, 777888); INSERT INTO public.table_to_compress VALUES ('2020-02-01', 567567, 890890); INSERT INTO public.table_to_compress VALUES ('2020-02-10', 1234, 5678); @@ -780,6 +781,9 @@ EXPLAIN (COSTS OFF) SELECT * from ht_try WHERE timec < '2023-01-01 01:00'; -- in the chunks' targetlists on DELETE/UPDATE works (including partially -- compressed chunks) ALTER table ht_try SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "ht_try" is set to "" +NOTICE: default order by for hypertable "ht_try" is set to "timec DESC" INSERT INTO ht_try VALUES ('2021-06-05 01:00', 10, 222); SELECT compress_chunk(show_chunks('ht_try', newer_than => '2021-01-01'::timestamptz)); compress_chunk @@ -1381,6 +1385,9 @@ SELECT create_hypertable('test2', 'time'); INSERT INTO test2 VALUES ('2020-01-01'::timestamptz, 1); ALTER TABLE test2 SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test2" is set to "" +NOTICE: default order by for hypertable "test2" is set to ""time" DESC" SELECT compress_chunk(show_chunks('test2')); compress_chunk ------------------------------------------ diff --git a/tsl/test/expected/compress_auto_sparse_index.out b/tsl/test/expected/compress_auto_sparse_index.out index a4424d6e2d2..d0839cc0130 100644 --- a/tsl/test/expected/compress_auto_sparse_index.out +++ b/tsl/test/expected/compress_auto_sparse_index.out @@ -11,6 +11,9 @@ NOTICE: adding not-null constraint to column "ts" insert into sparse select x, x from generate_series(1, 10000) x; alter table sparse set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "sparse" is set to "" +NOTICE: default order by for hypertable "sparse" is set to "ts DESC" -- When the chunks are compressed, minmax metadata are created for columns that -- have btree indexes. create index ii on sparse(value); diff --git a/tsl/test/expected/compress_bgw_reorder_drop_chunks.out b/tsl/test/expected/compress_bgw_reorder_drop_chunks.out index 58afffbda71..6ad5336113f 100644 --- a/tsl/test/expected/compress_bgw_reorder_drop_chunks.out +++ b/tsl/test/expected/compress_bgw_reorder_drop_chunks.out @@ -94,6 +94,8 @@ SELECT * FROM _timescaledb_config.bgw_job where id=:retention_job_id; --turn on compression and compress all chunks ALTER TABLE test_retention_table set (timescaledb.compress, timescaledb.compress_orderby = 'time DESC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_retention_table" is set to "" SELECT count(compress_chunk(ch)) FROM show_chunks('test_retention_table') ch; count ------- @@ -186,6 +188,8 @@ INSERT INTO test_reorder_chunks_table VALUES (5, 5); INSERT INTO test_reorder_chunks_table VALUES (6, 6); -- Enable compression ALTER TABLE test_reorder_chunks_table set (timescaledb.compress, timescaledb.compress_orderby = 'time DESC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_reorder_chunks_table" is set to "" -- Compress 2 chunks: SELECT compress_chunk(show_chunks('test_reorder_chunks_table', newer_than => 2, older_than => 4)); compress_chunk diff --git a/tsl/test/expected/compress_float8_corrupt.out b/tsl/test/expected/compress_float8_corrupt.out index 9991c0a59ae..0d5d903a9f7 100644 --- a/tsl/test/expected/compress_float8_corrupt.out +++ b/tsl/test/expected/compress_float8_corrupt.out @@ -14,6 +14,9 @@ SELECT create_hypertable('corrupt_float8','time'); (1 row) ALTER TABLE corrupt_float8 SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "corrupt_float8" is set to "" +NOTICE: default order by for hypertable "corrupt_float8" is set to ""time" DESC" -- insert these 1000 values INSERT INTO corrupt_float8 VALUES ('2023-09-21 01:47:02.979+00', 11510.1), ('2023-09-21 01:48:03.58+00', 11510.6), ('2023-09-21 01:49:01.92+00', 11511), ('2023-09-21 01:50:02.49+00', 11511.5), ('2023-09-21 01:51:03.057+00', 11511.9), diff --git a/tsl/test/expected/compressed_detoaster.out b/tsl/test/expected/compressed_detoaster.out index 0823b1d0ad1..0f5eabb8df9 100644 --- a/tsl/test/expected/compressed_detoaster.out +++ b/tsl/test/expected/compressed_detoaster.out @@ -12,6 +12,9 @@ NOTICE: adding not-null constraint to column "ts" (1 row) alter table longstr set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "longstr" is set to "" +NOTICE: default order by for hypertable "longstr" is set to "ts DESC" -- We want to test the case for inline compression. It is technically possible, -- but very hard to hit with the usual toast_tuple_target = 128 on compressed -- chunks. So here we increase the toast_tuple_target to simplify diff --git a/tsl/test/expected/compression.out b/tsl/test/expected/compression.out index 56185addf6c..bf3a83a4da9 100644 --- a/tsl/test/expected/compression.out +++ b/tsl/test/expected/compression.out @@ -531,6 +531,8 @@ NOTICE: adding not-null constraint to column "time" (1 row) ALTER TABLE plan_inval SET (timescaledb.compress,timescaledb.compress_orderby='time desc'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "plan_inval" is set to "" -- create 2 chunks INSERT INTO plan_inval SELECT * FROM (VALUES ('2000-01-01'::timestamptz,1), ('2000-01-07'::timestamptz,1)) v(time,device_id); SET max_parallel_workers_per_gather to 0; @@ -898,6 +900,9 @@ WARNING: column type "timestamp without time zone" used for "timestamp_column" (1 row) ALTER TABLE datatype_test SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "datatype_test" is set to "" +NOTICE: default order by for hypertable "datatype_test" is set to ""time" DESC" INSERT INTO datatype_test VALUES ('2000-01-01',2,4,8,4.0,8.0,'2000-01-01','2001-01-01 12:00','2001-01-01 6:00','1 week', 3.41, 4.2, 'text', 'x'); SELECT count(compress_chunk(ch)) FROM show_chunks('datatype_test') ch; count @@ -954,6 +959,9 @@ SELECT * FROM cagg_expr ORDER BY time LIMIT 5; (5 rows) ALTER TABLE metrics set(timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "metrics" is set to "" +NOTICE: default order by for hypertable "metrics" is set to ""time" DESC" -- test rescan in compress chunk dml blocker CREATE TABLE rescan_test(id integer NOT NULL, t timestamptz NOT NULL, val double precision, PRIMARY KEY(id, t)); SELECT create_hypertable('rescan_test', 't', chunk_time_interval => interval '1 day'); @@ -964,6 +972,7 @@ SELECT create_hypertable('rescan_test', 't', chunk_time_interval => interval '1 -- compression ALTER TABLE rescan_test SET (timescaledb.compress, timescaledb.compress_segmentby = 'id'); +NOTICE: default order by for hypertable "rescan_test" is set to "t DESC" -- INSERT dummy data INSERT INTO rescan_test SELECT 1, time, random() FROM generate_series('2000-01-01'::timestamptz, '2000-01-05'::timestamptz, '1h'::interval) g(time); SELECT count(*) FROM rescan_test; @@ -1100,6 +1109,9 @@ INSERT INTO ht5 SELECT '2000-01-01'::TIMESTAMPTZ; INSERT INTO ht5 SELECT '2001-01-01'::TIMESTAMPTZ; -- compressed chunk stats should not show dropped chunks ALTER TABLE ht5 SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "ht5" is set to "" +NOTICE: default order by for hypertable "ht5" is set to ""time" DESC" SELECT compress_chunk(i) FROM show_chunks('ht5') i; compress_chunk ------------------------------------------ @@ -1133,9 +1145,11 @@ SELECT create_hypertable('table1','col1', chunk_time_interval => 10); -- Trying to list an incomplete set of fields of the compound key (should fail with a nice message) ALTER TABLE table1 SET (timescaledb.compress, timescaledb.compress_segmentby = 'col1'); +NOTICE: default order by for hypertable "table1" is set to "" ERROR: column "col2" must be used for segmenting -- Listing all fields of the compound key should succeed: ALTER TABLE table1 SET (timescaledb.compress, timescaledb.compress_segmentby = 'col1,col2'); +NOTICE: default order by for hypertable "table1" is set to "" SELECT * FROM timescaledb_information.compression_settings ORDER BY hypertable_name; hypertable_schema | hypertable_name | attname | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst -------------------+-----------------+-------------+------------------------+----------------------+-------------+-------------------- @@ -1188,6 +1202,9 @@ SELECT table_name FROM create_hypertable ('compressed_ht', 'time'); (1 row) ALTER TABLE compressed_ht SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "compressed_ht" is set to "" +NOTICE: default order by for hypertable "compressed_ht" is set to ""time" DESC" INSERT INTO compressed_ht VALUES ('2020-04-20 01:01', 100, 1), ('2020-05-20 01:01', 100, 1); SELECT count(compress_chunk(ch)) FROM show_chunks('compressed_ht') ch; @@ -1247,6 +1264,9 @@ SELECT * FROM pg_stats WHERE tablename = :statchunk; (0 rows) ALTER TABLE stattest SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "stattest" is set to "" +NOTICE: default order by for hypertable "stattest" is set to ""time" DESC" -- check that approximate_row_count works with all normal chunks SELECT approximate_row_count('stattest'); approximate_row_count @@ -1445,6 +1465,7 @@ SELECT create_hypertable('stattest2', 'time', chunk_time_interval=>'1 day'::inte (1 row) ALTER TABLE stattest2 SET (timescaledb.compress, timescaledb.compress_segmentby='c1'); +NOTICE: default order by for hypertable "stattest2" is set to ""time" DESC" INSERT INTO stattest2 SELECT '2020/06/20 01:00'::TIMESTAMPTZ ,1 , generate_series(1, 200, 1); INSERT INTO stattest2 SELECT '2020/07/20 01:00'::TIMESTAMPTZ ,1 , generate_series(1, 200, 1); SELECT compress_chunk(ch) FROM show_chunks('stattest2') ch LIMIT 1; @@ -1643,6 +1664,7 @@ NOTICE: adding not-null constraint to column "time" (1 row) ALTER TABLE local_seq SET(timescaledb.compress,timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "local_seq" is set to ""time" DESC" INSERT INTO local_seq SELECT '2000-01-01',1 FROM generate_series(1,3000); INSERT INTO local_seq SELECT '2000-01-01',2 FROM generate_series(1,3500); INSERT INTO local_seq SELECT '2000-01-01',3 FROM generate_series(1,3000); @@ -1952,6 +1974,7 @@ NOTICE: adding not-null constraint to column "time" (1 row) ALTER TABLE ht_metrics_partially_compressed SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "ht_metrics_partially_compressed" is set to ""time" DESC" INSERT INTO ht_metrics_partially_compressed SELECT time, device, device * 0.1 FROM generate_series('2020-01-01'::timestamptz,'2020-01-02'::timestamptz, INTERVAL '1 m') g(time), @@ -2019,6 +2042,7 @@ ALTER TABLE i6069 SET ( timescaledb.compress, timescaledb.compress_segmentby = 'attr_id' ); +NOTICE: default order by for hypertable "i6069" is set to ""timestamp" DESC" INSERT INTO i6069 VALUES('2023-07-01', 1, 1),('2023-07-03', 2, 1),('2023-07-05', 3, 1), ('2023-07-01', 4, 1),('2023-07-03', 5, 1),('2023-07-05', 6, 1), ('2023-07-01', 7, 1),('2023-07-03', 8, 1),('2023-07-05', 9, 1), @@ -2675,6 +2699,9 @@ SELECT create_hypertable('compress_chunk_test', 'time'); INSERT INTO compress_chunk_test SELECT '2020-01-01', 'r2d2', 3.14; ALTER TABLE compress_chunk_test SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "compress_chunk_test" is set to "" +NOTICE: default order by for hypertable "compress_chunk_test" is set to ""time" DESC" SELECT show_chunks('compress_chunk_test') AS "CHUNK" \gset -- initial call will compress the chunk SELECT compress_chunk(:'CHUNK'); diff --git a/tsl/test/expected/compression_bgw-13.out b/tsl/test/expected/compression_bgw-13.out index 67e770b1878..ad217300051 100644 --- a/tsl/test/expected/compression_bgw-13.out +++ b/tsl/test/expected/compression_bgw-13.out @@ -152,6 +152,9 @@ SELECT set_integer_now_func('test_table_smallint', 'dummy_now_smallint'); INSERT INTO test_table_smallint SELECT generate_series(1,5), 10; ALTER TABLE test_table_smallint SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_smallint" is set to "" +NOTICE: default order by for hypertable "test_table_smallint" is set to ""time" DESC" \set ON_ERROR_STOP 0 select add_compression_policy( 'test_table_smallint', compress_after=> '1 day'::interval ); ERROR: invalid value for parameter compress_after @@ -193,6 +196,9 @@ SELECT set_integer_now_func('test_table_integer', 'dummy_now_integer'); INSERT INTO test_table_integer SELECT generate_series(1,5), 10; ALTER TABLE test_table_integer SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_integer" is set to "" +NOTICE: default order by for hypertable "test_table_integer" is set to ""time" DESC" SELECT add_compression_policy('test_table_integer', 2::INTEGER) AS compressjob_id \gset SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone @@ -230,6 +236,9 @@ SELECT set_integer_now_func('test_table_bigint', 'dummy_now_bigint'); INSERT INTO test_table_bigint SELECT generate_series(1,5), 10; ALTER TABLE test_table_bigint SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_bigint" is set to "" +NOTICE: default order by for hypertable "test_table_bigint" is set to ""time" DESC" SELECT add_compression_policy('test_table_bigint', 2::BIGINT) AS compressjob_id \gset SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone @@ -267,6 +276,9 @@ SELECT set_integer_now_func('test_table_nologin', 'dummy_now_bigint'); (1 row) ALTER TABLE test_table_nologin set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_nologin" is set to "" +NOTICE: default order by for hypertable "test_table_nologin" is set to ""time" DESC" \set ON_ERROR_STOP 0 SELECT add_compression_policy('test_table_nologin', 2::int); ERROR: permission denied to start background process as role "nologin_role" @@ -300,6 +312,9 @@ FROM conditions GROUP BY device, time_bucket(INTERVAL '1 hour', "time") WITH NO DATA; CALL refresh_continuous_aggregate('conditions_summary', NULL, NULL); ALTER TABLE conditions SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "conditions" is set to "" +NOTICE: default order by for hypertable "conditions" is set to ""time" DESC" SELECT COUNT(*) AS dropped_chunks_count FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00'); dropped_chunks_count @@ -532,6 +547,9 @@ SELECT compress_chunk(:'CHUNK_NAME'::regclass); CREATE TABLE metrics(time timestamptz NOT NULL); SELECT hypertable_id AS "HYPERTABLE_ID", schema_name, table_name, created FROM create_hypertable('metrics','time') \gset ALTER TABLE metrics SET (timescaledb.compress); +psql:include/recompress_basic.sql:120: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +psql:include/recompress_basic.sql:120: NOTICE: default segment by for hypertable "metrics" is set to "" +psql:include/recompress_basic.sql:120: NOTICE: default order by for hypertable "metrics" is set to ""time" DESC" -- create chunk with some data and compress INSERT INTO metrics SELECT '2000-01-01' FROM generate_series(1,10); -- create custom compression job without recompress boolean diff --git a/tsl/test/expected/compression_bgw-14.out b/tsl/test/expected/compression_bgw-14.out index c97a467b045..a5b9036c7c0 100644 --- a/tsl/test/expected/compression_bgw-14.out +++ b/tsl/test/expected/compression_bgw-14.out @@ -152,6 +152,9 @@ SELECT set_integer_now_func('test_table_smallint', 'dummy_now_smallint'); INSERT INTO test_table_smallint SELECT generate_series(1,5), 10; ALTER TABLE test_table_smallint SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_smallint" is set to "" +NOTICE: default order by for hypertable "test_table_smallint" is set to ""time" DESC" \set ON_ERROR_STOP 0 select add_compression_policy( 'test_table_smallint', compress_after=> '1 day'::interval ); ERROR: invalid value for parameter compress_after @@ -193,6 +196,9 @@ SELECT set_integer_now_func('test_table_integer', 'dummy_now_integer'); INSERT INTO test_table_integer SELECT generate_series(1,5), 10; ALTER TABLE test_table_integer SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_integer" is set to "" +NOTICE: default order by for hypertable "test_table_integer" is set to ""time" DESC" SELECT add_compression_policy('test_table_integer', 2::INTEGER) AS compressjob_id \gset SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone @@ -230,6 +236,9 @@ SELECT set_integer_now_func('test_table_bigint', 'dummy_now_bigint'); INSERT INTO test_table_bigint SELECT generate_series(1,5), 10; ALTER TABLE test_table_bigint SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_bigint" is set to "" +NOTICE: default order by for hypertable "test_table_bigint" is set to ""time" DESC" SELECT add_compression_policy('test_table_bigint', 2::BIGINT) AS compressjob_id \gset SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone @@ -267,6 +276,9 @@ SELECT set_integer_now_func('test_table_nologin', 'dummy_now_bigint'); (1 row) ALTER TABLE test_table_nologin set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_nologin" is set to "" +NOTICE: default order by for hypertable "test_table_nologin" is set to ""time" DESC" \set ON_ERROR_STOP 0 SELECT add_compression_policy('test_table_nologin', 2::int); ERROR: permission denied to start background process as role "nologin_role" @@ -300,6 +312,9 @@ FROM conditions GROUP BY device, time_bucket(INTERVAL '1 hour', "time") WITH NO DATA; CALL refresh_continuous_aggregate('conditions_summary', NULL, NULL); ALTER TABLE conditions SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "conditions" is set to "" +NOTICE: default order by for hypertable "conditions" is set to ""time" DESC" SELECT COUNT(*) AS dropped_chunks_count FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00'); dropped_chunks_count @@ -532,6 +547,9 @@ SELECT compress_chunk(:'CHUNK_NAME'::regclass); CREATE TABLE metrics(time timestamptz NOT NULL); SELECT hypertable_id AS "HYPERTABLE_ID", schema_name, table_name, created FROM create_hypertable('metrics','time') \gset ALTER TABLE metrics SET (timescaledb.compress); +psql:include/recompress_basic.sql:120: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +psql:include/recompress_basic.sql:120: NOTICE: default segment by for hypertable "metrics" is set to "" +psql:include/recompress_basic.sql:120: NOTICE: default order by for hypertable "metrics" is set to ""time" DESC" -- create chunk with some data and compress INSERT INTO metrics SELECT '2000-01-01' FROM generate_series(1,10); -- create custom compression job without recompress boolean diff --git a/tsl/test/expected/compression_bgw-15.out b/tsl/test/expected/compression_bgw-15.out index c97a467b045..a5b9036c7c0 100644 --- a/tsl/test/expected/compression_bgw-15.out +++ b/tsl/test/expected/compression_bgw-15.out @@ -152,6 +152,9 @@ SELECT set_integer_now_func('test_table_smallint', 'dummy_now_smallint'); INSERT INTO test_table_smallint SELECT generate_series(1,5), 10; ALTER TABLE test_table_smallint SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_smallint" is set to "" +NOTICE: default order by for hypertable "test_table_smallint" is set to ""time" DESC" \set ON_ERROR_STOP 0 select add_compression_policy( 'test_table_smallint', compress_after=> '1 day'::interval ); ERROR: invalid value for parameter compress_after @@ -193,6 +196,9 @@ SELECT set_integer_now_func('test_table_integer', 'dummy_now_integer'); INSERT INTO test_table_integer SELECT generate_series(1,5), 10; ALTER TABLE test_table_integer SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_integer" is set to "" +NOTICE: default order by for hypertable "test_table_integer" is set to ""time" DESC" SELECT add_compression_policy('test_table_integer', 2::INTEGER) AS compressjob_id \gset SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone @@ -230,6 +236,9 @@ SELECT set_integer_now_func('test_table_bigint', 'dummy_now_bigint'); INSERT INTO test_table_bigint SELECT generate_series(1,5), 10; ALTER TABLE test_table_bigint SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_bigint" is set to "" +NOTICE: default order by for hypertable "test_table_bigint" is set to ""time" DESC" SELECT add_compression_policy('test_table_bigint', 2::BIGINT) AS compressjob_id \gset SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone @@ -267,6 +276,9 @@ SELECT set_integer_now_func('test_table_nologin', 'dummy_now_bigint'); (1 row) ALTER TABLE test_table_nologin set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_nologin" is set to "" +NOTICE: default order by for hypertable "test_table_nologin" is set to ""time" DESC" \set ON_ERROR_STOP 0 SELECT add_compression_policy('test_table_nologin', 2::int); ERROR: permission denied to start background process as role "nologin_role" @@ -300,6 +312,9 @@ FROM conditions GROUP BY device, time_bucket(INTERVAL '1 hour', "time") WITH NO DATA; CALL refresh_continuous_aggregate('conditions_summary', NULL, NULL); ALTER TABLE conditions SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "conditions" is set to "" +NOTICE: default order by for hypertable "conditions" is set to ""time" DESC" SELECT COUNT(*) AS dropped_chunks_count FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00'); dropped_chunks_count @@ -532,6 +547,9 @@ SELECT compress_chunk(:'CHUNK_NAME'::regclass); CREATE TABLE metrics(time timestamptz NOT NULL); SELECT hypertable_id AS "HYPERTABLE_ID", schema_name, table_name, created FROM create_hypertable('metrics','time') \gset ALTER TABLE metrics SET (timescaledb.compress); +psql:include/recompress_basic.sql:120: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +psql:include/recompress_basic.sql:120: NOTICE: default segment by for hypertable "metrics" is set to "" +psql:include/recompress_basic.sql:120: NOTICE: default order by for hypertable "metrics" is set to ""time" DESC" -- create chunk with some data and compress INSERT INTO metrics SELECT '2000-01-01' FROM generate_series(1,10); -- create custom compression job without recompress boolean diff --git a/tsl/test/expected/compression_bgw-16.out b/tsl/test/expected/compression_bgw-16.out index c97a467b045..a5b9036c7c0 100644 --- a/tsl/test/expected/compression_bgw-16.out +++ b/tsl/test/expected/compression_bgw-16.out @@ -152,6 +152,9 @@ SELECT set_integer_now_func('test_table_smallint', 'dummy_now_smallint'); INSERT INTO test_table_smallint SELECT generate_series(1,5), 10; ALTER TABLE test_table_smallint SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_smallint" is set to "" +NOTICE: default order by for hypertable "test_table_smallint" is set to ""time" DESC" \set ON_ERROR_STOP 0 select add_compression_policy( 'test_table_smallint', compress_after=> '1 day'::interval ); ERROR: invalid value for parameter compress_after @@ -193,6 +196,9 @@ SELECT set_integer_now_func('test_table_integer', 'dummy_now_integer'); INSERT INTO test_table_integer SELECT generate_series(1,5), 10; ALTER TABLE test_table_integer SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_integer" is set to "" +NOTICE: default order by for hypertable "test_table_integer" is set to ""time" DESC" SELECT add_compression_policy('test_table_integer', 2::INTEGER) AS compressjob_id \gset SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone @@ -230,6 +236,9 @@ SELECT set_integer_now_func('test_table_bigint', 'dummy_now_bigint'); INSERT INTO test_table_bigint SELECT generate_series(1,5), 10; ALTER TABLE test_table_bigint SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_bigint" is set to "" +NOTICE: default order by for hypertable "test_table_bigint" is set to ""time" DESC" SELECT add_compression_policy('test_table_bigint', 2::BIGINT) AS compressjob_id \gset SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id; id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone @@ -267,6 +276,9 @@ SELECT set_integer_now_func('test_table_nologin', 'dummy_now_bigint'); (1 row) ALTER TABLE test_table_nologin set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_nologin" is set to "" +NOTICE: default order by for hypertable "test_table_nologin" is set to ""time" DESC" \set ON_ERROR_STOP 0 SELECT add_compression_policy('test_table_nologin', 2::int); ERROR: permission denied to start background process as role "nologin_role" @@ -300,6 +312,9 @@ FROM conditions GROUP BY device, time_bucket(INTERVAL '1 hour', "time") WITH NO DATA; CALL refresh_continuous_aggregate('conditions_summary', NULL, NULL); ALTER TABLE conditions SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "conditions" is set to "" +NOTICE: default order by for hypertable "conditions" is set to ""time" DESC" SELECT COUNT(*) AS dropped_chunks_count FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00'); dropped_chunks_count @@ -532,6 +547,9 @@ SELECT compress_chunk(:'CHUNK_NAME'::regclass); CREATE TABLE metrics(time timestamptz NOT NULL); SELECT hypertable_id AS "HYPERTABLE_ID", schema_name, table_name, created FROM create_hypertable('metrics','time') \gset ALTER TABLE metrics SET (timescaledb.compress); +psql:include/recompress_basic.sql:120: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +psql:include/recompress_basic.sql:120: NOTICE: default segment by for hypertable "metrics" is set to "" +psql:include/recompress_basic.sql:120: NOTICE: default order by for hypertable "metrics" is set to ""time" DESC" -- create chunk with some data and compress INSERT INTO metrics SELECT '2000-01-01' FROM generate_series(1,10); -- create custom compression job without recompress boolean diff --git a/tsl/test/expected/compression_conflicts.out b/tsl/test/expected/compression_conflicts.out index 9679a6b12af..b146ee76d94 100644 --- a/tsl/test/expected/compression_conflicts.out +++ b/tsl/test/expected/compression_conflicts.out @@ -11,6 +11,9 @@ SELECT table_name FROM create_hypertable('comp_conflicts_1','time'); (1 row) ALTER TABLE comp_conflicts_1 SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "comp_conflicts_1" is set to "" +NOTICE: default order by for hypertable "comp_conflicts_1" is set to ""time" DESC" -- implicitly create chunk INSERT INTO comp_conflicts_1 VALUES ('2020-01-01','d1',0.1); -- sanity check behaviour without compression @@ -101,8 +104,8 @@ SELECT table_name FROM create_hypertable('comp_conflicts_2','time'); comp_conflicts_2 (1 row) -ALTER TABLE comp_conflicts_2 SET (timescaledb.compress); -WARNING: column "device" should be used for segmenting or ordering +ALTER TABLE comp_conflicts_2 SET (timescaledb.compress, timescaledb.compress_segmentby=''); +NOTICE: default order by for hypertable "comp_conflicts_2" is set to ""time" DESC, device" -- implicitly create chunk INSERT INTO comp_conflicts_2 VALUES ('2020-01-01','d1',0.1); INSERT INTO comp_conflicts_2 VALUES ('2020-01-01','d2',0.2); @@ -178,6 +181,7 @@ SELECT table_name FROM create_hypertable('comp_conflicts_3','time'); (1 row) ALTER TABLE comp_conflicts_3 SET (timescaledb.compress,timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "comp_conflicts_3" is set to ""time" DESC" -- implicitly create chunk INSERT INTO comp_conflicts_3 VALUES ('2020-01-01','d1',0.1); INSERT INTO comp_conflicts_3 VALUES ('2020-01-01','d2',0.2); @@ -287,7 +291,7 @@ SELECT table_name FROM create_hypertable('comp_conflicts_4','time'); comp_conflicts_4 (1 row) -ALTER TABLE comp_conflicts_4 SET (timescaledb.compress,timescaledb.compress_orderby='time,device'); +ALTER TABLE comp_conflicts_4 SET (timescaledb.compress,timescaledb.compress_segmentby='',timescaledb.compress_orderby='time,device'); -- implicitly create chunk INSERT INTO comp_conflicts_4 SELECT generate_series('2020-01-01'::timestamp, '2020-01-01 2:00:00', '1s'), 'd1',0.1; INSERT INTO comp_conflicts_4 VALUES ('2020-01-01','d2',0.2); @@ -424,6 +428,7 @@ ALTER TABLE compressed_ht SET ( timescaledb.compress, timescaledb.compress_segmentby = 'sensor_id' ); +NOTICE: default order by for hypertable "compressed_ht" is set to ""time" DESC" SELECT COMPRESS_CHUNK(SHOW_CHUNKS('compressed_ht')); compress_chunk ----------------------------------------- diff --git a/tsl/test/expected/compression_ddl.out b/tsl/test/expected/compression_ddl.out index 189b421ad4a..e2a2591a869 100644 --- a/tsl/test/expected/compression_ddl.out +++ b/tsl/test/expected/compression_ddl.out @@ -71,6 +71,9 @@ WARNING: column type "timestamp without time zone" used for 
"time" does not fol (1 row) ALTER TABLE records SET (timescaledb.compress = true); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "records" is set to "" +NOTICE: default order by for hypertable "records" is set to ""time" DESC" ALTER TABLE records ADD COLUMN col1 boolean DEFAULT false NOT NULL; -- NULL constraints are useless and it is safe allow adding this -- column with NULL constraint to a compressed hypertable (Issue #5151) @@ -649,6 +652,8 @@ INSERT INTO test1 SELECT '2018-03-02 1:05'::TIMESTAMPTZ, 2; NOTICE: raise notice test1_print_trigger called NOTICE: raise notice test1_print_trigger called ALTER TABLE test1 set (timescaledb.compress, timescaledb.compress_orderby = '"Time" DESC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test1" is set to "" SELECT count(compress_chunk(ch)) FROM show_chunks('test1') ch; count ------- @@ -675,6 +680,9 @@ INSERT INTO i2844 SELECT generate_series('2000-01-01'::timestamptz, '2000-01-02' CREATE MATERIALIZED VIEW test_agg WITH (timescaledb.continuous) AS SELECT time_bucket('1 hour', created_at) AS bucket, AVG(c1) AS avg_c1 FROM i2844 GROUP BY bucket; NOTICE: refreshing continuous aggregate "test_agg" ALTER TABLE i2844 SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "i2844" is set to "" +NOTICE: default order by for hypertable "i2844" is set to "created_at DESC" SELECT * FROM _timescaledb_catalog.compression_settings WHERE relid='i2844'::regclass; relid | segmentby | orderby | orderby_desc | orderby_nullsfirst -------+-----------+--------------+--------------+-------------------- @@ -982,6 +990,7 @@ SELECT create_hypertable('test_defaults','time'); (1 row) ALTER TABLE test_defaults SET (timescaledb.compress,timescaledb.compress_segmentby='device_id'); +psql:include/compression_alter.sql:119: NOTICE: default order by for hypertable "test_defaults" is set to ""time" DESC" -- create 2 chunks INSERT INTO test_defaults SELECT '2000-01-01', 1; INSERT INTO test_defaults SELECT '2001-01-01', 1; @@ -1238,6 +1247,9 @@ SELECT create_hypertable('issue4140', 'time'); (1 row) ALTER TABLE issue4140 SET(timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "issue4140" is set to "" +NOTICE: default order by for hypertable "issue4140" is set to ""time" DESC" SELECT format('%I.%I', schema_name, table_name)::regclass AS ctable FROM _timescaledb_catalog.hypertable WHERE id = (SELECT compressed_hypertable_id FROM _timescaledb_catalog.hypertable WHERE table_name = 'issue4140') \gset @@ -1304,6 +1316,7 @@ SELECT create_hypertable( (1 row) alter table "tEst2" set (timescaledb.compress=true, timescaledb.compress_segmentby='"Id"'); +NOTICE: default order by for hypertable "tEst2" is set to ""Time" DESC" CREATE MATERIALIZED VIEW "tEst2_mv" WITH (timescaledb.continuous) AS SELECT "Id" as "Idd", @@ -1856,6 +1869,9 @@ VALUES -- chunk1 ('2022-01-01 00:04'::timestamptz, 1, 2); -- enable compression, compress all chunks ALTER TABLE test_partials SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_partials" is set to "" +NOTICE: default order by for hypertable "test_partials" is set to ""time" DESC" SELECT compress_chunk(show_chunks('test_partials')); compress_chunk ------------------------------------------- @@ -2134,6 +2150,9 @@ INSERT INTO space_part values ('2021-01-01 00:03', 2, 1, 1); -- compress them ALTER TABLE space_part SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "space_part" is set to "" +NOTICE: default order by for hypertable "space_part" is set to ""time" DESC" SELECT compress_chunk(show_chunks('space_part')); compress_chunk ------------------------------------------- @@ -2330,8 +2349,9 @@ WARNING: column type "character varying" used for "col1" does not follow best p create unique index myidx_unique ON mytab (lower(col1::text), col2, departure_ts, arrival_ts); alter table mytab set (timescaledb.compress); -WARNING: column "col2" should be used for segmenting or ordering -WARNING: column "arrival_ts" should be used for segmenting or ordering +WARNING: there was some uncertainty picking the default segment by for the hypertable: Please make sure col2 is not a unique column and appropriate for a segment by +NOTICE: default segment by for hypertable "mytab" is set to "col2" +NOTICE: default order by for hypertable "mytab" is set to "departure_ts DESC, arrival_ts" -- github issue 6186 -- verify inserting into index works as expected during decompression insert into mytab (col1, col2, value, arrival_ts, departure_ts) @@ -2416,6 +2436,7 @@ INSERT INTO hyper_ex(time, device_id,sensor_1) VALUES ('2022-01-01 01:00', 'dev2', 12); \set ON_ERROR_STOP 0 ALTER TABLE hyper_ex SET (timescaledb.compress, timescaledb.compress_segmentby='device_id'); +NOTICE: default order by for hypertable "hyper_ex" is set to ""time" DESC" ERROR: constraint hyper_ex_time_device_id_excl is not supported for compression \set ON_ERROR_STOP 1 -- check deferred uniqueness @@ -2433,6 +2454,9 @@ NOTICE: adding not-null constraint to column "time" INSERT INTO hyper_unique_deferred(time, device_id,sensor_1) VALUES (1257987700000000000, 'dev2', 11); alter table hyper_unique_deferred set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "hyper_unique_deferred" is set to "" +NOTICE: default order by for hypertable "hyper_unique_deferred" is set to ""time" DESC" select compress_chunk(show_chunks('hyper_unique_deferred')); -- also worked fine before 2.11.0 compress_chunk ------------------------------------------- diff --git a/tsl/test/expected/compression_defaults.out b/tsl/test/expected/compression_defaults.out index 533d52a6ce6..503f785f246 100644 --- a/tsl/test/expected/compression_defaults.out +++ b/tsl/test/expected/compression_defaults.out @@ -1,7 +1,7 @@ -- This file and its contents are licensed under the Timescale License. -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. 
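The compression_defaults.out hunks that follow are the heart of this PR's test coverage: they toggle the two new GUCs, timescaledb.compression_segmentby_default_function and timescaledb.compression_orderby_default_function, through every combination. As a compact reference, the sequence they exercise looks like the sketch below; it is distilled from the expected output that follows (the metrics hypertable and its device_id column are the test's own fixtures), not an independent example.

-- Setting a GUC to '' disables the corresponding default-picking function;
-- RESET restores the built-in default.
SET timescaledb.compression_segmentby_default_function = '';
SET timescaledb.compression_orderby_default_function = '';
ALTER TABLE metrics SET (timescaledb.compress = true);  -- pre-PR behavior: warns that "device_id" should be used
ALTER TABLE metrics SET (timescaledb.compress = false);

SET timescaledb.compression_segmentby_default_function = '';
RESET timescaledb.compression_orderby_default_function;
ALTER TABLE metrics SET (timescaledb.compress = true);  -- order_by defaults to 'device_id, "time" DESC'
ALTER TABLE metrics SET (timescaledb.compress = false);

RESET timescaledb.compression_segmentby_default_function;
SET timescaledb.compression_orderby_default_function = '';
ALTER TABLE metrics SET (timescaledb.compress = true);  -- segment_by defaults to 'device_id'
ALTER TABLE metrics SET (timescaledb.compress = false);

RESET timescaledb.compression_segmentby_default_function;
RESET timescaledb.compression_orderby_default_function;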
-\c :TEST_DBNAME :ROLE_SUPERUSER +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER -- statitics on CREATE TABLE "public"."metrics" ( "time" timestamp with time zone NOT NULL, @@ -32,11 +32,66 @@ SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); (1 row) SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY['device_id']); - get_orderby_defaults ---------------------------------------------- - {"clauses": ["time DESC"], "confidence": 8} -(1 row) - + get_orderby_defaults +------------------------------------------------- + {"clauses": ["\"time\" DESC"], "confidence": 8} +(1 row) + +ALTER TABLE metrics SET (timescaledb.compress = true); +NOTICE: default segment by for hypertable "metrics" is set to "device_id" +NOTICE: default order by for hypertable "metrics" is set to ""time" DESC" +SELECT * FROM _timescaledb_catalog.compression_settings; + relid | segmentby | orderby | orderby_desc | orderby_nullsfirst +---------+-------------+---------+--------------+-------------------- + metrics | {device_id} | {time} | {t} | {t} +(1 row) + +ALTER TABLE metrics SET (timescaledb.compress = false); +ALTER TABLE metrics SET (timescaledb.compress = true, timescaledb.compress_segmentby = 'device_id'); +NOTICE: default order by for hypertable "metrics" is set to ""time" DESC" +SELECT * FROM _timescaledb_catalog.compression_settings; + relid | segmentby | orderby | orderby_desc | orderby_nullsfirst +---------+-------------+---------+--------------+-------------------- + metrics | {device_id} | {time} | {t} | {t} +(1 row) + +ALTER TABLE metrics SET (timescaledb.compress = false); +--make sure all the GUC combinations work +SET timescaledb.compression_segmentby_default_function = ''; +SET timescaledb.compression_orderby_default_function = ''; +ALTER TABLE metrics SET (timescaledb.compress = true); +WARNING: column "device_id" should be used for segmenting or ordering +SELECT * FROM _timescaledb_catalog.compression_settings; + relid | segmentby | orderby | orderby_desc | orderby_nullsfirst +---------+-----------+---------+--------------+-------------------- + metrics | | {time} | {t} | {t} +(1 row) + +ALTER TABLE metrics SET (timescaledb.compress = false); +SET timescaledb.compression_segmentby_default_function = ''; +RESET timescaledb.compression_orderby_default_function; +ALTER TABLE metrics SET (timescaledb.compress = true); +NOTICE: default order by for hypertable "metrics" is set to "device_id, "time" DESC" +SELECT * FROM _timescaledb_catalog.compression_settings; + relid | segmentby | orderby | orderby_desc | orderby_nullsfirst +---------+-----------+------------------+--------------+-------------------- + metrics | | {device_id,time} | {f,t} | {f,t} +(1 row) + +ALTER TABLE metrics SET (timescaledb.compress = false); +RESET timescaledb.compression_segmentby_default_function; +SET timescaledb.compression_orderby_default_function = ''; +ALTER TABLE metrics SET (timescaledb.compress = true); +NOTICE: default segment by for hypertable "metrics" is set to "device_id" +SELECT * FROM _timescaledb_catalog.compression_settings; + relid | segmentby | orderby | orderby_desc | orderby_nullsfirst +---------+-------------+---------+--------------+-------------------- + metrics | {device_id} | {time} | {t} | {t} +(1 row) + +ALTER TABLE metrics SET (timescaledb.compress = false); +RESET timescaledb.compression_segmentby_default_function; +RESET timescaledb.compression_orderby_default_function; --opposite order of columns drop index test_idx; CREATE UNIQUE INDEX test_idx ON metrics(time, 
device_id); @@ -47,9 +102,9 @@ SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); (1 row) SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY['device_id']); - get_orderby_defaults ---------------------------------------------- - {"clauses": ["time DESC"], "confidence": 8} + get_orderby_defaults +------------------------------------------------- + {"clauses": ["\"time\" DESC"], "confidence": 8} (1 row) --use a high-cardinality column in the index @@ -62,9 +117,9 @@ SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); (1 row) SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY[]::text[]); - get_orderby_defaults ----------------------------------------------------- - {"clauses": ["val", "time DESC"], "confidence": 8} + get_orderby_defaults +-------------------------------------------------------- + {"clauses": ["val", "\"time\" DESC"], "confidence": 8} (1 row) --use a non-unique index @@ -77,9 +132,9 @@ SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); (1 row) SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY['device_id']::text[]); - get_orderby_defaults ---------------------------------------------- - {"clauses": ["time DESC"], "confidence": 5} + get_orderby_defaults +------------------------------------------------- + {"clauses": ["\"time\" DESC"], "confidence": 5} (1 row) --another non-unique index column order @@ -92,9 +147,9 @@ SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); (1 row) SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY['device_id']::text[]); - get_orderby_defaults ----------------------------------------------------- - {"clauses": ["time DESC", "val"], "confidence": 5} + get_orderby_defaults +-------------------------------------------------------- + {"clauses": ["\"time\" DESC", "val"], "confidence": 5} (1 row) --use a high-cardinality column in the non-unque index @@ -107,9 +162,9 @@ SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); (1 row) SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY[]::text[]); - get_orderby_defaults ----------------------------------------------------- - {"clauses": ["time DESC", "val"], "confidence": 5} + get_orderby_defaults +-------------------------------------------------------- + {"clauses": ["\"time\" DESC", "val"], "confidence": 5} (1 row) --use 2 indexes @@ -123,9 +178,9 @@ SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); (1 row) SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY['device_id']::text[]); - get_orderby_defaults ----------------------------------------------------- - {"clauses": ["time DESC", "val"], "confidence": 5} + get_orderby_defaults +-------------------------------------------------------- + {"clauses": ["\"time\" DESC", "val"], "confidence": 5} (1 row) --no indexes @@ -138,11 +193,22 @@ SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); (1 row) SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY[]::text[]); - get_orderby_defaults ---------------------------------------------- - {"clauses": ["time DESC"], "confidence": 5} + get_orderby_defaults +------------------------------------------------- + {"clauses": ["\"time\" DESC"], "confidence": 5} +(1 row) + +ALTER TABLE metrics SET (timescaledb.compress = true); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not 
have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "metrics" is set to "" +NOTICE: default order by for hypertable "metrics" is set to ""time" DESC" +SELECT * FROM _timescaledb_catalog.compression_settings; + relid | segmentby | orderby | orderby_desc | orderby_nullsfirst +---------+-----------+---------+--------------+-------------------- + metrics | | {time} | {t} | {t} (1 row) +ALTER TABLE metrics SET (timescaledb.compress = false); -- tables with no stats -- drop table metrics; --serial case @@ -155,7 +221,7 @@ CREATE TABLE "public"."metrics" ( SELECT create_hypertable('public.metrics', 'time', create_default_indexes=>false); create_hypertable ---------------------- - (2,public,metrics,t) + (8,public,metrics,t) (1 row) --no indexes @@ -166,9 +232,9 @@ SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); (1 row) SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY[]::text[]); - get_orderby_defaults ---------------------------------------------- - {"clauses": ["time DESC"], "confidence": 5} + get_orderby_defaults +------------------------------------------------- + {"clauses": ["\"time\" DESC"], "confidence": 5} (1 row) --minimum index @@ -220,7 +286,7 @@ CREATE TABLE "public"."metrics" ( SELECT create_hypertable('public.metrics', 'time', create_default_indexes=>false); create_hypertable ---------------------- - (3,public,metrics,t) + (9,public,metrics,t) (1 row) --minimum index @@ -249,9 +315,9 @@ CREATE TABLE "public"."metrics" ( "val" double precision ) WITH (autovacuum_enabled=0); SELECT create_hypertable('public.metrics', 'time', create_default_indexes=>true); - create_hypertable ----------------------- - (4,public,metrics,t) + create_hypertable +----------------------- + (10,public,metrics,t) (1 row) SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); @@ -261,9 +327,9 @@ SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); (1 row) SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY[]::text[]); - get_orderby_defaults ---------------------------------------------- - {"clauses": ["time DESC"], "confidence": 5} + get_orderby_defaults +------------------------------------------------- + {"clauses": ["\"time\" DESC"], "confidence": 5} (1 row) CREATE INDEX test_idx ON metrics(device_id); @@ -274,9 +340,9 @@ SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); (1 row) SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY['device_id']); - get_orderby_defaults ---------------------------------------------- - {"clauses": ["time DESC"], "confidence": 5} + get_orderby_defaults +------------------------------------------------- + {"clauses": ["\"time\" DESC"], "confidence": 5} (1 row) drop index test_idx; @@ -288,8 +354,46 @@ SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); (1 row) SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY['device_id']::text[]); - get_orderby_defaults ---------------------------------------------- - {"clauses": ["time DESC"], "confidence": 8} + get_orderby_defaults +------------------------------------------------- + {"clauses": ["\"time\" DESC"], "confidence": 8} (1 row) +--test on an empty order_by +CREATE TABLE table1(col1 INT NOT NULL, col2 INT); +SELECT create_hypertable('table1','col1', chunk_time_interval => 10); + 
create_hypertable +---------------------- + (11,public,table1,t) +(1 row) + +SELECT _timescaledb_functions.get_orderby_defaults('table1', ARRAY['col1']::text[]); + get_orderby_defaults +---------------------------------- + {"clauses": [], "confidence": 5} +(1 row) + +ALTER TABLE table1 SET (timescaledb.compress, timescaledb.compress_segmentby = 'col1'); +NOTICE: default order by for hypertable "table1" is set to "" +SELECT * FROM _timescaledb_catalog.compression_settings; + relid | segmentby | orderby | orderby_desc | orderby_nullsfirst +--------+-----------+---------+--------------+-------------------- + table1 | {col1} | | | +(1 row) + +ALTER TABLE table1 SET (timescaledb.compress = false); +\set ON_ERROR_STOP 0 +SET timescaledb.compression_segmentby_default_function = 'function_does_not_exist'; +ERROR: invalid value for parameter "timescaledb.compression_segmentby_default_function": "function_does_not_exist" +SET timescaledb.compression_orderby_default_function = 'function_does_not_exist'; +ERROR: invalid value for parameter "timescaledb.compression_orderby_default_function": "function_does_not_exist" +--wrong function signatures +SET timescaledb.compression_segmentby_default_function = '_timescaledb_functions.get_orderby_defaults'; +ERROR: invalid value for parameter "timescaledb.compression_segmentby_default_function": "_timescaledb_functions.get_orderby_defaults" +SET timescaledb.compression_orderby_default_function = '_timescaledb_functions.get_segmentby_defaults'; +ERROR: invalid value for parameter "timescaledb.compression_orderby_default_function": "_timescaledb_functions.get_segmentby_defaults" +\set ON_ERROR_STOP 1 +SET timescaledb.compression_orderby_default_function = '_timescaledb_functions.get_orderby_defaults'; +SET timescaledb.compression_segmentby_default_function = '_timescaledb_functions.get_segmentby_defaults'; +RESET timescaledb.compression_segmentby_default_function; +RESET timescaledb.compression_orderby_default_function; diff --git a/tsl/test/expected/compression_errors-13.out b/tsl/test/expected/compression_errors-13.out index fd931ce007e..ca90ec06dbd 100644 --- a/tsl/test/expected/compression_errors-13.out +++ b/tsl/test/expected/compression_errors-13.out @@ -51,6 +51,7 @@ select create_hypertable('default_skipped', 'a', chunk_time_interval=> 10); (1 row) alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +NOTICE: default order by for hypertable "default_skipped" is set to "a DESC" alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); create table with_rls (a integer, b integer); ALTER TABLE with_rls ENABLE ROW LEVEL SECURITY; @@ -73,8 +74,12 @@ SELECT * FROM _timescaledb_catalog.compression_settings ORDER BY relid::text; (2 rows) ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d DeSc NullS lAsT'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo3" is set to "" --shold allow alter since segment by was empty ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d Asc NullS lAsT'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo3" is set to "" --this is ok too ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c', timescaledb.compress_orderby = 'd DeSc NullS lAsT'); -- Negative test cases --- @@ -121,48 +126,78 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'd') ERROR: column "d" does not exist HINT: The timescaledb.compress_segmentby option must reference a valid column. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'd'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: column "d" does not exist HINT: The timescaledb.compress_orderby option must reference a valid column. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc nulls" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls thirsty'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc nulls thirsty" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c climb nulls first'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c climb nulls first" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. 
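A note on the compression_defaults.out changes above: both helper functions return a JSON document whose confidence field records how the pick was made; in those outputs, picks backed by a unique index carry confidence 8 while index-less or non-unique picks carry confidence 5. A minimal way to inspect the would-be defaults before enabling compression, using the same calls as the test (hypertable name as in that file):

SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics');
SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY['device_id']::text[]);
-- e.g. {"clauses": ["\"time\" DESC"], "confidence": 8}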
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c nulls first asC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c nulls first asC" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls first asc'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc nulls first asc" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc hurry'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc hurry" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c descend'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c descend" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c; SELECT 1'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c; SELECT 1" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = '1,2'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "1,2" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c + 1'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c + 1" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'random()'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "random()" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c LIMIT 1'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c LIMIT 1" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c USING <'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c USING <" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 't COLLATE "en_US"'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "t COLLATE "en_US"" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. 
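The GUC combinations walked through in compression_defaults.out reduce to the following round trip; an empty string opts out of default picking for that side, and the shipped functions can be set back explicitly. All statements below are taken from that test:

-- opt out of one or both defaults; enabling compression then behaves as before:
SET timescaledb.compression_segmentby_default_function = '';
SET timescaledb.compression_orderby_default_function = '';
-- point the GUCs at the built-in implementations (a non-existent or
-- wrongly-typed function is rejected at SET time, as the test shows):
SET timescaledb.compression_orderby_default_function = '_timescaledb_functions.get_orderby_defaults';
SET timescaledb.compression_segmentby_default_function = '_timescaledb_functions.get_segmentby_defaults';
-- restore the boot values:
RESET timescaledb.compression_segmentby_default_function;
RESET timescaledb.compression_orderby_default_function;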
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c asc' , timescaledb.compress_orderby = 'c'); @@ -184,16 +219,22 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + ERROR: unable to parse segmenting option "c + b" HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, p'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: invalid ordering column type point DETAIL: Could not identify a less-than operator for the type. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'b, b'); ERROR: duplicate column name "b" HINT: The timescaledb.compress_segmentby option must reference distinct column. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'b, b'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: duplicate column name "b" HINT: The timescaledb.compress_orderby option must reference distinct column. --should succeed ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, b'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" --ddl on ht with compression ALTER TABLE foo DROP COLUMN a; ERROR: cannot drop column named in partition key @@ -308,7 +349,7 @@ select table_name from create_hypertable('table_constr', 'timec', chunk_time_int BEGIN; ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_segmentby = 'd'); -WARNING: column "device_id" should be used for segmenting or ordering +NOTICE: default order by for hypertable "table_constr" is set to "device_id, timec DESC" ROLLBACK; alter table table_constr add constraint table_constr_uk unique (location, timec, device_id); BEGIN; @@ -368,6 +409,7 @@ SELECT create_hypertable('table_fk', 'time'); ALTER TABLE table_fk DROP COLUMN id1; ALTER TABLE table_fk SET (timescaledb.compress,timescaledb.compress_segmentby = 'id2'); +NOTICE: default order by for hypertable "table_fk" is set to ""time" DESC" -- TEST fk cascade delete behavior on compressed chunk -- insert into fortable values(1); insert into fortable values(10); @@ -409,9 +451,11 @@ SELECT table_name from create_hypertable('table_constr2', 'timec', chunk_time_in INSERT INTO fortable VALUES( 99 ); INSERT INTO table_constr2 VALUES( 1000, 10, 5, 99); ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id'); +NOTICE: default order by for hypertable "table_constr2" is set to "timec DESC" ERROR: column "d" must be used for segmenting DETAIL: The foreign key constraint "table_constr2_d_fkey" cannot be enforced with the given compression configuration. 
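The table_constr2 exchange above (failing with segment by 'device_id', then succeeding just below with 'device_id, d') pins down the rule: every foreign-key column must appear in the segment by, and only the order by is defaulted in that case. Condensed from the test:

-- rejected: FK column d is not in the segment by
-- ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id');
-- accepted, with the order by defaulted to "timec DESC":
ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id, d');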
ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id, d'); +NOTICE: default order by for hypertable "table_constr2" is set to "timec DESC" --compress a chunk and try to disable compression, it should fail -- SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME" FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht @@ -452,6 +496,9 @@ SELECT set_integer_now_func('test_table_int', 'dummy_now'); INSERT INTO test_table_int SELECT generate_series(1,5), 10; ALTER TABLE test_table_int set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_int" is set to "" +NOTICE: default order by for hypertable "test_table_int" is set to ""time" DESC" SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id \gset \c :TEST_DBNAME :ROLE_SUPERUSER @@ -639,6 +686,7 @@ NOTICE: migrating data to chunks create unique index readings_uniq_idx on readings("time",battery_temperature); ALTER TABLE readings SET (timescaledb.compress,timescaledb.compress_segmentby = 'battery_temperature'); +NOTICE: default order by for hypertable "readings" is set to ""time" DESC" SELECT compress_chunk(show_chunks('readings')); compress_chunk ------------------------------------------ @@ -759,6 +807,7 @@ SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 CREATE UNIQUE INDEX readings_uniq_idx ON readings("time", battery_temperature); ALTER TABLE readings SET (timescaledb.compress, timescaledb.compress_segmentby = 'battery_temperature'); +NOTICE: default order by for hypertable "readings" is set to ""time" DESC" ALTER TABLE readings DROP COLUMN battery_status; ALTER TABLE readings DROP COLUMN battery_status2; INSERT INTO readings("time", candy, battery_temperature) @@ -792,25 +841,29 @@ SELECT table_name FROM create_hypertable('table_unique_index', 'time'); -- Will warn because the lack of segmentby/orderby compression options ALTER TABLE table_unique_index SET (timescaledb.compress); -WARNING: column "location" should be used for segmenting or ordering -WARNING: column "device_id" should be used for segmenting or ordering +WARNING: there was some uncertainty picking the default segment by for the hypertable: Please make sure location is not a unique column and appropriate for a segment by +NOTICE: default segment by for hypertable "table_unique_index" is set to "location" +NOTICE: default order by for hypertable "table_unique_index" is set to "device_id, "time" DESC" ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location'); -WARNING: column "device_id" should be used for segmenting or ordering +NOTICE: default order by for hypertable "table_unique_index" is set to "device_id, "time" DESC" ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id'); -WARNING: column "location" should be used for segmenting or ordering +WARNING: there was some uncertainty picking the default segment by for the hypertable: Please make sure location is not a unique column and appropriate for a segment by +NOTICE: default segment by for hypertable 
"table_unique_index" is set to "location" ALTER TABLE table_unique_index SET (timescaledb.compress = off); -- Will enable compression without warnings ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'device_id'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id,location'); +NOTICE: default order by for hypertable "table_unique_index" is set to ""time" DESC" ALTER TABLE table_unique_index SET (timescaledb.compress = off); -ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id,location'); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby='', timescaledb.compress_orderby = 'device_id,location'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location', timescaledb.compress_orderby = 'device_id'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location,device_id'); +NOTICE: default order by for hypertable "table_unique_index" is set to "" ALTER TABLE table_unique_index SET (timescaledb.compress = off); -- try compressing osm chunk CREATE TABLE osm_table (time timestamptz NOT NULL, device_id text, value float); @@ -821,6 +874,9 @@ SELECT table_name FROM create_hypertable('osm_table', 'time'); (1 row) ALTER TABLE osm_table SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "osm_table" is set to "" +NOTICE: default order by for hypertable "osm_table" is set to ""time" DESC" INSERT INTO osm_table VALUES ('2022-11-11 11:11:11', 'foo', 1.0); UPDATE _timescaledb_catalog.chunk ch SET osm_chunk = true FROM _timescaledb_catalog.hypertable ht WHERE ch.hypertable_id = ht.id AND ht.table_name='osm_table'; SELECT compress_chunk(show_chunks('osm_table')); diff --git a/tsl/test/expected/compression_errors-14.out b/tsl/test/expected/compression_errors-14.out index fd931ce007e..ca90ec06dbd 100644 --- a/tsl/test/expected/compression_errors-14.out +++ b/tsl/test/expected/compression_errors-14.out @@ -51,6 +51,7 @@ select create_hypertable('default_skipped', 'a', chunk_time_interval=> 10); (1 row) alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +NOTICE: default order by for hypertable "default_skipped" is set to "a DESC" alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); create table with_rls (a integer, b integer); ALTER TABLE with_rls ENABLE ROW LEVEL SECURITY; @@ -73,8 +74,12 @@ SELECT * FROM _timescaledb_catalog.compression_settings ORDER BY relid::text; (2 rows) ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d DeSc NullS lAsT'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo3" is set to "" --shold allow alter since segment by was empty ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d Asc NullS lAsT'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo3" is set to "" --this is ok too ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c', timescaledb.compress_orderby = 'd DeSc NullS lAsT'); -- Negative test cases --- @@ -121,48 +126,78 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'd') ERROR: column "d" does not exist HINT: The timescaledb.compress_segmentby option must reference a valid column. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'd'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: column "d" does not exist HINT: The timescaledb.compress_orderby option must reference a valid column. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc nulls" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls thirsty'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc nulls thirsty" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c climb nulls first'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c climb nulls first" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. 
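The repeated WARNING/NOTICE pairs in these negative tests (and in their PG13/PG15 twins) document an ordering detail: the segment-by default is computed and reported before the order-by option is parsed, so even statements that go on to fail leave the messages behind. A hedged sketch that should reproduce the sequencing on a scratch hypertable (table and column names here are illustrative, not from the suite):

CREATE TABLE guard(ts timestamptz NOT NULL, val float);
SELECT create_hypertable('guard', 'ts', create_default_indexes => false);
-- no candidate index exists, so this emits the segment-by WARNING and the
-- "set to \"\"" NOTICE first, then aborts on the unparsable order by:
ALTER TABLE guard SET (timescaledb.compress, timescaledb.compress_orderby = 'val nulls');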
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c nulls first asC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c nulls first asC" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls first asc'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc nulls first asc" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc hurry'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc hurry" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c descend'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c descend" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c; SELECT 1'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c; SELECT 1" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = '1,2'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "1,2" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c + 1'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c + 1" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'random()'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "random()" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c LIMIT 1'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c LIMIT 1" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c USING <'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c USING <" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 't COLLATE "en_US"'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "t COLLATE "en_US"" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. 
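Every "default ... is set to" NOTICE in these files corresponds to a row persisted in the settings catalog, which is how the defaults test verifies the outcome; the same query works for any hypertable with compression enabled:

SELECT * FROM _timescaledb_catalog.compression_settings;
--  relid  |  segmentby  | orderby | orderby_desc | orderby_nullsfirst
-- e.g.: metrics | {device_id} | {time}  | {t}          | {t}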
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c asc' , timescaledb.compress_orderby = 'c'); @@ -184,16 +219,22 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + ERROR: unable to parse segmenting option "c + b" HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, p'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: invalid ordering column type point DETAIL: Could not identify a less-than operator for the type. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'b, b'); ERROR: duplicate column name "b" HINT: The timescaledb.compress_segmentby option must reference distinct column. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'b, b'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: duplicate column name "b" HINT: The timescaledb.compress_orderby option must reference distinct column. --should succeed ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, b'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" --ddl on ht with compression ALTER TABLE foo DROP COLUMN a; ERROR: cannot drop column named in partition key @@ -308,7 +349,7 @@ select table_name from create_hypertable('table_constr', 'timec', chunk_time_int BEGIN; ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_segmentby = 'd'); -WARNING: column "device_id" should be used for segmenting or ordering +NOTICE: default order by for hypertable "table_constr" is set to "device_id, timec DESC" ROLLBACK; alter table table_constr add constraint table_constr_uk unique (location, timec, device_id); BEGIN; @@ -368,6 +409,7 @@ SELECT create_hypertable('table_fk', 'time'); ALTER TABLE table_fk DROP COLUMN id1; ALTER TABLE table_fk SET (timescaledb.compress,timescaledb.compress_segmentby = 'id2'); +NOTICE: default order by for hypertable "table_fk" is set to ""time" DESC" -- TEST fk cascade delete behavior on compressed chunk -- insert into fortable values(1); insert into fortable values(10); @@ -409,9 +451,11 @@ SELECT table_name from create_hypertable('table_constr2', 'timec', chunk_time_in INSERT INTO fortable VALUES( 99 ); INSERT INTO table_constr2 VALUES( 1000, 10, 5, 99); ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id'); +NOTICE: default order by for hypertable "table_constr2" is set to "timec DESC" ERROR: column "d" must be used for segmenting DETAIL: The foreign key constraint "table_constr2_d_fkey" cannot be enforced with the given compression configuration. 
ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id, d'); +NOTICE: default order by for hypertable "table_constr2" is set to "timec DESC" --compress a chunk and try to disable compression, it should fail -- SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME" FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht @@ -452,6 +496,9 @@ SELECT set_integer_now_func('test_table_int', 'dummy_now'); INSERT INTO test_table_int SELECT generate_series(1,5), 10; ALTER TABLE test_table_int set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_int" is set to "" +NOTICE: default order by for hypertable "test_table_int" is set to ""time" DESC" SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id \gset \c :TEST_DBNAME :ROLE_SUPERUSER @@ -639,6 +686,7 @@ NOTICE: migrating data to chunks create unique index readings_uniq_idx on readings("time",battery_temperature); ALTER TABLE readings SET (timescaledb.compress,timescaledb.compress_segmentby = 'battery_temperature'); +NOTICE: default order by for hypertable "readings" is set to ""time" DESC" SELECT compress_chunk(show_chunks('readings')); compress_chunk ------------------------------------------ @@ -759,6 +807,7 @@ SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 CREATE UNIQUE INDEX readings_uniq_idx ON readings("time", battery_temperature); ALTER TABLE readings SET (timescaledb.compress, timescaledb.compress_segmentby = 'battery_temperature'); +NOTICE: default order by for hypertable "readings" is set to ""time" DESC" ALTER TABLE readings DROP COLUMN battery_status; ALTER TABLE readings DROP COLUMN battery_status2; INSERT INTO readings("time", candy, battery_temperature) @@ -792,25 +841,29 @@ SELECT table_name FROM create_hypertable('table_unique_index', 'time'); -- Will warn because the lack of segmentby/orderby compression options ALTER TABLE table_unique_index SET (timescaledb.compress); -WARNING: column "location" should be used for segmenting or ordering -WARNING: column "device_id" should be used for segmenting or ordering +WARNING: there was some uncertainty picking the default segment by for the hypertable: Please make sure location is not a unique column and appropriate for a segment by +NOTICE: default segment by for hypertable "table_unique_index" is set to "location" +NOTICE: default order by for hypertable "table_unique_index" is set to "device_id, "time" DESC" ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location'); -WARNING: column "device_id" should be used for segmenting or ordering +NOTICE: default order by for hypertable "table_unique_index" is set to "device_id, "time" DESC" ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id'); -WARNING: column "location" should be used for segmenting or ordering +WARNING: there was some uncertainty picking the default segment by for the hypertable: Please make sure location is not a unique column and appropriate for a segment by +NOTICE: default segment by for hypertable 
"table_unique_index" is set to "location" ALTER TABLE table_unique_index SET (timescaledb.compress = off); -- Will enable compression without warnings ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'device_id'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id,location'); +NOTICE: default order by for hypertable "table_unique_index" is set to ""time" DESC" ALTER TABLE table_unique_index SET (timescaledb.compress = off); -ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id,location'); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby='', timescaledb.compress_orderby = 'device_id,location'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location', timescaledb.compress_orderby = 'device_id'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location,device_id'); +NOTICE: default order by for hypertable "table_unique_index" is set to "" ALTER TABLE table_unique_index SET (timescaledb.compress = off); -- try compressing osm chunk CREATE TABLE osm_table (time timestamptz NOT NULL, device_id text, value float); @@ -821,6 +874,9 @@ SELECT table_name FROM create_hypertable('osm_table', 'time'); (1 row) ALTER TABLE osm_table SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "osm_table" is set to "" +NOTICE: default order by for hypertable "osm_table" is set to ""time" DESC" INSERT INTO osm_table VALUES ('2022-11-11 11:11:11', 'foo', 1.0); UPDATE _timescaledb_catalog.chunk ch SET osm_chunk = true FROM _timescaledb_catalog.hypertable ht WHERE ch.hypertable_id = ht.id AND ht.table_name='osm_table'; SELECT compress_chunk(show_chunks('osm_table')); diff --git a/tsl/test/expected/compression_errors-15.out b/tsl/test/expected/compression_errors-15.out index fd931ce007e..ca90ec06dbd 100644 --- a/tsl/test/expected/compression_errors-15.out +++ b/tsl/test/expected/compression_errors-15.out @@ -51,6 +51,7 @@ select create_hypertable('default_skipped', 'a', chunk_time_interval=> 10); (1 row) alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +NOTICE: default order by for hypertable "default_skipped" is set to "a DESC" alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); create table with_rls (a integer, b integer); ALTER TABLE with_rls ENABLE ROW LEVEL SECURITY; @@ -73,8 +74,12 @@ SELECT * FROM _timescaledb_catalog.compression_settings ORDER BY relid::text; (2 rows) ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d DeSc NullS lAsT'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo3" is set to "" --shold allow alter since segment by was empty ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d Asc NullS lAsT'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo3" is set to "" --this is ok too ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c', timescaledb.compress_orderby = 'd DeSc NullS lAsT'); -- Negative test cases --- @@ -121,48 +126,78 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'd') ERROR: column "d" does not exist HINT: The timescaledb.compress_segmentby option must reference a valid column. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'd'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: column "d" does not exist HINT: The timescaledb.compress_orderby option must reference a valid column. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc nulls" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls thirsty'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc nulls thirsty" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c climb nulls first'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c climb nulls first" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. 
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c nulls first asC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c nulls first asC" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls first asc'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc nulls first asc" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc hurry'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc hurry" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c descend'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c descend" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c; SELECT 1'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c; SELECT 1" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = '1,2'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "1,2" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c + 1'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c + 1" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'random()'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "random()" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c LIMIT 1'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c LIMIT 1" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c USING <'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c USING <" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 't COLLATE "en_US"'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "t COLLATE "en_US"" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. 
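The uncertainty WARNING repeated above is driven by index availability: the default segment_by is chosen among indexed, non-unique columns, and a table with no usable index falls back to an empty segment_by. A sketch of how an index changes the outcome; the table and index names are hypothetical, and since column statistics also feed the choice, the table should hold data and be ANALYZEd first:

CREATE TABLE sensor(ts timestamptz NOT NULL, device_id int, value float);
SELECT create_hypertable('sensor', 'ts');
CREATE INDEX ON sensor(device_id, ts DESC);
-- load some rows, then: ANALYZE sensor;
ALTER TABLE sensor SET (timescaledb.compress);
-- with the index and stats in place, device_id becomes a candidate default
-- segment_by instead of the empty "" seen above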
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c asc' , timescaledb.compress_orderby = 'c'); @@ -184,16 +219,22 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + ERROR: unable to parse segmenting option "c + b" HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, p'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: invalid ordering column type point DETAIL: Could not identify a less-than operator for the type. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'b, b'); ERROR: duplicate column name "b" HINT: The timescaledb.compress_segmentby option must reference distinct column. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'b, b'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: duplicate column name "b" HINT: The timescaledb.compress_orderby option must reference distinct column. --should succeed ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, b'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" --ddl on ht with compression ALTER TABLE foo DROP COLUMN a; ERROR: cannot drop column named in partition key @@ -308,7 +349,7 @@ select table_name from create_hypertable('table_constr', 'timec', chunk_time_int BEGIN; ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_segmentby = 'd'); -WARNING: column "device_id" should be used for segmenting or ordering +NOTICE: default order by for hypertable "table_constr" is set to "device_id, timec DESC" ROLLBACK; alter table table_constr add constraint table_constr_uk unique (location, timec, device_id); BEGIN; @@ -368,6 +409,7 @@ SELECT create_hypertable('table_fk', 'time'); ALTER TABLE table_fk DROP COLUMN id1; ALTER TABLE table_fk SET (timescaledb.compress,timescaledb.compress_segmentby = 'id2'); +NOTICE: default order by for hypertable "table_fk" is set to ""time" DESC" -- TEST fk cascade delete behavior on compressed chunk -- insert into fortable values(1); insert into fortable values(10); @@ -409,9 +451,11 @@ SELECT table_name from create_hypertable('table_constr2', 'timec', chunk_time_in INSERT INTO fortable VALUES( 99 ); INSERT INTO table_constr2 VALUES( 1000, 10, 5, 99); ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id'); +NOTICE: default order by for hypertable "table_constr2" is set to "timec DESC" ERROR: column "d" must be used for segmenting DETAIL: The foreign key constraint "table_constr2_d_fkey" cannot be enforced with the given compression configuration. 
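The 'a, p' failure in this hunk is a type problem rather than a syntax one: an orderby column must have a btree less-than operator. A quick diagnostic sketch (not part of the patch) is to look for a default btree operator class on the type:

SELECT opcname
FROM pg_opclass c
JOIN pg_am am ON am.oid = c.opcmethod
WHERE am.amname = 'btree' AND c.opcintype = 'point'::regtype;
-- zero rows: point is not btree-orderable, hence the ordering error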
ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id, d'); +NOTICE: default order by for hypertable "table_constr2" is set to "timec DESC" --compress a chunk and try to disable compression, it should fail -- SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME" FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht @@ -452,6 +496,9 @@ SELECT set_integer_now_func('test_table_int', 'dummy_now'); INSERT INTO test_table_int SELECT generate_series(1,5), 10; ALTER TABLE test_table_int set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_int" is set to "" +NOTICE: default order by for hypertable "test_table_int" is set to ""time" DESC" SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id \gset \c :TEST_DBNAME :ROLE_SUPERUSER @@ -639,6 +686,7 @@ NOTICE: migrating data to chunks create unique index readings_uniq_idx on readings("time",battery_temperature); ALTER TABLE readings SET (timescaledb.compress,timescaledb.compress_segmentby = 'battery_temperature'); +NOTICE: default order by for hypertable "readings" is set to ""time" DESC" SELECT compress_chunk(show_chunks('readings')); compress_chunk ------------------------------------------ @@ -759,6 +807,7 @@ SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 CREATE UNIQUE INDEX readings_uniq_idx ON readings("time", battery_temperature); ALTER TABLE readings SET (timescaledb.compress, timescaledb.compress_segmentby = 'battery_temperature'); +NOTICE: default order by for hypertable "readings" is set to ""time" DESC" ALTER TABLE readings DROP COLUMN battery_status; ALTER TABLE readings DROP COLUMN battery_status2; INSERT INTO readings("time", candy, battery_temperature) @@ -792,25 +841,29 @@ SELECT table_name FROM create_hypertable('table_unique_index', 'time'); -- Will warn because the lack of segmentby/orderby compression options ALTER TABLE table_unique_index SET (timescaledb.compress); -WARNING: column "location" should be used for segmenting or ordering -WARNING: column "device_id" should be used for segmenting or ordering +WARNING: there was some uncertainty picking the default segment by for the hypertable: Please make sure location is not a unique column and appropriate for a segment by +NOTICE: default segment by for hypertable "table_unique_index" is set to "location" +NOTICE: default order by for hypertable "table_unique_index" is set to "device_id, "time" DESC" ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location'); -WARNING: column "device_id" should be used for segmenting or ordering +NOTICE: default order by for hypertable "table_unique_index" is set to "device_id, "time" DESC" ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id'); -WARNING: column "location" should be used for segmenting or ordering +WARNING: there was some uncertainty picking the default segment by for the hypertable: Please make sure location is not a unique column and appropriate for a segment by +NOTICE: default segment by for hypertable 
"table_unique_index" is set to "location" ALTER TABLE table_unique_index SET (timescaledb.compress = off); -- Will enable compression without warnings ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'device_id'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id,location'); +NOTICE: default order by for hypertable "table_unique_index" is set to ""time" DESC" ALTER TABLE table_unique_index SET (timescaledb.compress = off); -ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id,location'); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby='', timescaledb.compress_orderby = 'device_id,location'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location', timescaledb.compress_orderby = 'device_id'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location,device_id'); +NOTICE: default order by for hypertable "table_unique_index" is set to "" ALTER TABLE table_unique_index SET (timescaledb.compress = off); -- try compressing osm chunk CREATE TABLE osm_table (time timestamptz NOT NULL, device_id text, value float); @@ -821,6 +874,9 @@ SELECT table_name FROM create_hypertable('osm_table', 'time'); (1 row) ALTER TABLE osm_table SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "osm_table" is set to "" +NOTICE: default order by for hypertable "osm_table" is set to ""time" DESC" INSERT INTO osm_table VALUES ('2022-11-11 11:11:11', 'foo', 1.0); UPDATE _timescaledb_catalog.chunk ch SET osm_chunk = true FROM _timescaledb_catalog.hypertable ht WHERE ch.hypertable_id = ht.id AND ht.table_name='osm_table'; SELECT compress_chunk(show_chunks('osm_table')); diff --git a/tsl/test/expected/compression_errors-16.out b/tsl/test/expected/compression_errors-16.out index 15050c2b914..e4ea1ae376b 100644 --- a/tsl/test/expected/compression_errors-16.out +++ b/tsl/test/expected/compression_errors-16.out @@ -51,6 +51,7 @@ select create_hypertable('default_skipped', 'a', chunk_time_interval=> 10); (1 row) alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); +NOTICE: default order by for hypertable "default_skipped" is set to "a DESC" alter table default_skipped set (timescaledb.compress, timescaledb.compress_segmentby = 'c'); create table with_rls (a integer, b integer); ALTER TABLE with_rls ENABLE ROW LEVEL SECURITY; @@ -73,8 +74,12 @@ SELECT * FROM _timescaledb_catalog.compression_settings ORDER BY relid::text; (2 rows) ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d DeSc NullS lAsT'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo3" is set to "" --should allow alter since segment by was empty ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_orderby='d Asc NullS lAsT'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo3" is set to "" --this is ok too ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c', timescaledb.compress_orderby = 'd DeSc NullS lAsT'); -- Negative test cases --- @@ -121,48 +126,78 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'd') ERROR: column "d" does not exist HINT: The timescaledb.compress_segmentby option must reference a valid column. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'd'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: column "d" does not exist HINT: The timescaledb.compress_orderby option must reference a valid column. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc nulls" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls thirsty'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc nulls thirsty" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c climb nulls first'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c climb nulls first" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause.
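Several tests later in this patch are updated to pass an explicit empty timescaledb.compress_segmentby; that opts out of the default search entirely, so neither the uncertainty WARNING nor the segment-by NOTICE above is emitted. A minimal sketch using the same foo table:

ALTER TABLE foo SET (timescaledb.compress,
    timescaledb.compress_segmentby = '',
    timescaledb.compress_orderby = 'a, b');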
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c nulls first asC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c nulls first asC" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc nulls first asc'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc nulls first asc" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c desc hurry'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c desc hurry" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c descend'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c descend" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c; SELECT 1'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c; SELECT 1" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = '1,2'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "1,2" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c + 1'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c + 1" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'random()'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "random()" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c LIMIT 1'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c LIMIT 1" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c USING <'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "c USING <" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 't COLLATE "en_US"'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: unable to parse ordering option "t COLLATE "en_US"" HINT: The timescaledb.compress_orderby option must be a set of column names with sort options, separated by commas. It is the same format as an ORDER BY clause. 
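The COLLATE and USING rejections above show that sort customization cannot ride along in the option string; a collation has to be declared on the column itself, after which the bare column name is accepted. A hypothetical sketch, assuming the en_US collation exists on the system:

CREATE TABLE msgs(ts timestamptz NOT NULL, t text COLLATE "en_US");
SELECT create_hypertable('msgs', 'ts');
ALTER TABLE msgs SET (timescaledb.compress,
    timescaledb.compress_segmentby = '',
    timescaledb.compress_orderby = 't, ts DESC');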
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c asc' , timescaledb.compress_orderby = 'c'); @@ -184,16 +219,22 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + ERROR: unable to parse segmenting option "c + b" HINT: The option timescaledb.compress_segmentby must be a set of columns separated by commas. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, p'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: invalid ordering column type point DETAIL: Could not identify a less-than operator for the type. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'b, b'); ERROR: duplicate column name "b" HINT: The timescaledb.compress_segmentby option must reference distinct column. ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'b, b'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" ERROR: duplicate column name "b" HINT: The timescaledb.compress_orderby option must reference distinct column. --should succeed ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, b'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "foo" is set to "" --ddl on ht with compression ALTER TABLE foo DROP COLUMN a; ERROR: cannot drop column named in partition key @@ -308,7 +349,7 @@ select table_name from create_hypertable('table_constr', 'timec', chunk_time_int BEGIN; ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_segmentby = 'd'); -WARNING: column "device_id" should be used for segmenting or ordering +NOTICE: default order by for hypertable "table_constr" is set to "device_id, timec DESC" ROLLBACK; alter table table_constr add constraint table_constr_uk unique (location, timec, device_id); BEGIN; @@ -368,6 +409,7 @@ SELECT create_hypertable('table_fk', 'time'); ALTER TABLE table_fk DROP COLUMN id1; ALTER TABLE table_fk SET (timescaledb.compress,timescaledb.compress_segmentby = 'id2'); +NOTICE: default order by for hypertable "table_fk" is set to ""time" DESC" -- TEST fk cascade delete behavior on compressed chunk -- insert into fortable values(1); insert into fortable values(10); @@ -409,9 +451,11 @@ SELECT table_name from create_hypertable('table_constr2', 'timec', chunk_time_in INSERT INTO fortable VALUES( 99 ); INSERT INTO table_constr2 VALUES( 1000, 10, 5, 99); ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id'); +NOTICE: default order by for hypertable "table_constr2" is set to "timec DESC" ERROR: column "d" must be used for segmenting DETAIL: The foreign key constraint "table_constr2_d_fkey" cannot be enforced with the given compression configuration. 
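Whenever one of these ALTERs succeeds, the chosen columns (default or explicit) are recorded in the catalog, and the same relation the test's settings view reads can be queried directly to verify them:

SELECT relid, segmentby, orderby
FROM _timescaledb_catalog.compression_settings
ORDER BY relid::text;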
ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id, d'); +NOTICE: default order by for hypertable "table_constr2" is set to "timec DESC" --compress a chunk and try to disable compression, it should fail -- SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME" FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht @@ -452,6 +496,9 @@ SELECT set_integer_now_func('test_table_int', 'dummy_now'); INSERT INTO test_table_int SELECT generate_series(1,5), 10; ALTER TABLE test_table_int set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_int" is set to "" +NOTICE: default order by for hypertable "test_table_int" is set to ""time" DESC" SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id \gset \c :TEST_DBNAME :ROLE_SUPERUSER @@ -639,6 +686,7 @@ NOTICE: migrating data to chunks create unique index readings_uniq_idx on readings("time",battery_temperature); ALTER TABLE readings SET (timescaledb.compress,timescaledb.compress_segmentby = 'battery_temperature'); +NOTICE: default order by for hypertable "readings" is set to ""time" DESC" SELECT compress_chunk(show_chunks('readings')); compress_chunk ------------------------------------------ @@ -759,6 +807,7 @@ SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 CREATE UNIQUE INDEX readings_uniq_idx ON readings("time", battery_temperature); ALTER TABLE readings SET (timescaledb.compress, timescaledb.compress_segmentby = 'battery_temperature'); +NOTICE: default order by for hypertable "readings" is set to ""time" DESC" ALTER TABLE readings DROP COLUMN battery_status; ALTER TABLE readings DROP COLUMN battery_status2; INSERT INTO readings("time", candy, battery_temperature) @@ -792,25 +841,29 @@ SELECT table_name FROM create_hypertable('table_unique_index', 'time'); -- Will warn because the lack of segmentby/orderby compression options ALTER TABLE table_unique_index SET (timescaledb.compress); -WARNING: column "location" should be used for segmenting or ordering -WARNING: column "device_id" should be used for segmenting or ordering +WARNING: there was some uncertainty picking the default segment by for the hypertable: Please make sure location is not a unique column and appropriate for a segment by +NOTICE: default segment by for hypertable "table_unique_index" is set to "location" +NOTICE: default order by for hypertable "table_unique_index" is set to "device_id, "time" DESC" ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location'); -WARNING: column "device_id" should be used for segmenting or ordering +NOTICE: default order by for hypertable "table_unique_index" is set to "device_id, "time" DESC" ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id'); -WARNING: column "location" should be used for segmenting or ordering +WARNING: there was some uncertainty picking the default segment by for the hypertable: Please make sure location is not a unique column and appropriate for a segment by +NOTICE: default segment by for hypertable 
"table_unique_index" is set to "location" ALTER TABLE table_unique_index SET (timescaledb.compress = off); -- Will enable compression without warnings ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'device_id'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id,location'); +NOTICE: default order by for hypertable "table_unique_index" is set to ""time" DESC" ALTER TABLE table_unique_index SET (timescaledb.compress = off); -ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id,location'); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby='', timescaledb.compress_orderby = 'device_id,location'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location', timescaledb.compress_orderby = 'device_id'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location,device_id'); +NOTICE: default order by for hypertable "table_unique_index" is set to "" ALTER TABLE table_unique_index SET (timescaledb.compress = off); -- try compressing osm chunk CREATE TABLE osm_table (time timestamptz NOT NULL, device_id text, value float); @@ -821,6 +874,9 @@ SELECT table_name FROM create_hypertable('osm_table', 'time'); (1 row) ALTER TABLE osm_table SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "osm_table" is set to "" +NOTICE: default order by for hypertable "osm_table" is set to ""time" DESC" INSERT INTO osm_table VALUES ('2022-11-11 11:11:11', 'foo', 1.0); UPDATE _timescaledb_catalog.chunk ch SET osm_chunk = true FROM _timescaledb_catalog.hypertable ht WHERE ch.hypertable_id = ht.id AND ht.table_name='osm_table'; SELECT compress_chunk(show_chunks('osm_table')); diff --git a/tsl/test/expected/compression_hypertable.out b/tsl/test/expected/compression_hypertable.out index 5d6742604e9..92909dcaa52 100644 --- a/tsl/test/expected/compression_hypertable.out +++ b/tsl/test/expected/compression_hypertable.out @@ -433,6 +433,8 @@ select create_hypertable( 'test5', 'time', chunk_time_interval=> '1 day'::interv (1 row) alter table test5 set (timescaledb.compress, timescaledb.compress_orderby = 'device_id, time'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test5" is set to "" insert into test5 select generate_series('2018-01-01 00:00'::timestamp, '2018-01-10 00:00'::timestamp, '2 hour'), 'device_1', gen_rand_minstd(); insert into test5 @@ -588,6 +590,8 @@ NOTICE: adding not-null constraint to column "time" ALTER TABLE test7 SET (timescaledb.compress, timescaledb.compress_orderby = 'time DESC, c1 DESC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test7" is set to "" INSERT INTO test7 SELECT t, d, '2019/07/07 01:00', gen_rand_minstd(), 'a' FROM generate_series(1, 10) t, diff --git a/tsl/test/expected/compression_insert.out b/tsl/test/expected/compression_insert.out index cffbb4d99f4..8cd235fa89c 100644 --- a/tsl/test/expected/compression_insert.out +++ b/tsl/test/expected/compression_insert.out @@ -406,6 +406,9 @@ SELECT create_hypertable('test_gen', 'id', chunk_time_interval=>10); (1 row) ALTER TABLE test_gen set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_gen" is set to "" +NOTICE: default order by for hypertable "test_gen" is set to "id DESC" INSERT into test_gen (payload) SELECT generate_series(1,15) ; SELECT max(id) from test_gen; max @@ -479,6 +482,7 @@ INSERT INTO trigger_test(time, device, value,dropcol1) SELECT '2000-01-01',1,1,1 -- drop the column before we compress ALTER TABLE trigger_test DROP COLUMN dropcol1; ALTER TABLE trigger_test SET (timescaledb.compress,timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "trigger_test" is set to ""time" DESC" SELECT compress_chunk(c) FROM show_chunks('trigger_test') c; compress_chunk ------------------------------------------ @@ -766,6 +770,8 @@ NOTICE: adding not-null constraint to column "time" (1 row) ALTER TABLE test_ordering SET (timescaledb.compress,timescaledb.compress_orderby='time desc'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_ordering" is set to "" INSERT INTO test_ordering VALUES (5),(4),(3); -- should be ordered append :PREFIX SELECT * FROM test_ordering ORDER BY 1; @@ -916,6 +922,9 @@ SELECT * FROM cagg_conditions ORDER BY 1; (1 row) ALTER TABLE conditions SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "conditions" is set to "" +NOTICE: default order by for hypertable "conditions" is set to "timec DESC" SELECT compress_chunk(ch) FROM show_chunks('conditions') ch; compress_chunk ------------------------------------------ @@ -960,6 +969,9 @@ SELECT table_name FROM create_hypertable('direct_insert','time'); (1 row) ALTER TABLE direct_insert SET(timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "direct_insert" is set to "" +NOTICE: default order by for hypertable "direct_insert" is set to ""time" DESC" SELECT format('%I.%I', ht.schema_name, ht.table_name) AS "TABLENAME" FROM @@ -1050,6 +1062,7 @@ INSERT INTO test_limit SELECT t, i FROM generate_series(1,10000,1) t CROSS JOIN CREATE UNIQUE INDEX timestamp_id_idx ON test_limit(timestamp, id); ALTER TABLE test_limit SET ( timescaledb.compress, + timescaledb.compress_segmentby = '', timescaledb.compress_orderby = 'timestamp' ); WARNING: column "id" should be used for segmenting or ordering diff --git a/tsl/test/expected/compression_merge.out b/tsl/test/expected/compression_merge.out index 1588e1098ce..c98894c0eb6 100644 --- a/tsl/test/expected/compression_merge.out +++ b/tsl/test/expected/compression_merge.out @@ -179,6 +179,8 @@ INSERT INTO test3 SELECT t, 2, gen_rand_minstd(), gen_rand_minstd() FROM generat INSERT INTO test3 SELECT t, 3, gen_rand_minstd(), gen_rand_minstd() FROM generate_series('2018-03-02 2:00'::TIMESTAMPTZ, '2018-03-02 2:01', '1 minute') t; -- Compression is set to merge those 25 chunks into 12 2 hour chunks and a single 1 hour chunks on a different space dimensions. ALTER TABLE test3 set (timescaledb.compress, timescaledb.compress_orderby='loc,"Time"', timescaledb.compress_chunk_time_interval='2 hours'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test3" is set to "" SELECT $$ SELECT * FROM test3 WHERE i = 1 ORDER BY "Time" @@ -240,6 +242,8 @@ NOTICE: adding not-null constraint to column "Time" -- Setting compress_chunk_time_interval to non-multiple of chunk_time_interval should emit a warning. ALTER TABLE test4 set (timescaledb.compress, timescaledb.compress_orderby='loc,"Time"', timescaledb.compress_chunk_time_interval='90 minutes'); WARNING: compress chunk interval is not a multiple of chunk interval, you should use a factor of chunk interval to merge as much as possible +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test4" is set to "" DROP TABLE test4; CREATE TABLE test5 ("Time" timestamptz, i integer, value integer); SELECT table_name from create_hypertable('test5', 'Time', chunk_time_interval=> INTERVAL '1 hour'); diff --git a/tsl/test/expected/compression_policy.out b/tsl/test/expected/compression_policy.out index c444c96e146..0c7be4ba1a6 100644 --- a/tsl/test/expected/compression_policy.out +++ b/tsl/test/expected/compression_policy.out @@ -13,6 +13,9 @@ SELECT FROM create_hypertable('sensor_data_1month', 'time', chunk_time_interval (1 row) ALTER TABLE sensor_data_1month SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "sensor_data_1month" is set to "" +NOTICE: default order by for hypertable "sensor_data_1month" is set to ""time" DESC" -- Add a compression policy and check the schedule interval SELECT add_compression_policy('sensor_data_1month','7 days'::INTERVAL) as compression_job \gset SELECT schedule_interval from timescaledb_information.jobs where job_id = :compression_job; @@ -33,6 +36,9 @@ SELECT FROM create_hypertable('sensor_data_1week', 'time', chunk_time_interval = (1 row) ALTER TABLE sensor_data_1week SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "sensor_data_1week" is set to "" +NOTICE: default order by for hypertable "sensor_data_1week" is set to ""time" DESC" -- Add a compression policy and check the schedule interval SELECT add_compression_policy('sensor_data_1week','7 days'::INTERVAL) as compression_job \gset SELECT schedule_interval from timescaledb_information.jobs where job_id = :compression_job; @@ -53,6 +59,9 @@ SELECT FROM create_hypertable('sensor_data_1day', 'time', chunk_time_interval => (1 row) ALTER TABLE sensor_data_1day SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "sensor_data_1day" is set to "" +NOTICE: default order by for hypertable "sensor_data_1day" is set to ""time" DESC" -- Add a compression policy and check the schedule interval SELECT add_compression_policy('sensor_data_1day','7 days'::INTERVAL) as compression_job \gset SELECT schedule_interval from timescaledb_information.jobs where job_id = :compression_job; @@ -73,6 +82,9 @@ SELECT FROM create_hypertable('sensor_data_1hour', 'time', chunk_time_interval = (1 row) ALTER TABLE sensor_data_1hour SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "sensor_data_1hour" is set to "" +NOTICE: default order by for hypertable "sensor_data_1hour" is set to ""time" DESC" -- Add a compression policy and check the schedule interval SELECT add_compression_policy('sensor_data_1hour','7 days'::INTERVAL) as compression_job \gset SELECT schedule_interval from timescaledb_information.jobs where job_id = :compression_job; @@ -93,6 +105,9 @@ SELECT FROM create_hypertable('sensor_data_1hour_custom', 'time', chunk_time_int (1 row) ALTER TABLE sensor_data_1hour_custom SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "sensor_data_1hour_custom" is set to "" +NOTICE: default order by for hypertable "sensor_data_1hour_custom" is set to ""time" DESC" -- Add a compression policy and check the schedule interval SELECT add_compression_policy('sensor_data_1hour_custom','7 days'::INTERVAL, schedule_interval => '7 days') as compression_job \gset SELECT schedule_interval from timescaledb_information.jobs where job_id = :compression_job; diff --git a/tsl/test/expected/compression_qualpushdown.out b/tsl/test/expected/compression_qualpushdown.out index b978f625f17..289e7de7306 100644 --- a/tsl/test/expected/compression_qualpushdown.out +++ b/tsl/test/expected/compression_qualpushdown.out @@ -101,6 +101,8 @@ NOTICE: adding not-null constraint to column "fmid" (1 row) ALTER TABLE metaseg_tab SET (timescaledb.compress, timescaledb.compress_orderby= 'end_dt'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "metaseg_tab" is set to "" INSERT INTO metaseg_tab values (56,0,'2012-12-10 09:45:00','2012-12-10 09:50:00',1,0.1,'2012-12-10'); SELECT compress_chunk(i) from show_chunks('metaseg_tab') i; compress_chunk @@ -190,6 +192,7 @@ WARNING: column type "character varying" used for "dev_vc" does not follow best (1 row) ALTER TABLE pushdown_relabel SET (timescaledb.compress, timescaledb.compress_segmentby='dev_vc,dev_c'); +NOTICE: default order by for hypertable "pushdown_relabel" is set to ""time" DESC" INSERT INTO pushdown_relabel SELECT '2000-01-01','varchar','char'; SELECT compress_chunk(i) from show_chunks('pushdown_relabel') i; compress_chunk @@ -282,6 +285,7 @@ ALTER TABLE deleteme SET ( timescaledb.compress, timescaledb.compress_segmentby = 'segment' ); +NOTICE: default order by for hypertable "deleteme" is set to ""timestamp" DESC" SELECT compress_chunk(i) FROM show_chunks('deleteme') i; compress_chunk ---------------------------------------- @@ -319,6 +323,7 @@ ALTER TABLE deleteme_with_bytea SET ( timescaledb.compress, timescaledb.compress_segmentby = 'bdata' ); +NOTICE: default order by for hypertable "deleteme_with_bytea" is set to ""time" DESC" SELECT compress_chunk(i) FROM show_chunks('deleteme_with_bytea') i; compress_chunk ----------------------------------------- @@ -355,6 +360,7 @@ NOTICE: adding not-null constraint to column "time" (1 row) ALTER TABLE svf_pushdown SET (timescaledb.compress,timescaledb.compress_segmentby='c_date,c_time, c_timetz,c_timestamp,c_name'); +NOTICE: default order by for hypertable "svf_pushdown" is set to ""time" DESC" INSERT INTO svf_pushdown SELECT '2020-01-01'; SELECT compress_chunk(show_chunks('svf_pushdown')); compress_chunk diff --git a/tsl/test/expected/compression_settings.out b/tsl/test/expected/compression_settings.out index 0a823f3d499..aa18b9746c2 100644 --- a/tsl/test/expected/compression_settings.out +++ b/tsl/test/expected/compression_settings.out @@ -13,6 +13,7 @@ SELECT table_name FROM create_hypertable('metrics','time'); (1 row) ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "metrics" is set to ""time" DESC" SELECT * FROM settings; relid | segmentby | orderby | orderby_desc | orderby_nullsfirst ---------+-----------+---------+--------------+-------------------- @@ -161,6 +162,9 @@ SELECT table_name FROM create_hypertable('metrics','time'); (1 row) ALTER TABLE metrics SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "metrics" is set to "" +NOTICE: default order by for hypertable "metrics" is set to ""time" DESC" -- hypertable should have default settings now SELECT * FROM settings; relid | segmentby | orderby | orderby_desc | orderby_nullsfirst @@ -329,6 +333,8 @@ SELECT * FROM ht_settings; (2 rows) ALTER TABLE metrics2 SET (timescaledb.compress_orderby='d1 NULLS FIRST, d2 NULLS LAST, time, value ASC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "metrics2" is set to "" SELECT * FROM ht_settings; hypertable | segmentby | orderby | compress_interval_length ------------+-----------+--------------------------------+-------------------------- @@ -337,6 +343,8 @@ SELECT * FROM ht_settings; (2 rows) ALTER TABLE metrics2 SET (timescaledb.compress_orderby='d1 DESC NULLS LAST, d2 ASC NULLS FIRST, value DESC, time ASC NULLS FIRST'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "metrics2" is set to "" SELECT * FROM ht_settings; hypertable | segmentby | orderby | compress_interval_length ------------+-----------+-----------------------------------------------------------------+-------------------------- diff --git a/tsl/test/expected/compression_sorted_merge-13.out b/tsl/test/expected/compression_sorted_merge-13.out index c98a8226425..fb45e8e831f 100644 --- a/tsl/test/expected/compression_sorted_merge-13.out +++ b/tsl/test/expected/compression_sorted_merge-13.out @@ -1531,6 +1531,7 @@ ALTER TABLE test SET ( timescaledb.compress, timescaledb.compress_segmentby = 'otherId,valueFk,otherFk' ); +NOTICE: default order by for hypertable "test" is set to "dttm DESC" INSERT INTO public.test (id, dttm, otherid, valuefk, otherfk, measure) VALUES (109288, '2023-05-25 23:12:13.000000', 130, 14499, 13, 0.13216569884001217), (109286, '2023-05-25 23:12:13.000000', 130, 14500, 13, 0.3740651978942786), diff --git a/tsl/test/expected/compression_sorted_merge-14.out b/tsl/test/expected/compression_sorted_merge-14.out index c98a8226425..fb45e8e831f 100644 --- a/tsl/test/expected/compression_sorted_merge-14.out +++ b/tsl/test/expected/compression_sorted_merge-14.out @@ -1531,6 +1531,7 @@ ALTER TABLE test SET ( timescaledb.compress, timescaledb.compress_segmentby = 'otherId,valueFk,otherFk' ); +NOTICE: default order by for hypertable "test" is set to "dttm DESC" INSERT INTO public.test (id, dttm, otherid, valuefk, otherfk, measure) VALUES (109288, '2023-05-25 23:12:13.000000', 130, 14499, 13, 0.13216569884001217), (109286, '2023-05-25 23:12:13.000000', 130, 14500, 13, 0.3740651978942786), diff --git a/tsl/test/expected/compression_sorted_merge-15.out b/tsl/test/expected/compression_sorted_merge-15.out index a6d09f1742d..92651ea94e5 100644 --- a/tsl/test/expected/compression_sorted_merge-15.out +++ b/tsl/test/expected/compression_sorted_merge-15.out @@ -1533,6 +1533,7 @@ ALTER TABLE test SET ( timescaledb.compress, timescaledb.compress_segmentby = 'otherId,valueFk,otherFk' ); +NOTICE: default order by for hypertable "test" is set to "dttm DESC" INSERT INTO public.test (id, dttm, otherid, valuefk, otherfk, measure) VALUES (109288, '2023-05-25 23:12:13.000000', 130, 14499, 13, 0.13216569884001217), (109286, '2023-05-25 23:12:13.000000', 130, 14500, 13, 0.3740651978942786), diff --git a/tsl/test/expected/compression_sorted_merge-16.out b/tsl/test/expected/compression_sorted_merge-16.out index a6d09f1742d..92651ea94e5 100644 --- a/tsl/test/expected/compression_sorted_merge-16.out +++ b/tsl/test/expected/compression_sorted_merge-16.out @@ -1533,6 +1533,7 @@ ALTER TABLE test SET ( timescaledb.compress, timescaledb.compress_segmentby = 'otherId,valueFk,otherFk' ); +NOTICE: default order by for hypertable "test" is set to "dttm 
DESC" INSERT INTO public.test (id, dttm, otherid, valuefk, otherfk, measure) VALUES (109288, '2023-05-25 23:12:13.000000', 130, 14499, 13, 0.13216569884001217), (109286, '2023-05-25 23:12:13.000000', 130, 14500, 13, 0.3740651978942786), diff --git a/tsl/test/expected/compression_sorted_merge_distinct.out b/tsl/test/expected/compression_sorted_merge_distinct.out index 65eb802ce68..5fa164b4eec 100644 --- a/tsl/test/expected/compression_sorted_merge_distinct.out +++ b/tsl/test/expected/compression_sorted_merge_distinct.out @@ -22,6 +22,7 @@ insert into t select 100 * mix(low_card + high_card) * sin(x / mix(low_card + high_card + 1)) from generate_series(1, 400) x, generate_series(1, 3) low_card, generate_series(1, 700) high_card; alter table t set (timescaledb.compress = true, timescaledb.compress_segmentby = 'low_card,high_card'); +NOTICE: default order by for hypertable "t" is set to "ts DESC" select count(compress_chunk(x, true)) from show_chunks('t') x; count ------- diff --git a/tsl/test/expected/compression_sorted_merge_filter.out b/tsl/test/expected/compression_sorted_merge_filter.out index 10706f55bae..b61e29d8195 100644 --- a/tsl/test/expected/compression_sorted_merge_filter.out +++ b/tsl/test/expected/compression_sorted_merge_filter.out @@ -11,6 +11,7 @@ NOTICE: adding not-null constraint to column "ts" (1 row) alter table batches set (timescaledb.compress, timescaledb.compress_segmentby = 'id'); +NOTICE: default order by for hypertable "batches" is set to "ts DESC" insert into batches values ('2022-02-02 00:00:01', 1), ('2022-02-02 00:00:11', 1), ('2022-02-02 00:00:02', 2), ('2022-02-02 00:00:12', 2), ('2022-02-02 00:00:03', 3), ('2022-02-02 00:00:13', 3); diff --git a/tsl/test/expected/compression_update_delete.out b/tsl/test/expected/compression_update_delete.out index 4e7a5bf3670..26f36db39d5 100644 --- a/tsl/test/expected/compression_update_delete.out +++ b/tsl/test/expected/compression_update_delete.out @@ -59,6 +59,7 @@ ALTER TABLE sample_table SET ( timescaledb.compress, timescaledb.compress_segmentby = 'sensor_id' ); +NOTICE: default order by for hypertable "sample_table" is set to ""time" DESC" -- compress all chunks SELECT compress_chunk(show_chunks('sample_table')); compress_chunk @@ -460,6 +461,7 @@ SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 1 ALTER TABLE sample_table SET ( timescaledb.compress, timescaledb.compress_segmentby = 'time, val'); +NOTICE: default order by for hypertable "sample_table" is set to "" INSERT INTO sample_table VALUES (1, 1, 1), (2, 2, 1), (3, 3, 1), (1, 3, 2), (11, 4, 2), (1, 1, 2); -- compress all chunks SELECT compress_chunk(show_chunks('sample_table')); @@ -583,6 +585,7 @@ INSERT INTO sample_table VALUES ('2017-03-22T09:18:22', 23.5, 1, '{"a": 1, "b": ('2017-08-22T09:18:22', 34.1, 3, '{"c": 4}'); ALTER TABLE sample_table SET (timescaledb.compress, timescaledb.compress_segmentby = 'time'); +NOTICE: default order by for hypertable "sample_table" is set to "" SELECT compress_chunk(show_chunks('sample_table')); compress_chunk ----------------------------------------- @@ -1004,6 +1007,7 @@ ALTER TABLE sample_table DROP COLUMN c; ALTER TABLE sample_table SET ( timescaledb.compress, timescaledb.compress_segmentby = 'device_id, d'); +NOTICE: default order by for hypertable "sample_table" is set to ""time" DESC" -- compress all chunks SELECT compress_chunk(show_chunks('sample_table')); compress_chunk @@ -1230,6 +1234,7 @@ ALTER TABLE sample_table DROP column c3; ALTER TABLE sample_table ADD column c4 text; INSERT 
INTO sample_table SELECT '2000-01-01', '1', '2', '3'; ALTER TABLE sample_table SET (timescaledb.compress,timescaledb.compress_segmentby='c4'); +NOTICE: default order by for hypertable "sample_table" is set to ""time" DESC" SELECT compress_chunk(show_chunks('sample_table')); compress_chunk ------------------------------------------ @@ -1985,7 +1990,9 @@ SELECT table_name FROM create_hypertable('join_test2', 'time'); (1 row) ALTER TABLE join_test1 SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "join_test1" is set to ""time" DESC" ALTER TABLE join_test2 SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "join_test2" is set to ""time" DESC" INSERT INTO join_test1 VALUES ('2000-01-01','d1',0.1), ('2000-02-01','d1',0.1), ('2000-03-01','d1',0.1); INSERT INTO join_test2 VALUES ('2000-02-01','d1',0.1), ('2000-02-01','d2',0.1), ('2000-02-01','d3',0.1); SELECT compress_chunk(show_chunks('join_test1')); @@ -2473,6 +2480,9 @@ NOTICE: adding not-null constraint to column "a" INSERT INTO t values(1, 2); ALTER TABLE t SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "t" is set to "" +NOTICE: default order by for hypertable "t" is set to "a DESC" SELECT compress_chunk(show_chunks('t')); compress_chunk ------------------------------------------ @@ -2706,6 +2716,8 @@ ALTER TABLE test_limit SET ( timescaledb.compress, timescaledb.compress_orderby = 'timestamp' ); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_limit" is set to "" SELECT count(compress_chunk(ch)) FROM show_chunks('test_limit') ch; count ------- diff --git a/tsl/test/expected/continuous_aggs-13.out b/tsl/test/expected/continuous_aggs-13.out index 07caf4b377f..f7bdd97f614 100644 --- a/tsl/test/expected/continuous_aggs-13.out +++ b/tsl/test/expected/continuous_aggs-13.out @@ -1645,6 +1645,8 @@ AS SELECT time_bucket('30 days',time), avg(val1), count(val2) NOTICE: refreshing continuous aggregate "test_morecols_cagg" ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_44" is set to "" SELECT compress_chunk(ch) FROM show_chunks('test_morecols_cagg') ch; compress_chunk ------------------------------------------ diff --git a/tsl/test/expected/continuous_aggs-14.out b/tsl/test/expected/continuous_aggs-14.out index 152b835cfa7..3a9e83e9463 100644 --- a/tsl/test/expected/continuous_aggs-14.out +++ b/tsl/test/expected/continuous_aggs-14.out @@ -1644,6 +1644,8 @@ AS SELECT time_bucket('30 days',time), avg(val1), count(val2) NOTICE: refreshing continuous aggregate "test_morecols_cagg" ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_44" is set to "" SELECT compress_chunk(ch) FROM show_chunks('test_morecols_cagg') ch; compress_chunk ------------------------------------------ diff --git a/tsl/test/expected/continuous_aggs-15.out b/tsl/test/expected/continuous_aggs-15.out index 152b835cfa7..3a9e83e9463 100644 --- a/tsl/test/expected/continuous_aggs-15.out +++ b/tsl/test/expected/continuous_aggs-15.out @@ -1644,6 +1644,8 @@ AS SELECT time_bucket('30 days',time), avg(val1), count(val2) NOTICE: refreshing continuous aggregate "test_morecols_cagg" ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_44" is set to "" SELECT compress_chunk(ch) FROM show_chunks('test_morecols_cagg') ch; compress_chunk ------------------------------------------ diff --git a/tsl/test/expected/continuous_aggs-16.out b/tsl/test/expected/continuous_aggs-16.out index 74bdff997bf..e0c064d0d14 100644 --- a/tsl/test/expected/continuous_aggs-16.out +++ b/tsl/test/expected/continuous_aggs-16.out @@ -1644,6 +1644,8 @@ AS SELECT time_bucket('30 days',time), avg(val1), count(val2) NOTICE: refreshing continuous aggregate "test_morecols_cagg" ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.compress='true'); NOTICE: defaulting compress_orderby to time_bucket +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "_materialized_hypertable_44" is set to "" SELECT compress_chunk(ch) FROM show_chunks('test_morecols_cagg') ch; compress_chunk ------------------------------------------ diff --git a/tsl/test/expected/decompress_vector_qual.out b/tsl/test/expected/decompress_vector_qual.out index 80287aeba0b..23f105775b5 100644 --- a/tsl/test/expected/decompress_vector_qual.out +++ b/tsl/test/expected/decompress_vector_qual.out @@ -13,6 +13,7 @@ NOTICE: adding not-null constraint to column "ts" (1 row) alter table vectorqual set (timescaledb.compress, timescaledb.compress_segmentby = 'device'); +NOTICE: default order by for hypertable "vectorqual" is set to "ts DESC" insert into vectorqual(ts, device, metric1, metric2) values ('2020-01-01 00:00:00', 1, 11, 12); select count(compress_chunk(x, true)) from show_chunks('vectorqual') x; count @@ -120,6 +121,9 @@ NOTICE: adding not-null constraint to column "ts" (1 row) alter table arithmetic set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "arithmetic" is set to "" +NOTICE: default order by for hypertable "arithmetic" is set to "ts DESC" insert into arithmetic values (100, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), (101, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11); select count(compress_chunk(x, true)) from show_chunks('arithmetic') x; @@ -577,6 +581,9 @@ WARNING: column type "timestamp without time zone" used for "ts" does not follo (1 row) alter table singlebatch set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "singlebatch" is set to "" +NOTICE: default order by for hypertable "singlebatch" is set to "ts DESC" insert into singlebatch select '2022-02-02 02:02:02', metric2, device, metric3, metric4, tag from vectorqual; select count(compress_chunk(x, true)) from show_chunks('singlebatch') x; count @@ -1107,6 +1114,9 @@ NOTICE: adding not-null constraint to column "ts" (1 row) alter table date_table set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "date_table" is set to "" +NOTICE: default order by for hypertable "date_table" is set to "ts DESC" insert into date_table values ('2021-01-01'), ('2021-01-02'), ('2021-01-03'); select count(compress_chunk(x, true)) from show_chunks('date_table') x; @@ -1166,6 +1176,7 @@ NOTICE: adding not-null constraint to column "ts" (1 row) alter table text_table set (timescaledb.compress, timescaledb.compress_segmentby = 'd'); +NOTICE: default order by for hypertable "text_table" is set to "ts DESC" insert into text_table select x, 0 /*, default */ from generate_series(1, 1000) x; select count(compress_chunk(x, true)) from show_chunks('text_table') x; count diff --git a/tsl/test/expected/exp_cagg_monthly.out b/tsl/test/expected/exp_cagg_monthly.out index a4284bf176c..277a3ea68b8 100644 --- a/tsl/test/expected/exp_cagg_monthly.out +++ b/tsl/test/expected/exp_cagg_monthly.out @@ -505,6 +505,7 @@ ALTER TABLE conditions SET ( timescaledb.compress, timescaledb.compress_segmentby = 'city' ); +NOTICE: default order by for hypertable "conditions" is set to "day DESC" SELECT compress_chunk(ch) FROM show_chunks('conditions') AS ch; compress_chunk ------------------------------------------ diff --git a/tsl/test/expected/exp_cagg_origin.out b/tsl/test/expected/exp_cagg_origin.out index 6e3993b14c9..a88b270ef91 100644 --- a/tsl/test/expected/exp_cagg_origin.out +++ b/tsl/test/expected/exp_cagg_origin.out @@ -351,6 +351,7 @@ ALTER TABLE conditions SET ( timescaledb.compress, timescaledb.compress_segmentby = 'city' ); +NOTICE: default order by for hypertable "conditions" is set to "day DESC" SELECT compress_chunk(ch) FROM show_chunks('conditions') AS ch; compress_chunk ----------------------------------------- @@ -664,6 +665,7 @@ ALTER TABLE conditions_timestamp SET ( timescaledb.compress, timescaledb.compress_segmentby = 'city' ); +NOTICE: default order by for hypertable "conditions_timestamp" is set to "tstamp DESC" SELECT compress_chunk(ch) FROM show_chunks('conditions_timestamp') AS ch; compress_chunk ------------------------------------------- @@ -890,6 +892,7 @@ ALTER TABLE conditions_timestamptz SET ( timescaledb.compress, timescaledb.compress_segmentby = 'city' ); +NOTICE: default order by for hypertable "conditions_timestamptz" is set to "tstamp DESC" SELECT compress_chunk(ch) FROM show_chunks('conditions_timestamptz') AS ch; compress_chunk ------------------------------------------- diff --git a/tsl/test/expected/exp_cagg_timezone.out b/tsl/test/expected/exp_cagg_timezone.out index e8b4e8def9b..f037ce22e50 100644 --- a/tsl/test/expected/exp_cagg_timezone.out +++ b/tsl/test/expected/exp_cagg_timezone.out @@ -452,6 +452,7 @@ ALTER TABLE conditions_tz SET ( timescaledb.compress, timescaledb.compress_segmentby = 'city' ); +NOTICE: default order by for hypertable "conditions_tz" is set to "day DESC" SELECT compress_chunk(ch) FROM show_chunks('conditions_tz') AS ch; compress_chunk ----------------------------------------- diff --git a/tsl/test/expected/insert_memory_usage.out b/tsl/test/expected/insert_memory_usage.out index 9ba10c4f2ac..7c8a1c73e8c 100644 --- a/tsl/test/expected/insert_memory_usage.out +++ b/tsl/test/expected/insert_memory_usage.out @@ -71,6 +71,8 @@ select * from portal_memory_log where ( truncate uk_price_paid; insert into uk_price_paid select * from uk_price_paid_one; alter table uk_price_paid set (timescaledb.compress, timescaledb.compress_orderby = 'date'); +WARNING: there was some uncertainty 
picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "uk_price_paid" is set to "" select count(compress_chunk(chunk)) from show_chunks('uk_price_paid') chunk; count ------- diff --git a/tsl/test/expected/modify_exclusion-14.out b/tsl/test/expected/modify_exclusion-14.out index e17bbc64357..0c96c2928bf 100644 --- a/tsl/test/expected/modify_exclusion-14.out +++ b/tsl/test/expected/modify_exclusion-14.out @@ -134,6 +134,9 @@ SELECT table_name FROM create_hypertable('metrics_compressed','time'); (1 row) ALTER TABLE metrics_compressed SET (timescaledb.compress); +psql:include/modify_exclusion_load.sql:73: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +psql:include/modify_exclusion_load.sql:73: NOTICE: default segment by for hypertable "metrics_compressed" is set to "" +psql:include/modify_exclusion_load.sql:73: NOTICE: default order by for hypertable "metrics_compressed" is set to ""time" DESC" -- create first chunk and compress INSERT INTO metrics_compressed VALUES ('2000-01-01',1,0.5); SELECT count(compress_chunk(chunk)) FROM show_chunks('metrics_compressed') chunk; diff --git a/tsl/test/expected/modify_exclusion-15.out b/tsl/test/expected/modify_exclusion-15.out index ded42f5e288..9d7a54cdea9 100644 --- a/tsl/test/expected/modify_exclusion-15.out +++ b/tsl/test/expected/modify_exclusion-15.out @@ -134,6 +134,9 @@ SELECT table_name FROM create_hypertable('metrics_compressed','time'); (1 row) ALTER TABLE metrics_compressed SET (timescaledb.compress); +psql:include/modify_exclusion_load.sql:73: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +psql:include/modify_exclusion_load.sql:73: NOTICE: default segment by for hypertable "metrics_compressed" is set to "" +psql:include/modify_exclusion_load.sql:73: NOTICE: default order by for hypertable "metrics_compressed" is set to ""time" DESC" -- create first chunk and compress INSERT INTO metrics_compressed VALUES ('2000-01-01',1,0.5); SELECT count(compress_chunk(chunk)) FROM show_chunks('metrics_compressed') chunk; diff --git a/tsl/test/expected/modify_exclusion-16.out b/tsl/test/expected/modify_exclusion-16.out index ded42f5e288..9d7a54cdea9 100644 --- a/tsl/test/expected/modify_exclusion-16.out +++ b/tsl/test/expected/modify_exclusion-16.out @@ -134,6 +134,9 @@ SELECT table_name FROM create_hypertable('metrics_compressed','time'); (1 row) ALTER TABLE metrics_compressed SET (timescaledb.compress); +psql:include/modify_exclusion_load.sql:73: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +psql:include/modify_exclusion_load.sql:73: NOTICE: default segment by for hypertable "metrics_compressed" is set to "" +psql:include/modify_exclusion_load.sql:73: NOTICE: default order by for hypertable "metrics_compressed" is set to ""time" DESC" -- create first chunk and compress INSERT INTO metrics_compressed VALUES ('2000-01-01',1,0.5); SELECT count(compress_chunk(chunk)) FROM show_chunks('metrics_compressed') chunk; diff --git a/tsl/test/expected/move.out b/tsl/test/expected/move.out index d6944b4a247..a665e8450d8 100644 --- a/tsl/test/expected/move.out +++ b/tsl/test/expected/move.out @@ -456,6 +456,7 @@ SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk'); --TEST with compression bug 4000 --compress chunk and then move chunk and index to different tablespaces ALTER TABLE cluster_test SET (timescaledb.compress, timescaledb.compress_segmentby = 'location'); +NOTICE: default order by for hypertable "cluster_test" is set to ""time" DESC" SELECT compress_chunk('_timescaledb_internal._hyper_1_2_chunk') as ch; ch ---------------------------------------- diff --git a/tsl/test/expected/policy_generalization.out b/tsl/test/expected/policy_generalization.out index 03d0b1c3a3f..2fe2092f038 100644 --- a/tsl/test/expected/policy_generalization.out +++ b/tsl/test/expected/policy_generalization.out @@ -135,6 +135,9 @@ SELECT remove_retention_policy('test'); -- compression policy ALTER TABLE test SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test" is set to "" +NOTICE: default order by for hypertable "test" is set to ""time" DESC" INSERT INTO test SELECT i, i %10, 0.10 FROM generate_series(1, 100, 1) i; -- Chunk compression status SELECT DISTINCT compression_status FROM _timescaledb_internal.compressed_chunk_stats; diff --git a/tsl/test/expected/read_only.out b/tsl/test/expected/read_only.out index 735b98dda3c..f9057559143 100644 --- a/tsl/test/expected/read_only.out +++ b/tsl/test/expected/read_only.out @@ -199,6 +199,9 @@ select set_integer_now_func('test_table_int', 'dummy_now'); (1 row) ALTER TABLE test_table_int SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_table_int" is set to "" +NOTICE: default order by for hypertable "test_table_int" is set to ""time" DESC" INSERT INTO test_table_int VALUES (0, 1), (10,10); SELECT add_compression_policy('test_table_int', '1'::integer) as comp_job_id \gset SELECT config as comp_job_config diff --git a/tsl/test/expected/recompress_chunk_segmentwise.out b/tsl/test/expected/recompress_chunk_segmentwise.out index 8b6972a3fbe..1e5ee1a679e 100644 --- a/tsl/test/expected/recompress_chunk_segmentwise.out +++ b/tsl/test/expected/recompress_chunk_segmentwise.out @@ -36,6 +36,7 @@ insert into mytab_oneseg values ('2023-01-01 21:56:20.048355+02'::timestamptz, 2, NULL, 2), ('2023-01-01 21:56:10.048355+02'::timestamptz, 2, NULL, 2); --same chunk same segment alter table mytab_oneseg set (timescaledb.compress, timescaledb.compress_segmentby = 'a, c'); +NOTICE: default order by for hypertable "mytab_oneseg" is set to ""time" DESC" select show_chunks as chunk_to_compress_1 from show_chunks('mytab_oneseg') limit 1 \gset select compress_chunk(:'chunk_to_compress_1'); compress_chunk @@ -100,6 +101,7 @@ insert into mytab_twoseg values ('2023-01-01 21:56:20.048355+02'::timestamptz, 3, NULL, 3), --same chunk diff segment ('2023-01-01 21:57:20.048355+02'::timestamptz, 3, NULL, 3); alter table mytab_twoseg set (timescaledb.compress, timescaledb.compress_segmentby = 'a, c'); +NOTICE: default order by for hypertable "mytab_twoseg" is set to ""time" DESC" select show_chunks as chunk_to_compress_2 from show_chunks('mytab_twoseg') limit 1 \gset select compress_chunk(:'chunk_to_compress_2'); compress_chunk @@ -176,6 +178,7 @@ select create_hypertable('mytab2', 'time', chunk_time_interval => interval '1 we insert into mytab2 (time, a, c) select t,s,s from generate_series('2023-01-01 00:00:00+00'::timestamptz, '2023-01-01 00:00:00+00'::timestamptz + interval '1 day', interval '30 sec') t cross join generate_series(0,2, 1) s; alter table mytab2 set (timescaledb.compress, timescaledb.compress_segmentby = 'a, c'); +NOTICE: default order by for hypertable "mytab2" is set to ""time" DESC" select compress_chunk(c) from show_chunks('mytab2') c; compress_chunk ---------------------------------------- @@ -245,6 +248,7 @@ SELECT create_hypertable('test_defaults','time'); (1 row) ALTER TABLE test_defaults SET (timescaledb.compress,timescaledb.compress_segmentby='device_id'); +NOTICE: default order by for hypertable "test_defaults" is set to ""time" DESC" -- create 2 chunks INSERT INTO test_defaults SELECT '2000-01-01', 1; INSERT INTO test_defaults SELECT '2001-01-01', 1; @@ -315,6 +319,7 @@ NOTICE: adding not-null constraint to column "time" INSERT INTO mytab_prep VALUES ('2023-01-01'::timestamptz, 2, NULL, 2), ('2023-01-01'::timestamptz, 2, NULL, 2); alter table mytab_prep set (timescaledb.compress, timescaledb.compress_segmentby = 'a, c'); +NOTICE: default order by for hypertable "mytab_prep" is set to ""time" DESC" PREPARE p1 AS SELECT * FROM mytab_prep ORDER BY a, c, time DESC; select show_chunks as chunk_to_compress_prep from show_chunks('mytab_prep') limit 1 \gset @@ -385,6 +390,7 @@ INSERT INTO mytab VALUES ('2023-01-01'::timestamptz, 2, NULL, 2), select show_chunks as chunk_to_compress_mytab from show_chunks('mytab') limit 1 \gset -- index exists, recompression should happen segment by segment so expect a debug message alter table mytab set (timescaledb.compress, timescaledb.compress_segmentby = 'a, c'); +NOTICE: default order by for hypertable 
"mytab" is set to ""time" DESC" select compress_chunk(show_chunks('mytab')); compress_chunk ------------------------------------------ @@ -444,6 +450,9 @@ SELECT decompress_chunk(show_chunks('mytab')); alter table mytab set (timescaledb.compress = false); alter table mytab set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "mytab" is set to "" +NOTICE: default order by for hypertable "mytab" is set to ""time" DESC" select compress_chunk(show_chunks('mytab')); compress_chunk ------------------------------------------ @@ -478,6 +487,7 @@ NOTICE: adding not-null constraint to column "time" insert into nullseg_one values (:'start_time', 1, 1), (:'start_time', 1, 2), (:'start_time', 2,2), (:'start_time', 2,3); alter table nullseg_one set (timescaledb.compress, timescaledb.compress_segmentby= 'a'); +NOTICE: default order by for hypertable "nullseg_one" is set to ""time" DESC" select compress_chunk(show_chunks('nullseg_one')); compress_chunk ------------------------------------------ @@ -528,6 +538,7 @@ NOTICE: adding not-null constraint to column "time" insert into nullseg_many values (:'start_time', 1, 1, 1), (:'start_time', 1, 2, 2), (:'start_time', 2,2, 2), (:'start_time', 2,3, 3), (:'start_time', 2, NULL, 3); alter table nullseg_many set (timescaledb.compress, timescaledb.compress_segmentby= 'a, c'); +NOTICE: default order by for hypertable "nullseg_many" is set to ""time" DESC" select compress_chunk(show_chunks('nullseg_many')); compress_chunk ------------------------------------------ diff --git a/tsl/test/expected/reorder.out b/tsl/test/expected/reorder.out index dedb70bfee5..9933a3f61e0 100644 --- a/tsl/test/expected/reorder.out +++ b/tsl/test/expected/reorder.out @@ -1359,6 +1359,9 @@ SELECT table_name FROM create_hypertable('comp_ht_test','time'); (1 row) ALTER TABLE comp_ht_test SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "comp_ht_test" is set to "" +NOTICE: default order by for hypertable "comp_ht_test" is set to ""time" DESC" SELECT format('%I.%I', ht.schema_name, ht.table_name) AS "INTERNALTABLE" FROM diff --git a/tsl/test/expected/telemetry_stats-13.out b/tsl/test/expected/telemetry_stats-13.out index 8880e45dc1a..0b6fb72295b 100644 --- a/tsl/test/expected/telemetry_stats-13.out +++ b/tsl/test/expected/telemetry_stats-13.out @@ -265,6 +265,9 @@ SELECT (SELECT count(*) FROM normal) num_inserted_rows, -- Add compression ALTER TABLE hyper SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "hyper" is set to "" +NOTICE: default order by for hypertable "hyper" is set to ""time" DESC" SELECT compress_chunk(c) FROM show_chunks('hyper') c ORDER BY c LIMIT 4; compress_chunk diff --git a/tsl/test/expected/telemetry_stats-14.out b/tsl/test/expected/telemetry_stats-14.out index 6fe9153988c..3c743e59284 100644 --- a/tsl/test/expected/telemetry_stats-14.out +++ b/tsl/test/expected/telemetry_stats-14.out @@ -265,6 +265,9 @@ SELECT (SELECT count(*) FROM normal) num_inserted_rows, -- Add compression ALTER TABLE hyper SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "hyper" is set to "" +NOTICE: default order by for hypertable "hyper" is set to ""time" DESC" SELECT compress_chunk(c) FROM show_chunks('hyper') c ORDER BY c LIMIT 4; compress_chunk diff --git a/tsl/test/expected/telemetry_stats-15.out b/tsl/test/expected/telemetry_stats-15.out index 6fe9153988c..3c743e59284 100644 --- a/tsl/test/expected/telemetry_stats-15.out +++ b/tsl/test/expected/telemetry_stats-15.out @@ -265,6 +265,9 @@ SELECT (SELECT count(*) FROM normal) num_inserted_rows, -- Add compression ALTER TABLE hyper SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "hyper" is set to "" +NOTICE: default order by for hypertable "hyper" is set to ""time" DESC" SELECT compress_chunk(c) FROM show_chunks('hyper') c ORDER BY c LIMIT 4; compress_chunk diff --git a/tsl/test/expected/telemetry_stats-16.out b/tsl/test/expected/telemetry_stats-16.out index 6fe9153988c..3c743e59284 100644 --- a/tsl/test/expected/telemetry_stats-16.out +++ b/tsl/test/expected/telemetry_stats-16.out @@ -265,6 +265,9 @@ SELECT (SELECT count(*) FROM normal) num_inserted_rows, -- Add compression ALTER TABLE hyper SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "hyper" is set to "" +NOTICE: default order by for hypertable "hyper" is set to ""time" DESC" SELECT compress_chunk(c) FROM show_chunks('hyper') c ORDER BY c LIMIT 4; compress_chunk diff --git a/tsl/test/expected/transparent_decompression_join_index.out b/tsl/test/expected/transparent_decompression_join_index.out index 1b818825f73..7e9663be1ac 100644 --- a/tsl/test/expected/transparent_decompression_join_index.out +++ b/tsl/test/expected/transparent_decompression_join_index.out @@ -24,6 +24,7 @@ insert into test values create table test_copy as select * from test; -- compress the chunk alter table test set (timescaledb.compress, timescaledb.compress_segmentby='a, b'); +NOTICE: default order by for hypertable "test" is set to ""time" DESC" select compress_chunk(show_chunks('test')); compress_chunk ---------------------------------------- diff --git a/tsl/test/expected/transparent_decompression_queries.out b/tsl/test/expected/transparent_decompression_queries.out index 63dc4fae197..a05ecc89770 100644 --- a/tsl/test/expected/transparent_decompression_queries.out +++ b/tsl/test/expected/transparent_decompression_queries.out @@ -206,6 +206,9 @@ SELECT create_hypertable('pseudo','time'); (1 row) ALTER TABLE pseudo SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "pseudo" is set to "" +NOTICE: default order by for hypertable "pseudo" is set to ""time" DESC" INSERT INTO pseudo SELECT '2000-01-01'; SELECT compress_chunk(show_chunks('pseudo')); compress_chunk diff --git a/tsl/test/expected/vectorized_aggregation.out b/tsl/test/expected/vectorized_aggregation.out index 329d157c44e..552569895ad 100644 --- a/tsl/test/expected/vectorized_aggregation.out +++ b/tsl/test/expected/vectorized_aggregation.out @@ -12,6 +12,7 @@ SELECT FROM create_hypertable(relation=>'testtable', time_column_name=> 'time'); (1 row) ALTER TABLE testtable SET (timescaledb.compress, timescaledb.compress_segmentby='segment_by_value'); +NOTICE: default order by for hypertable "testtable" is set to ""time" DESC" INSERT INTO testtable SELECT time AS time, value AS segment_by_value, @@ -2270,6 +2271,7 @@ SELECT FROM create_hypertable(relation=>'testtable2', time_column_name=> 'time') (1 row) ALTER TABLE testtable2 SET (timescaledb.compress, timescaledb.compress_segmentby='segment_by_value1, segment_by_value2'); +NOTICE: default order by for hypertable "testtable2" is set to ""time" DESC" INSERT INTO testtable2 SELECT time AS time, value1 AS segment_by_value1, diff --git a/tsl/test/isolation/expected/osm_range_updates_iso.out b/tsl/test/isolation/expected/osm_range_updates_iso.out index b8d0f0e8fe8..cf89c33995a 100644 --- a/tsl/test/isolation/expected/osm_range_updates_iso.out +++ b/tsl/test/isolation/expected/osm_range_updates_iso.out @@ -131,6 +131,9 @@ osm_test | 0| | 3 step Cb: BEGIN; step UR1b: BEGIN; +C: WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +C: NOTICE: default segment by for hypertable "osm_test" is set to "" +C: NOTICE: default order by for hypertable "osm_test" is set to ""time" DESC" step Cenable: ALTER TABLE osm_test set (timescaledb.compress); diff --git a/tsl/test/shared/expected/cagg_compression.out b/tsl/test/shared/expected/cagg_compression.out index f10860ee565..173eac8cfff 100644 --- a/tsl/test/shared/expected/cagg_compression.out +++ b/tsl/test/shared/expected/cagg_compression.out @@ -350,7 +350,9 @@ CREATE MATERIALIZED VIEW comp_rename_cagg WITH (timescaledb.continuous, timescal SELECT time_bucket('1 week', time) AS bucket FROM comp_rename GROUP BY 1; NOTICE: continuous aggregate "comp_rename_cagg" is already up-to-date ALTER MATERIALIZED VIEW comp_rename_cagg RENAME COLUMN bucket to "time"; +SET client_min_messages TO WARNING; ALTER MATERIALIZED VIEW comp_rename_cagg SET ( timescaledb.compress='true'); -NOTICE: defaulting compress_orderby to "time" +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +RESET client_min_messages; DROP TABLE comp_rename CASCADE; NOTICE: drop cascades to 3 other objects diff --git a/tsl/test/shared/expected/compress_unique_index.out b/tsl/test/shared/expected/compress_unique_index.out index 38915c9f9d2..1ba2cc4f077 100644 --- a/tsl/test/shared/expected/compress_unique_index.out +++ b/tsl/test/shared/expected/compress_unique_index.out @@ -9,7 +9,7 @@ SELECT table_name FROM create_hypertable('compress_unique','offset_timestamp'); (1 row) CREATE UNIQUE INDEX uniq_expr ON compress_unique USING btree (lower((meter_id)::text), meter_channel_id, offset_timestamp, "timestamp"); -ALTER TABLE compress_unique SET (timescaledb.compress,timescaledb.compress_segmentby='meter_id,meter_channel_id'); +ALTER TABLE compress_unique SET (timescaledb.compress,timescaledb.compress_segmentby='meter_id,meter_channel_id', timescaledb.compress_orderby='offset_timestamp desc'); WARNING: column "timestamp" should be used for segmenting or ordering INSERT INTO compress_unique VALUES ('2000-01-01','m1','c1','2000-01-01'); INSERT INTO compress_unique VALUES ('2000-01-01','m1','c2','2000-01-01'); diff --git a/tsl/test/shared/expected/compression_dml.out b/tsl/test/shared/expected/compression_dml.out index 55a467763d1..e7da8841426 100644 --- a/tsl/test/shared/expected/compression_dml.out +++ b/tsl/test/shared/expected/compression_dml.out @@ -9,6 +9,9 @@ SELECT table_name FROM create_hypertable('i3719', 'time'); (1 row) ALTER TABLE i3719 SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "i3719" is set to "" +NOTICE: default order by for hypertable "i3719" is set to ""time" DESC" INSERT INTO i3719 VALUES('2021-01-01 00:00:00', 'chunk 1'); SELECT count(compress_chunk(c)) FROM show_chunks('i3719') c; count @@ -79,6 +82,7 @@ ALTER TABLE mytab SET ( timescaledb.compress, timescaledb.compress_segmentby = 'a, c' ); +NOTICE: default order by for hypertable "mytab" is set to ""time" DESC" -- get first chunk name SELECT chunk_schema || '.' 
|| chunk_name as "chunk_table" FROM timescaledb_information.chunks diff --git a/tsl/test/shared/expected/constraint_aware_append.out b/tsl/test/shared/expected/constraint_aware_append.out index d691ebc9cab..ea6447529c7 100644 --- a/tsl/test/shared/expected/constraint_aware_append.out +++ b/tsl/test/shared/expected/constraint_aware_append.out @@ -11,6 +11,9 @@ NOTICE: adding not-null constraint to column "time" (1 row) ALTER TABLE ca_append_result SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "ca_append_result" is set to "" +NOTICE: default order by for hypertable "ca_append_result" is set to ""time" DESC" INSERT INTO ca_append_result SELECT '2000-01-03','d1',0.3; SELECT count(compress_chunk(ch)) AS compressed FROM show_chunks('ca_append_result') ch; compressed diff --git a/tsl/test/shared/expected/decompress_join.out b/tsl/test/shared/expected/decompress_join.out index 0b619022d57..a5079c431e6 100644 --- a/tsl/test/shared/expected/decompress_join.out +++ b/tsl/test/shared/expected/decompress_join.out @@ -42,6 +42,7 @@ NOTICE: adding not-null constraint to column "time" (1 row) ALTER TABLE partial_join set(timescaledb.compress,timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "partial_join" is set to ""time" DESC" INSERT INTO partial_join SELECT '2000-01-01','d1'; SELECT count(*) FROM (SELECT compress_chunk(show_chunks('partial_join'),true)) compress; count diff --git a/tsl/test/shared/expected/decompress_placeholdervar.out b/tsl/test/shared/expected/decompress_placeholdervar.out index 98bb0d6459f..d2b99abe850 100644 --- a/tsl/test/shared/expected/decompress_placeholdervar.out +++ b/tsl/test/shared/expected/decompress_placeholdervar.out @@ -26,6 +26,8 @@ INSERT INTO decompress_phv_device INSERT INTO decompress_phv_ping VALUES (1, 41040, 3, 243.333333333333, '2019-03-21 02:30:48.007'), (1, 41040, 3, 300.333333333333, '2019-03-21 02:00:19.957'), (1, 41040, 3, 265.333333333333, '2019-03-21 01:28:50.669'), (1, 41040, 3, 174, '2019-03-21 00:57:09.733'), (1, 41040, 3, 552.666666666667, '2019-03-21 00:21:02.305'), (1, 41041, 3, 330.333333333333, '2019-03-21 02:30:44.984'), (1, 41041, 3, 753, '2019-03-21 02:00:38.062'), (1, 41041, 3, 260, '2019-03-21 01:30:10.21'), (1, 41041, 3, 357.333333333333, '2019-03-21 00:59:10.768'), (1, 41041, 3, 474.666666666667, '2019-03-21 00:20:54.272'), (1, 65101, 3, 17.5200004577637, '2019-03-21 02:36:50.606'), (1, 65101, 3, 17.5200004577637, '2019-03-21 02:06:43.631'), (1, 65101, 3, 31.8933359781901, '2019-03-21 01:34:56.56'), (1, 65101, 3, 17.5200004577637, '2019-03-21 01:03:10.913'), (1, 65101, 3, 17.5200004577637, '2019-03-21 00:33:53.707'), (1, 65201, 3, 17.5200004577637, '2019-03-21 02:31:58.444'), (1, 65201, 3, 17.5200004577637, '2019-03-21 02:01:53.565'), (1, 65201, 3, 17.5200004577637, '2019-03-21 01:30:16.261'), (1, 65201, 2, 17.5200004577637, '2019-03-21 00:59:34.953'), (1, 65201, 3, 17.5200004577637, '2019-03-21 00:27:19.228'), (1, 41040, 3, 31.6666666666667, '2019-03-22 03:29:45.404'), (1, 41040, 3, 128, '2019-03-22 03:01:36.323'), (1, 41040, 3, 50, '2019-03-22 02:31:20.107'), (1, 41040, 3, 149.666666666667, '2019-03-22 02:01:07.201'), (1, 41040, 3, 502, '2019-03-22 01:28:27.129'), (1, 41041, 3, 355, '2019-03-22 03:30:50.844'), (1, 41041, 3, 
53, '2019-03-22 03:01:40.348'), (1, 41041, 3, 121.333333333333, '2019-03-22 02:32:04.392'), (1, 41041, 3, 325.333333333333, '2019-03-22 02:02:06.968'), (1, 41041, 3, 576.666666666667, '2019-03-22 01:30:04.821'), (1, 65101, 3, 17.5200004577637, '2019-03-22 03:08:33.227'), (1, 65101, 3, 17.5200004577637, '2019-03-22 02:37:29.96'), (1, 65101, 3, 17.5200004577637, '2019-03-22 02:07:14.805'), (1, 65101, 3, 17.5200004577637, '2019-03-22 01:34:25.055'), (1, 65101, 3, 17.5200004577637, '2019-03-22 01:03:40.405'), (1, 65201, 3, 17.5200004577637, '2019-03-22 03:30:56.885'), (1, 65201, 3, 17.5200004577637, '2019-03-22 03:03:40.243'), (1, 65201, 3, 17.5200004577637, '2019-03-22 02:32:38.625'), (1, 65201, 3, 17.5200004577637, '2019-03-22 02:02:20.05'), (1, 65201, 3, 17.5200004577637, '2019-03-22 01:30:09.863'); ALTER TABLE decompress_phv_ping SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id, insert_ts DESC'); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "decompress_phv_ping" is set to "" SELECT count(*) AS compressed FROM ( select compress_chunk(i) FROM show_chunks('decompress_phv_ping') i ) q; compressed diff --git a/tsl/test/shared/expected/decompress_tracking.out b/tsl/test/shared/expected/decompress_tracking.out index b8c5ca6fabc..ed4214097e1 100644 --- a/tsl/test/shared/expected/decompress_tracking.out +++ b/tsl/test/shared/expected/decompress_tracking.out @@ -10,6 +10,7 @@ SELECT table_name FROM create_hypertable('decompress_tracking','time'); (1 row) ALTER TABLE decompress_tracking SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "decompress_tracking" is set to ""time" DESC" INSERT INTO decompress_tracking SELECT '2020-01-01'::timestamptz + format('%s hour', g)::interval, 'd1', random() FROM generate_series(1,10) g; INSERT INTO decompress_tracking SELECT '2020-01-01'::timestamptz + format('%s hour', g)::interval, 'd2', random() FROM generate_series(1,20) g; INSERT INTO decompress_tracking SELECT '2020-01-01'::timestamptz + format('%s hour', g)::interval, 'd3', random() FROM generate_series(1,30) g; diff --git a/tsl/test/shared/expected/gapfill-13.out b/tsl/test/shared/expected/gapfill-13.out index a07b634947d..7f06d00e735 100644 --- a/tsl/test/shared/expected/gapfill-13.out +++ b/tsl/test/shared/expected/gapfill-13.out @@ -3245,6 +3245,7 @@ SELECT '4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43', random(); ALTER TABLE gapfill_group_toast SET(timescaledb.compress, timescaledb.compress_segmentby = 'device'); +NOTICE: default order by for hypertable "gapfill_group_toast" is set to ""time" DESC" SELECT count(compress_chunk(c)) FROM show_chunks('gapfill_group_toast') c; count 2 diff --git a/tsl/test/shared/expected/gapfill-14.out b/tsl/test/shared/expected/gapfill-14.out index a07b634947d..7f06d00e735 100644 --- a/tsl/test/shared/expected/gapfill-14.out +++ b/tsl/test/shared/expected/gapfill-14.out @@ -3245,6 +3245,7 @@ SELECT '4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43', random(); ALTER TABLE gapfill_group_toast SET(timescaledb.compress, timescaledb.compress_segmentby = 'device'); +NOTICE: default order by for hypertable "gapfill_group_toast" is set to ""time" DESC" SELECT count(compress_chunk(c)) FROM 
show_chunks('gapfill_group_toast') c; count 2 diff --git a/tsl/test/shared/expected/gapfill-15.out b/tsl/test/shared/expected/gapfill-15.out index a07b634947d..7f06d00e735 100644 --- a/tsl/test/shared/expected/gapfill-15.out +++ b/tsl/test/shared/expected/gapfill-15.out @@ -3245,6 +3245,7 @@ SELECT '4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43', random(); ALTER TABLE gapfill_group_toast SET(timescaledb.compress, timescaledb.compress_segmentby = 'device'); +NOTICE: default order by for hypertable "gapfill_group_toast" is set to ""time" DESC" SELECT count(compress_chunk(c)) FROM show_chunks('gapfill_group_toast') c; count 2 diff --git a/tsl/test/shared/expected/gapfill-16.out b/tsl/test/shared/expected/gapfill-16.out index 15d8bd7ad20..fea3c637a3e 100644 --- a/tsl/test/shared/expected/gapfill-16.out +++ b/tsl/test/shared/expected/gapfill-16.out @@ -3247,6 +3247,7 @@ SELECT '4e0ee04cc6a94fd40497b8dbaac2fe434e0ee04cc6a94fd40497b8dbaac2fe43', random(); ALTER TABLE gapfill_group_toast SET(timescaledb.compress, timescaledb.compress_segmentby = 'device'); +NOTICE: default order by for hypertable "gapfill_group_toast" is set to ""time" DESC" SELECT count(compress_chunk(c)) FROM show_chunks('gapfill_group_toast') c; count 2 diff --git a/tsl/test/shared/expected/security_barrier.out b/tsl/test/shared/expected/security_barrier.out index 98e3a39c643..c64da9b7c0e 100644 --- a/tsl/test/shared/expected/security_barrier.out +++ b/tsl/test/shared/expected/security_barrier.out @@ -29,6 +29,9 @@ SELECT * FROM test_security_barrier_view; RESET ROLE; ALTER TABLE test_security_barrier SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_security_barrier" is set to "" +NOTICE: default order by for hypertable "test_security_barrier" is set to ""time" DESC" -- Compress the chunk SELECT compress_chunk(show_chunks('test_security_barrier')) IS NOT NULL AS compressed; compressed diff --git a/tsl/test/shared/sql/cagg_compression.sql b/tsl/test/shared/sql/cagg_compression.sql index 4a5841c32a7..ee81a88ba3d 100644 --- a/tsl/test/shared/sql/cagg_compression.sql +++ b/tsl/test/shared/sql/cagg_compression.sql @@ -104,7 +104,10 @@ CREATE MATERIALIZED VIEW comp_rename_cagg WITH (timescaledb.continuous, timescal SELECT time_bucket('1 week', time) AS bucket FROM comp_rename GROUP BY 1; ALTER MATERIALIZED VIEW comp_rename_cagg RENAME COLUMN bucket to "time"; + +SET client_min_messages TO WARNING; ALTER MATERIALIZED VIEW comp_rename_cagg SET ( timescaledb.compress='true'); +RESET client_min_messages; DROP TABLE comp_rename CASCADE; diff --git a/tsl/test/shared/sql/compress_unique_index.sql b/tsl/test/shared/sql/compress_unique_index.sql index ea69fd4d7ed..ee76c1cef15 100644 --- a/tsl/test/shared/sql/compress_unique_index.sql +++ b/tsl/test/shared/sql/compress_unique_index.sql @@ -8,7 +8,7 @@ CREATE TABLE compress_unique(offset_timestamp timestamptz not null, meter_id tex SELECT table_name FROM create_hypertable('compress_unique','offset_timestamp'); CREATE UNIQUE INDEX uniq_expr ON compress_unique USING btree (lower((meter_id)::text), meter_channel_id, offset_timestamp, "timestamp"); -ALTER TABLE compress_unique SET (timescaledb.compress,timescaledb.compress_segmentby='meter_id,meter_channel_id'); +ALTER TABLE compress_unique SET (timescaledb.compress,timescaledb.compress_segmentby='meter_id,meter_channel_id', timescaledb.compress_orderby='offset_timestamp desc'); INSERT INTO compress_unique VALUES ('2000-01-01','m1','c1','2000-01-01'); INSERT INTO compress_unique VALUES ('2000-01-01','m1','c2','2000-01-01'); diff --git a/tsl/test/sql/compression_conflicts.sql b/tsl/test/sql/compression_conflicts.sql index 59a0703d22e..78a48ce03a9 100644 --- a/tsl/test/sql/compression_conflicts.sql +++ b/tsl/test/sql/compression_conflicts.sql @@ -81,7 +81,7 @@ SELECT count(*) FROM ONLY :CHUNK; CREATE TABLE comp_conflicts_2(time timestamptz NOT NULL, device text, value float, UNIQUE(time, device)); SELECT table_name FROM create_hypertable('comp_conflicts_2','time'); -ALTER TABLE comp_conflicts_2 SET (timescaledb.compress); +ALTER TABLE comp_conflicts_2 SET (timescaledb.compress, timescaledb.compress_segmentby=''); -- implicitly create chunk INSERT INTO comp_conflicts_2 VALUES ('2020-01-01','d1',0.1); @@ -219,7 +219,7 @@ SELECT count(*) FROM ONLY :CHUNK; CREATE TABLE comp_conflicts_4(time timestamptz NOT NULL, device text, value float, UNIQUE(time, device)); SELECT table_name FROM create_hypertable('comp_conflicts_4','time'); -ALTER TABLE comp_conflicts_4 SET (timescaledb.compress,timescaledb.compress_orderby='time,device'); +ALTER TABLE comp_conflicts_4 SET (timescaledb.compress,timescaledb.compress_segmentby='',timescaledb.compress_orderby='time,device'); -- implicitly create chunk INSERT INTO comp_conflicts_4 SELECT generate_series('2020-01-01'::timestamp, '2020-01-01 2:00:00', '1s'), 'd1',0.1; diff --git a/tsl/test/sql/compression_defaults.sql b/tsl/test/sql/compression_defaults.sql index 0b0f0935561..2fc922e7a82 100644 --- a/tsl/test/sql/compression_defaults.sql +++ b/tsl/test/sql/compression_defaults.sql @@ -2,7 +2,7 @@ -- Please see the included NOTICE for 
copyright information and -- LICENSE-TIMESCALE for a copy of the license. -\c :TEST_DBNAME :ROLE_SUPERUSER +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER -- statistics on CREATE TABLE "public"."metrics" ( @@ -30,6 +30,36 @@ CREATE UNIQUE INDEX test_idx ON metrics(device_id, time); SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY['device_id']); +ALTER TABLE metrics SET (timescaledb.compress = true); +SELECT * FROM _timescaledb_catalog.compression_settings; +ALTER TABLE metrics SET (timescaledb.compress = false); + +ALTER TABLE metrics SET (timescaledb.compress = true, timescaledb.compress_segmentby = 'device_id'); +SELECT * FROM _timescaledb_catalog.compression_settings; +ALTER TABLE metrics SET (timescaledb.compress = false); + +--make sure all the GUC combinations work +SET timescaledb.compression_segmentby_default_function = ''; +SET timescaledb.compression_orderby_default_function = ''; +ALTER TABLE metrics SET (timescaledb.compress = true); +SELECT * FROM _timescaledb_catalog.compression_settings; +ALTER TABLE metrics SET (timescaledb.compress = false); + +SET timescaledb.compression_segmentby_default_function = ''; +RESET timescaledb.compression_orderby_default_function; +ALTER TABLE metrics SET (timescaledb.compress = true); +SELECT * FROM _timescaledb_catalog.compression_settings; +ALTER TABLE metrics SET (timescaledb.compress = false); + +RESET timescaledb.compression_segmentby_default_function; +SET timescaledb.compression_orderby_default_function = ''; +ALTER TABLE metrics SET (timescaledb.compress = true); +SELECT * FROM _timescaledb_catalog.compression_settings; +ALTER TABLE metrics SET (timescaledb.compress = false); + +RESET timescaledb.compression_segmentby_default_function; +RESET timescaledb.compression_orderby_default_function; + --opposite order of columns drop index test_idx; CREATE UNIQUE INDEX test_idx ON metrics(time, device_id); @@ -73,6 +103,10 @@ drop index test_idx2; SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY[]::text[]); +ALTER TABLE metrics SET (timescaledb.compress = true); +SELECT * FROM _timescaledb_catalog.compression_settings; +ALTER TABLE metrics SET (timescaledb.compress = false); + -- tables with no stats -- drop table metrics; @@ -152,4 +186,24 @@ SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY['devi drop index test_idx; CREATE UNIQUE INDEX test_idx ON metrics(device_id, time); SELECT _timescaledb_functions.get_segmentby_defaults('public.metrics'); -SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY['device_id']::text[]); \ No newline at end of file +SELECT _timescaledb_functions.get_orderby_defaults('public.metrics', ARRAY['device_id']::text[]); + +--test on an empty order_by +CREATE TABLE table1(col1 INT NOT NULL, col2 INT); +SELECT create_hypertable('table1','col1', chunk_time_interval => 10); +SELECT _timescaledb_functions.get_orderby_defaults('table1', ARRAY['col1']::text[]); +ALTER TABLE table1 SET (timescaledb.compress, timescaledb.compress_segmentby = 'col1'); +SELECT * FROM _timescaledb_catalog.compression_settings; +ALTER TABLE table1 SET (timescaledb.compress = false); + +\set ON_ERROR_STOP 0 +SET timescaledb.compression_segmentby_default_function = 'function_does_not_exist'; +SET timescaledb.compression_orderby_default_function = 'function_does_not_exist'; +--wrong function signatures +SET
timescaledb.compression_segmentby_default_function = '_timescaledb_functions.get_orderby_defaults'; +SET timescaledb.compression_orderby_default_function = '_timescaledb_functions.get_segmentby_defaults'; +\set ON_ERROR_STOP 1 +SET timescaledb.compression_orderby_default_function = '_timescaledb_functions.get_orderby_defaults'; +SET timescaledb.compression_segmentby_default_function = '_timescaledb_functions.get_segmentby_defaults'; +RESET timescaledb.compression_segmentby_default_function; +RESET timescaledb.compression_orderby_default_function; \ No newline at end of file diff --git a/tsl/test/sql/compression_errors.sql.in b/tsl/test/sql/compression_errors.sql.in index 6a70f3c3d6d..8266395a691 100644 --- a/tsl/test/sql/compression_errors.sql.in +++ b/tsl/test/sql/compression_errors.sql.in @@ -497,7 +497,7 @@ ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_s ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id,location'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); -ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_orderby = 'device_id,location'); +ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby='', timescaledb.compress_orderby = 'device_id,location'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); ALTER TABLE table_unique_index SET (timescaledb.compress, timescaledb.compress_segmentby = 'time,location', timescaledb.compress_orderby = 'device_id'); ALTER TABLE table_unique_index SET (timescaledb.compress = off); diff --git a/tsl/test/sql/compression_insert.sql b/tsl/test/sql/compression_insert.sql index 0cf756e6cae..4a69fc562f9 100644 --- a/tsl/test/sql/compression_insert.sql +++ b/tsl/test/sql/compression_insert.sql @@ -700,6 +700,7 @@ CREATE UNIQUE INDEX timestamp_id_idx ON test_limit(timestamp, id); ALTER TABLE test_limit SET ( timescaledb.compress, + timescaledb.compress_segmentby = '', timescaledb.compress_orderby = 'timestamp' ); SELECT count(compress_chunk(ch)) FROM show_chunks('test_limit') ch;
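The expected-output hunks above all capture the same behavioral change: enabling compression without an explicit timescaledb.compress_segmentby or timescaledb.compress_orderby now reports the defaults that were picked, and warns when no index makes the segment-by choice clear-cut. A minimal sketch of that interaction; the table "notice_demo" and its columns are illustrative and not taken from the tests:

-- No index on a non-dimension column: expect the uncertainty WARNING,
-- an empty default segment by, and the time dimension DESC as order by.
CREATE TABLE notice_demo(time timestamptz NOT NULL, device text, value float);
SELECT create_hypertable('notice_demo', 'time');
ALTER TABLE notice_demo SET (timescaledb.compress);
-- WARNING: there was some uncertainty picking the default segment by ...
-- NOTICE: default segment by for hypertable "notice_demo" is set to ""
-- NOTICE: default order by for hypertable "notice_demo" is set to ""time" DESC"

-- With an explicit segment by, only the order-by default is reported.
ALTER TABLE notice_demo SET (timescaledb.compress = false);
ALTER TABLE notice_demo SET (timescaledb.compress, timescaledb.compress_segmentby = 'device');
-- NOTICE: default order by for hypertable "notice_demo" is set to ""time" DESC"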
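The additions to compression_defaults.sql also pin down the two new GUCs that choose which functions compute these defaults. Condensed from the combinations the test walks through, reusing the illustrative table from the sketch above: the empty string disables a default-picking function, invalid values are rejected at SET time, and RESET restores the built-ins.

-- Exercise enabling compression with both default pickers disabled.
SET timescaledb.compression_segmentby_default_function = '';
SET timescaledb.compression_orderby_default_function = '';
ALTER TABLE notice_demo SET (timescaledb.compress = true);
SELECT * FROM _timescaledb_catalog.compression_settings;
ALTER TABLE notice_demo SET (timescaledb.compress = false);

-- A nonexistent function, or one with the wrong signature, is rejected.
\set ON_ERROR_STOP 0
SET timescaledb.compression_segmentby_default_function = 'function_does_not_exist';
SET timescaledb.compression_segmentby_default_function = '_timescaledb_functions.get_orderby_defaults';
\set ON_ERROR_STOP 1

-- The shipped functions are accepted; RESET restores the defaults.
SET timescaledb.compression_segmentby_default_function = '_timescaledb_functions.get_segmentby_defaults';
SET timescaledb.compression_orderby_default_function = '_timescaledb_functions.get_orderby_defaults';
RESET timescaledb.compression_segmentby_default_function;
RESET timescaledb.compression_orderby_default_function;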
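Several tests (comp_conflicts_2, comp_conflicts_4, table_unique_index, test_limit) instead opt out of the new default per table by passing an explicit empty segment by, apparently so those tests stay focused on what they actually exercise rather than on the new messages. A sketch of that opt-out, again on the illustrative table:

ALTER TABLE notice_demo SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = '',
    timescaledb.compress_orderby = 'time'
);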