Merge pull request ClickHouse#35938 from ClickHouse/backport/22.3/35820
Backport ClickHouse#35820 to 22.3: Avoid processing per-column TTL multiple times
CurtizJ authored Apr 5, 2022
2 parents 82735cb + c9a1d9c commit abb756d
Showing 3 changed files with 53 additions and 1 deletion.
2 changes: 1 addition & 1 deletion src/Storages/MergeTree/MergeTreeDataPartTTLInfo.h
@@ -81,7 +81,7 @@ struct MergeTreeDataPartTTLInfos
     bool empty() const
     {
         /// part_min_ttl is the minimum of rows, rows_where and group_by TTLs
-        return !part_min_ttl && moves_ttl.empty() && recompression_ttl.empty();
+        return !part_min_ttl && moves_ttl.empty() && recompression_ttl.empty() && columns_ttl.empty();
     }
 };
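
The fix is the added columns_ttl.empty() term: previously, a data part whose only TTL expressions were per-column ones reported empty() == true, so its TTL info was treated as not calculated and was recomputed (with a log message) during merges. A minimal, self-contained C++ sketch of the corrected check (using a hypothetical stand-in type rather than the real MergeTreeDataPartTTLInfos fields) could look like this:

#include <cassert>
#include <ctime>
#include <string>
#include <unordered_map>

// Hypothetical, simplified stand-in for MergeTreeDataPartTTLInfos: only the
// fields relevant to the empty() check are modelled; the real struct differs.
struct PartTTLInfosSketch
{
    std::time_t part_min_ttl = 0;                              // minimum of rows, rows_where and group_by TTLs
    std::unordered_map<std::string, std::time_t> moves_ttl;
    std::unordered_map<std::string, std::time_t> recompression_ttl;
    std::unordered_map<std::string, std::time_t> columns_ttl;  // per-column TTLs

    bool empty() const
    {
        // Without the columns_ttl check, a part carrying only per-column TTLs
        // looked "empty" and its TTLs were forcefully recalculated on merge.
        return !part_min_ttl && moves_ttl.empty()
            && recompression_ttl.empty() && columns_ttl.empty();
    }
};

int main()
{
    PartTTLInfosSketch infos;
    infos.columns_ttl["value"] = 1735689600;  // a column TTL that has already been calculated
    assert(!infos.empty());                   // with the fix: not reported as empty, no re-processing
    return 0;
}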

1 change: 1 addition & 0 deletions tests/queries/0_stateless/02262_column_ttl.reference
@@ -0,0 +1 @@
1 0
51 changes: 51 additions & 0 deletions tests/queries/0_stateless/02262_column_ttl.sh
@@ -0,0 +1,51 @@
#!/usr/bin/env bash
# Tags: no-parallel
# ^^^^^^^^^^^
# Since the underlying view may disappear while flushing logs, which leads to:
#
# DB::Exception: Table test_x449vo..inner_id.9c14fb82-e6b1-4d1a-85a6-935c3a2a2029 is dropped. (TABLE_IS_DROPPED)
#

CUR_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
# shellcheck source=../shell_config.sh
. "$CUR_DIR"/../shell_config.sh

# regression test for column TTLs
# note that this should be written in .sh, since we need $CLICKHOUSE_DATABASE
# (not 'default') to catch the text_log

$CLICKHOUSE_CLIENT -nm -q "
drop table if exists ttl_02262;
drop table if exists this_text_log;
create table ttl_02262 (date Date, key Int, value String TTL date + interval 1 month) engine=MergeTree order by key;
insert into ttl_02262 values ('2010-01-01', 2010, 'foo');
optimize table ttl_02262 final;
detach table ttl_02262;
attach table ttl_02262;
-- create system.text_log
system flush logs;
"

ttl_02262_uuid=$($CLICKHOUSE_CLIENT -q "select uuid from system.tables where database = '$CLICKHOUSE_DATABASE' and name = 'ttl_02262'")

$CLICKHOUSE_CLIENT -nm -q "
-- OPTIMIZE TABLE x FINAL will be done in the background;
-- attach to its log via the table UUID in query_id (see merger/mutator code).
create materialized view this_text_log engine=Memory() as
select * from system.text_log where query_id like '%${ttl_02262_uuid}%';
optimize table ttl_02262 final;
system flush logs;
-- If TTL is applied again (during OPTIMIZE TABLE FINAL), it will produce the following message:
--
-- Some TTL values were not calculated for part 201701_487_641_3. Will calculate them forcefully during merge.
--
-- Let's ensure that this does not happen anymore:
select count()>0, countIf(message LIKE '%TTL%') from this_text_log;
drop table ttl_02262;
drop table this_text_log;
"
