diff --git a/cpp-ch/clickhouse.version b/cpp-ch/clickhouse.version
index 0fb13497d01a..92bf886e9b85 100644
--- a/cpp-ch/clickhouse.version
+++ b/cpp-ch/clickhouse.version
@@ -1,4 +1,4 @@
 CH_ORG=Kyligence
-CH_BRANCH=rebase_ch/20240704
-CH_COMMIT=f617655ccea
+CH_BRANCH=rebase_ch/20240705
+CH_COMMIT=531a87ed802
diff --git a/cpp-ch/local-engine/Operator/DefaultHashAggregateResult.cpp b/cpp-ch/local-engine/Operator/DefaultHashAggregateResult.cpp
index 35f891581595..fbad02fda592 100644
--- a/cpp-ch/local-engine/Operator/DefaultHashAggregateResult.cpp
+++ b/cpp-ch/local-engine/Operator/DefaultHashAggregateResult.cpp
@@ -116,7 +116,7 @@ class DefaultHashAggrgateResultTransform : public DB::IProcessor
             has_input = true;
             output_chunk = DB::Chunk(result_cols, 1);
             auto info = std::make_shared<DB::AggregatedChunkInfo>();
-            output_chunk.setChunkInfo(info);
+            output_chunk.getChunkInfos().add(std::move(info));
             return Status::Ready;
         }
@@ -124,10 +124,10 @@ class DefaultHashAggrgateResultTransform : public DB::IProcessor
         if (input.hasData())
         {
             output_chunk = input.pull(true);
-            if (!output_chunk.hasChunkInfo())
+            if (output_chunk.getChunkInfos().empty())
             {
                 auto info = std::make_shared<DB::AggregatedChunkInfo>();
-                output_chunk.setChunkInfo(info);
+                output_chunk.getChunkInfos().add(std::move(info));
             }
             has_input = true;
             return Status::Ready;
diff --git a/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.cpp b/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.cpp
index 406f2aaa23df..2f673fc386e8 100644
--- a/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.cpp
+++ b/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.cpp
@@ -121,12 +121,11 @@ void SparkMergeTreeWriter::write(const DB::Block & block)
     checkAndMerge();
 }
 
-bool SparkMergeTreeWriter::chunkToPart(Chunk && chunk)
+bool SparkMergeTreeWriter::chunkToPart(Chunk && plan_chunk)
 {
-    if (chunk.hasChunkInfo())
+    if (Chunk result_chunk = DB::Squashing::squash(std::move(plan_chunk)))
     {
-        Chunk squash_chunk = DB::Squashing::squash(std::move(chunk));
-        Block result = header.cloneWithColumns(squash_chunk.getColumns());
+        auto result = squashing->getHeader().cloneWithColumns(result_chunk.detachColumns());
         return blockToPart(result);
     }
     return false;
diff --git a/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.h b/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.h
index 13ac22394477..269b0352c056 100644
--- a/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.h
+++ b/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.h
@@ -77,7 +77,7 @@ class SparkMergeTreeWriter
     void saveMetadata();
     void commitPartToRemoteStorageIfNeeded();
     void finalizeMerge();
-    bool chunkToPart(Chunk && chunk);
+    bool chunkToPart(Chunk && plan_chunk);
     bool blockToPart(Block & block);
     bool useLocalStorage() const;
diff --git a/cpp-ch/local-engine/Storages/SourceFromJavaIter.cpp b/cpp-ch/local-engine/Storages/SourceFromJavaIter.cpp
index 37501e98504a..1c5902c8ca67 100644
--- a/cpp-ch/local-engine/Storages/SourceFromJavaIter.cpp
+++ b/cpp-ch/local-engine/Storages/SourceFromJavaIter.cpp
@@ -109,13 +109,13 @@ DB::Chunk SourceFromJavaIter::generate()
             auto info = std::make_shared<DB::AggregatedChunkInfo>();
             info->is_overflows = data->info.is_overflows;
             info->bucket_num = data->info.bucket_num;
-            result.setChunkInfo(info);
+            result.getChunkInfos().add(std::move(info));
         }
         else
         {
             result = BlockUtil::buildRowCountChunk(rows);
             auto info = std::make_shared<DB::AggregatedChunkInfo>();
-            result.setChunkInfo(info);
+            result.getChunkInfos().add(std::move(info));
         }
     }
     return result;
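The repeated `setChunkInfo` → `getChunkInfos().add(...)` edits above track the upstream ClickHouse refactor pulled in by the `CH_COMMIT` bump: a `Chunk` no longer carries a single optional `ChunkInfo` but a collection of them, so several processors can attach infos independently. Below is a minimal sketch of the before/after shape; the classes are simplified stand-ins written for this note, not the real `DB::Chunk`/`DB::ChunkInfo` declarations.

```cpp
#include <cassert>
#include <memory>
#include <vector>

// Simplified stand-ins for DB::ChunkInfo and DB::Chunk, illustrating the API
// shape this diff migrates to. The real ClickHouse classes carry more
// machinery (type-erased storage, cloning, typed lookup, etc.).
struct ChunkInfo { virtual ~ChunkInfo() = default; };
struct AggregatedChunkInfo : ChunkInfo { bool is_overflows = false; int bucket_num = -1; };

class ChunkInfoCollection
{
public:
    void add(std::shared_ptr<ChunkInfo> info) { infos.push_back(std::move(info)); }
    bool empty() const { return infos.empty(); }
private:
    std::vector<std::shared_ptr<ChunkInfo>> infos;
};

class Chunk
{
public:
    // Old API: one slot -- chunk.setChunkInfo(info) / chunk.hasChunkInfo().
    // New API: a collection reached through getChunkInfos():
    ChunkInfoCollection & getChunkInfos() { return infos; }
private:
    ChunkInfoCollection infos;
};

int main()
{
    Chunk chunk;
    // The guard pattern used throughout the diff: attach an info only when
    // the chunk does not already carry one.
    if (chunk.getChunkInfos().empty())
    {
        auto info = std::make_shared<AggregatedChunkInfo>();
        chunk.getChunkInfos().add(std::move(info));
    }
    assert(!chunk.getChunkInfos().empty());
}
```

The `chunkToPart` rewrite is the same migration seen from the squashing side: the guard is now whether `DB::Squashing::squash` yielded a non-empty chunk, rather than whether a chunk info happened to be present.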
diff --git a/cpp-ch/local-engine/tests/gtest_parser.cpp b/cpp-ch/local-engine/tests/gtest_parser.cpp
index 24c796358f45..34b3a8875f1a 100644
--- a/cpp-ch/local-engine/tests/gtest_parser.cpp
+++ b/cpp-ch/local-engine/tests/gtest_parser.cpp
@@ -101,7 +101,8 @@ TEST(LocalExecutor, StorageObjectStorageSink)
 
     /// 2. Create Chunk
     /// 3. comsume
-    sink.consume(testChunk());
+    Chunk data = testChunk();
+    sink.consume(data);
     sink.onFinish();
 }
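The test change is a knock-on effect of the same rebase: passing `testChunk()` directly stops compiling if `consume()` now takes its chunk by non-const lvalue reference, because a temporary cannot bind to `Chunk &`. A sketch of the mechanics under that assumption; `Sink` here is a hypothetical stand-in, not the real `StorageObjectStorageSink`.

```cpp
struct Chunk { };

// Hypothetical stand-in mirroring the test's sink. Assumption: after the
// rebase, consume() takes Chunk & (non-const lvalue reference), which is
// what the two-line rewrite in gtest_parser.cpp suggests.
struct Sink
{
    void consume(Chunk & chunk) { (void)chunk; }
    void onFinish() { }
};

Chunk testChunk() { return {}; }

int main()
{
    Sink sink;
    // sink.consume(testChunk());  // would not compile under the assumed
    //                             // signature: rvalue cannot bind to Chunk &
    Chunk data = testChunk();      // materialize a named lvalue first
    sink.consume(data);            // binds fine
    sink.onFinish();
}
```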