From e08b648f23f6a9bcab9b3cde6cef391bd8204b82 Mon Sep 17 00:00:00 2001
From: Chang Chen
Date: Sat, 6 Jul 2024 10:43:55 +0800
Subject: [PATCH] Revert "[GLUTEN-1632][CH]Daily Update Clickhouse Version
 (20240705) (#6338)"

This reverts commit 4a674e5e8ab757b7699f8bc75377e67fe793ed17.

---
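The reverted change tracked a ClickHouse refactor that turned the single
chunk info into a collection (Chunk::getChunkInfos(), with .add()/.empty(),
as seen in the removed lines below); reverting restores the single-info API
(Chunk::setChunkInfo() / Chunk::hasChunkInfo()) exposed by the previously
pinned ClickHouse version. A minimal sketch of the restored pattern, assuming
the pre-refactor DB::Chunk interface; the chunk variable and the field values
are illustrative, DB::AggregatedChunkInfo is taken from the hunks below:

    // Pre-refactor: a chunk carries at most one ChunkInfo. Probe with
    // hasChunkInfo(), attach with setChunkInfo(). The newer API instead
    // keeps a collection reached through getChunkInfos().
    auto info = std::make_shared<DB::AggregatedChunkInfo>();
    info->is_overflows = false; // illustrative defaults
    info->bucket_num = -1;
    if (!output_chunk.hasChunkInfo())
        output_chunk.setChunkInfo(info);
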
 .../local-engine/Operator/DefaultHashAggregateResult.cpp   | 6 +++---
 .../Storages/Mergetree/SparkMergeTreeWriter.cpp            | 7 ++++---
 .../local-engine/Storages/Mergetree/SparkMergeTreeWriter.h | 2 +-
 cpp-ch/local-engine/Storages/SourceFromJavaIter.cpp        | 4 ++--
 cpp-ch/local-engine/tests/gtest_parser.cpp                 | 3 +--
 5 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/cpp-ch/local-engine/Operator/DefaultHashAggregateResult.cpp b/cpp-ch/local-engine/Operator/DefaultHashAggregateResult.cpp
index fbad02fda592..35f891581595 100644
--- a/cpp-ch/local-engine/Operator/DefaultHashAggregateResult.cpp
+++ b/cpp-ch/local-engine/Operator/DefaultHashAggregateResult.cpp
@@ -116,7 +116,7 @@ class DefaultHashAggrgateResultTransform : public DB::IProcessor
             has_input = true;
             output_chunk = DB::Chunk(result_cols, 1);
             auto info = std::make_shared<DB::AggregatedChunkInfo>();
-            output_chunk.getChunkInfos().add(std::move(info));
+            output_chunk.setChunkInfo(info);
             return Status::Ready;
         }
 
@@ -124,10 +124,10 @@ class DefaultHashAggrgateResultTransform : public DB::IProcessor
         if (input.hasData())
         {
             output_chunk = input.pull(true);
-            if (output_chunk.getChunkInfos().empty())
+            if (!output_chunk.hasChunkInfo())
             {
                 auto info = std::make_shared<DB::AggregatedChunkInfo>();
-                output_chunk.getChunkInfos().add(std::move(info));
+                output_chunk.setChunkInfo(info);
             }
             has_input = true;
             return Status::Ready;
diff --git a/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.cpp b/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.cpp
index 2f673fc386e8..406f2aaa23df 100644
--- a/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.cpp
+++ b/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.cpp
@@ -121,11 +121,12 @@ void SparkMergeTreeWriter::write(const DB::Block & block)
     checkAndMerge();
 }
 
-bool SparkMergeTreeWriter::chunkToPart(Chunk && plan_chunk)
+bool SparkMergeTreeWriter::chunkToPart(Chunk && chunk)
 {
-    if (Chunk result_chunk = DB::Squashing::squash(std::move(plan_chunk)))
+    if (chunk.hasChunkInfo())
     {
-        auto result = squashing->getHeader().cloneWithColumns(result_chunk.detachColumns());
+        Chunk squash_chunk = DB::Squashing::squash(std::move(chunk));
+        Block result = header.cloneWithColumns(squash_chunk.getColumns());
         return blockToPart(result);
     }
     return false;
diff --git a/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.h b/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.h
index 269b0352c056..13ac22394477 100644
--- a/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.h
+++ b/cpp-ch/local-engine/Storages/Mergetree/SparkMergeTreeWriter.h
@@ -77,7 +77,7 @@ class SparkMergeTreeWriter
     void saveMetadata();
     void commitPartToRemoteStorageIfNeeded();
     void finalizeMerge();
-    bool chunkToPart(Chunk && plan_chunk);
+    bool chunkToPart(Chunk && chunk);
     bool blockToPart(Block & block);
     bool useLocalStorage() const;
 
diff --git a/cpp-ch/local-engine/Storages/SourceFromJavaIter.cpp b/cpp-ch/local-engine/Storages/SourceFromJavaIter.cpp
index 1c5902c8ca67..37501e98504a 100644
--- a/cpp-ch/local-engine/Storages/SourceFromJavaIter.cpp
+++ b/cpp-ch/local-engine/Storages/SourceFromJavaIter.cpp
@@ -109,13 +109,13 @@ DB::Chunk SourceFromJavaIter::generate()
             auto info = std::make_shared<DB::AggregatedChunkInfo>();
             info->is_overflows = data->info.is_overflows;
             info->bucket_num = data->info.bucket_num;
-            result.getChunkInfos().add(std::move(info));
+            result.setChunkInfo(info);
         }
         else
         {
             result = BlockUtil::buildRowCountChunk(rows);
             auto info = std::make_shared<DB::AggregatedChunkInfo>();
-            result.getChunkInfos().add(std::move(info));
+            result.setChunkInfo(info);
         }
     }
     return result;
diff --git a/cpp-ch/local-engine/tests/gtest_parser.cpp b/cpp-ch/local-engine/tests/gtest_parser.cpp
index 34b3a8875f1a..24c796358f45 100644
--- a/cpp-ch/local-engine/tests/gtest_parser.cpp
+++ b/cpp-ch/local-engine/tests/gtest_parser.cpp
@@ -101,8 +101,7 @@ TEST(LocalExecutor, StorageObjectStorageSink)
 
     /// 2. Create Chunk
     /// 3. comsume
-    Chunk data = testChunk();
-    sink.consume(data);
+    sink.consume(testChunk());
     sink.onFinish();
 }
 