From cb100cb3bc7459bb489154937b3a076c5bd9f1d8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mi=C5=82osz=20=C5=BBeglarski?=
Date: Thu, 11 Jul 2024 16:50:27 +0200
Subject: [PATCH 01/11] [Continuous batching] Replace standard max_element
 call with custom loop for greedy sampling (#607)

Searching for the max element in a custom loop gives better performance than
using std::max_element.
---
 src/cpp/src/sampler.hpp | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/src/cpp/src/sampler.hpp b/src/cpp/src/sampler.hpp
index dc631c68ac..6390fc8725 100644
--- a/src/cpp/src/sampler.hpp
+++ b/src/cpp/src/sampler.hpp
@@ -219,8 +219,13 @@ class Sampler {
     }

     Token _greedy_sample(const std::vector<Token>& logit_vector) const {
-        auto out_token = std::max_element(logit_vector.begin(), logit_vector.end(), [](const Token& lhs, const Token& rhs) { return lhs.m_log_prob < rhs.m_log_prob; });
-        return *out_token;
+        Token max_token{-std::numeric_limits<float>::infinity(), 0};
+        for (const auto& logit : logit_vector) {
+            if (logit.m_log_prob > max_token.m_log_prob) {
+                max_token = logit;
+            }
+        }
+        return max_token;
     }

     std::vector<Token> _multinomial_sample(const std::vector<Token>& logit_vector, size_t num_tokens_per_sequence) {

From f0e41909ab06e22c569f1af54654aad521ce4a6e Mon Sep 17 00:00:00 2001
From: Pavel Esir
Date: Fri, 12 Jul 2024 12:21:38 +0200
Subject: [PATCH 02/11] wip

---
 samples/CMakeLists.txt                        |  1 +
 .../benchmark_vanilla_genai/CMakeLists.txt    | 25 +++++++
 samples/cpp/benchmark_vanilla_genai/README.md |  2 +
 .../benchmark_vanilla_genai.cpp               | 65 +++++++++++++++++++
 .../openvino/genai/generation_metrics.hpp     | 40 ++++++++++++
 .../include/openvino/genai/llm_pipeline.hpp   |  4 ++
 src/cpp/src/generation_metrics.cpp            | 62 ++++++++++++++++++
 src/cpp/src/greedy_decoding.cpp               | 17 ++++-
 src/cpp/src/llm_pipeline.cpp                  | 10 ++-
 src/cpp/src/llm_pipeline_base.hpp             |  2 +
 10 files changed, 223 insertions(+), 5 deletions(-)
 create mode 100644 samples/cpp/benchmark_vanilla_genai/CMakeLists.txt
 create mode 100644 samples/cpp/benchmark_vanilla_genai/README.md
 create mode 100644 samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp
 create mode 100644 src/cpp/include/openvino/genai/generation_metrics.hpp
 create mode 100644 src/cpp/src/generation_metrics.cpp

diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt
index 0839d58428..44f8d580b2 100644
--- a/samples/CMakeLists.txt
+++ b/samples/CMakeLists.txt
@@ -10,6 +10,7 @@ add_subdirectory(cpp/greedy_causal_lm)
 add_subdirectory(cpp/multinomial_causal_lm)
 add_subdirectory(cpp/prompt_lookup_decoding_lm)
 add_subdirectory(cpp/speculative_decoding_lm)
+add_subdirectory(cpp/benchmark_vanilla_genai)

 install(FILES requirements.txt DESTINATION samples
         COMPONENT cpp_samples_genai)

diff --git a/samples/cpp/benchmark_vanilla_genai/CMakeLists.txt b/samples/cpp/benchmark_vanilla_genai/CMakeLists.txt
new file mode 100644
index 0000000000..e871f5a33a
--- /dev/null
+++ b/samples/cpp/benchmark_vanilla_genai/CMakeLists.txt
@@ -0,0 +1,25 @@
+# Copyright (C) 2023-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+
+find_package(OpenVINOGenAI REQUIRED PATHS
+    "${CMAKE_BINARY_DIR}"  # Reuse the package from the build.
+    ${OpenVINO_DIR}  # GenAI may be installed alongside OpenVINO.
+) + +FetchContent_Declare(cxxopts + URL https://github.com/jarro2783/cxxopts/archive/refs/tags/v3.1.1.tar.gz + URL_HASH SHA256=523175f792eb0ff04f9e653c90746c12655f10cb70f1d5e6d6d9491420298a08) +FetchContent_MakeAvailable(cxxopts) + +add_executable(benchmark_vanilla_genai benchmark_vanilla_genai.cpp) +target_link_libraries(benchmark_vanilla_genai PRIVATE openvino::genai cxxopts::cxxopts) +set_target_properties(benchmark_vanilla_genai PROPERTIES + COMPILE_PDB_NAME benchmark_vanilla_genai + # Ensure out of box LC_RPATH on macOS with SIP + INSTALL_RPATH_USE_LINK_PATH ON) +# target_compile_features(benchmark_vanilla_genai PRIVATE cxx_std_11) +install(TARGETS benchmark_vanilla_genai + RUNTIME DESTINATION samples_bin/ + COMPONENT samples_bin + EXCLUDE_FROM_ALL) diff --git a/samples/cpp/benchmark_vanilla_genai/README.md b/samples/cpp/benchmark_vanilla_genai/README.md new file mode 100644 index 0000000000..739c2e950c --- /dev/null +++ b/samples/cpp/benchmark_vanilla_genai/README.md @@ -0,0 +1,2 @@ +# benchmark OpenVINO GenAI sample + diff --git a/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp b/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp new file mode 100644 index 0000000000..ccb7650b84 --- /dev/null +++ b/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp @@ -0,0 +1,65 @@ +// Copyright (C) 2023-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "openvino/genai/llm_pipeline.hpp" +#include + +int main(int argc, char* argv[]) try { + cxxopts::Options options("benchmark_vanilla_genai", "Help command"); + + options.add_options() + ("p,prompt", "Prompt", cxxopts::value()->default_value("The Sky is blue because")) + ("m,model", "Path to model and tokenizers base directory", cxxopts::value()->default_value(".")) + ("nw,num_warmup", "Number of warmup iterations", cxxopts::value()->default_value(std::to_string(1))) + ("n,num_iter", "Number of iterations", cxxopts::value()->default_value(std::to_string(1))) + ("d,device", "device", cxxopts::value()->default_value("CPU")) + ("h,help", "Print usage"); + + cxxopts::ParseResult result; + try { + result = options.parse(argc, argv); + } catch (const cxxopts::exceptions::exception& e) { + std::cout << e.what() << "\n\n"; + std::cout << options.help() << std::endl; + return EXIT_FAILURE; + } + + if (result.count("help")) { + std::cout << options.help() << std::endl; + return EXIT_SUCCESS; + } + + std::string prompt = result["prompt"].as(); + const std::string model_path = result["model"].as(); + std::string device = result["device"].as(); + size_t num_warmup = result["num_warmup"].as(); + size_t num_iter = result["num_iter"].as(); + + ov::genai::GenerationConfig config; + config.max_new_tokens = 100; + + ov::genai::LLMPipeline pipe(model_path, device); + + for (size_t i = 0; i < num_warmup; i++) + pipe.generate(prompt, config); + + ov::genai::GenerationMetrics metrics; + for (size_t i = 0; i < num_iter; i++) { + ov::genai::DecodedResults res = pipe.generate(prompt, config); + metrics = metrics + res.metrics; + metrics.load_time = res.metrics.load_time; + } + + std::cout << "Load time: " << metrics.load_time << " ms" << std::endl; + std::cout << "ttft: " << metrics.mean_ttft << " ± " << metrics.std_ttft << " ms" << std::endl; + std::cout << "tpot: " << metrics.mean_tpot << " ± " << metrics.std_tpot << " ms" << std::endl; + std::cout << "Tokens/s: " << metrics.get_tokens_per_sec().first << std::endl; + + return 0; +} catch (const std::exception& error) { + std::cerr << error.what() << '\n'; + 
return EXIT_FAILURE; +} catch (...) { + std::cerr << "Non-exception object thrown\n"; + return EXIT_FAILURE; +} diff --git a/src/cpp/include/openvino/genai/generation_metrics.hpp b/src/cpp/include/openvino/genai/generation_metrics.hpp new file mode 100644 index 0000000000..7129e5c52b --- /dev/null +++ b/src/cpp/include/openvino/genai/generation_metrics.hpp @@ -0,0 +1,40 @@ +// Copyright (C) 2023-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include +#include +#include + +namespace ov { +namespace genai { + +using TimePoints = std::vector; + +struct GenerationMetrics { + GenerationMetrics() = default; + + GenerationMetrics(const TimePoints& tok_times, size_t batch_size = 1); + GenerationMetrics(const std::vector& durations, const std::vector& times_to_first_token, size_t batch_size = 1); + + // First token time. + float mean_ttft; + float std_ttft; + std::vector times_to_first_token; + + // Time per output token. + float mean_tpot; + float std_tpot; + std::vector durations; + + std::pair get_tokens_per_sec() const; + size_t batch_size; + float load_time; + + GenerationMetrics operator+(GenerationMetrics const& metrics) const; +}; + +} // namespace genai +} // namespace ov diff --git a/src/cpp/include/openvino/genai/llm_pipeline.hpp b/src/cpp/include/openvino/genai/llm_pipeline.hpp index 84dc02bd58..9f0c9fba97 100644 --- a/src/cpp/include/openvino/genai/llm_pipeline.hpp +++ b/src/cpp/include/openvino/genai/llm_pipeline.hpp @@ -5,11 +5,13 @@ #include #include +#include #include "openvino/core/any.hpp" #include "openvino/genai/generation_config.hpp" #include "openvino/genai/tokenizer.hpp" #include "openvino/genai/streamer_base.hpp" +#include "openvino/genai/generation_metrics.hpp" namespace ov { namespace genai { @@ -34,6 +36,7 @@ class EncodedResults { public: std::vector> tokens; std::vector scores; + GenerationMetrics metrics; }; /** @@ -47,6 +50,7 @@ class DecodedResults { public: std::vector texts; std::vector scores; + GenerationMetrics metrics; // @brief Convert DecodedResults to a string. 
operator std::string() const { diff --git a/src/cpp/src/generation_metrics.cpp b/src/cpp/src/generation_metrics.cpp new file mode 100644 index 0000000000..8ca8e0a07d --- /dev/null +++ b/src/cpp/src/generation_metrics.cpp @@ -0,0 +1,62 @@ +// Copyright (C) 2023-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "openvino/genai/generation_metrics.hpp" +#include + +namespace { + +std::pair calc_mean_and_std(const std::vector& durations) { + float mean = std::accumulate(durations.begin(), durations.end(), 0.0f) / durations.size(); + + float sum_square_durations = std::accumulate(durations.begin(), durations.end(), 0.0f, + [](const float& acc, const float& duration) -> float { + return acc + duration * duration; + }); + float std = std::sqrt(sum_square_durations / durations.size() - mean * mean); + return {mean, std}; +} + +} // namespace + +namespace ov { +namespace genai { + + +GenerationMetrics::GenerationMetrics(const TimePoints& tok_times, size_t batch_size) { + this->batch_size = batch_size; + durations = std::vector(tok_times.size() - 1); + for (size_t i = 1; i < tok_times.size(); ++i) { + durations[i - 1] = std::chrono::duration_cast(tok_times[i] - tok_times[i - 1]).count(); + } + times_to_first_token.emplace_back(durations[0]); + + std::tie(mean_tpot, std_tpot) = calc_mean_and_std(durations); + std::tie(mean_ttft, std_ttft) = calc_mean_and_std(times_to_first_token); +} + +GenerationMetrics::GenerationMetrics(const std::vector& durations_, const std::vector& times_to_first_token_, size_t batch_size) + : durations(durations_), times_to_first_token(times_to_first_token_) { + this->batch_size = batch_size; + std::tie(mean_tpot, std_tpot) = calc_mean_and_std(durations); + std::tie(mean_ttft, std_ttft) = calc_mean_and_std(times_to_first_token); +} + +GenerationMetrics GenerationMetrics::operator+(GenerationMetrics const& metrics) const { + std::vector new_durations = durations; + std::vector new_times_to_first_token = times_to_first_token; + new_durations.insert(new_durations.end(), metrics.durations.begin(), metrics.durations.end()); + new_times_to_first_token.insert(new_times_to_first_token.end(), metrics.times_to_first_token.begin(), metrics.times_to_first_token.end()); + + return GenerationMetrics(new_durations, new_times_to_first_token); +} + +std::pair GenerationMetrics::get_tokens_per_sec() const { + auto mean_tps = 1000.0f * batch_size / mean_tpot; + auto std_tps = 1000.0f * std_tpot / (mean_tpot * mean_tpot); + return {mean_tps, std_tps}; +} + + +} // namespace genai +} // namespace ov diff --git a/src/cpp/src/greedy_decoding.cpp b/src/cpp/src/greedy_decoding.cpp index 9170c7d2f9..dad93a0e6e 100644 --- a/src/cpp/src/greedy_decoding.cpp +++ b/src/cpp/src/greedy_decoding.cpp @@ -19,12 +19,18 @@ EncodedResults greedy_decoding( const size_t batch_size = prompts_shape[0]; size_t running_batch_size = batch_size; size_t prompt_len = prompts_shape[1]; + size_t max_new_tokens = generation_config.get_max_new_tokens(prompt_len); EncodedResults results; + // Time before the first token generated as a reference point. 
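+    // steady_clock is monotonic, which makes it suitable for measuring elapsed durations.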
+ ov::genai::TimePoints tok_times; + tok_times.reserve(max_new_tokens); + tok_times.emplace_back(std::chrono::steady_clock::now()); + results.scores.resize(running_batch_size); results.tokens.resize(running_batch_size); std::fill(results.scores.begin(), results.scores.end(), 0); - + m_model_runner.set_tensor("input_ids", input_ids); m_model_runner.set_tensor("attention_mask", attention_mask); if (position_ids.has_value()) @@ -50,6 +56,8 @@ EncodedResults greedy_decoding( eos_met[batch] = (out_token == generation_config.eos_token_id); m_model_runner.get_tensor("input_ids").data()[batch] = out_token; } + tok_times.emplace_back(std::chrono::steady_clock::now()); + if (streamer && streamer->put(token_iter_results[0])) { return results; } @@ -58,8 +66,8 @@ EncodedResults greedy_decoding( if (!generation_config.ignore_eos && all_are_eos) return results; - size_t max_tokens = generation_config.get_max_new_tokens(prompt_len); - for (size_t i = 0; i < max_tokens - 1; ++i) { + + for (size_t i = 0; i < max_new_tokens - 1; ++i) { if (position_ids.has_value()) utils::update_position_ids(m_model_runner.get_tensor("position_ids"), m_model_runner.get_tensor("attention_mask")); m_model_runner.set_tensor("attention_mask", utils::extend_attention(m_model_runner.get_tensor("attention_mask"))); @@ -80,6 +88,7 @@ EncodedResults greedy_decoding( m_model_runner.get_tensor("input_ids").data()[batch] = out_token; } + tok_times.emplace_back(std::chrono::steady_clock::now()); if (streamer && streamer->put(token_iter_results[0])) return results; @@ -106,6 +115,8 @@ EncodedResults greedy_decoding( if (streamer) { streamer->end(); } + + results.metrics = GenerationMetrics(tok_times); return results; } diff --git a/src/cpp/src/llm_pipeline.cpp b/src/cpp/src/llm_pipeline.cpp index 507d988a6a..918e744286 100644 --- a/src/cpp/src/llm_pipeline.cpp +++ b/src/cpp/src/llm_pipeline.cpp @@ -9,6 +9,7 @@ #include #include "openvino/genai/generation_config.hpp" #include "openvino/genai/llm_pipeline.hpp" +#include "openvino/genai/generation_metrics.hpp" #include "llm_pipeline_base.hpp" #include "llm_pipeline_static.hpp" #include "utils.hpp" @@ -155,6 +156,8 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { m_history.push_back({{"role", "assistant"}, {"content", answer}}); } + decoded_results.metrics = std::move(encoded_results.metrics); + decoded_results.metrics.load_time = m_load_time_ms; return decoded_results; } @@ -253,7 +256,6 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { } else { m_is_cache_empty = false; } - return result; } @@ -350,6 +352,7 @@ ov::genai::LLMPipeline::LLMPipeline( const std::string& device, const ov::AnyMap& plugin_config ) { + if (device == "NPU") { m_pimpl = make_unique(std::filesystem::path(model_path), tokenizer, device, plugin_config); } else { @@ -361,12 +364,15 @@ ov::genai::LLMPipeline::LLMPipeline( const std::string& path, const std::string& device, const ov::AnyMap& config -) { +) { + auto start_time = std::chrono::steady_clock::now(); if (device == "NPU") { m_pimpl = make_unique(std::filesystem::path(path), device, config); } else { m_pimpl = make_unique(std::filesystem::path(path), device, config); } + auto stop_time = std::chrono::steady_clock::now(); + m_pimpl->m_load_time_ms = std::chrono::duration_cast(stop_time - start_time).count(); } ov::genai::GenerationConfig ov::genai::LLMPipeline::get_generation_config() const { diff --git a/src/cpp/src/llm_pipeline_base.hpp b/src/cpp/src/llm_pipeline_base.hpp index 9df6442b35..7e58cd3b37 100644 --- 
a/src/cpp/src/llm_pipeline_base.hpp
+++ b/src/cpp/src/llm_pipeline_base.hpp
@@ -36,6 +36,8 @@ class LLMPipelineImplBase {

     Tokenizer m_tokenizer;
     GenerationConfig m_generation_config;
+
+    float m_load_time_ms = 0;
 };

 }  // namespace genai

From 7cab496c63a598dcb96027c9a88d3c96ef1b5b48 Mon Sep 17 00:00:00 2001
From: Pavel Esir
Date: Fri, 19 Jul 2024 13:01:02 +0200
Subject: [PATCH 03/11] add detokenization metric; refactor split to
 perf_counter & perf_metrics

---
 .../benchmark_vanilla_genai.cpp               |  8 +-
 .../openvino/genai/generation_metrics.hpp     | 40 ---------
 .../include/openvino/genai/llm_pipeline.hpp   |  6 +-
 .../include/openvino/genai/perf_metrics.hpp   | 50 ++++++++++++
 src/cpp/src/generation_metrics.cpp            | 62 --------------
 src/cpp/src/greedy_decoding.cpp               | 19 ++---
 src/cpp/src/group_beam_searcher.cpp           | 19 +++--
 src/cpp/src/llm_pipeline.cpp                  | 30 +++++--
 src/cpp/src/perf_counters.cpp                 | 21 +++++
 src/cpp/src/perf_counters.hpp                 | 44 ++++++++++
 src/cpp/src/perf_metrics.cpp                  | 81 +++++++++++++++++++
 src/cpp/src/tokenizer.cpp                     |  2 +
 src/cpp/src/utils.hpp                         | 14 ++++
 src/python/py_generate_pipeline.cpp           | 14 ++++
 tests/python_tests/ov_genai_test_utils.py     |  2 +
 15 files changed, 282 insertions(+), 130 deletions(-)
 delete mode 100644 src/cpp/include/openvino/genai/generation_metrics.hpp
 create mode 100644 src/cpp/include/openvino/genai/perf_metrics.hpp
 delete mode 100644 src/cpp/src/generation_metrics.cpp
 create mode 100644 src/cpp/src/perf_counters.cpp
 create mode 100644 src/cpp/src/perf_counters.hpp
 create mode 100644 src/cpp/src/perf_metrics.cpp

diff --git a/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp b/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp
index ccb7650b84..6489282b0b 100644
--- a/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp
+++ b/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp
@@ -37,23 +37,25 @@ int main(int argc, char* argv[]) try {

     ov::genai::GenerationConfig config;
     config.max_new_tokens = 100;
+    config.num_beam_groups = 3;
+    config.num_beams = 15;

     ov::genai::LLMPipeline pipe(model_path, device);

     for (size_t i = 0; i < num_warmup; i++)
         pipe.generate(prompt, config);

-    ov::genai::GenerationMetrics metrics;
+    ov::genai::PerfMetrics metrics;
     for (size_t i = 0; i < num_iter; i++) {
         ov::genai::DecodedResults res = pipe.generate(prompt, config);
         metrics = metrics + res.metrics;
         metrics.load_time = res.metrics.load_time;
     }

     std::cout << "Load time: " << metrics.load_time << " ms" << std::endl;
     std::cout << "ttft: " << metrics.mean_ttft << " ± " << metrics.std_ttft << " ms" << std::endl;
     std::cout << "tpot: " << metrics.mean_tpot << " ± " << metrics.std_tpot << " ms" << std::endl;
-    std::cout << "Tokens/s: " << metrics.get_tokens_per_sec().first << std::endl;
+    std::cout << "Tokens/s: " << metrics.mean_throughput << std::endl;

     return 0;
 } catch (const std::exception& error) {

diff --git a/src/cpp/include/openvino/genai/generation_metrics.hpp b/src/cpp/include/openvino/genai/generation_metrics.hpp
deleted file mode 100644
index 7129e5c52b..0000000000
--- a/src/cpp/include/openvino/genai/generation_metrics.hpp
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (C) 2023-2024 Intel Corporation
-// SPDX-License-Identifier: Apache-2.0
-
-#pragma once
-
-#include
-#include
-#include
-#include
-
-namespace ov {
-namespace genai {
-
-using TimePoints = std::vector;
-
-struct GenerationMetrics {
-    GenerationMetrics() = default;
-
-    GenerationMetrics(const TimePoints& tok_times, size_t batch_size = 1);
-    GenerationMetrics(const std::vector& 
durations, const std::vector& times_to_first_token, size_t batch_size = 1); - - // First token time. - float mean_ttft; - float std_ttft; - std::vector times_to_first_token; - - // Time per output token. - float mean_tpot; - float std_tpot; - std::vector durations; - - std::pair get_tokens_per_sec() const; - size_t batch_size; - float load_time; - - GenerationMetrics operator+(GenerationMetrics const& metrics) const; -}; - -} // namespace genai -} // namespace ov diff --git a/src/cpp/include/openvino/genai/llm_pipeline.hpp b/src/cpp/include/openvino/genai/llm_pipeline.hpp index 9f0c9fba97..4db3c613e7 100644 --- a/src/cpp/include/openvino/genai/llm_pipeline.hpp +++ b/src/cpp/include/openvino/genai/llm_pipeline.hpp @@ -11,7 +11,7 @@ #include "openvino/genai/generation_config.hpp" #include "openvino/genai/tokenizer.hpp" #include "openvino/genai/streamer_base.hpp" -#include "openvino/genai/generation_metrics.hpp" +#include "openvino/genai/perf_metrics.hpp" namespace ov { namespace genai { @@ -36,7 +36,7 @@ class EncodedResults { public: std::vector> tokens; std::vector scores; - GenerationMetrics metrics; + PerfMetrics metrics; }; /** @@ -50,7 +50,7 @@ class DecodedResults { public: std::vector texts; std::vector scores; - GenerationMetrics metrics; + PerfMetrics metrics; // @brief Convert DecodedResults to a string. operator std::string() const { diff --git a/src/cpp/include/openvino/genai/perf_metrics.hpp b/src/cpp/include/openvino/genai/perf_metrics.hpp new file mode 100644 index 0000000000..a11c4e0374 --- /dev/null +++ b/src/cpp/include/openvino/genai/perf_metrics.hpp @@ -0,0 +1,50 @@ +// Copyright (C) 2023-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include "openvino/genai/visibility.hpp" +#include +#include + +namespace ov { +namespace genai { + +using TimePoint = std::chrono::steady_clock::time_point; + +struct PerfCounters; + +struct OPENVINO_GENAI_EXPORTS PerfMetrics { + // First token time. + float mean_ttft; + float std_ttft; + + // Time per output token. 
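+    // Mean and standard deviation are reported in milliseconds.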
+ float mean_tpot; + float std_tpot; + + float load_time; + float start_time; + + float mean_generate_duration; + float mean_decoding_duration; + float mean_encoding_duration; + + float mean_throughput; + float std_throughput; + + size_t num_generated_tokens; + size_t num_input_tokens; + + std::shared_ptr m_counters; + void evaluate(TimePoint start_time); + + PerfMetrics operator+(const PerfMetrics& metrics) const; + PerfMetrics& operator+=(const PerfMetrics& right); + + +}; + +} // namespace genai +} // namespace ov diff --git a/src/cpp/src/generation_metrics.cpp b/src/cpp/src/generation_metrics.cpp deleted file mode 100644 index 8ca8e0a07d..0000000000 --- a/src/cpp/src/generation_metrics.cpp +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright (C) 2023-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#include "openvino/genai/generation_metrics.hpp" -#include - -namespace { - -std::pair calc_mean_and_std(const std::vector& durations) { - float mean = std::accumulate(durations.begin(), durations.end(), 0.0f) / durations.size(); - - float sum_square_durations = std::accumulate(durations.begin(), durations.end(), 0.0f, - [](const float& acc, const float& duration) -> float { - return acc + duration * duration; - }); - float std = std::sqrt(sum_square_durations / durations.size() - mean * mean); - return {mean, std}; -} - -} // namespace - -namespace ov { -namespace genai { - - -GenerationMetrics::GenerationMetrics(const TimePoints& tok_times, size_t batch_size) { - this->batch_size = batch_size; - durations = std::vector(tok_times.size() - 1); - for (size_t i = 1; i < tok_times.size(); ++i) { - durations[i - 1] = std::chrono::duration_cast(tok_times[i] - tok_times[i - 1]).count(); - } - times_to_first_token.emplace_back(durations[0]); - - std::tie(mean_tpot, std_tpot) = calc_mean_and_std(durations); - std::tie(mean_ttft, std_ttft) = calc_mean_and_std(times_to_first_token); -} - -GenerationMetrics::GenerationMetrics(const std::vector& durations_, const std::vector& times_to_first_token_, size_t batch_size) - : durations(durations_), times_to_first_token(times_to_first_token_) { - this->batch_size = batch_size; - std::tie(mean_tpot, std_tpot) = calc_mean_and_std(durations); - std::tie(mean_ttft, std_ttft) = calc_mean_and_std(times_to_first_token); -} - -GenerationMetrics GenerationMetrics::operator+(GenerationMetrics const& metrics) const { - std::vector new_durations = durations; - std::vector new_times_to_first_token = times_to_first_token; - new_durations.insert(new_durations.end(), metrics.durations.begin(), metrics.durations.end()); - new_times_to_first_token.insert(new_times_to_first_token.end(), metrics.times_to_first_token.begin(), metrics.times_to_first_token.end()); - - return GenerationMetrics(new_durations, new_times_to_first_token); -} - -std::pair GenerationMetrics::get_tokens_per_sec() const { - auto mean_tps = 1000.0f * batch_size / mean_tpot; - auto std_tps = 1000.0f * std_tpot / (mean_tpot * mean_tpot); - return {mean_tps, std_tps}; -} - - -} // namespace genai -} // namespace ov diff --git a/src/cpp/src/greedy_decoding.cpp b/src/cpp/src/greedy_decoding.cpp index dad93a0e6e..0802b87e66 100644 --- a/src/cpp/src/greedy_decoding.cpp +++ b/src/cpp/src/greedy_decoding.cpp @@ -1,7 +1,8 @@ // Copyright (C) 2023-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 -#include "openvino/genai/llm_pipeline.hpp" +#include "openvino/genai/perf_metrics.hpp" +#include "perf_counters.hpp" #include "utils.hpp" namespace ov { @@ -22,11 +23,8 @@ EncodedResults greedy_decoding( 
size_t max_new_tokens = generation_config.get_max_new_tokens(prompt_len); EncodedResults results; - // Time before the first token generated as a reference point. - ov::genai::TimePoints tok_times; - tok_times.reserve(max_new_tokens); - tok_times.emplace_back(std::chrono::steady_clock::now()); - + auto& perf_counters = results.metrics.m_counters; + results.scores.resize(running_batch_size); results.tokens.resize(running_batch_size); std::fill(results.scores.begin(), results.scores.end(), 0); @@ -56,8 +54,8 @@ EncodedResults greedy_decoding( eos_met[batch] = (out_token == generation_config.eos_token_id); m_model_runner.get_tensor("input_ids").data()[batch] = out_token; } - tok_times.emplace_back(std::chrono::steady_clock::now()); - + perf_counters->add_timestamp(running_batch_size); + if (streamer && streamer->put(token_iter_results[0])) { return results; } @@ -88,7 +86,7 @@ EncodedResults greedy_decoding( m_model_runner.get_tensor("input_ids").data()[batch] = out_token; } - tok_times.emplace_back(std::chrono::steady_clock::now()); + perf_counters->add_timestamp(running_batch_size); if (streamer && streamer->put(token_iter_results[0])) return results; @@ -116,9 +114,8 @@ EncodedResults greedy_decoding( streamer->end(); } - results.metrics = GenerationMetrics(tok_times); return results; } } //namespace genai -} //namespace ov \ No newline at end of file +} //namespace ov diff --git a/src/cpp/src/group_beam_searcher.cpp b/src/cpp/src/group_beam_searcher.cpp index 8695aeac02..4f5cb79f2a 100644 --- a/src/cpp/src/group_beam_searcher.cpp +++ b/src/cpp/src/group_beam_searcher.cpp @@ -362,14 +362,20 @@ std::pair beam_search(ov::InferRequest& lm, std::optional selected_beam_idx) { OPENVINO_ASSERT(config.num_beams % config.num_beam_groups == 0, "number of beams should be divisible by number of groups"); - - // Initialize beam search + auto batch_size = input_ids.get_shape().at(0); + auto sequence_length = input_ids.get_shape().at(1); + + // Initialize time metric counters. + // ov::genai::TimePoints tok_times; + // tok_times.reserve(config.get_max_new_tokens(sequence_length)); + // tok_times.emplace_back(std::chrono::steady_clock::now()); + + // Initialize beam search. const int64_t* prompt_data = input_ids.data(); std::vector> prompts; prompts.reserve(batch_size); for (size_t batch = 0; batch < batch_size; batch++) { - size_t sequence_length = input_ids.get_shape().at(1); size_t batch_offset = batch * sequence_length; const int64_t* prompt_start = prompt_data + batch_offset; prompts.push_back(std::vector{prompt_start, prompt_start + sequence_length}); @@ -389,7 +395,7 @@ std::pair beam_search(ov::InferRequest& lm, lm.set_tensor("beam_idx", beam_idx); Parameters parameters{std::move(prompts)}; - parameters.max_new_tokens = config.max_new_tokens; + parameters.max_new_tokens = config.get_max_new_tokens(sequence_length); parameters.eos_token_id = config.eos_token_id; parameters.n_groups = config.num_beam_groups; parameters.group_size = config.num_beams / config.num_beam_groups; @@ -406,6 +412,8 @@ std::pair beam_search(ov::InferRequest& lm, lm.infer(); std::tie(next_tokens, next_beams) = group_beam_searcher.select_next_tokens(lm.get_tensor("logits")); + // tok_times.emplace_back(std::chrono::steady_clock::now()); + if (next_tokens.empty() || length_count == parameters.max_new_tokens - 1) { // Break the cycle before masks are extended in update_attention_mask_with_beams. // If generation is continued, attention_mask length should be equal to KV cache size. 
@@ -462,7 +470,8 @@ std::pair beam_search(ov::InferRequest& lm, results.tokens.push_back(std::move(beam->get().tokens)); } } - + + // results.metrics = PerfCounters(tok_times); return {results, res_selected_beam_idx}; } diff --git a/src/cpp/src/llm_pipeline.cpp b/src/cpp/src/llm_pipeline.cpp index 918e744286..81f807c149 100644 --- a/src/cpp/src/llm_pipeline.cpp +++ b/src/cpp/src/llm_pipeline.cpp @@ -1,6 +1,7 @@ // Copyright (C) 2023-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 +#include "perf_counters.hpp" #include #include #include @@ -9,7 +10,7 @@ #include #include "openvino/genai/generation_config.hpp" #include "openvino/genai/llm_pipeline.hpp" -#include "openvino/genai/generation_metrics.hpp" +#include "openvino/genai/perf_metrics.hpp" #include "llm_pipeline_base.hpp" #include "llm_pipeline_static.hpp" #include "utils.hpp" @@ -111,8 +112,9 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { OptionalGenerationConfig generation_config, StreamerVariant streamer ) override { + auto start_time = std::chrono::steady_clock::now(); GenerationConfig config = (generation_config.has_value()) ? *generation_config : m_generation_config; - EncodedInputs encoded_input; + TokenizedInputs encoded_input; if (auto input_vector = std::get_if>(&inputs)) { encoded_input = m_tokenizer.encode(*input_vector); @@ -144,9 +146,12 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { encoded_input = m_tokenizer.encode(prompt); } } + auto encode_stop_time = std::chrono::steady_clock::now(); + auto encoded_results = generate(encoded_input, config, streamer); - auto encoded_results = generate(encoded_input, config, streamer); + auto decode_start_time = std::chrono::steady_clock::now(); DecodedResults decoded_results = {m_tokenizer.decode(encoded_results.tokens), encoded_results.scores}; + auto decode_stop_time = std::chrono::steady_clock::now(); if (is_chat_conversation) { // Tail of chat template is missing in KV cache. 
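        // The generated answer is therefore appended to the history below to keep it in sync.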
@@ -155,9 +160,14 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { m_templated_chat_history.append(answer); m_history.push_back({{"role", "assistant"}, {"content", answer}}); } + + auto& metrics = encoded_results.metrics; + // metrics.tokenization_duration = std::chrono::duration_cast(encode_stop_time - start_time).count(); + // metrics.detokenization_duration = std::chrono::duration_cast(decode_stop_time - decode_start_time).count(); - decoded_results.metrics = std::move(encoded_results.metrics); - decoded_results.metrics.load_time = m_load_time_ms; + // auto stop_time = std::chrono::steady_clock::now(); + // metrics.generate_durations.emplace_back(std::chrono::duration_cast(stop_time - start_time).count()); + decoded_results.metrics = std::move(metrics); return decoded_results; } @@ -166,9 +176,9 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { OptionalGenerationConfig generation_config, StreamerVariant streamer ) override { + auto start_time = std::chrono::steady_clock::now(); ov::Tensor input_ids; ov::Tensor attention_mask; - if (auto data = std::get_if(&inputs)) { input_ids = *data; attention_mask = ov::genai::utils::init_attention_mask(input_ids); @@ -256,6 +266,14 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { } else { m_is_cache_empty = false; } + + + + auto& metrics = result.metrics; + // metrics.batch_size = batch_size; + // metrics.num_generated_tokens = (metrics.m_durations.size() + 1) * batch_size; + metrics.num_input_tokens = batch_size * input_ids.get_shape().at(0); + result.metrics = std::move(metrics); return result; } diff --git a/src/cpp/src/perf_counters.cpp b/src/cpp/src/perf_counters.cpp new file mode 100644 index 0000000000..c9dac6eca0 --- /dev/null +++ b/src/cpp/src/perf_counters.cpp @@ -0,0 +1,21 @@ +// Copyright (C) 2023-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#include "perf_counters.hpp" +#include "openvino/genai/perf_metrics.hpp" +#include "openvino/openvino.hpp" +#include +#include +#include + +namespace ov { +namespace genai { + +void PerfCounters::add_timestamp(size_t batch_size) { + m_new_token_times.emplace_back(std::chrono::steady_clock::now()); + m_batch_sizes.emplace_back(batch_size); +} + + +} // namespace genai +} // namespace ov diff --git a/src/cpp/src/perf_counters.hpp b/src/cpp/src/perf_counters.hpp new file mode 100644 index 0000000000..7d33490205 --- /dev/null +++ b/src/cpp/src/perf_counters.hpp @@ -0,0 +1,44 @@ +// Copyright (C) 2023-2024 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 + +#pragma once + +#include +#include +#include + +namespace ov { +namespace genai { + +struct PerfCounters { + std::vector generate_durations; + std::vector tokenization_duration; + std::vector detokenization_duration; + size_t num_generated_tokens; + size_t num_input_tokens; + + std::vector m_batch_sizes; + std::vector m_durations; + std::vector m_times_to_first_token; + std::vector m_new_token_times; + void add_timestamp(size_t batch_size); + // void add_gen_finish_timestamp(size_t batch_size); + +}; + +// class StopWatch { +// TimePoint m_start; +// public: +// StopWatch& start() { +// m_start = std::chrono::steady_clock::now(); +// return *this; +// } + +// float split() { +// std::chrono::steady_clock::time_point curr_time = std::chrono::steady_clock::now(); +// return std::chrono::duration_cast(curr_time - m_start).count(); +// } +// }; + +} // namespace genai +} // namespace ov diff --git a/src/cpp/src/perf_metrics.cpp b/src/cpp/src/perf_metrics.cpp new file mode 
100644
index 0000000000..4a8b1d76c6
--- /dev/null
+++ b/src/cpp/src/perf_metrics.cpp
@@ -0,0 +1,81 @@
+// Copyright (C) 2023-2024 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+
+#include "openvino/genai/perf_metrics.hpp"
+#include "perf_counters.hpp"
+#include "openvino/openvino.hpp"
+#include
+#include
+#include
+
+namespace {
+
+std::pair<float, float> calc_mean_and_std(const std::vector<float>& durations) {
+    float mean = std::accumulate(durations.begin(), durations.end(), 0.0f) / durations.size();
+
+    float sum_square_durations = std::accumulate(durations.begin(), durations.end(), 0.0f,
+        [](const float& acc, const float& duration) -> float {
+            return acc + duration * duration;
+        });
+    float std = std::sqrt(sum_square_durations / durations.size() - mean * mean);
+    return {mean, std};
+}
+
+} // namespace
+
+namespace ov {
+namespace genai {
+
+void PerfMetrics::evaluate(TimePoint start_time) {
+
+    auto& tok_times = m_counters->m_new_token_times;
+    auto& batch_sizes = m_counters->m_batch_sizes;
+    m_counters->m_durations = std::vector<float>(tok_times.size());
+
+    auto ttft = std::chrono::duration_cast<std::chrono::milliseconds>(tok_times[0] - start_time).count();
+    m_counters->m_times_to_first_token.emplace_back(ttft);
+
+    for (size_t i = 0; i < tok_times.size(); ++i) {
+        m_counters->m_durations[i] = std::chrono::duration_cast<std::chrono::milliseconds>(tok_times[i] - start_time).count();
+        // If a batch of 5 new tokens is generated in 10 ms, then TPOT is 10 ms / 5 = 2 ms.
+        // TODO: double check that it's valid for batch > 1.
+        m_counters->m_durations[i] /= batch_sizes[i];
+        start_time = tok_times[i];
+    }
+
+    std::tie(mean_tpot, std_tpot) = calc_mean_and_std(m_counters->m_durations);
+    std::tie(mean_ttft, std_ttft) = calc_mean_and_std(m_counters->m_times_to_first_token);
+}
+
+PerfMetrics PerfMetrics::operator+(const PerfMetrics& metrics) const {
+    PerfMetrics nm;  // new metrics
+    nm.m_counters = m_counters;
+    auto& new_counters = nm.m_counters;
+
+    auto& new_durations = new_counters->m_durations;
+    auto& new_times_to_first_token = new_counters->m_times_to_first_token;
+
+    auto& counters_to_append = metrics.m_counters;
+    new_durations.insert(new_durations.end(), counters_to_append->m_durations.begin(), counters_to_append->m_durations.end());
+    new_times_to_first_token.insert(new_times_to_first_token.end(), counters_to_append->m_times_to_first_token.begin(), counters_to_append->m_times_to_first_token.end());
+
+    OPENVINO_ASSERT(metrics.load_time == load_time, "generation metrics can be accumulated only for the same pipeline");
+
+    std::tie(nm.mean_tpot, nm.std_tpot) = calc_mean_and_std(new_counters->m_durations);
+    std::tie(nm.mean_ttft, nm.std_ttft) = calc_mean_and_std(new_counters->m_times_to_first_token);
+
+    // TODO: add tokenization statistics concatenation.
+
+    return nm;
+}
+
+PerfMetrics& PerfMetrics::operator+=(const PerfMetrics& right) {
+    *this = *this + right;
+    return *this;
+}
+
+} // namespace genai
+} // namespace ov

diff --git a/src/cpp/src/tokenizer.cpp b/src/cpp/src/tokenizer.cpp
index ac6b925dcb..501d0e86cf 100644
--- a/src/cpp/src/tokenizer.cpp
+++ b/src/cpp/src/tokenizer.cpp
@@ -323,6 +323,8 @@ class Tokenizer::TokenizerImpl {

     // Replace what jinja2cpp doesn't support
     std::pair<std::string, std::string> replace_str_map[] = {
+        {"{-", "{"},
+        {"{%-", "{%"},
         {"'}", "' }"},
         {"{'", "{ '"},
         {".strip()", ""}

diff --git a/src/cpp/src/utils.hpp b/src/cpp/src/utils.hpp
index 25acc1c87f..446ef8549b 100644
--- a/src/cpp/src/utils.hpp
+++ b/src/cpp/src/utils.hpp
@@ -12,6 +12,20 @@ namespace ov {
 namespace genai {
 namespace utils {

+#include
+#include
+#include
+
+// Templated function to measure the execution time of an object method.
+template <typename T, typename Ret, typename... Args>
+std::pair<Ret, float> execution_time_wrapper(T& instance, Ret(T::*method)(Args...), Args&&... args) {
+    auto start = std::chrono::steady_clock::now();
+    Ret result = (instance.*method)(std::forward<Args>(args)...);
+    auto end = std::chrono::steady_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count();
+    return {result, duration};
+}
+
 Tensor init_attention_mask(const Tensor& position_ids);

 void print_tensor(const ov::Tensor& tensor);

diff --git a/src/python/py_generate_pipeline.cpp b/src/python/py_generate_pipeline.cpp
index d7b2aab29c..c78c760b6c 100644
--- a/src/python/py_generate_pipeline.cpp
+++ b/src/python/py_generate_pipeline.cpp
@@ -21,6 +21,7 @@ using ov::genai::GenerationConfig;
 using ov::genai::GenerationResult;
 using ov::genai::LLMPipeline;
 using ov::genai::OptionalGenerationConfig;
+using ov::genai::PerfMetrics;
 using ov::genai::SchedulerConfig;
 using ov::genai::StopCriteria;
 using ov::genai::StreamerBase;
@@ -536,6 +537,19 @@ PYBIND11_MODULE(py_generate_pipeline, m) {
         .def_readonly("scores", &DecodedResults::scores)
         .def("__str__", &DecodedResults::operator std::string);

+    py::class_<PerfMetrics>(m, "PerfMetrics")
+        .def(py::init<>())
+        .def_readonly("mean_generate_duration", &PerfMetrics::mean_generate_duration)
+        .def_readonly("mean_decoding_duration", &PerfMetrics::mean_decoding_duration)
+        .def_readonly("mean_encoding_duration", &PerfMetrics::mean_encoding_duration)
+        .def_readonly("mean_tpot", &PerfMetrics::mean_tpot)
+        .def_readonly("mean_ttft", &PerfMetrics::mean_ttft)
+        .def_readonly("std_tpot", &PerfMetrics::std_tpot)
+        .def_readonly("std_ttft", &PerfMetrics::std_ttft)
+        .def_readonly("load_time", &PerfMetrics::load_time)
+        .def("__add__", &PerfMetrics::operator+)
+        .def("__iadd__", &PerfMetrics::operator+=);
+
     py::class_<TokenizedInputs>(m, "TokenizedInputs")
         .def(py::init())
         .def_readwrite("input_ids", &TokenizedInputs::input_ids)

diff --git a/tests/python_tests/ov_genai_test_utils.py b/tests/python_tests/ov_genai_test_utils.py
index 4ba71a1d48..5d038e65e2 100644
--- a/tests/python_tests/ov_genai_test_utils.py
+++ b/tests/python_tests/ov_genai_test_utils.py
@@ -81,6 +81,8 @@ def get_chat_templates():
     # but skips some models that currently are not processed correctly.

     skipped_models = {
+        "berkeley-nest/Starling-LM-7B-alpha",  # TODO: Need to enable and unskip, since it's present in continuous batching and has ~30 000 downloads.
+
         # These models fail even on HF so no need to check if applying chat matches.
        "vibhorag101/llama-2-13b-chat-hf-phr_mental_therapy",
        "codellama/CodeLlama-34b-Instruct-hf",

From bb1113ce69dc0126a1b83a66394f63d09146044a Mon Sep 17 00:00:00 2001
From: Pavel Esir
Date: Mon, 22 Jul 2024 13:10:03 +0200
Subject: [PATCH 04/11] refactor structure, add python sample

---
 samples/cpp/benchmark_vanilla_genai/README.md |   1 +
 .../benchmark_vanilla_genai.cpp               |  22 ++--
 .../python/benchmark_vanilla_genai/README.md  |  66 ++++++++++++
 .../benchmark_vanilla_genai.py                |  50 +++++++++
 .../include/openvino/genai/llm_pipeline.hpp   |   2 +
 .../include/openvino/genai/perf_metrics.hpp   |  37 +++++--
 src/cpp/src/greedy_decoding.cpp               |  10 +-
 src/cpp/src/group_beam_searcher.cpp           |  20 ++--
 src/cpp/src/llm_pipeline.cpp                  |  31 +++---
 src/cpp/src/perf_counters.cpp                 |  21 ----
 src/cpp/src/perf_counters.hpp                 |  44 --------
 src/cpp/src/perf_metrics.cpp                  | 100 +++++++++++-------
 src/cpp/src/tokenizer.cpp                     |   2 -
 src/python/py_generate_pipeline.cpp           |  25 ++++-
 tests/python_tests/ov_genai_test_utils.py     |   2 -
 15 files changed, 279 insertions(+), 154 deletions(-)
 create mode 100644 samples/python/benchmark_vanilla_genai/README.md
 create mode 100755 samples/python/benchmark_vanilla_genai/benchmark_vanilla_genai.py
 delete mode 100644 src/cpp/src/perf_counters.cpp
 delete mode 100644 src/cpp/src/perf_counters.hpp

diff --git a/samples/cpp/benchmark_vanilla_genai/README.md b/samples/cpp/benchmark_vanilla_genai/README.md
index 739c2e950c..50197dad1d 100644
--- a/samples/cpp/benchmark_vanilla_genai/README.md
+++ b/samples/cpp/benchmark_vanilla_genai/README.md
@@ -1,2 +1,3 @@
 # benchmark OpenVINO GenAI sample

+TODO: adapt from python sample to c++
\ No newline at end of file

diff --git a/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp b/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp
index 6489282b0b..6d96d24fc5 100644
--- a/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp
+++ b/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp
@@ -11,7 +11,8 @@ int main(int argc, char* argv[]) try {
         ("p,prompt", "Prompt", cxxopts::value<std::string>()->default_value("The Sky is blue because"))
         ("m,model", "Path to model and tokenizers base directory", cxxopts::value<std::string>()->default_value("."))
         ("nw,num_warmup", "Number of warmup iterations", cxxopts::value<size_t>()->default_value(std::to_string(1)))
-        ("n,num_iter", "Number of iterations", cxxopts::value<size_t>()->default_value(std::to_string(1)))
+        ("n,num_iter", "Number of iterations", cxxopts::value<size_t>()->default_value(std::to_string(5)))
+        ("mt,max_new_tokens", "Maximal number of new tokens", cxxopts::value<size_t>()->default_value(std::to_string(20)))
         ("d,device", "device", cxxopts::value<std::string>()->default_value("CPU"))
         ("h,help", "Print usage");

@@ -36,26 +37,27 @@ int main(int argc, char* argv[]) try {
     size_t num_iter = result["num_iter"].as<size_t>();

     ov::genai::GenerationConfig config;
-    config.max_new_tokens = 100;
-    config.num_beam_groups = 3;
-    config.num_beams = 15;
+    config.max_new_tokens = result["max_new_tokens"].as<size_t>();

     ov::genai::LLMPipeline pipe(model_path, device);

     for (size_t i = 0; i < num_warmup; i++)
         pipe.generate(prompt, config);

-    ov::genai::PerfMetrics metrics;
-    for (size_t i = 0; i < num_iter; i++) {
-        ov::genai::DecodedResults res = pipe.generate(prompt, config);
+    ov::genai::DecodedResults res = pipe.generate(prompt, config);
+    ov::genai::PerfMetrics metrics = res.metrics;
+    for (size_t i = 0; i < num_iter - 1; i++) {
+        res = pipe.generate(prompt, config);
         metrics = metrics + res.metrics;
-        metrics.load_time = res.metrics.load_time;
     }

     std::cout << "Load time: " << 
metrics.load_time << " ms" << std::endl;
+    std::cout << "Generate time: " << metrics.mean_generate_duration << " ± " << metrics.std_generate_duration << " ms" << std::endl;
+    std::cout << "Tokenization time: " << metrics.mean_tokenization_duration << " ± " << metrics.std_tokenization_duration << " ms" << std::endl;
+    std::cout << "Detokenization time: " << metrics.mean_detokenization_duration << " ± " << metrics.std_detokenization_duration << " ms" << std::endl;
     std::cout << "ttft: " << metrics.mean_ttft << " ± " << metrics.std_ttft << " ms" << std::endl;
     std::cout << "tpot: " << metrics.mean_tpot << " ± " << metrics.std_tpot << " ms" << std::endl;
-    std::cout << "Tokens/s: " << metrics.mean_throughput << std::endl;
+    std::cout << "Tokens/s: " << metrics.mean_throughput << " ± " << metrics.std_throughput << std::endl;

     return 0;
 } catch (const std::exception& error) {

diff --git a/samples/python/benchmark_vanilla_genai/README.md b/samples/python/benchmark_vanilla_genai/README.md
new file mode 100644
index 0000000000..af66ea545d
--- /dev/null
+++ b/samples/python/benchmark_vanilla_genai/README.md
@@ -0,0 +1,66 @@
+# Benchmark Vanilla GenAI
+
+This sample script demonstrates how to benchmark an LLM in OpenVINO GenAI. The script includes functionality for warm-up iterations, generating text, and calculating various performance metrics.
+
+## ov.genai.PerfMetrics structure
+
+ov.genai.PerfMetrics is a structure that holds performance metrics for each generate call. Each generate call calculates the following metrics:
+- mean_ttft
+- std_ttft
+- mean_tpot
+- std_tpot
+- load_time
+- mean_generate_duration
+- std_generate_duration
+- mean_tokenization_duration
+- std_tokenization_duration
+- mean_detokenization_duration
+- std_detokenization_duration
+- mean_throughput
+- std_throughput
+- num_generated_tokens
+- num_input_tokens
+
+Performance metrics from several generate calls can be accumulated with the + or += operators; the means and standard deviations are then recalculated over all accumulated calls (see the example below).
+
+## Download and convert the model and tokenizers
+
+The `--upgrade-strategy eager` option is needed to ensure `optimum-intel` is upgraded to the latest version.
+
+It's not required to install [../../requirements.txt](../../requirements.txt) for deployment if the model has already been exported.
+
+```sh
+pip install --upgrade-strategy eager -r ../../requirements.txt
+optimum-cli export openvino --trust-remote-code --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 TinyLlama-1.1B-Chat-v1.0
+```
+
+## Usage
+
+```sh
+python benchmark_vanilla_genai.py [OPTIONS]
+```
+
+### Options
+
+- `-m, --model`: Path to the model and tokenizers base directory.
+- `-p, --prompt` (default: `"The Sky is blue because"`): The prompt to generate text.
+- `-nw, --num_warmup` (default: `1`): Number of warmup iterations.
+- `-mt, --max_new_tokens` (default: `20`): Maximal number of new tokens.
+- `-n, --num_iter` (default: `3`): Number of iterations.
+- `-d, --device` (default: `"CPU"`): Device to run the model on.
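+
+### Example: accumulating metrics
+
+A minimal sketch of the accumulation pattern used by the sample (the model folder name assumes the TinyLlama export from above):
+
+```python
+import openvino_genai as ov_genai
+
+pipe = ov_genai.LLMPipeline("TinyLlama-1.1B-Chat-v1.0", "CPU")
+config = ov_genai.GenerationConfig()
+config.max_new_tokens = 20
+
+# Metrics of the first call are the starting point.
+res = pipe.generate(["The Sky is blue because"], config)
+metrics = res.metrics
+for _ in range(2):
+    res = pipe.generate(["The Sky is blue because"], config)
+    metrics += res.metrics  # means/stds are recalculated over all calls
+
+print(f"ttft: {metrics.mean_ttft:.2f} ± {metrics.std_ttft:.2f} ms")
+```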
+
+### Output:
+
+```
+python benchmark_vanilla_genai.py -m TinyLlama-1.1B-Chat-v1.0/
+```
+
+```
+Load time: 3446 ms
+Generate time: 876.2 ± 3.30719 ms
+Tokenization time: 0 ± 0 ms
+Detokenization time: 0 ± 0 ms
+ttft: 168 ± 0 ms
+tpot: 174.68 ± 4.08671 ms
+Tokens/s: 5.72475 ± 0.133933
+```

diff --git a/samples/python/benchmark_vanilla_genai/benchmark_vanilla_genai.py b/samples/python/benchmark_vanilla_genai/benchmark_vanilla_genai.py
new file mode 100755
index 0000000000..4c87234179
--- /dev/null
+++ b/samples/python/benchmark_vanilla_genai/benchmark_vanilla_genai.py
@@ -0,0 +1,50 @@
+# Copyright (C) 2023-2024 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+
+import argparse
+import openvino_genai as ov_genai
+
+def main():
+    parser = argparse.ArgumentParser(description="Help command")
+    parser.add_argument("-m", "--model", type=str, help="Path to model and tokenizers base directory")
+    parser.add_argument("-p", "--prompt", type=str, default="The Sky is blue because", help="Prompt")
+    parser.add_argument("-nw", "--num_warmup", type=int, default=1, help="Number of warmup iterations")
+    parser.add_argument("-n", "--num_iter", type=int, default=3, help="Number of iterations")
+    parser.add_argument("-mt", "--max_new_tokens", type=int, default=20, help="Maximal number of new tokens")
+    parser.add_argument("-d", "--device", type=str, default="CPU", help="Device")
+
+    args = parser.parse_args()
+
+    prompt = [args.prompt]
+    model_path = args.model
+    device = args.device
+    num_warmup = args.num_warmup
+    num_iter = args.num_iter
+
+    config = ov_genai.GenerationConfig()
+    config.max_new_tokens = args.max_new_tokens
+
+    pipe = ov_genai.LLMPipeline(model_path, device)
+
+    for _ in range(num_warmup):
+        pipe.generate(prompt, config)
+
+    res = pipe.generate(prompt, config)
+    metrics = res.metrics
+    for _ in range(num_iter - 1):
+        res = pipe.generate(prompt, config)
+        metrics += res.metrics
+
+    print(f"Load time: {metrics.load_time} ms")
+    print(f"Generate time: {metrics.mean_generate_duration:.2f} ± {metrics.std_generate_duration:.2f} ms")
+    print(f"Tokenization time: {metrics.mean_tokenization_duration:.2f} ± {metrics.std_tokenization_duration:.2f} ms")
+    print(f"Detokenization time: {metrics.mean_detokenization_duration:.2f} ± {metrics.std_detokenization_duration:.2f} ms")
+    print(f"TTFT: {metrics.mean_ttft:.2f} ± {metrics.std_ttft:.2f} ms")
+    print(f"TPOT: {metrics.mean_tpot:.2f} ± {metrics.std_tpot:.2f} ms")
+    print(f"Throughput tokens/s: {metrics.mean_throughput:.2f} ± {metrics.std_throughput:.2f}")
+
+if __name__ == "__main__":
+    main()

diff --git a/src/cpp/include/openvino/genai/llm_pipeline.hpp b/src/cpp/include/openvino/genai/llm_pipeline.hpp
index 4db3c613e7..14100d4f16 100644
--- a/src/cpp/include/openvino/genai/llm_pipeline.hpp
+++ b/src/cpp/include/openvino/genai/llm_pipeline.hpp
@@ -31,6 +31,7 @@ using StringInputs = std::variant>;
 *
 * @param tokens sequence of resulting tokens
 * @param scores sum of logarithmic probabilities of all tokens in the sequence
+* @param metrics performance metrics with tpot, ttft, etc. of type ov::genai::PerfMetrics
 */
 class EncodedResults {
 public:
@@ -45,6 +46,7 @@ class EncodedResults {
 *
 * @param texts vector of resulting sequences
 * @param scores scores for each sequence
+* @param metrics performance metrics with tpot, ttft, etc. 
of type ov::genai::PerfMetrics */ class DecodedResults { public: diff --git a/src/cpp/include/openvino/genai/perf_metrics.hpp b/src/cpp/include/openvino/genai/perf_metrics.hpp index a11c4e0374..e66c917e81 100644 --- a/src/cpp/include/openvino/genai/perf_metrics.hpp +++ b/src/cpp/include/openvino/genai/perf_metrics.hpp @@ -7,14 +7,34 @@ #include "openvino/genai/visibility.hpp" #include #include +#include namespace ov { namespace genai { using TimePoint = std::chrono::steady_clock::time_point; -struct PerfCounters; +/** +* @brief Structure with raw performance metrics for each generation before any statistics calculated. +*/ +struct OPENVINO_GENAI_EXPORTS RawPerfMetrics { + std::vector generate_durations; + std::vector tokenization_durations; + std::vector detokenization_durations; + + std::vector m_times_to_first_token; + std::vector m_new_token_times; + std::vector m_batch_sizes; + std::vector m_durations; + size_t num_generated_tokens; + size_t num_input_tokens; +}; + +/** +* @brief Structure to store performance metric for each generation +* +*/ struct OPENVINO_GENAI_EXPORTS PerfMetrics { // First token time. float mean_ttft; @@ -25,11 +45,13 @@ struct OPENVINO_GENAI_EXPORTS PerfMetrics { float std_tpot; float load_time; - float start_time; float mean_generate_duration; - float mean_decoding_duration; - float mean_encoding_duration; + float std_generate_duration; + float mean_tokenization_duration; + float std_tokenization_duration; + float mean_detokenization_duration; + float std_detokenization_duration; float mean_throughput; float std_throughput; @@ -37,13 +59,12 @@ struct OPENVINO_GENAI_EXPORTS PerfMetrics { size_t num_generated_tokens; size_t num_input_tokens; - std::shared_ptr m_counters; - void evaluate(TimePoint start_time); - + void evaluate_statistics(std::optional start_time = std::nullopt); + static float get_duration_ms(std::chrono::steady_clock::duration duration); PerfMetrics operator+(const PerfMetrics& metrics) const; PerfMetrics& operator+=(const PerfMetrics& right); - + RawPerfMetrics raw_counters; }; } // namespace genai diff --git a/src/cpp/src/greedy_decoding.cpp b/src/cpp/src/greedy_decoding.cpp index 0802b87e66..c5bf10a2d1 100644 --- a/src/cpp/src/greedy_decoding.cpp +++ b/src/cpp/src/greedy_decoding.cpp @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 #include "openvino/genai/perf_metrics.hpp" -#include "perf_counters.hpp" +// #include "perf_counters.hpp" #include "utils.hpp" namespace ov { @@ -23,7 +23,7 @@ EncodedResults greedy_decoding( size_t max_new_tokens = generation_config.get_max_new_tokens(prompt_len); EncodedResults results; - auto& perf_counters = results.metrics.m_counters; + auto& raw_perf_counters = results.metrics.raw_counters; results.scores.resize(running_batch_size); results.tokens.resize(running_batch_size); @@ -54,7 +54,8 @@ EncodedResults greedy_decoding( eos_met[batch] = (out_token == generation_config.eos_token_id); m_model_runner.get_tensor("input_ids").data()[batch] = out_token; } - perf_counters->add_timestamp(running_batch_size); + raw_perf_counters.m_new_token_times.emplace_back(std::chrono::steady_clock::now()); + raw_perf_counters.m_batch_sizes.emplace_back(batch_size); if (streamer && streamer->put(token_iter_results[0])) { return results; @@ -86,7 +87,8 @@ EncodedResults greedy_decoding( m_model_runner.get_tensor("input_ids").data()[batch] = out_token; } - perf_counters->add_timestamp(running_batch_size); + raw_perf_counters.m_new_token_times.emplace_back(std::chrono::steady_clock::now()); + 
raw_perf_counters.m_batch_sizes.emplace_back(batch_size); if (streamer && streamer->put(token_iter_results[0])) return results; diff --git a/src/cpp/src/group_beam_searcher.cpp b/src/cpp/src/group_beam_searcher.cpp index 4f5cb79f2a..784ff1a915 100644 --- a/src/cpp/src/group_beam_searcher.cpp +++ b/src/cpp/src/group_beam_searcher.cpp @@ -366,11 +366,6 @@ std::pair beam_search(ov::InferRequest& lm, auto batch_size = input_ids.get_shape().at(0); auto sequence_length = input_ids.get_shape().at(1); - // Initialize time metric counters. - // ov::genai::TimePoints tok_times; - // tok_times.reserve(config.get_max_new_tokens(sequence_length)); - // tok_times.emplace_back(std::chrono::steady_clock::now()); - // Initialize beam search. const int64_t* prompt_data = input_ids.data(); std::vector> prompts; @@ -407,12 +402,19 @@ std::pair beam_search(ov::InferRequest& lm, std::vector next_tokens; std::vector next_beams; - + + // Reserve for performance counters. + std::vector new_token_times; + std::vector batch_sizes; + new_token_times.reserve(parameters.max_new_tokens); + batch_sizes.reserve(parameters.max_new_tokens); + for (size_t length_count = 0; ; ++length_count) { lm.infer(); std::tie(next_tokens, next_beams) = group_beam_searcher.select_next_tokens(lm.get_tensor("logits")); - // tok_times.emplace_back(std::chrono::steady_clock::now()); + new_token_times.emplace_back(std::chrono::steady_clock::now()); + batch_sizes.emplace_back(batch_size); if (next_tokens.empty() || length_count == parameters.max_new_tokens - 1) { // Break the cycle before masks are extended in update_attention_mask_with_beams. @@ -442,6 +444,9 @@ std::pair beam_search(ov::InferRequest& lm, int32_t res_selected_beam_idx = 0; results.scores.reserve(config.num_return_sequences * result.size()); results.tokens.reserve(config.num_return_sequences * result.size()); + auto& raw_perf_counters = results.metrics.raw_counters; + raw_perf_counters.m_new_token_times = new_token_times; + raw_perf_counters.m_batch_sizes = batch_sizes; // align output with HF for (size_t prompt_id = 0; prompt_id < result.size(); prompt_id++) { @@ -471,7 +476,6 @@ std::pair beam_search(ov::InferRequest& lm, } } - // results.metrics = PerfCounters(tok_times); return {results, res_selected_beam_idx}; } diff --git a/src/cpp/src/llm_pipeline.cpp b/src/cpp/src/llm_pipeline.cpp index 81f807c149..5241142afe 100644 --- a/src/cpp/src/llm_pipeline.cpp +++ b/src/cpp/src/llm_pipeline.cpp @@ -1,7 +1,6 @@ // Copyright (C) 2023-2024 Intel Corporation // SPDX-License-Identifier: Apache-2.0 -#include "perf_counters.hpp" #include #include #include @@ -160,14 +159,18 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { m_templated_chat_history.append(answer); m_history.push_back({{"role", "assistant"}, {"content", answer}}); } - - auto& metrics = encoded_results.metrics; - // metrics.tokenization_duration = std::chrono::duration_cast(encode_stop_time - start_time).count(); - // metrics.detokenization_duration = std::chrono::duration_cast(decode_stop_time - decode_start_time).count(); - // auto stop_time = std::chrono::steady_clock::now(); - // metrics.generate_durations.emplace_back(std::chrono::duration_cast(stop_time - start_time).count()); - decoded_results.metrics = std::move(metrics); + // generate_durations + decoded_results.metrics = encoded_results.metrics; + + auto& raw_counters = decoded_results.metrics.raw_counters; + auto stop_time = std::chrono::steady_clock::now(); + + raw_counters.generate_durations.emplace_back(PerfMetrics::get_duration_ms(stop_time - 
start_time)); + raw_counters.tokenization_durations.emplace_back(PerfMetrics::get_duration_ms(encode_stop_time - start_time)); + raw_counters.detokenization_durations.emplace_back(PerfMetrics::get_duration_ms(decode_stop_time - decode_start_time)); + + decoded_results.metrics.evaluate_statistics(start_time); return decoded_results; } @@ -267,13 +270,11 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { m_is_cache_empty = false; } - - + // If is called without tokenization then that stat will not be reported. auto& metrics = result.metrics; - // metrics.batch_size = batch_size; - // metrics.num_generated_tokens = (metrics.m_durations.size() + 1) * batch_size; - metrics.num_input_tokens = batch_size * input_ids.get_shape().at(0); - result.metrics = std::move(metrics); + metrics.num_input_tokens = batch_size * input_ids.get_shape().at(1); + metrics.load_time = this->m_load_time_ms; + metrics.evaluate_statistics(start_time); return result; } @@ -390,7 +391,7 @@ ov::genai::LLMPipeline::LLMPipeline( m_pimpl = make_unique(std::filesystem::path(path), device, config); } auto stop_time = std::chrono::steady_clock::now(); - m_pimpl->m_load_time_ms = std::chrono::duration_cast(stop_time - start_time).count(); + m_pimpl->m_load_time_ms = PerfMetrics::get_duration_ms(stop_time - start_time); } ov::genai::GenerationConfig ov::genai::LLMPipeline::get_generation_config() const { diff --git a/src/cpp/src/perf_counters.cpp b/src/cpp/src/perf_counters.cpp deleted file mode 100644 index c9dac6eca0..0000000000 --- a/src/cpp/src/perf_counters.cpp +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2023-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#include "perf_counters.hpp" -#include "openvino/genai/perf_metrics.hpp" -#include "openvino/openvino.hpp" -#include -#include -#include - -namespace ov { -namespace genai { - -void PerfCounters::add_timestamp(size_t batch_size) { - m_new_token_times.emplace_back(std::chrono::steady_clock::now()); - m_batch_sizes.emplace_back(batch_size); -} - - -} // namespace genai -} // namespace ov diff --git a/src/cpp/src/perf_counters.hpp b/src/cpp/src/perf_counters.hpp deleted file mode 100644 index 7d33490205..0000000000 --- a/src/cpp/src/perf_counters.hpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2023-2024 Intel Corporation -// SPDX-License-Identifier: Apache-2.0 - -#pragma once - -#include -#include -#include - -namespace ov { -namespace genai { - -struct PerfCounters { - std::vector generate_durations; - std::vector tokenization_duration; - std::vector detokenization_duration; - size_t num_generated_tokens; - size_t num_input_tokens; - - std::vector m_batch_sizes; - std::vector m_durations; - std::vector m_times_to_first_token; - std::vector m_new_token_times; - void add_timestamp(size_t batch_size); - // void add_gen_finish_timestamp(size_t batch_size); - -}; - -// class StopWatch { -// TimePoint m_start; -// public: -// StopWatch& start() { -// m_start = std::chrono::steady_clock::now(); -// return *this; -// } - -// float split() { -// std::chrono::steady_clock::time_point curr_time = std::chrono::steady_clock::now(); -// return std::chrono::duration_cast(curr_time - m_start).count(); -// } -// }; - -} // namespace genai -} // namespace ov diff --git a/src/cpp/src/perf_metrics.cpp b/src/cpp/src/perf_metrics.cpp index 4a8b1d76c6..3947793802 100644 --- a/src/cpp/src/perf_metrics.cpp +++ b/src/cpp/src/perf_metrics.cpp @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 #include "openvino/genai/perf_metrics.hpp" -#include 
"perf_counters.hpp" #include "openvino/openvino.hpp" #include #include @@ -17,7 +16,7 @@ std::pair calc_mean_and_std(const std::vector& durations) { [](const float& acc, const float& duration) -> float { return acc + duration * duration; }); - float std = std::sqrt(sum_square_durations / durations.size() - mean * mean); + float std = std::sqrt(sum_square_durations / durations.size() - mean * mean); return {mean, std}; } @@ -26,48 +25,77 @@ std::pair calc_mean_and_std(const std::vector& durations) { namespace ov { namespace genai { - -void PerfMetrics::evaluate(TimePoint start_time) { - - auto& tok_times = m_counters->m_new_token_times; - auto& batch_sizes = m_counters->m_batch_sizes; - m_counters->m_durations = std::vector(tok_times.size()); - auto ttft = std::chrono::duration_cast(tok_times[0] - start_time).count(); - m_counters->m_times_to_first_token.emplace_back(ttft); +float PerfMetrics::get_duration_ms(std::chrono::steady_clock::duration duration) { + return std::chrono::duration_cast(duration).count(); +} - for (size_t i = 0; i < tok_times.size(); ++i) { - m_counters->m_durations[i] = std::chrono::duration_cast(tok_times[i] - start_time).count(); - // If in 10 ms a batch of 5 new tokens is generated then TTOT is 10 ms / 5. - // todo: float check that it's valid for batch > 1. - m_counters->m_durations[i] /= batch_sizes[i]; - start_time = tok_times[i]; - } +void PerfMetrics::evaluate_statistics(std::optional start_time) { + // If start_tiem is specified then recalcualte durations according to start times and calculate statistics only after that. + if (start_time.has_value()) { + auto start_time_val = *start_time; + auto& tok_times = raw_counters.m_new_token_times; + auto& batch_sizes = raw_counters.m_batch_sizes; + raw_counters.m_durations = std::vector(tok_times.size()); - std::tie(mean_tpot, std_tpot) = calc_mean_and_std(m_counters->m_durations); - std::tie(mean_ttft, std_ttft) = calc_mean_and_std(m_counters->m_times_to_first_token); -} + auto ttft = std::chrono::duration_cast(tok_times[0] - start_time_val).count(); + raw_counters.m_times_to_first_token = std::vector(); + raw_counters.m_times_to_first_token.emplace_back(ttft); + num_generated_tokens = 0; + for (size_t i = 0; i < tok_times.size(); ++i) { + raw_counters.m_durations[i] = std::chrono::duration_cast(tok_times[i] - start_time_val).count(); + + // If in 10 ms a batch of 5 new tokens is generated then TTOT is 10 ms / 5. + // todo: float check that it's valid for batch > 1. 
+ raw_counters.m_durations[i] /= batch_sizes[i]; + num_generated_tokens += batch_sizes[i]; + start_time_val = tok_times[i]; + } + } -PerfMetrics PerfMetrics::operator+(const PerfMetrics& metrics) const { - PerfMetrics nm; // new metrics - nm.m_counters = m_counters; - auto& new_counters = nm.m_counters; + std::tie(mean_tpot, std_tpot) = calc_mean_and_std(raw_counters.m_durations); + std::tie(mean_ttft, std_ttft) = calc_mean_and_std(raw_counters.m_times_to_first_token); - auto& new_durations = new_counters->m_durations; - auto& new_times_to_first_token = new_counters->m_times_to_first_token; - - auto& counters_to_appnd = metrics.m_counters; - new_durations.insert(new_durations.end(), counters_to_appnd->m_durations.begin(), counters_to_appnd->m_durations.end()); - new_times_to_first_token.insert(new_times_to_first_token.end(), counters_to_appnd->m_times_to_first_token.begin(), counters_to_appnd->m_times_to_first_token.end()); + std::tie(mean_generate_duration, std_generate_duration) = calc_mean_and_std(raw_counters.generate_durations); + std::tie(mean_tokenization_duration, std_tokenization_duration) = calc_mean_and_std(raw_counters.tokenization_durations); + std::tie(mean_detokenization_duration, std_detokenization_duration) = calc_mean_and_std(raw_counters.detokenization_durations); - OPENVINO_ASSERT(metrics.load_time == load_time, "generation metrics can be accumulated only for the same pipeline"); + mean_throughput = 1000.0f / mean_tpot; + std_throughput = (std_tpot * 1000.0f) / (mean_tpot * mean_tpot); +} + +PerfMetrics PerfMetrics::operator+(const PerfMetrics& right) const { + OPENVINO_ASSERT(right.load_time == load_time, "generation metrics can be accumulated only for the same pipeline"); - std::tie(nm.mean_tpot, nm.std_tpot) = calc_mean_and_std(new_counters->m_durations); - std::tie(nm.mean_ttft, nm.std_ttft) = calc_mean_and_std(new_counters->m_times_to_first_token); + // Copy left value to res. + PerfMetrics res = *this; + + // Concatenate duration and first token times. + auto& new_durations = res.raw_counters.m_durations; + auto& new_times_to_first_token = res.raw_counters.m_times_to_first_token; + auto& right_durations = right.raw_counters.m_durations; + auto& right_times_to_first_token = right.raw_counters.m_times_to_first_token; - // todo: add tokenization statistics concatenation. + new_durations.insert(new_durations.end(), right_durations.begin(), right_durations.end()); + new_times_to_first_token.insert(new_times_to_first_token.end(), right_times_to_first_token.begin(), right_times_to_first_token.end()); + + // Concatenate tokenization/detokenization and total generation times. 
+ auto& new_tok_durations = res.raw_counters.tokenization_durations; + auto& new_detok_durations = res.raw_counters.detokenization_durations; + auto& new_gen_durations = res.raw_counters.generate_durations; + auto& right_tok_durations = right.raw_counters.tokenization_durations; + auto& right_detok_durations = right.raw_counters.detokenization_durations; + auto& right_gen_durations = right.raw_counters.generate_durations; - return nm; + new_tok_durations.insert(new_tok_durations.end(), right_tok_durations.begin(), right_tok_durations.end()); + new_detok_durations.insert(new_detok_durations.end(), right_detok_durations.begin(), right_detok_durations.end()); + new_gen_durations.insert(new_gen_durations.end(), right_gen_durations.begin(), right_gen_durations.end()); + + res.num_generated_tokens = num_generated_tokens + right.num_generated_tokens; + res.num_input_tokens = num_input_tokens + right.num_input_tokens; + res.load_time = load_time; + res.evaluate_statistics(); + return res; } PerfMetrics& PerfMetrics::operator+=(const PerfMetrics& right) { @@ -75,7 +103,5 @@ PerfMetrics& PerfMetrics::operator+=(const PerfMetrics& right) { return *this; } - - } // namespace genai } // namespace ov diff --git a/src/cpp/src/tokenizer.cpp b/src/cpp/src/tokenizer.cpp index 501d0e86cf..ac6b925dcb 100644 --- a/src/cpp/src/tokenizer.cpp +++ b/src/cpp/src/tokenizer.cpp @@ -323,8 +323,6 @@ class Tokenizer::TokenizerImpl { // Replace what jinja2cpp doesn't support std::pair replace_str_map[] = { - {"{-", "{"}, - {"{%-", "{%"}, {"'}", "' }"}, {"{'", "{ '"}, {".strip()", ""} diff --git a/src/python/py_generate_pipeline.cpp b/src/python/py_generate_pipeline.cpp index c78c760b6c..860d3c3592 100644 --- a/src/python/py_generate_pipeline.cpp +++ b/src/python/py_generate_pipeline.cpp @@ -22,6 +22,7 @@ using ov::genai::GenerationResult; using ov::genai::LLMPipeline; using ov::genai::OptionalGenerationConfig; using ov::genai::PerfMetrics; +using ov::genai::RawPerfMetrics; using ov::genai::SchedulerConfig; using ov::genai::StopCriteria; using ov::genai::StreamerBase; @@ -535,13 +536,30 @@ PYBIND11_MODULE(py_generate_pipeline, m) { .def(py::init<>()) .def_property_readonly("texts", [](const DecodedResults &dr) { return handle_utf8_results(dr); }) .def_readonly("scores", &DecodedResults::scores) + .def_readonly("metrics", &DecodedResults::metrics) .def("__str__", &DecodedResults::operator std::string);; + py::class_(m, "RawPerfMetrics") + .def(py::init<>()) + .def_readonly("generate_durations", &RawPerfMetrics::generate_durations) + .def_readonly("tokenization_durations", &RawPerfMetrics::tokenization_durations) + .def_readonly("detokenization_durations", &RawPerfMetrics::detokenization_durations) + .def_readonly("m_times_to_first_token", &RawPerfMetrics::m_times_to_first_token) + .def_readonly("m_batch_sizes", &RawPerfMetrics::m_batch_sizes) + .def_readonly("m_durations", &RawPerfMetrics::m_durations) + .def_readonly("num_generated_tokens", &RawPerfMetrics::num_generated_tokens) + .def_readonly("num_input_tokens", &RawPerfMetrics::num_input_tokens); py::class_(m, "PerfMetrics") .def(py::init<>()) .def_readonly("mean_generate_duration", &PerfMetrics::mean_generate_duration) - .def_readonly("mean_decoding_duration", &PerfMetrics::mean_decoding_duration) - .def_readonly("mean_encoding_duration", &PerfMetrics::mean_encoding_duration) + .def_readonly("std_generate_duration", &PerfMetrics::std_generate_duration) + .def_readonly("mean_tokenization_duration", &PerfMetrics::mean_tokenization_duration) +
.def_readonly("std_tokenization_duration", &PerfMetrics::std_tokenization_duration) + .def_readonly("mean_detokenization_duration", &PerfMetrics::mean_detokenization_duration) + .def_readonly("std_detokenization_duration", &PerfMetrics::std_detokenization_duration) + .def_readonly("mean_throughput", &PerfMetrics::mean_throughput) + .def_readonly("std_throughput", &PerfMetrics::std_throughput) .def_readonly("mean_tpot", &PerfMetrics::mean_tpot) .def_readonly("mean_ttft", &PerfMetrics::mean_ttft) .def_readonly("std_tpot", &PerfMetrics::std_tpot) @@ -557,7 +575,8 @@ PYBIND11_MODULE(py_generate_pipeline, m) { py::class_(m, "EncodedResults") .def_readonly("tokens", &EncodedResults::tokens) - .def_readonly("scores", &EncodedResults::scores); + .def_readonly("scores", &EncodedResults::scores) + .def_readonly("metrics", &EncodedResults::metrics); py::class_>(m, "StreamerBase") // Change the holder form unique_ptr to shared_ptr .def(py::init<>()) diff --git a/tests/python_tests/ov_genai_test_utils.py b/tests/python_tests/ov_genai_test_utils.py index 5d038e65e2..4ba71a1d48 100644 --- a/tests/python_tests/ov_genai_test_utils.py +++ b/tests/python_tests/ov_genai_test_utils.py @@ -81,8 +81,6 @@ def get_chat_templates(): # but skips some models that currently are not processed correctly. skipped_models = { - "berkeley-nest/Starling-LM-7B-alpha", # TODO: Need to enable and unskip, since it's preset in continious batching and has ~30 000 downloads. - # These models fail even on HF so no need to check if applying chat matches. "vibhorag101/llama-2-13b-chat-hf-phr_mental_therapy", "codellama/CodeLlama-34b-Instruct-hf", From 0a8f0d95dcd37e59cced6a959de719d8a53e5c98 Mon Sep 17 00:00:00 2001 From: Pavel Esir Date: Mon, 22 Jul 2024 17:24:33 +0200 Subject: [PATCH 05/11] add more preicise durations --- .../benchmark_vanilla_genai.cpp | 2 +- .../python/benchmark_vanilla_genai/README.md | 15 ++++++------ .../benchmark_vanilla_genai.py | 9 ++++--- .../include/openvino/genai/perf_metrics.hpp | 11 +++++---- src/cpp/src/greedy_decoding.cpp | 1 + src/cpp/src/llm_pipeline.cpp | 6 +++-- src/cpp/src/multinomial_decoding.cpp | 8 ++++++- src/cpp/src/perf_metrics.cpp | 24 ++++++++++++------- src/cpp/src/sampler.hpp | 9 ++----- src/cpp/src/utils.hpp | 14 ----------- src/python/py_generate_pipeline.cpp | 6 +++-- 11 files changed, 52 insertions(+), 53 deletions(-) diff --git a/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp b/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp index 6d96d24fc5..a9bc07f641 100644 --- a/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp +++ b/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp @@ -11,7 +11,7 @@ int main(int argc, char* argv[]) try { ("p,prompt", "Prompt", cxxopts::value()->default_value("The Sky is blue because")) ("m,model", "Path to model and tokenizers base directory", cxxopts::value()->default_value(".")) ("nw,num_warmup", "Number of warmup iterations", cxxopts::value()->default_value(std::to_string(1))) - ("n,num_iter", "Number of iterations", cxxopts::value()->default_value(std::to_string(5))) + ("n,num_iter", "Number of iterations", cxxopts::value()->default_value(std::to_string(20))) ("mt,max_new_tokens", "Number of iterations", cxxopts::value()->default_value(std::to_string(20))) ("d,device", "device", cxxopts::value()->default_value("CPU")) ("h,help", "Print usage"); diff --git a/samples/python/benchmark_vanilla_genai/README.md b/samples/python/benchmark_vanilla_genai/README.md index af66ea545d..13666a7de9 100644 --- 
a/samples/python/benchmark_vanilla_genai/README.md +++ b/samples/python/benchmark_vanilla_genai/README.md @@ -56,11 +56,12 @@ python benchmark_vanilla_genai.py -m TinyLlama-1.1B-Chat-v1.0/ ``` ``` -Load time: 3446 ms -Generate time: 876.2 ± 3.30719 ms -Tokenization time: 0 ± 0 ms -Detokenization time: 0 ± 0 ms -ttft: 168 ± 0 ms -tpot: 174.68 ± 4.08671 ms -Tokens/s: 5.72475 ± 0.133933 +Load time: 3405.69 ms +Generate time: 1430.77 ± 3.04 ms +Tokenization time: 0.51 ± 0.02 ms +Detokenization time: 0.37 ± 0.01 ms +TTFT: 81.60 ± 0.54 ms +TPOT: 71.52 ± 2.72 ms +Throughput tokens/s: 13.98 ± 0.53 ``` +s \ No newline at end of file diff --git a/samples/python/benchmark_vanilla_genai/benchmark_vanilla_genai.py b/samples/python/benchmark_vanilla_genai/benchmark_vanilla_genai.py index 4c87234179..9e4debe847 100755 --- a/samples/python/benchmark_vanilla_genai/benchmark_vanilla_genai.py +++ b/samples/python/benchmark_vanilla_genai/benchmark_vanilla_genai.py @@ -10,7 +10,7 @@ def main(): parser.add_argument("-m", "--model", type=str, help="Path to model and tokenizers base directory") parser.add_argument("-p", "--prompt", type=str, default="The Sky is blue because", help="Prompt") parser.add_argument("-nw", "--num_warmup", type=int, default=1, help="Number of warmup iterations") - parser.add_argument("-n", "--num_iter", type=int, default=3, help="Number of iterations") + parser.add_argument("-n", "--num_iter", type=int, default=2, help="Number of iterations") parser.add_argument("-mt", "--max_new_tokens", type=int, default=20, help="Maximal number of new tokens") parser.add_argument("-d", "--device", type=str, default="CPU", help="Device") @@ -22,9 +22,8 @@ def main(): num_warmup = args.num_warmup num_iter = args.num_iter - config = ov_genai.GenerationConfig() - config.max_new_tokens = args.num_new_tokens + config.max_new_tokens = args.max_new_tokens pipe = ov_genai.LLMPipeline(model_path, device) @@ -37,8 +36,8 @@ def main(): # pdb.set_trace() res = pipe.generate(prompt, config) metrics += res.metrics - - print(f"Load time: {metrics.load_time} ms") + + print(f"Load time: {metrics.load_time:.2f} ms") print(f"Generate time: {metrics.mean_generate_duration:.2f} ± {metrics.std_generate_duration:.2f} ms") print(f"Tokenization time: {metrics.mean_tokenization_duration:.2f} ± {metrics.std_tokenization_duration:.2f} ms") print(f"Detokenization time: {metrics.mean_detokenization_duration:.2f} ± {metrics.std_detokenization_duration:.2f} ms") diff --git a/src/cpp/include/openvino/genai/perf_metrics.hpp b/src/cpp/include/openvino/genai/perf_metrics.hpp index e66c917e81..5779b9b080 100644 --- a/src/cpp/include/openvino/genai/perf_metrics.hpp +++ b/src/cpp/include/openvino/genai/perf_metrics.hpp @@ -13,19 +13,20 @@ namespace ov { namespace genai { using TimePoint = std::chrono::steady_clock::time_point; +using MicroSeconds = std::chrono::duration>; /** * @brief Structure with raw performance metrics for each generation before any statistics calculated. 
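* Durations are stored in the float-based MicroSeconds type declared above and are converted to milliseconds when statistics are evaluated.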
*/ struct OPENVINO_GENAI_EXPORTS RawPerfMetrics { - std::vector generate_durations; - std::vector tokenization_durations; - std::vector detokenization_durations; + std::vector generate_durations; + std::vector tokenization_durations; + std::vector detokenization_durations; - std::vector m_times_to_first_token; + std::vector m_times_to_first_token; std::vector m_new_token_times; std::vector m_batch_sizes; - std::vector m_durations; + std::vector m_durations; size_t num_generated_tokens; size_t num_input_tokens; diff --git a/src/cpp/src/greedy_decoding.cpp b/src/cpp/src/greedy_decoding.cpp index c5bf10a2d1..c8fd36cbdd 100644 --- a/src/cpp/src/greedy_decoding.cpp +++ b/src/cpp/src/greedy_decoding.cpp @@ -22,6 +22,7 @@ EncodedResults greedy_decoding( size_t prompt_len = prompts_shape[1]; size_t max_new_tokens = generation_config.get_max_new_tokens(prompt_len); + // Initialize results and performance metrics. EncodedResults results; auto& raw_perf_counters = results.metrics.raw_counters; diff --git a/src/cpp/src/llm_pipeline.cpp b/src/cpp/src/llm_pipeline.cpp index 5241142afe..adac9110e1 100644 --- a/src/cpp/src/llm_pipeline.cpp +++ b/src/cpp/src/llm_pipeline.cpp @@ -165,7 +165,7 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { auto& raw_counters = decoded_results.metrics.raw_counters; auto stop_time = std::chrono::steady_clock::now(); - + raw_counters.generate_durations = std::vector(); raw_counters.generate_durations.emplace_back(PerfMetrics::get_duration_ms(stop_time - start_time)); raw_counters.tokenization_durations.emplace_back(PerfMetrics::get_duration_ms(encode_stop_time - start_time)); raw_counters.detokenization_durations.emplace_back(PerfMetrics::get_duration_ms(decode_stop_time - decode_start_time)); @@ -269,11 +269,13 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { } else { m_is_cache_empty = false; } + auto stop_time = std::chrono::steady_clock::now(); // If is called without tokenization then that stat will not be reported. auto& metrics = result.metrics; metrics.num_input_tokens = batch_size * input_ids.get_shape().at(1); metrics.load_time = this->m_load_time_ms; + metrics.raw_counters.generate_durations.emplace_back(PerfMetrics::get_duration_ms(stop_time - start_time)); metrics.evaluate_statistics(start_time); return result; } @@ -391,7 +393,7 @@ ov::genai::LLMPipeline::LLMPipeline( m_pimpl = make_unique(std::filesystem::path(path), device, config); } auto stop_time = std::chrono::steady_clock::now(); - m_pimpl->m_load_time_ms = PerfMetrics::get_duration_ms(stop_time - start_time); + m_pimpl->m_load_time_ms = PerfMetrics::get_duration_ms(stop_time - start_time) / 1000.0f; } ov::genai::GenerationConfig ov::genai::LLMPipeline::get_generation_config() const { diff --git a/src/cpp/src/multinomial_decoding.cpp b/src/cpp/src/multinomial_decoding.cpp index fd16e948c1..fc59f00e12 100644 --- a/src/cpp/src/multinomial_decoding.cpp +++ b/src/cpp/src/multinomial_decoding.cpp @@ -162,7 +162,9 @@ ov::genai::EncodedResults multinominal_decoding(ov::InferRequest& m_model_runner size_t prompt_len = prompts_shape[1]; - ov::genai::EncodedResults results; + // Initialize results and performance metrics. 
+ EncodedResults results; + auto& raw_perf_counters = results.metrics.raw_counters; results.scores.resize(batch_size, 0); results.tokens.resize(batch_size); @@ -179,6 +181,8 @@ ov::genai::EncodedResults multinominal_decoding(ov::InferRequest& m_model_runner m_model_runner.get_tensor("beam_idx").data()[0] = 0; m_model_runner.infer(); + raw_perf_counters.m_new_token_times.emplace_back(std::chrono::steady_clock::now()); + raw_perf_counters.m_batch_sizes.emplace_back(batch_size); auto logits_tensor = m_model_runner.get_tensor("logits"); @@ -222,6 +226,8 @@ ov::genai::EncodedResults multinominal_decoding(ov::InferRequest& m_model_runner m_model_runner.get_tensor("input_ids").data()[0] = out_token.id; m_model_runner.infer(); + raw_perf_counters.m_new_token_times.emplace_back(std::chrono::steady_clock::now()); + raw_perf_counters.m_batch_sizes.emplace_back(batch_size); logits = m_model_runner.get_tensor("logits").data(); out_token = sampling.get_out_token(logits, vocab_size, tokens); diff --git a/src/cpp/src/perf_metrics.cpp b/src/cpp/src/perf_metrics.cpp index 3947793802..d4dc6c8de6 100644 --- a/src/cpp/src/perf_metrics.cpp +++ b/src/cpp/src/perf_metrics.cpp @@ -9,12 +9,18 @@ namespace { -std::pair calc_mean_and_std(const std::vector& durations) { - float mean = std::accumulate(durations.begin(), durations.end(), 0.0f) / durations.size(); +// std::pair calc_mean_and_std(const std::vector& durations) { +std::pair calc_mean_and_std(const std::vector& durations) { + float mean = std::accumulate(durations.begin(), durations.end(), 0.0f, + [](const float& acc, const ov::genai::MicroSeconds& duration) -> float { + return acc + duration.count(); + }); + mean /= durations.size(); + mean /= 1000.f; float sum_square_durations = std::accumulate(durations.begin(), durations.end(), 0.0f, - [](const float& acc, const float& duration) -> float { - return acc + duration * duration; + [](const float& acc, const ov::genai::MicroSeconds& duration) -> float { + return acc + duration.count() * duration.count() / 1000000.0f; }); float std = std::sqrt(sum_square_durations / durations.size() - mean * mean); return {mean, std}; @@ -27,7 +33,7 @@ namespace ov { namespace genai { float PerfMetrics::get_duration_ms(std::chrono::steady_clock::duration duration) { - return std::chrono::duration_cast(duration).count(); + return std::chrono::duration_cast(duration).count(); } void PerfMetrics::evaluate_statistics(std::optional start_time) { @@ -36,14 +42,14 @@ void PerfMetrics::evaluate_statistics(std::optional start_time) { auto start_time_val = *start_time; auto& tok_times = raw_counters.m_new_token_times; auto& batch_sizes = raw_counters.m_batch_sizes; - raw_counters.m_durations = std::vector(tok_times.size()); + raw_counters.m_durations = std::vector(tok_times.size()); - auto ttft = std::chrono::duration_cast(tok_times[0] - start_time_val).count(); - raw_counters.m_times_to_first_token = std::vector(); + auto ttft = tok_times[0] - start_time_val; + raw_counters.m_times_to_first_token = std::vector(); raw_counters.m_times_to_first_token.emplace_back(ttft); num_generated_tokens = 0; for (size_t i = 0; i < tok_times.size(); ++i) { - raw_counters.m_durations[i] = std::chrono::duration_cast(tok_times[i] - start_time_val).count(); + raw_counters.m_durations[i] = tok_times[i] - start_time_val; // If in 10 ms a batch of 5 new tokens is generated then TTOT is 10 ms / 5. // todo: float check that it's valid for batch > 1. 
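The hunk above folds the microsecond-to-millisecond conversion into the accumulation lambdas. A minimal Python sketch of the same arithmetic (an illustration only, not part of the patch) shows that both the mean and the standard deviation come out in milliseconds:

```python
import math

def calc_mean_and_std(durations_us):
    # Mirror of calc_mean_and_std in src/cpp/src/perf_metrics.cpp:
    # microsecond inputs, millisecond outputs.
    ms = [d / 1000.0 for d in durations_us]
    mean = sum(ms) / len(ms)
    sum_squares = sum(d * d for d in ms)
    std = math.sqrt(sum_squares / len(ms) - mean * mean)
    return mean, std

# Two steps of 10 ms and 20 ms: mean 15 ms, std 5 ms.
assert calc_mean_and_std([10_000.0, 20_000.0]) == (15.0, 5.0)
```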
diff --git a/src/cpp/src/sampler.hpp b/src/cpp/src/sampler.hpp index 6390fc8725..dc631c68ac 100644 --- a/src/cpp/src/sampler.hpp +++ b/src/cpp/src/sampler.hpp @@ -219,13 +219,8 @@ class Sampler { } Token _greedy_sample(const std::vector& logit_vector) const { - Token max_token{-std::numeric_limits::infinity() , 0}; - for (const auto& logit : logit_vector) { - if (logit.m_log_prob > max_token.m_log_prob) { - max_token = logit; - } - } - return max_token; + auto out_token = std::max_element(logit_vector.begin(), logit_vector.end(), [](const Token& lhs, const Token& rhs) { return lhs.m_log_prob < rhs.m_log_prob; }); + return *out_token; } std::vector _multinomial_sample(const std::vector& logit_vector, size_t num_tokens_per_sequence) { diff --git a/src/cpp/src/utils.hpp b/src/cpp/src/utils.hpp index 446ef8549b..25acc1c87f 100644 --- a/src/cpp/src/utils.hpp +++ b/src/cpp/src/utils.hpp @@ -12,20 +12,6 @@ namespace ov { namespace genai { namespace utils { -#include -#include -#include - -// Templated function to measure execution time of an object method. -template -std::pair execution_time_wrapper(T& instance, Ret(T::*method)(Args...), Args&&... args) { - auto start = std::chrono::steady_clock::now(); - Ret result = (instance.*method)(std::forward(args)...); - auto end = std::chrono::steady_clock::now(); - auto duration = std::chrono::duration_cast(end - start).count(); - return {result, duration}; -} - Tensor init_attention_mask(const Tensor& position_ids); void print_tensor(const ov::Tensor& tensor); diff --git a/src/python/py_generate_pipeline.cpp b/src/python/py_generate_pipeline.cpp index 860d3c3592..e2f89cd962 100644 --- a/src/python/py_generate_pipeline.cpp +++ b/src/python/py_generate_pipeline.cpp @@ -537,7 +537,7 @@ PYBIND11_MODULE(py_generate_pipeline, m) { .def_property_readonly("texts", [](const DecodedResults &dr) { return handle_utf8_results(dr); }) .def_readonly("scores", &DecodedResults::scores) .def_readonly("metrics", &DecodedResults::metrics) - .def("__str__", &DecodedResults::operator std::string);; + .def("__str__", &DecodedResults::operator std::string); py::class_(m, "RawPerfMetrics") .def(py::init<>()) @@ -566,7 +566,9 @@ PYBIND11_MODULE(py_generate_pipeline, m) { .def_readonly("std_ttft", &PerfMetrics::std_ttft) .def_readonly("load_time", &PerfMetrics::load_time) .def("__add__", &PerfMetrics::operator+) - .def("__iadd__", &PerfMetrics::operator+=); + .def("__iadd__", &PerfMetrics::operator+=) + .def_readonly("raw_counters", &PerfMetrics::raw_counters) + ; py::class_(m, "TokenizedInputs") .def(py::init()) From 90320f411257e215d06bcdf100d37bbe20f1622e Mon Sep 17 00:00:00 2001 From: Pavel Esir Date: Tue, 23 Jul 2024 21:57:11 +0200 Subject: [PATCH 06/11] add cpp Readme, ensured correct batch processing, add PerfMetrics to Readme --- samples/CMakeLists.txt | 2 +- .../CMakeLists.txt | 12 ++-- samples/cpp/benchmark_genai/README.md | 47 +++++++++++++ .../benchmark_genai.cpp} | 12 ++-- samples/cpp/benchmark_vanilla_genai/README.md | 3 - .../README.md | 30 ++------- .../benchmark_genai.py} | 24 ++++--- .../benchmark_genai_automatic.py | 62 +++++++++++++++++ src/README.md | 49 ++++++++++++++ .../include/openvino/genai/llm_pipeline.hpp | 4 +- .../include/openvino/genai/perf_metrics.hpp | 24 ++++--- src/cpp/src/greedy_decoding.cpp | 2 +- src/cpp/src/group_beam_searcher.cpp | 2 +- src/cpp/src/llm_pipeline.cpp | 18 ++--- src/cpp/src/multinomial_decoding.cpp | 2 +- src/cpp/src/perf_metrics.cpp | 67 ++++++++++--------- src/python/py_generate_pipeline.cpp | 33 +++++++-- 17 files changed, 
278 insertions(+), 115 deletions(-) rename samples/cpp/{benchmark_vanilla_genai => benchmark_genai}/CMakeLists.txt (64%) create mode 100644 samples/cpp/benchmark_genai/README.md rename samples/cpp/{benchmark_vanilla_genai/benchmark_vanilla_genai.cpp => benchmark_genai/benchmark_genai.cpp} (90%) delete mode 100644 samples/cpp/benchmark_vanilla_genai/README.md rename samples/python/{benchmark_vanilla_genai => benchmark_genai}/README.md (64%) rename samples/python/{benchmark_vanilla_genai/benchmark_vanilla_genai.py => benchmark_genai/benchmark_genai.py} (58%) create mode 100755 samples/python/benchmark_genai/benchmark_genai_automatic.py diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt index 44f8d580b2..5339817c1f 100644 --- a/samples/CMakeLists.txt +++ b/samples/CMakeLists.txt @@ -10,7 +10,7 @@ add_subdirectory(cpp/greedy_causal_lm) add_subdirectory(cpp/multinomial_causal_lm) add_subdirectory(cpp/prompt_lookup_decoding_lm) add_subdirectory(cpp/speculative_decoding_lm) -add_subdirectory(cpp/benchmark_vanilla_genai) +add_subdirectory(cpp/benchmark_genai) install(FILES requirements.txt DESTINATION samples COMPONENT cpp_samples_genai) diff --git a/samples/cpp/benchmark_vanilla_genai/CMakeLists.txt b/samples/cpp/benchmark_genai/CMakeLists.txt similarity index 64% rename from samples/cpp/benchmark_vanilla_genai/CMakeLists.txt rename to samples/cpp/benchmark_genai/CMakeLists.txt index e871f5a33a..bfa1592f61 100644 --- a/samples/cpp/benchmark_vanilla_genai/CMakeLists.txt +++ b/samples/cpp/benchmark_genai/CMakeLists.txt @@ -12,14 +12,14 @@ FetchContent_Declare(cxxopts URL_HASH SHA256=523175f792eb0ff04f9e653c90746c12655f10cb70f1d5e6d6d9491420298a08) FetchContent_MakeAvailable(cxxopts) -add_executable(benchmark_vanilla_genai benchmark_vanilla_genai.cpp) -target_link_libraries(benchmark_vanilla_genai PRIVATE openvino::genai cxxopts::cxxopts) -set_target_properties(benchmark_vanilla_genai PROPERTIES - COMPILE_PDB_NAME benchmark_vanilla_genai +add_executable(benchmark_genai benchmark_genai.cpp) +target_link_libraries(benchmark_genai PRIVATE openvino::genai cxxopts::cxxopts) +set_target_properties(benchmark_genai PROPERTIES + COMPILE_PDB_NAME benchmark_genai # Ensure out of box LC_RPATH on macOS with SIP INSTALL_RPATH_USE_LINK_PATH ON) -# target_compile_features(benchmark_vanilla_genai PRIVATE cxx_std_11) -install(TARGETS benchmark_vanilla_genai +# target_compile_features(benchmark_genai PRIVATE cxx_std_11) +install(TARGETS benchmark_genai RUNTIME DESTINATION samples_bin/ COMPONENT samples_bin EXCLUDE_FROM_ALL) diff --git a/samples/cpp/benchmark_genai/README.md b/samples/cpp/benchmark_genai/README.md new file mode 100644 index 0000000000..bac16c2f7d --- /dev/null +++ b/samples/cpp/benchmark_genai/README.md @@ -0,0 +1,47 @@ +# Benchmarking Vanilla GenAI + +This sample demonstrates how to benchmark an LLM in OpenVINO GenAI. The sample includes functionality for warm-up iterations, generating text, and calculating various performance metrics. + +## Download and convert the model and tokenizers + +The `--upgrade-strategy eager` option is needed to ensure `optimum-intel` is upgraded to the latest version. + +It's not required to install [../../requirements.txt](../../requirements.txt) for deployment if the model has already been exported.
+ +```sh +pip install --upgrade-strategy eager -r ../../requirements.txt +optimum-cli export openvino --trust-remote-code --model TinyLlama/TinyLlama-1.1B-Chat-v1.0 TinyLlama-1.1B-Chat-v1.0 +``` + +## Usage + +```sh +benchmark_genai [OPTIONS] +``` + +### Options + +- `-m, --model`: Path to the model and tokenizers base directory. +- `-p, --prompt` (default: `"The Sky is blue because"`): The prompt to generate text. +- `-nw, --num_warmup` (default: `1`): Number of warmup iterations. +- `-mt, --max_new_tokens` (default: `20`): Maximal number of new tokens to generate. +- `-n, --num_iter` (default: `3`): Number of iterations. +- `-d, --device` (default: `"CPU"`): Device to run the model on. + +### Output: + +``` +benchmark_genai -m TinyLlama-1.1B-Chat-v1.0 -n 10 +``` + +``` +Load time: 3405.69 ms +Generate time: 1430.77 ± 3.04 ms +Tokenization time: 0.51 ± 0.02 ms +Detokenization time: 0.37 ± 0.01 ms +TTFT: 81.60 ± 0.54 ms +TPOT: 71.52 ± 2.72 ms +Throughput tokens/s: 13.98 ± 0.53 +``` + +For more information on how performance metrics are calculated, please follow the [performance-metrics tutorial](../../../src/README.md#performance-metrics). diff --git a/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp b/samples/cpp/benchmark_genai/benchmark_genai.cpp similarity index 90% rename from samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp rename to samples/cpp/benchmark_genai/benchmark_genai.cpp index a9bc07f641..9610aabe54 100644 --- a/samples/cpp/benchmark_vanilla_genai/benchmark_vanilla_genai.cpp +++ b/samples/cpp/benchmark_genai/benchmark_genai.cpp @@ -8,11 +8,11 @@ int main(int argc, char* argv[]) try { cxxopts::Options options("benchmark_vanilla_genai", "Help command"); options.add_options() - ("p,prompt", "Prompt", cxxopts::value()->default_value("The Sky is blue because")) ("m,model", "Path to model and tokenizers base directory", cxxopts::value()->default_value(".")) + ("p,prompt", "Prompt", cxxopts::value()->default_value("The Sky is blue because")) ("nw,num_warmup", "Number of warmup iterations", cxxopts::value()->default_value(std::to_string(1))) - ("n,num_iter", "Number of iterations", cxxopts::value()->default_value(std::to_string(20))) - ("mt,max_new_tokens", "Number of iterations", cxxopts::value()->default_value(std::to_string(20))) + ("n,num_iter", "Number of iterations", cxxopts::value()->default_value(std::to_string(3))) + ("mt,max_new_tokens", "Maximal number of new tokens", cxxopts::value()->default_value(std::to_string(20))) ("d,device", "device", cxxopts::value()->default_value("CPU")) ("h,help", "Print usage"); @@ -38,6 +38,8 @@ int main(int argc, char* argv[]) try { ov::genai::GenerationConfig config; config.max_new_tokens = result["max_new_tokens"].as(); + config.num_beam_groups = 3; + config.num_beams = 15; ov::genai::LLMPipeline pipe(model_path, device); @@ -45,10 +47,10 @@ int main(int argc, char* argv[]) try { pipe.generate(prompt, config); ov::genai::DecodedResults res = pipe.generate(prompt, config); - ov::genai::PerfMetrics metrics = res.metrics; + ov::genai::PerfMetrics metrics = res.perf_metrics; for (size_t i = 0; i < num_iter - 1; i++) { res = pipe.generate(prompt, config); - metrics = metrics + res.metrics; + metrics = metrics + res.perf_metrics; } std::cout << "Load time: " << metrics.load_time << " ms" << std::endl; diff --git a/samples/cpp/benchmark_vanilla_genai/README.md b/samples/cpp/benchmark_vanilla_genai/README.md deleted file mode 100644 index 50197dad1d..0000000000 --- a/samples/cpp/benchmark_vanilla_genai/README.md +++
/dev/null @@ -1,3 +0,0 @@ -# benchmark OpenVINO GenAI sample - -TODO: adapt from python sample to c++ \ No newline at end of file diff --git a/samples/python/benchmark_vanilla_genai/README.md b/samples/python/benchmark_genai/README.md similarity index 64% rename from samples/python/benchmark_vanilla_genai/README.md rename to samples/python/benchmark_genai/README.md index 13666a7de9..fa4fa85576 100644 --- a/samples/python/benchmark_vanilla_genai/README.md +++ b/samples/python/benchmark_genai/README.md @@ -1,28 +1,7 @@ -# Benchmark Vanilla GenAI +# Benchmarking Vanilla GenAI This sample script demonstrates how to benchmark an LLM in OpenVINO GenAI. The script includes functionality for warm-up iterations, generating text, and calculating various performance metrics. -# ov.genai.PerfMetrics structure -ov.genai.PerfMetrics is a structure which holds performance metric for each generate call. Each generate call calcualtes the following metrics: -- mean_ttft - - std_ttft - - mean_tpot - - std_tpot - - load_time - - mean_generate_duration - - std_generate_duration - - mean_tokenization_duration - - std_tokenization_duration - - mean_detokenization_duration - - std_detokenization_duration - - mean_throughput - - std_throughput - - num_generated_tokens - - num_input_tokens - -Performance metrics can be added to one another and accumulated using the += operator or the + operator. In that case the mean values accumulated by several generate calls will be calculated. - - ## Download and convert the model and tokenizers The `--upgrade-strategy eager` option is needed to ensure `optimum-intel` is upgraded to the latest version. @@ -45,14 +24,14 @@ python benchmark_vanilla_genai.py [OPTIONS] - `-m, --model`: Path to the model and tokenizers base directory. - `-p, --prompt` (default: `"The Sky is blue because"`): The prompt to generate text. - `-nw, --num_warmup` (default: `1`): Number of warmup iterations. -- `-mt, --max_new_tokens` (default: `20`): Number of warmup iterations. - `-n, --num_iter` (default: `3`): Number of iterations. +- `-n, --num_iter` (default: `3`): Number of iterations. +- `-mt, --max_new_tokens` (default: `20`): Maximal number of new tokens to generate. - `-d, --device` (default: `"CPU"`): Device to run the model on. ### Output: ``` -python benchmark_vanilla_genai.py -m TinyLlama-1.1B-Chat-v1.0/ +python benchmark_genai.py -m TinyLlama-1.1B-Chat-v1.0 -n 10 ``` ``` @@ -64,4 +43,5 @@ TTFT: 81.60 ± 0.54 ms TPOT: 71.52 ± 2.72 ms Throughput tokens/s: 13.98 ± 0.53 ``` -s \ No newline at end of file + +For more information on how performance metrics are calculated, see [performance metrics readme](../../../src/README.md#performance-metrics). diff --git a/samples/python/benchmark_vanilla_genai/benchmark_vanilla_genai.py b/samples/python/benchmark_genai/benchmark_genai.py similarity index 58% rename from samples/python/benchmark_vanilla_genai/benchmark_vanilla_genai.py rename to samples/python/benchmark_genai/benchmark_genai.py index 9e4debe847..06bd8b0f48 100755 --- a/samples/python/benchmark_vanilla_genai/benchmark_vanilla_genai.py +++ b/samples/python/benchmark_genai/benchmark_genai.py @@ -3,7 +3,6 @@ import argparse import openvino_genai as ov_genai -import pdb def main(): parser = argparse.ArgumentParser(description="Help command") @@ -16,6 +15,8 @@ def main(): args = parser.parse_args() + # Perf metrics are stored in DecodedResults. + # In order to get DecodedResults instead of a string, the input should be a list.
prompt = [args.prompt] model_path = args.model device = args.device @@ -24,6 +25,8 @@ config = ov_genai.GenerationConfig() config.max_new_tokens = args.max_new_tokens + config.num_beam_groups = 3 + config.num_beams = 15 pipe = ov_genai.LLMPipeline(model_path, device) @@ -31,19 +34,18 @@ pipe.generate(prompt, config) res = pipe.generate(prompt, config) - metrics = res.metrics + perf_metrics = res.perf_metrics for _ in range(num_iter - 1): - # pdb.set_trace() res = pipe.generate(prompt, config) - metrics += res.metrics + perf_metrics += res.perf_metrics - print(f"Load time: {metrics.load_time:.2f} ms") - print(f"Generate time: {metrics.mean_generate_duration:.2f} ± {metrics.std_generate_duration:.2f} ms") - print(f"Tokenization time: {metrics.mean_tokenization_duration:.2f} ± {metrics.std_tokenization_duration:.2f} ms") - print(f"Detokenization time: {metrics.mean_detokenization_duration:.2f} ± {metrics.std_detokenization_duration:.2f} ms") - print(f"TTFT: {metrics.mean_ttft:.2f} ± {metrics.std_ttft:.2f} ms") - print(f"TPOT: {metrics.mean_tpot:.2f} ± {metrics.std_tpot:.2f} ms") - print(f"Throughput tokens/s: {metrics.mean_throughput:.2f} ± {metrics.std_throughput:.2f}") + print(f"Load time: {perf_metrics.load_time:.2f} ms") + print(f"Generate time: {perf_metrics.mean_generate_duration:.2f} ± {perf_metrics.std_generate_duration:.2f} ms") + print(f"Tokenization time: {perf_metrics.mean_tokenization_duration:.2f} ± {perf_metrics.std_tokenization_duration:.2f} ms") + print(f"Detokenization time: {perf_metrics.mean_detokenization_duration:.2f} ± {perf_metrics.std_detokenization_duration:.2f} ms") + print(f"TTFT: {perf_metrics.mean_ttft:.2f} ± {perf_metrics.std_ttft:.2f} ms") + print(f"TPOT: {perf_metrics.mean_tpot:.2f} ± {perf_metrics.std_tpot:.2f} ms") + print(f"Throughput tokens/s: {perf_metrics.mean_throughput:.2f} ± {perf_metrics.std_throughput:.2f}") if __name__ == "__main__": main() diff --git a/samples/python/benchmark_genai/benchmark_genai_automatic.py b/samples/python/benchmark_genai/benchmark_genai_automatic.py new file mode 100755 index 0000000000..98a00a8c99 --- /dev/null +++ b/samples/python/benchmark_genai/benchmark_genai_automatic.py @@ -0,0 +1,62 @@ +# Copyright (C) 2023-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 + +import argparse +import openvino_genai as ov_genai + +def main(): + parser = argparse.ArgumentParser(description="Help command") + parser.add_argument("-m", "--model", type=str, help="Path to model and tokenizers base directory") + parser.add_argument("-p", "--prompt", type=str, default="The Sky is blue because", help="Prompt") + parser.add_argument("-nw", "--num_warmup", type=int, default=1, help="Number of warmup iterations") + parser.add_argument("-n", "--num_iter", type=int, default=5, help="Number of iterations") + parser.add_argument("-mt", "--max_new_tokens", type=int, default=20, help="Maximal number of new tokens") + parser.add_argument("-d", "--device", type=str, default="CPU", help="Device") + + args = parser.parse_args() + + # Perf metrics are stored in DecodedResults. + # In order to get DecodedResults instead of a string, the input should be a list.
+ + model_path = args.model + device = args.device + num_warmup = args.num_warmup + num_iter = args.num_iter + + config = ov_genai.GenerationConfig() + config.max_new_tokens = args.max_new_tokens + # config.num_beam_groups = 3 + # config.num_beams = 15 + + pipe = ov_genai.LLMPipeline(model_path, device) + + import pandas as pd + metrics_df = pd.DataFrame(columns=['batch_size', 'throughput', 'ttft', 'tpot', 'std_throughput', 'std_ttft', 'std_tpot']) + + batch_sizes = [1, 2, 4, 16, 32, 64, 256] + for batch_size in batch_sizes: + prompt = [args.prompt] * batch_size + for _ in range(num_warmup): + pipe.generate(prompt, config) + + res = pipe.generate(prompt, config) + metrics = res.perf_metrics + for _ in range(num_iter - 1): + res = pipe.generate(prompt, config) + metrics += res.perf_metrics + metrics_df = metrics_df._append({ + 'batch_size': batch_size, + 'throughput': metrics.mean_throughput, + 'ttft': metrics.mean_ttft, + 'tpot': metrics.mean_tpot, + 'std_throughput': metrics.std_throughput, + 'std_ttft': metrics.std_ttft, + 'std_tpot': metrics.std_tpot, + }, ignore_index=True) + + metrics_df.to_csv('metrics.csv', index=False) + +if __name__ == "__main__": + main() diff --git a/src/README.md b/src/README.md index 445b88aa58..a5530ea578 100644 --- a/src/README.md +++ b/src/README.md @@ -196,6 +196,55 @@ int main(int argc, char* argv[]) { } ``` +### Performance Metrics + +`ov.genai.PerfMetrics` (referred to as `PerfMetrics` for simplicity) is a structure that holds performance metrics for each generate call. `PerfMetrics` holds fields with the mean and standard deviation for the following metrics: +- `ttft` +- `tpot` +- `load_time` +- `generate_duration` +- `tokenization_duration` +- `detokenization_duration` +- `throughput` + +and: +- `num_generated_tokens` +- `num_input_tokens` + +Performance metrics are stored either in the `DecodedResults` or `EncodedResults` `perf_metrics` field. In addition to the fields mentioned above, `PerfMetrics` has a member `raw_metrics` of type `ov.genai.RawPerfMetrics` (referred to as `RawPerfMetrics` for simplicity) that contains raw values for the durations of each batch of new token generation, tokenization durations, detokenization durations, and more. These raw metrics are accessible if you wish to calculate your own statistical values such as median or percentiles. However, since mean and standard deviation values are usually sufficient, we will focus on `PerfMetrics`. + +```python +import openvino_genai as ov_genai +pipe = ov_genai.LLMPipeline(model_path, "CPU") +res = pipe.generate(["The Sun is yellow because"], max_new_tokens=20) +perf_metrics = res.perf_metrics +print(f'generate_duration: {perf_metrics.mean_generate_duration:.2f}') +print(f'ttft: {perf_metrics.mean_ttft:.2f}') +print(f'tpot: {perf_metrics.mean_tpot:.2f}') +``` +output: +```sh +generate_duration: 76.28 +ttft: 42.58 +tpot: 3.80 +``` + +>**Note**: If the input prompt is just a string, the generate function will return only a string without perf_metrics. To obtain perf_metrics, provide the prompt as a list with at least one element or call generate with encoded inputs. + +Several `perf_metrics` objects can be added together. In that case `raw_metrics` are concatenated and the mean/std values are recalculated. This simplifies benchmarking and accumulating statistics from several generate calls.
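+Since `raw_metrics` keeps the unaggregated values, custom statistics can be derived directly. A minimal sketch (assuming `res` is the `DecodedResults` from the snippet above; raw durations are reported in microseconds, per the `MicroSeconds` typedef):
+
+```python
+import statistics
+raw = res.perf_metrics.raw_metrics
+# m_durations holds the per-token generation times in microseconds.
+median_tpot_ms = statistics.median(raw.m_durations) / 1000.0
+print(f'median tpot: {median_tpot_ms:.2f} ms')
+```
+
+The snippet below accumulates the metrics of two generate calls with `+`: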
+ +```python +import openvino_genai as ov_genai +pipe = ov_genai.LLMPipeline(model_path, "CPU") +res_1 = pipe.generate(["The Sun is yellow because"], max_new_tokens=20) +res_2 = pipe.generate(["Why Sky is blue because"], max_new_tokens=20) +perf_metrics = res_1.perf_metrics + res_2.perf_metrics + +print(f'generate_duration: {perf_metrics.mean_generate_duration:.2f}') +print(f'ttft: {perf_metrics.mean_ttft:.2f}') +print(f'tpot: {perf_metrics.mean_tpot:.2f}') +``` + ## How It Works For information on how OpenVINO™ GenAI works, refer to the [How It Works Section](https://github.com/openvinotoolkit/openvino.genai/tree/releases/2024/2/src/docs/HOW_IT_WORKS.md). diff --git a/src/cpp/include/openvino/genai/llm_pipeline.hpp b/src/cpp/include/openvino/genai/llm_pipeline.hpp index 14100d4f16..4be298128e 100644 --- a/src/cpp/include/openvino/genai/llm_pipeline.hpp +++ b/src/cpp/include/openvino/genai/llm_pipeline.hpp @@ -37,7 +37,7 @@ class EncodedResults { public: std::vector> tokens; std::vector scores; - PerfMetrics metrics; + PerfMetrics perf_metrics; }; /** @@ -52,7 +52,7 @@ class DecodedResults { public: std::vector texts; std::vector scores; - PerfMetrics metrics; + PerfMetrics perf_metrics; // @brief Convert DecodedResults to a string. operator std::string() const { diff --git a/src/cpp/include/openvino/genai/perf_metrics.hpp b/src/cpp/include/openvino/genai/perf_metrics.hpp index 5779b9b080..44535cf3a2 100644 --- a/src/cpp/include/openvino/genai/perf_metrics.hpp +++ b/src/cpp/include/openvino/genai/perf_metrics.hpp @@ -37,23 +37,25 @@ struct OPENVINO_GENAI_EXPORTS RawPerfMetrics { * */ struct OPENVINO_GENAI_EXPORTS PerfMetrics { - // First token time. + // Load time in ms. + float load_time; + + // First token time (in ms). float mean_ttft; float std_ttft; - // Time per output token. + // Time (in ms) per output token. float mean_tpot; float std_tpot; - float load_time; - float mean_generate_duration; float std_generate_duration; - float mean_tokenization_duration; - float std_tokenization_duration; - float mean_detokenization_duration; - float std_detokenization_duration; - + float mean_tokenization_duration = -1; + float std_tokenization_duration = -1; + float mean_detokenization_duration = -1; + float std_detokenization_duration = -1; + + // Tokens per second. float mean_throughput; float std_throughput; @@ -61,11 +63,11 @@ struct OPENVINO_GENAI_EXPORTS PerfMetrics { size_t num_input_tokens; void evaluate_statistics(std::optional start_time = std::nullopt); - static float get_duration_ms(std::chrono::steady_clock::duration duration); + static float get_microsec(std::chrono::steady_clock::duration duration); PerfMetrics operator+(const PerfMetrics& metrics) const; PerfMetrics& operator+=(const PerfMetrics& right); - RawPerfMetrics raw_counters; + RawPerfMetrics raw_metrics; }; } // namespace genai diff --git a/src/cpp/src/greedy_decoding.cpp b/src/cpp/src/greedy_decoding.cpp index c8fd36cbdd..8b0cf19c1f 100644 --- a/src/cpp/src/greedy_decoding.cpp +++ b/src/cpp/src/greedy_decoding.cpp @@ -24,7 +24,7 @@ EncodedResults greedy_decoding( // Initialize results and performance metrics. 
EncodedResults results; - auto& raw_perf_counters = results.metrics.raw_counters; + auto& raw_perf_counters = results.perf_metrics.raw_metrics; results.scores.resize(running_batch_size); results.tokens.resize(running_batch_size); diff --git a/src/cpp/src/group_beam_searcher.cpp b/src/cpp/src/group_beam_searcher.cpp index 784ff1a915..1b9729b2f6 100644 --- a/src/cpp/src/group_beam_searcher.cpp +++ b/src/cpp/src/group_beam_searcher.cpp @@ -444,7 +444,7 @@ std::pair beam_search(ov::InferRequest& lm, int32_t res_selected_beam_idx = 0; results.scores.reserve(config.num_return_sequences * result.size()); results.tokens.reserve(config.num_return_sequences * result.size()); - auto& raw_perf_counters = results.metrics.raw_counters; + auto& raw_perf_counters = results.perf_metrics.raw_metrics; raw_perf_counters.m_new_token_times = new_token_times; raw_perf_counters.m_batch_sizes = batch_sizes; diff --git a/src/cpp/src/llm_pipeline.cpp b/src/cpp/src/llm_pipeline.cpp index adac9110e1..1c1bd5ccd8 100644 --- a/src/cpp/src/llm_pipeline.cpp +++ b/src/cpp/src/llm_pipeline.cpp @@ -161,16 +161,16 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { } // generate_durations - decoded_results.metrics = encoded_results.metrics; + decoded_results.perf_metrics = encoded_results.perf_metrics; - auto& raw_counters = decoded_results.metrics.raw_counters; + auto& raw_counters = decoded_results.perf_metrics.raw_metrics; auto stop_time = std::chrono::steady_clock::now(); raw_counters.generate_durations = std::vector(); - raw_counters.generate_durations.emplace_back(PerfMetrics::get_duration_ms(stop_time - start_time)); - raw_counters.tokenization_durations.emplace_back(PerfMetrics::get_duration_ms(encode_stop_time - start_time)); - raw_counters.detokenization_durations.emplace_back(PerfMetrics::get_duration_ms(decode_stop_time - decode_start_time)); + raw_counters.generate_durations.emplace_back(PerfMetrics::get_microsec(stop_time - start_time)); + raw_counters.tokenization_durations.emplace_back(PerfMetrics::get_microsec(encode_stop_time - start_time)); + raw_counters.detokenization_durations.emplace_back(PerfMetrics::get_microsec(decode_stop_time - decode_start_time)); - decoded_results.metrics.evaluate_statistics(start_time); + decoded_results.perf_metrics.evaluate_statistics(start_time); return decoded_results; } @@ -272,10 +272,10 @@ class StatefulLLMPipeline final : public LLMPipelineImplBase { auto stop_time = std::chrono::steady_clock::now(); // If is called without tokenization then that stat will not be reported. 
- auto& metrics = result.metrics; + auto& metrics = result.perf_metrics; metrics.num_input_tokens = batch_size * input_ids.get_shape().at(1); metrics.load_time = this->m_load_time_ms; - metrics.raw_counters.generate_durations.emplace_back(PerfMetrics::get_duration_ms(stop_time - start_time)); + metrics.raw_metrics.generate_durations.emplace_back(PerfMetrics::get_microsec(stop_time - start_time)); metrics.evaluate_statistics(start_time); return result; } @@ -393,7 +393,7 @@ ov::genai::LLMPipeline::LLMPipeline( m_pimpl = make_unique(std::filesystem::path(path), device, config); } auto stop_time = std::chrono::steady_clock::now(); - m_pimpl->m_load_time_ms = PerfMetrics::get_duration_ms(stop_time - start_time) / 1000.0f; + m_pimpl->m_load_time_ms = PerfMetrics::get_microsec(stop_time - start_time) / 1000.0f; } ov::genai::GenerationConfig ov::genai::LLMPipeline::get_generation_config() const { diff --git a/src/cpp/src/multinomial_decoding.cpp b/src/cpp/src/multinomial_decoding.cpp index fc59f00e12..b00c62aed7 100644 --- a/src/cpp/src/multinomial_decoding.cpp +++ b/src/cpp/src/multinomial_decoding.cpp @@ -164,7 +164,7 @@ ov::genai::EncodedResults multinominal_decoding(ov::InferRequest& m_model_runner // Initialize results and performance metrics. EncodedResults results; - auto& raw_perf_counters = results.metrics.raw_counters; + auto& raw_perf_counters = results.perf_metrics.raw_metrics; results.scores.resize(batch_size, 0); results.tokens.resize(batch_size); diff --git a/src/cpp/src/perf_metrics.cpp b/src/cpp/src/perf_metrics.cpp index d4dc6c8de6..c319032449 100644 --- a/src/cpp/src/perf_metrics.cpp +++ b/src/cpp/src/perf_metrics.cpp @@ -9,18 +9,18 @@ namespace { -// std::pair calc_mean_and_std(const std::vector& durations) { std::pair calc_mean_and_std(const std::vector& durations) { + // Accepts time durations in microseconds and returns the mean and standard deviation in milliseconds. float mean = std::accumulate(durations.begin(), durations.end(), 0.0f, [](const float& acc, const ov::genai::MicroSeconds& duration) -> float { - return acc + duration.count(); + return acc + duration.count() / 1000.0f; }); mean /= durations.size(); - mean /= 1000.f; float sum_square_durations = std::accumulate(durations.begin(), durations.end(), 0.0f, [](const float& acc, const ov::genai::MicroSeconds& duration) -> float { - return acc + duration.count() * duration.count() / 1000000.0f; + auto d = duration.count() / 1000.0f; + return acc + d * d; }); float std = std::sqrt(sum_square_durations / durations.size() - mean * mean); return {mean, std}; @@ -32,7 +32,7 @@ std::pair calc_mean_and_std(const std::vector(duration).count(); } @@ -40,33 +40,33 @@ void PerfMetrics::evaluate_statistics(std::optional start_time) { // If start_time is specified then recalculate durations relative to it and only then calculate statistics.
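// Without a start time (e.g. when called from operator+ after the raw vectors were concatenated), statistics are computed over the existing raw values.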
if (start_time.has_value()) { auto start_time_val = *start_time; - auto& tok_times = raw_counters.m_new_token_times; - auto& batch_sizes = raw_counters.m_batch_sizes; - raw_counters.m_durations = std::vector(tok_times.size()); + auto& tok_times = raw_metrics.m_new_token_times; + auto& batch_sizes = raw_metrics.m_batch_sizes; + raw_metrics.m_durations = std::vector(tok_times.size()); auto ttft = tok_times[0] - start_time_val; - raw_counters.m_times_to_first_token = std::vector(); - raw_counters.m_times_to_first_token.emplace_back(ttft); + raw_metrics.m_times_to_first_token = std::vector(); + raw_metrics.m_times_to_first_token.emplace_back(ttft); num_generated_tokens = 0; for (size_t i = 0; i < tok_times.size(); ++i) { - raw_counters.m_durations[i] = tok_times[i] - start_time_val; + raw_metrics.m_durations[i] = tok_times[i] - start_time_val; - // If in 10 ms a batch of 5 new tokens is generated then TTOT is 10 ms / 5. - // todo: float check that it's valid for batch > 1. - raw_counters.m_durations[i] /= batch_sizes[i]; + // If in 10 ms a batch of 5 new tokens is generated then TPOT is 10 ms / 5 = 2 ms per token. + raw_metrics.m_durations[i] /= batch_sizes[i]; num_generated_tokens += batch_sizes[i]; start_time_val = tok_times[i]; } } + + // calc_mean_and_std will convert microseconds to milliseconds. + std::tie(mean_tpot, std_tpot) = calc_mean_and_std(raw_metrics.m_durations); + std::tie(mean_ttft, std_ttft) = calc_mean_and_std(raw_metrics.m_times_to_first_token); - std::tie(mean_tpot, std_tpot) = calc_mean_and_std(raw_counters.m_durations); - std::tie(mean_ttft, std_ttft) = calc_mean_and_std(raw_counters.m_times_to_first_token); - - std::tie(mean_generate_duration, std_generate_duration) = calc_mean_and_std(raw_counters.generate_durations); - std::tie(mean_tokenization_duration, std_tokenization_duration) = calc_mean_and_std(raw_counters.tokenization_durations); - std::tie(mean_detokenization_duration, std_detokenization_duration) = calc_mean_and_std(raw_counters.detokenization_durations); + std::tie(mean_generate_duration, std_generate_duration) = calc_mean_and_std(raw_metrics.generate_durations); + std::tie(mean_tokenization_duration, std_tokenization_duration) = calc_mean_and_std(raw_metrics.tokenization_durations); + std::tie(mean_detokenization_duration, std_detokenization_duration) = calc_mean_and_std(raw_metrics.detokenization_durations); - mean_throughput = 1000.0f / mean_tpot; + mean_throughput = 1000.0f / mean_tpot; // tokens per second + // First-order error propagation: std(1000 / tpot) ≈ 1000 * std(tpot) / tpot^2. std_throughput = (std_tpot * 1000.0f) / (mean_tpot * mean_tpot); } @@ -76,22 +76,25 @@ PerfMetrics PerfMetrics::operator+(const PerfMetrics& right) const { // Copy left value to res. PerfMetrics res = *this; - // Concatenate duration and first token times. - auto& new_durations = res.raw_counters.m_durations; - auto& new_times_to_first_token = res.raw_counters.m_times_to_first_token; - auto& right_durations = right.raw_counters.m_durations; - auto& right_times_to_first_token = right.raw_counters.m_times_to_first_token; + // Concatenate durations, batch sizes, and first token times.
+ auto& new_durations = res.raw_metrics.m_durations; + auto& new_batch_sizes = res.raw_metrics.m_batch_sizes; + auto& new_times_to_first_token = res.raw_metrics.m_times_to_first_token; + auto& right_durations = right.raw_metrics.m_durations; + auto& right_batch_sizes = right.raw_metrics.m_batch_sizes; + auto& right_times_to_first_token = right.raw_metrics.m_times_to_first_token; new_durations.insert(new_durations.end(), right_durations.begin(), right_durations.end()); new_times_to_first_token.insert(new_times_to_first_token.end(), right_times_to_first_token.begin(), right_times_to_first_token.end()); + new_batch_sizes.insert(new_batch_sizes.end(), right_batch_sizes.begin(), right_batch_sizes.end()); // Concatenate tokenization/detokenization and total generation times. - auto& new_tok_durations = res.raw_counters.tokenization_durations; - auto& new_detok_durations = res.raw_counters.detokenization_durations; - auto& new_gen_durations = res.raw_counters.generate_durations; - auto& right_tok_durations = right.raw_counters.tokenization_durations; - auto& right_detok_durations = right.raw_counters.detokenization_durations; - auto& right_gen_durations = right.raw_counters.generate_durations; + auto& new_tok_durations = res.raw_metrics.tokenization_durations; + auto& new_detok_durations = res.raw_metrics.detokenization_durations; + auto& new_gen_durations = res.raw_metrics.generate_durations; + auto& right_tok_durations = right.raw_metrics.tokenization_durations; + auto& right_detok_durations = right.raw_metrics.detokenization_durations; + auto& right_gen_durations = right.raw_metrics.generate_durations; new_tok_durations.insert(new_tok_durations.end(), right_tok_durations.begin(), right_tok_durations.end()); new_detok_durations.insert(new_detok_durations.end(), right_detok_durations.begin(), right_detok_durations.end());
diff --git a/src/python/py_generate_pipeline.cpp b/src/python/py_generate_pipeline.cpp
index e2f89cd962..6c88b3ffcc 100644
--- a/src/python/py_generate_pipeline.cpp
+++ b/src/python/py_generate_pipeline.cpp
@@ -38,6 +38,17 @@ using PyBindStreamerVariant = std::variant, std::sh template struct overloaded : Ts... { using Ts::operator()...; }; template overloaded(Ts...) -> overloaded; +template +std::vector get_ms(const T& instance, U T::*member) { + // Converts C++ durations to floats so that they can be used in Python.
+ std::vector res; + const auto& durations = instance.*member; + res.reserve(durations.size()); + std::transform(durations.begin(), durations.end(), std::back_inserter(res), + [](const auto& duration) { return duration.count(); }); + return res; +} + namespace { auto generate_docstring = R"(
@@ -536,17 +547,25 @@ PYBIND11_MODULE(py_generate_pipeline, m) { .def(py::init<>()) .def_property_readonly("texts", [](const DecodedResults &dr) { return handle_utf8_results(dr); }) .def_readonly("scores", &DecodedResults::scores) - .def_readonly("metrics", &DecodedResults::metrics) + .def_readonly("perf_metrics", &DecodedResults::perf_metrics) .def("__str__", &DecodedResults::operator std::string); py::class_(m, "RawPerfMetrics") .def(py::init<>()) .def_readonly("generate_durations", &RawPerfMetrics::generate_durations) - .def_readonly("tokenization_durations", &RawPerfMetrics::tokenization_durations) - .def_readonly("detokenization_durations", &RawPerfMetrics::detokenization_durations) - .def_readonly("m_times_to_first_token", &RawPerfMetrics::m_times_to_first_token) + .def_property_readonly("tokenization_durations", [](const RawPerfMetrics &rw) { + return get_ms(rw, &RawPerfMetrics::tokenization_durations); + }) + .def_property_readonly("detokenization_durations", [](const RawPerfMetrics &rw) { + return get_ms(rw, &RawPerfMetrics::detokenization_durations); + }) + .def_property_readonly("m_times_to_first_token", [](const RawPerfMetrics &rw) { + return get_ms(rw, &RawPerfMetrics::m_times_to_first_token); + }) + .def_property_readonly("m_durations", [](const RawPerfMetrics &rw) { + return get_ms(rw, &RawPerfMetrics::m_durations); + }) .def_readonly("m_batch_sizes", &RawPerfMetrics::m_batch_sizes) - .def_readonly("m_durations", &RawPerfMetrics::m_durations) .def_readonly("num_generated_tokens", &RawPerfMetrics::num_generated_tokens) .def_readonly("num_input_tokens", &RawPerfMetrics::num_input_tokens);
@@ -567,7 +586,7 @@ PYBIND11_MODULE(py_generate_pipeline, m) { .def_readonly("load_time", &PerfMetrics::load_time) .def("__add__", &PerfMetrics::operator+) .def("__iadd__", &PerfMetrics::operator+=) - .def_readonly("raw_counters", &PerfMetrics::raw_counters) + .def_readonly("raw_metrics", &PerfMetrics::raw_metrics) ; py::class_(m, "TokenizedInputs") .def(py::init())
@@ -578,7 +597,7 @@ PYBIND11_MODULE(py_generate_pipeline, m) { py::class_(m, "EncodedResults") .def_readonly("tokens", &EncodedResults::tokens) .def_readonly("scores", &EncodedResults::scores) - .def_readonly("metrics", &EncodedResults::metrics); + .def_readonly("perf_metrics", &EncodedResults::perf_metrics); py::class_>(m, "StreamerBase") // Change the holder from unique_ptr to shared_ptr .def(py::init<>())
From aeec730c4ebd14c90c081df40e50fd49d3c66f0d Mon Sep 17 00:00:00 2001
From: Pavel Esir
Date: Tue, 23 Jul 2024 23:08:39 +0200
Subject: [PATCH 07/11] use MeanStdPair
--- .../cpp/benchmark_genai/benchmark_genai.cpp | 14 +++-- .../python/benchmark_genai/benchmark_genai.py | 14 +++-- .../include/openvino/genai/perf_metrics.hpp | 51 +++++++++++-------- src/cpp/src/perf_metrics.cpp | 18 ++++--- src/python/py_generate_pipeline.cpp | 27 +++++----- 5 files changed, 64 insertions(+), 60 deletions(-)
diff --git a/samples/cpp/benchmark_genai/benchmark_genai.cpp b/samples/cpp/benchmark_genai/benchmark_genai.cpp
index 9610aabe54..24b9491219 100644
--- a/samples/cpp/benchmark_genai/benchmark_genai.cpp
+++ b/samples/cpp/benchmark_genai/benchmark_genai.cpp
@@ -38,8 +38,6 @@ int main(int argc, char* argv[]) try { ov::genai::GenerationConfig config; config.max_new_tokens =
result["max_new_tokens"].as(); - config.num_beam_groups = 3; - config.num_beams = 15; ov::genai::LLMPipeline pipe(model_path, device); @@ -54,12 +52,12 @@ int main(int argc, char* argv[]) try { } std::cout << "Load time: " << metrics.load_time << " ms" << std::endl; - std::cout << "Generate time: " << metrics.mean_generate_duration << " ± " << metrics.std_generate_duration << " ms" << std::endl; - std::cout << "Tokenization time: " << metrics.mean_tokenization_duration << " ± " << metrics.std_tokenization_duration << " ms" << std::endl; - std::cout << "Detokenization time: " << metrics.mean_detokenization_duration << " ± " << metrics.std_detokenization_duration << " ms" << std::endl; - std::cout << "ttft: " << metrics.mean_ttft << " ± " << metrics.std_ttft << " ms" << std::endl; - std::cout << "tpot: " << metrics.mean_tpot << " ± " << metrics.std_tpot << " ms " << std::endl; - std::cout << "Tokens/s: " << metrics.mean_throughput << " ± " << metrics.std_throughput << std::endl; + std::cout << "Generate time: " << metrics.generate_duration.mean << " ± " << metrics.generate_duration.std << " ms" << std::endl; + std::cout << "Tokenization time: " << metrics.tokenization_duration.mean << " ± " << metrics.tokenization_duration.std << " ms" << std::endl; + std::cout << "Detokenization time: " << metrics.detokenization_duration.mean << " ± " << metrics.detokenization_duration.std << " ms" << std::endl; + std::cout << "ttft: " << metrics.ttft.mean << " ± " << metrics.ttft.std << " ms" << std::endl; + std::cout << "tpot: " << metrics.tpot.mean << " ± " << metrics.tpot.std << " ms " << std::endl; + std::cout << "Tokens/s: " << metrics.throughput.mean << " ± " << metrics.throughput.std << std::endl; return 0; } catch (const std::exception& error) { diff --git a/samples/python/benchmark_genai/benchmark_genai.py b/samples/python/benchmark_genai/benchmark_genai.py index 06bd8b0f48..c29c508bf4 100755 --- a/samples/python/benchmark_genai/benchmark_genai.py +++ b/samples/python/benchmark_genai/benchmark_genai.py @@ -25,8 +25,6 @@ def main(): config = ov_genai.GenerationConfig() config.max_new_tokens = args.max_new_tokens - config.num_beam_groups = 3 - config.num_beams = 15 pipe = ov_genai.LLMPipeline(model_path, device) @@ -40,12 +38,12 @@ def main(): perf_metrics += res.perf_metrics print(f"Load time: {perf_metrics.load_time:.2f} ms") - print(f"Generate time: {perf_metrics.mean_generate_duration:.2f} ± {perf_metrics.std_generate_duration:.2f} ms") - print(f"Tokenization time: {perf_metrics.mean_tokenization_duration:.2f} ± {perf_metrics.std_tokenization_duration:.2f} ms") - print(f"Detokenization time: {perf_metrics.mean_detokenization_duration:.2f} ± {perf_metrics.std_detokenization_duration:.2f} ms") - print(f"TTFT: {perf_metrics.mean_ttft:.2f} ± {perf_metrics.std_ttft:.2f} ms") - print(f"TPOT: {perf_metrics.mean_tpot:.2f} ± {perf_metrics.std_tpot:.2f} ms") - print(f"Throughput tokens/s: {perf_metrics.mean_throughput:.2f} ± {perf_metrics.std_throughput:.2f}") + print(f"Generate time: {perf_metrics.generate_duration.mean:.2f} ± {perf_metrics.generate_duration.std:.2f} ms") + print(f"Tokenization time: {perf_metrics.tokenization_duration.mean:.2f} ± {perf_metrics.tokenization_duration.std:.2f} ms") + print(f"Detokenization time: {perf_metrics.detokenization_duration.mean:.2f} ± {perf_metrics.detokenization_duration.std:.2f} ms") + print(f"TTFT: {perf_metrics.ttft.mean:.2f} ± {perf_metrics.ttft.std:.2f} ms") + print(f"TPOT: {perf_metrics.tpot.mean:.2f} ± {perf_metrics.tpot.std:.2f} ms") + print(f"Throughput 
tokens/s: {perf_metrics.throughput.mean:.2f} ± {perf_metrics.throughput.std:.2f}") if __name__ == "__main__": main()
diff --git a/src/cpp/include/openvino/genai/perf_metrics.hpp b/src/cpp/include/openvino/genai/perf_metrics.hpp
index 44535cf3a2..8715761792 100644
--- a/src/cpp/include/openvino/genai/perf_metrics.hpp
+++ b/src/cpp/include/openvino/genai/perf_metrics.hpp
@@ -33,36 +33,43 @@ struct OPENVINO_GENAI_EXPORTS RawPerfMetrics { }; /** -* @brief Structure to store performance metric for each generation -* +* @brief Structure to store mean and standard deviation values. */ -struct OPENVINO_GENAI_EXPORTS PerfMetrics { - // Load time in ms. - float load_time; - - // First token time (in ms). - float mean_ttft; - float std_ttft; +struct OPENVINO_GENAI_EXPORTS MeanStdPair { + float mean; + float std; +}; - // Time (in ms) per output token. - float mean_tpot; - float std_tpot; +/** +* @brief Structure to store performance metrics for each generation. +*/ +struct OPENVINO_GENAI_EXPORTS PerfMetrics { + float load_time; // Load time in ms. + MeanStdPair ttft; // Time to the first token (in ms) (TTFT). + MeanStdPair tpot; // Time (in ms) per output token (TPOT). + MeanStdPair throughput; // Tokens per second. - float mean_generate_duration; - float std_generate_duration; - float mean_tokenization_duration = -1; - float std_tokenization_duration = -1; - float mean_detokenization_duration = -1; - float std_detokenization_duration = -1; - - // Tokens per second. - float mean_throughput; - float std_throughput; + MeanStdPair generate_duration; + MeanStdPair tokenization_duration = {-1, -1}; + MeanStdPair detokenization_duration = {-1, -1}; size_t num_generated_tokens; size_t num_input_tokens; + /** + * @brief calculates mean/std values from raw_metrics. + * + * @param start_time optional start time in case durations need to be updated. + */ void evaluate_statistics(std::optional start_time = std::nullopt); + + /** + * @brief converts duration to microseconds + * + * @param duration duration to convert + */ static float get_microsec(std::chrono::steady_clock::duration duration); PerfMetrics operator+(const PerfMetrics& metrics) const; PerfMetrics& operator+=(const PerfMetrics& right);
diff --git a/src/cpp/src/perf_metrics.cpp b/src/cpp/src/perf_metrics.cpp
index c319032449..bc394fae52 100644
--- a/src/cpp/src/perf_metrics.cpp
+++ b/src/cpp/src/perf_metrics.cpp
@@ -9,7 +9,7 @@ namespace { -std::pair calc_mean_and_std(const std::vector& durations) { +ov::genai::MeanStdPair calc_mean_and_std(const std::vector& durations) { // Accepts time durations in microseconds and returns standard deviation and mean in milliseconds. float mean = std::accumulate(durations.begin(), durations.end(), 0.0f, [](const float& acc, const ov::genai::MicroSeconds& duration) -> float {
@@ -59,15 +59,17 @@ void PerfMetrics::evaluate_statistics(std::optional start_time) { } // calc_mean_and_std will convert microseconds to milliseconds.
- std::tie(mean_tpot, std_tpot) = calc_mean_and_std(raw_metrics.m_durations); - std::tie(mean_ttft, std_ttft) = calc_mean_and_std(raw_metrics.m_times_to_first_token); + tpot = calc_mean_and_std(raw_metrics.m_durations); + ttft = calc_mean_and_std(raw_metrics.m_times_to_first_token); - std::tie(mean_generate_duration, std_generate_duration) = calc_mean_and_std(raw_metrics.generate_durations); - std::tie(mean_tokenization_duration, std_tokenization_duration) = calc_mean_and_std(raw_metrics.tokenization_durations); - std::tie(mean_detokenization_duration, std_detokenization_duration) = calc_mean_and_std(raw_metrics.detokenization_durations); + generate_duration = calc_mean_and_std(raw_metrics.generate_durations); + generate_duration = calc_mean_and_std(raw_metrics.generate_durations); + + tokenization_duration = calc_mean_and_std(raw_metrics.tokenization_durations); + detokenization_duration = calc_mean_and_std(raw_metrics.detokenization_durations); - mean_throughput = 1000.0f / mean_tpot; // tokens per second - std_throughput = (std_tpot * 1000.0f) / (mean_tpot * mean_tpot); + // tokens per second + throughput = {1000.0f / tpot.mean, (tpot.std * 1000.0f) / (tpot.mean * tpot.mean)}; } PerfMetrics PerfMetrics::operator+(const PerfMetrics& right) const { diff --git a/src/python/py_generate_pipeline.cpp b/src/python/py_generate_pipeline.cpp index 6c88b3ffcc..e744179c34 100644 --- a/src/python/py_generate_pipeline.cpp +++ b/src/python/py_generate_pipeline.cpp @@ -20,6 +20,7 @@ using ov::genai::EncodedResults; using ov::genai::GenerationConfig; using ov::genai::GenerationResult; using ov::genai::LLMPipeline; +using ov::genai::MeanStdPair; using ov::genai::OptionalGenerationConfig; using ov::genai::PerfMetrics; using ov::genai::RawPerfMetrics; @@ -569,25 +570,23 @@ PYBIND11_MODULE(py_generate_pipeline, m) { .def_readonly("num_generated_tokens", &RawPerfMetrics::num_generated_tokens) .def_readonly("num_input_tokens", &RawPerfMetrics::num_input_tokens); + py::class_(m, "MeanStdPair") + .def(py::init<>()) + .def_readonly("mean", &MeanStdPair::mean) + .def_readonly("std", &MeanStdPair::std); + py::class_(m, "PerfMetrics") .def(py::init<>()) - .def_readonly("mean_generate_duration", &PerfMetrics::mean_generate_duration) - .def_readonly("std_generate_duration", &PerfMetrics::std_generate_duration) - .def_readonly("mean_tokenization_duration", &PerfMetrics::mean_tokenization_duration) - .def_readonly("std_tokenization_duration", &PerfMetrics::std_tokenization_duration) - .def_readonly("mean_detokenization_duration", &PerfMetrics::mean_detokenization_duration) - .def_readonly("std_detokenization_duration", &PerfMetrics::std_detokenization_duration) - .def_readonly("mean_throughput", &PerfMetrics::mean_throughput) - .def_readonly("std_throughput", &PerfMetrics::std_throughput) - .def_readonly("mean_tpot", &PerfMetrics::mean_tpot) - .def_readonly("mean_ttft", &PerfMetrics::mean_ttft) - .def_readonly("std_tpot", &PerfMetrics::std_tpot) - .def_readonly("std_ttft", &PerfMetrics::std_ttft) + .def_readonly("generate_duration", &PerfMetrics::generate_duration) + .def_readonly("tokenization_duration", &PerfMetrics::tokenization_duration) + .def_readonly("detokenization_duration", &PerfMetrics::detokenization_duration) + .def_readonly("throughput", &PerfMetrics::throughput) + .def_readonly("tpot", &PerfMetrics::tpot) + .def_readonly("ttft", &PerfMetrics::ttft) .def_readonly("load_time", &PerfMetrics::load_time) .def("__add__", &PerfMetrics::operator+) .def("__iadd__", &PerfMetrics::operator+=) - 
.def_readonly("raw_metrics", &PerfMetrics::raw_metrics) - ; + .def_readonly("raw_metrics", &PerfMetrics::raw_metrics); py::class_(m, "TokenizedInputs") .def(py::init()) From be2fdafb273319084999fe944d02e5653d030de7 Mon Sep 17 00:00:00 2001 From: Pavel Esir Date: Fri, 26 Jul 2024 10:12:55 +0200 Subject: [PATCH 08/11] resolve conflicts --- src/cpp/src/llm_pipeline.cpp | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/src/cpp/src/llm_pipeline.cpp b/src/cpp/src/llm_pipeline.cpp index 40d4377b00..8505daf3b2 100644 --- a/src/cpp/src/llm_pipeline.cpp +++ b/src/cpp/src/llm_pipeline.cpp @@ -510,7 +510,10 @@ ov::genai::LLMPipeline::LLMPipeline( const ov::genai::Tokenizer& tokenizer, OptionalGenerationConfig generation_config ) { + auto start_time = std::chrono::steady_clock::now(); m_pimpl = std::make_unique(request, tokenizer, generation_config); + auto stop_time = std::chrono::steady_clock::now(); + m_pimpl->m_load_time_ms = std::chrono::duration_cast(stop_time - start_time).count(); } ov::genai::LLMPipeline::LLMPipeline( @@ -518,27 +521,35 @@ ov::genai::LLMPipeline::LLMPipeline( const ov::genai::Tokenizer& tokenizer, const std::string& device, const ov::AnyMap& plugin_config -): m_pimpl{[&]() -> std::unique_ptr { +){ + auto start_time = std::chrono::steady_clock::now(); if ("CB" == device) { - return std::make_unique(model_path, tokenizer, "CPU", plugin_config); - } if ("NPU" == device) { - return std::make_unique(model_path, tokenizer, device, plugin_config); + m_pimpl = std::make_unique(model_path, tokenizer, "CPU", plugin_config); + } else if ("NPU" == device) { + m_pimpl = std::make_unique(model_path, tokenizer, device, plugin_config); + } else { + m_pimpl = std::make_unique(model_path, tokenizer, device, plugin_config); } - return std::make_unique(model_path, tokenizer, device, plugin_config); -}()} {} + auto stop_time = std::chrono::steady_clock::now(); + m_pimpl->m_load_time_ms = std::chrono::duration_cast(stop_time - start_time).count(); +} ov::genai::LLMPipeline::LLMPipeline( const std::string& path, const std::string& device, const ov::AnyMap& config -): m_pimpl{[&]() -> std::unique_ptr { +){ + auto start_time = std::chrono::steady_clock::now(); if ("CB" == device) { - return std::make_unique(path, "CPU", config); - } if ("NPU" == device) { - return std::make_unique(path, device, config); + m_pimpl = std::make_unique(path, "CPU", config); + } else if ("NPU" == device) { + m_pimpl = std::make_unique(path, device, config); + } else { + m_pimpl = std::make_unique(path, device, config); } - return std::make_unique(path, device, config); -}()} {} + auto stop_time = std::chrono::steady_clock::now(); + m_pimpl->m_load_time_ms = std::chrono::duration_cast(stop_time - start_time).count(); +} ov::genai::GenerationConfig ov::genai::LLMPipeline::get_generation_config() const { return m_pimpl->m_generation_config; From b00bcd8f411e65c7a5d455fec502fcf2639fa022 Mon Sep 17 00:00:00 2001 From: Pavel Esir Date: Fri, 26 Jul 2024 14:05:36 +0200 Subject: [PATCH 09/11] apply comments --- samples/cpp/benchmark_genai/CMakeLists.txt | 1 - .../cpp/benchmark_genai/benchmark_genai.cpp | 9 +- .../python/benchmark_genai/benchmark_genai.py | 2 +- .../benchmark_genai_automatic.py | 62 -------------- src/README.md | 82 ++++++++++++++----- src/cpp/src/greedy_decoding.cpp | 1 - src/cpp/src/perf_metrics.cpp | 2 - 7 files changed, 67 insertions(+), 92 deletions(-) delete mode 100755 samples/python/benchmark_genai/benchmark_genai_automatic.py diff --git 
a/samples/cpp/benchmark_genai/CMakeLists.txt b/samples/cpp/benchmark_genai/CMakeLists.txt
index bfa1592f61..5443439de5 100644
--- a/samples/cpp/benchmark_genai/CMakeLists.txt
+++ b/samples/cpp/benchmark_genai/CMakeLists.txt
@@ -18,7 +18,6 @@ set_target_properties(benchmark_genai PROPERTIES COMPILE_PDB_NAME benchmark_genai # Ensure out of box LC_RPATH on macOS with SIP INSTALL_RPATH_USE_LINK_PATH ON) -# target_compile_features(benchmark_genai PRIVATE cxx_std_11) install(TARGETS benchmark_genai RUNTIME DESTINATION samples_bin/ COMPONENT samples_bin
diff --git a/samples/cpp/benchmark_genai/benchmark_genai.cpp b/samples/cpp/benchmark_genai/benchmark_genai.cpp
index 24b9491219..2fd5eafc69 100644
--- a/samples/cpp/benchmark_genai/benchmark_genai.cpp
+++ b/samples/cpp/benchmark_genai/benchmark_genai.cpp
@@ -50,14 +50,15 @@ int main(int argc, char* argv[]) try { res = pipe.generate(prompt, config); metrics = metrics + res.perf_metrics; } - + + std::cout << std::fixed << std::setprecision(2); std::cout << "Load time: " << metrics.load_time << " ms" << std::endl; std::cout << "Generate time: " << metrics.generate_duration.mean << " ± " << metrics.generate_duration.std << " ms" << std::endl; std::cout << "Tokenization time: " << metrics.tokenization_duration.mean << " ± " << metrics.tokenization_duration.std << " ms" << std::endl; std::cout << "Detokenization time: " << metrics.detokenization_duration.mean << " ± " << metrics.detokenization_duration.std << " ms" << std::endl; - std::cout << "ttft: " << metrics.ttft.mean << " ± " << metrics.ttft.std << " ms" << std::endl; - std::cout << "tpot: " << metrics.tpot.mean << " ± " << metrics.tpot.std << " ms " << std::endl; - std::cout << "Tokens/s: " << metrics.throughput.mean << " ± " << metrics.throughput.std << std::endl; + std::cout << "TTFT: " << metrics.ttft.mean << " ± " << metrics.ttft.std << " ms" << std::endl; + std::cout << "TPOT: " << metrics.tpot.mean << " ± " << metrics.tpot.std << " ms/token " << std::endl; + std::cout << "Throughput: " << metrics.throughput.mean << " ± " << metrics.throughput.std << " tokens/s" << std::endl; return 0; } catch (const std::exception& error) {
diff --git a/samples/python/benchmark_genai/benchmark_genai.py b/samples/python/benchmark_genai/benchmark_genai.py
index c29c508bf4..ef468053d8 100755
--- a/samples/python/benchmark_genai/benchmark_genai.py
+++ b/samples/python/benchmark_genai/benchmark_genai.py
@@ -43,7 +43,7 @@ def main(): print(f"Detokenization time: {perf_metrics.detokenization_duration.mean:.2f} ± {perf_metrics.detokenization_duration.std:.2f} ms") print(f"TTFT: {perf_metrics.ttft.mean:.2f} ± {perf_metrics.ttft.std:.2f} ms") print(f"TPOT: {perf_metrics.tpot.mean:.2f} ± {perf_metrics.tpot.std:.2f} ms") - print(f"Throughput tokens/s: {perf_metrics.throughput.mean:.2f} ± {perf_metrics.throughput.std:.2f}") + print(f"Throughput: {perf_metrics.throughput.mean:.2f} ± {perf_metrics.throughput.std:.2f} tokens/s") if __name__ == "__main__": main()
diff --git a/samples/python/benchmark_genai/benchmark_genai_automatic.py b/samples/python/benchmark_genai/benchmark_genai_automatic.py
deleted file mode 100755
index 98a00a8c99..0000000000
--- a/samples/python/benchmark_genai/benchmark_genai_automatic.py
+++ /dev/null
@@ -1,62 +0,0 @@ -# Copyright (C) 2023-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 - -import argparse -import openvino_genai as ov_genai -import pdb - -def main(): - parser = argparse.ArgumentParser(description="Help command") - parser.add_argument("-m", "--model", type=str,
help="Path to model and tokenizers base directory") - parser.add_argument("-p", "--prompt", type=str, default="The Sky is blue because", help="Prompt") - parser.add_argument("-nw", "--num_warmup", type=int, default=1, help="Number of warmup iterations") - parser.add_argument("-n", "--num_iter", type=int, default=5, help="Number of iterations") - parser.add_argument("-mt", "--max_new_tokens", type=int, default=20, help="Maximal number of new tokens") - parser.add_argument("-d", "--device", type=str, default="CPU", help="Device") - - args = parser.parse_args() - - # Perf metrics is stored in DecodedResults. - # In order to get DecodedResults instead of a string input should be a list. - - model_path = args.model - device = args.device - num_warmup = args.num_warmup - num_iter = args.num_iter - - config = ov_genai.GenerationConfig() - config.max_new_tokens = 20 - # config.num_beam_groups = 3 - # config.num_beams = 15 - - pipe = ov_genai.LLMPipeline(model_path, device) - - import pandas as pd - metrics_df = pd.DataFrame(columns=['batch_size', 'throughput', 'ttft', 'tpot', 'std_throughput', 'std_ttft', 'std_tpot']) - - batch_sizes = [1, 2, 4, 16, 32, 64, 256] - for batch_size in batch_sizes: - prompt = [args.prompt] * batch_size - for _ in range(num_warmup): - pipe.generate(prompt, config) - - res = pipe.generate(prompt, config) - metrics = res.metrics - for _ in range(num_iter - 1): - res = pipe.generate(prompt, config) - metrics += res.metrics - # pdb.set_trace() - metrics_df = metrics_df._append({ - 'batch_size': batch_size, - 'throughput': metrics.mean_throughput, - 'ttft': metrics.mean_ttft, - 'tpot': metrics.mean_tpot, - 'std_throughput': metrics.std_throughput, - 'std_ttft': metrics.std_ttft, - 'std_tpot': metrics.std_tpot, - }, ignore_index=True) - - metrics_df.to_csv('metrics.csv', index=False) - -if __name__ == "__main__": - main() diff --git a/src/README.md b/src/README.md index 3a53e175dd..aa4dc0f301 100644 --- a/src/README.md +++ b/src/README.md @@ -198,29 +198,49 @@ int main(int argc, char* argv[]) { ### Performance Metrics -`ov.genai.PerfMetrics` (referred to as `PerfMetrics` for simplicity) is a structure that holds performance metrics for each generate call. `PerfMetrics` hold fields with mean and standard deviations for the following metrics: -- `ttft` -- `tpot` -- `load_time` -- `generate_duration` -- `tokenization_duration` -- `detokenization_duration` -- `throughput` +`openvino_genai.PerfMetrics` (referred as `PerfMetrics` for simplicity) is a structure that holds performance metrics for each generate call. `PerfMetrics` holds fields with mean and standard deviations for the following metrics: +- Time To the First Token (TTFT), ms +- Time per Output Token (TPOT), ms/token +- Generate total duration, ms +- Tokenization duration, ms +- Detokenization duration, ms +- Throughput, tokens/s and: -- `num_generated_tokens` -- `num_input_tokens` +- Load time, ms +- Number of generated tokens +- Number of tokens in the input prompt -Performance metrics are stored either in the `DecodedResults` or `EncodedResults` `perf_metric` field. Additionally to the fields mentioned above, `PerfMetrics` has a member `raw_metrics` of type `ov.genai.RawPerfMetrics` (referred to as `RawPerfMetrics` for simplicity) that contains raw values for the durations of each batch of new token generation, tokenization durations, detokenization durations, and more. These raw metrics are accessible if you wish to calculate your own statistical values such as median or percentiles. 
However, since mean and standard deviation values are usually sufficient, we will focus on `PerfMetrics`. +Performance metrics are stored either in the `DecodedResults` or `EncodedResults` `perf_metric` field. In addition to the fields mentioned above, `PerfMetrics` has a member `raw_metrics` of type `openvino_genai.RawPerfMetrics` (referred to as `RawPerfMetrics` for simplicity) that contains raw values for the durations of each batch of new token generation, tokenization durations, detokenization durations, and more. These raw metrics are accessible if you wish to calculate your own statistical values such as median or percentiles. However, since mean and standard deviation values are usually sufficient, we will focus on `PerfMetrics`. ```python import openvino_genai as ov_genai pipe = ov_genai.LLMPipeline(model_path, "CPU") -res = pipe.generate(["The Sun is yellow because"], max_new_tokens=20) -perf_metrics = res.perf_metrics -print(f'generate_duration: {perf_metrics.mean_generate_duration:.2f}') -print(f'ttft: {perf_metrics.mean_ttft:.2f}') -print(f'tpot: {perf_metrics.mean_tpot:.2f}') +result = pipe.generate(["The Sun is yellow because"], max_new_tokens=20) +perf_metrics = result.perf_metrics + +print(f'Generate duration: {perf_metrics.generate_duration.mean:.2f}') +print(f'TTFT: {perf_metrics.ttft.mean:.2f} ms') +print(f'TPOT: {perf_metrics.tpot.mean:.2f} ms/token') +print(f'Throughput: {perf_metrics.throughput.mean:.2f} tokens/s') +``` + +```cpp +#include "openvino/genai/llm_pipeline.hpp" +#include + +int main(int argc, char* argv[]) { + std::string model_path = argv[1]; + ov::genai::LLMPipeline pipe(model_path, "CPU"); + auto result = pipe.generate("The Sun is yellow because", ov::genai::max_new_tokens(20)); + auto perf_metrics = result.perf_metrics; + + std::cout << std::fixed << std::setprecision(2); + std::cout << "Generate duration: " << perf_metrics.generate_duration.mean << " ms" << std::endl; + std::cout << "TTFT: " << perf_metrics.ttft.mean << " ms" << std::endl; + std::cout << "TPOT: " << perf_metrics.tpot.mean << " ms/token " << std::endl; + std::cout << "Throughput: " << perf_metrics.throughput.mean << " tokens/s" << std::endl; +} ``` output: ```sh
@@ -229,9 +249,28 @@ mean_ttft: 42.58 mean_tpot 3.80 ``` ->**Note**: If the input prompt is just a string, the generate function will return only a string without perf_metrics. To obtain perf_metrics, provide the prompt as a list with at least one element or call generate with encoded inputs. +>**Note**: If the input prompt is just a string, the generate function returns only a string without perf_metrics. To obtain perf_metrics, provide the prompt as a list with at least one element or call generate with encoded inputs. -Several `perf_metrics` can be added with each other. In that case `raw_metrics` will be concatenated and mean/std values will be recalculated. This enhances benchmarking and accumulating statistics from several calls. +Several `perf_metrics` can be added to each other. In that case `raw_metrics` are concatenated and mean/std values are recalculated.
This accumulates statistics from several `generate()` calls. + +```cpp +#include "openvino/genai/llm_pipeline.hpp" +#include + +int main(int argc, char* argv[]) { + std::string model_path = argv[1]; + ov::genai::LLMPipeline pipe(model_path, "CPU"); + auto result_1 = pipe.generate("The Sun is yellow because", ov::genai::max_new_tokens(20)); + auto result_2 = pipe.generate("The Sun is yellow because", ov::genai::max_new_tokens(20)); + auto perf_metrics = result_1.perf_metrics + result_2.perf_metrics; + + std::cout << std::fixed << std::setprecision(2); + std::cout << "Generate duration: " << perf_metrics.generate_duration.mean << " ms" << std::endl; + std::cout << "TTFT: " << perf_metrics.ttft.mean << " ms" << std::endl; + std::cout << "TPOT: " << perf_metrics.tpot.mean << " ms/token " << std::endl; + std::cout << "Throughput: " << perf_metrics.throughput.mean << " tokens/s" << std::endl; +} +``` ```python import openvino_genai as ov_genai pipe = ov_genai.LLMPipeline(model_path, "CPU") res_1 = pipe.generate(["The Sun is yellow because"], max_new_tokens=20) res_2 = pipe.generate(["Why Sky is blue because"], max_new_tokens=20) perf_metrics = res_1.perf_metrics + res_2.perf_metrics -print(f'generate_duration: {perf_metrics.mean_generate_duration:.2f}') -print(f'ttft: {perf_metrics.mean_ttft:.2f}') -print(f'tpot: {perf_metrics.mean_tpot:.2f}') +print(f'Generate duration: {perf_metrics.generate_duration.mean:.2f}') +print(f'TTFT: {perf_metrics.ttft.mean:.2f} ms') +print(f'TPOT: {perf_metrics.tpot.mean:.2f} ms/token') +print(f'Throughput: {perf_metrics.throughput.mean:.2f} tokens/s') ``` ## How It Works
diff --git a/src/cpp/src/greedy_decoding.cpp b/src/cpp/src/greedy_decoding.cpp
index 8b0cf19c1f..8dc56b4ba8 100644
--- a/src/cpp/src/greedy_decoding.cpp
+++ b/src/cpp/src/greedy_decoding.cpp
@@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 #include "openvino/genai/perf_metrics.hpp" -// #include "perf_counters.hpp" #include "utils.hpp" namespace ov {
diff --git a/src/cpp/src/perf_metrics.cpp b/src/cpp/src/perf_metrics.cpp
index bc394fae52..92b6315990 100644
--- a/src/cpp/src/perf_metrics.cpp
+++ b/src/cpp/src/perf_metrics.cpp
@@ -63,8 +63,6 @@ void PerfMetrics::evaluate_statistics(std::optional start_time) { ttft = calc_mean_and_std(raw_metrics.m_times_to_first_token); generate_duration = calc_mean_and_std(raw_metrics.generate_durations); - generate_duration = calc_mean_and_std(raw_metrics.generate_durations); - tokenization_duration = calc_mean_and_std(raw_metrics.tokenization_durations); detokenization_duration = calc_mean_and_std(raw_metrics.detokenization_durations);
From 60e71881766334a2dfd05e4b17b22e7de740d2d1 Mon Sep 17 00:00:00 2001
From: Pavel Esir
Date: Fri, 26 Jul 2024 14:56:13 +0200
Subject: [PATCH 10/11] use getter and cache evaluate results
--- .../cpp/benchmark_genai/benchmark_genai.cpp | 16 +++--- .../python/benchmark_genai/benchmark_genai.py | 14 ++--- src/README.md | 32 ++++++------ .../include/openvino/genai/perf_metrics.hpp | 16 ++++++ src/cpp/src/perf_metrics.cpp | 52 ++++++++++++++++++- src/python/py_generate_pipeline.cpp | 14 ++--- 6 files changed, 104 insertions(+), 40 deletions(-)
diff --git a/samples/cpp/benchmark_genai/benchmark_genai.cpp b/samples/cpp/benchmark_genai/benchmark_genai.cpp
index 2fd5eafc69..287d6b379a 100644
--- a/samples/cpp/benchmark_genai/benchmark_genai.cpp
+++ b/samples/cpp/benchmark_genai/benchmark_genai.cpp
@@ -50,15 +50,15 @@ int main(int argc, char* argv[]) try { res = pipe.generate(prompt, config); metrics = metrics + res.perf_metrics; } - + std::cout << std::fixed <<
std::setprecision(2); - std::cout << "Load time: " << metrics.load_time << " ms" << std::endl; - std::cout << "Generate time: " << metrics.generate_duration.mean << " ± " << metrics.generate_duration.std << " ms" << std::endl; - std::cout << "Tokenization time: " << metrics.tokenization_duration.mean << " ± " << metrics.tokenization_duration.std << " ms" << std::endl; - std::cout << "Detokenization time: " << metrics.detokenization_duration.mean << " ± " << metrics.detokenization_duration.std << " ms" << std::endl; - std::cout << "TTFT: " << metrics.ttft.mean << " ± " << metrics.ttft.std << " ms" << std::endl; - std::cout << "TPOT: " << metrics.tpot.mean << " ± " << metrics.tpot.std << " ms/token " << std::endl; - std::cout << "Throughput: " << metrics.throughput.mean << " ± " << metrics.throughput.std << " tokens/s" << std::endl; + std::cout << "Load time: " << metrics.get_load_time() << " ms" << std::endl; + std::cout << "Generate time: " << metrics.get_generate_duration().mean << " ± " << metrics.get_generate_duration().std << " ms" << std::endl; + std::cout << "Tokenization time: " << metrics.get_tokenization_duration().mean << " ± " << metrics.get_tokenization_duration().std << " ms" << std::endl; + std::cout << "Detokenization time: " << metrics.get_detokenization_duration().mean << " ± " << metrics.get_detokenization_duration().std << " ms" << std::endl; + std::cout << "TTFT: " << metrics.get_ttft().mean << " ± " << metrics.get_ttft().std << " ms" << std::endl; + std::cout << "TPOT: " << metrics.get_tpot().mean << " ± " << metrics.get_tpot().std << " ms/token " << std::endl; + std::cout << "Throughput: " << metrics.get_throughput().mean << " ± " << metrics.get_throughput().std << " tokens/s" << std::endl; return 0; } catch (const std::exception& error) {
diff --git a/samples/python/benchmark_genai/benchmark_genai.py b/samples/python/benchmark_genai/benchmark_genai.py
index ef468053d8..9851483880 100755
--- a/samples/python/benchmark_genai/benchmark_genai.py
+++ b/samples/python/benchmark_genai/benchmark_genai.py
@@ -37,13 +37,13 @@ def main(): res = pipe.generate(prompt, config) perf_metrics += res.perf_metrics - print(f"Load time: {perf_metrics.load_time:.2f} ms") - print(f"Generate time: {perf_metrics.generate_duration.mean:.2f} ± {perf_metrics.generate_duration.std:.2f} ms") - print(f"Tokenization time: {perf_metrics.tokenization_duration.mean:.2f} ± {perf_metrics.tokenization_duration.std:.2f} ms") - print(f"Detokenization time: {perf_metrics.detokenization_duration.mean:.2f} ± {perf_metrics.detokenization_duration.std:.2f} ms") - print(f"TTFT: {perf_metrics.ttft.mean:.2f} ± {perf_metrics.ttft.std:.2f} ms") - print(f"TPOT: {perf_metrics.tpot.mean:.2f} ± {perf_metrics.tpot.std:.2f} ms") - print(f"Throughput: {perf_metrics.throughput.mean:.2f} ± {perf_metrics.throughput.std:.2f} tokens/s") + print(f"Load time: {perf_metrics.get_load_time():.2f} ms") + print(f"Generate time: {perf_metrics.get_generate_duration().mean:.2f} ± {perf_metrics.get_generate_duration().std:.2f} ms") + print(f"Tokenization time: {perf_metrics.get_tokenization_duration().mean:.2f} ± {perf_metrics.get_tokenization_duration().std:.2f} ms") + print(f"Detokenization time: {perf_metrics.get_detokenization_duration().mean:.2f} ± {perf_metrics.get_detokenization_duration().std:.2f} ms") + print(f"TTFT: {perf_metrics.get_ttft().mean:.2f} ± {perf_metrics.get_ttft().std:.2f} ms") + print(f"TPOT: {perf_metrics.get_tpot().mean:.2f} ± {perf_metrics.get_tpot().std:.2f} ms") + print(f"Throughput:
{perf_metrics.get_throughput().mean:.2f} ± {perf_metrics.get_throughput().std:.2f} tokens/s") if __name__ == "__main__": main()
diff --git a/src/README.md b/src/README.md
index aa4dc0f301..aefa993d8e 100644
--- a/src/README.md
+++ b/src/README.md
@@ -219,10 +219,10 @@ pipe = ov_genai.LLMPipeline(model_path, "CPU") result = pipe.generate(["The Sun is yellow because"], max_new_tokens=20) perf_metrics = result.perf_metrics -print(f'Generate duration: {perf_metrics.generate_duration.mean:.2f}') -print(f'TTFT: {perf_metrics.ttft.mean:.2f} ms') -print(f'TPOT: {perf_metrics.tpot.mean:.2f} ms/token') -print(f'Throughput: {perf_metrics.throughput.mean:.2f} tokens/s') +print(f'Generate duration: {perf_metrics.get_generate_duration().mean:.2f}') +print(f'TTFT: {perf_metrics.get_ttft().mean:.2f} ms') +print(f'TPOT: {perf_metrics.get_tpot().mean:.2f} ms/token') +print(f'Throughput: {perf_metrics.get_throughput().mean:.2f} tokens/s') ``` ```cpp
@@ -236,10 +236,10 @@ int main(int argc, char* argv[]) { auto perf_metrics = result.perf_metrics; std::cout << std::fixed << std::setprecision(2); - std::cout << "Generate duration: " << perf_metrics.generate_duration.mean << " ms" << std::endl; - std::cout << "TTFT: " << perf_metrics.ttft.mean << " ms" << std::endl; - std::cout << "TPOT: " << perf_metrics.tpot.mean << " ms/token " << std::endl; - std::cout << "Throughput: " << perf_metrics.throughput.mean << " tokens/s" << std::endl; + std::cout << "Generate duration: " << perf_metrics.get_generate_duration().mean << " ms" << std::endl; + std::cout << "TTFT: " << perf_metrics.get_ttft().mean << " ms" << std::endl; + std::cout << "TPOT: " << perf_metrics.get_tpot().mean << " ms/token " << std::endl; + std::cout << "Throughput: " << perf_metrics.get_throughput().mean << " tokens/s" << std::endl; } ``` output:
@@ -265,10 +265,10 @@ int main(int argc, char* argv[]) { auto perf_metrics = result_1.perf_metrics + result_2.perf_metrics; std::cout << std::fixed << std::setprecision(2); - std::cout << "Generate duration: " << perf_metrics.generate_duration.mean << " ms" << std::endl; - std::cout << "TTFT: " << perf_metrics.ttft.mean << " ms" << std::endl; - std::cout << "TPOT: " << perf_metrics.tpot.mean << " ms/token " << std::endl; - std::cout << "Throughput: " << perf_metrics.throughput.mean << " tokens/s" << std::endl; + std::cout << "Generate duration: " << perf_metrics.get_generate_duration().mean << " ms" << std::endl; + std::cout << "TTFT: " << perf_metrics.get_ttft().mean << " ms" << std::endl; + std::cout << "TPOT: " << perf_metrics.get_tpot().mean << " ms/token " << std::endl; + std::cout << "Throughput: " << perf_metrics.get_throughput().mean << " tokens/s" << std::endl; } ```
@@ -279,10 +279,10 @@ res_1 = pipe.generate(["The Sun is yellow because"], max_new_tokens=20) res_2 = pipe.generate(["Why Sky is blue because"], max_new_tokens=20) perf_metrics = res_1.perf_metrics + res_2.perf_metrics -print(f'Generate duration: {perf_metrics.generate_duration.mean:.2f}') -print(f'TTFT: {perf_metrics.ttft.mean:.2f} ms') -print(f'TPOT: {perf_metrics.tpot.mean:.2f} ms/token') -print(f'Throughput: {perf_metrics.throughput.mean:.2f} tokens/s') +print(f'Generate duration: {perf_metrics.get_generate_duration().mean:.2f}') +print(f'TTFT: {perf_metrics.get_ttft().mean:.2f} ms') +print(f'TPOT: {perf_metrics.get_tpot().mean:.2f} ms/token') +print(f'Throughput: {perf_metrics.get_throughput().mean:.2f} tokens/s') ``` ## How It Works
diff --git a/src/cpp/include/openvino/genai/perf_metrics.hpp b/src/cpp/include/openvino/genai/perf_metrics.hpp
index 8715761792..ddb9ff581f 100644
---
a/src/cpp/include/openvino/genai/perf_metrics.hpp
+++ b/src/cpp/include/openvino/genai/perf_metrics.hpp
@@ -57,6 +57,22 @@ struct OPENVINO_GENAI_EXPORTS PerfMetrics { size_t num_generated_tokens; size_t num_input_tokens; + + float get_load_time(); // Load time in ms. + float get_num_generated_tokens(); + float get_num_input_tokens(); + MeanStdPair get_ttft(); // Time to the first token (in ms) (TTFT). + MeanStdPair get_tpot(); // Time (in ms) per output token (TPOT). + MeanStdPair get_throughput(); // Tokens per second. + + MeanStdPair get_generate_duration(); + MeanStdPair get_tokenization_duration(); + MeanStdPair get_detokenization_duration(); + + // Flag indicating if raw metrics were evaluated. + // If false, the current mean/std TTFT, TPOT, etc. are stale + // and evaluate_statistics() should recalculate them. + bool m_evaluated = false; /** * @brief calculates mean/std values from raw_metrics.
diff --git a/src/cpp/src/perf_metrics.cpp b/src/cpp/src/perf_metrics.cpp
index 92b6315990..2f378ab302 100644
--- a/src/cpp/src/perf_metrics.cpp
+++ b/src/cpp/src/perf_metrics.cpp
@@ -32,11 +32,58 @@ ov::genai::MeanStdPair calc_mean_and_std(const std::vector(duration).count(); } - + void PerfMetrics::evaluate_statistics(std::optional start_time) { + if (m_evaluated) { + return; + } // If start_time is specified then recalculate durations according to start times and calculate statistics only after that. if (start_time.has_value()) { auto start_time_val = *start_time;
@@ -68,6 +115,7 @@ void PerfMetrics::evaluate_statistics(std::optional start_time) { // tokens per second throughput = {1000.0f / tpot.mean, (tpot.std * 1000.0f) / (tpot.mean * tpot.mean)}; + m_evaluated = true; } PerfMetrics PerfMetrics::operator+(const PerfMetrics& right) const {
@@ -103,7 +151,7 @@ PerfMetrics PerfMetrics::operator+(const PerfMetrics& right) const { res.num_generated_tokens = num_generated_tokens + right.num_generated_tokens; res.num_input_tokens = num_input_tokens + right.num_input_tokens; res.load_time = load_time; - res.evaluate_statistics(); + res.m_evaluated = false; return res; }
diff --git a/src/python/py_generate_pipeline.cpp b/src/python/py_generate_pipeline.cpp
index ed687d6f40..9bee185ff7 100644
--- a/src/python/py_generate_pipeline.cpp
+++ b/src/python/py_generate_pipeline.cpp
@@ -606,13 +606,13 @@ PYBIND11_MODULE(py_generate_pipeline, m) { py::class_(m, "PerfMetrics") .def(py::init<>()) - .def_readonly("generate_duration", &PerfMetrics::generate_duration) - .def_readonly("tokenization_duration", &PerfMetrics::tokenization_duration) - .def_readonly("detokenization_duration", &PerfMetrics::detokenization_duration) - .def_readonly("throughput", &PerfMetrics::throughput) - .def_readonly("tpot", &PerfMetrics::tpot) - .def_readonly("ttft", &PerfMetrics::ttft) - .def_readonly("load_time", &PerfMetrics::load_time) + .def("get_generate_duration", &PerfMetrics::get_generate_duration) + .def("get_tokenization_duration", &PerfMetrics::get_tokenization_duration) + .def("get_detokenization_duration", &PerfMetrics::get_detokenization_duration) + .def("get_throughput", &PerfMetrics::get_throughput) + .def("get_tpot", &PerfMetrics::get_tpot) + .def("get_ttft", &PerfMetrics::get_ttft) + .def("get_load_time", &PerfMetrics::get_load_time) .def("__add__", &PerfMetrics::operator+) .def("__iadd__", &PerfMetrics::operator+=) .def_readonly("raw_metrics", &PerfMetrics::raw_metrics);
From e553ef5dd78ea6bb11cc32bdfb6fb397cba55a24 Mon Sep 17 00:00:00 2001
From: Pavel Esir
Date: Fri, 26 Jul 2024 15:11:41 +0200
Subject:
[PATCH 11/11] update READMEs
--- samples/cpp/benchmark_genai/README.md | 4 ++-- samples/python/benchmark_genai/README.md | 4 ++-- src/README.md | 2 ++ 3 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/samples/cpp/benchmark_genai/README.md b/samples/cpp/benchmark_genai/README.md
index bac16c2f7d..616bb6a36d 100644
--- a/samples/cpp/benchmark_genai/README.md
+++ b/samples/cpp/benchmark_genai/README.md
@@ -1,6 +1,6 @@ -# Benchmarking Vanilla GenAI +# LLM benchmarking sample -This sample script demonstrates how to benchmark an LLMModel in OpenVINO GenAI. The script includes functionality for warm-up iterations, generating text, and calculating various performance metrics. +This sample script demonstrates how to benchmark an LLM in OpenVINO GenAI. The script includes functionality for warm-up iterations, generating text, and calculating various performance metrics. ## Download and convert the model and tokenizers
diff --git a/samples/python/benchmark_genai/README.md b/samples/python/benchmark_genai/README.md
index fa4fa85576..1ff9ef4305 100644
--- a/samples/python/benchmark_genai/README.md
+++ b/samples/python/benchmark_genai/README.md
@@ -1,6 +1,6 @@ -# Benchmarking Vanilla GenAI +# LLM benchmarking sample -This sample script demonstrates how to benchmark an LLMModel in OpenVINO GenAI. The script includes functionality for warm-up iterations, generating text, and calculating various performance metrics. +This sample script demonstrates how to benchmark an LLM in OpenVINO GenAI. The script includes functionality for warm-up iterations, generating text, and calculating various performance metrics. ## Download and convert the model and tokenizers
diff --git a/src/README.md b/src/README.md
index aefa993d8e..e88c2f784f 100644
--- a/src/README.md
+++ b/src/README.md
@@ -285,6 +285,8 @@ print(f'TPOT: {perf_metrics.get_tpot().mean:.2f} ms/token') print(f'Throughput: {perf_metrics.get_throughput().mean:.2f} tokens/s') ``` +For more examples of how metrics are used, please refer to the Python [benchmark_genai.py](https://github.com/openvinotoolkit/openvino.genai/tree/releases/2024/3/samples/python/benchmark_genai/README.md) and C++ [benchmark_genai](https://github.com/openvinotoolkit/openvino.genai/tree/releases/2024/3/samples/cpp/benchmark_genai/README.md) samples. + ## How It Works For information on how OpenVINO™ GenAI works, refer to the [How It Works Section](https://github.com/openvinotoolkit/openvino.genai/tree/releases/2024/2/src/docs/HOW_IT_WORKS.md).
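+As a minimal sketch of the custom statistics mentioned above (this is hypothetical usage, not part of the samples: it assumes `model_path` is defined, NumPy is available, and the raw duration vectors are exposed to Python as microsecond counts, as the `get_ms` binding in this series suggests), a median or a percentile can be derived directly from `raw_metrics`:
+
+```python
+import numpy as np
+import openvino_genai as ov_genai
+
+pipe = ov_genai.LLMPipeline(model_path, "CPU")
+res = pipe.generate(["The Sun is yellow because"], max_new_tokens=20)
+raw_metrics = res.perf_metrics.raw_metrics
+
+# m_durations holds per-token generation durations in microseconds (assumption
+# based on the bindings above); convert to milliseconds before aggregating.
+durations_ms = np.array(raw_metrics.m_durations) / 1000
+print(f'Median TPOT: {np.median(durations_ms):.2f} ms')
+print(f'P95 TPOT: {np.percentile(durations_ms, 95):.2f} ms')
+```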