From fa81dbefd384fdb1155e2d63e52e839dc6cc8d40 Mon Sep 17 00:00:00 2001
From: Cengguang Zhang
Date: Tue, 9 Jul 2024 11:14:17 +0800
Subject: [PATCH] LLM: update multi gpu write csv in all-in-one benchmark.
 (#11538)

---
 python/llm/dev/benchmark/all-in-one/run.py | 23 +++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/python/llm/dev/benchmark/all-in-one/run.py b/python/llm/dev/benchmark/all-in-one/run.py
index 9544f07f263..cfaa1f973f1 100644
--- a/python/llm/dev/benchmark/all-in-one/run.py
+++ b/python/llm/dev/benchmark/all-in-one/run.py
@@ -1980,11 +1980,20 @@ def run_pipeline_parallel_gpu(repo_id,
         df = pd.DataFrame(results, columns=['model', '1st token avg latency (ms)', '2+ avg latency (ms/token)',
                                             'encoder time (ms)', 'input/output tokens', 'batch_size', 'actual input/output tokens',
                                             'num_beams', 'low_bit', 'cpu_embedding', 'model loading time (s)', 'peak mem (GB)', 'streaming', 'use_fp16_torch_dtype'])
-        df.index += max(line_counter-1, 0)
-        if api not in ["transformer_int4_gpu", "transformer_int4_fp16_gpu"]:
-            if line_counter == 0:
-                df.to_csv(csv_name, mode='a')
-            else:
-                df.to_csv(csv_name, mode='a', header=None)
-            line_counter += len(df.index)
+        if "pipeline" in api or "deepspeed" in api:
+            if torch.distributed.get_rank() == 0:
+                df.index += max(line_counter - 1, 0)
+                if line_counter == 0:
+                    df.to_csv(csv_name, mode='a')
+                else:
+                    df.to_csv(csv_name, mode='a', header=None)
+                line_counter += len(df.index)
+        else:
+            df.index += max(line_counter - 1, 0)
+            if api not in ["transformer_int4_gpu", "transformer_int4_fp16_gpu"]:
+                if line_counter == 0:
+                    df.to_csv(csv_name, mode='a')
+                else:
+                    df.to_csv(csv_name, mode='a', header=None)
+                line_counter += len(df.index)
         results = []