From 6b0c247ab70053866b53f067516cb891e0f5575d Mon Sep 17 00:00:00 2001
From: IlyasMoutawwakil
Date: Fri, 24 Nov 2023 06:38:33 +0000
Subject: [PATCH] trigger super class' __post_init__

---
 optimum_benchmark/backends/config.py                   | 9 ++++++---
 optimum_benchmark/backends/neural_compressor/config.py | 2 ++
 optimum_benchmark/backends/onnxruntime/config.py       | 2 ++
 optimum_benchmark/backends/pytorch/config.py           | 2 ++
 4 files changed, 12 insertions(+), 3 deletions(-)

diff --git a/optimum_benchmark/backends/config.py b/optimum_benchmark/backends/config.py
index 68bfab68..d0ac9cee 100644
--- a/optimum_benchmark/backends/config.py
+++ b/optimum_benchmark/backends/config.py
@@ -1,9 +1,12 @@
 from abc import ABC
+from logging import getLogger
 from dataclasses import dataclass
 from typing import Optional, TypeVar
 
 from psutil import cpu_count
 
+LOGGER = getLogger("backend")
+
 
 @dataclass
 class BackendConfig(ABC):
@@ -18,7 +21,7 @@ class BackendConfig(ABC):
 
     # device isolation options
     continuous_isolation: bool = True
-    isolation_check_interval: Optional[int] = None
+    isolation_check_interval: Optional[float] = None
 
     # clean up options
     delete_cache: bool = False
@@ -32,8 +35,8 @@ def __post_init__(self):
         if self.intra_op_num_threads == -1:
             self.intra_op_num_threads = cpu_count()
 
-        if self.isolation_check_interval is None:
-            self.isolation_check_interval = 1  # 1 second
+        if self.continuous_isolation and self.isolation_check_interval is None:
+            self.isolation_check_interval = 1
 
 
 BackendConfigT = TypeVar("BackendConfigT", bound=BackendConfig)
diff --git a/optimum_benchmark/backends/neural_compressor/config.py b/optimum_benchmark/backends/neural_compressor/config.py
index c0b9754f..4ae8e953 100644
--- a/optimum_benchmark/backends/neural_compressor/config.py
+++ b/optimum_benchmark/backends/neural_compressor/config.py
@@ -72,6 +72,8 @@ class INCConfig(BackendConfig):
     calibration_config: Dict[str, Any] = field(default_factory=dict)
 
     def __post_init__(self):
+        super().__post_init__()
+
         if self.ptq_quantization:
             self.ptq_quantization_config = OmegaConf.to_object(
                 OmegaConf.merge(PTQ_QUANTIZATION_CONFIG, self.ptq_quantization_config)
diff --git a/optimum_benchmark/backends/onnxruntime/config.py b/optimum_benchmark/backends/onnxruntime/config.py
index 10c7b79f..4f4b5dd4 100644
--- a/optimum_benchmark/backends/onnxruntime/config.py
+++ b/optimum_benchmark/backends/onnxruntime/config.py
@@ -130,6 +130,8 @@ class ORTConfig(BackendConfig):
     peft_config: Dict[str, Any] = field(default_factory=dict)
 
     def __post_init__(self):
+        super().__post_init__()
+
         if not self.no_weights and not self.export and self.torch_dtype is not None:
             raise NotImplementedError("Can't convert an exported model's weights to a different dtype.")
 
diff --git a/optimum_benchmark/backends/pytorch/config.py b/optimum_benchmark/backends/pytorch/config.py
index 071fbc28..497c171a 100644
--- a/optimum_benchmark/backends/pytorch/config.py
+++ b/optimum_benchmark/backends/pytorch/config.py
@@ -72,6 +72,8 @@ class PyTorchConfig(BackendConfig):
     peft_config: Dict[str, Any] = field(default_factory=dict)
 
     def __post_init__(self):
+        super().__post_init__()
+
         if self.torch_compile:
             self.torch_compile_config = OmegaConf.to_object(OmegaConf.merge(COMPILE_CONFIG, self.torch_compile_config))
 
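
A minimal, self-contained sketch of the Python behavior this patch relies on: when a dataclass
subclass defines its own __post_init__, it replaces the parent's, so the parent's defaulting logic
(here, filling isolation_check_interval) only runs if the subclass chains up with
super().__post_init__(). The Base/Child class names below are illustrative, not from the repository.

    from dataclasses import dataclass
    from typing import Optional


    @dataclass
    class Base:
        continuous_isolation: bool = True
        isolation_check_interval: Optional[float] = None

        def __post_init__(self):
            # mirrors BackendConfig: only default the interval when isolation is enabled
            if self.continuous_isolation and self.isolation_check_interval is None:
                self.isolation_check_interval = 1


    @dataclass
    class Child(Base):
        def __post_init__(self):
            super().__post_init__()  # without this call, Base's defaulting is silently skipped


    print(Child().isolation_check_interval)  # prints 1

Without the super().__post_init__() line, Child().isolation_check_interval would stay None, which is
why the three backend configs above now call it explicitly at the top of their own __post_init__.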