diff --git a/setup.cfg b/setup.cfg
index 2ceb9ea..abcc5bb 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = tensor_parallel
-version = 1.3.0
+version = 1.3.1
 author = Andrei Panferov and Yaroslav Lisnyak
 author_email = yalisnyak@nes.com
 description = Automatically shard your large model between multiple GPUs, works without torch.distributed
diff --git a/src/tensor_parallel/pretrained_model.py b/src/tensor_parallel/pretrained_model.py
index d2e42ee..33c5d01 100644
--- a/src/tensor_parallel/pretrained_model.py
+++ b/src/tensor_parallel/pretrained_model.py
@@ -24,9 +24,9 @@ def find_predefined_tensor_parallel_config(
 ) -> Optional[Config]:
     device_ids = check_device_ids(device_ids)

-    try:
+    if model_config.model_type in PREDEFINED_CONFIGS:
         return PREDEFINED_CONFIGS[model_config.model_type](model_config, device_ids)
-    except KeyError:
+    else:
         logger.warning(
             "Using automatic config: tensor parallel config not provided and no custom config registered for the model"
         )
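
Context for the second hunk (a sketch, not part of the diff itself): the likely benefit of the explicit membership test over `except KeyError` is that the old pattern also swallowed any `KeyError` raised *inside* the predefined config factory, misreporting an internal bug as "no config registered". A minimal self-contained illustration, where `buggy_bloom_config` is a hypothetical stand-in for a config factory and the `find_config_*` helpers mirror (but are not) the library's function:

```python
import logging

logger = logging.getLogger(__name__)


def buggy_bloom_config(model_config, device_ids):
    # Hypothetical factory with an internal bug: it raises KeyError itself,
    # e.g. while looking up a missing attribute in a dict.
    raise KeyError("hidden_size")


PREDEFINED_CONFIGS = {"bloom": buggy_bloom_config}


def find_config_old(model_type, model_config=None, device_ids=None):
    try:
        return PREDEFINED_CONFIGS[model_type](model_config, device_ids)
    except KeyError:  # also catches the factory's internal KeyError, hiding the bug
        logger.warning("Using automatic config: no config registered for the model")


def find_config_new(model_type, model_config=None, device_ids=None):
    if model_type in PREDEFINED_CONFIGS:
        # An internal KeyError from the factory now propagates to the caller
        # instead of being silently converted into a fallback.
        return PREDEFINED_CONFIGS[model_type](model_config, device_ids)
    else:
        logger.warning("Using automatic config: no config registered for the model")


find_config_old("bloom")  # logs a misleading warning, returns None
find_config_new("bloom")  # raises KeyError("hidden_size"), surfacing the bug
```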