From 3e1dd3c9ea69781edc3970403f5103a133cc4c19 Mon Sep 17 00:00:00 2001 From: ivy-lv11 Date: Mon, 3 Jun 2024 16:10:53 +0800 Subject: [PATCH] community: fix ipex_llm device handling to use _model_kwargs instead of model_kwargs --- libs/community/langchain_community/llms/ipex_llm.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/libs/community/langchain_community/llms/ipex_llm.py b/libs/community/langchain_community/llms/ipex_llm.py index d6173dff1f260..eca0171a4fcff 100644 --- a/libs/community/langchain_community/llms/ipex_llm.py +++ b/libs/community/langchain_community/llms/ipex_llm.py @@ -143,15 +143,15 @@ def _load_model( _tokenizer_id = tokenizer_id or model_id # Set "cpu" as default device - if "device" not in model_kwargs: - model_kwargs["device"] = "cpu" + if "device" not in _model_kwargs: + _model_kwargs["device"] = "cpu" - if model_kwargs["device"] not in ["cpu", "xpu"]: + if _model_kwargs["device"] not in ["cpu", "xpu"]: raise ValueError( "IpexLLMBgeEmbeddings currently only supports device to be " - f"'cpu' or 'xpu', but you have: {model_kwargs['device']}." + f"'cpu' or 'xpu', but you have: {_model_kwargs['device']}." ) - device = model_kwargs.pop("device") + device = _model_kwargs.pop("device") try: tokenizer = AutoTokenizer.from_pretrained(_tokenizer_id, **_model_kwargs)