diff --git a/libs/community/langchain_community/llms/ipex_llm.py b/libs/community/langchain_community/llms/ipex_llm.py
index d6173dff1f260..eca0171a4fcff 100644
--- a/libs/community/langchain_community/llms/ipex_llm.py
+++ b/libs/community/langchain_community/llms/ipex_llm.py
@@ -143,15 +143,15 @@ def _load_model(
         _tokenizer_id = tokenizer_id or model_id
 
         # Set "cpu" as default device
-        if "device" not in model_kwargs:
-            model_kwargs["device"] = "cpu"
+        if "device" not in _model_kwargs:
+            _model_kwargs["device"] = "cpu"
 
-        if model_kwargs["device"] not in ["cpu", "xpu"]:
+        if _model_kwargs["device"] not in ["cpu", "xpu"]:
             raise ValueError(
                 "IpexLLMBgeEmbeddings currently only supports device to be "
-                f"'cpu' or 'xpu', but you have: {model_kwargs['device']}."
+                f"'cpu' or 'xpu', but you have: {_model_kwargs['device']}."
             )
-        device = model_kwargs.pop("device")
+        device = _model_kwargs.pop("device")
 
         try:
             tokenizer = AutoTokenizer.from_pretrained(_tokenizer_id, **_model_kwargs)
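
For context, a minimal usage sketch of how the fix surfaces to callers. It assumes details not shown in this hunk: that `_model_kwargs` is derived earlier from the caller's `model_kwargs` (e.g. `model_kwargs or {}`), and that `IpexLLM.from_model_id` forwards `model_kwargs` to `_load_model`, so the `device` entry is popped before the remaining kwargs reach `AutoTokenizer.from_pretrained`. The model ID below is illustrative.

```python
# Hypothetical usage sketch, not part of the patch. Assumes the "device" entry
# is routed through model_kwargs into the patched _load_model, as the hunk
# above suggests.
from langchain_community.llms import IpexLLM

# Explicit device selection: "device" is validated against {"cpu", "xpu"} and
# popped from the internal _model_kwargs before the tokenizer/model are loaded.
llm_xpu = IpexLLM.from_model_id(
    model_id="lmsys/vicuna-7b-v1.5",
    model_kwargs={"temperature": 0, "max_length": 64, "device": "xpu"},
)

# No model_kwargs at all: with the fix, the "cpu" default is written into the
# internal _model_kwargs dict rather than the caller's (possibly None) argument,
# which is what the pre-patch code would have tried to index.
llm_cpu = IpexLLM.from_model_id(model_id="lmsys/vicuna-7b-v1.5")
```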