diff --git a/python/llm/src/ipex_llm/transformers/low_bit_linear.py b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
index ed44140d708..3d328812c7d 100644
--- a/python/llm/src/ipex_llm/transformers/low_bit_linear.py
+++ b/python/llm/src/ipex_llm/transformers/low_bit_linear.py
@@ -405,6 +405,7 @@ def use_batch_forward(x: torch.Tensor, qtype: int, output_len: int):
             or (device in ["arc", "flex"] and qtype in [SYM_INT8, FP4])
             or (device in ["arc", "flex", "mtl"] and qtype in [FP8E4])
             or (device in ["lnl"] and qtype in [SYM_INT4] and x.shape[1] % 512 == 0)
+            or (device in ["bmg"] and qtype in [SYM_INT4, FP8E5])
         )
     return False
 
diff --git a/python/llm/src/ipex_llm/transformers/utils.py b/python/llm/src/ipex_llm/transformers/utils.py
index 2ec0dcf456f..b2ae0ca374d 100644
--- a/python/llm/src/ipex_llm/transformers/utils.py
+++ b/python/llm/src/ipex_llm/transformers/utils.py
@@ -174,6 +174,8 @@ def get_xpu_device_type(x):
     name = torch.xpu.get_device_name(x.device.index)
     if name.startswith("Intel(R) Arc(TM) A"):
         return "arc"
+    elif name.startswith("Intel(R) Graphics [0xe20b]"):
+        return "bmg"
     elif name.startswith("Intel(R) Arc(TM)"):
         if 'V' in name:
             return "lnl"