Hotfix of BCE-Embedding model (#12490)
plusbang authored Dec 3, 2024
1 parent 80f15e4 commit c592844
Showing 2 changed files with 3 additions and 3 deletions.

python/llm/src/ipex_llm/transformers/npu_model.py (2 additions & 2 deletions)

@@ -766,7 +766,7 @@ def optimize_npu_model(cls, *args, **kwargs):
      optimize_llm_pre(model, qtype, mixed_precision,
                       quantization_group_size=quantization_group_size)
      cls.load_convert_fp16(qtype, model.encoder, "cpu", modules_to_not_convert,
-                           quantization_group_size, None, *args, **kwargs)
+                           quantization_group_size)
      create_npu_kernels(model.encoder)
      model = model.eval()
      logger.info(f"Finish to convert model")
@@ -781,7 +781,7 @@ def optimize_npu_model(cls, *args, **kwargs):

      @classmethod
      def load_convert_fp16(cls, q_k, optimize_model, device, modules_to_not_convert,
-                           group_size=0, imatrix_data=None, *arg, **kwarg):
+                           group_size=0, imatrix_data=None):
          from ipex_llm.transformers.npu_models.xlm_mp import replace_with_FP16Linear
          replace_with_FP16Linear(optimize_model, q_k, device=device,
                                  modules_to_not_convert=modules_to_not_convert,
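
The npu_model.py change above adjusts the fp16 conversion path so that the call to load_convert_fp16 and the classmethod's signature agree on which arguments are passed: the XLM encoder branch stops forwarding None, *args, **kwargs, and the method stops declaring a catch-all *arg, **kwarg. As a minimal sketch of the underlying Python rule (hypothetical names, not ipex-llm code): a callee only tolerates extra arguments if its signature declares *args/**kwargs, otherwise the call raises TypeError.

    # Minimal sketch with hypothetical names (convert, caller); illustrates
    # why a call site and a signature must be kept in sync once catch-all
    # parameters are removed.

    def convert(model, group_size=0, imatrix_data=None):
        # No *args/**kwargs here, so only the declared parameters are accepted.
        return model, group_size, imatrix_data

    def caller(model, **load_kwargs):
        # Forwarding load-time kwargs would fail once convert() stops
        # declaring a catch-all:
        #   convert(model, 64, None, **load_kwargs)  # TypeError for unknown keys
        # Trimming the call keeps the two definitions consistent.
        return convert(model, 64)

    print(caller(object(), trust_remote_code=True))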

python/llm/src/ipex_llm/transformers/npu_models/xlm_mp.py (1 addition & 1 deletion)

@@ -711,7 +711,7 @@ def forward(self, x):

  @module_optimization
  def replace_with_Layernorm(layer, qtype=None, device='NPU',
-                            modules_to_not_convert=[], group_size=0):
+                            modules_to_not_convert=[], group_size=0, **kwargs):
      if isinstance(layer, torch.nn.LayerNorm):
          return XLMLayerNorm(
              weight=layer.weight.to(torch.float16),
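
The xlm_mp.py change is the complementary side of the fix: replace_with_Layernorm gains a trailing **kwargs, presumably so it tolerates extra keyword arguments handed to it through the module_optimization decorator it is wrapped in. The sketch below illustrates that pattern under an assumption: module_optimization_sketch is a hypothetical stand-in dispatcher that forwards one uniform keyword set to every replacement function, not the actual ipex-llm decorator.

    import torch

    def module_optimization_sketch(func):
        # Hypothetical dispatcher: walks the model and calls the replacement
        # function on every child, always forwarding the same keyword set.
        def wrapper(model, qtype=None, device='NPU', **kwargs):
            for name, layer in model.named_children():
                new_layer = func(layer, qtype=qtype, device=device,
                                 group_size=0, imatrix=None, **kwargs)
                if new_layer is not None:
                    setattr(model, name, new_layer)
        return wrapper

    @module_optimization_sketch
    def replace_layernorm_sketch(layer, qtype=None, device='NPU',
                                 modules_to_not_convert=[], group_size=0, **kwargs):
        # Without the trailing **kwargs, the imatrix keyword forwarded by the
        # dispatcher above would raise TypeError; with it, unknown keywords
        # are absorbed and ignored.
        if isinstance(layer, torch.nn.LayerNorm):
            return torch.nn.LayerNorm(layer.normalized_shape).to(torch.float16)
        return None

    model = torch.nn.Sequential(torch.nn.LayerNorm(8), torch.nn.Linear(8, 8))
    replace_layernorm_sketch(model, qtype='fp16', device='CPU',
                             modules_to_not_convert=[])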
