From 9368d9333a6076f78216debe48abdcc09f091ee5 Mon Sep 17 00:00:00 2001
From: okada
Date: Thu, 23 Nov 2023 16:40:05 +0900
Subject: [PATCH] rename

---
 flatline_lsp.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/flatline_lsp.py b/flatline_lsp.py
index f9d12b9..05eb86f 100644
--- a/flatline_lsp.py
+++ b/flatline_lsp.py
@@ -24,7 +24,7 @@ def __init__(self, model_name, vocab_size, config: LlamaCppConfig, n_threads: in
         self.vocab_size = vocab_size
 
         # self.model = AutoModelForCausalLM.from_pretrained("gpt2")
-        self.plamo_cpp_model = infer.load_model_from_file(model_name, n_threads)
+        self.llama_cpp_model = infer.load_model_from_file(model_name, n_threads)
 
     @property
     def device(self) -> torch.device:
@@ -39,7 +39,7 @@ def forward(  # type: ignore
         input_ids: torch.LongTensor,
         **kwargs,
     ) -> CausalLMOutput:
-        logits = torch.from_numpy(self.plamo_cpp_model.calc_next_token_logits(
+        logits = torch.from_numpy(self.llama_cpp_model.calc_next_token_logits(
             input_ids.numpy(), self.vocab_size))
         return CausalLMOutput(
             loss=None,
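
Note (not part of the patch): a minimal, hypothetical sketch of how the wrapper touched by this rename might be exercised. Only the pieces visible in the hunks above (LlamaCppConfig, infer.load_model_from_file, calc_next_token_logits, and forward returning CausalLMOutput) come from the source; the class name LlamaCppModel and all argument values are assumptions for illustration.

    # Hypothetical usage sketch; class name and argument values are assumed,
    # only the attributes and signatures shown in the diff are taken as given.
    import torch

    from flatline_lsp import LlamaCppConfig, LlamaCppModel  # assumed class name

    model = LlamaCppModel(
        model_name="model.gguf",   # assumed path to a llama.cpp-compatible model file
        vocab_size=32000,          # assumed vocabulary size
        config=LlamaCppConfig(),   # no-arg construction is assumed
        n_threads=4,
    )

    input_ids = torch.tensor([[1, 2, 3]], dtype=torch.long)
    out = model.forward(input_ids=input_ids)  # wraps llama_cpp_model.calc_next_token_logits
    print(out.logits.shape)  # logits are returned as a torch tensor via torch.from_numpy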