From 2c5204bd16eb9fb277053be28089743fd6de386d Mon Sep 17 00:00:00 2001
From: okada
Date: Thu, 23 Nov 2023 16:40:22 +0900
Subject: [PATCH] complete only one line

---
 flatline_lsp.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/flatline_lsp.py b/flatline_lsp.py
index e08fb6e..1d2de39 100644
--- a/flatline_lsp.py
+++ b/flatline_lsp.py
@@ -120,8 +120,8 @@ def generate_completion(self, text: str) -> str:
         tokenized_prompt = self.tokenizer(text).input_ids
         generated_tokens = self.model.generate(inputs=torch.LongTensor(
             [tokenized_prompt]), max_new_tokens=self.max_new_tokens, do_sample=False,
-            #stopping_criteria=[stop_cutoff_completion, self.stop_word])[0]
-            stopping_criteria=[stop_cutoff_completion])[0]
+            stopping_criteria=[stop_cutoff_completion, self.stop_word])[0]
+            # stopping_criteria=[stop_cutoff_completion])[0]
         generated_text = self.tokenizer.decode(generated_tokens[len(tokenized_prompt):])
         return generated_text
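
Note: judging from the subject line, self.stop_word is presumably a transformers
StoppingCriteria that halts generation at a newline, which is what limits the
completion to a single line once it is passed to model.generate. A minimal sketch
of such a criterion, assuming a hypothetical class name StopAtNewline and that the
prompt length is known, could look like this (the names and constructor arguments
are illustrative, not taken from the patched file):

    import torch
    from transformers import StoppingCriteria

    class StopAtNewline(StoppingCriteria):
        """Stop generation as soon as the newly generated text contains a newline."""

        def __init__(self, tokenizer, prompt_len: int):
            self.tokenizer = tokenizer
            self.prompt_len = prompt_len

        def __call__(self, input_ids: torch.LongTensor, scores, **kwargs) -> bool:
            # Decode only the tokens produced after the prompt and check for "\n".
            new_text = self.tokenizer.decode(input_ids[0][self.prompt_len:])
            return "\n" in new_text

An instance of such a class would be passed in the stopping_criteria list alongside
stop_cutoff_completion, as the re-enabled line in the hunk above does with
self.stop_word.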