diff --git a/lmformatenforcer/jsonschemaparser.py b/lmformatenforcer/jsonschemaparser.py
index 0692faf..a466964 100644
--- a/lmformatenforcer/jsonschemaparser.py
+++ b/lmformatenforcer/jsonschemaparser.py
@@ -132,7 +132,6 @@ def get_allowed_characters(self) -> str:
         # characters when the object stack is empty (= we are done parsing)
         allowed_characters = WHITESPACE_CHARACTERS

-        print("MAXCONSWS: ", self.config.max_consecutive_whitespaces)
         if self.num_consecutive_whitespaces >= self.config.max_consecutive_whitespaces:
             # print("Filtering whitespace characters")
             allowed_characters = "".join(c for c in allowed_characters if c not in WHITESPACE_CHARACTERS)
diff --git a/lmformatenforcer/tokenenforcer.py b/lmformatenforcer/tokenenforcer.py
index 61d3cff..7c7beb0 100644
--- a/lmformatenforcer/tokenenforcer.py
+++ b/lmformatenforcer/tokenenforcer.py
@@ -162,7 +162,6 @@ def _apply_new_characters(self, state: 'TokenEnforcer.OutputTensorState', token_
             new_characters = new_decoded[len(prev_decoded):]

         if len(new_characters) == 1 and self.tokenizer_tree.tokens_to_strs.get(token_sequence[-2]) == '�' and self.tokenizer_tree.tokens_to_strs[new_token] == '�':
-            print("TRIGGERED")
             decoded_unicode_char = self.decoder(token_sequence[-2:])
             new_characters = 'X'*len(decoded_unicode_char)

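For reviewers, a minimal sketch (not part of this patch) of the behavior the first hunk touches: once `max_consecutive_whitespaces` consecutive whitespace characters have been consumed, `get_allowed_characters()` stops offering whitespace. The top-level import of `CharacterLevelParserConfig` and the `config=` keyword argument are assumptions about the package's public API, not something this diff verifies.

```python
# Sketch only; assumes CharacterLevelParserConfig is exported at the package
# root and that JsonSchemaParser accepts a config= keyword argument.
from lmformatenforcer import CharacterLevelParserConfig, JsonSchemaParser

# Allow at most two consecutive whitespace characters during parsing.
config = CharacterLevelParserConfig(max_consecutive_whitespaces=2)
parser = JsonSchemaParser({"type": "object"}, config=config)

# Enter the object, then consume the maximum run of whitespace.
parser = parser.add_character("{")
for _ in range(2):
    parser = parser.add_character(" ")

# With the limit reached, whitespace is filtered from the allowed set,
# which is what the (now print-free) branch in get_allowed_characters enforces.
assert " " not in parser.get_allowed_characters()
```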