diff --git a/flatline_lsp.py b/flatline_lsp.py
index ee93438..acb82da 100644
--- a/flatline_lsp.py
+++ b/flatline_lsp.py
@@ -265,7 +265,7 @@ def main() -> None:
         "--backend-server-port",
         type=int,
         help="llm inference backend server port number",
-        default=5000,
+        default=57045,
     )
     parser.add_argument(
         "--tokenizer-name",
diff --git a/flatline_server.cpp b/flatline_server.cpp
index ffa176c..b748606 100644
--- a/flatline_server.cpp
+++ b/flatline_server.cpp
@@ -148,7 +148,7 @@ std::string make_response_json(std::vector<float> const &next_token_logits) {
 #include <optional>
 
 struct app_options {
-    std::optional<std::string> port = "5000";
+    std::optional<std::string> port = "57045";
     std::optional<std::string> model_path;
     std::optional<bool> numa = true;
     std::optional<int> n_gpu_layers = 0;