diff --git a/selfie-ui/src/app/components/Settings/Settings.tsx b/selfie-ui/src/app/components/Settings/Settings.tsx
index afdbb12..792fbf6 100644
--- a/selfie-ui/src/app/components/Settings/Settings.tsx
+++ b/selfie-ui/src/app/components/Settings/Settings.tsx
@@ -130,6 +130,12 @@ const Settings = () => {
   const uiSchema = {
     'ui:order': ['gpu', 'ngrok_enabled', 'ngrok_authtoken', 'ngrok_domain', '*', 'verbose_logging'],
+    // TODO: this doesn't work
+    // ngrok_enabled: {
+    //   "ui:enableMarkdownInDescription": true,
+    //   "ui:description": "Enables ngrok for exposing your local server to the internet.",
+    // },
+
     method: {
       "ui:widget": "radio",
     },
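
A note on the commented-out block in the hunk above: it targets react-jsonschema-form's per-field uiSchema options, and the TODO records that the Markdown description never rendered. The sketch below is a minimal, standalone reproduction of the same idea, assuming RJSF v5 with the ajv8 validator; the ngrok_enabled field name and description string come from the patch, while the surrounding schema and component wiring are invented for illustration. Whether ui:enableMarkdownInDescription is honored depends on the RJSF version and theme in use, which may be why it "doesn't work" here.

// Hedged sketch (not the project's actual schema): an RJSF v5 per-field
// description with Markdown enabled. Only "ngrok_enabled" and the
// description text are taken from the diff.
import Form from "@rjsf/core";
import validator from "@rjsf/validator-ajv8";
import { RJSFSchema, UiSchema } from "@rjsf/utils";

const schema: RJSFSchema = {
  type: "object",
  properties: {
    ngrok_enabled: { type: "boolean", title: "ngrok enabled" },
  },
};

const uiSchema: UiSchema = {
  ngrok_enabled: {
    // "ui:description" overrides the schema description for this field;
    // "ui:enableMarkdownInDescription" asks the theme to render it as Markdown.
    "ui:enableMarkdownInDescription": true,
    "ui:description": "Enables ngrok for exposing your local server to the internet.",
  },
};

export const NgrokFieldSketch = () => (
  <Form schema={schema} uiSchema={uiSchema} validator={validator} />
);
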
@@ -359,8 +365,22 @@ const Settings = () => {
 
       LLM Presets
 
       {taskMessage && }
+      {settings.method === "llama.cpp" && (
+        Heads up, you've selected llama.cpp - please ensure you have enough
+        system RAM to load the configured model!
+      )}
-      Customize your LLM provider using one of the presets below, or manually
-      configure any llama.cpp or LiteLLM-supported model.
+      Customize your LLM provider using one of the presets below, or manually configure any
+      llama.cpp or LiteLLM-supported
+      model. Configure ngrok to expose your local server to the internet.
 
       {presets.map(renderPreset)}
diff --git a/selfie/gui.py b/selfie/gui.py
index b7f4adb..856153a 100644
--- a/selfie/gui.py
+++ b/selfie/gui.py
@@ -100,6 +100,8 @@ def __init__(self, argv):
 
     def show_log_window(self):
        self.log_widget.show()
+        self.log_widget.raise_()
+        self.log_widget.activateWindow()
 
     def update_gpu_mode_status(self):
         # TODO: Fix this hack
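
For context on the gui.py hunk: QWidget.show() makes a window visible, but if the log window is already open it can stay buried behind other windows; raise_() moves it to the top of the stacking order and activateWindow() asks the window manager to give it keyboard focus. A minimal, standalone sketch of the same pattern, assuming PySide6 (the same QWidget methods exist in PyQt5/PyQt6):

# Hedged sketch of the show / raise_ / activateWindow pattern from the diff,
# assuming PySide6; the widget names here are invented for illustration.
import sys

from PySide6.QtWidgets import (
    QApplication,
    QPlainTextEdit,
    QPushButton,
    QVBoxLayout,
    QWidget,
)

app = QApplication(sys.argv)

log_widget = QPlainTextEdit()
log_widget.setWindowTitle("Logs")

def show_log_window():
    log_widget.show()            # make the window visible (no-op if already shown)
    log_widget.raise_()          # move it to the top of the window stack
    log_widget.activateWindow()  # request keyboard focus from the window manager

main = QWidget()
layout = QVBoxLayout(main)
button = QPushButton("Show logs")
button.clicked.connect(show_log_window)
layout.addWidget(button)
main.show()

sys.exit(app.exec())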