diff --git a/screenpipe-app-tauri/modules/ai-providers/providers/embedded/provider-metadata.ts b/screenpipe-app-tauri/modules/ai-providers/providers/embedded/provider-metadata.ts
index b60c1ba99..cefd355bd 100644
--- a/screenpipe-app-tauri/modules/ai-providers/providers/embedded/provider-metadata.ts
+++ b/screenpipe-app-tauri/modules/ai-providers/providers/embedded/provider-metadata.ts
@@ -30,8 +30,4 @@ export const EmbeddedAiProvider: ProviderMetadata = {
       port: settings.embeddedLLM.port.toString()
     }
   },
-}
-// essentially then embedded ischosen
-// show form, save settings
-// once settings are saved, enable llm model control component
-// this component should start, stop and show status and logs
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/screenpipe-app-tauri/modules/settings/components/ai-section/embedded-llm/control-center.tsx b/screenpipe-app-tauri/modules/settings/components/ai-section/embedded-llm/control-center.tsx
index a7aec0924..599d61468 100644
--- a/screenpipe-app-tauri/modules/settings/components/ai-section/embedded-llm/control-center.tsx
+++ b/screenpipe-app-tauri/modules/settings/components/ai-section/embedded-llm/control-center.tsx
@@ -125,7 +125,7 @@ export function EmbeddedControlCenter({
           control center

-          control and find useful information about screenpipe's embedded ai
+          control and find useful information about screenpipe's embedded ai

diff --git a/screenpipe-app-tauri/modules/settings/components/ai-section/embedded-llm/llm-controller.tsx b/screenpipe-app-tauri/modules/settings/components/ai-section/embedded-llm/llm-controller.tsx
index d9c24d498..2a63fac4e 100644
--- a/screenpipe-app-tauri/modules/settings/components/ai-section/embedded-llm/llm-controller.tsx
+++ b/screenpipe-app-tauri/modules/settings/components/ai-section/embedded-llm/llm-controller.tsx
@@ -30,7 +30,7 @@ export function LLMControler({
       description:
         "downloading and initializing the embedded ai, may take a while (check $HOME/.ollama/models)...",
     });
-    // // TODOO: Separate start server and start model rust commands
+
     try {
       // const result = await invoke("start_ollama_sidecar", {
       //   settings: {
@@ -132,7 +132,9 @@
           className="w-[40px] h-[40px]"
         >
           {stopIsPending || startIsPending
-            ?
+            ?
+
+
             : }
diff --git a/screenpipe-app-tauri/src-tauri/src/llm_sidecar.rs b/screenpipe-app-tauri/src-tauri/src/llm_sidecar.rs
index cb231bde4..39221b181 100644
--- a/screenpipe-app-tauri/src-tauri/src/llm_sidecar.rs
+++ b/screenpipe-app-tauri/src-tauri/src/llm_sidecar.rs
@@ -59,7 +59,6 @@ impl LLMSidecar {
         }

         info!("Spawning Ollama serve command");
-        // serve_command.spawn()?;
         serve_command.spawn()?;

         info!("Ollama serve command spawned. Please wait a few seconds to check ollama's server health");
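Taken together, these changes touch the flow where the settings UI starts the embedded Ollama sidecar and then waits for the server to come up (the "check ollama's server health" log in llm_sidecar.rs). As a rough illustration only, here is a minimal TypeScript sketch of that flow from the frontend side: it calls the `start_ollama_sidecar` command that appears, commented out, in llm-controller.tsx, then polls the Ollama HTTP server until it responds. The import path assumes Tauri v2, and the settings shape, port handling, and health-check strategy are assumptions rather than code taken from this repository.

```ts
import { invoke } from "@tauri-apps/api/core"; // assumes Tauri v2; v1 exposes invoke from "@tauri-apps/api/tauri"

// Field names assumed from provider-metadata.ts, which reads settings.embeddedLLM.port.
interface EmbeddedLLMSettings {
  model: string;
  port: number;
}

async function startEmbeddedAi(settings: EmbeddedLLMSettings): Promise<void> {
  // "start_ollama_sidecar" is the command name referenced (commented out) in
  // llm-controller.tsx; the exact argument shape here is an assumption.
  await invoke("start_ollama_sidecar", { settings });

  // Ollama's HTTP server answers a plain GET / with "Ollama is running" once it
  // is up, so poll it until the sidecar is reachable or give up after ~30 s.
  const baseUrl = `http://localhost:${settings.port}`;
  for (let attempt = 0; attempt < 30; attempt++) {
    try {
      const res = await fetch(baseUrl);
      if (res.ok) return;
    } catch {
      // server not accepting connections yet; keep waiting
    }
    await new Promise((resolve) => setTimeout(resolve, 1000));
  }
  throw new Error(`embedded ai did not become healthy at ${baseUrl}`);
}
```

The removed TODOO comment had suggested splitting "start server" and "start model" into separate Rust commands; this sketch keeps a single command, matching what the current code path does.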