Skip to content

Commit

Permalink
Initial commit of home-orchestrator
Browse files Browse the repository at this point in the history
Project created by Hugo Ferreira

Signed-off-by: Alexandre Peixoto Ferreira <[email protected]>
  • Loading branch information
alexandref75 committed Feb 19, 2025
0 parents commit 0d02026
Show file tree
Hide file tree
Showing 5 changed files with 164 additions and 0 deletions.
21 changes: 21 additions & 0 deletions .github/workflows/smarter-org-docker-buildx.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# CI: build and publish a multi-arch Docker image by delegating to the
# shared smarter-project reusable workflow.
name: Docker Image BuildX CI and Publish

# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

on:
  # Nightly rebuild picks up refreshed base images and security fixes.
  schedule:
    - cron: "19 16 * * *"
  push:
    branches: ["main"]
    # Publish semver tags as releases.
    tags: ["v*.*.*"]
  pull_request:
    branches: ["main"]
  # Allow manual runs from the Actions tab.
  workflow_dispatch:

jobs:
  build:
    # All buildx/push logic lives in the shared reusable workflow.
    uses: smarter-project/reusable-workflows/.github/workflows/smarter-org-docker-buildx.yml@main
14 changes: 14 additions & 0 deletions .github/workflows/smarter-org-helm.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# release.yaml
# Package and publish the project's Helm chart(s) on every push to main,
# delegating to the shared smarter-project reusable workflow.
name: Release Charts

on:
  push:
    branches:
      - main

jobs:
  release:
    uses: smarter-project/reusable-workflows/.github/workflows/smarter-org-helm.yml@main
    # GPG material is forwarded so the reusable workflow can sign the
    # published chart packages.
    secrets:
      GPG_KEYRING_BASE64: ${{ secrets.GPG_KEYRING_BASE64 }}
      GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }}
19 changes: 19 additions & 0 deletions Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Minimal image that runs initial_setup.sh to provision an Ollama model.
FROM alpine:3.19

# Install required tools: curl (HTTP calls to the Ollama API), jq (parse
# JSON responses), bash (the setup script is bash, not POSIX sh).
RUN apk add --no-cache \
    curl \
    jq \
    bash

# Set working directory
WORKDIR /app

# Copy the setup script
COPY initial_setup.sh .

# Make the script executable
RUN chmod +x initial_setup.sh

# Set the entrypoint
ENTRYPOINT ["./initial_setup.sh"]
7 changes: 7 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
# Part of Smart-home security demo using AI - LLMs

## Motivation

## Usage

A container that configures the ML infrastructure for the demo: it waits for the Ollama services, pulls the configured model, and smoke-tests it.
103 changes: 103 additions & 0 deletions initial_setup.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
#!/usr/bin/env bash

# For future readers: So here's the thing... right now, both the host and the guest load immediately,
# which means that we had two instances of the model loaded into memory by using this script.
#
# There is a way to unload the model from the host as seen in the Ollama docs:
# https://github.com/ollama/ollama/blob/main/docs/api.md#unload-a-model-1
#
# unload_model_from_memory() {
#   echo "Unloading model from memory..."
#   curl -X POST "${OLLAMA_HOST_URL}/api/chat" -d "{
#     \"model\": \"$OLLAMA_MODEL\",
#     \"messages\": [],
#     \"keep_alive\": 0
#   }"
# }
#
# But that wouldn't be a robust solution, and it doesn't necessarily avoid OOM, without
# signalling to the guest that the setup is complete, and then have the guest load the model on its own.
#
# So what we do instead is receive two URL's: one for the host, one for the guest.
# The host URL is the one we use to download the model, and the guest URL is the one
# we use to test the model by sending a simple prompt.

# Environment variables with defaults
export OLLAMA_MODEL=${OLLAMA_MODEL:-"llama3.2-vision:latest"}         # model to pull and smoke-test
export OLLAMA_HOST_URL=${OLLAMA_HOST_URL:-"http://localhost:11435"}   # host instance (model download)
export OLLAMA_GUEST_URL=${OLLAMA_GUEST_URL:-"http://localhost:11434"} # guest instance (prompt test)
export OLLAMA_MAX_RETRIES=${OLLAMA_MAX_RETRIES:-30}                   # readiness poll attempts
export OLLAMA_RETRY_DELAY=${OLLAMA_RETRY_DELAY:-2}                    # seconds between polls
# Bug fix: the original used ${OLLAMA_WAIT_FOREVER:0} (substring expansion),
# which expands to "" when the variable is unset and later breaks the
# numeric test in main. ":-" applies the intended default of 0.
export OLLAMA_WAIT_FOREVER=${OLLAMA_WAIT_FOREVER:-0}                  # >0: block forever after setup

# Helper functions

#######################################
# Poll an Ollama endpoint until it answers /api/tags or retries run out.
# Globals:   OLLAMA_HOST_URL or OLLAMA_GUEST_URL (read, selected by $1),
#            OLLAMA_MAX_RETRIES, OLLAMA_RETRY_DELAY (read)
# Arguments: $1 - service type, "HOST" or "GUEST"
# Outputs:   progress messages on stdout
# Returns:   0 once the service responds, 1 on timeout
#######################################
wait_for_ollama() {
    local service_type=$1
    local url_var="OLLAMA_${service_type}_URL"
    local url="${!url_var}" # indirect expansion: OLLAMA_HOST_URL or OLLAMA_GUEST_URL

    echo "Waiting for Ollama $service_type service to be ready..."
    local i
    for (( i = 1; i <= OLLAMA_MAX_RETRIES; i++ )); do
        # --max-time keeps a hung (accepting but unresponsive) endpoint from
        # stalling the retry loop indefinitely.
        if curl -s --max-time 5 "${url}/api/tags" > /dev/null; then
            echo "Ollama $service_type service is ready!"
            return 0
        fi

        if (( i == OLLAMA_MAX_RETRIES )); then
            echo "Timeout waiting for Ollama $service_type service"
            return 1
        fi

        echo "Attempt $i/$OLLAMA_MAX_RETRIES: $service_type service not ready yet, waiting ${OLLAMA_RETRY_DELAY}s..."
        sleep "$OLLAMA_RETRY_DELAY"
    done
}

#######################################
# Download $OLLAMA_MODEL on the host Ollama instance via its /api/pull API.
# Globals:   OLLAMA_HOST_URL, OLLAMA_MODEL (read)
# Outputs:   progress on stdout; diagnostic on failure
# Returns:   0 on success, 1 when the pull request fails
#######################################
pull_model() {
    echo "Pulling model $OLLAMA_MODEL..."
    local payload="{\"name\": \"$OLLAMA_MODEL\"}"
    if curl -f -X POST "${OLLAMA_HOST_URL}/api/pull" -d "$payload"; then
        return 0
    fi
    echo "Failed to pull model $OLLAMA_MODEL"
    return 1
}

#######################################
# Smoke-test the pulled model: send a tiny prompt to the guest instance
# and check that the reply parses as Ollama's generate JSON.
# Globals:   OLLAMA_GUEST_URL, OLLAMA_MODEL (read)
# Outputs:   diagnostics and the model's reply on stdout
# Returns:   0 on a well-formed response, 1 on connection/HTTP/format errors
#######################################
test_model() {
    echo "Testing model with a simple prompt..."
    local response
    # Bug fix: -f added so HTTP-level errors (404, 500, ...) take the failure
    # branch instead of feeding an error body to jq as if it were a reply.
    # A fixed seed keeps the request deterministic across runs.
    if ! response=$(curl -sf "${OLLAMA_GUEST_URL}/api/generate" -d "{
        \"model\": \"$OLLAMA_MODEL\",
        \"prompt\": \"Why is the sky blue? Answer in less than 10 words.\",
        \"stream\": false,
        \"options\": { \"seed\": 123 }
    }"); then
        echo "Failed to connect to ${OLLAMA_GUEST_URL}"
        return 1
    fi

    if ! echo "$response" | jq -e '.response' >/dev/null 2>&1; then
        echo "Invalid response format from model. Got:"
        # Pretty-print when the body is JSON; fall back to raw text otherwise
        # (the original jq '.' printed nothing useful for non-JSON bodies).
        echo "$response" | jq '.' 2>/dev/null || echo "$response"
        return 1
    fi

    echo "Model test successful. Response:"
    echo "$response" | jq -c '.response'
}

# Main execution

#######################################
# Orchestrate setup: wait for host, pull the model, wait for guest,
# smoke-test the model, then optionally block forever.
# Globals:   OLLAMA_WAIT_FOREVER (read)
# Returns:   exits 1 on any failed step
#######################################
main() {
    wait_for_ollama HOST || exit 1
    pull_model || exit 1
    wait_for_ollama GUEST || exit 1
    test_model || exit 1
    echo "Setup complete!"
    # Keep the container alive after setup (e.g. when run as a sidecar).
    # Bug fix: quoted with a :-0 fallback — the original unquoted, unguarded
    # expansion made `[ -gt 0 ]` a runtime syntax error when the variable
    # was empty (which it was, given the ${VAR:0} default bug upstream).
    if [ "${OLLAMA_WAIT_FOREVER:-0}" -gt 0 ]; then
        while true; do
            sleep 300
        done
    fi
}

main

0 comments on commit 0d02026

Please sign in to comment.