Commit
Merge branch 'master' into onramp-rework-env
salverius-tech committed Apr 9, 2024
2 parents c20a91c + 545cf1d commit 1532c50
Showing 5 changed files with 77 additions and 30 deletions.
15 changes: 15 additions & 0 deletions .templates/Ollamamodels.sample
@@ -0,0 +1,15 @@
codellama:7b-python
codellama:latest
dolphin-mistral:latest
dolphin-mixtral:latest
dolphincoder:latest
llama2:13b-chat
llama2:chat
llama2:latest
mistral:latest
mixtral:latest
nomic-embed-text:latest
openhermes:latest
sqlcoder:latest
stable-code:latest
tinydolphin:latest
47 changes: 47 additions & 0 deletions make.d/scripts/ollama-update-models.sh
@@ -0,0 +1,47 @@
#!/bin/bash

file="etc/ollama/Ollamamodels"
parallel=false

# Parse command line arguments
while [[ $# -gt 0 ]]; do
key="$1"
case $key in
--parallel)
parallel=true
shift
;;
*)
shift
;;
esac
done

# Check if the Ollamamodels file exists
if [ ! -f "$file" ]; then
echo "Error: $file not found." >&2
exit 1
fi

# Count the models to pull, ignoring blank, whitespace-only, and comment lines
total_count=$(grep -cvE '^[[:space:]]*(#|$)' "$file")

index=1
while read -r line; do
# Skip empty lines, whitespace-only lines, and comment lines
if [[ -z "$line" || "$line" =~ ^[[:space:]]+$ || "$line" == "#"* ]]; then
continue
fi

echo "Pulling model ($index/$total_count) $line"
if $parallel; then
docker exec ollama ollama pull "$line" &
else
docker exec ollama ollama pull "$line"
fi

index=$((index + 1))
done < "$file"

# Wait for any docker exec pulls backgrounded in --parallel mode to finish
wait
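
A minimal usage sketch of the new script, assuming the repository root as the working directory and a running ollama container; the cp step that seeds the model list from the sample file is an assumed setup step, not something this commit performs:

    # Seed the model list from the shipped sample (assumed one-time setup)
    cp .templates/Ollamamodels.sample etc/ollama/Ollamamodels

    # Pull or update each listed model sequentially
    bash make.d/scripts/ollama-update-models.sh

    # Or start all pulls at once; each runs as a backgrounded docker exec
    bash make.d/scripts/ollama-update-models.sh --parallel

With --parallel the pulls compete for bandwidth and disk simultaneously, so the sequential default is the safer choice on constrained hosts.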
13 changes: 5 additions & 8 deletions services-available/ollama-amd.yml
@@ -11,14 +11,11 @@ volumes:
# https://hub.docker.com/r/ollama/ollama
# https://github.com/ollama/ollama

# docker exec -it ollama ollama pull llama2 # https://ollama.com/library/llama2 [4 GB]
# docker exec -it ollama ollama pull llama2:chat # [4 GB]
# docker exec -it ollama ollama pull llama2:13b-chat # [7.5 GB]
# docker exec -it ollama ollama pull mistral # https://ollama.com/library/mistral [4 GB]
# docker exec -it ollama ollama pull mixtral # https://ollama.com/library/mixtral [26 GB]
# docker exec -it ollama ollama pull codellama # https://ollama.com/library/codellama [4 GB]
# docker exec -it ollama ollama pull openhermes # [4 GB]
# docker exec -it ollama ollama pull dolphin-mistral
# Add models to etc/ollama/Ollamamodels, then run
# make.d/scripts/ollama-update-models.sh
# to pull or update them.
# See .templates/Ollamamodels.sample for examples.
# Available models: https://ollama.com/library/

services:
ollama:
13 changes: 5 additions & 8 deletions services-available/ollama-cpu.yml
@@ -11,14 +11,11 @@ volumes:
# https://hub.docker.com/r/ollama/ollama
# https://github.com/ollama/ollama

# docker exec -it ollama ollama pull llama2 # https://ollama.com/library/llama2 [4 GB]
# docker exec -it ollama ollama pull llama2:chat # [4 GB]
# docker exec -it ollama ollama pull llama2:13b-chat # [7.5 GB]
# docker exec -it ollama ollama pull mistral # https://ollama.com/library/mistral [4 GB]
# docker exec -it ollama ollama pull mixtral # https://ollama.com/library/mixtral [26 GB]
# docker exec -it ollama ollama pull codellama # https://ollama.com/library/codellama [4 GB]
# docker exec -it ollama ollama pull openhermes # [4 GB]
# docker exec -it ollama ollama pull dolphin-mistral
# Add models to etc/ollama/Ollamamodels, then run
# make.d/scripts/ollama-update-models.sh
# to pull or update them.
# See .templates/Ollamamodels.sample for examples.
# Available models: https://ollama.com/library/

services:
ollama:
19 changes: 5 additions & 14 deletions services-available/ollama-nvidia.yml
@@ -8,20 +8,11 @@ networks:
# https://hub.docker.com/r/ollama/ollama
# https://github.com/ollama/ollama

# docker exec -it ollama ollama pull llama2 # https://ollama.com/library/llama2 [4 GB]
# docker exec -it ollama ollama pull llama2:chat # [4 GB]
# docker exec -it ollama ollama pull llama2:13b-chat # [7.5 GB]
# docker exec -it ollama ollama pull mistral # https://ollama.com/library/mistral [4 GB]
# docker exec -it ollama ollama pull mixtral # https://ollama.com/library/mixtral [26 GB]
# docker exec -it ollama ollama pull codellama # https://ollama.com/library/codellama [4 GB]
# docker exec -it ollama ollama pull codellama:7b-python
# docker exec -it ollama ollama pull openhermes # [4 GB]
# docker exec -it ollama ollama pull dolphin-mistral # https://ollama.com/library/dolphin-mistral [4 GB]
# docker exec -it ollama ollama pull dolphin-mixtral # https://ollama.com/library/dolphin-mixtral [26GB]
# docker exec -it ollama ollama pull stable-code
# docker exec -it ollama ollama pull dolphincoder
# docker exec -it ollama ollama pull tinydolphin
# docker exec -it ollama ollama pull sqlcoder
# Add models to etc/ollama/Ollamamodels, then run
# make.d/scripts/ollama-update-models.sh
# to pull or update them.
# See .templates/Ollamamodels.sample for examples.
# Available models: https://ollama.com/library/


services:
