diff --git a/dgraph-modus-example/backend/modus.json b/dgraph-modus-example/backend/modus.json
index 9403f4a..f3f5ab0 100644
--- a/dgraph-modus-example/backend/modus.json
+++ b/dgraph-modus-example/backend/modus.json
@@ -9,7 +9,7 @@
   },
   "models": {
     "text-generator": {
-      "sourceModel": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+      "sourceModel": "meta-llama/Llama-3.2-3B-Instruct",
       "provider": "hugging-face",
       "connection": "hypermode"
     }
diff --git a/modus-getting-started/modus.json b/modus-getting-started/modus.json
index ef623b8..d343d7c 100644
--- a/modus-getting-started/modus.json
+++ b/modus-getting-started/modus.json
@@ -15,7 +15,7 @@
   },
   "models": {
     "text-generator": {
-      "sourceModel": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+      "sourceModel": "meta-llama/Llama-3.2-3B-Instruct",
       "provider": "hugging-face",
       "connection": "hypermode"
     }
diff --git a/modus-press/modus.json b/modus-press/modus.json
index dd0e6c2..13eaed9 100644
--- a/modus-press/modus.json
+++ b/modus-press/modus.json
@@ -9,7 +9,7 @@
   },
   "models": {
     "llama": {
-      "sourceModel": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+      "sourceModel": "meta-llama/Llama-3.2-3B-Instruct",
       "provider": "hugging-face",
       "connection": "hypermode"
     }
diff --git a/modus101/modus.json b/modus101/modus.json
index 9e0c8bc..6865a18 100644
--- a/modus101/modus.json
+++ b/modus101/modus.json
@@ -9,7 +9,7 @@
   },
   "models": {
     "text-generator": {
-      "sourceModel": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+      "sourceModel": "meta-llama/Llama-3.2-3B-Instruct",
       "provider": "hugging-face",
       "connection": "hypermode"
     }
diff --git a/modushack-data-models/modus.json b/modushack-data-models/modus.json
index e447d9c..c70621b 100644
--- a/modushack-data-models/modus.json
+++ b/modushack-data-models/modus.json
@@ -18,7 +18,7 @@
   },
   "models": {
     "text-generator": {
-      "sourceModel": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+      "sourceModel": "meta-llama/Llama-3.2-3B-Instruct",
       "provider": "hugging-face",
       "connection": "hypermode"
     }
diff --git a/rag/rag-mdfiles/README.md b/rag/rag-mdfiles/README.md
index 36aa646..f3077c9 100644
--- a/rag/rag-mdfiles/README.md
+++ b/rag/rag-mdfiles/README.md
@@ -36,7 +36,7 @@ Additionally We are using
 - `sentence-transformers/all-MiniLM-L6-v2` for the embedding
-- `meta-llama/Meta-Llama-3.1-8B-Instruct` for text generation All models are shared models hosted by
+- `meta-llama/Llama-3.2-3B-Instruct` for text generation All models are shared models hosted by
   Hypermode. Login to Hypermode to get access to those model before running this project locally.
@@ -117,7 +117,7 @@ text-generator
 ```json
 "text-generator": {
-  "sourceModel": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+  "sourceModel": "meta-llama/Llama-3.2-3B-Instruct",
   "connection": "hypermode",
   "provider": "hugging-face"
 },
diff --git a/rag/rag-mdfiles/api-as/modus.json b/rag/rag-mdfiles/api-as/modus.json
index bb606c5..3e38c66 100644
--- a/rag/rag-mdfiles/api-as/modus.json
+++ b/rag/rag-mdfiles/api-as/modus.json
@@ -14,7 +14,7 @@
       "provider": "hugging-face"
     },
     "text-generator": {
-      "sourceModel": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+      "sourceModel": "meta-llama/Llama-3.2-3B-Instruct",
       "connection": "hypermode",
       "provider": "hugging-face"
     }
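Note: each `text-generator` (or `llama`) entry in `modus.json` is looked up by name from the app's functions, so swapping `sourceModel` requires no code changes in the examples. Below is a minimal sketch of how such a model is typically invoked from an AssemblyScript Modus function, written against the OpenAI-style chat wrapper used in the modus-getting-started template; only the model name `"text-generator"` comes from the diff above, and the function name, imports, and prompt values are illustrative assumptions.

```typescript
// Minimal sketch (assumption: Modus AssemblyScript SDK with its
// OpenAI-compatible chat interface; only the "text-generator" name
// is taken from the modus.json entries changed in this diff).
import { models } from "@hypermode/modus-sdk-as";
import {
  OpenAIChatModel,
  SystemMessage,
  UserMessage,
} from "@hypermode/modus-sdk-as/models/openai/chat";

export function generateText(prompt: string): string {
  // Resolve the model by the logical name declared under "models" in
  // modus.json; the sourceModel swap above is transparent to this code.
  const model = models.getModel<OpenAIChatModel>("text-generator");

  const input = model.createInput([
    new SystemMessage("You are a helpful assistant."),
    new UserMessage(prompt),
  ]);
  input.temperature = 0.7;

  const output = model.invoke(input);
  return output.choices[0].message.content.trim();
}
```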