From 5a5c6b0cbae8242ec7ecfb4ae2897e35eaaaf655 Mon Sep 17 00:00:00 2001
From: Thomas Vitale
Date: Wed, 19 Jun 2024 07:08:36 +0200
Subject: [PATCH] Use Mistral as default model with Ollama

---
 01-chat-models/chat-models-ollama/README.md | 8 ++++----
 .../java/com/thomasvitale/ai/spring/ChatController.java | 2 +-
 .../chat-models-ollama/src/main/resources/application.yml | 2 +-
 .../ai/spring/TestChatModelsOllamaApplication.java | 2 +-
 02-prompts/prompts-basics-ollama/README.md | 8 ++++----
 .../src/main/resources/application.yml | 2 +-
 .../ai/spring/TestPromptBasicsOllamaApplication.java | 2 +-
 02-prompts/prompts-messages-ollama/README.md | 8 ++++----
 .../src/main/resources/application.yml | 2 +-
 .../ai/spring/TestPromptsMessagesOllamaApplication.java | 2 +-
 02-prompts/prompts-templates-ollama/README.md | 8 ++++----
 .../src/main/resources/application.yml | 2 +-
 .../ai/spring/TestPromptsTemplatesOllamaApplication.java | 2 +-
 03-output-converters/output-converters-ollama/README.md | 8 ++++----
 .../src/main/resources/application.yml | 2 +-
 .../ai/spring/TestOutputParsersOllamaApplication.java | 2 +-
 04-embedding-models/embedding-models-ollama/README.md | 8 ++++----
 .../com/thomasvitale/ai/spring/EmbeddingController.java | 2 +-
 .../src/main/resources/application.yml | 2 +-
 .../ai/spring/TestEmbeddingModelsOllamaApplication.java | 2 +-
 05-etl-pipeline/document-readers-json-ollama/README.md | 8 ++++----
 .../src/main/resources/application.yml | 4 ++--
 .../spring/TestDocumentReadersJsonOllamaApplication.java | 2 +-
 05-etl-pipeline/document-readers-pdf-ollama/README.md | 8 ++++----
 .../src/main/resources/application.yml | 4 ++--
 .../spring/TestDocumentReadersPdfOllamaApplication.java | 2 +-
 05-etl-pipeline/document-readers-text-ollama/README.md | 8 ++++----
 .../src/main/resources/application.yml | 4 ++--
 .../spring/TestDocumentReadersTextOllamaApplication.java | 2 +-
 .../document-transformers-metadata-ollama/README.md | 8 ++++----
 .../src/main/resources/application.yml | 4 ++--
 ...TestDocumentTransformersMetadataOllamaApplication.java | 2 +-
 .../document-transformers-splitters-ollama/README.md | 8 ++++----
 .../src/main/resources/application.yml | 4 ++--
 ...TestDocumentTransformersMetadataOllamaApplication.java | 2 +-
 35 files changed, 73 insertions(+), 73 deletions(-)

diff --git a/01-chat-models/chat-models-ollama/README.md b/01-chat-models/chat-models-ollama/README.md
index 607dd75..2b7fc90 100644
--- a/01-chat-models/chat-models-ollama/README.md
+++ b/01-chat-models/chat-models-ollama/README.md
@@ -31,10 +31,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
 ### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
-Then, use Ollama to run the _llama3_ large language model. That's what we'll use in this example.
+Then, use Ollama to run the _mistral_ large language model. That's what we'll use in this example.
 
 ```shell
-ollama run llama3
+ollama run mistral
 ```
 
 Finally, run the Spring Boot application.
@@ -45,7 +45,7 @@ Finally, run the Spring Boot application.
 
 ### Ollama as a dev service with Testcontainers
 
-The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama3_ model at startup time.
+The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _mistral_ model at startup time.
 
 ```shell
 ./gradlew bootTestRun
@@ -53,7 +53,7 @@ The application relies on the native Testcontainers support in Spring Boot to sp
 
 ## Calling the application
 
-You can now call the application that will use Ollama and _llama3_ to generate text based on a default prompt.
+You can now call the application that will use Ollama and _mistral_ to generate text based on a default prompt.
 This example uses [httpie](https://httpie.io) to send HTTP requests.
 
 ```shell
diff --git a/01-chat-models/chat-models-ollama/src/main/java/com/thomasvitale/ai/spring/ChatController.java b/01-chat-models/chat-models-ollama/src/main/java/com/thomasvitale/ai/spring/ChatController.java
index fff8555..5c84027 100644
--- a/01-chat-models/chat-models-ollama/src/main/java/com/thomasvitale/ai/spring/ChatController.java
+++ b/01-chat-models/chat-models-ollama/src/main/java/com/thomasvitale/ai/spring/ChatController.java
@@ -33,7 +33,7 @@ String chatWithGenericOptions(@RequestParam(defaultValue = "What did Gandalf say
     @GetMapping("/chat/ollama-options")
     String chatWithOllamaOptions(@RequestParam(defaultValue = "What did Gandalf say to the Balrog?") String message) {
         return chatModel.call(new Prompt(message, OllamaOptions.create()
-                .withModel("llama3")
+                .withModel("mistral")
                 .withRepeatPenalty(1.5f)))
                 .getResult().getOutput().getContent();
     }
diff --git a/01-chat-models/chat-models-ollama/src/main/resources/application.yml b/01-chat-models/chat-models-ollama/src/main/resources/application.yml
index cdc237a..a05c157 100644
--- a/01-chat-models/chat-models-ollama/src/main/resources/application.yml
+++ b/01-chat-models/chat-models-ollama/src/main/resources/application.yml
@@ -3,5 +3,5 @@ spring:
     ollama:
       chat:
         options:
-          model: llama3
+          model: mistral
           temperature: 0.7
diff --git a/01-chat-models/chat-models-ollama/src/test/java/com/thomasvitale/ai/spring/TestChatModelsOllamaApplication.java b/01-chat-models/chat-models-ollama/src/test/java/com/thomasvitale/ai/spring/TestChatModelsOllamaApplication.java
index 834956b..79b6918 100644
--- a/01-chat-models/chat-models-ollama/src/test/java/com/thomasvitale/ai/spring/TestChatModelsOllamaApplication.java
+++ b/01-chat-models/chat-models-ollama/src/test/java/com/thomasvitale/ai/spring/TestChatModelsOllamaApplication.java
@@ -15,7 +15,7 @@ public class TestChatModelsOllamaApplication {
     @RestartScope
     @ServiceConnection
     OllamaContainer ollama() {
-        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-llama3")
+        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-mistral")
                 .asCompatibleSubstituteFor("ollama/ollama"));
     }
 
diff --git a/02-prompts/prompts-basics-ollama/README.md b/02-prompts/prompts-basics-ollama/README.md
index ea518e9..3648300 100644
--- a/02-prompts/prompts-basics-ollama/README.md
+++ b/02-prompts/prompts-basics-ollama/README.md
@@ -9,10 +9,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
 ### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
-Then, use Ollama to run the _llama3_ large language model.
+Then, use Ollama to run the _mistral_ large language model.
 
 ```shell
-ollama run llama3
+ollama run mistral
 ```
 
 Finally, run the Spring Boot application.
@@ -23,7 +23,7 @@ Finally, run the Spring Boot application.
 
 ### Ollama as a dev service with Testcontainers
 
-The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama3_ model at startup time.
+The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _mistral_ model at startup time.
 
 ```shell
 ./gradlew bootTestRun
@@ -31,7 +31,7 @@ The application relies on the native Testcontainers support in Spring Boot to sp
 
 ## Calling the application
 
-You can now call the application that will use Ollama and _llama3_ to generate an answer to your questions.
+You can now call the application that will use Ollama and _mistral_ to generate an answer to your questions.
 This example uses [httpie](https://httpie.io) to send HTTP requests.
 
 ```shell
diff --git a/02-prompts/prompts-basics-ollama/src/main/resources/application.yml b/02-prompts/prompts-basics-ollama/src/main/resources/application.yml
index cdc237a..a05c157 100644
--- a/02-prompts/prompts-basics-ollama/src/main/resources/application.yml
+++ b/02-prompts/prompts-basics-ollama/src/main/resources/application.yml
@@ -3,5 +3,5 @@ spring:
     ollama:
       chat:
         options:
-          model: llama3
+          model: mistral
           temperature: 0.7
diff --git a/02-prompts/prompts-basics-ollama/src/test/java/com/thomasvitale/ai/spring/TestPromptBasicsOllamaApplication.java b/02-prompts/prompts-basics-ollama/src/test/java/com/thomasvitale/ai/spring/TestPromptBasicsOllamaApplication.java
index c6b0bc3..ef41e1d 100644
--- a/02-prompts/prompts-basics-ollama/src/test/java/com/thomasvitale/ai/spring/TestPromptBasicsOllamaApplication.java
+++ b/02-prompts/prompts-basics-ollama/src/test/java/com/thomasvitale/ai/spring/TestPromptBasicsOllamaApplication.java
@@ -15,7 +15,7 @@ public class TestPromptBasicsOllamaApplication {
     @RestartScope
     @ServiceConnection
     OllamaContainer ollama() {
-        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-llama3")
+        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-mistral")
                 .asCompatibleSubstituteFor("ollama/ollama"));
     }
 
diff --git a/02-prompts/prompts-messages-ollama/README.md b/02-prompts/prompts-messages-ollama/README.md
index 2e0be31..3728ed4 100644
--- a/02-prompts/prompts-messages-ollama/README.md
+++ b/02-prompts/prompts-messages-ollama/README.md
@@ -9,10 +9,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
 ### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
-Then, use Ollama to run the _llama3_ large language model.
+Then, use Ollama to run the _mistral_ large language model.
 
 ```shell
-ollama run llama3
+ollama run mistral
 ```
 
 Finally, run the Spring Boot application.
@@ -23,7 +23,7 @@ Finally, run the Spring Boot application.
 
 ### Ollama as a dev service with Testcontainers
 
-The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama3_ model at startup time.
+The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _mistral_ model at startup time.
 
 ```shell
 ./gradlew bootTestRun
@@ -31,7 +31,7 @@ The application relies on the native Testcontainers support in Spring Boot to sp
 
 ## Calling the application
 
-You can now call the application that will use Ollama and _llama3_ to generate an answer to your questions.
+You can now call the application that will use Ollama and _mistral_ to generate an answer to your questions.
 This example uses [httpie](https://httpie.io) to send HTTP requests.
 
 ```shell
diff --git a/02-prompts/prompts-messages-ollama/src/main/resources/application.yml b/02-prompts/prompts-messages-ollama/src/main/resources/application.yml
index cdc237a..a05c157 100644
--- a/02-prompts/prompts-messages-ollama/src/main/resources/application.yml
+++ b/02-prompts/prompts-messages-ollama/src/main/resources/application.yml
@@ -3,5 +3,5 @@ spring:
     ollama:
       chat:
         options:
-          model: llama3
+          model: mistral
           temperature: 0.7
diff --git a/02-prompts/prompts-messages-ollama/src/test/java/com/thomasvitale/ai/spring/TestPromptsMessagesOllamaApplication.java b/02-prompts/prompts-messages-ollama/src/test/java/com/thomasvitale/ai/spring/TestPromptsMessagesOllamaApplication.java
index 0f66ba9..9f1533f 100644
--- a/02-prompts/prompts-messages-ollama/src/test/java/com/thomasvitale/ai/spring/TestPromptsMessagesOllamaApplication.java
+++ b/02-prompts/prompts-messages-ollama/src/test/java/com/thomasvitale/ai/spring/TestPromptsMessagesOllamaApplication.java
@@ -15,7 +15,7 @@ public class TestPromptsMessagesOllamaApplication {
     @RestartScope
     @ServiceConnection
     OllamaContainer ollama() {
-        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-llama3")
+        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-mistral")
                 .asCompatibleSubstituteFor("ollama/ollama"));
     }
 
diff --git a/02-prompts/prompts-templates-ollama/README.md b/02-prompts/prompts-templates-ollama/README.md
index bbc12c2..cacb9ee 100644
--- a/02-prompts/prompts-templates-ollama/README.md
+++ b/02-prompts/prompts-templates-ollama/README.md
@@ -9,10 +9,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
 ### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
-Then, use Ollama to run the _llama3_ large language model.
+Then, use Ollama to run the _mistral_ large language model.
 
 ```shell
-ollama run llama3
+ollama run mistral
 ```
 
 Finally, run the Spring Boot application.
@@ -23,7 +23,7 @@ Finally, run the Spring Boot application.
 
 ### Ollama as a dev service with Testcontainers
 
-The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama3_ model at startup time.
+The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _mistral_ model at startup time.
 
 ```shell
 ./gradlew bootTestRun
@@ -31,7 +31,7 @@ The application relies on the native Testcontainers support in Spring Boot to sp
 
 ## Calling the application
 
-You can now call the application that will use Ollama and _llama3_ to generate an answer to your questions.
+You can now call the application that will use Ollama and _mistral_ to generate an answer to your questions.
 This example uses [httpie](https://httpie.io) to send HTTP requests.
 
 ```shell
diff --git a/02-prompts/prompts-templates-ollama/src/main/resources/application.yml b/02-prompts/prompts-templates-ollama/src/main/resources/application.yml
index cdc237a..a05c157 100644
--- a/02-prompts/prompts-templates-ollama/src/main/resources/application.yml
+++ b/02-prompts/prompts-templates-ollama/src/main/resources/application.yml
@@ -3,5 +3,5 @@ spring:
     ollama:
       chat:
         options:
-          model: llama3
+          model: mistral
           temperature: 0.7
diff --git a/02-prompts/prompts-templates-ollama/src/test/java/com/thomasvitale/ai/spring/TestPromptsTemplatesOllamaApplication.java b/02-prompts/prompts-templates-ollama/src/test/java/com/thomasvitale/ai/spring/TestPromptsTemplatesOllamaApplication.java
index c0f2db8..1554a21 100644
--- a/02-prompts/prompts-templates-ollama/src/test/java/com/thomasvitale/ai/spring/TestPromptsTemplatesOllamaApplication.java
+++ b/02-prompts/prompts-templates-ollama/src/test/java/com/thomasvitale/ai/spring/TestPromptsTemplatesOllamaApplication.java
@@ -15,7 +15,7 @@ public class TestPromptsTemplatesOllamaApplication {
     @RestartScope
     @ServiceConnection
     OllamaContainer ollama() {
-        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-llama3")
+        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-mistral")
                 .asCompatibleSubstituteFor("ollama/ollama"));
     }
 
diff --git a/03-output-converters/output-converters-ollama/README.md b/03-output-converters/output-converters-ollama/README.md
index 0aa71ea..a29482b 100644
--- a/03-output-converters/output-converters-ollama/README.md
+++ b/03-output-converters/output-converters-ollama/README.md
@@ -9,10 +9,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
 ### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
-Then, use Ollama to run the _llama3_ large language model.
+Then, use Ollama to run the _mistral_ large language model.
 
 ```shell
-ollama run llama3
+ollama run mistral
 ```
 
 Finally, run the Spring Boot application.
@@ -23,7 +23,7 @@ Finally, run the Spring Boot application.
 
 ### Ollama as a dev service with Testcontainers
 
-The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama3_ model at startup time.
+The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _mistral_ model at startup time.
 
 ```shell
 ./gradlew bootTestRun
@@ -31,7 +31,7 @@ The application relies on the native Testcontainers support in Spring Boot to sp
 
 ## Calling the application
 
-You can now call the application that will use Ollama and _llama3_ to generate an answer to your questions.
+You can now call the application that will use Ollama and _mistral_ to generate an answer to your questions.
 This example uses [httpie](https://httpie.io) to send HTTP requests.
 
 ```shell
diff --git a/03-output-converters/output-converters-ollama/src/main/resources/application.yml b/03-output-converters/output-converters-ollama/src/main/resources/application.yml
index cdc237a..a05c157 100644
--- a/03-output-converters/output-converters-ollama/src/main/resources/application.yml
+++ b/03-output-converters/output-converters-ollama/src/main/resources/application.yml
@@ -3,5 +3,5 @@ spring:
     ollama:
       chat:
         options:
-          model: llama3
+          model: mistral
           temperature: 0.7
diff --git a/03-output-converters/output-converters-ollama/src/test/java/com/thomasvitale/ai/spring/TestOutputParsersOllamaApplication.java b/03-output-converters/output-converters-ollama/src/test/java/com/thomasvitale/ai/spring/TestOutputParsersOllamaApplication.java
index 06f5a12..070dd35 100644
--- a/03-output-converters/output-converters-ollama/src/test/java/com/thomasvitale/ai/spring/TestOutputParsersOllamaApplication.java
+++ b/03-output-converters/output-converters-ollama/src/test/java/com/thomasvitale/ai/spring/TestOutputParsersOllamaApplication.java
@@ -15,7 +15,7 @@ public class TestOutputParsersOllamaApplication {
     @RestartScope
     @ServiceConnection
     OllamaContainer ollama() {
-        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-llama3")
+        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-mistral")
                 .asCompatibleSubstituteFor("ollama/ollama"));
     }
 
diff --git a/04-embedding-models/embedding-models-ollama/README.md b/04-embedding-models/embedding-models-ollama/README.md
index 36e416f..3bfb39c 100644
--- a/04-embedding-models/embedding-models-ollama/README.md
+++ b/04-embedding-models/embedding-models-ollama/README.md
@@ -32,10 +32,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
 ### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
-Then, use Ollama to run the _llama3_ large language model.
+Then, use Ollama to run the _mistral_ large language model.
 
 ```shell
-ollama run llama3
+ollama run mistral
 ```
 
 Finally, run the Spring Boot application.
@@ -46,7 +46,7 @@ Finally, run the Spring Boot application.
 
 ### Ollama as a dev service with Testcontainers
 
-The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama3_ model at startup time.
+The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _mistral_ model at startup time.
 
 ```shell
 ./gradlew bootTestRun
@@ -54,7 +54,7 @@ The application relies on the native Testcontainers support in Spring Boot to sp
 
 ## Calling the application
 
-You can now call the application that will use Ollama and llama3 to generate a vector representation (embeddings) of a default text.
+You can now call the application that will use Ollama and mistral to generate a vector representation (embeddings) of a default text.
 This example uses [httpie](https://httpie.io) to send HTTP requests.
 
 ```shell
diff --git a/04-embedding-models/embedding-models-ollama/src/main/java/com/thomasvitale/ai/spring/EmbeddingController.java b/04-embedding-models/embedding-models-ollama/src/main/java/com/thomasvitale/ai/spring/EmbeddingController.java
index 516c37d..ab3f0f8 100644
--- a/04-embedding-models/embedding-models-ollama/src/main/java/com/thomasvitale/ai/spring/EmbeddingController.java
+++ b/04-embedding-models/embedding-models-ollama/src/main/java/com/thomasvitale/ai/spring/EmbeddingController.java
@@ -27,7 +27,7 @@ String embed(@RequestParam(defaultValue = "And Gandalf yelled: 'You shall not pa
     @GetMapping("/embed/ollama-options")
     String embedWithOllamaOptions(@RequestParam(defaultValue = "And Gandalf yelled: 'You shall not pass!'") String message) {
         var embeddings = embeddingModel.call(new EmbeddingRequest(List.of(message), OllamaOptions.create()
-                .withModel("llama3")))
+                .withModel("mistral")))
                 .getResult().getOutput();
         return "Size of the embedding vector: " + embeddings.size();
     }
diff --git a/04-embedding-models/embedding-models-ollama/src/main/resources/application.yml b/04-embedding-models/embedding-models-ollama/src/main/resources/application.yml
index dd37a89..143de4c 100644
--- a/04-embedding-models/embedding-models-ollama/src/main/resources/application.yml
+++ b/04-embedding-models/embedding-models-ollama/src/main/resources/application.yml
@@ -3,4 +3,4 @@ spring:
     ollama:
       embedding:
         options:
-          model: llama3
+          model: mistral
diff --git a/04-embedding-models/embedding-models-ollama/src/test/java/com/thomasvitale/ai/spring/TestEmbeddingModelsOllamaApplication.java b/04-embedding-models/embedding-models-ollama/src/test/java/com/thomasvitale/ai/spring/TestEmbeddingModelsOllamaApplication.java
index 29abaa7..6f9b78c 100644
--- a/04-embedding-models/embedding-models-ollama/src/test/java/com/thomasvitale/ai/spring/TestEmbeddingModelsOllamaApplication.java
+++ b/04-embedding-models/embedding-models-ollama/src/test/java/com/thomasvitale/ai/spring/TestEmbeddingModelsOllamaApplication.java
@@ -15,7 +15,7 @@ public class TestEmbeddingModelsOllamaApplication {
     @RestartScope
     @ServiceConnection
     OllamaContainer ollama() {
-        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-llama3")
+        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-mistral")
                 .asCompatibleSubstituteFor("ollama/ollama"));
     }
 
diff --git a/05-etl-pipeline/document-readers-json-ollama/README.md b/05-etl-pipeline/document-readers-json-ollama/README.md
index a3ba479..333dd63 100644
--- a/05-etl-pipeline/document-readers-json-ollama/README.md
+++ b/05-etl-pipeline/document-readers-json-ollama/README.md
@@ -9,10 +9,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
 ### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
-Then, use Ollama to run the _llama3_ large language model.
+Then, use Ollama to run the _mistral_ large language model.
 
 ```shell
-ollama run llama3
+ollama run mistral
 ```
 
 Finally, run the Spring Boot application.
@@ -23,7 +23,7 @@ Finally, run the Spring Boot application.
 
 ### Ollama as a dev service with Testcontainers
 
-The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama3_ model at startup time.
+The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _mistral_ model at startup time.
 
 ```shell
 ./gradlew bootTestRun
@@ -31,7 +31,7 @@ The application relies on the native Testcontainers support in Spring Boot to sp
 
 ## Calling the application
 
-You can now call the application that will use Ollama and _llama3_ to load JSON documents as embeddings and generate an answer to your questions based on those documents (RAG pattern).
+You can now call the application that will use Ollama and _mistral_ to load JSON documents as embeddings and generate an answer to your questions based on those documents (RAG pattern).
 This example uses [httpie](https://httpie.io) to send HTTP requests.
 
 ```shell
diff --git a/05-etl-pipeline/document-readers-json-ollama/src/main/resources/application.yml b/05-etl-pipeline/document-readers-json-ollama/src/main/resources/application.yml
index 13db25e..5090f64 100644
--- a/05-etl-pipeline/document-readers-json-ollama/src/main/resources/application.yml
+++ b/05-etl-pipeline/document-readers-json-ollama/src/main/resources/application.yml
@@ -3,7 +3,7 @@ spring:
     ollama:
       chat:
         options:
-          model: llama3
+          model: mistral
       embedding:
         options:
-          model: llama3
+          model: mistral
diff --git a/05-etl-pipeline/document-readers-json-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentReadersJsonOllamaApplication.java b/05-etl-pipeline/document-readers-json-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentReadersJsonOllamaApplication.java
index 2f33d91..8acb60f 100644
--- a/05-etl-pipeline/document-readers-json-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentReadersJsonOllamaApplication.java
+++ b/05-etl-pipeline/document-readers-json-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentReadersJsonOllamaApplication.java
@@ -15,7 +15,7 @@ public class TestDocumentReadersJsonOllamaApplication {
     @RestartScope
     @ServiceConnection
     OllamaContainer ollama() {
-        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-llama3")
+        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-mistral")
                 .asCompatibleSubstituteFor("ollama/ollama"));
     }
 
diff --git a/05-etl-pipeline/document-readers-pdf-ollama/README.md b/05-etl-pipeline/document-readers-pdf-ollama/README.md
index 41e8d32..d7e5daa 100644
--- a/05-etl-pipeline/document-readers-pdf-ollama/README.md
+++ b/05-etl-pipeline/document-readers-pdf-ollama/README.md
@@ -9,10 +9,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
 ### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
-Then, use Ollama to run the _llama3_ large language model.
+Then, use Ollama to run the _mistral_ large language model.
 
 ```shell
-ollama run llama3
+ollama run mistral
 ```
 
 Finally, run the Spring Boot application.
@@ -23,7 +23,7 @@ Finally, run the Spring Boot application.
 
 ### Ollama as a dev service with Testcontainers
 
-The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama3_ model at startup time.
+The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _mistral_ model at startup time.
 
 ```shell
 ./gradlew bootTestRun
@@ -31,7 +31,7 @@ The application relies on the native Testcontainers support in Spring Boot to sp
 
 ## Calling the application
 
-You can now call the application that will use Ollama and _llama3_ to load PDF documents as embeddings and generate an answer to your questions based on those documents (RAG pattern).
+You can now call the application that will use Ollama and _mistral_ to load PDF documents as embeddings and generate an answer to your questions based on those documents (RAG pattern).
 This example uses [httpie](https://httpie.io) to send HTTP requests.
 
 ```shell
diff --git a/05-etl-pipeline/document-readers-pdf-ollama/src/main/resources/application.yml b/05-etl-pipeline/document-readers-pdf-ollama/src/main/resources/application.yml
index 13db25e..5090f64 100644
--- a/05-etl-pipeline/document-readers-pdf-ollama/src/main/resources/application.yml
+++ b/05-etl-pipeline/document-readers-pdf-ollama/src/main/resources/application.yml
@@ -3,7 +3,7 @@ spring:
     ollama:
       chat:
         options:
-          model: llama3
+          model: mistral
       embedding:
         options:
-          model: llama3
+          model: mistral
diff --git a/05-etl-pipeline/document-readers-pdf-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentReadersPdfOllamaApplication.java b/05-etl-pipeline/document-readers-pdf-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentReadersPdfOllamaApplication.java
index 1ed9b33..f36c054 100644
--- a/05-etl-pipeline/document-readers-pdf-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentReadersPdfOllamaApplication.java
+++ b/05-etl-pipeline/document-readers-pdf-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentReadersPdfOllamaApplication.java
@@ -15,7 +15,7 @@ public class TestDocumentReadersPdfOllamaApplication {
     @RestartScope
     @ServiceConnection
     OllamaContainer ollama() {
-        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-llama3")
+        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-mistral")
                 .asCompatibleSubstituteFor("ollama/ollama"));
     }
 
diff --git a/05-etl-pipeline/document-readers-text-ollama/README.md b/05-etl-pipeline/document-readers-text-ollama/README.md
index 44acef8..8bda187 100644
--- a/05-etl-pipeline/document-readers-text-ollama/README.md
+++ b/05-etl-pipeline/document-readers-text-ollama/README.md
@@ -9,10 +9,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
 ### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
-Then, use Ollama to run the _llama3_ large language model.
+Then, use Ollama to run the _mistral_ large language model.
 
 ```shell
-ollama run llama3
+ollama run mistral
 ```
 
 Finally, run the Spring Boot application.
@@ -23,7 +23,7 @@ Finally, run the Spring Boot application.
 
 ### Ollama as a dev service with Testcontainers
 
-The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama3_ model at startup time.
+The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _mistral_ model at startup time.
 
 ```shell
 ./gradlew bootTestRun
@@ -31,7 +31,7 @@ The application relies on the native Testcontainers support in Spring Boot to sp
 
 ## Calling the application
 
-You can now call the application that will use Ollama and _llama3_ to load text documents as embeddings and generate an answer to your questions based on those documents (RAG pattern).
+You can now call the application that will use Ollama and _mistral_ to load text documents as embeddings and generate an answer to your questions based on those documents (RAG pattern).
 This example uses [httpie](https://httpie.io) to send HTTP requests.
 
 ```shell
diff --git a/05-etl-pipeline/document-readers-text-ollama/src/main/resources/application.yml b/05-etl-pipeline/document-readers-text-ollama/src/main/resources/application.yml
index 41d4def..8758e3d 100644
--- a/05-etl-pipeline/document-readers-text-ollama/src/main/resources/application.yml
+++ b/05-etl-pipeline/document-readers-text-ollama/src/main/resources/application.yml
@@ -3,7 +3,7 @@ spring:
     ollama:
       chat:
         options:
-          model: llama3
+          model: mistral
       embedding:
         options:
-          model: llama3
\ No newline at end of file
+          model: mistral
\ No newline at end of file
diff --git a/05-etl-pipeline/document-readers-text-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentReadersTextOllamaApplication.java b/05-etl-pipeline/document-readers-text-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentReadersTextOllamaApplication.java
index 65fe442..b4415b5 100644
--- a/05-etl-pipeline/document-readers-text-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentReadersTextOllamaApplication.java
+++ b/05-etl-pipeline/document-readers-text-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentReadersTextOllamaApplication.java
@@ -15,7 +15,7 @@ public class TestDocumentReadersTextOllamaApplication {
     @RestartScope
     @ServiceConnection
     OllamaContainer ollama() {
-        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-llama3")
+        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-mistral")
                 .asCompatibleSubstituteFor("ollama/ollama"));
     }
 
diff --git a/05-etl-pipeline/document-transformers-metadata-ollama/README.md b/05-etl-pipeline/document-transformers-metadata-ollama/README.md
index 0c1e828..97f58eb 100644
--- a/05-etl-pipeline/document-transformers-metadata-ollama/README.md
+++ b/05-etl-pipeline/document-transformers-metadata-ollama/README.md
@@ -9,10 +9,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
 ### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
-Then, use Ollama to run the _llama3_ large language model.
+Then, use Ollama to run the _mistral_ large language model.
 
 ```shell
-ollama run llama3
+ollama run mistral
 ```
 
 Finally, run the Spring Boot application.
@@ -23,7 +23,7 @@ Finally, run the Spring Boot application.
 
 ### Ollama as a dev service with Testcontainers
 
-The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama3_ model at startup time.
+The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _mistral_ model at startup time.
 
 ```shell
 ./gradlew bootTestRun
@@ -31,7 +31,7 @@ The application relies on the native Testcontainers support in Spring Boot to sp
 
 ## Calling the application
 
-You can now call the application that will use Ollama and _llama3_ to load text documents as embeddings and generate an answer to your questions based on those documents (RAG pattern).
+You can now call the application that will use Ollama and _mistral_ to load text documents as embeddings and generate an answer to your questions based on those documents (RAG pattern).
 This example uses [httpie](https://httpie.io) to send HTTP requests.
 
 ```shell
diff --git a/05-etl-pipeline/document-transformers-metadata-ollama/src/main/resources/application.yml b/05-etl-pipeline/document-transformers-metadata-ollama/src/main/resources/application.yml
index 13db25e..5090f64 100644
--- a/05-etl-pipeline/document-transformers-metadata-ollama/src/main/resources/application.yml
+++ b/05-etl-pipeline/document-transformers-metadata-ollama/src/main/resources/application.yml
@@ -3,7 +3,7 @@ spring:
     ollama:
       chat:
         options:
-          model: llama3
+          model: mistral
       embedding:
         options:
-          model: llama3
+          model: mistral
diff --git a/05-etl-pipeline/document-transformers-metadata-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentTransformersMetadataOllamaApplication.java b/05-etl-pipeline/document-transformers-metadata-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentTransformersMetadataOllamaApplication.java
index ce988f8..a37c3d3 100644
--- a/05-etl-pipeline/document-transformers-metadata-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentTransformersMetadataOllamaApplication.java
+++ b/05-etl-pipeline/document-transformers-metadata-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentTransformersMetadataOllamaApplication.java
@@ -15,7 +15,7 @@ public class TestDocumentTransformersMetadataOllamaApplication {
     @RestartScope
     @ServiceConnection
     OllamaContainer ollama() {
-        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-llama3")
+        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-mistral")
                 .asCompatibleSubstituteFor("ollama/ollama"));
     }
 
diff --git a/05-etl-pipeline/document-transformers-splitters-ollama/README.md b/05-etl-pipeline/document-transformers-splitters-ollama/README.md
index ad7650a..9cbcc06 100644
--- a/05-etl-pipeline/document-transformers-splitters-ollama/README.md
+++ b/05-etl-pipeline/document-transformers-splitters-ollama/README.md
@@ -9,10 +9,10 @@ The application relies on Ollama for providing LLMs. You can either run Ollama l
 ### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop.
-Then, use Ollama to run the _llama3_ large language model.
+Then, use Ollama to run the _mistral_ large language model.
 
 ```shell
-ollama run llama3
+ollama run mistral
 ```
 
 Finally, run the Spring Boot application.
@@ -23,7 +23,7 @@ Finally, run the Spring Boot application.
 
 ### Ollama as a dev service with Testcontainers
 
-The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama3_ model at startup time.
+The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _mistral_ model at startup time.
 
 ```shell
 ./gradlew bootTestRun
@@ -31,7 +31,7 @@ The application relies on the native Testcontainers support in Spring Boot to sp
 
 ## Calling the application
 
-You can now call the application that will use Ollama and _llama3_ to load text documents as embeddings and generate an answer to your questions based on those documents (RAG pattern).
+You can now call the application that will use Ollama and _mistral_ to load text documents as embeddings and generate an answer to your questions based on those documents (RAG pattern).
 This example uses [httpie](https://httpie.io) to send HTTP requests.
 
 ```shell
diff --git a/05-etl-pipeline/document-transformers-splitters-ollama/src/main/resources/application.yml b/05-etl-pipeline/document-transformers-splitters-ollama/src/main/resources/application.yml
index 13db25e..5090f64 100644
--- a/05-etl-pipeline/document-transformers-splitters-ollama/src/main/resources/application.yml
+++ b/05-etl-pipeline/document-transformers-splitters-ollama/src/main/resources/application.yml
@@ -3,7 +3,7 @@ spring:
     ollama:
       chat:
         options:
-          model: llama3
+          model: mistral
      embedding:
        options:
-          model: llama3
+          model: mistral
diff --git a/05-etl-pipeline/document-transformers-splitters-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentTransformersMetadataOllamaApplication.java b/05-etl-pipeline/document-transformers-splitters-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentTransformersMetadataOllamaApplication.java
index ce988f8..a37c3d3 100644
--- a/05-etl-pipeline/document-transformers-splitters-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentTransformersMetadataOllamaApplication.java
+++ b/05-etl-pipeline/document-transformers-splitters-ollama/src/test/java/com/thomasvitale/ai/spring/TestDocumentTransformersMetadataOllamaApplication.java
@@ -15,7 +15,7 @@ public class TestDocumentTransformersMetadataOllamaApplication {
     @RestartScope
     @ServiceConnection
    OllamaContainer ollama() {
-        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-llama3")
+        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-mistral")
                 .asCompatibleSubstituteFor("ollama/ollama"));
    }
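For reference, a minimal sketch of how one of the patched Testcontainers configurations reads after this change. Only the `ollama()` bean body appears in the hunks above; the package declaration, imports, the `@Bean` method annotation, and the `@TestConfiguration` class annotation are assumptions based on a typical Spring Boot Testcontainers setup, not lines taken from this patch.

```java
package com.thomasvitale.ai.spring;

import org.springframework.boot.devtools.restart.RestartScope;
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.boot.testcontainers.service.connection.ServiceConnection;
import org.springframework.context.annotation.Bean;
import org.testcontainers.ollama.OllamaContainer;
import org.testcontainers.utility.DockerImageName;

// Assumed surrounding structure; only the ollama() bean is shown in the diff above.
@TestConfiguration(proxyBeanMethods = false)
public class TestChatModelsOllamaApplication {

    @Bean
    @RestartScope
    @ServiceConnection
    OllamaContainer ollama() {
        // Custom image that already bundles the mistral model, declared as a
        // drop-in substitute for the official ollama/ollama image.
        return new OllamaContainer(DockerImageName.parse("ghcr.io/thomasvitale/ollama-mistral")
                .asCompatibleSubstituteFor("ollama/ollama"));
    }
}
```

With this bean in place, `./gradlew bootTestRun` starts the mistral-backed Ollama container automatically, which is why the READMEs above only change the model name and not the workflow.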