diff --git a/.sdkmanrc b/.sdkmanrc
new file mode 100644
index 0000000..fca95be
--- /dev/null
+++ b/.sdkmanrc
@@ -0,0 +1,6 @@
+# Use sdkman to run "sdk env" to initialize with correct JDK version
+# Enable auto-env through the sdkman_auto_env config
+# See https://sdkman.io/usage#config
+# A summary is to add the following to ~/.sdkman/etc/config
+# sdkman_auto_env=true
+java=21.0.2-tem
diff --git a/01-chat-models/chat-models-ollama/README.md b/01-chat-models/chat-models-ollama/README.md
index 9387940..ec1a0dc 100644
--- a/01-chat-models/chat-models-ollama/README.md
+++ b/01-chat-models/chat-models-ollama/README.md
@@ -29,7 +29,7 @@ class ChatController {
 
 The application relies on Ollama for providing LLMs. You can either run Ollama locally on your laptop (macOS or Linux), or rely on the Testcontainers support in Spring Boot to spin up an Ollama service automatically.
 
-### When using Ollama
+### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop (macOS or Linux). Then, use Ollama to run the _llama2_ large language model.
 
@@ -44,7 +44,7 @@ Finally, run the Spring Boot application.
 ./gradlew bootRun
 ```
 
-### When using Docker/Podman
+### Ollama as a dev service with Testcontainers
 
 The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama2_ model at startup time.
 
diff --git a/01-chat-models/chat-models-ollama/build.gradle b/01-chat-models/chat-models-ollama/build.gradle
index a0ede90..124f83c 100644
--- a/01-chat-models/chat-models-ollama/build.gradle
+++ b/01-chat-models/chat-models-ollama/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/01-chat-models/chat-models-openai/build.gradle b/01-chat-models/chat-models-openai/build.gradle
index bfb9516..4167e6f 100644
--- a/01-chat-models/chat-models-openai/build.gradle
+++ b/01-chat-models/chat-models-openai/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/02-prompts/prompts-basics-ollama/README.md b/02-prompts/prompts-basics-ollama/README.md
index 37546d8..02b20d1 100644
--- a/02-prompts/prompts-basics-ollama/README.md
+++ b/02-prompts/prompts-basics-ollama/README.md
@@ -6,7 +6,7 @@ Prompting using simple text with LLMs via Ollama.
 
 The application relies on Ollama for providing LLMs. You can either run Ollama locally on your laptop (macOS or Linux), or rely on the Testcontainers support in Spring Boot to spin up an Ollama service automatically.
 
-### When using Ollama
+### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop (macOS or Linux). Then, use Ollama to run the _llama2_ large language model.
 
@@ -21,7 +21,7 @@ Finally, run the Spring Boot application.
 ./gradlew bootRun
 ```
 
-### When using Docker/Podman
+### Ollama as a dev service with Testcontainers
 
 The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama2_ model at startup time.
 
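The recurring build.gradle change above replaces `sourceCompatibility = '21'`, which only sets a compiler flag and silently assumes the JVM running Gradle is itself Java 21, with a toolchain declaration that decouples the build from the Gradle daemon's JVM. For reference, the resulting `java` block is reproduced below verbatim from the hunks above, with explanatory comments added (the comments are annotations, not repository code):

```groovy
java {
    toolchain {
        // Gradle resolves a local JDK 21 or, once a toolchain resolver is
        // configured (see the settings.gradle change at the end of this diff),
        // downloads one on demand - regardless of the JVM running Gradle itself.
        languageVersion = JavaLanguageVersion.of(21)
    }
}
```

Together with the new .sdkmanrc, this pins both the shell session (`sdk env`) and the build to a Temurin JDK 21 (`21.0.2-tem`).
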
diff --git a/02-prompts/prompts-basics-ollama/build.gradle b/02-prompts/prompts-basics-ollama/build.gradle
index a0ede90..124f83c 100644
--- a/02-prompts/prompts-basics-ollama/build.gradle
+++ b/02-prompts/prompts-basics-ollama/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/02-prompts/prompts-basics-openai/build.gradle b/02-prompts/prompts-basics-openai/build.gradle
index bfb9516..4167e6f 100644
--- a/02-prompts/prompts-basics-openai/build.gradle
+++ b/02-prompts/prompts-basics-openai/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/02-prompts/prompts-messages-ollama/README.md b/02-prompts/prompts-messages-ollama/README.md
index 2bb3029..c427ccc 100644
--- a/02-prompts/prompts-messages-ollama/README.md
+++ b/02-prompts/prompts-messages-ollama/README.md
@@ -6,7 +6,7 @@ Prompting using structured messages and roles with LLMs via Ollama.
 
 The application relies on Ollama for providing LLMs. You can either run Ollama locally on your laptop (macOS or Linux), or rely on the Testcontainers support in Spring Boot to spin up an Ollama service automatically.
 
-### When using Ollama
+### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop (macOS or Linux). Then, use Ollama to run the _llama2_ large language model.
 
@@ -21,7 +21,7 @@ Finally, run the Spring Boot application.
 ./gradlew bootRun
 ```
 
-### When using Docker/Podman
+### Ollama as a dev service with Testcontainers
 
 The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama2_ model at startup time.
 
diff --git a/02-prompts/prompts-messages-ollama/build.gradle b/02-prompts/prompts-messages-ollama/build.gradle
index a0ede90..124f83c 100644
--- a/02-prompts/prompts-messages-ollama/build.gradle
+++ b/02-prompts/prompts-messages-ollama/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/02-prompts/prompts-messages-openai/build.gradle b/02-prompts/prompts-messages-openai/build.gradle
index bfb9516..4167e6f 100644
--- a/02-prompts/prompts-messages-openai/build.gradle
+++ b/02-prompts/prompts-messages-openai/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/02-prompts/prompts-templates-ollama/README.md b/02-prompts/prompts-templates-ollama/README.md
index 13b69e9..7bd0738 100644
--- a/02-prompts/prompts-templates-ollama/README.md
+++ b/02-prompts/prompts-templates-ollama/README.md
@@ -6,7 +6,7 @@ Prompting using templates with LLMs via Ollama.
 
 The application relies on Ollama for providing LLMs. You can either run Ollama locally on your laptop (macOS or Linux), or rely on the Testcontainers support in Spring Boot to spin up an Ollama service automatically.
 
-### When using Ollama
+### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop (macOS or Linux). Then, use Ollama to run the _llama2_ large language model.
 
@@ -21,7 +21,7 @@ Finally, run the Spring Boot application.
 ./gradlew bootRun
 ```
 
-### When using Docker/Podman
+### Ollama as a dev service with Testcontainers
 
 The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama2_ model at startup time.
 
diff --git a/02-prompts/prompts-templates-ollama/build.gradle b/02-prompts/prompts-templates-ollama/build.gradle
index a0ede90..124f83c 100644
--- a/02-prompts/prompts-templates-ollama/build.gradle
+++ b/02-prompts/prompts-templates-ollama/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/02-prompts/prompts-templates-openai/build.gradle b/02-prompts/prompts-templates-openai/build.gradle
index bfb9516..4167e6f 100644
--- a/02-prompts/prompts-templates-openai/build.gradle
+++ b/02-prompts/prompts-templates-openai/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/03-output-parsers/output-parsers-ollama/README.md b/03-output-parsers/output-parsers-ollama/README.md
index 2da2af5..44661bd 100644
--- a/03-output-parsers/output-parsers-ollama/README.md
+++ b/03-output-parsers/output-parsers-ollama/README.md
@@ -6,7 +6,7 @@ Parsing the LLM output as structured objects (Beans, Map, List) via Ollama.
 
 The application relies on Ollama for providing LLMs. You can either run Ollama locally on your laptop (macOS or Linux), or rely on the Testcontainers support in Spring Boot to spin up an Ollama service automatically.
 
-### When using Ollama
+### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop (macOS or Linux). Then, use Ollama to run the _llama2_ large language model.
 
@@ -21,7 +21,7 @@ Finally, run the Spring Boot application.
 ./gradlew bootRun
 ```
 
-### When using Docker/Podman
+### Ollama as a dev service with Testcontainers
 
 The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama2_ model at startup time.
 
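The README sections renamed throughout this diff distinguish two ways of providing Ollama: a native install, or a dev service that Spring Boot's Testcontainers support starts alongside the application. Below is a minimal sketch of how such a dev service is commonly wired in Spring Boot 3.1+; the class name, image, and wiring are illustrative assumptions, not code from this repository:

```java
import org.springframework.boot.test.context.TestConfiguration;
import org.springframework.context.annotation.Bean;
import org.springframework.test.context.DynamicPropertyRegistry;
import org.testcontainers.containers.GenericContainer;
import org.testcontainers.utility.DockerImageName;

// Hypothetical sketch, not repository code: runs Ollama in a
// Testcontainers-managed container and points Spring AI at it.
@TestConfiguration(proxyBeanMethods = false)
class OllamaDevServiceConfiguration {

    @Bean
    GenericContainer<?> ollama(DynamicPropertyRegistry properties) {
        GenericContainer<?> container = new GenericContainer<>(DockerImageName.parse("ollama/ollama"))
                .withExposedPorts(11434); // default Ollama API port
        // The supplier is resolved lazily, after the container has started
        // and the host port has been mapped.
        properties.add("spring.ai.ollama.base-url",
                () -> "http://%s:%d".formatted(container.getHost(), container.getMappedPort(11434)));
        return container;
    }
}
```

A launcher on the test classpath, for example `SpringApplication.from(Application::main).with(OllamaDevServiceConfiguration.class).run(args)`, then starts the application together with the container, which is what the renamed sections call a dev service.
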
diff --git a/03-output-parsers/output-parsers-ollama/build.gradle b/03-output-parsers/output-parsers-ollama/build.gradle
index a0ede90..124f83c 100644
--- a/03-output-parsers/output-parsers-ollama/build.gradle
+++ b/03-output-parsers/output-parsers-ollama/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/03-output-parsers/output-parsers-openai/build.gradle b/03-output-parsers/output-parsers-openai/build.gradle
index bfb9516..4167e6f 100644
--- a/03-output-parsers/output-parsers-openai/build.gradle
+++ b/03-output-parsers/output-parsers-openai/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/04-embedding-models/embedding-models-ollama/README.md b/04-embedding-models/embedding-models-ollama/README.md
index 279f997..ff6f837 100644
--- a/04-embedding-models/embedding-models-ollama/README.md
+++ b/04-embedding-models/embedding-models-ollama/README.md
@@ -30,7 +30,7 @@ class EmbeddingController {
 
 The application relies on Ollama for providing LLMs. You can either run Ollama locally on your laptop (macOS or Linux), or rely on the Testcontainers support in Spring Boot to spin up an Ollama service automatically.
 
-### When using Ollama
+### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop (macOS or Linux). Then, use Ollama to run the _llama2_ large language model.
 
@@ -45,7 +45,7 @@ Finally, run the Spring Boot application.
 ./gradlew bootRun
 ```
 
-### When using Docker/Podman
+### Ollama as a dev service with Testcontainers
 
 The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama2_ model at startup time.
 
diff --git a/04-embedding-models/embedding-models-ollama/build.gradle b/04-embedding-models/embedding-models-ollama/build.gradle
index a0ede90..124f83c 100644
--- a/04-embedding-models/embedding-models-ollama/build.gradle
+++ b/04-embedding-models/embedding-models-ollama/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/04-embedding-models/embedding-models-openai/build.gradle b/04-embedding-models/embedding-models-openai/build.gradle
index bfb9516..4167e6f 100644
--- a/04-embedding-models/embedding-models-openai/build.gradle
+++ b/04-embedding-models/embedding-models-openai/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/05-document-readers/document-readers-json-ollama/README.md b/05-document-readers/document-readers-json-ollama/README.md
index eb271cf..5aa3835 100644
--- a/05-document-readers/document-readers-json-ollama/README.md
+++ b/05-document-readers/document-readers-json-ollama/README.md
@@ -6,7 +6,7 @@ Reading and vectorizing JSON documents with LLMs via Ollama.
 
 The application relies on Ollama for providing LLMs. You can either run Ollama locally on your laptop (macOS or Linux), or rely on the Testcontainers support in Spring Boot to spin up an Ollama service automatically.
 
-### When using Ollama
+### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop (macOS or Linux). Then, use Ollama to run the _llama2_ large language model.
 
@@ -21,7 +21,7 @@ Finally, run the Spring Boot application.
 ./gradlew bootRun
 ```
 
-### When using Docker/Podman
+### Ollama as a dev service with Testcontainers
 
 The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama2_ model at startup time.
 
diff --git a/05-document-readers/document-readers-json-ollama/build.gradle b/05-document-readers/document-readers-json-ollama/build.gradle
index a0ede90..124f83c 100644
--- a/05-document-readers/document-readers-json-ollama/build.gradle
+++ b/05-document-readers/document-readers-json-ollama/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/05-document-readers/document-readers-pdf-ollama/README.md b/05-document-readers/document-readers-pdf-ollama/README.md
index 53fed4c..ce9c9ac 100644
--- a/05-document-readers/document-readers-pdf-ollama/README.md
+++ b/05-document-readers/document-readers-pdf-ollama/README.md
@@ -6,7 +6,7 @@ Reading and vectorizing PDF documents with LLMs via Ollama.
 
 The application relies on Ollama for providing LLMs. You can either run Ollama locally on your laptop (macOS or Linux), or rely on the Testcontainers support in Spring Boot to spin up an Ollama service automatically.
 
-### When using Ollama
+### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop (macOS or Linux). Then, use Ollama to run the _llama2_ large language model.
 
@@ -21,7 +21,7 @@ Finally, run the Spring Boot application.
 ./gradlew bootRun
 ```
 
-### When using Docker/Podman
+### Ollama as a dev service with Testcontainers
 
 The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama2_ model at startup time.
 
diff --git a/05-document-readers/document-readers-pdf-ollama/build.gradle b/05-document-readers/document-readers-pdf-ollama/build.gradle
index f2c529e..6ca9980 100644
--- a/05-document-readers/document-readers-pdf-ollama/build.gradle
+++ b/05-document-readers/document-readers-pdf-ollama/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/05-document-readers/document-readers-text-ollama/README.md b/05-document-readers/document-readers-text-ollama/README.md
index dfc2943..196c53c 100644
--- a/05-document-readers/document-readers-text-ollama/README.md
+++ b/05-document-readers/document-readers-text-ollama/README.md
@@ -6,7 +6,7 @@ Reading and vectorizing text documents with LLMs via Ollama.
 
 The application relies on Ollama for providing LLMs. You can either run Ollama locally on your laptop (macOS or Linux), or rely on the Testcontainers support in Spring Boot to spin up an Ollama service automatically.
 
-### When using Ollama
+### Ollama as a native application
 
 First, make sure you have [Ollama](https://ollama.ai) installed on your laptop (macOS or Linux). Then, use Ollama to run the _llama2_ large language model.
 
@@ -21,7 +21,7 @@ Finally, run the Spring Boot application.
 ./gradlew bootRun
 ```
 
-### When using Docker/Podman
+### Ollama as a dev service with Testcontainers
 
 The application relies on the native Testcontainers support in Spring Boot to spin up an Ollama service with a _llama2_ model at startup time.
 
diff --git a/05-document-readers/document-readers-text-ollama/build.gradle b/05-document-readers/document-readers-text-ollama/build.gradle
index a0ede90..124f83c 100644
--- a/05-document-readers/document-readers-text-ollama/build.gradle
+++ b/05-document-readers/document-readers-text-ollama/build.gradle
@@ -8,7 +8,9 @@ group = 'com.thomasvitale'
 version = '0.0.1-SNAPSHOT'
 
 java {
-    sourceCompatibility = '21'
+    toolchain {
+        languageVersion = JavaLanguageVersion.of(21)
+    }
 }
 
 repositories {
diff --git a/settings.gradle b/settings.gradle
index 34b8ac9..e93ccec 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -1,3 +1,7 @@
+plugins {
+    id "org.gradle.toolchains.foojay-resolver-convention" version '0.7.0'
+}
+
 rootProject.name = 'llm-apps-java-spring-ai'
 
 include '01-chat-models:chat-models-ollama'
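The settings.gradle change completes the toolchain migration: the Foojay resolver convention plugin lets Gradle provision a matching JDK through the foojay Disco API when none is installed locally, whereas without a resolver, toolchains can only use JDKs already present on the machine. A quick way to check the setup (a sketch, assuming the repository's standard Gradle wrapper):

```shell
# List the JDKs Gradle's toolchain support detects, then build;
# with the resolver configured, a missing JDK 21 is downloaded on demand.
./gradlew -q javaToolchains
./gradlew build
```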