From 900f54844f4d3481a9f569b64b82c80bdba900a5 Mon Sep 17 00:00:00 2001 From: Jeff MAURY Date: Tue, 24 Sep 2024 23:38:43 +0200 Subject: [PATCH 1/2] feat: add function calling recipe Fixes #562 Signed-off-by: Jeff MAURY --- .../function_calling/Makefile | 9 + .../function_calling/README.md | 180 ++++++++++++++++++ .../function_calling/ai-lab.yaml | 27 +++ .../function_calling/app/Containerfile | 8 + .../function_calling/app/app.py | 114 +++++++++++ .../function_calling/app/requirements.txt | 4 + .../function_calling/bootc/Containerfile | 50 +++++ .../bootc/Containerfile.nocache | 26 +++ .../function_calling/bootc/README.md | 94 +++++++++ .../function_calling/provision/playbook.yml | 63 ++++++ .../provision/requirements.yml | 4 + .../function_calling/quadlet/README.md | 9 + .../quadlet/function_calling.kube | 16 ++ .../quadlet/function_calling.yaml | 45 +++++ 14 files changed, 649 insertions(+) create mode 100644 recipes/natural_language_processing/function_calling/Makefile create mode 100644 recipes/natural_language_processing/function_calling/README.md create mode 100644 recipes/natural_language_processing/function_calling/ai-lab.yaml create mode 100644 recipes/natural_language_processing/function_calling/app/Containerfile create mode 100644 recipes/natural_language_processing/function_calling/app/app.py create mode 100644 recipes/natural_language_processing/function_calling/app/requirements.txt create mode 100644 recipes/natural_language_processing/function_calling/bootc/Containerfile create mode 100644 recipes/natural_language_processing/function_calling/bootc/Containerfile.nocache create mode 100644 recipes/natural_language_processing/function_calling/bootc/README.md create mode 100644 recipes/natural_language_processing/function_calling/provision/playbook.yml create mode 100644 recipes/natural_language_processing/function_calling/provision/requirements.yml create mode 100644 recipes/natural_language_processing/function_calling/quadlet/README.md create mode 100644 recipes/natural_language_processing/function_calling/quadlet/function_calling.kube create mode 100644 recipes/natural_language_processing/function_calling/quadlet/function_calling.yaml diff --git a/recipes/natural_language_processing/function_calling/Makefile b/recipes/natural_language_processing/function_calling/Makefile new file mode 100644 index 00000000..323faf71 --- /dev/null +++ b/recipes/natural_language_processing/function_calling/Makefile @@ -0,0 +1,9 @@ +SHELL := /bin/bash +APP ?= function_calling +PORT ?= 8501 + +include ../../common/Makefile.common + +RECIPE_BINARIES_PATH := $(shell realpath ../../common/bin) +RELATIVE_MODELS_PATH := ../../../models +RELATIVE_TESTS_PATH := ../tests diff --git a/recipes/natural_language_processing/function_calling/README.md b/recipes/natural_language_processing/function_calling/README.md new file mode 100644 index 00000000..c992588e --- /dev/null +++ b/recipes/natural_language_processing/function_calling/README.md @@ -0,0 +1,180 @@ +# Function Calling Application + + This recipe helps developers start building their own custom function calling enabled chat applications. It consists of two main components: the Model Service and the AI Application. + + There are a few options today for local Model Serving, but this recipe will use [`llama-cpp-python`](https://github.com/abetlen/llama-cpp-python) and their OpenAI compatible Model Service. 
There is a Containerfile provided that can be used to build this Model Service within the repo, [`model_servers/llamacpp_python/base/Containerfile`](/model_servers/llamacpp_python/base/Containerfile).

 The AI Application will connect to the Model Service via its OpenAI compatible API. The recipe relies on [Langchain's](https://python.langchain.com/docs/get_started/introduction) python package to simplify communication with the Model Service and uses [Streamlit](https://streamlit.io/) for the UI layer. You can find an example of the chat application below.

![](/assets/chatbot_ui.png)


## Try the Function Calling Application

The [Podman Desktop](https://podman-desktop.io) [AI Lab Extension](https://github.com/containers/podman-desktop-extension-ai-lab) includes this recipe among others. To try it out, open `Recipes Catalog` -> `Function Calling` and follow the instructions to start the application.

# Build the Application

The rest of this document will explain how to build and run the application from the terminal, and will
go into greater detail on how each container in the application Pod is built, run, and
what purpose it serves in the overall application. All the recipes use a central [Makefile](../../common/Makefile.common) that includes variables populated with default values to simplify getting started. Please review the [Makefile docs](../../common/README.md) to learn about further customizing your application.


This application requires a model, a model service and an AI inferencing application.

* [Quickstart](#quickstart)
* [Download a model](#download-a-model)
* [Build the Model Service](#build-the-model-service)
* [Deploy the Model Service](#deploy-the-model-service)
* [Build the AI Application](#build-the-ai-application)
* [Deploy the AI Application](#deploy-the-ai-application)
* [Interact with the AI Application](#interact-with-the-ai-application)
* [Embed the AI Application in a Bootable Container Image](#embed-the-ai-application-in-a-bootable-container-image)


## Quickstart
To run the application with pre-built images from `quay.io/ai-lab`, use `make quadlet`. This command
builds the application's metadata and generates Kubernetes YAML at `./build/function_calling.yaml` to spin up a Pod that can then be launched locally.
Try it with:

```
make quadlet
podman kube play build/function_calling.yaml
```

This will take a few minutes if the model and model-server container images need to be downloaded.
The Pod is named `function_calling`, so you may use [Podman](https://podman.io) to manage the Pod and its containers:

```
podman pod list
podman ps
```

Once the Pod and its containers are running, the application can be accessed at `http://localhost:8501`. However, if you started the app via the Podman Desktop UI, a random port will be assigned instead of `8501`. Please use the AI App Details `Open AI App` button to access it instead.
Please refer to the section below for more details about [interacting with the function calling application](#interact-with-the-ai-application).

To stop and remove the Pod, run:

```
podman pod stop function_calling
podman pod rm function_calling
```

## Download a model

If you are just getting started, we recommend using [granite-7b-lab](https://huggingface.co/instructlab/granite-7b-lab). This is a
well-performing mid-sized model with an Apache-2.0 license. In order to use it with our Model Service we need it converted
and quantized into the [GGUF format](https://github.com/ggerganov/ggml/blob/master/docs/gguf.md).
There are a number of
ways to get a GGUF version of granite-7b-lab, but the simplest is to download a pre-converted one from
[huggingface.co](https://huggingface.co) here: https://huggingface.co/instructlab/granite-7b-lab-GGUF.

The recommended model can be downloaded using the code snippet below:

```bash
cd ../../../models
curl -sLO https://huggingface.co/instructlab/granite-7b-lab-GGUF/resolve/main/granite-7b-lab-Q4_K_M.gguf
cd ../recipes/natural_language_processing/function_calling
```

_A full list of supported open models is forthcoming._


## Build the Model Service

The complete instructions for building and deploying the Model Service can be found in the
[llamacpp_python model-service document](../../../model_servers/llamacpp_python/README.md).

The Model Service can be built with make commands from the [llamacpp_python directory](../../../model_servers/llamacpp_python/).

```bash
# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes
make build
```
Check out the [Makefile](../../../model_servers/llamacpp_python/Makefile) to get more details on different options for how to build.

## Deploy the Model Service

The local Model Service relies on a volume mount to the localhost to access the model files. It also employs environment variables to dictate the model used and where it's served. You can start your local Model Service using the following `make` command from `model_servers/llamacpp_python` set with reasonable defaults:

```bash
# from path model_servers/llamacpp_python from repo containers/ai-lab-recipes
make run
```

## Build the AI Application

The AI Application can be built with the following make command:

```bash
# Run this from the current directory (path recipes/natural_language_processing/function_calling from repo containers/ai-lab-recipes)
make build
```

## Deploy the AI Application

Make sure the Model Service is up and running before starting this container image. When starting the AI Application container image we need to direct it to the correct `MODEL_ENDPOINT`. This could be any appropriately hosted Model Service (running locally or in the cloud) using an OpenAI compatible API. In our case the Model Service is running inside the Podman machine so we need to provide it with the appropriate address `10.88.0.1`. To deploy the AI application use the following:

```bash
# Run this from the current directory (path recipes/natural_language_processing/function_calling from repo containers/ai-lab-recipes)
make run
```

## Interact with the AI Application

Everything should now be up and running with the chat application available at [`http://localhost:8501`](http://localhost:8501). By using this recipe and getting this starting point established, users should now have an easier time customizing and building their own LLM enabled function calling applications.

## Embed the AI Application in a Bootable Container Image

To build a bootable container image that includes this sample function calling workload as a service that starts when a system is booted, run: `make -f Makefile bootc`. You can optionally override the default image / tag you want to give the make command by specifying it as follows: `make -f Makefile BOOTC_IMAGE=<your_bootc_image:tag> bootc`.

Substituting the bootc/Containerfile FROM command is simple using the Makefile FROM option.

```bash
make FROM=registry.redhat.io/rhel9/rhel-bootc:9.4 bootc
```

Selecting the ARCH for the bootc/Containerfile is simple using the Makefile ARCH= variable.
+ +``` +make ARCH=x86_64 bootc +``` + +The magic happens when you have a bootc enabled system running. If you do, and you'd like to update the operating system to the OS you just built +with the chatbot application, it's as simple as ssh-ing into the bootc system and running: + +```bash +bootc switch quay.io/ai-lab/chatbot-bootc:latest +``` + +Upon a reboot, you'll see that the chatbot service is running on the system. Check on the service with: + +```bash +ssh user@bootc-system-ip +sudo systemctl status chatbot +``` + +### What are bootable containers? + +What's a [bootable OCI container](https://containers.github.io/bootc/) and what's it got to do with AI? + +That's a good question! We think it's a good idea to embed AI workloads (or any workload!) into bootable images at _build time_ rather than +at _runtime_. This extends the benefits, such as portability and predictability, that containerizing applications provides to the operating system. +Bootable OCI images bake exactly what you need to run your workloads into the operating system at build time by using your favorite containerization +tools. Might I suggest [podman](https://podman.io/)? + +Once installed, a bootc enabled system can be updated by providing an updated bootable OCI image from any OCI +image registry with a single `bootc` command. This works especially well for fleets of devices that have fixed workloads - think +factories or appliances. Who doesn't want to add a little AI to their appliance, am I right? + +Bootable images lend toward immutable operating systems, and the more immutable an operating system is, the less that can go wrong at runtime! + +#### Creating bootable disk images + +You can convert a bootc image to a bootable disk image using the +[quay.io/centos-bootc/bootc-image-builder](https://github.com/osbuild/bootc-image-builder) container image. + +This container image allows you to build and deploy [multiple disk image types](../../common/README_bootc_image_builder.md) from bootc container images. + +Default image types can be set via the DISK_TYPE Makefile variable. + +`make bootc-image-builder DISK_TYPE=ami` diff --git a/recipes/natural_language_processing/function_calling/ai-lab.yaml b/recipes/natural_language_processing/function_calling/ai-lab.yaml new file mode 100644 index 00000000..c392c729 --- /dev/null +++ b/recipes/natural_language_processing/function_calling/ai-lab.yaml @@ -0,0 +1,27 @@ +version: v1.0 +application: + type: language + name: Function_Calling_Streamlit + description: Function calling a remote service with a model service in a web frontend. + containers: + - name: llamacpp-server + contextdir: ../../../model_servers/llamacpp_python + containerfile: ./base/Containerfile + model-service: true + backend: + - llama-cpp + arch: + - arm64 + - amd64 + ports: + - 8001 + image: quay.io/ai-lab/llamacpp_python:latest + - name: streamlit-function-calling-app + contextdir: app + containerfile: Containerfile + arch: + - arm64 + - amd64 + ports: + - 8501 + image: quay.io/ai-lab/function-calling:latest diff --git a/recipes/natural_language_processing/function_calling/app/Containerfile b/recipes/natural_language_processing/function_calling/app/Containerfile new file mode 100644 index 00000000..62d6e5ee --- /dev/null +++ b/recipes/natural_language_processing/function_calling/app/Containerfile @@ -0,0 +1,8 @@ +FROM registry.access.redhat.com/ubi9/python-311:1-72.1722518949 +WORKDIR /function-call +COPY requirements.txt . 
+RUN pip install --upgrade pip +RUN pip install --no-cache-dir --upgrade -r /function-call/requirements.txt +COPY *.py . +EXPOSE 8501 +ENTRYPOINT [ "streamlit", "run", "app.py" ] diff --git a/recipes/natural_language_processing/function_calling/app/app.py b/recipes/natural_language_processing/function_calling/app/app.py new file mode 100644 index 00000000..2f492692 --- /dev/null +++ b/recipes/natural_language_processing/function_calling/app/app.py @@ -0,0 +1,114 @@ +from langchain_openai import ChatOpenAI +from langchain.chains import LLMChain +from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder +from langchain_core.pydantic_v1 import BaseModel, Field +from langchain_core.output_parsers import PydanticToolsParser +from langchain.globals import set_debug, set_verbose +import streamlit as st +import requests +import time +import json +import os + +from urllib3 import request + +model_service = os.getenv("MODEL_ENDPOINT", + "http://localhost:8001") +model_service = f"{model_service}/v1" + +@st.cache_resource(show_spinner=False) +def checking_model_service(): + start = time.time() + print("Checking Model Service Availability...") + ready = False + while not ready: + try: + request_cpp = requests.get(f'{model_service}/models') + request_ollama = requests.get(f'{model_service[:-2]}api/tags') + if request_cpp.status_code == 200: + server = "Llamacpp_Python" + ready = True + elif request_ollama.status_code == 200: + server = "Ollama" + ready = True + except: + pass + time.sleep(1) + print(f"{server} Model Service Available") + print(f"{time.time()-start} seconds") + return server + +def get_models(): + try: + response = requests.get(f"{model_service[:-2]}api/tags") + return [i["name"].split(":")[0] for i in + json.loads(response.content)["models"]] + except: + return None + +with st.spinner("Checking Model Service Availability..."): + server = checking_model_service() + +def enableInput(): + st.session_state["input_disabled"] = False + +def disableInput(): + st.session_state["input_disabled"] = True + +st.title("💬 Function calling") +if "input_disabled" not in st.session_state: + enableInput() + +model_name = os.getenv("MODEL_NAME", "") + +if server == "Ollama": + models = get_models() + with st.sidebar: + model_name = st.radio(label="Select Model", + options=models) + +class getWeather(BaseModel): + """Get the current weather in a given latitude and longitude.""" + + latitude: float = Field(description="The latitude of a place") + longitude: float = Field(description="The longitude of a place") + + #https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41&hourly=temperature_2m + def retrieve(self): + return requests.get("https://api.open-meteo.com/v1/forecast", params={'latitude': self.latitude, 'longitude': self.longitude, 'hourly': 'temperature_2m'}).json(); + +llm = ChatOpenAI(base_url=model_service, + api_key="sk-no-key-required", + model=model_name, + streaming=False, verbose=False).bind_tools(tools=[getWeather], tool_choice='auto') + +SYSTEM_MESSAGE=""" +You are a helpful assistant. +You can call functions with appropriate input when necessary. +""" + + +prompt = ChatPromptTemplate.from_messages([ + ("system", SYSTEM_MESSAGE), + ("user", "What's the weather like in {input} ?") +]) + +chain = prompt | llm | PydanticToolsParser(tools=[getWeather]) + +st.markdown(""" +This demo application will ask the LLM for the weather in the city given in the input field and +specify a tool that can get weather information give a latitude and longitude. 
The weather information +retrieval is implemented using open-meteo.com. +""") +container = st.empty() + +if prompt := st.chat_input(placeholder="Enter the city name:", disabled=not input): + with container: + st.write("Calling LLM") + response = chain.invoke(prompt) + with container: + st.write("Retrieving weather information") + temperatures = list(map(lambda r: r.retrieve(), response)) + print(temperatures[0]) + with container: + st.line_chart(temperatures[0]['hourly'], x='time', y='temperature_2m') diff --git a/recipes/natural_language_processing/function_calling/app/requirements.txt b/recipes/natural_language_processing/function_calling/app/requirements.txt new file mode 100644 index 00000000..dba29a90 --- /dev/null +++ b/recipes/natural_language_processing/function_calling/app/requirements.txt @@ -0,0 +1,4 @@ +langchain==0.2.3 +langchain-openai==0.1.7 +langchain-community==0.2.4 +streamlit==1.34.0 \ No newline at end of file diff --git a/recipes/natural_language_processing/function_calling/bootc/Containerfile b/recipes/natural_language_processing/function_calling/bootc/Containerfile new file mode 100644 index 00000000..b83fa917 --- /dev/null +++ b/recipes/natural_language_processing/function_calling/bootc/Containerfile @@ -0,0 +1,50 @@ +# Example: an AI powered sample application is embedded as a systemd service +# via Podman quadlet files in /usr/share/containers/systemd +# +# from recipes/natural_language_processing/chatbot, run +# 'make bootc' + +FROM quay.io/centos-bootc/centos-bootc:stream9 +ARG SSHPUBKEY + +# The --build-arg "SSHPUBKEY=$(cat ~/.ssh/id_rsa.pub)" option inserts your +# public key into the image, allowing root access via ssh. +RUN set -eu; mkdir -p /usr/ssh && \ + echo 'AuthorizedKeysFile /usr/ssh/%u.keys .ssh/authorized_keys .ssh/authorized_keys2' >> /etc/ssh/sshd_config.d/30-auth-system.conf && \ + echo ${SSHPUBKEY} > /usr/ssh/root.keys && chmod 0600 /usr/ssh/root.keys + +ARG RECIPE=function_calling +ARG MODEL_IMAGE=quay.io/ai-lab/granite-7b-lab:latest +ARG APP_IMAGE=quay.io/ai-lab/${RECIPE}:latest +ARG SERVER_IMAGE=quay.io/ai-lab/llamacpp_python:latest +ARG TARGETARCH + +# Include growfs service +COPY build/usr/lib /usr/lib +COPY --chmod=0755 build/usr/libexec/bootc-generic-growpart /usr/libexec/bootc-generic-growpart + +# Add quadlet files to setup system to automatically run AI application on boot +COPY build/${RECIPE}.kube build/${RECIPE}.yaml /usr/share/containers/systemd + +# Because images are prepulled, no need for .image quadlet +# If commenting out the pulls below, uncomment this to track the images +# so the systemd service will wait for the images with the service startup +# COPY build/${RECIPE}.image /usr/share/containers/systemd + +# Setup /usr/lib/containers/storage as an additional store for images. +# Remove once the base images have this set by default. +RUN sed -i -e '/additionalimage.*/a "/usr/lib/containers/storage",' \ + /etc/containers/storage.conf + +# Added for running as an OCI Container to prevent Overlay on Overlay issues. +VOLUME /var/lib/containers + +# Prepull the model, model_server & application images to populate the system. +# Comment the pull commands to keep bootc image smaller. 
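# (Pre-pulled images land in /usr/lib/containers/storage, the additional image store configured above.)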
# The quadlet .image file added above pulls the following images at service startup

RUN podman pull --arch=${TARGETARCH} --root /usr/lib/containers/storage ${SERVER_IMAGE}
RUN podman pull --arch=${TARGETARCH} --root /usr/lib/containers/storage ${APP_IMAGE}
RUN podman pull --arch=${TARGETARCH} --root /usr/lib/containers/storage ${MODEL_IMAGE}

RUN podman system reset --force 2>/dev/null
diff --git a/recipes/natural_language_processing/function_calling/bootc/Containerfile.nocache b/recipes/natural_language_processing/function_calling/bootc/Containerfile.nocache
new file mode 100644
index 00000000..fd87474c
--- /dev/null
+++ b/recipes/natural_language_processing/function_calling/bootc/Containerfile.nocache
@@ -0,0 +1,26 @@
# Example: an AI powered sample application is embedded as a systemd service
# via Podman quadlet files in /usr/share/containers/systemd
#
# from recipes/natural_language_processing/function_calling, run
# 'make bootc'

FROM quay.io/centos-bootc/centos-bootc:stream9
ARG SSHPUBKEY

# The --build-arg "SSHPUBKEY=$(cat ~/.ssh/id_rsa.pub)" option inserts your
# public key into the image, allowing root access via ssh.
RUN set -eu; mkdir -p /usr/ssh && \
    echo 'AuthorizedKeysFile /usr/ssh/%u.keys .ssh/authorized_keys .ssh/authorized_keys2' >> /etc/ssh/sshd_config.d/30-auth-system.conf && \
    echo ${SSHPUBKEY} > /usr/ssh/root.keys && chmod 0600 /usr/ssh/root.keys

ARG RECIPE=function_calling

# Include growfs service
COPY build/usr/lib /usr/lib
COPY --chmod=0755 build/usr/libexec/bootc-generic-growpart /usr/libexec/bootc-generic-growpart

# Add quadlet files to setup system to automatically run AI application on boot
COPY build/${RECIPE}.image build/${RECIPE}.kube build/${RECIPE}.yaml /usr/share/containers/systemd

# Added for running as an OCI Container to prevent Overlay on Overlay issues.
VOLUME /var/lib/containers
diff --git a/recipes/natural_language_processing/function_calling/bootc/README.md b/recipes/natural_language_processing/function_calling/bootc/README.md
new file mode 100644
index 00000000..b11ed1ed
--- /dev/null
+++ b/recipes/natural_language_processing/function_calling/bootc/README.md
@@ -0,0 +1,94 @@
## Embed workload (AI sample applications) in a bootable container image

### Create a custom centos-bootc:stream9 image

* [Containerfile](./Containerfile) - embeds an LLM-powered sample function calling application.

Details on the application can be found [in the function_calling/README.md](../README.md). By default, this Containerfile includes a model-server
that is meant to run with CPU - no additional GPU drivers or toolkits are embedded. You can substitute the llamacpp_python model-server image
for one that has GPU drivers and toolkits with additional build-args. The `FROM` must be replaced with a base image that has the necessary
kernel drivers and toolkits if building for GPU enabled systems. For an example of an NVIDIA/CUDA base image,
see the [NVIDIA bootable image example](https://gitlab.com/bootc-org/examples/-/tree/main/nvidia?ref_type=heads).

In order to pre-pull the workload images, you need to build from the same architecture you're building for.
If not pre-pulling the workload images, you can cross build (i.e., build from a Mac for an x86_64 system).
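For example, assuming the pre-pull `RUN podman pull` steps are disabled (or the [Containerfile.nocache](./Containerfile.nocache) variant is used), a cross build only needs an explicit `--platform` flag. A minimal sketch, using the same SSH key and image tag placeholders as the full commands below:

```bash
# check the architecture of the build host
uname -m
# cross build for x86_64 from a non-x86_64 host (pre-pull steps must be disabled)
podman build --platform linux/amd64 \
    --build-arg "SSHPUBKEY=$(cat ~/.ssh/id_rsa.pub)" \
    -t quay.io/yourrepo/youros:tag .
```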
To build the derived bootc image for x86_64 architecture, run the following:

```bash
cd recipes/natural_language_processing/function_calling

# for CPU powered sample LLM application
# to switch to an alternate platform like aarch64, pass --platform linux/arm64
# the --cap-add SYS_ADMIN switch is needed when you are embedding Podman
# commands within the container build. If the registry you are pulling images
# from requires authentication, then you will need to volume mount the
# auth.json file with SELinux separation disabled.
podman login --authfile auth.json quay.io/yourrepo
podman build --build-arg "SSHPUBKEY=$(cat ~/.ssh/id_rsa.pub)" \
    --security-opt label=disable \
    -v ./auth.json:/run/containers/0/auth.json \
    --cap-add SYS_ADMIN \
    -t quay.io/yourrepo/youros:tag .

# for GPU powered sample LLM application with llamacpp cuda model server
podman build --build-arg "SSHPUBKEY=$(cat ~/.ssh/id_rsa.pub)" \
    --build-arg "SERVER_IMAGE=quay.io/ai-lab/llamacpp_python_cuda:latest" \
    --from <gpu-enabled-bootc-base-image> \
    --cap-add SYS_ADMIN \
    --platform linux/amd64 \
    -t quay.io/yourrepo/youros:tag .

podman push quay.io/yourrepo/youros:tag
```

### Update a bootc-enabled system with the new derived image

To build a disk image from an OCI bootable image, you can refer to [bootc-org/examples](https://gitlab.com/bootc-org/examples).
For this example, we will assume a bootc enabled system is already running.
If already running a bootc-enabled OS, `bootc switch` can be used to update the system to target a new bootable OCI image with embedded workloads.

SSH into the bootc-enabled system and run:

```bash
bootc switch quay.io/yourrepo/youros:tag
```

The necessary image layers will be downloaded from the OCI registry, and the system will prompt you to reboot into the new operating system.
From this point, with any subsequent modifications and pushes to the `quay.io/yourrepo/youros:tag` OCI image, your OS can be updated with:

```bash
bootc upgrade
```

### Accessing the embedded workloads

The function calling application can be accessed by visiting port `8501` of the running bootc system.
It will be running as a systemd service from Podman quadlet files placed at `/usr/share/containers/systemd/` on the bootc system.
For more information about running containerized applications as systemd services with Podman, refer to this
[Podman quadlet post](https://www.redhat.com/sysadmin/quadlet-podman) or the [podman documentation](https://podman.io/docs).

To monitor the sample application, SSH into the bootc system and run:

```bash
systemctl status function_calling
```

You can also view the pods and containers that are managed with systemd by running:

```
podman pod list
podman ps -a
```

To stop the sample application, SSH into the bootc system and run:

```bash
systemctl stop function_calling
```

To run the sample application _not_ as a systemd service, stop the service, then
run the appropriate commands based on the application you have embedded.
+ +```bash +podman kube play /usr/share/containers/systemd/function_calling.yaml +``` diff --git a/recipes/natural_language_processing/function_calling/provision/playbook.yml b/recipes/natural_language_processing/function_calling/provision/playbook.yml new file mode 100644 index 00000000..0f8a428b --- /dev/null +++ b/recipes/natural_language_processing/function_calling/provision/playbook.yml @@ -0,0 +1,63 @@ +--- +- name: Test Environment Provisioning + hosts: test_environments + remote_user: fedora + become: true + gather_facts: false + + tasks: + + - name: Wait until the instance is ready + ansible.builtin.wait_for_connection: + delay: 10 + timeout: 60 + + - name: Gather facts for first time + ansible.builtin.setup: + + - name: Required Packages + ansible.builtin.package: + name: "{{ item }}" + state: present + with_items: + - podman + - python3-libdnf5 + + - name: Models host directory + ansible.builtin.file: + path: locallm/models + state: directory + + - name: Download Model + ansible.builtin.get_url: + url: https://huggingface.co/MaziyarPanahi/Mistral-7B-Instruct-v0.3-GGUF/resolve/main/Mistral-7B-Instruct-v0.3.Q4_K_M.gguf + dest: locallm/models + + - name: Run Model + containers.podman.podman_container: + name: llamacpp_python + image: ghcr.io/containers/llamacpp_python:latest + state: started + interactive: true + tty: true + detach: true + ports: + - 8001:8001 + volume: + - ./locallm/models:/locallm/models:ro,Z + env: + MODEL_PATH: models/Mistral-7B-Instruct-v0.3.Q4_K_M.gguf + HOST: 0.0.0.0 + PORT: 8001 + + - name: Run Application + containers.podman.podman_container: + name: function_calling + image: ghcr.io/containers/function_calling:latest + state: started + interactive: true + tty: true + ports: + - 8501:8501 + env: + MODEL_ENDPOINT: http://10.88.0.1:8001 diff --git a/recipes/natural_language_processing/function_calling/provision/requirements.yml b/recipes/natural_language_processing/function_calling/provision/requirements.yml new file mode 100644 index 00000000..da8ae831 --- /dev/null +++ b/recipes/natural_language_processing/function_calling/provision/requirements.yml @@ -0,0 +1,4 @@ +--- +collections: + - name: containers.podman + version: 1.13.0 diff --git a/recipes/natural_language_processing/function_calling/quadlet/README.md b/recipes/natural_language_processing/function_calling/quadlet/README.md new file mode 100644 index 00000000..1c361fd5 --- /dev/null +++ b/recipes/natural_language_processing/function_calling/quadlet/README.md @@ -0,0 +1,9 @@ +### Run function calling as a systemd service + +```bash +(cd ../;make quadlet) +sudo cp ../build/function_calling.yaml ../build/function_calling.kube ../build/function_calling.image /usr/share/containers/systemd/ +sudo /usr/libexec/podman/quadlet --dryrun #optional +sudo systemctl daemon-reload +sudo systemctl start function_calling +``` diff --git a/recipes/natural_language_processing/function_calling/quadlet/function_calling.kube b/recipes/natural_language_processing/function_calling/quadlet/function_calling.kube new file mode 100644 index 00000000..aabe9cc5 --- /dev/null +++ b/recipes/natural_language_processing/function_calling/quadlet/function_calling.kube @@ -0,0 +1,16 @@ +[Unit] +Description=Kubernetes YAML file used to do function calling inferencing +Documentation=man:podman-generate-systemd(1) +Wants=network-online.target +After=network-online.target +RequiresMountsFor=%t/containers + +[Kube] +# Point to the yaml file in the same directory +Yaml=function_calling.yaml + +[Service] +Restart=always + +[Install] 
+WantedBy=default.target diff --git a/recipes/natural_language_processing/function_calling/quadlet/function_calling.yaml b/recipes/natural_language_processing/function_calling/quadlet/function_calling.yaml new file mode 100644 index 00000000..9051282a --- /dev/null +++ b/recipes/natural_language_processing/function_calling/quadlet/function_calling.yaml @@ -0,0 +1,45 @@ +apiVersion: v1 +kind: Pod +metadata: + labels: + app: function_calling + name: function_calling +spec: + initContainers: + - name: model-file + image: MODEL_IMAGE + command: ['/usr/bin/install', "/model/model.file", "/shared/"] + volumeMounts: + - name: model-file + mountPath: /shared + containers: + - env: + - name: MODEL_ENDPOINT + value: http://0.0.0.0:8001 + image: APP_IMAGE + name: function_calling-inference + ports: + - containerPort: 8501 + hostPort: 8501 + securityContext: + runAsNonRoot: true + - env: + - name: HOST + value: 0.0.0.0 + - name: PORT + value: 8001 + - name: MODEL_PATH + value: /model/model.file + image: SERVER_IMAGE + name: function_calling-model-service + ports: + - containerPort: 8001 + hostPort: 8001 + securityContext: + runAsNonRoot: true + volumeMounts: + - name: model-file + mountPath: /model + volumes: + - name: model-file + emptyDir: {} From f421121cf3dad69097f927e487cad8962c0261a8 Mon Sep 17 00:00:00 2001 From: Jeff MAURY Date: Wed, 25 Sep 2024 14:57:14 +0200 Subject: [PATCH 2/2] fix: typo and unused imports Signed-off-by: Jeff MAURY --- .../function_calling/app/app.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/recipes/natural_language_processing/function_calling/app/app.py b/recipes/natural_language_processing/function_calling/app/app.py index 2f492692..17eda1c1 100644 --- a/recipes/natural_language_processing/function_calling/app/app.py +++ b/recipes/natural_language_processing/function_calling/app/app.py @@ -1,17 +1,13 @@ from langchain_openai import ChatOpenAI -from langchain.chains import LLMChain -from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder +from langchain_core.prompts import ChatPromptTemplate from langchain_core.pydantic_v1 import BaseModel, Field from langchain_core.output_parsers import PydanticToolsParser -from langchain.globals import set_debug, set_verbose import streamlit as st import requests import time import json import os -from urllib3 import request - model_service = os.getenv("MODEL_ENDPOINT", "http://localhost:8001") model_service = f"{model_service}/v1" @@ -97,7 +93,7 @@ def retrieve(self): st.markdown(""" This demo application will ask the LLM for the weather in the city given in the input field and -specify a tool that can get weather information give a latitude and longitude. The weather information +specify a tool that can get weather information given a latitude and longitude. The weather information retrieval is implemented using open-meteo.com. """) container = st.empty()