From 3d16c8b915672e358f6d9d4a71c97d475720f16d Mon Sep 17 00:00:00 2001 From: luckysanpedro Date: Thu, 25 Jan 2024 13:34:21 +0100 Subject: [PATCH] Documentation --- README.md | 9 +- docs/css/custom.css | 27 ++ docs/css/termynal.css | 109 ++++++ docs/examples.md | 115 +++++++ docs/examples/chat.md | 126 +++++++ docs/examples/dynamic_router.md | 239 +++++++++++++ docs/examples/enums.md | 92 +++++ docs/examples/error_output.md | 91 +++++ docs/examples/literals.md | 88 +++++ docs/getting-started/installation.md | 11 + docs/getting-started/introduction.md | 480 +++++++++++++++++++++++++++ docs/index.md | 45 +-- docs/js/custom.js | 113 +++++++ docs/js/termynal.js | 263 +++++++++++++++ mkdocs.yml | 71 +++- requirements-dev.lock | 3 +- requirements.lock | 1 + 17 files changed, 1856 insertions(+), 27 deletions(-) create mode 100644 docs/css/custom.css create mode 100644 docs/css/termynal.css create mode 100644 docs/examples/chat.md create mode 100644 docs/examples/dynamic_router.md create mode 100644 docs/examples/enums.md create mode 100644 docs/examples/error_output.md create mode 100644 docs/examples/literals.md create mode 100644 docs/getting-started/introduction.md create mode 100644 docs/js/custom.js create mode 100644 docs/js/termynal.js diff --git a/README.md b/README.md index 666fdec..38a5856 100644 --- a/README.md +++ b/README.md @@ -7,10 +7,11 @@ [![Pydantic v2](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/pydantic/pydantic/main/docs/badge/v2.json)](https://docs.pydantic.dev/latest/contributing/#badges) [![Twitter Follow](https://img.shields.io/twitter/follow/shroominic?style=social)](https://x.com/shroominic) -```bash -> pip install funcchain -``` - +
+
+```bash
+$ pip install funcchain
+```
+
+</div>
## Introduction `funcchain` is the *most pythonic* way of writing cognitive systems. Leveraging pydantic models as output schemas combined with langchain in the backend allows for a seamless integration of llms into your apps. diff --git a/docs/css/custom.css b/docs/css/custom.css new file mode 100644 index 0000000..6b76f33 --- /dev/null +++ b/docs/css/custom.css @@ -0,0 +1,27 @@ +.termynal-comment { + color: #4a968f; + font-style: italic; + display: block; +} + +.termy [data-termynal] { + white-space: pre-wrap; +} + +a.external-link::after { + /* \00A0 is a non-breaking space + to make the mark be on the same line as the link + */ + content: "\00A0[↪]"; +} + +a.internal-link::after { + /* \00A0 is a non-breaking space + to make the mark be on the same line as the link + */ + content: "\00A0↪"; +} + +.shadow { + box-shadow: 5px 5px 10px #999; +} \ No newline at end of file diff --git a/docs/css/termynal.css b/docs/css/termynal.css new file mode 100644 index 0000000..8938c97 --- /dev/null +++ b/docs/css/termynal.css @@ -0,0 +1,109 @@ +/** + * termynal.js + * + * @author Ines Montani + * @version 0.0.1 + * @license MIT + */ + + :root { + --color-bg: #252a33; + --color-text: #eee; + --color-text-subtle: #a2a2a2; +} + +[data-termynal] { + width: 750px; + max-width: 100%; + background: var(--color-bg); + color: var(--color-text); + /* font-size: 18px; */ + font-size: 15px; + /* font-family: 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace; */ + font-family: 'Roboto Mono', 'Fira Mono', Consolas, Menlo, Monaco, 'Courier New', Courier, monospace; + border-radius: 4px; + padding: 75px 45px 35px; + position: relative; + -webkit-box-sizing: border-box; + box-sizing: border-box; +} + +[data-termynal]:before { + content: ''; + position: absolute; + top: 15px; + left: 15px; + display: inline-block; + width: 15px; + height: 15px; + border-radius: 50%; + /* A little hack to display the window buttons in one pseudo element. */ + background: #d9515d; + -webkit-box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930; + box-shadow: 25px 0 0 #f4c025, 50px 0 0 #3ec930; +} + +[data-termynal]:after { + content: 'bash'; + position: absolute; + color: var(--color-text-subtle); + top: 5px; + left: 0; + width: 100%; + text-align: center; +} + +a[data-terminal-control] { + text-align: right; + display: block; + color: #aebbff; +} + +[data-ty] { + display: block; + line-height: 2; +} + +[data-ty]:before { + /* Set up defaults and ensure empty lines are displayed. 
*/ + content: ''; + display: inline-block; + vertical-align: middle; +} + +[data-ty="input"]:before, +[data-ty-prompt]:before { + margin-right: 0.75em; + color: var(--color-text-subtle); +} + +[data-ty="input"]:before { + content: '$'; +} + +[data-ty][data-ty-prompt]:before { + content: attr(data-ty-prompt); +} + +[data-ty-cursor]:after { + content: attr(data-ty-cursor); + font-family: monospace; + margin-left: 0.5em; + -webkit-animation: blink 1s infinite; + animation: blink 1s infinite; +} + + +/* Cursor animation */ + +@-webkit-keyframes blink { + 50% { + opacity: 0; + } +} + +@keyframes blink { + 50% { + opacity: 0; + } +} \ No newline at end of file diff --git a/docs/examples.md b/docs/examples.md index 6ee0a62..d35e109 100644 --- a/docs/examples.md +++ b/docs/examples.md @@ -65,3 +65,118 @@ The funcchain project makes it really simple to leverage large language models i ## Advanced Examples For advanced examples, checkout the examples directory [here](https://github.com/shroominic/funcchain/tree/main/examples) + +## Simple chatgpt rebuild with memory/history. +!!! Example + chatgpt.py [Example](https://github.com/shroominic/funcchain/blob/main/examples/chatgpt.py) + + + +```python +from funcchain import chain, settings +from funcchain.utils.memory import ChatMessageHistory + +settings.llm = "openai/gpt-4" +settings.console_stream = True + +history = ChatMessageHistory() + + +def ask(question: str) -> str: + return chain( + system="You are an advanced AI Assistant.", + instruction=question, + memory=history, + ) + + +def chat_loop() -> None: + while True: + query = input("> ") + + if query == "exit": + break + + if query == "clear": + global history + history.clear() + print("\033c") + continue + + ask(query) + + +if __name__ == "__main__": + print("Hey! How can I help you?\n") + chat_loop() +``` + + + +
```terminal
Initial print:
$ Hey! How can I help you?
$ >

User prompt:
$ > Say that Funcchain is cool

Assistant answer:
$ Funcchain is cool.
```
</div>
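
Because `ask()` passes the shared `history` object to `chain()`, follow-up questions can reference earlier turns. A minimal self-contained sketch of the memory in action (the exact model output will vary):

```python
from funcchain import chain, settings
from funcchain.utils.memory import ChatMessageHistory

settings.llm = "openai/gpt-4"

history = ChatMessageHistory()

def ask(question: str) -> str:
    return chain(
        system="You are an advanced AI Assistant.",
        instruction=question,
        memory=history,
    )

ask("My name is Sam.")
print(ask("What is my name?"))  # the second answer can now refer back to "Sam"
```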
## Instructions

Import the necessary funcchain components:

```python
from funcchain import chain, settings
from funcchain.utils.memory import ChatMessageHistory
```

### Settings

```python
settings.llm = "openai/gpt-4"
settings.console_stream = True
```

!!! Options
    Funcchain supports multiple LLMs and can stream the received LLM text instead of waiting for the complete answer. Configuration options:

    - `settings.llm`: Specify the language model to use. See [MODELS.md](https://github.com/shroominic/funcchain/blob/main/MODELS.md) for available options.
    - Streaming: Set `settings.console_stream` to `True` to enable streaming, or `False` to disable it.

### Establish a chat history

```python
history = ChatMessageHistory()
```

This stores messages in an in-memory list and creates a thread of messages.

See [memory.py] //Todo: Insert Link

### The ask function explained

```python
def ask(question: str) -> str:
    return chain(
        system="You are an advanced AI Assistant.",
        instruction=question,
        memory=history,
    )
```

This function sends a question to the funcchain `chain()` function.

It sets the system context as an advanced AI Assistant and passes the question as an instruction.

The history object is used to maintain a thread of messages for context.

The function returns the response from the chain function.
diff --git a/docs/examples/chat.md b/docs/examples/chat.md
new file mode 100644
index 0000000..9a37200
--- /dev/null
+++ b/docs/examples/chat.md
@@ -0,0 +1,126 @@
## Simple ChatGPT rebuild with memory/history

!!! Example
    chatgpt.py [Example](https://github.com/shroominic/funcchain/blob/main/examples/chatgpt.py)

!!! Important
    Ensure you have set up your API key for the LLM of your choice, or funcchain will look for a `.env` file. So in `.env`, set up your key:

    ```bash
    OPENAI_API_KEY="sk-XXX"
    ```

## Code Example

```python
from funcchain import chain, settings
from funcchain.utils.memory import ChatMessageHistory

settings.llm = "openai/gpt-4"
settings.console_stream = True

history = ChatMessageHistory()


def ask(question: str) -> str:
    return chain(
        system="You are an advanced AI Assistant.",
        instruction=question,
        memory=history,
    )


def chat_loop() -> None:
    while True:
        query = input("> ")

        if query == "exit":
            break

        if query == "clear":
            global history
            history.clear()
            print("\033c")
            continue

        ask(query)


if __name__ == "__main__":
    print("Hey! How can I help you?\n")
    chat_loop()
```

<div class="termy">
```terminal
Initial print:
$ Hey! How can I help you?
$ >

User prompt:
$ > Say that Funcchain is cool

Assistant answer:
$ Funcchain is cool.
```
</div>
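
You can also pre-seed the history before asking the first question. The sketch below reuses the `ask()` function from above and assumes `ChatMessageHistory` exposes the LangChain-style `add_user_message`/`add_ai_message` helpers; adjust if your version differs:

```python
from funcchain.utils.memory import ChatMessageHistory

history = ChatMessageHistory()

# seed the conversation (assumed LangChain-style helper methods)
history.add_user_message("Please call me Captain.")
history.add_ai_message("Aye, Captain!")

print(ask("Who am I speaking with?"))  # the model can now use the seeded context
```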
## Instructions

!!! Step-by-Step
    **Import the necessary funcchain components**

    ```python
    from funcchain import chain, settings
    from funcchain.utils.memory import ChatMessageHistory
    ```

    **Settings**

    ```python
    settings.llm = "openai/gpt-4"
    settings.console_stream = True
    ```

    Funcchain supports multiple LLMs and can stream the received LLM text instead of waiting for the complete answer. Configuration options:

    - `settings.llm`: Specify the language model to use. See [MODELS.md](https://github.com/shroominic/funcchain/blob/main/MODELS.md) for available options.
    - Streaming: Set `settings.console_stream` to `True` to enable streaming, or `False` to disable it.

    **Establish a chat history**

    ```python
    history = ChatMessageHistory()
    ```

    This stores messages in an in-memory list and creates a thread of messages.

    See [memory.py] //Todo: Insert Link

    **Define the ask function**

    See how funcchain uses `chain()` with an input `str` to return an output of type `str`:

    ```python
    def ask(question: str) -> str:
        return chain(
            system="You are an advanced AI Assistant.",
            instruction=question,
            memory=history,
        )
    ```

    This function sends a question to the funcchain `chain()` function.

    It sets the system context as an advanced AI Assistant and passes the question as an instruction.

    The history object is used to maintain a thread of messages for context.

    The function returns the response from the chain function.
diff --git a/docs/examples/dynamic_router.md b/docs/examples/dynamic_router.md
new file mode 100644
index 0000000..696190e
--- /dev/null
+++ b/docs/examples/dynamic_router.md
@@ -0,0 +1,239 @@
# Dynamic Chat Router with Funcchain

!!! Example
    dynamic_router.py [Example](https://github.com/shroominic/funcchain/blob/main/examples/dynamic_router.py)

In this example we will use funcchain to build an LLM routing pipeline.
This is a very useful LLM task and can be used in a variety of applications.
You can abstract this for your own usage.
It should serve as an example of how to achieve complex structures using funcchain.

The result is a dynamic chat router that selects the appropriate handler for user queries based on predefined routes.
+ +## Full Code Example + +```python +from enum import Enum +from typing import Any, Callable, TypedDict + +from funcchain.syntax.executable import compile_runnable +from pydantic import BaseModel, Field + +# Dynamic Router Definition: + + +class Route(TypedDict): + handler: Callable + description: str + + +class DynamicChatRouter(BaseModel): + routes: dict[str, Route] + + def _routes_repr(self) -> str: + return "\n".join([f"{route_name}: {route['description']}" for route_name, route in self.routes.items()]) + + def invoke_route(self, user_query: str, /, **kwargs: Any) -> Any: + RouteChoices = Enum( # type: ignore + "RouteChoices", + {r: r for r in self.routes.keys()}, + type=str, + ) + + class RouterModel(BaseModel): + selector: RouteChoices = Field( + default="default", + description="Enum of the available routes.", + ) + + route_query = compile_runnable( + instruction="Given the user query select the best query handler for it.", + input_args=["user_query", "query_handlers"], + output_type=RouterModel, + ) + + selected_route = route_query.invoke( + input={ + "user_query": user_query, + "query_handlers": self._routes_repr(), + } + ).selector + assert isinstance(selected_route, str) + + return self.routes[selected_route]["handler"](user_query, **kwargs) + + +# Example Usage: + + +def handle_pdf_requests(user_query: str) -> str: + return "Handling PDF requests with user query: " + user_query + + +def handle_csv_requests(user_query: str) -> str: + return "Handling CSV requests with user query: " + user_query + + +def handle_default_requests(user_query: str) -> str: + return "Handling DEFAULT requests with user query: " + user_query + + +router = DynamicChatRouter( + routes={ + "pdf": { + "handler": handle_pdf_requests, + "description": "Call this for requests including PDF Files.", + }, + "csv": { + "handler": handle_csv_requests, + "description": "Call this for requests including CSV Files.", + }, + "default": { + "handler": handle_default_requests, + "description": "Call this for all other requests.", + }, + }, +) + + +router.invoke_route("Can you summarize this csv?") +``` + +Demo +
```terminal
User:
$ Can you summarize this csv?
$ ...............
Handling CSV requests with user query: Can you summarize this csv?
```
</div>
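
Note that extra keyword arguments passed to `invoke_route` are forwarded to the selected handler, so handlers can accept more than the raw query. A sketch reusing `DynamicChatRouter` and `handle_default_requests` from above (the `user_id` parameter is made up for illustration):

```python
def handle_pdf_requests(user_query: str, user_id: str = "anonymous") -> str:
    return f"Handling PDF request for {user_id}: {user_query}"

router = DynamicChatRouter(
    routes={
        "pdf": {
            "handler": handle_pdf_requests,
            "description": "Call this for requests including PDF Files.",
        },
        "default": {
            "handler": handle_default_requests,
            "description": "Call this for all other requests.",
        },
    },
)

# extra kwargs are passed straight through to the chosen handler
router.invoke_route("Extract the tables from this PDF.", user_id="sam42")
```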
## Instructions

!!! Step-by-Step

    **Necessary imports**

    ```python
    from enum import Enum
    from typing import Any, Callable, TypedDict

    from funcchain.syntax.executable import compile_runnable
    from pydantic import BaseModel, Field
    ```

    **Define the Route type**

    ```python
    class Route(TypedDict):
        handler: Callable
        description: str
    ```

    Create a `TypedDict` to define the structure of a route with a handler function and a description. Just leave this unchanged if not intentionally experimenting.

    **Define the router class**

    ```python
    class DynamicChatRouter(BaseModel):
        routes: dict[str, Route]
    ```

    **`_routes_repr()`:**
    Returns a string representation of all routes and their descriptions, used to help the language model understand the available routes.

    ```python
    def _routes_repr(self) -> str:
        return "\n".join([f"{route_name}: {route['description']}" for route_name, route in self.routes.items()])
    ```

    **`invoke_route(user_query: str, /, **kwargs: Any) -> Any`:**
    This method takes a user query and additional keyword arguments. Inside `invoke_route`, an Enum named `RouteChoices` is dynamically created with keys corresponding to the route names. This Enum is used to validate the selected route.

    ```python
    def invoke_route(self, user_query: str, /, **kwargs: Any) -> Any:
        RouteChoices = Enum(  # type: ignore
            "RouteChoices",
            {r: r for r in self.routes.keys()},
            type=str,
        )
    ```

    **Compile the route selection logic**

    The `RouterModel` class in this example defines the expected output structure that the `compile_runnable` function uses to determine the best route for a given user query.

    ```python
    class RouterModel(BaseModel):
        selector: RouteChoices = Field(
            default="default",
            description="Enum of the available routes.",
        )

    route_query = compile_runnable(
        instruction="Given the user query select the best query handler for it.",
        input_args=["user_query", "query_handlers"],
        output_type=RouterModel,
    )

    selected_route = route_query.invoke(
        input={
            "user_query": user_query,
            "query_handlers": self._routes_repr(),
        }
    ).selector
    assert isinstance(selected_route, str)

    return self.routes[selected_route]["handler"](user_query, **kwargs)
    ```

    - `RouterModel`: Holds the route selection with a default option, ready for you to play around with.
    - `RouteChoices`: An Enum built from route names, ensuring you only get valid route selections.
    - `compile_runnable`: Sets up the decision-making logic for route selection, guided by the provided instruction and inputs.
    - `route_query`: Calls the decision logic with the user's query and a string of route descriptions.
    - `selected_route`: The outcome of the decision logic, representing the route to take.
    - `assert`: A safety check to confirm the route is a string, as expected by the routes dictionary.
    - `handler invocation`: Runs the chosen route's handler with the provided query and additional arguments.

    **Define route functions**

    Now you can use the structured output to execute programmatically based on a natural language input.
    Establish functions tailored to your needs.
+ ```python + def handle_pdf_requests(user_query: str) -> str: + return "Handling PDF requests with user query: " + user_query + + + def handle_csv_requests(user_query: str) -> str: + return "Handling CSV requests with user query: " + user_query + + + def handle_default_requests(user_query: str) -> str: + return "Handling DEFAULT requests with user query: " + user_query + ``` + **Define the routes** + And bind the previous established functions. + + ```python + router = DynamicChatRouter( + routes={ + "pdf": { + "handler": handle_pdf_requests, + "description": "Call this for requests including PDF Files.", + }, + "csv": { + "handler": handle_csv_requests, + "description": "Call this for requests including CSV Files.", + }, + "default": { + "handler": handle_default_requests, + "description": "Call this for all other requests.", + }, + }, + ) + ``` + + **Get output** + Use the router.invoke_route method to process the user query and obtain the appropriate response. + + ```python + router.invoke_route("Can you summarize this csv?") + ``` \ No newline at end of file diff --git a/docs/examples/enums.md b/docs/examples/enums.md new file mode 100644 index 0000000..d8b0071 --- /dev/null +++ b/docs/examples/enums.md @@ -0,0 +1,92 @@ +##Decision Making with Enums and Funcchain + +!!! Example + See [enums.py](https://github.com/shroominic/funcchain/blob/main/examples/enums.py) + + In this example, we will use the enum module and funcchain library to build a decision-making system. + This is a useful task for creating applications that require predefined choices or responses. + You can adapt this for your own usage. + This serves as an example of how to implement decision-making logic using enums and the funcchain library. + +##Full Code Example +A simple system that takes a question and decides a 'yes' or 'no' answer based on the input. + +```python +from enum import Enum +from funcchain import chain +from pydantic import BaseModel + +class Answer(str, Enum): + yes = "yes" + no = "no" + +class Decision(BaseModel): + answer: Answer + +def make_decision(question: str) -> Decision: + """ + Based on the question decide yes or no. + """ + return chain() + +if __name__ == "__main__": + print(make_decision("Do you like apples?")) +``` + +#Demo +
```terminal
User:
$ Are apples red?
$ ...............
Decision(answer=<Answer.yes: 'yes'>)
```
</div>
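
The same pattern extends beyond two options. As a sketch, a hypothetical three-way answer only needs another enum member and a matching docstring:

```python
from enum import Enum
from funcchain import chain
from pydantic import BaseModel

class Confidence(str, Enum):
    yes = "yes"
    no = "no"
    unsure = "unsure"

class SoftDecision(BaseModel):
    answer: Confidence

def make_soft_decision(question: str) -> SoftDecision:
    """
    Answer yes, no, or unsure, depending on how certain you are.
    """
    return chain()

print(make_soft_decision("Will it rain tomorrow?"))
```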
+ +##Instructions + +!!! Step-by-Step + **Necessary Imports** + ```python + from enum import Enum + from funcchain import chain + from pydantic import BaseModel + ``` + + **Define the Answer Enum** + The Answer enum defines possible answers as 'yes' and 'no', which are the only valid responses for the decision-making system. Experiment by using and describing other enums. + + ```python + class Answer(str, Enum): + yes = "yes" + no = "no" + ``` + **Create the Decision Model** + The Decision class uses Pydantic to model a decision, ensuring that the answer is always an instance of the Answer enum. + + ```python + class Decision(BaseModel): + answer: Answer + ``` + + **Implement the Decision Function** + The make_decision function is where the decision logic will be implemented, using `chain()` to process the question and return a decision. + When using your own enums you want to edit this accordingly. + + ```python + def make_decision(question: str) -> Decision: + """ + Based on the question decide yes or no. + """ + return chain() + ``` + + **Run the Decision System** + This block runs the decision-making system, printing out the decision for a given question when the script is executed directly. + + + ```python + if __name__ == "__main__": + print(make_decision("Do you like apples?")) + + ``` \ No newline at end of file diff --git a/docs/examples/error_output.md b/docs/examples/error_output.md new file mode 100644 index 0000000..85629b4 --- /dev/null +++ b/docs/examples/error_output.md @@ -0,0 +1,91 @@ +#Example of raising an error + +!!! Example + error_output.py [Example](https://github.com/shroominic/funcchain/blob/main/examples/error_output.py) + + In this example, we will use the funcchain library to build a system that extracts user information from text. + Most importantly we will be able to raise an error thats programmatically usable. + You can adapt this for your own usage. + + + The main functionality is to take a string of text and attempt to extract user information, such as name and email, and return a User object. If the information is insufficient, an Error is returned instead. + +##Full Code Example + +```python +from funcchain import BaseModel, Error, chain +from rich import print + +class User(BaseModel): + name: str + email: str | None + +def extract_user_info(text: str) -> User | Error: + """ + Extract the user information from the given text. + In case you do not have enough infos, raise. + """ + return chain() + +if __name__ == "__main__": + print(extract_user_info("hey")) # returns Error + print(extract_user_info("I'm John and my mail is john@gmail.com")) # returns a User object + +``` + +Demo +
```terminal
$ print(extract_user_info("hey"))

Error: Insufficient information to extract user details.

$ print(extract_user_info("I'm John and my mail is john@gmail.com"))

User(name='John', email='john@gmail.com')
```
</div>
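
Because the return type is a union, callers can branch on the result type. A minimal sketch reusing `extract_user_info` and `Error` from above (only the type check is assumed here, no particular attributes of `Error`):

```python
result = extract_user_info("I'm John and my mail is john@gmail.com")

if isinstance(result, Error):
    print("Could not extract a user:", result)
else:
    # result is a User instance with the extracted fields
    print("Extracted user:", result.name, result.email)
```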
+ +##Instructions + +!!! Step-by-Step + + **Necessary Imports** + ```python + from funcchain import BaseModel, Error, chain + from rich import print + ``` + + **Define the User Model** + ```python + class User(BaseModel): + name: str + email: str | None + ``` + The User class is a Pydantic model that defines the structure of the user information to be extracted, with fields for `name` and an email. + Change the fields to experiment and alignment with your project. + + **Implement the Extraction Function** + The `extract_user_info` function is intended to process the input text and return either a User object with extracted information or an Error if the information is not sufficient. + ```python + def extract_user_info(text: str) -> User | Error: + """ + Extract the user information from the given text. + In case you do not have enough infos, raise. + """ + return chain() + ``` + For experiments and adoptions also change the `str` that will be used in chain() to identify what you defined earlier in the `User(BaseModel)` + + + **Run the Extraction System** + This conditional block is used to execute the extraction function and print the results when the script is run directly. + ```python + if __name__ == "__main__": + print(extract_user_info("hey")) # returns Error + print(extract_user_info("I'm John and my mail is john@gmail.com")) # returns a User object + ``` \ No newline at end of file diff --git a/docs/examples/literals.md b/docs/examples/literals.md new file mode 100644 index 0000000..4cf668f --- /dev/null +++ b/docs/examples/literals.md @@ -0,0 +1,88 @@ +#Literal Type Enforcement in Funcchain + +!!! Example + literals.py [Example](https://github.com/shroominic/funcchain/blob/main/examples/literals.py) + + This is a useful task for scenarios where you want to ensure that certain outputs strictly conform to a predefined set of values. + This serves as an example of how to implement strict type checks on outputs using the Literal type from the typing module and the funcchain library. + + You can adapt this for your own usage. + +##Full Code Example + +```python +from typing import Literal +from funcchain import chain +from pydantic import BaseModel + +class Ranking(BaseModel): + chain_of_thought: str + score: Literal[11, 22, 33, 44, 55] + error: Literal["no_input", "all_good", "invalid"] + +def rank_output(output: str) -> Ranking: + """ + Analyze and rank the output. + """ + return chain() + +if __name__ == "__main__": + rank = rank_output("The quick brown fox jumps over the lazy dog.") + print(rank) +``` + +Demo +
```terminal
$ rank = rank_output("The quick brown fox jumps over the lazy dog.")
$ ........
Ranking(chain_of_thought='...', score=33, error='all_good')
```
</div>
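
The `Literal` constraint is enforced by pydantic itself, so any value outside the allowed set fails validation. A quick check with the `Ranking` model from above, without any LLM call:

```python
from pydantic import ValidationError

try:
    Ranking(chain_of_thought="...", score=12, error="all_good")
except ValidationError as e:
    print(e)  # 12 is not one of the allowed literal scores
```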
+ +##Instructions + +!!! Step-by-Step + + **Necessary Imports** + ```python + from typing import Literal + from funcchain import chain + from pydantic import BaseModel + ``` + + + **Define the Ranking Model** + The Ranking class is a Pydantic model that uses the Literal type to ensure that the score and error fields can only contain certain predefined values. + So experiment with changing those but keeping this structure of the class. + The LLM will be forced to deliver one of the defined output. + + ```python + class Ranking(BaseModel): + chain_of_thought: str + score: Literal[11, 22, 33, 44, 55] + error: Literal["no_input", "all_good", "invalid"] + ``` + + **Implement the Ranking Function** + Use `chain()` to process a user input, which must be a string. + Adjust the content based on your above defined class. + + ```python + def rank_output(output: str) -> Ranking: + """ + Analyze and rank the output. + """ + return chain() + ``` + + **Execute the Ranking System** + This block is used to execute the ranking function and print the results when the script is run directly. + ```python + if __name__ == "__main__": + rank = rank_output("The quick brown fox jumps over the lazy dog.") + print(rank) + ``` + + + diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md index e69de29..1a28715 100644 --- a/docs/getting-started/installation.md +++ b/docs/getting-started/installation.md @@ -0,0 +1,11 @@ +!!! tip "LETS GOO" + + +
```console
// Install funcchain to get started:
$ pip install funcchain
---> 100%
// You are all set to stream the code content for this tutorial!
```

</div>
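
To verify the installation, you can inspect the installed package metadata:

<div class="termy">

```console
$ pip show funcchain
```

</div>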
\ No newline at end of file diff --git a/docs/getting-started/introduction.md b/docs/getting-started/introduction.md new file mode 100644 index 0000000..8c40f55 --- /dev/null +++ b/docs/getting-started/introduction.md @@ -0,0 +1,480 @@ +[![Version](https://badge.fury.io/py/funcchain.svg)](https://badge.fury.io/py/funcchain) +[![tests](https://github.com/shroominic/funcchain/actions/workflows/code-check.yml/badge.svg)](https://github.com/shroominic/funcchain/actions/workflows/code-check.yml) +![PyVersion](https://img.shields.io/pypi/pyversions/funcchain) +![Downloads](https://img.shields.io/pypi/dm/funcchain) +[![Pydantic v2](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/pydantic/pydantic/main/docs/badge/v2.json)](https://docs.pydantic.dev/latest/contributing/#badges) +[![Twitter Follow](https://img.shields.io/twitter/follow/shroominic?style=social)](https://x.com/shroominic) + +
```bash
$ pip install funcchain
```

</div>
!!! Important
    Don't forget to set up your API key for the LLM of your choice:

    ```bash
    export OPENAI_API_KEY="sk-XXX"
    ```

    Otherwise funcchain will automatically detect a `.env` file.

    Also useful: the Langsmith integration

    ```bash
    LANGCHAIN_TRACING_V2=true
    LANGCHAIN_ENDPOINT="https://api.smith.langchain.com"
    LANGCHAIN_API_KEY="ls__XXX"
    LANGCHAIN_PROJECT="YOUR_PROJECT"
    ```

    Add those lines to `.env` and funcchain will send traces to Langsmith.

    Langsmith is used to understand what happens under the hood of your AI project.
    When multiple LLM calls are used for an output, they can be logged for debugging.

## Introduction

`funcchain` is the *most pythonic* way of writing cognitive systems. Leveraging pydantic models as output schemas combined with langchain in the backend allows for a seamless integration of LLMs into your apps.
It works perfectly with OpenAI Functions and soon with other models using JSONFormer.

[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/ricklamers/funcchain-demo)

## Simple Demo

```python
from funcchain import chain
from pydantic import BaseModel

# define your output shape
class Recipe(BaseModel):
    ingredients: list[str]
    instructions: list[str]
    duration: int

# write prompts utilising all native python features
def generate_recipe(topic: str) -> Recipe:
    """
    Generate a recipe for a given topic.
    """
    return chain() # <- this is doing all the magic

# generate llm response
recipe = generate_recipe("christmas dinner")

# recipe is automatically converted into a pydantic model
print(recipe.ingredients)
```

!!! Step-by-Step
    ```python
    # define your output shape
    class Recipe(BaseModel):
        ingredients: list[str]
        instructions: list[str]
        duration: int
    ```

    A `Recipe` class is defined, inheriting from `BaseModel` (pydantic library). This class
    specifies the structure of the output data, which you can customize.
    In the example it includes a list of ingredients, a list of instructions, and an integer
    representing the duration.

    ```python
    # write prompts utilising all native python features
    def generate_recipe(topic: str) -> Recipe:
        """
        Generate a recipe for a given topic.
        """
        return chain() # <- this is doing all the magic
    ```

    In this example the `generate_recipe` function takes a topic string and returns a `Recipe` instance for that topic.

    # Understanding chain() Functionality
    `chain()` is the backend magic of funcchain. Behind the scenes it creates the prompt executable from the function signature,
    meaning it will turn your function into usable LLM input.

    The `chain()` function does the interaction with the language model to generate a recipe. It accepts several parameters: `system` to set the system prompt, `instruction` for model directives, `context` to provide relevant background information, `memory` to maintain conversational state, `settings_override` for custom settings, and `**input_kwargs` for additional inputs. Within `generate_recipe`, `chain()` is called with arguments derived from the function's parameters, the function's docstring, or the library's default settings. It compiles these into a Runnable, which then prompts the language model to produce the output. This output is automatically structured into a `Recipe` instance, conforming to the Pydantic model's schema.
    # Get your response
    ```python
    # generate llm response
    recipe = generate_recipe("christmas dinner")

    # recipe is automatically converted into a pydantic model
    print(recipe.ingredients)
    ```

## Demo

<div class="termy">
+ ``` + $ print(generate_recipe("christmas dinner").ingredients + + ['turkey', 'potatoes', 'carrots', 'brussels sprouts', 'cranberry sauce', 'gravy', + 'butter', 'salt', 'pepper', 'rosemary'] + + ``` +
+ +## Complex Structured Output + +```python +from pydantic import BaseModel, Field +from funcchain import chain + +# define nested models +class Item(BaseModel): + name: str = Field(description="Name of the item") + description: str = Field(description="Description of the item") + keywords: list[str] = Field(description="Keywords for the item") + +class ShoppingList(BaseModel): + items: list[Item] + store: str = Field(description="The store to buy the items from") + +class TodoList(BaseModel): + todos: list[Item] + urgency: int = Field(description="The urgency of all tasks (1-10)") + +# support for union types +def extract_list(user_input: str) -> TodoList | ShoppingList: + """ + The user input is either a shopping List or a todo list. + """ + return chain() + +# the model will choose the output type automatically +lst = extract_list( + input("Enter your list: ") +) + +# custom handler based on type +match lst: + case ShoppingList(items=items, store=store): + print("Here is your Shopping List: ") + for item in items: + print(f"{item.name}: {item.description}") + print(f"You need to go to: {store}") + + case TodoList(todos=todos, urgency=urgency): + print("Here is your Todo List: ") + for item in todos: + print(f"{item.name}: {item.description}") + print(f"Urgency: {urgency}") +``` + +!!! Step-by-Step + **Nececary Imports** + ```python + from pydantic import BaseModel, Field + from funcchain import chain + ``` + + **Data Structures and Model Definitions** + ```python + # define nested models + class Item(BaseModel): + name: str = Field(description="Name of the item") + description: str = Field(description="Description of the item") + keywords: list[str] = Field(description="Keywords for the item") + + class ShoppingList(BaseModel): + items: list[Item] + store: str = Field(description="The store to buy the items from") + + class TodoList(BaseModel): + todos: list[Item] + urgency: int = Field(description="The urgency of all tasks (1-10)") + + ``` + + In this example, Funcchain utilizes Pydantic models to create structured data schemas that facilitate the processing of programmatic inputs. + + You can define new Pydantic models or extend existing ones by adding additional fields or methods. The general approach is to identify the data attributes relevant to your application and create corresponding model classes with these attributes. + + + **Union types** + ```python + # support for union types + def extract_list(user_input: str) -> TodoList | ShoppingList: + """ + The user input is either a shopping List or a todo list. + """ + return chain() + ``` + The extract_list function uses the chain function to analyze user input and return a structured list: + In the example: + - Union Types: It can return either a TodoList or a ShoppingList, depending on the input. + - Usage of chain: chain simplifies the process, deciding the type of list to return. + + For your application this is going to serve as a router to route between your previously defined models. + + **Get a list from the user** (here as "lst") + ```python + # the model will choose the output type automatically + lst = extract_list( + input("Enter your list: ") + ) + + ``` + + **Define your custom handlers** + + And now its time to define what happens with the result. + You can then use the lst variable to match. 
```python
# custom handler based on type
match lst:
    case ShoppingList(items=items, store=store):
        print("Here is your Shopping List: ")
        for item in items:
            print(f"{item.name}: {item.description}")
        print(f"You need to go to: {store}")

    case TodoList(todos=todos, urgency=urgency):
        print("Here is your Todo List: ")
        for item in todos:
            print(f"{item.name}: {item.description}")
        print(f"Urgency: {urgency}")
```

## Demo

<div class="termy">
+ ``` + lst = extract_list( + input("Enter your list: ") + ) + + User: + $ Complete project report, Prepare for meeting, Respond to emails; + $ if I don't respond I will be fired + + Output: + $ ............... + Here is your Todo List: + Complete your buisness tasks: project report, Prepare for meeting, Respond to emails + Urgency: 10 + //add real output + ``` +
## Vision Models

```python
from PIL import Image
from pydantic import BaseModel, Field
from funcchain import chain, settings

# set global llm using model identifiers (see MODELS.md)
settings.llm = "openai/gpt-4-vision-preview"

# everything defined is part of the prompt
class AnalysisResult(BaseModel):
    """The result of an image analysis."""

    theme: str = Field(description="The theme of the image")
    description: str = Field(description="A description of the image")
    objects: list[str] = Field(description="A list of objects found in the image")

# easy use of images as input with structured output
def analyse_image(image: Image.Image) -> AnalysisResult:
    """
    Analyse the image and extract its
    theme, description and objects.
    """
    return chain()

result = analyse_image(Image.open("examples/assets/old_chinese_temple.jpg"))

print("Theme:", result.theme)
print("Description:", result.description)
for obj in result.objects:
    print("Found this object:", obj)
```

!!! Step-by-Step
    **Necessary Imports**

    ```python
    from PIL import Image
    from pydantic import BaseModel, Field
    from funcchain import chain, settings
    ```

    **Define Model**

    Set the global LLM using model identifiers, see [MODELS.md](https://github.com/shroominic/funcchain/blob/main/MODELS.md):

    ```python
    settings.llm = "openai/gpt-4-vision-preview"
    ```

    Funcchain's modularity allows for all kinds of models, including local models.

    **Analyze Image**

    Get structured output from an image, in our example `theme`, `description` and `objects`:

    ```python
    # everything defined is part of the prompt
    class AnalysisResult(BaseModel):
        """The result of an image analysis."""

        theme: str = Field(description="The theme of the image")
        description: str = Field(description="A description of the image")
        objects: list[str] = Field(description="A list of objects found in the image")
    ```

    Adjust the fields as needed. Play around with the example and feel free to experiment.
    You can customize the analysis by modifying the fields of the `AnalysisResult` model.

    **Function to start the analysis**

    ```python
    # easy use of images as input with structured output
    def analyse_image(image: Image.Image) -> AnalysisResult:
        """
        Analyse the image and extract its
        theme, description and objects.
        """
        return chain()
    ```

    `chain()` will handle the image input.
    We define again the fields from before: `theme`, `description` and `objects`.

    The function takes an image as input: `image: Image.Image`.

    It is important that the fields defined earlier are mentioned in the prompt:
    `Analyse the image and extract its`...

## Demo

<div class="termy">
+ ``` + print(analyse_image(image: Image.Image)) + + $ .................. + + Theme: Nature + Description: A beautiful landscape with a mountain range in the background, a clear blue sky, and a calm lake in the foreground surrounded by greenery. + Found this object: mountains + Found this object: sky + Found this object: lake + Found this object: trees + Found this object: grass + + ``` +
## Seamless local model support

Yes, you can use funcchain without an internet connection.
Start heating up your device.

```python
from pydantic import BaseModel, Field
from funcchain import chain, settings

# auto-download the model from huggingface
settings.llm = "ollama/openchat"

class SentimentAnalysis(BaseModel):
    analysis: str
    sentiment: bool = Field(description="True for Happy, False for Sad")

def analyze(text: str) -> SentimentAnalysis:
    """
    Determines the sentiment of the text.
    """
    return chain()

# generates using the local model
poem = analyze("I really like when my dog does a trick!")

# promised structured output (for local models!)
print(poem.analysis)
```

!!! Step-by-Step
    **Necessary Imports**

    ```python
    from pydantic import BaseModel, Field
    from funcchain import chain, settings
    ```

    **Choose and enjoy**

    ```python
    # auto-download the model from huggingface
    settings.llm = "ollama/openchat"
    ```

    **Structured output definition**

    With an input `str`, a described boolean field returns `True` or `False`:

    ```python
    class SentimentAnalysis(BaseModel):
        analysis: str
        sentiment: bool = Field(description="True for Happy, False for Sad")
    ```

    Experiment yourself by adding different descriptions for the true and false case.

    **Use `chain()` to analyze**

    The docstring defines the analysis in natural language:

    ```python
    def analyze(text: str) -> SentimentAnalysis:
        """
        Determines the sentiment of the text.
        """
        return chain()
    ```

    For your own usage, adjust the docstring. Be precise and reference your classes again.

    **Use the analyze function and print the output**

    ```python
    # generates using the local model
    poem = analyze("I really like when my dog does a trick!")

    # promised structured output (for local models!)
    print(poem.analysis)
    ```

## Demo

<div class="termy">
+ ``` + poem = analyze("I really like when my dog does a trick!") + + $ .................. + + Add demo + + ``` +
## Features

- **🎨 Minimalistic and Easy to Use**: Designed with simplicity in mind for straightforward usage.
- **🔄 Model Flexibility**: Effortlessly switch between OpenAI and local models.
- **📝 Pythonic Prompts**: Craft natural language prompts as intuitive Python functions.
- **🔧 Structured Output**: Define output schemas with Pydantic models.
- **🚀 Powered by LangChain**: Utilize the robust LangChain core for backend operations.
- **🧩 Template Support**: Employ f-strings or Jinja templates for dynamic prompt creation.
- **🔗 Integration with AI Services**: Take full advantage of OpenAI Functions or LlamaCpp Grammars.
- **🛠️ Langsmith Support**: Ensure compatibility with Langsmith for superior language model interactions.
- **⚡ Asynchronous and Pythonic**: Embrace modern Python async features.
- **🤗 Huggingface Integration**: Automatically download models from Huggingface.
- **🌊 Streaming Support**: Enable real-time streaming for interactive applications.

## Documentation

We highly recommend trying out the examples in the `./examples` folder.

Coming soon... feel free to add helpful .md files :)

## Contribution

You want to contribute? That's great! Please run the dev setup to get started:

```bash
> git clone https://github.com/shroominic/funcchain.git && cd funcchain

> ./dev_setup.sh
```

Thanks!
diff --git a/docs/index.md b/docs/index.md
index dd00a0a..8af55be 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -8,8 +8,11 @@
 
 ## Welcome
 
-funcchain is the *most pythonic* way of writing cognitive systems. Leveraging pydantic models as output schemas combined with langchain in the backend allows for a seamless integration of llms into your apps.
-It works perfect with OpenAI Functions and soon with other models using JSONFormer.
+
+!!! Description
+    funcchain is the *most pythonic* way of writing cognitive systems. Leveraging pydantic models as output schemas combined with langchain in the backend allows for a seamless integration of LLMs into your apps.
+    It works perfectly with OpenAI Functions and soon with other models using JSONFormer.
+
 
 Key features:
 
@@ -23,28 +26,31 @@ Key features:
 
 ## Installation
 
-```bash
-pip install funcchain
-```
+<div class="termy">
+    ```bash
+    $ pip install funcchain
+    ```
+</div>
-Make sure to have an OpenAI API key in your environment variables. For example,
+!!! Important
+    Make sure to have an OpenAI API key in your environment variables. For example,
 
-```bash
-export OPENAI_API_KEY=sk-**********
-```
+    ```bash
+    export OPENAI_API_KEY=sk-**********
+    ```
 
 ## Usage
 
-```python
-from funcchain import chain
-
-def hello() -> str:
-    """Say hello in 3 languages"""
-    return chain()
-
-print(hello()) # -> Hello, Bonjour, Hola
-```
+```python
+from funcchain import chain
+
+def hello() -> str:
+    """Say hello in 3 languages"""
+    return chain()
+
+print(hello()) # -> Hello, Bonjour, Hola
+```
 
 This will call the OpenAI API and return the response.
 
@@ -53,10 +59,11 @@ The `chain` function extracts the docstring as the prompt and the return type fo
 
 To contribute, clone the repo and run:
 
+<div class="termy">
 ```bash
-./dev_setup.sh
+$ ./dev_setup.sh
 ```
-
+</div>
This will install pre-commit hooks, dependencies and set up the environment. To activate the virtual environment managed by poetry, you can use the following command: diff --git a/docs/js/custom.js b/docs/js/custom.js new file mode 100644 index 0000000..79d0a1f --- /dev/null +++ b/docs/js/custom.js @@ -0,0 +1,113 @@ +function setupTermynal() { + document.querySelectorAll(".use-termynal").forEach(node => { + node.style.display = "block"; + new Termynal(node, { + lineDelay: 500 + }); + }); + const progressLiteralStart = "---> 100%"; + const promptLiteralStart = "$ "; + const customPromptLiteralStart = "# "; + const termynalActivateClass = "termy"; + let termynals = []; + + function createTermynals() { + document + .querySelectorAll(`.${termynalActivateClass} .highlight`) + .forEach(node => { + const text = node.textContent; + const lines = text.split("\n"); + const useLines = []; + let buffer = []; + function saveBuffer() { + if (buffer.length) { + let isBlankSpace = true; + buffer.forEach(line => { + if (line) { + isBlankSpace = false; + } + }); + dataValue = {}; + if (isBlankSpace) { + dataValue["delay"] = 0; + } + if (buffer[buffer.length - 1] === "") { + // A last single
// <br> won't have effect
                        // so put an additional one
                        buffer.push("");
                    }
                    const bufferValue = buffer.join("<br>
"); + dataValue["value"] = bufferValue; + useLines.push(dataValue); + buffer = []; + } + } + for (let line of lines) { + if (line === progressLiteralStart) { + saveBuffer(); + useLines.push({ + type: "progress" + }); + } else if (line.startsWith(promptLiteralStart)) { + saveBuffer(); + const value = line.replace(promptLiteralStart, "").trimEnd(); + useLines.push({ + type: "input", + value: value + }); + } else if (line.startsWith("// ")) { + saveBuffer(); + const value = "💬 " + line.replace("// ", "").trimEnd(); + useLines.push({ + value: value, + class: "termynal-comment", + delay: 0 + }); + } else if (line.startsWith(customPromptLiteralStart)) { + saveBuffer(); + const promptStart = line.indexOf(promptLiteralStart); + if (promptStart === -1) { + console.error("Custom prompt found but no end delimiter", line) + } + const prompt = line.slice(0, promptStart).replace(customPromptLiteralStart, "") + let value = line.slice(promptStart + promptLiteralStart.length); + useLines.push({ + type: "input", + value: value, + prompt: prompt + }); + } else { + buffer.push(line); + } + } + saveBuffer(); + const div = document.createElement("div"); + node.replaceWith(div); + const termynal = new Termynal(div, { + lineData: useLines, + noInit: true, + lineDelay: 500 + }); + termynals.push(termynal); + }); + } + + function loadVisibleTermynals() { + termynals = termynals.filter(termynal => { + if (termynal.container.getBoundingClientRect().top - innerHeight <= 0) { + termynal.init(); + return false; + } + return true; + }); + } + window.addEventListener("scroll", loadVisibleTermynals); + createTermynals(); + loadVisibleTermynals(); +} + +async function main() { + setupTermynal() +} + +main() \ No newline at end of file diff --git a/docs/js/termynal.js b/docs/js/termynal.js new file mode 100644 index 0000000..7146d8d --- /dev/null +++ b/docs/js/termynal.js @@ -0,0 +1,263 @@ +/** + * termynal.js + * A lightweight, modern and extensible animated terminal window, using + * async/await. + * + * @author Ines Montani + * @version 0.0.1 + * @license MIT + */ + +'use strict'; + +/** Generate a terminal widget. */ +class Termynal { + /** + * Construct the widget's settings. + * @param {(string|Node)=} container - Query selector or container element. + * @param {Object=} options - Custom settings. + * @param {string} options.prefix - Prefix to use for data attributes. + * @param {number} options.startDelay - Delay before animation, in ms. + * @param {number} options.typeDelay - Delay between each typed character, in ms. + * @param {number} options.lineDelay - Delay between each line, in ms. + * @param {number} options.progressLength - Number of characters displayed as progress bar. + * @param {string} options.progressChar – Character to use for progress bar, defaults to █. + * @param {number} options.progressPercent - Max percent of progress. + * @param {string} options.cursor – Character to use for cursor, defaults to ▋. + * @param {Object[]} lineData - Dynamically loaded line data objects. + * @param {boolean} options.noInit - Don't initialise the animation. + */ + constructor(container = '#termynal', options = {}) { + this.container = (typeof container === 'string') ? 
document.querySelector(container) : container; + this.pfx = `data-${options.prefix || 'ty'}`; + this.originalStartDelay = this.startDelay = options.startDelay + || parseFloat(this.container.getAttribute(`${this.pfx}-startDelay`)) || 600; + this.originalTypeDelay = this.typeDelay = options.typeDelay + || parseFloat(this.container.getAttribute(`${this.pfx}-typeDelay`)) || 90; + this.originalLineDelay = this.lineDelay = options.lineDelay + || parseFloat(this.container.getAttribute(`${this.pfx}-lineDelay`)) || 1500; + this.progressLength = options.progressLength + || parseFloat(this.container.getAttribute(`${this.pfx}-progressLength`)) || 40; + this.progressChar = options.progressChar + || this.container.getAttribute(`${this.pfx}-progressChar`) || '█'; + this.progressPercent = options.progressPercent + || parseFloat(this.container.getAttribute(`${this.pfx}-progressPercent`)) || 100; + this.cursor = options.cursor + || this.container.getAttribute(`${this.pfx}-cursor`) || '▋'; + this.lineData = this.lineDataToElements(options.lineData || []); + this.loadLines() + if (!options.noInit) this.init() + } + + loadLines() { + // Load all the lines and create the container so that the size is fixed + // Otherwise it would be changing and the user viewport would be constantly + // moving as she/he scrolls + const finish = this.generateFinish() + finish.style.visibility = 'hidden' + this.container.appendChild(finish) + // Appends dynamically loaded lines to existing line elements. + this.lines = [...this.container.querySelectorAll(`[${this.pfx}]`)].concat(this.lineData); + for (let line of this.lines) { + line.style.visibility = 'hidden' + this.container.appendChild(line) + } + const restart = this.generateRestart() + restart.style.visibility = 'hidden' + this.container.appendChild(restart) + this.container.setAttribute('data-termynal', ''); + } + + /** + * Initialise the widget, get lines, clear container and start animation. + */ + init() { + /** + * Calculates width and height of Termynal container. + * If container is empty and lines are dynamically loaded, defaults to browser `auto` or CSS. + */ + const containerStyle = getComputedStyle(this.container); + this.container.style.width = containerStyle.width !== '0px' ? + containerStyle.width : undefined; + this.container.style.minHeight = containerStyle.height !== '0px' ? + containerStyle.height : undefined; + + this.container.setAttribute('data-termynal', ''); + this.container.innerHTML = ''; + for (let line of this.lines) { + line.style.visibility = 'visible' + } + this.start(); + } + + /** + * Start the animation and rener the lines depending on their data attributes. 
+ */ + async start() { + this.addFinish() + await this._wait(this.startDelay); + + for (let line of this.lines) { + const type = line.getAttribute(this.pfx); + const delay = line.getAttribute(`${this.pfx}-delay`) || this.lineDelay; + + if (type == 'input') { + line.setAttribute(`${this.pfx}-cursor`, this.cursor); + await this.type(line); + await this._wait(delay); + } + + else if (type == 'progress') { + await this.progress(line); + await this._wait(delay); + } + + else { + this.container.appendChild(line); + await this._wait(delay); + } + + line.removeAttribute(`${this.pfx}-cursor`); + } + this.addRestart() + this.finishElement.style.visibility = 'hidden' + this.lineDelay = this.originalLineDelay + this.typeDelay = this.originalTypeDelay + this.startDelay = this.originalStartDelay + } + + generateRestart() { + const restart = document.createElement('a') + restart.onclick = (e) => { + e.preventDefault() + this.container.innerHTML = '' + this.init() + } + restart.href = '#' + restart.setAttribute('data-terminal-control', '') + restart.innerHTML = "restart ↻" + return restart + } + + generateFinish() { + const finish = document.createElement('a') + finish.onclick = (e) => { + e.preventDefault() + this.lineDelay = 0 + this.typeDelay = 0 + this.startDelay = 0 + } + finish.href = '#' + finish.setAttribute('data-terminal-control', '') + finish.innerHTML = "fast →" + this.finishElement = finish + return finish + } + + addRestart() { + const restart = this.generateRestart() + this.container.appendChild(restart) + } + + addFinish() { + const finish = this.generateFinish() + this.container.appendChild(finish) + } + + /** + * Animate a typed line. + * @param {Node} line - The line element to render. + */ + async type(line) { + const chars = [...line.textContent]; + line.textContent = ''; + this.container.appendChild(line); + + for (let char of chars) { + const delay = line.getAttribute(`${this.pfx}-typeDelay`) || this.typeDelay; + await this._wait(delay); + line.textContent += char; + } + } + + /** + * Animate a progress bar. + * @param {Node} line - The line element to render. + */ + async progress(line) { + const progressLength = line.getAttribute(`${this.pfx}-progressLength`) + || this.progressLength; + const progressChar = line.getAttribute(`${this.pfx}-progressChar`) + || this.progressChar; + const chars = progressChar.repeat(progressLength); + const progressPercent = line.getAttribute(`${this.pfx}-progressPercent`) + || this.progressPercent; + line.textContent = ''; + this.container.appendChild(line); + + for (let i = 1; i < chars.length + 1; i++) { + await this._wait(this.typeDelay); + const percent = Math.round(i / chars.length * 100); + line.textContent = `${chars.slice(0, i)} ${percent}%`; + if (percent>progressPercent) { + break; + } + } + } + + /** + * Helper function for animation delays, called with `await`. + * @param {number} time - Timeout, in ms. + */ + _wait(time) { + return new Promise(resolve => setTimeout(resolve, time)); + } + + /** + * Converts line data objects into line elements. + * + * @param {Object[]} lineData - Dynamically loaded lines. + * @param {Object} line - Line data object. + * @returns {Element[]} - Array of line elements. + */ + lineDataToElements(lineData) { + return lineData.map(line => { + let div = document.createElement('div'); + div.innerHTML = `${line.value || ''}`; + + return div.firstElementChild; + }); + } + + /** + * Helper function for generating attributes string. + * + * @param {Object} line - Line data object. 
+ * @returns {string} - String of attributes. + */ + _attributes(line) { + let attrs = ''; + for (let prop in line) { + // Custom add class + if (prop === 'class') { + attrs += ` class=${line[prop]} ` + continue + } + if (prop === 'type') { + attrs += `${this.pfx}="${line[prop]}" ` + } else if (prop !== 'value') { + attrs += `${this.pfx}-${prop}="${line[prop]}" ` + } + } + return attrs; + } +} + +/** +* HTML API: If current script has container(s) specified, initialise Termynal. +*/ +if (document.currentScript.hasAttribute('data-termynal-container')) { + const containers = document.currentScript.getAttribute('data-termynal-container'); + containers.split('|') + .forEach(container => new Termynal(container)) +} \ No newline at end of file diff --git a/mkdocs.yml b/mkdocs.yml index 0e882bd..3cb00dd 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -5,21 +5,88 @@ repo_name: shroominic/funcchain repo_url: https://github.com/shroominic/funcchain/ nav: + - 'Introduction': + - 'Funcchain': 'getting-started/introduction.md' - 'Getting Started': - 'Welcome': 'index.md' - 'Installation': 'getting-started/installation.md' - 'Usage': 'getting-started/usage.md' - 'Concepts': - - 'Overview': 'overview.md' + - 'Overview': 'concepts/overview.md' - 'Chain': 'chain.md' - 'Input Args': 'input.md' - 'Prompt Template': 'prompt.md' - 'Output Parser': 'parser.md' - 'Pydantic Models': 'models.md' - 'Settings': 'settings.md' - - 'Examples': 'examples.md' + - 'Examples': + - 'ChatGPT': 'examples/chat.md' + - 'Dynamic Router': 'examples/dynamic_router.md' + - 'Enums': 'examples/enums.md' + - 'Error Output': 'examples/error_output.md' + - 'Literals': 'examples/literals.md' + - 'Union Types': 'examples/union.md' theme: name: material palette: scheme: slate + +# Extensions +markdown_extensions: + - abbr + - admonition + - pymdownx.details + - attr_list + - def_list + - footnotes + - md_in_html + - toc: + permalink: true + - pymdownx.arithmatex: + generic: true + - pymdownx.betterem: + smart_enable: all + - pymdownx.caret + - pymdownx.details + - pymdownx.emoji: + emoji_generator: !!python/name:material.extensions.emoji.to_svg + emoji_index: !!python/name:material.extensions.emoji.twemoji + - pymdownx.highlight: + anchor_linenums: true + line_spans: __span + pygments_lang_class: true + - pymdownx.inlinehilite + - pymdownx.keys + - pymdownx.magiclink: + normalize_issue_symbols: true + repo_url_shorthand: true + user: jxnl + repo: instructor + - pymdownx.mark + - pymdownx.smartsymbols + - pymdownx.snippets: + auto_append: + - includes/mkdocs.md + - pymdownx.superfences: + custom_fences: + - name: mermaid + class: mermaid + format: !!python/name:pymdownx.superfences.fence_code_format + - pymdownx.tabbed: + alternate_style: true + combine_header_slug: true + slugify: !!python/object/apply:pymdownx.slugs.slugify + kwds: + case: lower + - pymdownx.tasklist: + custom_checkbox: true + - pymdownx.tilde + +extra_css: + - css/termynal.css + - css/custom.css + +extra_javascript: + - js/termynal.js + - js/custom.js \ No newline at end of file diff --git a/requirements-dev.lock b/requirements-dev.lock index db438bb..c70599f 100644 --- a/requirements-dev.lock +++ b/requirements-dev.lock @@ -30,6 +30,7 @@ faiss-cpu==1.7.4 filelock==3.13.1 frozenlist==1.4.0 ghp-import==2.1.0 +greenlet==3.0.3 h11==0.14.0 httpcore==1.0.2 httpx==0.25.1 @@ -67,13 +68,11 @@ packaging==23.2 paginate==0.5.6 parso==0.8.3 pathspec==0.11.2 -pexpect==4.9.0 pillow==10.1.0 platformdirs==4.0.0 pluggy==1.3.0 pre-commit==3.5.0 prompt-toolkit==3.0.41 -ptyprocess==0.7.0 
pure-eval==0.2.2 pydantic==2.5.2 pydantic-core==2.14.5 diff --git a/requirements.lock b/requirements.lock index 14c4468..1820e79 100644 --- a/requirements.lock +++ b/requirements.lock @@ -11,6 +11,7 @@ annotated-types==0.6.0 anyio==3.7.1 certifi==2023.11.17 charset-normalizer==3.3.2 +colorama==0.4.6 distro==1.9.0 docstring-parser==0.15 h11==0.14.0