An agent takes two main arguments: an LLM and a list of tools.

The txtai agent framework is built with Transformers Agents. Additional options can be passed directly in the Agent constructor.
```python
from datetime import datetime

from txtai import Agent

wikipedia = {
    "name": "wikipedia",
    "description": "Searches a Wikipedia database",
    "provider": "huggingface-hub",
    "container": "neuml/txtai-wikipedia"
}

arxiv = {
    "name": "arxiv",
    "description": "Searches a database of scientific papers",
    "provider": "huggingface-hub",
    "container": "neuml/txtai-arxiv"
}

def today() -> str:
    """
    Gets the current date and time

    Returns:
        current date and time
    """

    return datetime.today().isoformat()

agent = Agent(
    tools=[today, wikipedia, arxiv, "websearch"],
    llm="hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
)
```
llm: string|llm instance

LLM path or LLM pipeline instance. See the LLM pipeline for more information.

tools: list

List of tools to supply to the agent. Supports the following configurations.

A function tool takes the following dictionary fields.
| Field       | Description              |
|-------------|--------------------------|
| name        | name of the tool         |
| description | tool description         |
| target      | target method / callable |
A function or callable method can also be directly supplied in the tools list. In this case, the fields are inferred from the method documentation.
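For illustration, the sketch below wraps a plain function in a tool dictionary to set an explicit name and description (the `clock` naming is illustrative, not part of the txtai API):

```python
from datetime import datetime

from txtai import Agent

def today() -> str:
    """
    Gets the current date and time

    Returns:
        current date and time
    """

    return datetime.today().isoformat()

# Illustrative function tool: explicit name/description with the function as target
clock = {
    "name": "clock",
    "description": "Gets the current date and time",
    "target": today
}

agent = Agent(
    tools=[clock],
    llm="hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
)
```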
Embeddings indexes have built-in support. Provide the following dictionary configuration to add an embeddings index as a tool.
| Field       | Description                            |
|-------------|----------------------------------------|
| name        | embeddings index name                  |
| description | embeddings index description           |
| `**kwargs`  | parameters to pass to embeddings.load  |
A Transformers tool instance can be provided. Additionally, the following strings load tools directly from Transformers.
| Tool      | Description                                                    |
|-----------|----------------------------------------------------------------|
| websearch | Runs a web search using the built-in Transformers Agents tool  |
method: reactjson|reactcode|code

Sets the agent method. Defaults to `reactjson`. Read more on this here.
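For example, a minimal sketch that selects the reactcode method at construction time (reusing the tools and LLM from the example above):

```python
agent = Agent(
    tools=[today, wikipedia, arxiv, "websearch"],
    llm="hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
    method="reactcode",
)
```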
An agent automatically creates workflows to answer multi-faceted user requests. Agents iteratively prompt and/or interface with tools to step through a process and ultimately come to an answer for a request.

Agents excel at complex tasks where multiple tools and/or methods are required. They incorporate a level of randomness, similar to different people working on the same task. When the request is simple and/or there is a rule-based process, other methods such as RAG and Workflows should be explored.

The following code snippet defines a basic agent.
```python
from datetime import datetime

from txtai import Agent

wikipedia = {
    "name": "wikipedia",
    "description": "Searches a Wikipedia database",
    "provider": "huggingface-hub",
    "container": "neuml/txtai-wikipedia"
}

arxiv = {
    "name": "arxiv",
    "description": "Searches a database of scientific papers",
    "provider": "huggingface-hub",
    "container": "neuml/txtai-arxiv"
}

def today() -> str:
    """
    Gets the current date and time

    Returns:
        current date and time
    """

    return datetime.today().isoformat()

agent = Agent(
    tools=[today, wikipedia, arxiv, "websearch"],
    llm="hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4",
    max_iterations=10,
)
```
The agent above has access to two embeddings databases (Wikipedia and ArXiv) and the web. Given the user's input request, the agent decides the best tool to solve the task.
The first example solves a problem with multiple data points. See below.

```python
agent("Which city has the highest population, Boston or New York?")
```

This requires looking up the population of each city before the question can be answered. Multiple search requests are run to generate a final answer.
Standard retrieval augmented generation (RAG) runs a single vector search to obtain a context and builds a prompt with the context + input question. Agentic RAG is a more complex process that goes through multiple iterations. It can also utilize multiple databases to come to a final conclusion.

The example below aggregates information from multiple sources and builds a report on a topic.
```python
researcher = """
You're an expert researcher looking to write a paper on {topic}.
Search for websites, scientific papers and Wikipedia related to the topic.
Write a report with summaries and references (with hyperlinks).
Write the text as Markdown.
"""

agent(researcher.format(topic="alien life"))
```
Agents can also be tools. This enables the concept of building "Agent Teams" to solve problems. The previous example can be rewritten as a list of agents.
```python
from txtai import Agent, LLM

llm = LLM("hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4")

websearcher = Agent(
    tools=["websearch"],
    llm=llm,
)

wikiman = Agent(
    tools=[{
        "name": "wikipedia",
        "description": "Searches a Wikipedia database",
        "provider": "huggingface-hub",
        "container": "neuml/txtai-wikipedia"
    }],
    llm=llm,
)

researcher = Agent(
    tools=[{
        "name": "arxiv",
        "description": "Searches a database of scientific papers",
        "provider": "huggingface-hub",
        "container": "neuml/txtai-arxiv"
    }],
    llm=llm,
)

agent = Agent(
    tools=[{
        "name": "websearcher",
        "description": "I run web searches, there is no answer a web search can't solve!",
        "target": websearcher
    }, {
        "name": "wikiman",
        "description": "Wikipedia has all the answers, I search Wikipedia and answer questions",
        "target": wikiman
    }, {
        "name": "researcher",
        "description": "I'm a science guy. I search arXiv to get all my answers.",
        "target": researcher
    }],
    llm=llm,
    max_iterations=10
)
```
This provides another level of intelligence to the process. Instead of a single tool execution, each agent-tool combination has its own reasoning engine.
```python
agent("""
Work with your team and build a comprehensive report on fundamental
concepts about Signal Processing.
Write the output in Markdown.
""")
```
See the link below to learn more.
| Notebook | Description |
|----------|-------------|
| What's new in txtai 8.0 | Agents with txtai |
| Analyzing Hugging Face Posts with Graphs and Agents | Explore a rich dataset with Graph Analysis and Agents |
| Granting autonomy to agents | Agents that iteratively solve problems as they see fit |
`__init__(**kwargs)`

Creates a new Agent.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| kwargs | arguments to pass to the underlying Agent backend and LLM pipeline instance | {} |

Source: txtai/agent/base.py
`__call__(text, maxlength=8192, stream=False, **kwargs)`

Runs an agent loop.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| text | instructions to run | required |
| maxlength | maximum sequence length | 8192 |
| stream | stream response if True, defaults to False | False |
| kwargs | additional keyword arguments | {} |

Returns: result

Source: txtai/agent/base.py
The API supports combining multiple API instances into a single logical embeddings index. An example configuration is shown below.
```yaml
cluster:
  shards:
    - http://127.0.0.1:8002
    - http://127.0.0.1:8003
```
This configuration aggregates the API instances above as index shards. Data is evenly split among each of the shards at index time. Queries are run in parallel against each shard and the results are joined together. This method allows horizontal scaling and supports very large index clusters.
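Each shard in the list is a standard txtai API instance. A minimal sketch of starting the two shards above with Uvicorn, assuming each node has its own config.yml with an embeddings index:

```bash
# Run on each data node (or in separate terminals for local testing)
CONFIG=config.yml uvicorn "txtai.api:app" --port 8002
CONFIG=config.yml uvicorn "txtai.api:app" --port 8003
```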
This method is only recommended for datasets of 1 billion+ records. The ANN libraries can easily support smaller data sizes, so this method is not worth the additional complexity in those cases. At this time, new shards cannot be added after building the initial index.

See the link below for a detailed example covering distributed embeddings clusters.
| Notebook | Description |
|----------|-------------|
| Distributed embeddings cluster | Distribute an embeddings index across multiple data nodes |
Configuration is set through YAML. In most cases, YAML keys map to field names in Python. The previous section has a full-featured example covering a wide array of configuration options.

Each section below describes the available configuration settings.
The configuration parser expects a top level `embeddings` key to be present in the YAML. All embeddings configuration is supported.
The following example defines an embeddings index.
```yaml
path: index path
writable: true

embeddings:
  path: vector model
  content: true
```
Three top level settings are available to control where indexes are saved and whether an index is read-only.
path: string

Path to save and load the embeddings index. Each API instance can only access a single index at a time.
writable: boolean

Determines if the input embeddings index is writable (true) or read-only (false). This allows serving a read-only index.
Cloud storage settings can be set under a `cloud` top level configuration group.
Agents are defined under a top level `agent` key. Each key under the `agent` key is the name of the agent. Constructor parameters can be passed under this key.
The following example defines an agent.
```yaml
agent:
  researcher:
    tools:
      - websearch

llm:
  path: hugging-quants/Meta-Llama-3.1-8B-Instruct-AWQ-INT4
```
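Assuming the configuration above is saved as config.yml, the agent can then be run by name through an application instance. A minimal sketch (the prompt text is illustrative):

```python
from txtai import Application

# Load the YAML configuration above
app = Application("config.yml")

# Run the researcher agent defined in the configuration
print(app.agent("researcher", "What are the latest developments in vector search?"))
```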
Pipelines are loaded as top level configuration parameters. Pipeline names are automatically detected in the YAML configuration and created upon startup. All pipelines are supported.
The following example defines a series of pipelines. Note that entries below are the lower-case names of the pipeline class.
```yaml
caption:

extractor:
  path: model path

labels:

summary:

tabular:

translation:
```
Under each pipeline name, configuration settings for the pipeline can be set.
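Assuming a configuration like the one above is saved as config.yml, pipelines can then be invoked by name through an application instance. A minimal sketch (the input text is illustrative):

```python
from txtai import Application

app = Application("config.yml")

# Run the summary pipeline defined in the configuration
print(app.pipeline("summary", "Text to summarize goes here ..."))
```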
Workflows are defined under a top level `workflow` key. Each key under the `workflow` key is the name of the workflow. Under that is a `tasks` key with each task definition.
The following example defines a workflow.
```yaml
workflow:
  sumtranslate:
    tasks:
      - action: summary
      - action: translation
```
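Assuming the configuration is saved as config.yml, this workflow can be executed through an application instance. A minimal sketch (the input text is illustrative):

```python
from txtai import Application

app = Application("config.yml")

# Summarize, then translate each input element
for result in app.workflow("sumtranslate", ["Long text to summarize and translate ..."]):
    print(result)
```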
A workflow can also be scheduled with a cron expression via a `schedule` key, as shown below.
```yaml
workflow:
  index:
    schedule:
      cron: 0/10 * * * * *
      elements: ["api params"]
    tasks:
      - task: service
        url: api url
      - action: index
```
tasks: list

Expects a list of workflow tasks. Each element defines a single workflow task. All task configuration is supported.
A shorthand syntax for creating tasks is supported. This syntax automatically maps task strings to an `action: value` pair. Example below.
```yaml
workflow:
  index:
    tasks:
      - action1
      - action2
```
Each task element supports the following additional arguments.
action: string|list

Both single and multi-action tasks are supported.

The action parameter works slightly differently when passed via configuration. The parameter(s) need to be converted into callable method(s). If an action is a pipeline that has been defined in the current configuration, that pipeline is used as the action.
There are three special action names: `index`, `upsert` and `search`. If `index` or `upsert` is used as the action, the task will collect workflow data elements and load them into the defined embeddings index. If `search` is used, the task will execute embeddings queries for each input data element.
Otherwise, the action must be a path to a callable object or function. The configuration parser will resolve the function name and use that as the task action.
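For example, a fully qualified function path can be set as the action. A sketch, where `mymodule.myfunction` is a hypothetical callable on the Python path:

```yaml
workflow:
  custom:
    tasks:
      - action: mymodule.myfunction
```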
task: string

Optionally sets the type of task to create. For example, this could be a `file` task or a `retrieve` task. If this is not specified, a generic task is created. The list of workflow tasks can be found here.
args: list

Optional list of static arguments to pass to the workflow task. These are combined with workflow data to pass to each `__call__`.
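For example, a static language code can be passed to each translation call. A minimal sketch mirroring the API configuration later in this document:

```yaml
workflow:
  sumfrench:
    tasks:
      - action: summary
      - action: translation
        args: ["fr"]
```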
The txtai API has a number of features out of the box that are designed to help get started quickly. API services can also be augmented with custom code and functionality. The two main ways to do this are with extensions and dependencies.
Extensions add a custom endpoint. Dependencies add middleware that executes with each request. See the sections below for more.
+While the API is extremely flexible and complex logic can be executed through YAML-driven workflows, some may prefer to create an endpoint in Python. API extensions define custom Python endpoints that interact with txtai applications.
See the link below for a detailed example.

| Notebook | Description |
|----------|-------------|
| Custom API Endpoints | Extend the API with custom endpoints |
txtai has a default API token authorization method that works well in many cases. Dependencies can also add custom logic with each request. This could be an additional authorization step and/or an authentication method.
See the link below for a detailed example.

| Notebook | Description |
|----------|-------------|
| API Authorization and Authentication | Add authorization, authentication and middleware dependencies to the API |
txtai has a full-featured API, backed by FastAPI, that can optionally be enabled for any txtai process. All functionality found in txtai can be accessed via the API.
+The following is an example configuration and startup script for the API.
Note: This configuration file enables all functionality. For memory-bound systems, splitting pipelines into multiple instances is a best practice.
```yaml
# Index file path
path: /tmp/index

# Allow indexing of documents
writable: True

# Embeddings index
embeddings:
  path: sentence-transformers/nli-mpnet-base-v2

# Extractive QA
extractor:
  path: distilbert-base-cased-distilled-squad

# Zero-shot labeling
labels:

# Similarity
similarity:

# Text segmentation
segmentation:
  sentences: true

# Text summarization
summary:

# Text extraction
textractor:
  paragraphs: true
  minlength: 100
  join: true

# Transcribe audio to text
transcription:

# Translate text between languages
translation:

# Workflow definitions
workflow:
  sumfrench:
    tasks:
      - action: textractor
        task: url
      - action: summary
      - action: translation
        args: ["fr"]
  sumspanish:
    tasks:
      - action: textractor
        task: url
      - action: summary
      - action: translation
        args: ["es"]
```
Assuming this YAML content is stored in a file named config.yml, the following command starts the API process.
```bash
CONFIG=config.yml uvicorn "txtai.api:app"
```
Uvicorn is a full-featured production-ready server. See the Uvicorn deployment guide for more on configuration options.
+The default port for the API is 8000. See the uvicorn link above to change this.
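For example, a different port can be set with a standard Uvicorn option:

```bash
CONFIG=config.yml uvicorn "txtai.api:app" --port 8001
```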
txtai has a number of language bindings which abstract the API (see links below). Alternatively, code can be written to connect directly to the API. Documentation for a live running instance can be found at the `/docs` url (i.e. http://localhost:8000/docs). The following example runs a workflow using cURL.
```bash
curl \
  -X POST "http://localhost:8000/workflow" \
  -H "Content-Type: application/json" \
  -d '{"name":"sumfrench", "elements": ["https://github.com/neuml/txtai"]}'
```
A local instance can be instantiated. In this case, a txtai application runs internally, without any network connections, providing the same consolidated functionality. This enables running txtai in Python with configuration.
The configuration above can be run in Python with:

```python
from txtai import Application

# Load and run workflow
app = Application("config.yml")
app.workflow("sumfrench", ["https://github.com/neuml/txtai"])
```
See this link for a full list of methods.
The API can be containerized and run. This will bring up an API instance without having to install Python, txtai or any dependencies on your machine!
+See this section for more information.
The following programming languages have bindings with the txtai API: JavaScript, Java, Rust and Go.
See the link below for a detailed example covering how to use the API.

| Notebook | Description |
|----------|-------------|
| API Gallery | Using txtai in JavaScript, Java, Rust and Go |
`API`

Bases: `Application`

Base API template. The API is an extended txtai application, adding the ability to cluster API instances together.

Downstream applications can extend this base template to add/modify functionality, as shown in the sketch below.

Source: txtai/api/base.py
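A minimal sketch of a downstream subclass, assuming the API class is importable from txtai.api (the logging behavior is illustrative):

```python
from txtai.api import API

class CustomAPI(API):
    """
    API subclass that logs each batch of documents before indexing.
    """

    def add(self, documents):
        # Illustrative extension point: log, then delegate to the base implementation
        print("Adding a batch of documents for indexing")
        return super().add(documents)
```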
`add(documents)`

Adds a batch of documents for indexing.

Downstream applications can override this method to also store full documents in an external system.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| documents | list of {id: value, text: value} | required |

Returns: unmodified input documents

Source: txtai/api/base.py
`addobject(data, uid, field)`

Helper method that builds a batch of object documents.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| data | object content | required |
| uid | optional list of corresponding uids | required |
| field | optional field to set | required |

Returns: documents

Source: txtai/app/base.py
`agent(name, *args, **kwargs)`

Executes an agent.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| name | agent name | required |
| args | agent positional arguments | () |
| kwargs | agent keyword arguments | {} |

Source: txtai/app/base.py
`batchexplain(queries, texts=None, limit=10)`

Explains the importance of each input token in text for a list of queries.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| queries | queries text | required |
| texts | optional list of text, otherwise runs search queries | None |
| limit | optional limit if texts is None | 10 |

Returns: list of dict per input text per query, where higher token scores represent higher importance relative to the query

Source: txtai/app/base.py
`batchsimilarity(queries, texts)`

Computes the similarity between a list of queries and a list of text. Returns a list of {id: value, score: value} sorted by highest score per query, where id is the index in texts.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| queries | queries text | required |
| texts | list of text | required |

Returns: list of {id: value, score: value} per query

Source: txtai/app/base.py
`batchtransform(texts, category=None, index=None)`

Transforms a list of text into embeddings arrays.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| texts | list of text | required |
| category | category for instruction-based embeddings | None |
| index | index name, if applicable | None |

Returns: embeddings arrays

Source: txtai/app/base.py
`count()`

Total number of elements in this embeddings index.

Returns: number of elements in embeddings index

Source: txtai/api/base.py
`createagents()`

Create agents.

Source: txtai/app/base.py
`createpipelines()`

Create pipelines.

Source: txtai/app/base.py
`delete(ids)`

Deletes from an embeddings index. Returns the list of ids deleted.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| ids | list of ids to delete | required |

Returns: ids deleted

Source: txtai/api/base.py
`explain(query, texts=None, limit=10)`

Explains the importance of each input token in text for a query.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| query | query text | required |
| texts | optional list of text, otherwise runs search query | None |
| limit | optional limit if texts is None | 10 |

Returns: list of dict per input text, where higher token scores represent higher importance relative to the query

Source: txtai/app/base.py
`extract(queue, texts=None)`

Extracts answers to input questions.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| queue | list of {name: value, query: value, question: value, snippet: value} | required |
| texts | optional list of text | None |

Returns: list of {name: value, answer: value}

Source: txtai/app/base.py
`index()`

Builds an embeddings index for previously batched documents.

Source: txtai/api/base.py
`label(text, labels)`

Applies a zero shot classifier to text using a list of labels. Returns a list of {id: value, score: value} sorted by highest score, where id is the index in labels.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| text | text\|list | required |
| labels | list of labels | required |

Returns: list of {id: value, score: value} per text element

Source: txtai/app/base.py
`pipeline(name, *args, **kwargs)`

Generic pipeline execution method.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| name | pipeline name | required |
| args | pipeline positional arguments | () |
| kwargs | pipeline keyword arguments | {} |

Returns: pipeline results

Source: txtai/app/base.py
`reindex(config, function=None)`

Recreates this embeddings index using config. This method only works if document content storage is enabled.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| config | new config | required |
| function | optional function to prepare content for indexing | None |

Source: txtai/api/base.py
`similarity(query, texts)`

Computes the similarity between a query and a list of text. Returns a list of {id: value, score: value} sorted by highest score, where id is the index in texts.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| query | query text | required |
| texts | list of text | required |

Returns: list of {id: value, score: value}

Source: txtai/app/base.py
`transform(text, category=None, index=None)`

Transforms text into an embeddings array.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| text | input text | required |
| category | category for instruction-based embeddings | None |
| index | index name, if applicable | None |

Returns: embeddings array

Source: txtai/app/base.py
`upsert()`

Runs an embeddings upsert operation for previously batched documents.

Source: txtai/api/base.py
`wait()`

Closes threadpool and waits for completion.

Source: txtai/app/base.py
`workflow(name, elements)`

Executes a workflow.

Parameters:

| Name | Description | Default |
|------|-------------|---------|
| name | workflow name | required |
| elements | elements to process | required |

Returns: processed elements

Source: txtai/app/base.py
The default implementation of an API service runs via HTTP and is fully open. If the service is being run as a prototype on an internal network, that may be fine. In most scenarios, the connection should at least be encrypted. Authorization is another built-in feature that requires a valid API token with each request. See below for more.
The default API service command starts a Uvicorn server as an HTTP service on port 8000. To run an HTTPS service, consider the following options.
- TLS Proxy Server. Recommended choice. With this configuration, the txtai API service runs as an HTTP service only accessible on the localhost/local network. The proxy server handles all encryption and redirects requests to local services. See this example configuration for more.
- Uvicorn SSL Certificate. Another option is setting the SSL certificate on the Uvicorn service, as shown in the sketch after this list. This works in simple situations but gets complex when hosting multiple txtai or other related services.
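A minimal sketch of the Uvicorn option, assuming certificate and key files already exist:

```bash
CONFIG=config.yml uvicorn "txtai.api:app" --ssl-keyfile key.pem --ssl-certfile cert.pem
```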
Authorization requires a valid API token with each API request. This token is sent as an HTTP `Authorization` header.
Server

```bash
CONFIG=config.yml TOKEN=<sha256 encoded token> uvicorn "txtai.api:app"
```
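Per the placeholder above, the server-side value is a SHA-256 encoded token. A minimal sketch of generating one with the Python standard library (the token string is illustrative):

```python
import hashlib

# SHA-256 hash of the plain API token; set this value as the TOKEN environment variable
print(hashlib.sha256("my-api-token".encode()).hexdigest())
```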
Client

```bash
curl \
  -X POST "http://localhost:8000/workflow" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer <token>" \
  -d '{"name":"sumfrench", "elements": ["https://github.com/neuml/txtai"]}'
```
It's important to note that HTTPS must be enabled using one of the methods mentioned above. Otherwise, tokens will be exchanged as clear text.
Authentication and Authorization can be fully customized. See the dependencies section for more.