
feat: bump llama index after pr was accepted
JoFrost committed May 16, 2024
1 parent 7605e17 commit 0c86a92
Showing 3 changed files with 4 additions and 248 deletions.
File 1 of 3 (Python source; the file path is not shown in this view):

@@ -12,7 +12,6 @@
     Context,
     get_default_context,
 )
-from .base_mml import GeminiMultiModal2
 from lavague.core.extractors import BaseExtractor
 from lavague.core.retrievers import BaseHtmlRetriever
 from lavague.core.context import DEFAULT_MAX_TOKENS, DEFAULT_TEMPERATURE
@@ -40,7 +39,7 @@ def __init__(
                 max_tokens=DEFAULT_MAX_TOKENS,
                 temperature=DEFAULT_TEMPERATURE,
             ),
-            GeminiMultiModal2(api_key=api_key, model_name=mm_llm),
+            GeminiMultiModal(api_key=api_key, model_name=mm_llm),
             GeminiEmbedding(api_key=api_key, model_name=embedding),
             retriever,
             prompt_template,
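To make the intent of the change concrete, here is a minimal sketch of the post-change wiring, assuming the upstream llama-index Gemini integrations; the model names and API-key handling below are illustrative assumptions, while the actual context passes in its own mm_llm and embedding arguments:

# A minimal sketch: the locally patched GeminiMultiModal2 (from .base_mml) is
# dropped in favour of the stock llama-index class, now that the upstream fix
# has been released. Model names and env-var handling are assumptions.
import os

from llama_index.multi_modal_llms.gemini import GeminiMultiModal
from llama_index.embeddings.gemini import GeminiEmbedding

api_key = os.environ["GOOGLE_API_KEY"]  # assumed location of the Gemini key

# Before: GeminiMultiModal2(api_key=api_key, model_name=mm_llm)
# After:  the upstream class from llama-index-multi-modal-llms-gemini >= 0.1.6
mm_llm = GeminiMultiModal(api_key=api_key, model_name="models/gemini-pro-vision")
embedding = GeminiEmbedding(api_key=api_key, model_name="models/embedding-001")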

File 2 of 3: this file was deleted (presumably the local .base_mml module that defined the temporary GeminiMultiModal2 workaround, accounting for the bulk of the 248 deleted lines).

File 3 of 3 (the package's Poetry manifest, pyproject.toml):

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "lavague-contexts-gemini"
-version = "0.1.2"
+version = "0.1.3"
 description = "gemini integration for lavague"
 authors = ["lavague-ai"]
 readme = "README.md"
@@ -22,9 +22,9 @@ packages = [{include = "lavague/"}]
 [tool.poetry.dependencies]
 python = "^3.10.0"
 lavague-core = "^0.1.1"
-llama-index-llms-gemini = "^0.1.7"
+llama-index-llms-gemini = "^0.1.8"
 llama-index-embeddings-gemini = "^0.1.6"
-llama-index-multi-modal-llms-gemini = "^0.1.5"
+llama-index-multi-modal-llms-gemini = "^0.1.6"
 google-generativeai = "^0.4.1"

 [build-system]
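After running a dependency update, a small sketch (assuming the packages are installed in the active environment) for confirming that the bumped llama-index Gemini distributions resolved to the expected versions; the distribution names come straight from the diff above:

# Print the installed versions of the Gemini-related llama-index packages.
from importlib.metadata import version

for dist in (
    "llama-index-llms-gemini",              # bumped to ^0.1.8 in this commit
    "llama-index-embeddings-gemini",        # unchanged, ^0.1.6
    "llama-index-multi-modal-llms-gemini",  # bumped to ^0.1.6 in this commit
):
    print(dist, version(dist))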
