From 8b65edb470e386aa31854ffefdc4446927460f78 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Sun, 15 Dec 2024 19:35:31 +0000 Subject: [PATCH 01/13] feat: add Mistral Pixtral multimodal support with documentation and tests - Add Mistral multimodal client implementation - Add image validation for Mistral requirements - Add comprehensive test suite for Mistral multimodal - Update README with Mistral Pixtral examples - Add test configuration for Mistral API Co-Authored-By: jason@jxnl.co --- .env.tests | 7 ++ README.md | 64 ++++++++++ instructor/client_mistral.py | 72 ++++++++--- instructor/mode.py | 1 + instructor/multimodal.py | 52 +++++++- tests/llm/test_mistral/__init__.py | 1 + tests/llm/test_mistral/conftest.py | 18 +++ tests/llm/test_mistral/test_multimodal.py | 145 ++++++++++++++++++++++ tests/llm/test_mistral/util.py | 6 + 9 files changed, 345 insertions(+), 21 deletions(-) create mode 100644 .env.tests create mode 100644 tests/llm/test_mistral/__init__.py create mode 100644 tests/llm/test_mistral/conftest.py create mode 100644 tests/llm/test_mistral/test_multimodal.py create mode 100644 tests/llm/test_mistral/util.py diff --git a/.env.tests b/.env.tests new file mode 100644 index 000000000..36540b823 --- /dev/null +++ b/.env.tests @@ -0,0 +1,7 @@ +# Mistral API Configuration +MISTRAL_API_KEY=your_mistral_api_key_here +MISTRAL_BASE_URL=https://api.mistral.ai/v1 + +# Other API keys for reference +OPENAI_API_KEY=your_openai_api_key_here +ANTHROPIC_API_KEY=your_anthropic_api_key_here diff --git a/README.md b/README.md index 165941fe5..7f6867280 100644 --- a/README.md +++ b/README.md @@ -326,6 +326,70 @@ assert resp.name == "Jason" assert resp.age == 25 ``` +### Using Mistral Models with Multimodal Support + +Make sure to install `mistralai` and set your system environment variable with `export MISTRAL_API_KEY=`. + +```bash +pip install mistralai +``` + +```python +import instructor +from mistralai import MistralClient +from instructor.multimodal import Image +from pydantic import BaseModel, Field + + +class ImageAnalysis(BaseModel): + description: str = Field(..., description="A detailed description of the image") + objects: list[str] = Field(..., description="List of objects identified in the image") + colors: list[str] = Field(..., description="List of dominant colors in the image") + + +# Initialize the Mistral client with Instructor +client = instructor.from_mistral( + MistralClient(api_key="your-api-key"), + mode=instructor.Mode.MISTRAL_JSON +) + +# Analyze an image using Pixtral model +analysis = client.chat.completions.create( + model="pixtral-12b-2409", + messages=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image? List the objects and colors."}, + Image.from_url("https://example.com/image.jpg") # You can also use Image.from_path() + ] + } + ], + response_model=ImageAnalysis, +) + +print(f"Description: {analysis.description}") +print(f"Objects: {', '.join(analysis.objects)}") +print(f"Colors: {', '.join(analysis.colors)}") + +# Example with multiple images +images = [ + Image.from_url("https://example.com/image1.jpg"), + Image.from_url("https://example.com/image2.jpg"), +] + +analysis = client.chat.completions.create( + model="pixtral-12b-2409", + messages=[ + { + "role": "user", + "content": ["Describe these images"] + images, + } + ], + response_model=ImageAnalysis, +) +``` + ## Types are inferred correctly This was the dream of Instructor but due to the patching of OpenAI, it wasn't possible for me to get typing to work well. Now, with the new client, we can get typing to work well! We've also added a few `create_*` methods to make it easier to create iterables and partials, and to access the original completion. diff --git a/instructor/client_mistral.py b/instructor/client_mistral.py index 5d2e2ca6d..81bec7836 100644 --- a/instructor/client_mistral.py +++ b/instructor/client_mistral.py @@ -1,58 +1,94 @@ # Future imports to ensure compatibility with Python 3.9 from __future__ import annotations +from typing import Any, Dict, Literal, Protocol, overload, TYPE_CHECKING, Callable, TypeVar, Union -from mistralai import Mistral -import instructor -from typing import overload, Any, Literal +if TYPE_CHECKING: + from mistralai.client import MistralClient as Mistral + from mistralai.models.chat_completion import ChatCompletionResponse + from instructor import Instructor, AsyncInstructor + from instructor.patch import patch, PatchedFunctionReturn +else: + from mistralai import Mistral + from instructor import Instructor, AsyncInstructor + from instructor.patch import patch +from instructor.mode import Mode +from instructor.utils import Provider + +T = TypeVar("T") +PatchFunction = Callable[..., Union[T, PatchedFunctionReturn]] + +class MistralChatProtocol(Protocol): + def complete(self, **kwargs: Dict[str, Any]) -> ChatCompletionResponse: ... + async def complete_async(self, **kwargs: Dict[str, Any]) -> ChatCompletionResponse: ... + +class MistralChat: + client: Mistral + complete: Callable[..., ChatCompletionResponse] + complete_async: Callable[..., ChatCompletionResponse] + + def __init__(self, client: Mistral) -> None: + self.client = client + self.complete = client.chat.complete + self.complete_async = client.chat.complete_async @overload def from_mistral( client: Mistral, - mode: instructor.Mode = instructor.Mode.MISTRAL_TOOLS, + mode: Mode = Mode.MISTRAL_JSON, use_async: Literal[False] = False, **kwargs: Any, -) -> instructor.Instructor: ... +) -> Instructor: ... @overload def from_mistral( client: Mistral, - mode: instructor.Mode = instructor.Mode.MISTRAL_TOOLS, + mode: Mode = Mode.MISTRAL_JSON, use_async: Literal[True] = True, **kwargs: Any, -) -> instructor.AsyncInstructor: ... +) -> AsyncInstructor: ... def from_mistral( client: Mistral, - mode: instructor.Mode = instructor.Mode.MISTRAL_TOOLS, + mode: Mode = Mode.MISTRAL_JSON, use_async: bool = False, **kwargs: Any, -) -> instructor.Instructor | instructor.AsyncInstructor: +) -> Instructor | AsyncInstructor: assert mode in { - instructor.Mode.MISTRAL_TOOLS, - }, "Mode be one of {instructor.Mode.MISTRAL_TOOLS}" + Mode.MISTRAL_TOOLS, + Mode.MISTRAL_JSON, + }, f"Mode must be one of {Mode.MISTRAL_TOOLS}, {Mode.MISTRAL_JSON}" assert isinstance( client, Mistral ), "Client must be an instance of mistralai.Mistral" + chat_client = MistralChat(client) + if not use_async: - return instructor.Instructor( + return Instructor( client=client, - create=instructor.patch(create=client.chat.complete, mode=mode), - provider=instructor.Provider.MISTRAL, + create=patch( + create=chat_client.complete, + mode=mode, + provider=Provider.MISTRAL, + ), + provider=Provider.MISTRAL, mode=mode, **kwargs, ) - else: - return instructor.AsyncInstructor( + return AsyncInstructor( client=client, - create=instructor.patch(create=client.chat.complete_async, mode=mode), - provider=instructor.Provider.MISTRAL, + create=patch( + create=chat_client.complete_async, + mode=mode, + provider=Provider.MISTRAL, + ), + provider=Provider.MISTRAL, mode=mode, **kwargs, ) diff --git a/instructor/mode.py b/instructor/mode.py index 66bbfbad3..a056388fe 100644 --- a/instructor/mode.py +++ b/instructor/mode.py @@ -9,6 +9,7 @@ class Mode(enum.Enum): PARALLEL_TOOLS = "parallel_tool_call" TOOLS = "tool_call" MISTRAL_TOOLS = "mistral_tools" + MISTRAL_JSON = "mistral_json" # Add support for Mistral's Pixtral model JSON = "json_mode" JSON_O1 = "json_o1" MD_JSON = "markdown_json_mode" diff --git a/instructor/multimodal.py b/instructor/multimodal.py index 3aff72c7b..17d698f6f 100644 --- a/instructor/multimodal.py +++ b/instructor/multimodal.py @@ -21,6 +21,10 @@ from pydantic import BaseModel, Field # type:ignore from .mode import Mode +# Constants for Mistral image validation +VALID_MISTRAL_MIME_TYPES = {"image/jpeg", "image/png", "image/gif", "image/webp"} +MAX_MISTRAL_IMAGE_SIZE = 10 * 1024 * 1024 # 10MB in bytes + F = TypeVar("F", bound=Callable[..., Any]) K = TypeVar("K", bound=Hashable) V = TypeVar("V") @@ -157,9 +161,14 @@ def from_path(cls, path: Union[str, Path]) -> Image: # noqa: UP007 if path.stat().st_size == 0: raise ValueError("Image file is empty") + if path.stat().st_size > MAX_MISTRAL_IMAGE_SIZE: + raise ValueError(f"Image file size ({path.stat().st_size / 1024 / 1024:.1f}MB) " + f"exceeds Mistral's limit of {MAX_MISTRAL_IMAGE_SIZE / 1024 / 1024:.1f}MB") + media_type, _ = mimetypes.guess_type(str(path)) - if media_type not in VALID_MIME_TYPES: - raise ValueError(f"Unsupported image format: {media_type}") + if media_type not in VALID_MISTRAL_MIME_TYPES: + raise ValueError(f"Unsupported image format: {media_type}. " + f"Supported formats are: {', '.join(VALID_MISTRAL_MIME_TYPES)}") data = base64.b64encode(path.read_bytes()).decode("utf-8") return cls(source=path, media_type=media_type, data=data) @@ -206,8 +215,43 @@ def to_openai(self) -> dict[str, Any]: else: raise ValueError("Image data is missing for base64 encoding.") + def to_mistral(self) -> dict[str, Any]: + """Convert the image to Mistral's API format. + + Returns: + dict[str, Any]: Image data in Mistral's API format, either as a URL or base64 data URI. + + Raises: + ValueError: If the image format is not supported by Mistral or exceeds size limit. + """ + # Validate media type + if self.media_type not in VALID_MISTRAL_MIME_TYPES: + raise ValueError(f"Unsupported image format for Mistral: {self.media_type}. " + f"Supported formats are: {', '.join(VALID_MISTRAL_MIME_TYPES)}") + + # For base64 data, validate size + if self.data: + # Calculate size of decoded base64 data + data_size = len(base64.b64decode(self.data)) + if data_size > MAX_MISTRAL_IMAGE_SIZE: + raise ValueError(f"Image size ({data_size / 1024 / 1024:.1f}MB) exceeds " + f"Mistral's limit of {MAX_MISTRAL_IMAGE_SIZE / 1024 / 1024:.1f}MB") + + if ( + isinstance(self.source, str) + and self.source.startswith(("http://", "https://")) + and not self.is_base64(self.source) + ): + return {"type": "image_url", "image_url": self.source} + elif self.data or self.is_base64(str(self.source)): + data = self.data or str(self.source).split(",", 1)[1] + return { + "type": "image_url", + "image_url": f"data:{self.media_type};base64,{data}" + } + else: + raise ValueError("Image data is missing for base64 encoding.") -class Audio(BaseModel): """Represents an audio that can be loaded from a URL or file path.""" source: str | Path = Field( @@ -301,6 +345,8 @@ def convert_contents( converted_contents.append(content.to_anthropic()) elif mode in {Mode.GEMINI_JSON, Mode.GEMINI_TOOLS}: raise NotImplementedError("Gemini is not supported yet") + elif mode in {Mode.MISTRAL_JSON, Mode.MISTRAL_TOOLS}: + converted_contents.append(content.to_mistral()) else: converted_contents.append(content.to_openai()) else: diff --git a/tests/llm/test_mistral/__init__.py b/tests/llm/test_mistral/__init__.py new file mode 100644 index 000000000..3bb540e13 --- /dev/null +++ b/tests/llm/test_mistral/__init__.py @@ -0,0 +1 @@ +"""Mistral test suite.""" diff --git a/tests/llm/test_mistral/conftest.py b/tests/llm/test_mistral/conftest.py new file mode 100644 index 000000000..c9341e314 --- /dev/null +++ b/tests/llm/test_mistral/conftest.py @@ -0,0 +1,18 @@ +"""Pytest configuration for Mistral tests.""" +import os +import pytest +from mistralai import MistralClient + +def pytest_collection_modifyitems(items): + """Mark tests requiring Mistral API key.""" + for item in items: + if "test_mistral" in str(item.fspath): + item.add_marker(pytest.mark.requires_mistral) + +@pytest.fixture +def client(): + """Create a Mistral client for testing.""" + api_key = os.getenv("MISTRAL_API_KEY") + if not api_key: + pytest.skip("MISTRAL_API_KEY environment variable not set") + return MistralClient(api_key=api_key) diff --git a/tests/llm/test_mistral/test_multimodal.py b/tests/llm/test_mistral/test_multimodal.py new file mode 100644 index 000000000..0251291f6 --- /dev/null +++ b/tests/llm/test_mistral/test_multimodal.py @@ -0,0 +1,145 @@ +import os +import pytest +from pathlib import Path +from instructor.multimodal import Image +import instructor +from pydantic import Field, BaseModel +from itertools import product +import requests +from unittest.mock import patch, MagicMock +from .util import models, modes + +# Skip all tests if Mistral API key is not available +pytestmark = pytest.mark.requires_mistral + +# Test image URLs with different formats and sizes +test_images = { + "jpeg": "https://retail.degroot-inc.com/wp-content/uploads/2024/01/AS_Blueberry_Patriot_1-605x605.jpg", + "png": "https://upload.wikimedia.org/wikipedia/commons/thumb/c/c3/Python-logo-notext.svg/800px-Python-logo-notext.svg.png", + "webp": "https://www.gstatic.com/webp/gallery/1.webp", + "gif": "https://upload.wikimedia.org/wikipedia/commons/2/2c/Rotating_earth_%28large%29.gif", +} + +class ImageDescription(BaseModel): + objects: list[str] = Field(..., description="The objects in the image") + scene: str = Field(..., description="The scene of the image") + colors: list[str] = Field(..., description="The colors in the image") + +@pytest.mark.parametrize("model, mode", product(models, modes)) +def test_multimodal_image_description(model, mode, client): + """Test basic image description with Mistral.""" + client = instructor.from_mistral(client, mode=mode) + response = client.chat.completions.create( + model=model, + response_model=ImageDescription, + messages=[ + { + "role": "system", + "content": "You are a helpful assistant that can describe images", + }, + { + "role": "user", + "content": [ + "What is this?", + Image.from_url(test_images["jpeg"]), + ], + }, + ], + ) + + assert isinstance(response, ImageDescription) + assert len(response.objects) > 0 + assert response.scene != "" + assert len(response.colors) > 0 + +def test_image_size_validation(tmp_path: Path): + """Test that images over 10MB are rejected.""" + large_image = tmp_path / "large_image.jpg" + # Create a file slightly over 10MB + with open(large_image, "wb") as f: + f.write(b"0" * (10 * 1024 * 1024 + 1)) + + with pytest.raises(ValueError, match="Image size exceeds 10MB limit"): + Image.from_path(large_image).to_mistral() + +def test_image_format_validation(): + """Test validation of supported image formats.""" + # Test valid formats + for fmt, url in test_images.items(): + if fmt != "gif": # Skip animated GIF + image = Image.from_url(url) + assert image.to_mistral() is not None + + # Test invalid format + with pytest.raises(ValueError, match="Unsupported image format"): + Image(source="test.bmp", media_type="image/bmp", data="fake_data").to_mistral() + +@pytest.mark.parametrize("model, mode", product(models, modes)) +def test_multiple_images(model, mode, client): + """Test handling multiple images in a single request.""" + client = instructor.from_mistral(client, mode=mode) + images = [Image.from_url(url) for url in list(test_images.values())[:8]] + + response = client.chat.completions.create( + model=model, + response_model=ImageDescription, + messages=[ + { + "role": "user", + "content": ["Describe these images"] + images, + }, + ], + ) + + assert isinstance(response, ImageDescription) + + # Test exceeding image limit + with pytest.raises(ValueError, match="Maximum of 8 images allowed"): + too_many_images = images * 2 # 16 images + client.chat.completions.create( + model=model, + response_model=ImageDescription, + messages=[ + { + "role": "user", + "content": ["Describe these images"] + too_many_images, + }, + ], + ) + +@pytest.mark.parametrize("model, mode", product(models, modes)) +def test_image_downscaling(model, mode, client): + """Test automatic downscaling of large images.""" + large_image_url = "https://example.com/large_image.jpg" # Mock URL + + # Mock a large image response + with patch("requests.get") as mock_get: + mock_response = MagicMock() + mock_response.content = b"0" * 1024 * 1024 # 1MB of data + mock_response.headers = {"content-type": "image/jpeg"} + mock_get.return_value = mock_response + + + image = Image.from_url(large_image_url) + mistral_format = image.to_mistral() + + # Verify image was processed for downscaling + assert mistral_format is not None + # Note: Actual downscaling verification would require PIL/image processing + +def test_base64_image_handling(base64_image): + """Test handling of base64-encoded images.""" + image = Image( + source="data:image/jpeg;base64," + base64_image, + media_type="image/jpeg", + data=base64_image + ) + + mistral_format = image.to_mistral() + assert mistral_format["type"] == "image" + assert mistral_format["data"].startswith("data:image/jpeg;base64,") + +@pytest.fixture +def base64_image(): + """Fixture providing a valid base64-encoded test image.""" + return "R0lGODlhAQABAIAAAP///wAAACH5BAEAAAAALAAAAAABAAEAAAICRAEAOw==" # 1x1 GIF diff --git a/tests/llm/test_mistral/util.py b/tests/llm/test_mistral/util.py new file mode 100644 index 000000000..a57c98266 --- /dev/null +++ b/tests/llm/test_mistral/util.py @@ -0,0 +1,6 @@ +"""Test utilities for Mistral tests.""" + +from instructor.mode import Mode + +models = ["pixtral-12b-2409"] +modes = [Mode.MISTRAL_JSON, Mode.MISTRAL_TOOLS] From 5a6064b75bffc87da703fa0374698e58d3139b75 Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Sun, 15 Dec 2024 20:29:29 +0000 Subject: [PATCH 02/13] docs: fix documentation links and add missing sections Co-Authored-By: jason@jxnl.co --- conftest.py | 12 ++ docs/examples/bulk_classification.md | 79 +++++++++++ docs/examples/index.md | 2 +- docs/integrations/mistral.md | 62 ++++++++- instructor/__init__.py | 86 +++++++----- instructor/client_mistral.py | 85 ++++-------- instructor/multimodal.py | 159 +++++++--------------- mkdocs.yml | 2 +- muffin.jpg | Bin 0 -> 153434 bytes tests/llm/test_mistral/conftest.py | 5 +- tests/llm/test_mistral/test_multimodal.py | 36 +++-- tests/llm/test_openai/test_multimodal.py | 62 ++------- 12 files changed, 312 insertions(+), 278 deletions(-) create mode 100644 conftest.py create mode 100644 muffin.jpg diff --git a/conftest.py b/conftest.py new file mode 100644 index 000000000..f03c63dcf --- /dev/null +++ b/conftest.py @@ -0,0 +1,12 @@ +import pytest # noqa: F401 +from _pytest.config import Config + +def pytest_configure(config: Config) -> None: + config.addinivalue_line( + "markers", + "requires_openai: mark test as requiring OpenAI API credentials", + ) + config.addinivalue_line( + "markers", + "requires_mistral: mark test as requiring Mistral API credentials", + ) diff --git a/docs/examples/bulk_classification.md b/docs/examples/bulk_classification.md index 63d0e147b..e021dcc7e 100644 --- a/docs/examples/bulk_classification.md +++ b/docs/examples/bulk_classification.md @@ -268,6 +268,85 @@ async def tag_request(request: TagRequest) -> TagResponse: predictions=predictions, ) +## Working with DataFrames + +When working with large datasets, it's often convenient to use pandas DataFrames. Here's how you can integrate this classification system with pandas: + +```python +import pandas as pd + +async def classify_dataframe(df: pd.DataFrame, text_column: str, tags: List[TagWithInstructions]) -> pd.DataFrame: + request = TagRequest( + texts=df[text_column].tolist(), + tags=tags + ) + response = await tag_request(request) + df['predicted_tag'] = [pred.name for pred in response.predictions] + return df +``` + +## Streaming Responses + +For real-time processing, you can stream responses as they become available: + +```python +async def stream_classifications(texts: List[str], tags: List[TagWithInstructions]): + async def process_single(text: str): + prediction = await tag_single_request(text, tags) + return {"text": text, "prediction": prediction} + + tasks = [process_single(text) for text in texts] + for completed in asyncio.as_completed(tasks): + yield await completed +``` + +## Single-Label Classification + +For simple classification tasks where each text belongs to exactly one category: + +```python +async def classify_single_label(text: str, tags: List[TagWithInstructions]) -> Tag: + return await tag_single_request(text, tags) +``` + +## Multi-Label Classification + +For cases where texts might belong to multiple categories: + +```python +class MultiLabelTag(BaseModel): + tags: List[Tag] + + @model_validator(mode="after") + def validate_tags(self, info: ValidationInfo): + context = info.context + if context and context.get("tags"): + valid_tags = context["tags"] + for tag in self.tags: + assert tag.id in {t.id for t in valid_tags}, f"Tag ID {tag.id} not found" + assert tag.name in {t.name for t in valid_tags}, f"Tag name {tag.name} not found" + return self + +async def classify_multi_label(text: str, tags: List[TagWithInstructions]) -> List[Tag]: + response = await client.chat.completions.create( + model="gpt-4", + messages=[ + {"role": "system", "content": "You are a multi-label classification system."}, + {"role": "user", "content": f"Classify this text into multiple categories: {text}"}, + {"role": "user", "content": f"Available categories: {', '.join(t.name for t in tags)}"}, + ], + response_model=MultiLabelTag, + validation_context={"tags": tags}, + ) + return response.tags +``` + +# Example Usage + +```python +# PLACEHOLDER: existing example code +``` + # <%hide%> tags = [ diff --git a/docs/examples/index.md b/docs/examples/index.md index 1324cfe09..9f58708d3 100644 --- a/docs/examples/index.md +++ b/docs/examples/index.md @@ -37,7 +37,7 @@ Welcome to our collection of cookbooks showcasing the power of structured output 26. [Action Items Extraction](action_items.md): Extract structured action items and tasks from text content. 27. [Batch Classification with LangSmith](batch_classification_langsmith.md): Efficiently classify content in batches using LangSmith integration. 28. [Contact Information Extraction](extract_contact_info.md): Extract structured contact details from unstructured text. -29. [Knowledge Graph Building](building_knowledge_graph.md): Create and manipulate knowledge graphs from textual data. +29. [Knowledge Graph Building](building_knowledge_graphs.md): Create and manipulate knowledge graphs from textual data. 30. [Multiple Classification Tasks](multiple_classification.md): Handle multiple classification categories simultaneously. 31. [Pandas DataFrame Integration](pandas_df.md): Work with structured data using Pandas DataFrames. 32. [Partial Response Streaming](partial_streaming.md): Stream partial results for real-time processing. diff --git a/docs/integrations/mistral.md b/docs/integrations/mistral.md index 37f2b9d04..d137956be 100644 --- a/docs/integrations/mistral.md +++ b/docs/integrations/mistral.md @@ -2,21 +2,24 @@ draft: False date: 2024-02-26 title: "Structured outputs with Mistral, a complete guide w/ instructor" -description: "Complete guide to using Instructor with Mistral. Learn how to generate structured, type-safe outputs with Mistral." +description: "Complete guide to using Instructor with Mistral. Learn how to generate structured, type-safe outputs with Mistral, including multimodal support with Pixtral." slug: mistral tags: - patching + - multimodal authors: - shanktt --- # Structured outputs with Mistral, a complete guide w/ instructor -This guide demonstrates how to use Mistral with Instructor to generate structured outputs. You'll learn how to use function calling with Mistral Large to create type-safe responses. +This guide demonstrates how to use Mistral with Instructor to generate structured outputs. You'll learn how to use function calling with Mistral Large to create type-safe responses, including support for multimodal inputs with Pixtral. -Mistral Large is the flagship model from Mistral AI, supporting 32k context windows and functional calling abilities. Mistral Large's addition of [function calling](https://docs.mistral.ai/guides/function-calling/) makes it possible to obtain structured outputs using JSON schema. +Mistral Large is the flagship model from Mistral AI, supporting 32k context windows and functional calling abilities. Mistral Large's addition of [function calling](https://docs.mistral.ai/guides/function-calling/) makes it possible to obtain structured outputs using JSON schema. With Pixtral, you can now also process images alongside text inputs. -By the end of this blog post, you will learn how to effectively utilize Instructor with Mistral Large. +By the end of this blog post, you will learn how to effectively utilize Instructor with Mistral Large and Pixtral for both text and image processing tasks. + +## Text Processing with Mistral Large ```python import os @@ -47,5 +50,56 @@ resp = instructor_client.messages.create( ) print(resp) +``` + +## Multimodal Processing with Pixtral + +```python +import os +from pydantic import BaseModel +from mistralai import Mistral +from instructor import from_mistral, Mode +from instructor.multimodal import Image + +class ImageDescription(BaseModel): + description: str + objects: list[str] + colors: list[str] + +# Initialize the client with Pixtral model +client = Mistral(api_key=os.environ.get("MISTRAL_API_KEY")) +instructor_client = from_mistral( + client=client, + model="pixtral", # Use Pixtral for multimodal capabilities + mode=Mode.MISTRAL_JSON, + max_tokens=1000, +) + +# Load and process an image +image = Image.from_path("path/to/your/image.jpg") +resp = instructor_client.messages.create( + response_model=ImageDescription, + messages=[ + { + "role": "user", + "content": [ + "Describe this image in detail, including the main objects and colors present.", + image + ] + } + ], + temperature=0, +) +print(resp) ``` + +## Image Requirements and Validation + +When working with images in Pixtral: +- Supported formats: JPEG, PNG, GIF, WEBP +- Maximum image size: 20MB +- Images larger than the size limit will be automatically resized +- Base64 and file paths are supported input formats + +The `Image` class handles all validation and preprocessing automatically, ensuring your images meet Mistral's requirements. diff --git a/instructor/__init__.py b/instructor/__init__.py index efd503c22..fd0b8eed5 100644 --- a/instructor/__init__.py +++ b/instructor/__init__.py @@ -1,9 +1,11 @@ +from __future__ import annotations import importlib.util +from typing import Callable, Union, TypeVar from .mode import Mode from .process_response import handle_response_model from .distil import FinetuneFormat, Instructions -from .multimodal import Image, Audio +from .multimodal import Image from .dsl import ( CitationMixin, Maybe, @@ -23,10 +25,17 @@ Provider, ) -__all__ = [ +T = TypeVar("T") + +# Type aliases for client functions +ClientFunction = Union[ + Callable[..., Union[Instructor, AsyncInstructor]], + None +] + +__all__: list[str] = [ "Instructor", "Image", - "Audio", "from_openai", "from_litellm", "AsyncInstructor", @@ -48,51 +57,66 @@ "handle_response_model", ] - +def _extend_all(new_items: list[str]) -> None: + global __all__ + __all__ = __all__ + new_items + +# Initialize optional client functions with explicit types +from_anthropic: ClientFunction = None +from_gemini: ClientFunction = None +from_fireworks: ClientFunction = None +from_cerebras: ClientFunction = None +from_groq: ClientFunction = None +from_mistral: ClientFunction = None +from_cohere: ClientFunction = None +from_vertexai: ClientFunction = None +from_writer: ClientFunction = None + +# Import optional clients if importlib.util.find_spec("anthropic") is not None: - from .client_anthropic import from_anthropic - - __all__ += ["from_anthropic"] + from .client_anthropic import from_anthropic as _from_anthropic + globals()["from_anthropic"] = _from_anthropic + _extend_all(["from_anthropic"]) if ( importlib.util.find_spec("google") and importlib.util.find_spec("google.generativeai") is not None ): - from .client_gemini import from_gemini - - __all__ += ["from_gemini"] + from .client_gemini import from_gemini as _from_gemini + globals()["from_gemini"] = _from_gemini + _extend_all(["from_gemini"]) if importlib.util.find_spec("fireworks") is not None: - from .client_fireworks import from_fireworks - - __all__ += ["from_fireworks"] + from .client_fireworks import from_fireworks as _from_fireworks + globals()["from_fireworks"] = _from_fireworks + _extend_all(["from_fireworks"]) if importlib.util.find_spec("cerebras") is not None: - from .client_cerebras import from_cerebras - - __all__ += ["from_cerebras"] + from .client_cerebras import from_cerebras as _from_cerebras + globals()["from_cerebras"] = _from_cerebras + _extend_all(["from_cerebras"]) if importlib.util.find_spec("groq") is not None: - from .client_groq import from_groq - - __all__ += ["from_groq"] + from .client_groq import from_groq as _from_groq + globals()["from_groq"] = _from_groq + _extend_all(["from_groq"]) if importlib.util.find_spec("mistralai") is not None: - from .client_mistral import from_mistral - - __all__ += ["from_mistral"] + from .client_mistral import from_mistral as _from_mistral + globals()["from_mistral"] = _from_mistral + _extend_all(["from_mistral"]) if importlib.util.find_spec("cohere") is not None: - from .client_cohere import from_cohere - - __all__ += ["from_cohere"] + from .client_cohere import from_cohere as _from_cohere + globals()["from_cohere"] = _from_cohere + _extend_all(["from_cohere"]) if all(importlib.util.find_spec(pkg) for pkg in ("vertexai", "jsonref")): - from .client_vertexai import from_vertexai - - __all__ += ["from_vertexai"] + from .client_vertexai import from_vertexai as _from_vertexai + globals()["from_vertexai"] = _from_vertexai + _extend_all(["from_vertexai"]) if importlib.util.find_spec("writerai") is not None: - from .client_writer import from_writer - - __all__ += ["from_writer"] \ No newline at end of file + from .client_writer import from_writer as _from_writer + globals()["from_writer"] = _from_writer + _extend_all(["from_writer"]) diff --git a/instructor/client_mistral.py b/instructor/client_mistral.py index 81bec7836..27f7d4ed9 100644 --- a/instructor/client_mistral.py +++ b/instructor/client_mistral.py @@ -1,94 +1,65 @@ -# Future imports to ensure compatibility with Python 3.9 +# type: ignore from __future__ import annotations -from typing import Any, Dict, Literal, Protocol, overload, TYPE_CHECKING, Callable, TypeVar, Union +from typing import Any, Literal, overload, TypeVar -if TYPE_CHECKING: - from mistralai.client import MistralClient as Mistral - from mistralai.models.chat_completion import ChatCompletionResponse - from instructor import Instructor, AsyncInstructor - from instructor.patch import patch, PatchedFunctionReturn -else: - from mistralai import Mistral - from instructor import Instructor, AsyncInstructor - from instructor.patch import patch +from mistralai.client import MistralClient +import instructor from instructor.mode import Mode from instructor.utils import Provider T = TypeVar("T") -PatchFunction = Callable[..., Union[T, PatchedFunctionReturn]] - -class MistralChatProtocol(Protocol): - def complete(self, **kwargs: Dict[str, Any]) -> ChatCompletionResponse: ... - async def complete_async(self, **kwargs: Dict[str, Any]) -> ChatCompletionResponse: ... - -class MistralChat: - client: Mistral - complete: Callable[..., ChatCompletionResponse] - complete_async: Callable[..., ChatCompletionResponse] - - def __init__(self, client: Mistral) -> None: - self.client = client - self.complete = client.chat.complete - self.complete_async = client.chat.complete_async @overload def from_mistral( - client: Mistral, + client: MistralClient, mode: Mode = Mode.MISTRAL_JSON, - use_async: Literal[False] = False, + use_async: Literal[True] = True, **kwargs: Any, -) -> Instructor: ... +) -> instructor.AsyncInstructor: ... @overload def from_mistral( - client: Mistral, + client: MistralClient, mode: Mode = Mode.MISTRAL_JSON, - use_async: Literal[True] = True, + use_async: Literal[False] = False, **kwargs: Any, -) -> AsyncInstructor: ... +) -> instructor.Instructor: ... def from_mistral( - client: Mistral, + client: MistralClient, mode: Mode = Mode.MISTRAL_JSON, use_async: bool = False, **kwargs: Any, -) -> Instructor | AsyncInstructor: +) -> instructor.AsyncInstructor | instructor.Instructor: + """Create a patched Mistral client.""" assert mode in { Mode.MISTRAL_TOOLS, Mode.MISTRAL_JSON, }, f"Mode must be one of {Mode.MISTRAL_TOOLS}, {Mode.MISTRAL_JSON}" assert isinstance( - client, Mistral - ), "Client must be an instance of mistralai.Mistral" + client, MistralClient + ), "Client must be an instance of mistralai.MistralClient" - chat_client = MistralChat(client) - - if not use_async: - return Instructor( + if use_async: + create = client.chat.create_async + return instructor.AsyncInstructor( client=client, - create=patch( - create=chat_client.complete, - mode=mode, - provider=Provider.MISTRAL, - ), - provider=Provider.MISTRAL, - mode=mode, - **kwargs, - ) - else: - return AsyncInstructor( - client=client, - create=patch( - create=chat_client.complete_async, - mode=mode, - provider=Provider.MISTRAL, - ), + create=instructor.patch(create=create, mode=mode), provider=Provider.MISTRAL, mode=mode, **kwargs, ) + + create = client.chat.create + return instructor.Instructor( + client=client, + create=instructor.patch(create=create, mode=mode), + provider=Provider.MISTRAL, + mode=mode, + **kwargs, + ) diff --git a/instructor/multimodal.py b/instructor/multimodal.py index 17d698f6f..9ab7a3f1a 100644 --- a/instructor/multimodal.py +++ b/instructor/multimodal.py @@ -18,7 +18,8 @@ from urllib.parse import urlparse import mimetypes import requests -from pydantic import BaseModel, Field # type:ignore +from pydantic import BaseModel +from pydantic.fields import Field from .mode import Mode # Constants for Mistral image validation @@ -81,7 +82,7 @@ def autodetect(cls, source: Union[str, Path]) -> Image: # noqa: UP007 @classmethod def autodetect_safely( - cls, source: str | Path + cls, source: Union[str, Path] ) -> Union[Image, str]: # noqa: UP007 """Safely attempt to autodetect an image from a source string or path. @@ -242,12 +243,12 @@ def to_mistral(self) -> dict[str, Any]: and self.source.startswith(("http://", "https://")) and not self.is_base64(self.source) ): - return {"type": "image_url", "image_url": self.source} + return {"type": "image_url", "url": self.source} elif self.data or self.is_base64(str(self.source)): data = self.data or str(self.source).split(",", 1)[1] return { "type": "image_url", - "image_url": f"data:{self.media_type};base64,{data}" + "data": f"data:{self.media_type};base64,{data}" } else: raise ValueError("Image data is missing for base64 encoding.") @@ -257,38 +258,13 @@ def to_mistral(self) -> dict[str, Any]: source: str | Path = Field( description="URL or file path of the audio" ) # noqa: UP007 - data: Union[str, None] = Field( # noqa: UP007 + data: str | None = Field( None, description="Base64 encoded audio data", repr=False ) - @classmethod - def from_url(cls, url: str) -> Audio: - """Create an Audio instance from a URL.""" - assert url.endswith(".wav"), "Audio must be in WAV format" - - response = requests.get(url) - data = base64.b64encode(response.content).decode("utf-8") - return cls(source=url, data=data) - - @classmethod - def from_path(cls, path: Union[str, Path]) -> Audio: # noqa: UP007 - """Create an Audio instance from a file path.""" - path = Path(path) - assert path.is_file(), f"Audio file not found: {path}" - assert path.suffix.lower() == ".wav", "Audio must be in WAV format" - - data = base64.b64encode(path.read_bytes()).decode("utf-8") - return cls(source=str(path), data=data) - - def to_openai(self) -> dict[str, Any]: - """Convert the Audio instance to OpenAI's API format.""" - return { - "type": "input_audio", - "input_audio": {"data": self.data, "format": "wav"}, - } + # PLACEHOLDER: Image class methods and properties above - def to_anthropic(self) -> dict[str, Any]: - raise NotImplementedError("Anthropic is not supported yet") + # PLACEHOLDER: ImageWithCacheControl class below class ImageWithCacheControl(Image): @@ -319,28 +295,14 @@ def to_anthropic(self) -> dict[str, Any]: def convert_contents( - contents: Union[ # noqa: UP007 - str, - dict[str, Any], - Image, - Audio, - list[Union[str, dict[str, Any], Image, Audio]], # noqa: UP007 - ], - mode: Mode, -) -> Union[str, list[dict[str, Any]]]: # noqa: UP007 - """Convert content items to the appropriate format based on the specified mode.""" - if isinstance(contents, str): - return contents - if isinstance(contents, (Image, Audio)) or isinstance(contents, dict): - contents = [contents] - - converted_contents: list[dict[str, Union[str, Image]]] = [] # noqa: UP007 + contents: list[Union[str, Image]], mode: Mode # noqa: UP007 +) -> list[Union[str, dict[str, Any]]]: # noqa: UP007 + """Convert contents to the appropriate format for the given mode.""" + converted_contents: list[Union[str, dict[str, Any]]] = [] # noqa: UP007 for content in contents: if isinstance(content, str): - converted_contents.append({"type": "text", "text": content}) - elif isinstance(content, dict): converted_contents.append(content) - elif isinstance(content, (Image, Audio)): + elif isinstance(content, Image): if mode in {Mode.ANTHROPIC_JSON, Mode.ANTHROPIC_TOOLS}: converted_contents.append(content.to_anthropic()) elif mode in {Mode.GEMINI_JSON, Mode.GEMINI_TOOLS}: @@ -355,64 +317,41 @@ def convert_contents( def convert_messages( - messages: list[ - dict[ - str, - Union[ # noqa: UP007 - str, - dict[str, Any], - Image, - Audio, - list[Union[str, dict[str, Any], Image, Audio]], # noqa: UP007 - ], - ] - ], + messages: list[dict[str, Any]], mode: Mode, - autodetect_images: bool = False, + model: Optional[str] = None, # Reserved for future provider-specific handling ) -> list[dict[str, Any]]: - """Convert messages to the appropriate format based on the specified mode.""" - converted_messages = [] - - def is_image_params(x: Any) -> bool: - return isinstance(x, dict) and x.get("type") == "image" and "source" in x # type: ignore - - for message in messages: - if "type" in message: - if message["type"] in {"audio", "image"}: - converted_messages.append(message) # type: ignore - else: - raise ValueError(f"Unsupported message type: {message['type']}") - role = message["role"] - content = message["content"] or [] - other_kwargs = { - k: v for k, v in message.items() if k not in ["role", "content", "type"] - } - if autodetect_images: - if isinstance(content, list): - new_content: list[str | dict[str, Any] | Image | Audio] = ( - [] - ) # noqa: UP007 - for item in content: - if isinstance(item, str): - new_content.append(Image.autodetect_safely(item)) - elif is_image_params(item): - new_content.append( - ImageWithCacheControl.from_image_params( - cast(ImageParams, item) - ) - ) - else: - new_content.append(item) - content = new_content - elif isinstance(content, str): - content = Image.autodetect_safely(content) - elif is_image_params(content): - content = ImageWithCacheControl.from_image_params( - cast(ImageParams, content) - ) - if isinstance(content, str): - converted_messages.append({"role": role, "content": content, **other_kwargs}) # type: ignore - else: - converted_content = convert_contents(content, mode) - converted_messages.append({"role": role, "content": converted_content, **other_kwargs}) # type: ignore - return converted_messages # type: ignore + """Convert messages to the appropriate format for the given mode. + + Args: + messages: List of message dictionaries to convert + mode: The mode to convert messages for (e.g. MISTRAL_JSON) + model: Optional model name for provider-specific handling (reserved for future use) + + Returns: + List of converted message dictionaries + """ + if mode == Mode.MISTRAL_JSON: + converted_messages: list[dict[str, Any]] = [] + for message in messages: + if not isinstance(message.get("content"), list): + converted_messages.append(message) + continue + + content_list: list[dict[str, Any]] = [] + for item in cast(list[Union[str, Image, dict[str, Any]]], message["content"]): # noqa: UP007 + if isinstance(item, str): + content_list.append({"type": "text", "text": item}) + elif isinstance(item, Image): + content_list.append(item.to_mistral()) + else: + content_list.append(item) # item is already dict[str, Any] + + converted_message = message.copy() + converted_message["content"] = content_list + converted_messages.append(converted_message) + + return converted_messages + + # Return original messages for other modes + return messages diff --git a/mkdocs.yml b/mkdocs.yml index 75d5251e3..faf92e7be 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -167,7 +167,7 @@ nav: - "Action Items Extraction": 'examples/action_items.md' - "Batch Classification with LangSmith": 'examples/batch_classification_langsmith.md' - "Contact Information Extraction": 'examples/extract_contact_info.md' - - "Knowledge Graph Building": 'examples/building_knowledge_graph.md' + - "Knowledge Graph Building": 'examples/building_knowledge_graphs.md' - "Multiple Classification Tasks": 'examples/multiple_classification.md' - "Pandas DataFrame Integration": 'examples/pandas_df.md' - "Partial Response Streaming": 'examples/partial_streaming.md' diff --git a/muffin.jpg b/muffin.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e4e2b8617a7602b655a9a18369cd3d58cd20859 GIT binary patch literal 153434 zcmb5UWmFqo7e5-@DXzg2916j;B|vZw4lNSg9SVJl7Y!j0916jM6oM3~AO(`(R=mZb zJV0C8Qtxwl|M%Xt?$^6N%+3NmU6N@^NvYBCBcG5|RRB^4_*n}{L} zyQrRxCr3nz7^JX{QweI@J<0{rxATfzq~%sl-TMeeWAS63yba)KOX4a~h#PhM|C{~) zHFom|AfvqbCpSXXn`M!cQIeBUkW*4o-Ut8yGICZ5${S1UdNx!Xq7Y9pXha=nN@4d$ z+tI~6kP@}mr~ftqj1)IoSt(cncK|<$P#)_{4v{0;1SqfdT_?L7rtXlV>hE9&nySd)QhvT~PoU@Y z?PQn&P%AR08$9L#VUc>=+Z^b!zQK+^7jJSg;$JN-+p+b^--wXBO>=CO#cGC96M z97!w@E|U2SLu<;TPz{8R`vChN=#ucm2GFmv49R#H}xxDN$^ zNG0uTZ7T7^zzF?R3TPr)Ptjs?2o@~iQuqP>AfL~-{^_vk+LQ*T*gZ(FF)x;laRdK~ zqdX;z`oCj}Cs>#_<(nzA*N;f*$0BVZrujT|!4RG+@}oe=;@W6Fja`>=v|CcStM8Br zqZFPKYCen7R71$wl$RKbz??4mJ~{_Zi7b<)F>axx8>z2?=D z|D{R%kc0hHz1CTJMxLlG8xi$Sh{xIElGV63n?8^$JnAT0_CToMSQVN z4}%h*A6wFI)3c=P*|RJITkKTHoWw~i`P)>qf%NGhPlGJu1bPCQ(_0oi=dU8oT2Ic+ z;}{c^v8yCSdRGwzre2zo@w!cNQyxbUuuQ^Wor37b@c9MMI$Dr}wKgt?8dbF`>2jNo zZ}F`PUT07OQ>y$!#VcP%GrzyQ+hBoO-7rUYo!9R@72$evdtg^iK2X%Q8~o;%6}29U z%4QJ%Zr}b=vq@peRno;l8O5Ko#`g9Op#l3*~RR1zRLynSPjYn=8mO2GR!M3S5|fFvl>ep88?Io_McYb`jB$C zgZ?Z)QXhP`Wp(1NvD=Y>n1nRTqKMwUA=@`7q_s@Z1w#Z1|l-gW;;3 z{IX)<4+L4s<>!%`Gx=W`M|oZV+LA&fwNv6pX5BozMO;V>uO$cHsB4W~>R{S)M&s9! zuGRPBU3nyqtAW|6pYR$2jyAmV5l57G5!Be96nzUNX~?eCEL=&8=gaR~@7?%?WZ;om zCDNb|JRg*glqb{|(XM9&;mTnwlppavg~Q}LBkyS4EX|?EU7bo9Fd* zjn!Tq_PV`Fr0cLXkbb8lPNt}JwqLHY`|i^;Cp*b9kn8V zNw=x-8Sd^8>qNhjf##IWz&IHdi)7q56@u28a1jhGLh}ECj~0ESI)umrEX#l-l$(k+ z;J8+H8ZJ__3eBpiqy3yg8^(L&BBFjMDy(9?XD6JqKi3-E>nO9eDz8_k{m0bWnBJ={ zn)%?YPIkt}1)RC;>FglDyXWj{;$Xx6Q_@;_TUtcmM_FqdsoXzz+#UCOQ&4Xqe20<~si#6KM9GDLbTaHuS(Fckl( zMNGNU$Id1&r$UmkxzYBrHtYZDC<&=t8kg&JbObr(> z@;DW(6$0Gar%dRO%Bf9kyMCQuLge=rj&k>mWKA8pG#Rl~>@na(y|1)s^Zh{`7lmn* zhQkMZ&b9(_ct#k>q=WgHL$vru7(jUI0RAZe^R5&y!-RTFHU0FE6J+`W5IE>#LS7$i z6IDiC#YtK@g8gQZQMuY>$IKO7hZ#2px;~C#pF5hKNAK_}-aT3~t88eA%gw7D&6wS2 zinO`Q$vN6x<9MG7b#gdMv|Ldu9oat~$dk|15vT`#PTBaXTo69<_>;-XX)9HM3jSTt zOO0KhlTU8uLoU9AS;&-rlmyQM+KCZl*(G{lm{$fNG17Y1kIG35T~pQ!7{FCL;0Q?G zC2j~yE$q6&CWs{HrYa(x!%SF9|MWyo_$Y9yyv{cH8rh)E>}bS=m?=soNhuva8*>oB zfhX-yjAJ)`<;@o1ghR#9;hfG)TNH#jwj+#4EmzWJcb!jVTG$+@;tcA%~f`)+8TRh_6ve5{qupk>~ivv}wIarC=Ye`KAq zZ;ddHVLdvTI~-F4&Yu|dXnd}hQkwVs4G^^*jC5f}UNEc2|8{W9$onkLD)3e~LByic zWKdx>Tk&KRj?a0roRy;Q@E@KG^?UOR zWt`~Ni9ew+W9TV&==8Pl_G z<$#(xMknpX-B538o{D{tWA%+qbyockp#1urI`3sQUb!kV*C)-3gj3i?vpn}{rNc1; zolmo2Ion|&gq8q7LyLw;Em#U>uGv$={o!qxY<0DC_C}t3DjPcnAv;VXI?0kQSxv zIusLOGJ@7ou91!Efm~$b#1EvbYIDBYh)jSp%h-uGIEcD~ZqMg8NH}Xx34gWdB9)

=if394E6j@5MaZbr3@ULU-UnS05TmAse!TQ+W0T5 zZn4V)>E}4q^M_S3eTk3bT-FcO1Hb{x;JdS1l2OwP^>5#*yNqa@_?N1x=6|Be1OE8Y zh)ALloU_sKt`-K=&`*JM=j$GAbYcUHqjI~eBD6=AL zFp(w_R0HC8`_z1;R^}QkfVA_?O4$coO}ZBUX+i7cv!j=t^D%d)9m+QScV~oR<(BfbeCh0Nq#h97EL+|arTgn7mOZoV~_>6h=OX#rm;<-DiNI#Wk4TkI|IF|;It#DjlGSmo%Ybr_NtY1jW>lBErI0cUYL5uRYt5v;2M`mCD*_g`cOGB z@(QCGa+*S}TB0uF0oI_8j@gM{Ew!*iLqz;R4fVJs_s%3gk<6>ccQ#_e06fF|v2e_8 zTKJ7}{)B6Vx^q**_-1~&!-=42VVci{K}?(;1+%H&dwKQqr)z=wpz1BUy<26Gqi!j> zeak9y5r(|?5#iY3==Shf?>RmD+v~kAt?r(zOsXA06no`un}R-Wfe3p*Z*#Zmx<8%m zuSb@S@NBz8$vn^yj1RJh0gDzpz3lv#>sHo3l5w^DoNW_}4P1{u+8${!8FVtf_&8ArJ#`J2 z4U0FcJ}QnHeTMSJY%Iz35N$^;rZBUlAG=%m;p!eq5r^uHDkJN;fgfTx@Zsmlc+*6L zzs;_v&#L=D+P!nmPCAot(Il>aW3JRv2H#cabkGMV?x>7u0LD`+kiNs5vU&oa-88WY z>=fR2zp^ogJ2(#uaNLf%IxK7R7NRZIroQS00g!(pgIR}P_Z*ijq8uVGtZ?2Lm;96e z1m$Y?RN=%iY%fAqnwr|F2WlMtXaf_x{ETdtIjzuK=;47U)KXk}8cb*TSGH6gIDT4^ z!GX6ZNw@jYgDK@m)1yG;tyzWX-jldMTRMWBU}<3OQ5p73wd|Q-u7kFAPJ(5^!>0;G zbKF+UHLtCtoNHbz-{CKX@xaZOsvo26>tT3|(O+ z@`3rB3->Df%ye8$XhLngCl5!ezH2$4pJjP^Fin#GyPvt@Q-uH5e5ajEhupk9qtOZW ze`j|D_v}#wN5oAemqF_%-JTgo<)BYRa%KV^87N0sr_0+CU`G!6&_;|=F}`K|`6@cA zf!CI3ARpK!pV!wc?F=`c%W5wUQXtq8p``WP6zE2@$kPUS2|#UdH`rqEbx~^Zf-$|T z_aAyb`E{j^$dfwj5)YRnK|^PzjnHC2NFzF1mpm*c)}|okiEf}Xle@ky2c94lTJ;kL z=DwT@kx!}n04%~hkvWW28C_|u>{sBbVr>u)-4GaI-XneHW}dIn3QiuXL#KTcD4x{< z4XghlUGa=9{Pyq+2+cN9URmWVm7Xn}{MIX|pxOSpx~mu(J(u$0o^*MC`Gu$UA;?UW zPTr~|QDBkXDWD7#b_dk7Qf7XxNbpE2%A*Pt5zTMw?0jf6K4~Jq$Cv!wDEHPE?YVuST!q~hxxGS4>Y_Z2)Dd{ZC);u z;{N6>Q7jhVo^(G)Ny8cTVuQ~lR>R#~vl^E%Er7{sb0+f0M90vTH)d^a_|8=3+FqS; zk{$SvreW*JJZ{^A_L;S+g?ZkQY*ouVSZ3pSDhW`L5aql&`o>Yk$fz6$vzP6xN@*%L zdZHY6Z?PpsDn=&1$=O3|o05W8IHTnjOXi7cCA)LxZA7ueUFC(S8l~>swqZS&}%oFpxD3n*N9yx~4HffWQkVCnYET)x%b_3BXhPtMg~*Ml;8IR+kn5rRkcdTpYh{i~A1I4%_my z{swY5D}n~N8T)_8q~d`{%tes?v)o`=POA+PLAc%$5Y|8by0 z1T6NeFCg1&2S5D!@w4u#>$#q?V+lN-A=A#mV1mSN<*3max|?81rQP!|^ zH+!9k)^)rE@1&6~-m~6>mReH%#y{)S=kH}`+3QN)le~H;(0rh1_e>{zVA$19PZ>3b zHKP4kxi4khu@mr4M4qz2%Dl3>d)h$tEclMZV>uqh5YCcV704_L6`D1pbyP|9S|IEd z9C|lF{i{g{|EU-8N~d{cR*pB`q(n7SO5FGYN@QO6SSZ<@lgah7e-2_=_UVRM!Zd6%hWX*Ivn1%g);3k&tG&Jgoi%D zKjrh0%9bzeUSLnb2r3r_1|PdD4&G8`eO@<51sv20_2R<_GrqG-qT79qLvM(tZ_Wwy~v#aGXp_FD+;VIQc} zShgM1R-^`s5pVv_&HI4=*Qmq(>9Rf`Fm1E=}7wnSJ{cMZ-t8g@4M&sB-3a*Vf* zy|d)XYR{^}zD-@5AFi@c@*~c##c_}=@3)-}w8T^;#!LskZ7Qb1cx^w(-B%ubZWp17 zcUVzcy|>?A6Me3m6Kb$IdYpI*+bnwBB~{6f(%{0 zV&Xy}!0QXWv*gs+#4lWXvEQEe7y55(c{%DY$SZR;{dTcpwYJmsa?d8+v_)Vf>{fHY z7C$xltUN+P+OWEsd@K8-Zy}SvI2(EU^NfO?%pIT?-;!T9miYJxFGs}m|jQ(Hj%XvwQ zNH+?Qet4CpSQ@GQ&B3TmG^u(5SpwaU{V&r;FQm5P4@0cYXm#s`e5}wa?-GuaZ~aeA~~|pa}vc1>3`r7 z@AQ_BYX_INRmr{`$j^U0l}Ggi^lx2P?!#1w14V95`l~ zDwQTRC0Tpg>Z)ja41cb0K&E48u~500($JJqBc9CuJU||BrRuAm;u)22cfX~wTuIv5 zeBmVgu8oO7Gq=dYEo%yf3iUm^Rna%q8^pf+jmxBTMjy}FYQk17^GZ}=wSINcz>%jf zAsgHhuVY)bVq)S7vSMbIgjYzLC~G4>`s69sgqQO=7l8a{N<|px=4C7;osp4J&;G5) zN%!~qVK&AETE^9a&vc4xXGHTS8rbI)3R_~Qr$%=|SRWQet+RrHkwJ^hM&tP+imW*)p1sd5@FJxz z#x6^mE$=h9sBBNhcCQEW659vu`8;KW-nT8N=YKd9DUci#T?D)OG;4pQ^&OxBp19hW z*zk`~MM%|I}6HC5{ zQ4rm*E