From ac0066e4d0fc2ad93cf41f6d8ad1cbff40c30c02 Mon Sep 17 00:00:00 2001 From: elijahbenizzy Date: Sun, 1 Sep 2024 23:51:11 -0700 Subject: [PATCH 01/10] Adds typed state support This has quite a few pieces to it: 1. Adds centralized state with a typing system 2. Adds decentralized state 3. Adds pydantic implementations See issue https://github.com/DAGWorks-Inc/burr/issues/139 for more details on design decisions --- burr/core/__init__.py | 2 + burr/core/action.py | 108 ++++-- burr/core/application.py | 170 ++++--- burr/core/state.py | 65 +++- burr/core/typing.py | 89 +++++ burr/core/validation.py | 2 +- burr/integrations/pydantic.py | 360 ++++++++++++++++++ burr/telemetry.py | 7 +- docs/concepts/index.rst | 1 + examples/pydantic/centralized_state.py | 217 +++++++++++ examples/pydantic/decentralized_state.py | 224 +++++++++++ .../application.py | 230 +++++++++-- .../youtube-to-social-media-post/server.py | 46 ++- .../statemachine.png | Bin 25106 -> 11515 bytes pyproject.toml | 2 +- tests/core/test_application.py | 42 ++ tests/core/test_state.py | 27 ++ tests/core/test_validation.py | 2 +- tests/integrations/test_burr_opentelemetry.py | 4 +- 19 files changed, 1442 insertions(+), 156 deletions(-) create mode 100644 burr/core/typing.py create mode 100644 burr/integrations/pydantic.py create mode 100644 examples/pydantic/centralized_state.py create mode 100644 examples/pydantic/decentralized_state.py diff --git a/burr/core/__init__.py b/burr/core/__init__.py index 3d61c144..c371ac47 100644 --- a/burr/core/__init__.py +++ b/burr/core/__init__.py @@ -5,6 +5,7 @@ ApplicationContext, ApplicationGraph, ) +from burr.core.graph import Graph from burr.core.state import State __all__ = [ @@ -20,4 +21,5 @@ "Result", "State", "when", + "Graph", ] diff --git a/burr/core/action.py b/burr/core/action.py index 61381afc..9ab00a86 100644 --- a/burr/core/action.py +++ b/burr/core/action.py @@ -13,6 +13,7 @@ Coroutine, Dict, Generator, + Generic, Iterator, List, Optional, @@ -96,9 +97,11
@@ def validate_inputs(self, inputs: Optional[Dict[str, Any]]) -> None: f"Inputs to function {self} are invalid. " + f"Missing the following inputs: {', '.join(missing_inputs)}." if missing_inputs - else "" f"Additional inputs: {','.join(additional_inputs)}." - if additional_inputs - else "" + else ( + "" f"Additional inputs: {','.join(additional_inputs)}." + if additional_inputs + else "" + ) ) def is_async(self) -> bool: @@ -493,7 +496,7 @@ def is_async(self) -> bool: # the following exist to share implementation between FunctionBasedStreamingAction and FunctionBasedAction # TODO -- think through the class hierarchy to simplify, for now this is OK -def _get_inputs(bound_params: dict, fn: Callable) -> tuple[list[str], list[str]]: +def get_inputs(bound_params: dict, fn: Callable) -> tuple[list[str], list[str]]: sig = inspect.signature(fn) required_inputs, optional_inputs = [], [] for param_name, param in sig.parameters.items(): @@ -507,9 +510,7 @@ def _get_inputs(bound_params: dict, fn: Callable) -> tuple[list[str], list[str]] return required_inputs, optional_inputs -FunctionBasedActionType = TypeVar( - "FunctionBasedActionType", bound=Union["FunctionBasedAction", "FunctionBasedStreamingAction"] -) +FunctionBasedActionType = Union["FunctionBasedAction", "FunctionBasedStreamingAction"] class FunctionBasedAction(SingleStepAction): @@ -520,21 +521,33 @@ def __init__( fn: Callable, reads: List[str], writes: List[str], - bound_params: dict = None, + bound_params: Optional[dict] = None, + input_spec: Optional[tuple[list[str], list[str]]] = None, + originating_fn: Optional[Callable] = None, ): """Instantiates a function-based action with the given function, reads, and writes. The function must take in a state and return a tuple of (result, new_state). 
- :param fn: - :param reads: - :param writes: + :param fn: Function to run + :param reads: Keys that the function reads from the state + :param writes: Keys that the function writes to the state + :param bound_params: Prior bound parameters + :param input_spec: Specification for inputs. Will derive from function if not provided. """ super(FunctionBasedAction, self).__init__() + self._originating_fn = originating_fn if originating_fn is not None else fn self._fn = fn self._reads = reads self._writes = writes self._bound_params = bound_params if bound_params is not None else {} - self._inputs = _get_inputs(self._bound_params, self._fn) + self._inputs = ( + get_inputs(self._bound_params, self._fn) + if input_spec is None + else ( + [item for item in input_spec[0] if item not in self._bound_params], + [item for item in input_spec[1] if item not in self._bound_params], + ) + ) @property def fn(self) -> Callable: @@ -562,7 +575,11 @@ def with_params(self, **kwargs: Any) -> "FunctionBasedAction": :return: """ return FunctionBasedAction( - self._fn, self._reads, self._writes, {**self._bound_params, **kwargs} + self._fn, + self._reads, + self._writes, + {**self._bound_params, **kwargs}, + input_spec=self._inputs, ) def run_and_update(self, state: State, **run_kwargs) -> tuple[dict, State]: @@ -573,10 +590,12 @@ def is_async(self) -> bool: def get_source(self) -> str: """Return the source of the code for this action.""" - return inspect.getsource(self._fn) + return inspect.getsource(self._originating_fn) -StreamType = Tuple[dict, Optional[State]] +StateType = TypeVar("StateType") + +StreamType = Tuple[dict, Optional[State[StateType]]] GeneratorReturnType = Generator[StreamType, None, None] AsyncGeneratorReturnType = AsyncGenerator[StreamType, None] @@ -590,7 +609,7 @@ class StreamingAction(Action, abc.ABC): they run in multiple passes (run -> update)""" @abc.abstractmethod - def stream_run(self, state: State, **run_kwargs) -> Generator[dict, None, None]: + def 
stream_run(self, state: State[StateType], **run_kwargs) -> Generator[dict, None, None]: """Streaming action ``stream_run`` is different than standard action run. It: 1. streams in an intermediate result (the dict output) 2. yields the final result at the end @@ -617,7 +636,7 @@ def stream_run(state: State) -> Generator[dict, None, dict]: """ pass - def run(self, state: State, **run_kwargs) -> dict: + def run(self, state: State[StateType], **run_kwargs) -> dict: """Runs the streaming action through to completion.""" gen = self.stream_run(state, **run_kwargs) last_result = None @@ -636,7 +655,7 @@ class AsyncStreamingAction(Action, abc.ABC): Note this is the "multi-step" variant, in which run/update are separate.""" @abc.abstractmethod - async def stream_run(self, state: State, **run_kwargs) -> AsyncGenerator[dict, None]: + async def stream_run(self, state, **run_kwargs) -> AsyncGenerator[dict, None]: """Asynchronous streaming action ``stream_run`` is different than the standard action run. It: 1. streams in an intermediate result (the dict output) 2. yields the final result at the end @@ -663,7 +682,7 @@ async def stream_run(state: State) -> Generator[dict, None, dict]: """ pass - async def run(self, state: State, **run_kwargs) -> dict: + async def run(self, state: State[StateType], **run_kwargs) -> dict: """Runs the streaming action through to completion. Returns the final result. This is used if we want a streaming action as an intermediate. @@ -686,7 +705,7 @@ def is_async(self) -> bool: return True -class StreamingResultContainer(Iterator[dict]): +class StreamingResultContainer(Iterator[dict], Generic[StateType]): """Container for a streaming result. This allows you to: 1. 
Iterate over the result as it comes in @@ -711,12 +730,14 @@ class StreamingResultContainer(Iterator[dict]): """ @staticmethod - def pass_through(results: dict, final_state: State) -> "StreamingResultContainer": + def pass_through( + results: dict, final_state: State[StateType] + ) -> "StreamingResultContainer[StateType]": """Instantiates a streaming result container that just passes through the given results This is to be used internally -- it allows us to wrap non-streaming action results in a streaming result container.""" - def empty_generator() -> GeneratorReturnType: + def empty_generator() -> Generator[Tuple[dict, Optional[State[StateType]]], None, None]: yield results, final_state return StreamingResultContainer( @@ -729,7 +750,7 @@ def empty_generator() -> GeneratorReturnType: def __init__( self, streaming_result_generator: GeneratorReturnType, - initial_state: State, + initial_state: State[StateType], process_result: Callable[[dict, State], tuple[dict, State]], callback: Callable[[Optional[dict], State, Optional[Exception]], None], ): @@ -788,7 +809,7 @@ def get(self) -> StreamType: return self._result -class AsyncStreamingResultContainer(typing.AsyncIterator[dict]): +class AsyncStreamingResultContainer(typing.AsyncIterator[dict], Generic[StateType]): """Container for an async streaming result. This allows you to: 1. Iterate over the result as it comes in 2. 
Await the final result/state at the end @@ -814,10 +835,11 @@ class AsyncStreamingResultContainer(typing.AsyncIterator[dict]): def __init__( self, streaming_result_generator: AsyncGeneratorReturnType, - initial_state: State, - process_result: Callable[[dict, State], tuple[dict, State]], + initial_state: State[StateType], + process_result: Callable[[dict, State[StateType]], tuple[dict, State[StateType]]], callback: Callable[ - [Optional[dict], State, Optional[Exception]], typing.Coroutine[None, None, None] + [Optional[dict], State[StateType], Optional[Exception]], + typing.Coroutine[None, None, None], ], ): """Initializes an async streaming result container. User will never call directly. @@ -869,7 +891,7 @@ async def gen_fn(): # return it as `__aiter__` cannot be async/have awaits :/ return gen_fn() - async def get(self) -> tuple[Optional[dict], State]: + async def get(self) -> tuple[Optional[dict], State[StateType]]: # exhaust the generator async for _ in self: pass @@ -877,7 +899,9 @@ async def get(self) -> tuple[Optional[dict], State]: return self._result @staticmethod - def pass_through(results: dict, final_state: State) -> "AsyncStreamingResultContainer": + def pass_through( + results: dict, final_state: State[StateType] + ) -> "AsyncStreamingResultContainer[StateType]": """Creates a streaming result container that just passes through the given results. 
This is not a public facing API.""" @@ -887,7 +911,7 @@ async def just_results() -> AsyncGeneratorReturnType: async def empty_callback(result: Optional[dict], state: State, exc: Optional[Exception]): pass - return AsyncStreamingResultContainer( + return AsyncStreamingResultContainer[StateType]( just_results(), final_state, lambda result, state: (result, state), empty_callback ) @@ -954,7 +978,9 @@ def __init__( ], reads: List[str], writes: List[str], - bound_params: dict = None, + bound_params: Optional[dict] = None, + input_spec: Optional[tuple[list[str], list[str]]] = None, + originating_fn: Optional[Callable] = None, ): """Instantiates a function-based streaming action with the given function, reads, and writes. The function must take in a state (and inputs) and return a generator of (result, new_state). @@ -968,6 +994,15 @@ def __init__( self._reads = reads self._writes = writes self._bound_params = bound_params if bound_params is not None else {} + self._inputs = ( + get_inputs(self._bound_params, self._fn) + if input_spec is None + else ( + [item for item in input_spec[0] if item not in self._bound_params], + [item for item in input_spec[1] if item not in self._bound_params], + ) + ) + self._originating_fn = originating_fn if originating_fn is not None else fn async def _a_stream_run_and_update( self, state: State, **run_kwargs @@ -1005,12 +1040,17 @@ def with_params(self, **kwargs: Any) -> "FunctionBasedStreamingAction": :return: """ return FunctionBasedStreamingAction( - self._fn, self._reads, self._writes, {**self._bound_params, **kwargs} + self._fn, + self._reads, + self._writes, + {**self._bound_params, **kwargs}, + input_spec=self._inputs, + originating_fn=self._originating_fn, ) @property def inputs(self) -> tuple[list[str], list[str]]: - return _get_inputs(self._bound_params, self._fn) + return self._inputs @property def fn(self) -> Union[StreamingFn, StreamingFnAsync]: @@ -1021,7 +1061,7 @@ def is_async(self) -> bool: def get_source(self) -> str: 
"""Return the source of the code for this action""" - return inspect.getsource(self._fn) + return inspect.getsource(self._originating_fn) C = TypeVar("C", bound=Callable) # placeholder for any Callable diff --git a/burr/core/application.py b/burr/core/application.py index 6ae01f7d..dd13d968 100644 --- a/burr/core/application.py +++ b/burr/core/application.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import contextvars import dataclasses import functools @@ -13,6 +15,7 @@ Callable, Dict, Generator, + Generic, List, Literal, Optional, @@ -41,6 +44,7 @@ from burr.core.graph import Graph, GraphBuilder from burr.core.persistence import BaseStateLoader, BaseStateSaver from burr.core.state import State +from burr.core.typing import DictBasedTypingSystem from burr.core.validation import BASE_ERROR_MESSAGE from burr.lifecycle.base import ExecuteMethod, LifecycleAdapter, PostRunStepHook, PreRunStepHook from burr.lifecycle.internal import LifecycleAdapterSet @@ -48,6 +52,7 @@ from burr.visibility.tracing import tracer_factory_context_var if TYPE_CHECKING: + from burr.core.typing import TypingSystem from burr.tracking.base import TrackingClient logger = logging.getLogger(__name__) @@ -55,6 +60,9 @@ PRIOR_STEP = "__PRIOR_STEP" SEQUENCE_ID = "__SEQUENCE_ID" +StateType = TypeVar("StateType") +StateTypeToSet = TypeVar("StateTypeToSet") + def _validate_result(result: dict, name: str) -> None: if not isinstance(result, dict): @@ -292,7 +300,8 @@ def _run_single_step_streaming_action( f"Action {action.name} did not return a state update. For streaming actions, the last yield " f"statement must be a tuple of (result, state_update). For example, yield dict(foo='bar'), state.update(foo='bar')" ) - _validate_result(result, action.name) + # TODO -- get this back in and use the action's schema (still not set) to validate the result... 
+ # _validate_result(result, action.name) _validate_reducer_writes(action, state_update, action.name) yield result, state_update @@ -344,7 +353,8 @@ async def _arun_single_step_streaming_action( f"Action {action.name} did not return a state update. For async actions, the last yield " f"statement must be a tuple of (result, state_update). For example, yield dict(foo='bar'), state.update(foo='bar')" ) - _validate_result(result, action.name) + # TODO -- add back in validation when we have a schema + # _validate_result(result, action.name) _validate_reducer_writes(action, state_update, action.name) # TODO -- add guard against zero-length stream yield result, state_update @@ -664,11 +674,14 @@ def post_run_step( del self.token_pointer_map[(app_id, sequence_id)] -class Application: +ApplicationStateType = TypeVar("ApplicationStateType") + + +class Application(Generic[ApplicationStateType]): def __init__( self, graph: Graph, - state: State, + state: State[ApplicationStateType], partition_key: Optional[str], uid: str, entrypoint: str, @@ -824,7 +837,9 @@ def reset_to_entrypoint(self) -> None: in your graph, but this will do the trick if you need it!""" self._set_state(self._state.wipe(delete=[PRIOR_STEP])) - def _update_internal_state_value(self, new_state: State, next_action: Action) -> State: + def _update_internal_state_value( + self, new_state: State[ApplicationStateType], next_action: Action + ) -> State[ApplicationStateType]: """Updates the internal state values of the new state.""" new_state = new_state.update( **{ @@ -889,7 +904,9 @@ def _process_inputs(self, inputs: Dict[str, Any], action: Action) -> Dict[str, A # @telemetry.capture_function_usage # ditto with step() @_call_execute_method_pre_post(ExecuteMethod.astep) - async def astep(self, inputs: Dict[str, Any] = None) -> Optional[Tuple[Action, dict, State]]: + async def astep( + self, inputs: Optional[Dict[str, Any]] = None + ) -> Optional[Tuple[Action, dict, State[ApplicationStateType]]]: """Asynchronous 
version of step. :param inputs: Inputs to the action -- this is if this action @@ -1022,7 +1039,7 @@ def _return_value_iterate( halt_after: list[str], prior_action: Optional[Action], result: Optional[dict], - ) -> Tuple[Optional[Action], Optional[dict], State]: + ) -> Tuple[Optional[Action], Optional[dict], State[ApplicationStateType]]: """Utility function to decide what to return for iterate/arun. Note that run() will delegate to the return value of iterate, whereas arun cannot delegate to the return value of aiterate (as async generators cannot return a value). We put the code centrally to clean up the logic. @@ -1057,7 +1074,11 @@ def iterate( halt_before: list[str] = None, halt_after: list[str] = None, inputs: Optional[Dict[str, Any]] = None, - ) -> Generator[Tuple[Action, dict, State], None, Tuple[Action, Optional[dict], State]]: + ) -> Generator[ + Tuple[Action, dict, State[ApplicationStateType]], + None, + Tuple[Action, Optional[dict], State[ApplicationStateType]], + ]: """Returns a generator that calls step() in a row, enabling you to see the state of the system as it updates. Note this returns a generator, and also the final result (for convenience). @@ -1095,7 +1116,7 @@ async def aiterate( halt_before: list[str] = None, halt_after: list[str] = None, inputs: Optional[Dict[str, Any]] = None, - ) -> AsyncGenerator[Tuple[Action, dict, State], None]: + ) -> AsyncGenerator[Tuple[Action, dict, State[ApplicationStateType]], None]: """Returns a generator that calls step() in a row, enabling you to see the state of the system as it updates. This is the asynchronous version so it has no capability of t @@ -1125,7 +1146,7 @@ def run( halt_before: list[str] = None, halt_after: list[str] = None, inputs: Optional[Dict[str, Any]] = None, - ) -> Tuple[Action, Optional[dict], State]: + ) -> Tuple[Action, Optional[dict], State[ApplicationStateType]]: """Runs your application through until completion. 
Does not give access to the state along the way -- if you want that, use iterate(). @@ -1179,7 +1200,7 @@ def stream_result( halt_after: list[str], halt_before: list[str] = None, inputs: Optional[Dict[str, Any]] = None, - ) -> Tuple[Action, StreamingResultContainer]: + ) -> Tuple[Action, StreamingResultContainer[ApplicationStateType]]: """Streams a result out. :param halt_after: The list of actions to halt after execution of. It will halt on the first one. @@ -1428,7 +1449,7 @@ async def astream_result( halt_after: list[str], halt_before: list[str] = None, inputs: Optional[Dict[str, Any]] = None, - ) -> Tuple[Action, AsyncStreamingResultContainer]: + ) -> Tuple[Action, AsyncStreamingResultContainer[ApplicationStateType]]: """Streams a result out in an asynchronous manner. :param halt_after: The list of actions to halt after execution of. It will halt on the first one. @@ -1680,7 +1701,7 @@ async def callback( sequence_id=self.sequence_id, exception=e, ) - await call_execute_method_wrapper.acall_post(self, None, e) + await call_execute_method_wrapper.acall_post(self, e) await self._adapter_set.call_all_lifecycle_hooks_sync_and_async( "post_end_stream", action=next_action.name, @@ -1726,13 +1747,13 @@ def visualize( **engine_kwargs, ) - def _set_state(self, new_state: State): + def _set_state(self, new_state: State[ApplicationStateType]): self._state = new_state def get_next_action(self) -> Optional[Action]: return self._graph.get_next_node(self._state.get(PRIOR_STEP), self._state, self.entrypoint) - def update_state(self, new_state: State): + def update_state(self, new_state: State[ApplicationStateType]): """Updates state -- this is meant to be called if you need to do anything with the state. For example: 1. Reset it (after going through a loop) @@ -1744,7 +1765,7 @@ def update_state(self, new_state: State): self._state = new_state @property - def state(self) -> State: + def state(self) -> State[ApplicationStateType]: """Gives the state. 
Recall that state is purely immutable -- anything you do with this state will not be persisted unless you subsequently call update_state. @@ -1837,7 +1858,7 @@ def partition_key(self) -> Optional[str]: return self._partition_key @property - def builder(self) -> Optional["ApplicationBuilder"]: + def builder(self) -> Optional["ApplicationBuilder[ApplicationStateType]"]: """Returns the application builder that was used to build this application. Note that this asusmes the application was built using the builder. Otherwise, @@ -1873,10 +1894,10 @@ def _validate_start(start: Optional[str], actions: Set[str]): ) -class ApplicationBuilder: +class ApplicationBuilder(Generic[StateType]): def __init__(self): self.start = None - self.state: Optional[State] = None + self.state: Optional[State[StateType]] = None self.lifecycle_adapters: List[LifecycleAdapter] = list() self.app_id: str = str(uuid.uuid4()) self.partition_key: Optional[str] = None @@ -1894,10 +1915,11 @@ def __init__(self): self.tracker = None self.graph_builder = None self.prebuilt_graph = None + self.typing_system = None def with_identifiers( self, app_id: str = None, partition_key: str = None, sequence_id: int = None - ) -> "ApplicationBuilder": + ) -> "ApplicationBuilder[StateType]": """Assigns various identifiers to the application. This is used for tracking, persistence, etc... :param app_id: Application ID -- this will be assigned to a uuid if not set. @@ -1915,7 +1937,21 @@ def with_identifiers( self.sequence_id = sequence_id return self - def with_state(self, **kwargs) -> "ApplicationBuilder": + def with_typing( + self, typing_system: TypingSystem[StateTypeToSet] + ) -> "ApplicationBuilder[StateTypeToSet]": + """Sets the typing system for the application. This is used to enforce typing on the state. + + :param typing_system: Typing system to use + :return: The application builder for future chaining. 
+ """ + if typing_system is not None: + self.typing_system = typing_system + return self # type: ignore + + def with_state( + self, state: Optional[Union[State, StateTypeToSet]] = None, **kwargs + ) -> "ApplicationBuilder[StateType]": """Sets initial values in the state. If you want to load from a prior state, you can do so here and pass the values in. @@ -1930,13 +1966,30 @@ def with_state(self, **kwargs) -> "ApplicationBuilder": "the .initialize_from() API. Either allow the persister to set the " "state, or set the state manually." ) - if self.state is not None: + if state is not None: + if self.state is not None: + raise ValueError( + BASE_ERROR_MESSAGE + + "State items have already been set -- you cannot use the type-based API as well." + " Either set state with with_state(**kwargs) or pass in a state/typed object." + ) + if isinstance(state, State): + self.state = state + elif self.typing_system is not None: + self.state = self.typing_system.construct_state(state) + else: + raise ValueError( + BASE_ERROR_MESSAGE + + "You have not set a typing system, and you are passing in a typed state object." + " Please set a typing system using with_typing before doing so." + ) + elif self.state is not None: self.state = self.state.update(**kwargs) else: self.state = State(kwargs) return self - def with_graph(self, graph: Graph) -> "ApplicationBuilder": + def with_graph(self, graph: Graph) -> "ApplicationBuilder[StateType]": """Adds a prebuilt graph -- this is an alternative to using the with_actions and with_transitions methods. While you will likely use with_actions and with_transitions, you may want this in a few cases: @@ -1969,7 +2022,7 @@ def _initialize_graph_builder(self): if self.graph_builder is None: self.graph_builder = GraphBuilder() - def with_entrypoint(self, action: str) -> "ApplicationBuilder": + def with_entrypoint(self, action: str) -> "ApplicationBuilder[StateType]": """Adds an entrypoint to the application. This is the action that will be run first. 
This can only be called once. @@ -1988,7 +2041,7 @@ def with_entrypoint(self, action: str) -> "ApplicationBuilder": def with_actions( self, *action_list: Union[Action, Callable], **action_dict: Union[Action, Callable] - ) -> "ApplicationBuilder": + ) -> "ApplicationBuilder[StateType]": """Adds an action to the application. The actions are granted names (using the with_name) method post-adding, using the kw argument. If it already has a name (or you wish to use the function name, raw, and it is a function-based-action), then you can use the *args* parameter. This is the only supported way to add actions. @@ -2007,7 +2060,7 @@ def with_transitions( *transitions: Union[ Tuple[Union[str, list[str]], str], Tuple[Union[str, list[str]], str, Condition] ], - ) -> "ApplicationBuilder": + ) -> "ApplicationBuilder[StateType]": """Adds transitions to the application. Transitions are specified as tuples of either: 1. (from, to, condition) 2. (from, to) -- condition is set to DEFAULT (which is a fallback) @@ -2024,7 +2077,7 @@ def with_transitions( self.graph_builder = self.graph_builder.with_transitions(*transitions) return self - def with_hooks(self, *adapters: LifecycleAdapter) -> "ApplicationBuilder": + def with_hooks(self, *adapters: LifecycleAdapter) -> "ApplicationBuilder[StateType]": """Adds a lifecycle adapter to the application. This is a way to add hooks to the application so that they are run at the appropriate times. You can use this to synchronize state out, log results, etc... @@ -2093,7 +2146,7 @@ def initialize_from( fork_from_app_id: str = None, fork_from_partition_key: str = None, fork_from_sequence_id: int = None, - ) -> "ApplicationBuilder": + ) -> "ApplicationBuilder[StateType]": """Initializes the application we will build from some prior state object. Note (1) that you can *either* call this or use `with_state` and `with_entrypoint`. 
@@ -2133,7 +2186,7 @@ def initialize_from( def with_state_persister( self, persister: Union[BaseStateSaver, LifecycleAdapter], on_every: str = "step" - ) -> "ApplicationBuilder": + ) -> "ApplicationBuilder[StateType]": """Adds a state persister to the application. This is a way to persist state out to a database, file, etc... at the specified interval. This is one of two options: @@ -2156,7 +2209,7 @@ def with_state_persister( def with_spawning_parent( self, app_id: str, sequence_id: int, partition_key: Optional[str] = None - ) -> "ApplicationBuilder": + ) -> "ApplicationBuilder[StateType]": """Sets the 'spawning' parent application that created this app. This is used for tracking purposes. Doing this creates a parent/child relationship. There can be many spawned children from a single sequence ID (just as there can be many forks of an app). @@ -2251,14 +2304,14 @@ def _get_built_graph(self) -> Graph: raise ValueError( BASE_ERROR_MESSAGE + "You must set the graph using with_graph, or use with_entrypoint, with_actions, and with_transitions" - "to build the graph." + " to build the graph." ) if self.graph_builder is not None: return self.graph_builder.build() return self.prebuilt_graph - @telemetry.capture_function_usage - def build(self) -> Application: + # @telemetry.capture_function_usage + def build(self) -> Application[StateType]: """Builds the application. This function is a bit messy as we iron out the exact logic and rigor we want around things. 
@@ -2274,6 +2327,10 @@ def build(self) -> Application: self._load_from_persister() graph = self._get_built_graph() _validate_start(self.start, {action.name for action in graph.actions}) + typing_system: TypingSystem[StateType] = ( + self.typing_system if self.typing_system is not None else DictBasedTypingSystem() + ) # type: ignore + self.state = self.state.with_typing_system(typing_system=typing_system) return Application( graph=graph, state=self.state, @@ -2283,19 +2340,38 @@ def build(self) -> Application: entrypoint=self.start, adapter_set=LifecycleAdapterSet(*self.lifecycle_adapters), builder=self, - fork_parent_pointer=burr_types.ParentPointer( - app_id=self.fork_from_app_id, - partition_key=self.fork_from_partition_key, - sequence_id=self.fork_from_sequence_id, - ) - if self.loaded_from_fork - else None, + fork_parent_pointer=( + burr_types.ParentPointer( + app_id=self.fork_from_app_id, + partition_key=self.fork_from_partition_key, + sequence_id=self.fork_from_sequence_id, + ) + if self.loaded_from_fork + else None + ), tracker=self.tracker, - spawning_parent_pointer=burr_types.ParentPointer( - app_id=self.spawn_from_app_id, - partition_key=self.spawn_from_partition_key, - sequence_id=self.spawn_from_sequence_id, - ) - if self.spawn_from_app_id is not None - else None, + spawning_parent_pointer=( + burr_types.ParentPointer( + app_id=self.spawn_from_app_id, + partition_key=self.spawn_from_partition_key, + sequence_id=self.spawn_from_sequence_id, + ) + if self.spawn_from_app_id is not None + else None + ), ) + + +if __name__ == "__main__": + import pydantic + + class Foo(pydantic.BaseModel): + a: int + b: str + + from burr.integrations import pydantic + + app = ApplicationBuilder().with_typing(pydantic.PydanticTypingSystem(Foo)).build() + + _, _, foo = app.run(inputs={"a": 1, "b": "hello"}) + mod = foo.data diff --git a/burr/core/state.py b/burr/core/state.py index 62c9a583..67037b9f 100644 --- a/burr/core/state.py +++ b/burr/core/state.py @@ -4,9 +4,10 @@ 
import importlib import inspect import logging -from typing import Any, Callable, Dict, Iterator, Mapping, Union +from typing import Any, Callable, Dict, Generic, Iterator, Mapping, Optional, TypeVar, Union from burr.core import serde +from burr.core.typing import DictBasedTypingSystem, TypingSystem logger = logging.getLogger(__name__) @@ -142,7 +143,7 @@ def apply_mutate(self, inputs: dict): if key not in inputs: inputs[key] = [] if not isinstance(inputs[key], list): - raise ValueError(f"Cannot append to non-list value {key}={inputs[self.key]}") + raise ValueError(f"Cannot append to non-list value {key}={inputs[key]}") inputs[key].append(value) def validate(self, input_state: Dict[str, Any]): @@ -211,22 +212,51 @@ def apply_mutate(self, inputs: dict): inputs.pop(key, None) -class State(Mapping): +StateType = TypeVar("StateType", bound=Union[Dict[str, Any], Any]) +AssignedStateType = TypeVar("AssignedStateType") + + +class State(Mapping, Generic[StateType]): """An immutable state object. 
This is the only way to interact with state in Burr.""" - def __init__(self, initial_values: Dict[str, Any] = None): + def __init__( + self, + initial_values: Optional[Dict[str, Any]] = None, + typing_system: Optional[TypingSystem[StateType]] = None, + ): if initial_values is None: initial_values = dict() + self._typing_system = ( + typing_system if typing_system is not None else DictBasedTypingSystem() # type: ignore + ) self._state = initial_values - def apply_operation(self, operation: StateDelta) -> "State": + @property + def typing_system(self) -> TypingSystem[StateType]: + return self._typing_system + + def with_typing_system( + self, typing_system: TypingSystem[AssignedStateType] + ) -> "State[AssignedStateType]": + """Copies state with a specific typing system""" + return State(self._state, typing_system=typing_system) + + @property + def data(self) -> StateType: + return self._typing_system.construct_data(self) # type: ignore + + def apply_operation(self, operation: StateDelta) -> "State[StateType]": """Applies a given operation to the state, returning a new state""" - new_state = copy.deepcopy(self._state) # TODO -- restrict to just the read keys + + # Moved to copy.copy instead of copy.deepcopy + # TODO -- just copy the ones that have changed + # And if they can't be copied then we use the same ones... + new_state = copy.copy(self._state) # TODO -- restrict to just the read keys operation.validate(new_state) operation.apply_mutate( new_state ) # todo -- validate that the write keys are the only different ones - return State(new_state) + return State(new_state, typing_system=self._typing_system) def get_all(self) -> Dict[str, Any]: """Returns the entire state, realize as a dictionary. 
This is a copy.""" @@ -251,7 +281,7 @@ def _serialize(k, v, **extrakwargs) -> Union[dict, str]: return {k: _serialize(k, v, **kwargs) for k, v in _dict.items()} @classmethod - def deserialize(cls, json_dict: dict, **kwargs) -> "State": + def deserialize(cls, json_dict: dict, **kwargs) -> "State[StateType]": """Converts a dictionary representing a JSON object back into a state""" def _deserialize(k, v: Union[str, dict], **extrakwargs) -> Callable: @@ -262,7 +292,7 @@ def _deserialize(k, v: Union[str, dict], **extrakwargs) -> Callable: return State({k: _deserialize(k, v, **kwargs) for k, v in json_dict.items()}) - def update(self, **updates: Any) -> "State": + def update(self, **updates: Any) -> "State[StateType]": """Updates the state with a set of key-value pairs Does an upsert operation (if the keys exist their value will be overwritten, otherwise they will be created) @@ -277,7 +307,7 @@ def update(self, **updates: Any) -> "State": """ return self.apply_operation(SetFields(updates)) - def append(self, **updates: Any) -> "State": + def append(self, **updates: Any) -> "State[StateType]": """Appends to the state with a set of key-value pairs. Each one must correspond to a list-like object, or an error will be raised. @@ -295,7 +325,7 @@ def append(self, **updates: Any) -> "State": return self.apply_operation(AppendFields(updates)) - def increment(self, **updates: int) -> "State": + def increment(self, **updates: int) -> "State[StateType]": """Increments the state with a set of key-value pairs. Each one must correspond to an integer, or an error will be raised. @@ -304,7 +334,7 @@ def increment(self, **updates: int) -> "State": """ "" return self.apply_operation(IncrementFields(updates)) - def wipe(self, delete: list[str] = None, keep: list[str] = None): + def wipe(self, delete: Optional[list[str]] = None, keep: Optional[list[str]] = None): """Wipes the state, either by deleting the keys in delete and keeping everything else or keeping the keys in keep. 
and deleting everything else. If you pass nothing in it will delete the whole thing. @@ -324,14 +354,17 @@ def wipe(self, delete: list[str] = None, keep: list[str] = None): fields_to_delete = [key for key in self._state if key not in keep] return self.apply_operation(DeleteField(fields_to_delete)) - def merge(self, other: "State") -> "State": + def merge(self, other: "State") -> "State[StateType]": """Merges two states together, overwriting the values in self with those in other.""" - return State({**self.get_all(), **other.get_all()}) + return State({**self.get_all(), **other.get_all()}, self.typing_system) - def subset(self, *keys: str, ignore_missing: bool = True) -> "State": + def subset(self, *keys: str, ignore_missing: bool = True) -> "State[StateType]": """Returns a subset of the state, with only the given keys""" - return State({key: self[key] for key in keys if key in self or not ignore_missing}) + return State( + {key: self[key] for key in keys if key in self or not ignore_missing}, + self.typing_system, + ) def __getitem__(self, __k: str) -> Any: return self._state[__k] diff --git a/burr/core/typing.py b/burr/core/typing.py new file mode 100644 index 00000000..c0c9cdb3 --- /dev/null +++ b/burr/core/typing.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +import abc +from typing import TYPE_CHECKING, Generic, Type, TypeVar + +BaseType = TypeVar("BaseType") +# SpecificType = TypeVar('SpecificType', bound=BaseType) + +if TYPE_CHECKING: + from burr.core import Action, Graph, State + +try: + from typing import Self +except ImportError: + Self = "TypingSystem" + + +class TypingSystem(abc.ABC, Generic[BaseType]): + @abc.abstractmethod + def state_type(self) -> Type[BaseType]: + """Gives the type that represents the state of the + application at any given time. Note that this must have + adequate support for Optionals (E.G. non-required values). 
+ + :return: + """ + + @abc.abstractmethod + def state_pre_action_run_type(self, action: Action, graph: Graph) -> Type[BaseType]: + """Gives the type that represents the state after an action has completed. + Note that this could be smart -- E.g. it should have all possible upstream + types filled in. + + :param action: + :return: + """ + + @abc.abstractmethod + def state_post_action_run_type(self, action: Action, graph: Graph) -> Type[BaseType]: + """Gives the type that represents the state after an action has completed. + Note that this could be smart -- E.g. it should have all possible upstream + types filled in. + + :param action: + :return: + """ + + def validate_state(self, state: State) -> None: + """Validates the state to ensure it is valid. + + :param state: + :return: + """ + + @abc.abstractmethod + def construct_data(self, state: State[BaseType]) -> BaseType: + """Constructs a type based on the arguments passed in. + + :param kwargs: + :return: + """ + + @abc.abstractmethod + def construct_state(self, data: BaseType) -> State[BaseType]: + """Constructs a state based on the arguments passed in. + + :param kwargs: + :return: + """ + + +class DictBasedTypingSystem(TypingSystem[dict]): + """Effectively a no-op. State is backed by a dictionary, which allows every state item + to... 
be a dictionary.""" + + def state_type(self) -> Type[dict]: + return dict + + def state_pre_action_run_type(self, action: Action, graph: Graph) -> Type[dict]: + return dict + + def state_post_action_run_type(self, action: Action, graph: Graph) -> Type[dict]: + return dict + + def construct_data(self, state: State[dict]) -> dict: + return state.get_all() + + def construct_state(self, data: dict) -> State[dict]: + return State(data, typing_system=self) diff --git a/burr/core/validation.py b/burr/core/validation.py index e1e95514..16baf2a0 100644 --- a/burr/core/validation.py +++ b/burr/core/validation.py @@ -12,5 +12,5 @@ def assert_set(value: Optional[Any], field: str, method: str): if value is None: raise ValueError( BASE_ERROR_MESSAGE - + f"Must set `{field}` before building application! Do so with ApplicationBuilder.{method}" + + f"Must call `{method}` before building application! Do so with ApplicationBuilder." ) diff --git a/burr/integrations/pydantic.py b/burr/integrations/pydantic.py new file mode 100644 index 00000000..22a82a48 --- /dev/null +++ b/burr/integrations/pydantic.py @@ -0,0 +1,360 @@ +from __future__ import annotations + +import copy +import inspect +import types +from typing import ( + AsyncGenerator, + Awaitable, + Callable, + Generator, + List, + Optional, + ParamSpec, + Tuple, + Type, + TypeVar, + Union, +) + +import pydantic +from pydantic_core import PydanticUndefined + +from burr.core import Action, Graph, State +from burr.core.action import FunctionBasedAction, FunctionBasedStreamingAction, bind, get_inputs +from burr.core.typing import TypingSystem + +Inputs = ParamSpec("Inputs") + +PydanticActionFunction = Callable[..., Union[pydantic.BaseModel, Awaitable[pydantic.BaseModel]]] + + +def model_to_dict(model: pydantic.BaseModel, include: Optional[List[str]] = None) -> dict: + """Utility function to convert a pydantic model to a dictionary.""" + keys = model.model_fields.keys() + keys = keys if include is None else [item for item in include 
if item in model.model_fields] + return {key: getattr(model, key) for key in keys} + + +ModelType = TypeVar("ModelType", bound=pydantic.BaseModel) + + +def subset_model( + model: Type[ModelType], + fields: List[str], + force_optional_fields: List[str], + model_name_suffix: str, +) -> Type[ModelType]: + """Creates a new pydantic model that is a subset of the original model. + This is just to make it more efficient, as we can dynamically alter pydantic models + + :param fields: Fields that we want to include in the new model. + :param force_optional_fields: Fields that we want to include in the new model, but that will always be optional. + :param model: The model type to subset. + :param model_name_suffix: The suffix to add to the model name. + :return: The new model type. + """ + new_fields = {} + + for name, field_info in model.model_fields.items(): + if name in fields: + # copy directly + # TODO -- handle cross-field validation + new_fields[name] = (field_info.annotation, field_info) + elif name in force_optional_fields: + new_field_info = copy.deepcopy(field_info) + if new_field_info.default_factory is None and ( + new_field_info.default is PydanticUndefined + ): + # in this case we can set to None + new_field_info.default = None + annotation = field_info.annotation + if annotation is not None: + new_field_info.annotation = Optional[annotation] # type: ignore + new_fields[name] = (new_field_info.annotation, new_field_info) + return pydantic.create_model(model.__name__ + model_name_suffix, **new_fields) # type: ignore + + +def merge_to_state(model: pydantic.BaseModel, write_keys: List[str], state: State) -> State: + """Merges a pydantic model that is a subset of the new state back into the state + TODO -- implement + TODO -- consider validating that the entire state is correct + TODO -- consider validating just the deltas (if that's possible) + """ + write_dict = model_to_dict(model=model, include=write_keys) + return state.update(**write_dict) + + +def 
model_from_state(model: Type[ModelType], state: State) -> ModelType: + """Creates a model from the state object -- capturing just the fields that are relevant to the model itself. + + :param model: model type to create + :param state: state object to create from + :return: model object + """ + keys = [item for item in model.model_fields.keys() if item in state] + return model(**{key: state[key] for key in keys}) + + +def _validate_and_extract_signature_types( + fn: PydanticActionFunction, +) -> Tuple[Type[pydantic.BaseModel], Type[pydantic.BaseModel]]: + sig = inspect.signature(fn) + if "state" not in sig.parameters: + raise ValueError( + f"Function fn: {fn.__qualname__} is not a valid pydantic action. " + "The first argument of a pydantic " + "action must be the state object. Got signature: {sig}." + ) + state_model = sig.parameters["state"].annotation + if state_model is inspect.Parameter.empty or not issubclass(state_model, pydantic.BaseModel): + raise ValueError( + f"Function fn: {fn.__qualname__} is not a valid pydantic action. " + "a type annotation of a type extending: pydantic.BaseModel. Got parameter " + "state: {state_model.__qualname__}." + ) + if sig.return_annotation is inspect.Parameter.empty or not issubclass( + sig.return_annotation, pydantic.BaseModel + ): + raise ValueError( + f"Function fn: {fn.__qualname__} is not a valid pydantic action. " + "The return type must be a subclass of pydantic" + ".BaseModel. Got return type: {sig.return_annotation}." + ) + return state_model, sig.return_annotation + + +def _validate_keys(model: Type[pydantic.BaseModel], keys: List[str], fn: Callable) -> None: + missing_keys = [key for key in keys if key not in model.model_fields] + if missing_keys: + raise ValueError( + f"Function fn: {fn.__qualname__} is not a valid pydantic action. " + f"The keys: {missing_keys} are not present in the model: {model.__qualname__}." 
+ ) + + +def pydantic_action( + reads: List[str], writes: List[str] +) -> Callable[[PydanticActionFunction], PydanticActionFunction]: + """Action that specifies inputs/outputs using pydantic models. + This should make it easier to develop with guardrails. + + :param reads: keys that this model reads. Note that this will be a subset of the pydantic model with which this is decorated. + We will be validating that the keys are present in the model. + :param writes: keys that this model writes. Note that this will be a subset of the pydantic model with which this is decorated. + We will be validating that the keys are present in the model. + :return: + """ + + def decorator(fn: PydanticActionFunction) -> PydanticActionFunction: + itype, otype = _validate_and_extract_signature_types(fn) + _validate_keys(model=itype, keys=reads, fn=fn) + _validate_keys(model=otype, keys=writes, fn=fn) + SubsetInputType = subset_model( + model=itype, + fields=reads, + force_optional_fields=[item for item in writes if item not in reads], + model_name_suffix=f"{fn.__name__}_input", + ) + SubsetOutputType = subset_model( + model=otype, + fields=writes, + force_optional_fields=[], + model_name_suffix=f"{fn.__name__}_input", + ) + + def action_function(state: State, **kwargs) -> State: + model_to_use = model_from_state(model=SubsetInputType, state=state) + result = fn(state=model_to_use, **kwargs) + # TODO -- validate that we can always construct this from the dict... 
+ # We really want a copy-type function + output = SubsetOutputType(**model_to_dict(result, include=writes)) + return merge_to_state(model=output, write_keys=writes, state=state) + + async def async_action_function(state: State, **kwargs) -> State: + model_to_use = model_from_state(model=SubsetInputType, state=state) + result = await fn(state=model_to_use, **kwargs) + output = SubsetOutputType(**model_to_dict(result, include=writes)) + return merge_to_state(model=output, write_keys=writes, state=state) + + is_async = inspect.iscoroutinefunction(fn) + # This recreates the @action decorator + # TODO -- use the @action decorator directly + # TODO -- ensure that the function is the right one -- specifically it probably won't show code in the UI + # now + setattr( + fn, + FunctionBasedAction.ACTION_FUNCTION, + FunctionBasedAction( + async_action_function if is_async else action_function, + reads, + writes, + input_spec=get_inputs({}, fn), + originating_fn=fn, + ), + ) + setattr(fn, "bind", types.MethodType(bind, fn)) + # TODO -- figure out typing + # It's not smart enough to know that we have satisfied the type signature, + # as we dynamically apply it using setattr + return fn + + return decorator + + +PartialType = Union[Type[pydantic.BaseModel], Type[dict]] + +PydanticStreamingActionFunctionSync = Callable[ + ..., Generator[Tuple[Union[pydantic.BaseModel, dict], Optional[pydantic.BaseModel]], None, None] +] + +PydanticStreamingActionFunctionAsync = Callable[ + ..., AsyncGenerator[Tuple[Union[pydantic.BaseModel, dict], Optional[pydantic.BaseModel]], None] +] + +PydanticStreamingActionFunction = Union[ + PydanticStreamingActionFunctionSync, PydanticStreamingActionFunctionAsync +] + +PydanticStreamingActionFunctionVar = TypeVar( + "PydanticStreamingActionFunctionVar", bound=PydanticStreamingActionFunction +) + + +def _validate_and_extract_signature_types_streaming( + fn: PydanticStreamingActionFunction, + stream_type: Optional[Union[Type[pydantic.BaseModel], 
Type[dict]]], + state_input_type: Optional[Type[pydantic.BaseModel]] = None, + state_output_type: Optional[Type[pydantic.BaseModel]] = None, +) -> Tuple[ + Type[pydantic.BaseModel], Type[pydantic.BaseModel], Union[Type[dict], Type[pydantic.BaseModel]] +]: + if stream_type is None: + # TODO -- derive from the signature + raise ValueError(f"stream_type is required for function: {fn.__qualname__}") + if state_input_type is None: + # TODO -- derive from the signature + raise ValueError(f"state_input_type is required for function: {fn.__qualname__}") + if state_output_type is None: + # TODO -- derive from the signature + raise ValueError(f"state_output_type is required for function: {fn.__qualname__}") + return state_input_type, state_output_type, stream_type + + +def pydantic_streaming_action( + reads: List[str], + writes: List[str], + state_input_type: Optional[Type[pydantic.BaseModel]], + state_output_type: Optional[Type[pydantic.BaseModel]], + stream_type: Optional[PartialType], +) -> Callable[[PydanticStreamingActionFunction], PydanticStreamingActionFunction]: + """Creates a streaming action that uses pydantic models. + + :param reads: The fields this consumes from the state. + :param writes: The fields this writes to the state. + :param stream_type: The pydantic model or dictionary type that is used to represent the partial results. If this is None it will attempt to derive from the signature. + Use a dict if you want this untyped. + :param state_input_type: The pydantic model type that is used to represent the input state. If this is None it will attempt to derive from the signature. + :param state_output_type: The pydantic model type that is used to represent the output state. If this is None it will attempt to derive from the signature. + :return: The same function, decorated function. 
+ """ + + def decorator(fn: PydanticStreamingActionFunctionVar) -> PydanticStreamingActionFunctionVar: + itype, otype, stream_type_processed = _validate_and_extract_signature_types_streaming( + fn, stream_type, state_input_type=state_input_type, state_output_type=state_output_type + ) + _validate_keys(model=itype, keys=reads, fn=fn) + _validate_keys(model=otype, keys=writes, fn=fn) + SubsetInputType = subset_model( + model=itype, + fields=reads, + force_optional_fields=[item for item in writes if item not in reads], + model_name_suffix=f"{fn.__name__}_input", + ) + SubsetOutputType = subset_model( + model=otype, + fields=writes, + force_optional_fields=[], + model_name_suffix=f"{fn.__name__}_input", + ) + # PartialModelType = stream_type_processed # TODO -- attach to action + # We don't currently use this, but we will be passing to the action to validate + + def action_generator( + state: State, **kwargs + ) -> Generator[tuple[PartialType, Optional[State]], None, None]: + model_to_use = model_from_state(model=SubsetInputType, state=state) + for partial, state_update in fn(state=model_to_use, **kwargs): + if state_update is None: + yield partial, None + else: + output = SubsetOutputType(**model_to_dict(state_update, include=writes)) + yield partial, merge_to_state(model=output, write_keys=writes, state=state) + + async def async_action_generator( + state: State, **kwargs + ) -> AsyncGenerator[tuple[dict, Optional[State]], None]: + model_to_use = model_from_state(model=SubsetInputType, state=state) + async for partial, state_update in fn(state=model_to_use, **kwargs): + if state_update is None: + yield partial, None + else: + output = SubsetOutputType(**model_to_dict(state_update, include=writes)) + yield partial, merge_to_state(model=output, write_keys=writes, state=state) + + is_async = inspect.isasyncgenfunction(fn) + # This recreates the @streaming_action decorator + # TODO -- use the @streaming_action decorator directly + # TODO -- ensure that the function is 
the right one -- specifically it probably won't show code in the UI + # now + setattr( + fn, + FunctionBasedAction.ACTION_FUNCTION, + FunctionBasedStreamingAction( + async_action_generator if is_async else action_generator, + reads, + writes, + input_spec=get_inputs({}, fn), + originating_fn=fn, + ), + ) + setattr(fn, "bind", types.MethodType(bind, fn)) + return fn + + return decorator + + +StateModel = TypeVar("StateModel", bound=pydantic.BaseModel) + + +class PydanticTypingSystem(TypingSystem[StateModel]): + """Typing system for pydantic models. + + :param TypingSystem: Parameterized on the state model type. + """ + + def __init__(self, model_type: Type[StateModel]): + self.model_type = model_type + + def state_type(self) -> Type[StateModel]: + return self.model_type + + def state_pre_action_run_type(self, action: Action, graph: Graph) -> Type[pydantic.BaseModel]: + raise NotImplementedError( + "TODO -- crawl through" + "the graph to figure out what can possibly be optional and what can't..." + "First get all " + ) + + def state_post_action_run_type(self, action: Action, graph: Graph) -> Type[pydantic.BaseModel]: + raise NotImplementedError( + "TODO -- crawl through" + "the graph to figure out what can possibly be optional and what can't..." 
+ "First get all " + ) + + def construct_data(self, state: State) -> StateModel: + return model_from_state(model=self.model_type, state=state) + + def construct_state(self, data: StateModel) -> State: + return State(model_to_dict(data)) diff --git a/burr/telemetry.py b/burr/telemetry.py index 57879eed..0fb47fc0 100644 --- a/burr/telemetry.py +++ b/burr/telemetry.py @@ -24,7 +24,7 @@ import platform import threading import uuid -from typing import TYPE_CHECKING, Callable, List +from typing import TYPE_CHECKING, Callable, List, TypeVar from urllib import request if TYPE_CHECKING: @@ -256,7 +256,10 @@ def create_and_send_cli_event(command: str): send_event_json(event) -def capture_function_usage(call_fn: Callable) -> Callable: +CallableT = TypeVar("CallableT", bound=Callable) + + +def capture_function_usage(call_fn: CallableT) -> CallableT: """Decorator to wrap some application functions for telemetry capture. We want to use this for non-execute functions. diff --git a/docs/concepts/index.rst b/docs/concepts/index.rst index d5f76c6d..42405173 100644 --- a/docs/concepts/index.rst +++ b/docs/concepts/index.rst @@ -18,6 +18,7 @@ Overview of the concepts -- read these to get a mental model for how Burr works. 
state-persistence serde streaming-actions + state-typing hooks additional-visibility recursion diff --git a/examples/pydantic/centralized_state.py b/examples/pydantic/centralized_state.py new file mode 100644 index 00000000..5a57f891 --- /dev/null +++ b/examples/pydantic/centralized_state.py @@ -0,0 +1,217 @@ +import copy +import os +from typing import List, Optional + +import openai +import pydantic + +from burr.core import ApplicationBuilder, State, action, default, graph, when +from burr.integrations.pydantic import PydanticTypingSystem, pydantic_action +from burr.lifecycle import LifecycleAdapter + +MODES = { + "answer_question": "text", + "generate_image": "image", + "generate_code": "code", + "unknown": "text", +} + + +class ApplicationState(pydantic.BaseModel): + chat_history: List[dict[str, str]] = pydantic.Field(default_factory=list) + prompt: Optional[str] + has_openai_key: Optional[bool] + safe: Optional[bool] + mode: Optional[str] + response: dict[str, str] + + +@pydantic_action(reads=[], writes=["chat_history", "prompt"]) +def process_prompt(state: ApplicationState, prompt: str) -> ApplicationState: + state.chat_history.append({"role": "user", "content": prompt, "type": "text"}) + state.prompt = prompt + return state + + +@pydantic_action(reads=["prompt"], writes=["safe"]) +def check_safety(state: ApplicationState) -> ApplicationState: + state.safe = "unsafe" not in state.prompt + return state + + +def _get_openai_client(): + return openai.Client() + + +@pydantic_action(reads=["prompt"], writes=["mode"]) +def choose_mode(state: ApplicationState) -> ApplicationState: + prompt = ( + f"You are a chatbot. You've been prompted this: {state.prompt}. " + f"You have the capability of responding in the following modes: {', '.join(MODES)}. " + "Please respond with *only* a single word representing the mode that most accurately " + "corresponds to the prompt. Fr instance, if the prompt is 'draw a picture of a cat', " + "the mode would be 'generate_image'. 
If the prompt is 'what is the capital of France', the mode would be 'answer_question'." + "If none of these modes apply, please respond with 'unknown'." + ) + + result = _get_openai_client().chat.completions.create( + model="gpt-4", + messages=[ + {"role": "system", "content": "You are a helpful assistant"}, + {"role": "user", "content": prompt}, + ], + ) + content = result.choices[0].message.content + mode = content.lower() + if mode not in MODES: + mode = "unknown" + state.mode = mode + return state + + +@pydantic_action(reads=["prompt", "chat_history"], writes=["response"]) +def prompt_for_more(state: ApplicationState) -> ApplicationState: + state.response = { + "content": "None of the response modes I support apply to your question. Please clarify?", + "type": "text", + "role": "assistant", + } + return state + + +@action(reads=[], writes=["has_openai_key"]) +def check_openai_key(state: State) -> State: + result = {"has_openai_key": "OPENAI_API_KEY" in os.environ} + return state.update(**result) + + +@pydantic_action(reads=["prompt", "chat_history", "mode"], writes=["response"]) +def chat_response( + state: ApplicationState, + prepend_prompt: str, + model: str = "gpt-3.5-turbo", +) -> ApplicationState: + chat_history = copy.deepcopy(state.chat_history) + chat_history[-1]["content"] = f"{prepend_prompt}: {chat_history[-1]['content']}" + chat_history_api_format = [ + { + "role": chat["role"], + "content": chat["content"], + } + for chat in chat_history + ] + client = _get_openai_client() + result = client.chat.completions.create( + model=model, + messages=chat_history_api_format, + ) + response = result.choices[0].message.content + state.response = {"content": response, "type": MODES[state.mode], "role": "assistant"} + return state + + +@pydantic_action(reads=["prompt", "chat_history", "mode"], writes=["response"]) +def image_response(state: ApplicationState, model: str = "dall-e-2") -> ApplicationState: + client = _get_openai_client() + result = 
client.images.generate( + model=model, prompt=state.prompt, size="1024x1024", quality="standard", n=1 + ) + response = result.data[0].url + state.response = {"content": response, "type": MODES[state.mode], "role": "assistant"} + return state + + +@pydantic_action(reads=["response", "mode", "safe", "has_openai_key"], writes=["chat_history"]) +def response(state: ApplicationState) -> ApplicationState: + if not state.has_openai_key: + chat_item = { + "role": "assistant", + "content": "You have not set an API key for [OpenAI](https://www.openai.com). Do this " + "by setting the environment variable `OPENAI_API_KEY` to your key. " + "You can get a key at [OpenAI](https://platform.openai.com). " + "You can still look at chat history/examples.", + "type": "error", + } + elif not state.safe: + chat_item = { + "role": "assistant", + "content": "I'm sorry, I can't respond to that.", + "type": "error", + } + else: + chat_item = state.response + state.chat_history.append(chat_item) + return state + + +graph_object = ( + graph.GraphBuilder() + .with_actions( + prompt=process_prompt, + check_openai_key=check_openai_key, + check_safety=check_safety, + decide_mode=choose_mode, + generate_image=image_response, + generate_code=chat_response.bind( + prepend_prompt="Please respond with *only* code and no other text (at all) to the following:", + ), + answer_question=chat_response.bind( + prepend_prompt="Please answer the following question:", + ), + prompt_for_more=prompt_for_more, + response=response, + ) + .with_transitions( + ("prompt", "check_openai_key", default), + ("check_openai_key", "check_safety", when(has_openai_key=True)), + ("check_openai_key", "response", default), + ("check_safety", "decide_mode", when(safe=True)), + ("check_safety", "response", default), + ("decide_mode", "generate_image", when(mode="generate_image")), + ("decide_mode", "generate_code", when(mode="generate_code")), + ("decide_mode", "answer_question", when(mode="answer_question")), + ("decide_mode", 
"prompt_for_more", default), + ( + ["generate_image", "answer_question", "generate_code", "prompt_for_more"], + "response", + ), + ("response", "prompt", default), + ) + .build() +) + + +def application( + hooks: Optional[List[LifecycleAdapter]] = None, + project_id: str = "test_centralized_state", +): + if hooks is None: + hooks = [] + # we're initializing above so we can load from this as well + # we could also use `with_tracker("local", project=project_id, params={"storage_dir": storage_dir})` + return ( + ApplicationBuilder() + .with_graph(graph_object) + # initializes from the tracking log if it does not already exist + .with_hooks(*hooks) + .with_tracker("local", project=project_id) + .with_entrypoint("prompt") + .with_state( + ApplicationState( + chat_history=[], + ) + ) + .with_typing(PydanticTypingSystem(model_type=ApplicationState)) + .build() + ) + + +if __name__ == "__main__": + app = application() + # app.visualize( + # output_file_path="statemachine", include_conditions=False, view=True, format="png" + # ) + action, result, state = app.run( + halt_after=["response"], inputs={"prompt": "Who was Aaron Burr, sir?"} + ) + state.data diff --git a/examples/pydantic/decentralized_state.py b/examples/pydantic/decentralized_state.py new file mode 100644 index 00000000..5f40da57 --- /dev/null +++ b/examples/pydantic/decentralized_state.py @@ -0,0 +1,224 @@ +import copy +import os +from typing import List, Optional + +import openai + +from burr.core import Application, ApplicationBuilder, State, default, graph, when +from burr.core.action import action +from burr.lifecycle import LifecycleAdapter +from burr.tracking import LocalTrackingClient + +MODES = { + "answer_question": "text", + "generate_image": "image", + "generate_code": "code", + "unknown": "text", +} + + +@action(reads=[], writes=["chat_history", "prompt"]) +def process_prompt(state: State, prompt: str) -> State: + result = {"chat_item": {"role": "user", "content": prompt, "type": "text"}} + return ( 
+ state.wipe(keep=["prompt", "chat_history"]) + .append(chat_history=result["chat_item"]) + .update(prompt=prompt) + ) + + +@action(reads=[], writes=["has_openai_key"]) +def check_openai_key(state: State) -> State: + result = {"has_openai_key": "OPENAI_API_KEY" in os.environ} + return state.update(**result) + + +@action(reads=["prompt"], writes=["safe"]) +def check_safety(state: State) -> State: + result = {"safe": "unsafe" not in state["prompt"]} # quick hack to demonstrate + return state.update(safe=result["safe"]) + + +def _get_openai_client(): + return openai.Client() + + +@action(reads=["prompt"], writes=["mode"]) +def choose_mode(state: State) -> State: + prompt = ( + f"You are a chatbot. You've been prompted this: {state['prompt']}. " + f"You have the capability of responding in the following modes: {', '.join(MODES)}. " + "Please respond with *only* a single word representing the mode that most accurately " + "corresponds to the prompt. Fr instance, if the prompt is 'draw a picture of a cat', " + "the mode would be 'generate_image'. If the prompt is 'what is the capital of France', the mode would be 'answer_question'." + "If none of these modes apply, please respond with 'unknown'." + ) + + result = _get_openai_client().chat.completions.create( + model="gpt-4", + messages=[ + {"role": "system", "content": "You are a helpful assistant"}, + {"role": "user", "content": prompt}, + ], + ) + content = result.choices[0].message.content + mode = content.lower() + if mode not in MODES: + mode = "unknown" + result = {"mode": mode} + return state.update(**result) + + +@action(reads=["prompt", "chat_history"], writes=["response"]) +def prompt_for_more(state: State) -> State: + result = { + "response": { + "content": "None of the response modes I support apply to your question. 
Please clarify?", + "type": "text", + "role": "assistant", + } + } + return state.update(**result) + + +@action(reads=["prompt", "chat_history", "mode"], writes=["response"]) +def chat_response( + state: State, prepend_prompt: str, display_type: str = "text", model: str = "gpt-3.5-turbo" +) -> State: + chat_history = copy.deepcopy(state["chat_history"]) + chat_history[-1]["content"] = f"{prepend_prompt}: {chat_history[-1]['content']}" + chat_history_api_format = [ + { + "role": chat["role"], + "content": chat["content"], + } + for chat in chat_history + ] + client = _get_openai_client() + result = client.chat.completions.create( + model=model, + messages=chat_history_api_format, + ) + response = result.choices[0].message.content + result = {"response": {"content": response, "type": MODES[state["mode"]], "role": "assistant"}} + return state.update(**result) + + +@action(reads=["prompt", "chat_history", "mode"], writes=["response"]) +def image_response(state: State, model: str = "dall-e-2") -> State: + """Generates an image response to the prompt. Optional save function to save the image to a URL.""" + client = _get_openai_client() + result = client.images.generate( + model=model, prompt=state["prompt"], size="1024x1024", quality="standard", n=1 + ) + response = result.data[0].url + result = {"response": {"content": response, "type": MODES[state["mode"]], "role": "assistant"}} + return state.update(**result) + + +@action(reads=["response", "mode", "safe", "has_openai_key"], writes=["chat_history"]) +def response(state: State) -> State: + if not state["has_openai_key"]: + result = { + "chat_item": { + "role": "assistant", + "content": "You have not set an API key for [OpenAI](https://www.openai.com). Do this " + "by setting the environment variable `OPENAI_API_KEY` to your key. " + "You can get a key at [OpenAI](https://platform.openai.com). 
" + "You can still look at chat history/examples.", + "type": "error", + } + } + elif not state["safe"]: + result = { + "chat_item": { + "role": "assistant", + "content": "I'm sorry, I can't respond to that.", + "type": "error", + } + } + else: + result = {"chat_item": state["response"]} + return state.append(chat_history=result["chat_item"]) + + +graph = ( + graph.GraphBuilder() + .with_actions( + prompt=process_prompt, + check_openai_key=check_openai_key, + check_safety=check_safety, + decide_mode=choose_mode, + generate_image=image_response, + generate_code=chat_response.bind( + prepend_prompt="Please respond with *only* code and no other text (at all) to the following:", + ), + answer_question=chat_response.bind( + prepend_prompt="Please answer the following question:", + ), + prompt_for_more=prompt_for_more, + response=response, + ) + .with_transitions( + ("prompt", "check_openai_key", default), + ("check_openai_key", "check_safety", when(has_openai_key=True)), + ("check_openai_key", "response", default), + ("check_safety", "decide_mode", when(safe=True)), + ("check_safety", "response", default), + ("decide_mode", "generate_image", when(mode="generate_image")), + ("decide_mode", "generate_code", when(mode="generate_code")), + ("decide_mode", "answer_question", when(mode="answer_question")), + ("decide_mode", "prompt_for_more", default), + ( + ["generate_image", "answer_question", "generate_code", "prompt_for_more"], + "response", + ), + ("response", "prompt", default), + ) + .build() +) + + +def base_application( + hooks: List[LifecycleAdapter], + app_id: str, + storage_dir: str, + project_id: str, +): + if hooks is None: + hooks = [] + # we're initializing above so we can load from this as well + # we could also use `with_tracker("local", project=project_id, params={"storage_dir": storage_dir})` + tracker = LocalTrackingClient(project=project_id, storage_dir=storage_dir) + return ( + ApplicationBuilder() + .with_graph(graph) + # initializes from the tracking 
log if it does not already exist + .initialize_from( + tracker, + resume_at_next_action=False, # always resume from entrypoint in the case of failure + default_state={"chat_history": []}, + default_entrypoint="prompt", + ) + .with_hooks(*hooks) + .with_tracker(tracker) + .with_identifiers(app_id=app_id) + .build() + ) + + +def application( + app_id: Optional[str] = None, + project_id: str = "demo_chatbot", + storage_dir: Optional[str] = "~/.burr", + hooks: Optional[List[LifecycleAdapter]] = None, +) -> Application: + return base_application(hooks, app_id, storage_dir, project_id=project_id) + + +if __name__ == "__main__": + app = application() + app.visualize( + output_file_path="statemachine", include_conditions=False, view=True, format="png" + ) + print(app.run(halt_after=["response"], inputs={"prompt": "Who was Aaron Burr, sir?"})) diff --git a/examples/youtube-to-social-media-post/application.py b/examples/youtube-to-social-media-post/application.py index 683cec3e..3a7f80c1 100644 --- a/examples/youtube-to-social-media-post/application.py +++ b/examples/youtube-to-social-media-post/application.py @@ -1,14 +1,19 @@ import textwrap -from typing import Union +from typing import Any, Generator, Optional, Tuple, Union import instructor import openai from pydantic import BaseModel, Field from pydantic.json_schema import SkipJsonSchema +from rich.console import Console from youtube_transcript_api import YouTubeTranscriptApi -from burr.core import Application, ApplicationBuilder, State, action -from burr.core.persistence import SQLLitePersister +from burr.core import Application, ApplicationBuilder +from burr.integrations.pydantic import ( + PydanticTypingSystem, + pydantic_action, + pydantic_streaming_action, +) class Concept(BaseModel): @@ -32,14 +37,16 @@ class SocialMediaPost(BaseModel): description="The body of the social media post. It should be informative and make the reader curious about viewing the video." 
) concepts: list[Concept] = Field( - description="Important concepts about Hamilton or Burr mentioned in this post.", - min_items=1, + description="Important concepts about Hamilton or Burr mentioned in this post -- please have at least 1", + min_items=0, max_items=3, + validate_default=False, ) key_takeaways: list[str] = Field( - description="A list of informative key takeways for the reader.", - min_items=1, + description="A list of informative key takeways for the reader -- please have at least 1", + min_items=0, max_items=4, + validate_default=False, ) youtube_url: SkipJsonSchema[Union[str, None]] = None @@ -66,24 +73,72 @@ def display(self) -> str: ) -@action(reads=[], writes=["transcript"]) -def get_youtube_transcript(state: State, youtube_url: str) -> State: +class ApplicationState(BaseModel): + # Make these have defaults as they are only set in actions + transcript: Optional[str] = Field( + description="The full transcript of the YouTube video.", default=None + ) + post: Optional[SocialMediaPost] = Field( + description="The generated social media post.", default=None + ) + + +class ApplicationStateStream(ApplicationState): + # Make these have defaults as they are only set in actions + post_generator: Optional[Generator[SocialMediaPost, None, None]] = None + + class Config: + arbitrary_types_allowed = True + + def __copy__(self, memo: dict[int, Any] | None = None): + post_generator = self.post_generator + self.post_generator = None + out = self.model_copy(deep=True, update={"post_generator": post_generator}) + self.post_generator = post_generator + return out + # # TODO -- ensure that post_generator is copied by reference, not value... 
+ # # Ignore this for now -- this is specifically dealing with a copy() issue + # # then delegate to the superclass + # if memo is None: + # memo = {} + # if id(self) in memo: + # return memo[id(self)] + + # # Create a shallow copy to modify + # new_obj = copy.copy(self) + + # # Copy each attribute except the generator which should be shared + # for k, v in self.__dict__.items(): + # if k != "post_generator": + # setattr(new_obj, k, copy.deepcopy(v, memo)) + + # # Reference the same generator instance + # new_obj.post_generator = self.post_generator + + # # Store in memoization dictionary + # memo[id(self)] = new_obj + + # return new_obj + + +@pydantic_action(reads=[], writes=["transcript"]) +def get_youtube_transcript(state: ApplicationState, youtube_url: str) -> ApplicationState: """Get the official YouTube transcript for a video given it's URL""" _, _, video_id = youtube_url.partition("?v=") transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=["en"]) - full_transcript = " ".join([f"ts={entry['start']} - {entry['text']}" for entry in transcript]) + state.transcript = " ".join([f"ts={entry['start']} - {entry['text']}" for entry in transcript]) + return state # store the transcript in state - return state.update(transcript=full_transcript) -@action(reads=["transcript"], writes=["post"]) -def generate_post(state: State, llm_client) -> State: +@pydantic_action(reads=["transcript"], writes=["post"]) +def generate_post(state: ApplicationState, llm_client) -> ApplicationState: """Use the Instructor LLM client to generate `SocialMediaPost` from the YouTube transcript.""" # read the transcript from state - transcript = state["transcript"] + transcript = state.transcript response = llm_client.chat.completions.create( model="gpt-4o-mini", @@ -96,59 +151,162 @@ def generate_post(state: State, llm_client) -> State: {"role": "user", "content": transcript}, ], ) + state.post = response # store the chapters in state - return state.update(post=response) - - 
-@action(reads=["post"], writes=["post"]) -def rewrite_post(state: State, llm_client, user_prompt: str): - post = state["post"] + return state + + +@pydantic_streaming_action( + reads=["transcript"], + writes=["post"], + state_input_type=ApplicationState, + state_output_type=ApplicationState, + stream_type=SocialMediaPost, +) +def generate_post_streaming( + state: ApplicationStateStream, llm_client +) -> Generator[Tuple[SocialMediaPost, Optional[ApplicationState]], None, None]: + """Use the Instructor LLM client to generate `SocialMediaPost` from the YouTube transcript.""" - response = llm_client.chat.completions.create( + transcript = state.transcript + response = llm_client.chat.completions.create_partial( model="gpt-4o-mini", response_model=SocialMediaPost, messages=[ { "role": "system", - "content": f"Take the previously generated social media post and modify it according to the following instructions: {user_prompt}", + "content": "Analyze the given YouTube transcript and generate a compelling social media post.", }, - {"role": "user", "content": post.model_dump_json()}, + {"role": "user", "content": transcript}, ], + stream=True, ) + final_post = None + for post in response: + final_post = post + yield post, None + + yield final_post, state + + +@pydantic_streaming_action( + reads=["transcript"], + writes=["post"], + state_input_type=ApplicationState, + state_output_type=ApplicationState, + stream_type=SocialMediaPost, +) +async def generate_post_streaming_async( + state: ApplicationStateStream, llm_client +) -> Generator[Tuple[SocialMediaPost, Optional[ApplicationState]], None, None]: + """Use the Instructor LLM client to generate `SocialMediaPost` from the YouTube transcript.""" - # pass the youtube_url from the previous post version - response.youtube_url = post.youtube_url + transcript = state.transcript + response = llm_client.chat.completions.create_partial( + model="gpt-4o-mini", + response_model=SocialMediaPost, + messages=[ + { + "role": "system", + 
"content": "Analyze the given YouTube transcript and generate a compelling social media post.", + }, + {"role": "user", "content": transcript}, + ], + stream=True, + ) + final_post = None + async for post in response: + final_post = post + yield post, None - return state.update(post=response) + yield final_post, state -def build_application() -> Application: +def build_application() -> Application[ApplicationState]: llm_client = instructor.from_openai(openai.OpenAI()) - return ( + app = ( ApplicationBuilder() .with_actions( get_youtube_transcript, generate_post.bind(llm_client=llm_client), - rewrite_post.bind(llm_client=llm_client), ) .with_transitions( ("get_youtube_transcript", "generate_post"), - ("generate_post", "rewrite_post"), - ("rewrite_post", "rewrite_post"), ) - .with_state_persister(SQLLitePersister(db_path=".burr.db", table_name="state")) + # .with_state_persister(SQLLitePersister(db_path=".burr.db", table_name="state")) .with_entrypoint("get_youtube_transcript") + .with_typing(PydanticTypingSystem(ApplicationState)) + .with_state(ApplicationState()) .with_tracker(project="youtube-post") .build() ) + return app -if __name__ == "__main__": - app = build_application() - app.visualize(output_file_path="statemachine.png") +def build_streaming_application() -> Application[ApplicationState]: + llm_client = instructor.from_openai(openai.OpenAI()) + app = ( + ApplicationBuilder() + .with_actions( + get_youtube_transcript, + generate_post=generate_post_streaming.bind(llm_client=llm_client), + ) + .with_transitions( + ("get_youtube_transcript", "generate_post"), + ) + # .with_state_persister(SQLLitePersister(db_path=".burr.db", table_name="state")) + .with_entrypoint("get_youtube_transcript") + .with_typing(PydanticTypingSystem(ApplicationState)) + .with_state(ApplicationState()) + .with_tracker(project="youtube-post") + .build() + ) + return app - _, _, state = app.run( + +def build_streaming_application_async() -> Application[ApplicationState]: + llm_client = 
instructor.from_openai(openai.AsyncOpenAI()) + app = ( + ApplicationBuilder() + .with_actions( + get_youtube_transcript, + generate_post=generate_post_streaming_async.bind(llm_client=llm_client), + ) + .with_transitions( + ("get_youtube_transcript", "generate_post"), + ) + # .with_state_persister(SQLLitePersister(db_path=".burr.db", table_name="state")) + .with_entrypoint("get_youtube_transcript") + .with_typing(PydanticTypingSystem(ApplicationState)) + .with_state(ApplicationState()) + .with_tracker(project="test-youtube-post") + .build() + ) + return app + + +async def run_async(): + console = Console() + app = build_streaming_application_async() + action, streaming_container = await app.astream_result( + halt_after=["generate_post"], + inputs={"youtube_url": "https://www.youtube.com/watch?v=hqutVJyd3TI"}, + ) + async for post in streaming_container: + obj = post.model_dump() + console.clear() + console.print(obj) + + +if __name__ == "__main__": + console = Console() + app = build_streaming_application() + action, streaming_container = app.stream_result( halt_after=["generate_post"], inputs={"youtube_url": "https://www.youtube.com/watch?v=hqutVJyd3TI"}, ) + for post in streaming_container: + obj = post.model_dump() + console.clear() + console.print(obj) diff --git a/examples/youtube-to-social-media-post/server.py b/examples/youtube-to-social-media-post/server.py index 75b8a4d4..4c0fd20f 100644 --- a/examples/youtube-to-social-media-post/server.py +++ b/examples/youtube-to-social-media-post/server.py @@ -1,45 +1,59 @@ import contextlib import logging -from typing import Optional -import application import fastapi import uvicorn +from application import ( + ApplicationState, + ApplicationStateStream, + build_application, + build_application_iterator_streaming, +) +from fastapi.responses import StreamingResponse from burr.core import Application logger = logging.getLogger(__name__) # define a global `burr_app` variable -burr_app: Optional[Application] = None - - 
-def get_burr_app() -> Application: - """Retrieve the global Burr app.""" - if burr_app is None: - raise RuntimeError("Burr app wasn't instantiated.") - return burr_app +burr_app: Application[ApplicationState] = None +# Second variant -- this uses a stream + a self-loop +# Note this will save a *lot* to the tracker, each stream! +burr_app_streaming_iterator: Application[ApplicationStateStream] = None @contextlib.asynccontextmanager async def lifespan(app: fastapi.FastAPI): """Instantiate the Burr application on FastAPI startup.""" # set value for the global `burr_app` variable - global burr_app - burr_app = application.build_application() + global burr_app, burr_app_streaming_iterator + burr_app = build_application() + burr_app_streaming_iterator = build_application_iterator_streaming() yield app = fastapi.FastAPI(lifespan=lifespan) -@app.get("/social_media_post") -def social_media_post(youtube_url: str, burr_app: Application = fastapi.Depends(get_burr_app)): +@app.get("/social_media_post", response_model=ApplicationState) +def social_media_post(youtube_url: str) -> ApplicationState: """Creates a completion for the chat message""" _, _, state = burr_app.run(halt_after=["generate_post"], inputs={"youtube_url": youtube_url}) - post = state["post"] - return {"formatted_post": post.display(), "post": post.model_dump()} + return state.data + + +@app.get("/social_media_post_streaming_1", response_model=StreamingResponse) +def social_media_post_streaming(youtube_url: str) -> StreamingResponse: + """Creates a completion for the chat message""" + + def gen(): + for action, _, state in burr_app_streaming_iterator.iterate( + halt_after=["final"], inputs={"youtube_url": youtube_url} + ): + yield state.data.model_dump_json() + + return StreamingResponse(gen()) if __name__ == "__main__": diff --git a/examples/youtube-to-social-media-post/statemachine.png b/examples/youtube-to-social-media-post/statemachine.png index 
bdf72f90c624b630570a76bc5c0fcea6f18910a6..e9ae43146bf50bebad32d3b98a3ed6a224b0c708 100644 GIT binary patch literal 11515 zcmb`tbyQVR*Dk&(X^;@41qCD|q`SMjBqXH;q#Hy)x*Mbs>25??y1To(>n`7KeBUp| zxc}U7*BP9{+2icB_nvdExt{qvGgwhx0v&}A1%e=SDM?Xf@VN;7c9CGgyYQiv4)}Ox zC?g>XJ^g$AXwHv;pjVKTsF12l>Oq>bCXN(Yw>No!9-JhOkl^>MKfd`{qpG6x5?DDp zg5}wU0(6F4?i%AN^wdaBLRhG@K@>rwFDN7fMcfo57X>Cc@L@!JBq{p0PEKahlG1Jt zH~abz_xI7l+YBJsHgBY3Z3uRRxhENkA3+PJE&sWfatjs~mZPKNLA$k;m8h7Q((Pk> z3lEbQ#_sMe37$lzqT;PrVhhpo%WY(mHp+AX@AQu`4oJ;gu2Y-#;#Q*;NJ17WA?-v9GflmyK!(H+Z#HBZt{J*zxU%!6s z=y;K#D{W;})Y|GTCDl8`5TrxcV_f0y-#!%Ift;6@M@vhq%WZrxTm9|Zw|VT}l_$wE z2@JZ8PVZ`^pRgG9TjrZw9qjB*z==a__ur4Ln)34V^CKcKYo(9mw!d(fQl6u_IF;6W z+?>`sY#GiiE-r%e-_g?A+u0GYev16|t*goPYg<(?(x{QF#d7yJQYqJ$F8x8Y=<; zuluc`3@tc5KEC3jqED=Zkg>6`Y#O(kyu7@s>iG2ZbypZszp?JwUz;JkTwl#fucXb5rQX-5nE+a$0{c_(I4pFb}b6a~mT!`oCdhe3=6QASmczk?3 z0xBNxLy>-~SDPP#rj_BpL18(lrKQEp%>0GhvnP`L`}gk2)Gsdol#12b8}eV=QubC(mMfD$C7a0Pd^iur&R9Mfzei#W>R`+K z`uh6xv?dWzN({9^VPT7|>S8#gz15tmV%p@Rd9f7@>x@0jT5#By+2 zTU*<~#rVWTaBwgI0RgiJ1~DfDT^%lDkL`eHd;k7@KtO<|s*FrXKmbCp=$;rvZRgH=N;zf0JwVb46&%i*Zu&{`TiHQjauEk~$p$j~ysHl{bl=R;v-wY

qA9GW_Hfa&4uydatw}-$J^8n%@hfQ zg#v_EN}h3W)b=GX1_lJ&+}vzzZMC|dnVzh6ic&Ds(V;?RZUMoWndFei?Rjt`Atb1w zk>EJ>=MOAoZ*N~l*icduexdLyFYj617aHL9(6BJ~Xptr2QW|>tdH%?gCncrn140}t zbrN;DN@XRb5k7Pf!YC*xA3uJCp5x(lU3*--Y78Vvo!6_*zFi=GI)d!K34r@0E z2_E+HX{uBoMo>{PvIjOv*2&Sa^HIG~I>jCV0iiLfcgWwedQ*Xi z@@Ze0y^kjo@Y>g-apCp4+@FyMC+-~@!bC@(7#@DfVu}VCoZU5n6);<6-roUy4>7qM zFPB(sv)Y-OQtg3&M^k^U4(@U?j!q-7ytud+_>F>sA}yT+pHcPbceS>PzW&peT-5Q- z7Z7YEMn>el@Ol1`feF~7(4#opB6;i2XaUtgGD zQ40qh0uqw2?ls{5hQiFTj-CIE{spt4^p($?mX?-a;c{`+_w-0^mP2LkS2pJ6JJ;9O z@(N*%_=OzHlV_?L=4W3QH&^WX;@?4$>FI+8Og+Z$F#^ZN#u|b5EG%}zNqFGl;fYB| z%42z)_8|x#KLO01#^!zf=Ao>t{GNq{*)=^qePQ@FKXZ~S9xg8Ndt*sO#o@VHTj%|0-}aTn zQih>49z-ZSJRB@Wkc~h>!^g!)h$XlY4zy#fYl@_M)h)?z7jado}f9?32jDLWq>9eslx999N4CD`BVy)jyF zAW}4nG}~^e57yTFUyP`s@8iVu8r$01Iy*b>K*nI_$Pfk{H zwhG4Vqu3hZl4R50o1pl&6%-V>fFEdUYu~YgL2HcSn$2y|x6~s3FI`KnmE78ydJ785uWGZ}o$;Doszc zVH3ax4}gO0QcKu^mP0~pEN;ox47q-b$4;8x^HiVBDbZt^`_n9J-%oHARDKFVli+*n zZyfGb4VZyafF5o)UF6`i&_8(rIB|;FBqDLHpy(gItP;tFhANq zckjTJV_RBtk4ObNlffqBd4Fwp_j$9s(eu5hgM)*LN(@Nl8QUuv=L>Lr&+NipON8$) zG}aar1WgIwB5msdjP(CO9sjuo%KzNe{{X4~gC7ll`LKYfqaRYA5*MeYr4{_HBZS%G z`WU3k82r&&*zJ(NhhyX8Z@}KWi8|FDh*_(JgxbbcAaukpEG&$Hf$;|WXS*gb#?+O1 z?zUVcoBP+05E(G;KJ${=nwsY2w5rTdBb z@Awz6={P!;ThqqvuCD&k;q!zumOETtS@C_B$v{bIVs74R$>j+Hjxhl!F*&JDjIp)9 z-|w~*yv^Q|JTD|CH!wX7;`qm8s?Luni4OpIWY^VA19*`(xwx4KA zl4bV!uO=raXJuz+@O$1qJ_JUOMuSwFk+ZMyRX!gT>4$rc?ekz!04H=wNQ{h(h=_>T z*s2*w77;8#8a{Z%M`sp&J-_Hz2o*=8E2ou3>T6OZM$z`(hg zIieKplG{C`LVBzZZyvSA`CD`I)~M&3-M1tT?=$})b2-_ru=3=Fd2ccdK1Yf=IdzLu zaZE%^=c}=N1?R*cM*plqNE8vRL|XSSueLwI1l#7QClEYe-G2vA4~duaY&o0m!hCwO ziz^eo@CR)43GsVTVYmo=9=o@E@8m>8vgK3}Cp|89C+h6h9(nB^okMz?bDxhWWY*T) z&gx)Jj5fJn$)^<5Od2!26c-og#5y_HD!I%O7Jb?f`Ou$NUzwei1rSdU;o{}ZCP1A> zi_K@6)h+nd12*f0-c;;m`aDRV48ja1@uGCD2HnN0;jW;q(5}%mZLwKFW zR#{mIBWUt@uG#V_+Qen|J%N&fs-zd>>d_!3)A80lyD-+aeZ>-;W)m-$T2=~-uFvMD0kD(XB zDb-s%wxgoJi^ffY{_fz}G$+bs293_+$lTfJ)p&9}8A}PNz%dt}oScsHzOiLR(;xpx zI+@xdtc2)iKDWuqKSbP>ms(1G7LBvr5BzQG>=}oJ!mYLL_%A6T;+ZP#p<$Pmj7O{Q 
zb@pGpPg690T@tImyk26BN1VD-f4n=Y`TdGFJyM~UyVbcnKNeFC{ldShiUg0IEs6I+ zr^1nL`%A-8i%~zE2%JKb=cDme?jCimtyQ+KoFVVyT!0(?+5K#=NCwO3dTgwmwJG;4 zy^;9Cw(-o^qjKs@ef`h!5yzR`;oobFWb=2_3Y5?xqQZC2k`;aJ424^ZXw?$Wna-6TGx8&kMq(Jnu}a$3m;Vx->6^cD{L|aBzKgJ~&Vw zwK5PE1}W+(H_cD(?i`eT?8>H|;fa5NXQj`&B|ppsDXJ6EzoW*4q%HhkApWdm6f-F^ zbu;wnZ@=r0tjO}quWbjRb>brIW_T}+nBpN_Qyr5smh}SG<^FI3)lY%6OW8C(*Em17 zY*YQ5lA4b>UC69z3RHGADd{g;+q+fdrDkkeBIIs}iElm|w3g z%vq!I{dq!;R#AQCv(8bJU5D7W;vhadw@poFHgm6| z%-Uv}SL8RkmO_p=Lf2tS&}3LNedQvX$~w@bA|^J1&a|Q>9m@5qW7!y^h+c-=(Fx-?Zn){pGPP}fOvoN zW>{w@=?KTnm64!`C)SfD`yDlEa8Q@{qVus{Xh75VPzX9Ba6)^wCi`2RBDm^k)|%i3 z1~NFRcKMZHdHnPBTK*t#Nk7#bgZb|Q`X}f1C>APpc4)b2+Bu|lt}m#Og|GRG_{$1l z#rATZSfxl2W@593$6=bdwq)p-_nw_JLID3lT2&YpB5q1>kt06a)Eb{>ODQ|g4Yz@`hF&)s*nl+D5L1}xr*nPwu53khJk z{$API^n-6V0M~!pmQCb_n=2_Uar1hriGM`e&xeIzqdu|q1&HUixSjuy$wfjA2~A_A zq+_eEVj@Egc6HGg*6#Y8NQcOXCTI8-LfzNl*{PjJZM8+uiHqk8F8*m8PYJpATf@_)962&_J-u$&0{tpH z{5e|KuBBz4%=evhbx1NeB0DGFkS4!z5LxCnhx7Au%GYevES^;xxcv#xkN27mD;YfB z&^WdgMTkTx-NW5**@kfL_v&_WCXBt=jG%(hwb*I;0_@UIU3pfMF?JRTmdXRmp|FOx?$ zSfAKXdMU-o`jBP|8;gPjHN`J_4ZEi+t?JWq1O=IF_gCfOIP_@UDJaV)o%;4Lf^v6_ zKT-1P2OpxovA;!EHuJPno)~@Rv)aVCz&S%UGPb3KOk+N&yFY{rSrpR300wUb8Gcj1 zj>f!lr$+Y9elTccEyrZT@O3-PE2{UYZ3c;QwFo}Rg(y(u(<7X#g$xU15P@{?v$$ln zX`VfJXUQAOhkE&6b?R_sTwbKLj|92q}_(TgQ^6S@GGe16O zo*R0BySGl4hWbL}D?!VBbDq5TbDFC0Pdd6cFQA@Nfz*J2Yk#$MCgb&IWrrxpsBq9; zhQ!d0d2!O?;lbN8seWD4_RJqyD3G-OxE7hqPH}PA#f6`4#?}WYBEx{?>@fEXBp7DQ zBf3J2QGy#H2u)EP!Kdo$wUX&m*us#cG(5Dq#_KO{Hc-eE_$TkxWiTqgS><6KbxSA8 z>S1q9X2a@UCtM?CZm$_xvoXI`U@HIL-f~vj>iTj!pNj8u_5!sVUU3^w-%`2Vo^}J= z6P_lSuhAiXDr&j1q+Z#lbElVSLK&a-uGi>Wl~NP>8g$e(p@ZaM($Q>foCxYSoh`

&V6qBtNhzwcD9OOoAo`<{$Z0MD#B6R~fkkXtTyTTxT*)D+HLGW0so!tbA^j zD_g$ptx#51?fEg*Enj1LThUav?O;ks-RuBv6ahr4*P0x6>KjwR z8i=7mP!#Ogt@*g(gYdTpGjWqWAv)a+qKcVe=?!B#H3dUke`VOS%U~ENupNQ^!p)6`o{o*{dVK}tej{oHHKMGso!*MT zhg?M^GWs_Zx9?YN?kfwj7`Y;Bb*~)KX((tX$A=Yv*C0R;pU7*Piv4PwKww~yK`O_h z;he1zAR`qSnu>W@%+mP?0_vKQA!#`;H=g~{37KP;rVFd^NyeNp9cP9lGW-iB#ygec z-gaONzD2iN#q1>>K9a#5C4c^ITmOQu9ce!t6l?;&%Sfdt)V_#|Y;szSEKB5sWSu_S z*jO}_*;2F9LlM-n=^Ojy_9RYsRtE2d%)Wl?sFt~jEm}wr=As$&__nao?oE)Gh@p5> zdgM7+h<-jf*_bdW@uBf+faZ{se%;R8`wg&_nYJi|ws`a0K3bnb2q8(A&~ri;*bwV% zUr$4*?7H1|cHYA{M&FVS9qN)xs<9ZV#ZbMZ?jyZS5Y|-fcb@WXC(=05WW|R%D07=Q ztZ;C#xT*%7ds+K<%m=%}bEY?hPFq4y>7 z#X1@)@2JR;q5KDq)-W%RM=4^K!t$2z==8~P#Zq=r3H9;=>Dm`2xqq^)70sqEVKS6^OjLpt5^W`=zD-Vb9|u_Ol|SsqH6-sAjbj?SBB%|J>vE@EewfCOJmcqQ zCif*c)h(u>qIw1mgrN&lWThH!ZLx~D=VV@evrZ&RCL~(y6#eDBG(t;H%ie!|MYiE{ z;gdSw5+z52$8t~}m@dIl5{3Bo-iG<7Cm~Hmk#F!$gneWzd2Ou{5jruLRmtuIz&@VP z-D6`e-bVKKv#%H^`Rwb8JYN_`6Ws@ zhzaaCR))D7>mu@G58B$%$M-vb1e}?cw4P|cxH&cFea)a@WEdyC*5x@{7%D0*mcz#F zmzUcdPPV+vsPo#KzSCe#pKoAzOP|H{WjcNge`Y*Nz;HfRo=VBGw z#fqG?^aaMi(A1`{RcQDupl}0F-gHLxBf1q&d4ET6Oo1PTC1(1*^5g|zA+xeR2@+FGb@z#^%<(1eN9P4fg>Il>~ zG+-llfE<1(_A$syZer>*zg8Ik=DKPJ+uTwP897)kh5cI^pOU)jaLDQNhXk3xOKHg< zMy-k*ITys@%*E^I3ek0{4Cq8~4K2)Zq z-Y4`oy6@Z%DFtmu#Nw?NJKSVPp3>0nVx_!*;jtNR?OZuwF&o6i!h5uw+Z-Y=7de;s zE>%OqLZj--Q?o5(WCDgOTYAj4u~MLD-V=(sSMn5xylNuoGVAIl{Gb!zm1z#Tv*WgJgc@@ z{m@x0WUSTHNGa*wtqi14N12EPsN~NQ^ST)HJP#ND1ZC5)va-S>APn5EVFo)HQD6mO zw5^@ApI-ohFET1>gtF33+p>llkn>elfGVmv%X#pZh#*~P^?uq`3g>^jhxuQe3ot4c zR+J6#;zM}^V9~)?;BPvLijP-UQ=2f=r4p?bY7cns@qdS6`$q`=KQpyXH*I@}fgwNW z-Ny~215ktjDa;gbmO&*Y42(G!2n|nSlzzgQpUln6>wQr18F)Q!zt~Fe%KUiNeD~?o zr)RogdcMN86#~AMf_{>(~AL{UA{aX!sK=kYx1ec2NN{fryWfPaF5WeWfd$qiNk2qiFu!AdyjTmQIk9VSbL$Q#tu8BDt}c06er@^qaJ6JPRbsUK zxrA;~`*B6^6}D?}K`Y=b8e9yfQu39YE-$+yNK{l+*Q+TY{&K1?aiFAX0sRV1fpM#o zPOP&N{9YjX`aSpOEz=Kl7eM#|)bM&WF@Kq?*HvyAdAsY9-z%n=Ji3k!0ts3bM z(#h1Y&r_G{J|{uGpSOp}(_;sTET17DAfq5FRAdaqjA9W}(9@c3tUDdvyKZh#HY3{> 
zx$aFKFvk=j1blT>=Js=t&3{l**CxcrpSlw}`<)yX7It!CizZ8T?llrrl9g4G`Qtqk zH+RET$fhqFkcD(~bP)G24j!_w76f(8K7j{0O6Y^LjE{|lAXgvZBmA)KXX`L1ot>1; z`V5#wGsD9Wv@L$7A*868QxlFTQtxUYxBq~XVornQfl%u8_K|QpRFFw%a-9(H5pDV5 zs-6!CQ^HgUcRcq60o0=Mj zCz*}8y>yf_$t_v2RmWfu&s*Ye<^JCjRNg6kAp_-RP5@(CebKGqW<9$=3is~AIlu-EK7uuCtwR87$1!|*;%$5$U2>BIhxlKXXm7KZ=0GsN$3xvyIs>4_Dl@pt;&S#I{MhX^DCti0V7x(};w|%?YPvsGP0`JR zWqo<}g(m%-kpsV}ag%$ab*99%fRO$7zfk1;<^GOjN`v**w-ibH>j=wJUyHgK(%y>-atU*N@0yP!Iy@GO#m`x~ud{S#Q3v z4o^MTJ8g2G?jyaHSJ&$w=`YtW!^ra^FjxrrY!6J0R6_EdZLQLeW;2EP1D7o2o~3zF z8L&i&a(lcM){{bjthU~rS3lVXZGoAkQO_{H&X19PgrJhX`x>`K-Dxs|lR*Ev99$?C zZmR+x{c}cw)cH;`Z!b6-c zj3>#Qn*)^v^_J{21814Fqby6*gQt3a&D*=I4wtxFo+sL^7ZF^G&~l66z(4xi242C@ zX=->(0H-WYlH#DzNk`lZf~MWyE$(g*dhGUG|72N@Q_VVBWMZ#AU+{e1Cn2Qvj>$mg z301L_6iT`fW$mrgn??0P#*4psP-dcd?^5yhVGK{WJ69(OQ9I($R>3=V=R7BPi0+QFGn_mDeMb;(*DR{^5fg& z@lrRTTp1IwE$y*?XV^6Xu;!jcA&lVa9agvI0tB(C#X*8^qK@HV&ob&_t7g9FS+C zD|p*jo83>%43|_K=l7i!0#A1}=Nw8PV{2_)REm##xK84!5m8)n?RN6pM#}tjMuIWm zNdQr$XD9nZZxdgqYkg6pNdN-$qo{;gQ0oOWioGh4Bxl@Z`i2tL=Z~NoVchKeG^e0e zr*(bCoPF!xQ*RM@l4kFVKh1sK?nNKErRwkDz^#Bf43O2z`!c4IQeKO9b+(Ow0HQSM zIWkNi`^0MMx98iPCcfh-EpM3Npb_zoj@Ed1$B0^o#Zyb~!#NjA7+q3rKGT-NNWxP* zdUyi?EadNa4wyzD)LfP7l~KtDN8tQkL+?)V{BLH2g#LrJ`ru+8;^Wfwz4mb-&|?%EpAJ#odFhP|D}Q>Axm9#wSQMD?_8xl~WVkL0m+tNT7Ae4?hU0 z%pDl1jUxiCxFxiX=!w2?TV`-gXX+u=xte z?|TGP+VMi4cSLPE|M<2upp?hvHn!P@mnqscN~kg%EwXy#CRhhwy3c_B^T|q-Q-EiVME3 z1McH%X9tWyg+Gw1#{xWg^!e)$As60{r}SnKaeJHuGvm2NAa{E>lW;cKU4)A{zzBX& z{_Eb(E`${&vgeT?TiFy7Z~6ZtF5D^5An{NFiFl3aYD1XI774J0bm};JHop4BKCGj2 zq{mHWcYuv&$T#P?l1Gv#+3MMiQF67g%#0dBfsEo-^uf%^6dAI{wWsh8JU00Lb#JPa zhl^`rh<R01nNCsr%=$F&a%`3)d#)H=^iT%$V0z7s@mWFcU{k|_olLak%3)N zM;l(tZOILtR8vJ<{MkQh*JOR2M@|+S8#^Yt!Ffa2(;9H6!HyGZ5w?0y+S(N!TaOu~ z=Vr|RRnBwO(Kk!UnII@o6>RykfxaG?SB!hFsVCum*zOcQ2srFN{K8kT(Z$Eb8zJVl z>e~rfD0zM3Yp24UM5ZV%uP3aX=iKo{8ivv`uUZY@r<9$#@$m@>g`}0k#u^!kiK$5t z>8saPe@|NEtR3T0dV4IN?pZ7+i}V60 zC@mY*Z>4yDKKtO!kj~}c z_S>iAV#onYPg_M+cK`9=ZosknPMy)l++sBo8=ej@5Arrk%ST5?pv6FhfcG$jfb;Y6 
zLZkCr8ZrnMFNrNf`!>loxxP+MZz!I&d!UGY{@f%O9yq^?cL|_q6(lnaOiNc z>C0zlOPRiU2eX6yeV1Y+AMc`IyS>R`wO{IS@o`D=ckLeEnoE2~O0SzI7(1p$qwBgc zUzh3+^gUJ?w846Nd#8u4Z$E#mpn_Q0*g%Wa<@nAR!Ol*W;p#58+j}_NVYlshAoNVC zX>6$Qzu54^!ox~BSYO}U`&C*xWU0H<<~(V93i==O6*9c8k268%Ps8ohT~$B^IkpsN z4h5|eU0q$9o14NUo?retTu&tjd0`x741j5mjZ%68Z0w;-}#h$`;CD<%7De4~IIg80%UuDJ-^9 zDkWCY1ya8jpw18M*fGLudCNcUZ0P+XKO1}Odut0nNEGf)Z%^AHe7x{oVNj97I$FmM z=`8D&Rg_Pf9B>JB=W$(H9DDbd(|5(1o4d>AXIZ_LS(&Pa z-6|;Se9~4YN|1OB```m98Vmf7s4f5gd%}zU2L8W`?b)*jtgW|+Y~S8cUw>n6?yByI z6T+q&1FP93ZSS^Cy?XUZ%CT1?Fmn?JM@45RcZlq7zqYBLKYywnIpSGTBA=imtjx#s z{{4FjrKYBaF|eA8b5j%BL$6&m20J6}-}m?Pqt?~ek1r_T!0!m{+^H|iv-cc(u6=t_ z?!x%@T54BWc&R}~dkDk>@M|GGHz<)!Q2@!IARZgzG*C13iSxy8}a zCHYeloA{Y&@TK@4LAP)7hN*Q83gp^@Jd(C7Q={K9a~Fo&^RgWKj;ST;sPQvLMMaf;`otn}pDs5ymtEXaIdydO z`*%;P+TeqHVP$1yl&e>-?h+T*E;?^(+h#joR(2qH|B9D!aVz=4lAEVWN=jb8etqER z(e<`=b`76D`~CiXi84KP$H2%augz2}auwypix*laPIwOt*xtQ&FYehh+7l;Ej7?8F zNYc#ysZ+?Zs9a-UU{Ln;D<>Np+w<&fuZRfNw6rvGSLL2-^Eg)7J@V6Runx+~XvXV# z_>l9IwRQK@aQo{IA5_kt7u&I8$Mf?YGC9Ly#`!n8y37p>4Mi3h_b%-}^_tPk%ggDz z+T1-aFRG!>S*tFOcAk(82n^IVF!00G$j<%<_w)19YL}Om-XtzAe&ND}8^y(P4tIZu>f!eiuW92|P~iFX z>zA6Qrs|nq2#mq)&qCmarcer@(r&hW=a`+ae*bR2uAJ=d(i6(7HKV&ZD+4%%ai z>{c4L0|IC@9?NdR?IfqAQBm00*~_Y{SD!zBo?YS;wfx^PrJM1mikk&vuHEuj{1YQ2 zD!TfD!_MfJFLxgbV!vro>0^Oq#V&U>yG&c2lYFAPj*X3Q&-d@*)^G3X396M$wisQw zbm>-V>h?A3IjGjI_1!C|E75Wv51iGlOXv9Uu)BA?5)$}}7Uu>Hvd@OqcgwMmk100A z<`Xk1@LIP~tgFI%onK&}($%Xne$@NUf4%YY<;!|riI!icuHCCoojzUB-oDw?)O76k z?_0NT->$b1?#7dBN%i*f>S{IfP_eSwIyXIvY{t%D;Ns$Py>g|zp+UR<0{aH#Ki#$Y z`BGKY)!py!?^e^$P+nSGSbaMX%fcY!`0?YO-#nJMN*Cw0|NZ-S?9*{g8ZKt5;$sI7 z9*h(ezV?{iDos^r$BwFf->!K|JN7;?E4>~i=eC!5!v+g;^DRzigC=GC{r#WkUHF{oZ4cKl4om}o}JCIT|YxvO^tfx%9UFp z)V=Bbv*Z&~QfT@#MQE4T~SSU*5{MM^aLFNcLI`=g{l)2!qgI^G8W8d(~a^N z+1S?X-n|<|icd(08W$39o%^9I`^JrHJv=;6h!3FR*3~hbse63l)3^MK@o|d_zP$82 z@@Yb&t(h_EODLUEZc}M-!H*w5uCWpm6+OZ6K#jkm=Jw_lluNISQ?7LuT?ufhuC6wC zd3rtBhVJg}xE8CqiC&+f&??Ef5P+soJ2s-wX9 
zbKzx&cdY*6H3e@p6&92wB_$Po8WO>Kf0a^Mz$W8BZ~8Sj*pzbo>D_@Ni8|4{EN-bQ;4V+u*re!WWL)2^prG*L z`ST}^eIIkAdvDyJG&VHEPE0sOwBqJf-KIxQ$ku)Mu$4=SXA1z&&+I&J+ zx4t=5|4nOaB;ES;!U6(^?2R9>I{b0z9UMF&sQEbI*)tItk44#E0|Po17O|EZE-o${ zwR~PDQuV^_-`~W_!jfE6g9RKU_avR4Uq;4o`K?eL7+=-!zOvGjQlEGv^6uMA<5!B5 z*Oiq@DMc?m((szzeEnKvqVP~Pn3I<>(G$2+BqB!I(PwCAD7mD>R6F<78|IYR*Et;B|ITxMv ziaxY|K`U*Z`VxL86c;bCZ{Jh0bc$o^MDHLgGX%Xi{9pOVE-08Y| zFa1!XW8X!gp`k&q9eyFD`AEjQ@bakT*Zhn2Esyrva~ulh^miy-V8eO~_$_Lb+coO3 zw7<$SD{hB`1T$rqRSjLDJRKcf*XU?SbTkL2^reLGLk-e+sN$#JvnRf|*P^7W%i`qZ zqVto{qX+1;ZXeSNW$vXs~>Ti)TWyjb2K+M&*14%C@3aU7kjAg?KI0sZn}0+T1iO> z8!UOk!rp#;S10Ed`$pN!o`chwzP(vkvIhmiQ{G3savkTRY!MYrH?kZ#nkO{#Jw#z_ zq;T}vT}SfeCdmL)pDH@Uqr*ey9U?N~<2`{EBMPuHM5j6$Lk21ho|~^oOHa>opLhAx z+-xz}l5SAsn$^f}xoXv_*QX>twY0>=#VIC+{QY|k7>RSw;1HH14<$$cXR(JIX~^&2 z(*bRrJ$shY@!-Xi-@qMqk|iJg^73Y9`(h#l9QJ)D(4j!f-d?n)r|0flbd#{~@KxAM ziThWyx3%%@-*1$19#BPM_im#hBfc=9h5Ys0vg~-KLl@H4tXbnaUcFIUOX~(|k>zM- zvFq&5$P)t}?(bep8Ebba6^>|aZtg}u+9e^Oc;LW+<$7_$O~2x(nwpk~osAq2)d8`{ zJlB@@zC72dRaI4A|Gbnl+H@zls{Mg+>2yPHsXg81h z{QY_50(d%L9{ikGS-vt=4sSW1h4^un)t;BuU^d8r3{SKF@ zuBfXORiCgme*gZ>w{z!CPy($hk4T@-JACBGRtE=%g6j(f^|9GP5)yYPLUL?73=W`y z9v3#tJI}=wrap^$%CaE2q-9*_avP2IbybxYsdOkIXkhWb7qLXLoqp-O$jRBZb?eq> zsf&ErGht7kK8;&yHFZ0xtspIZU*Kr8vQElfPEJlCSy|(hQegU=`Jt$JY^)$k{ja8{ z%`L(rB5^29;Q(d+)t1-`ER2i?YD0KNe_zbWE5#2AJ#PQj9ka8uVOd!hQwL-7 z^P^hQPAZ9siSe@qzIppLAnE9XLwE*FXQgd99FDYq@tmg};c4=0kIPMY_ACyS^+k4e z!jB(6*arRl45bX-VBtS0D&kzRVujkVV>Q#?JTfo2s5tDtI&qeTmDRcZ3(KRQ4;)Gk zq5%VfZ*dYz^DVgYN1*rPqd>evPWfx>Ky0Ed-=%rM#;j~@1HuJ0EpbIr(vAZ6#7x?r z3iM5~>~|R|i-_1nXZ_^drp)oq{DOmh3Jdq= zI(!TE@mUkAH2BhGwDUFrST9~M<6{mSK1>4wqR?)W^>rx1v^ZUDnFA`m?|YC*7~g9>_vZE>qBdu&H80bGy*z8SQY^wu}T&}IZE2vOmE-4YyaBR zz}C5;f_=lo@bC?STY_v}B_#=<4pPO&#sc5h;`%ZLSPH8QOU+ym zY76In7dpTVx-oH3|fzC=}5Y5UQJzvUsfCcPi3@C2-*#ay+ zeDuhhJ|xRD1duV*B-C-RUw^i}S68HICE<*-V($da{mk7MNnt;duP~M$GHG~`BbM-#} z&oWIlIV0nch6dfS82Q^zA77=#C$a8eTL$`_u}Z$7QBl;`V6IoMDhh-Xz}yiR8@mEP 
zn81F#E5hPH5>Qt13kvvTWjULYj&9f?|IhD(G(Wd~$5t)5|7%A__wev~d{PjHBrRaV zE=fsN8I2m54m_M-28F+j0s;bK6BAyMksFJOi^Z@1b^YAlj$0u&T(VVAa4pzCnfJPl zEFP?EtgPK0OY=OvA07xgxd!DNF)`sH@Y}96qr9<^8SCD5u(QN{Gdc^N+Ab%j>`^ZR zazXBf;GY^A8s_KDrR9E1%`6FMY}CPJS2Q=X68;4yl%Qj?@6g-1xU{q#f9D1>3D*O+ z0U5gZy=FabvVU;U>Q`eTH3P#&nfLVH2Y*Y{%iwWtj^d+broWRpy z$4E``ex6Yxf2>Da;@!GcX*|*foO7>z+oyq3t=8}E5vE^Wz5+0wu;82=$#dt<8Fs9y zyjpVS4)e98h3S09K6)Z6U`vC4^Wp)nU%!5AY^?0(yGU>PP6lwafL5p#sH*0cmT|f6>gu$REkN1ZoR9BndF4lq#jb)4`!7;{>()xt9m;{! zeS$^KgKXEXT{CxZU~qGD!?F`z9PRI?78Deel96#~)yk$PQb)_}AD5(o<-#H(NwH@E zlpXJ>wjJEJcdw$E8PCz9M}Y%$X&oIMAu$Loc9eQ}q=wUHnikXgE3d>qRZDD?k9f_< zq<-XxHY5r(09`#jyey4zW3kBZzn;eEwabI0@7=pscDqjw&+-!s1lZxlW&x8xNjT{2Ecxu&Gc9fHo6v1u|NO0&x!1_VNx!oc(A2^rdp%X=rV&jZ+DH5?`sH z5X4Xm1!0g+nF?DM8(JvBtiXvTUNwYGNF?c~6st8h?lx$qY+}aAPHHvP)!rKzlgbEV z$`W&2;@y;G5VY;cJ?hPjO(NH%eDJs;FF86AO^87A^%)r%D^;tqS1;GImPEFlrgRj@ zQhNlqXwZC9i4<@7PPjODxSE>UfrAHEh=_=I(fjAbp$(Q5`#(eHU8!1WXm$21H3|S7 zk31*Df$C)K_=1Ur8MM?i)zEi8@uJFtx%OWe`6}K2>zBprP~PV+UwWcT)puv@eJFKt z<@cJ~o?&5O^|83!Ry9MK`47T4VPV;Gn9u)(VozOz#7SddQuZJFl$WSv;Zh z)bxA|-egn@U`g2v>vzu`zRACTD7}Hl3(Ms{`~h#8YM`y4u}Ue?J6;`^1AmCb8{HZgPv#&A`#HxXTmefMcP zKWcQ6J}NJ9(XOGPAiO`mJ$qQn%gaAQ%2b+ua|UlYTOC{jYEIY4Nbst)^kWIfYqI^Y zn@2hdySut>Y&5&Z1a{1jqv}Q0MD`)@7Kmk9ARy4q)-B9UZjj-xiQ<3|C3}6Qdo7fs zq)JESZbM(R9iz_P(7`Ctlj)RVFhQ@;KI;zF9tgEq}ejjxcI1i6mNnc+dDxcMGHss@L zphap&k2cTTI!%5~ARnl!tE=2|W~`DydWA)r@0IVj_dR*~)HkzaPW^i>>2_4p0-jdU5YBG$-Y@2z%9)F_=DED(@9EW-N}{}Za{S~;ntl8BQLSE0!$k{*u5DzLs=d6$hgP8TMM+(Z ztG93p=-k8hWcqfW1rrm{X8os66ZP&iS?6Xo8Hi%&Dp|>x zK^i&No_q9HXpQFrB(QZL61#Tq_DoFNhRvWmb%3yctZt&L1OEWdkU_89$t)PL?7%2 z>)wK1d%GN9$S4f2djy;z}|zccMw? 
zXlMlKxTolPKIn>Y0xZW8pIZ&&6xU9}0s# z_DIHAU0c8DXD7G)Z9yriC`u@<wVWKH1+vsahADWSf;zHu7lfejy3r016X+%wqF(69&U zZN%@VG1sH3EX5-tB62>Pfr#kL7G3#6eS?o_3*RLdmn7JUsT0E^BN=zS_GJFSl1=%v zo{sLEIF0tc$Mb{Pckw+(BE^Iw^0G7hybdcax4T;DYcsnjTefUDkn;8ITUsR_I_(r5 zR@T+9LL!>j?a!Utb#3MX%H?-RERWc}q1KVbe{Fs^Prdc%=NH!N78j>Wm3FXgf6KRZ z>x!?1fny8*Dfu;74$}Gf_@JQ#>3jj55jQWR@bmL;*|%@wf$5#o^RqKE*#_J!ED26g zsrT_dU^{#=b@>XB_}44=Uz9C^f~wkV@IbKuc9@lLq9^Hd-a@<8F5tWHM;K>_o6#L-@9 z`#5s!$>PHNs{G5s*JJOw?M~5cv%GTh>S03TkCkW;Vrgzp z!37Y!@#2Nf&&jo=x_G|8CLx0Y=z(r`xE-mlxLPmnz4PZ}6jzx%QjphFfb#zfhQ(7dUkfG$K`agU~t; zC{xqWl*19Fdopb4RBQk5EP!+87t{zN&ZdRIfyJ4^&4iY7KfSWg<&VYv(|ES{>=P+G z%*-oa8F%)4+01i&hL@9z3$T~f8r$!e@0GDG0z;l266auN@2cKtrVON)d%h!}*loJ! zXQY{4%DyunIVokRtx3A3`ue`#YeOnQ$9_EzPfr(p@$zNFK=03=yt{Y5td?ry2uxa7 zF`dzPKQ#1-?!BleKbXv}uC8S*Eo?V>-xbSopYGVRcW)2c1cATLPNbS!TPMzRr>CSe z2nFAZx_@7I#%HS#;g6SIc`WSB`~zI@Y-0TN){w3UebX!b2GPweEqr2P$5PS~ zomV@3bji%3uRCA?PpLl4KLsxEDL4sEE*h4We5;{>VzVV5~f2CX|9>E@O#$?Y7z=fTnY65f>h(#6T0q&&mimVvr1EhEEBk#QM{ z(s6fQD4uGw6A72Mme@2qH>ZOj1MXSp`gM8x&(CjC>PaIeMH$bZOX|W6$Ya9Ww+9kq zml!rs4D#G$)0=JN!{2=SR%&vlR(8mxjzG+-58-bpD+4+2ahp0PAt|}dUs(~a^J&vl z%_g9x!-ye#+9@WMBx%&H(`dXD0`me+&(2SY|~5qO7K(sz)VV-U*G3iAUbFwA@3_KigB~ z&$4l&Dk?qI4NARTW^v0%R`!3Vx{3pHhLJ8O?RaRL>Ct;`ZpoYLfah4oJC`87JaFQHgo^%IZM?yHjV*sJ3R9#6!~sG_!FwksEUP z7RIlvJi)lOjNNL8R|UYoyTY4e^q&41kQ{twIiex^6H9$3KkWOq1@aqYGDaDemf9e0 zIS$ywCN66lJt6I`LkRl{k*StD_mB?Bsw2*n3V2Yw07cES+->S}Wn?8>svlcz>bC%Uw1Xmb2 zI7AF&gKTya!g)(#&mLtftN25{R*yIbOO4`YnUA?_NpFbPq=jS^ePGc|~ zj9Zp!8PUwZ^sd3_n1MrE*vHK&XW$mO-7cort(-KX{rfk=!FDK~)Qc>2fS!SOy~noF zk$l-444|&T!9Wy*#HQ7otmFbiLKwK+C$z6kG^h@rU*^ZR$h)C*LC{&L3LQ5YO$AO! 
zado>u_CE*$VU}HI>z}ZV_LZ{_Zo>U)pFHUg{m_N zLjeqqblux+gxhcUN?6*bPrAr3==N*D0|N}IdMxKgjQ+D8Yi};GRwl*(u z*4c{YM+!d|yY1uSl#tJ4%&_QykjrCOnn-R!F9BFdJI*6 znYA`A3tQ>C0ZaZ)wwggWVc+kv1E%>7!gH?>o(t46_HAi#W9i@D>tF~j{-e1!sY4e> zwu|nXyE}#FXZ`O@R0a{iK!f64)6HR_O;(_gW5JVSTk?3&aweu4rTbg#;EinWWl#_f zb~$ufmCif3&1TTmZo2^|@by zu=S|9=$rUt!F|ExZgmlt@&5;ZL+7C3qJnPJX$7{02cR!2$T!rU$FM>0m{)Oeu}%lp z^+w3I9b9}(%w$!u59$Kp)xb%BK+xP`bM9Q#jTNg9sGOpgOFg z5G*PWx%2ti2EJnl=A?+1W0lslo>Pi3&*9tooMY(MNJljsWfHfq`bR7)07frpt`rB5 z?xynkN1Xd3YVe!_ydlgqy^ZdF6V zRA+5%S6u$U_C*V99LGb{IepgArVGA-f5h#^+YXd z?{Z4bATkRAa!d>lsGpOwvp7&ytdgan7Zx9UL0dGBEqo9S%CSlNFGR6*`==CZxw!0w z;!oBrEUdc`5d>?3;1YQZA0#U$*PQz;$8O`|{I{i*6sV6~ciY5Un1F9;v7;_5{=LG#Z5y$? z@O-fI3kVK=|KUUT&=4Ks6I4`G=E%~CxEA!3?=RjM!Di-_pP zzC7`D%d>6h-PH)t#p$;E^|XcR4ScB=&IsKVJ&BTk*;)w-5#`t{n^u3l}?RdzF=PuU9sM71LWLx^5G9F4e=5^j`h3Pok*8VYiq zFc6WUywdmR@^j=3svYs!xZ|kkXgF&Y5GA0vfHW&?hD)X5w4)8JS9! z7TvCmm9Ncr8Rh`j@9yjKeez@rf=N?fcpl?HGuO#Yb0QGrO`q5 z=`!558bUtdVh%;)hhjSSW9b(hz*~n(poMH@Pmg6p?Qu;xZ)^oa`+4WV7K{0*;Q*ej zUY^ok53q&9ASo&KW>5j&zE#IH!v(8E`enP%7rPGBQ@|&6w~**1(VaV0wX_%kN0W0G zi0BDukSK9q!v{?!7I8XPkWj31x@B*|{OsA!1Md5xT)-n$BR{mI^bc|_L4ZnK zk1ltXLpr8GKn#^l^CJ7J%r5?f)kq6h;rRyqW5@~jDpRJ#vj33AjnA=dkHofvu?jM$ z?#s(=7XnX}Ur>-TO_^o*TPlt7FRXwZ=c6H@1brrEZfkt2ATP?$>-X=Kq0u)sHWKuQ zylD5UzQ@PgdjZjr5X0aW>9jr)mcci({xL^w0hsIh@q-G95^ReV z#59(A+j&qd(yI6UeX{7_u=@hWqV~w?Vx^t_5baH5+^VXH`^pLW0=(nn<1>8A?#CzdxkaIdL#_G-@JhZP+?I4ZLba^r$YWys>BeQgo z7F&q;x8>zGr+ahA9ajKnT|fcQ6Lj0%!VOG1HaV#R5!=$za;_@&K6j)S+|XFa|I4Xx zBAkCNy0Y8Y!~}{pJ#+^0X^o99HgB~~JHr7n5IclIVfLpY{yNb@fbxiejrifNUAq{i z)Vo1YtdPe03>0W~BhWb-HwkFBX6vD#GKjuc7kE|}KJY?7y&F+4H6&8efFS$@pr^%Q zb%S*tx^(FUiOkv|heqrY?0@1!a@3HctHrPr%h(j*4VwDpkCBFARwOZ+h|Z=)z}G9O+b z4~lR1$;;aZVRx5PzZL+fig(t*kFr{t^pI>Aq@pF>ZQBy{dG0as)8Nu< zcKiDU1XO%moZ5$_=W<*(D9hIwm-jwKfd`_DG6(`-VE3Oet-3dk5`tRRAbtGqoRP#=VFy=7bqa>_plQiz%s^xY|tK-o>own1a#JoL} zSILV$vC`Jo7PZ5lw6k}SJ2%R3w!HiK;|DR{A!`=9WEw?o{6nXXk*`}bSHh=@W~w&knGHhFn+8tcz%92`l_jWf2LC8 
zBBGvW|Ky7yv?6{q-S`zVAqvoT5McGeL&vvNynD9}u`*?Q`}8N{L<2{X{8mlb92=H> zHCMUY*4uYNvD?y&^5_Ax@m=yiE?v>D#*I`00+5Oe!Hoo~+2yZEA&Ed0n<=EFr`M9I z8F{L$7D2e>lyQS~Tg38uk6HRFh(N0sdieLH#nGkEyLYMNipBOvH(6j?BIlr_r4L#1r_9)wLtSovN7+uS5tE+{copLab!lu`OHPn(hvMUka8ozWFi_NqP>C8 z1R#mToQ><2L&mY-*Zo>;r_P-V!iu2f@-a6w(sy_N;uQHf`{8VDY2V=CcB3=D0@1ol1zdkcv;_=>$IiiExfSUO7r2e!2t1^xm>+zFO8VxQI9G_PQG!6 z-Plz#mxN_h^!x30aAfu-t<=MQF!B-iDcpX~7aHAa#D`t?)FpU4{|iP(y>6W+bpIWq zqD+uXCpONi^0+4?+c`LdAVfPeKVOM_GwKT%*>X;Jaana4{Wk1E?2jv@*B5l>GM_!G z`10}$tFuf&;cWSMVnV_ez#DXmfERIbe3S!dVtVO@-atnrL!qaqM_f( zxF$$xX7MmP{&W@C+J8AK5j@rCWRfSD3so9^fdIO3KyYvp22RuuAC5)(0-|9yx&qt$ zIvSc-%wv$$_5#`}FJ%w-F)dse_y^i5DyxS-XCcVEJ+tU{{r9tWn}-f}dMvq1NJzvX za4+T-f{(iT`;DNaZTP)=hXA>V5QUuBiF29%XNM~D*HrqOX+za{y}UU;=P z;+p_-`@fpDPrmt&uD&S++4hCH;xv~T8>Yrs_J#wU@{$%``h(c63H=FJr_ zxjRNu7k~z@c!x=I-Tcu`2X#bz^1T7o}9!bP{JdtuVAr$Eq2=ou-laUC?rHP@F&n=J%(bW zx3$#Ntf};6P{n8k8ptXNM87ghTqyTg z#+eqz8b@`Lc65-^2CkE>12t@$HqlU`;KJ&>!h?nZqK#kJsv4Y}{Rd$Cs=RsG4Yqf~ zLqmpNP4~+Lca|Ys1Q>KcT|J;aR_H%mt7p~o{Z3Bv=j;Bpn82V?IG<4{@Fmfbw+IWT zRJ({W%{ivOIkOiq3;WifW(O&vg6pP$=Krr@27f zy`Ul>XRr~4aWw^c=@txqz)c?xei|=>%{cJ7;Jp3G%?M6aRVn`@!G4Ms|K;i3l9DH` zB3-@;v>%ly@e=;dc^W%iWuN;JJ8Ap9%)Ll~DPbFeMjHLSChMBw>HDRmea_S^s-)T7 z=iS@4&#y0B4dRq$M!^jzSsV}XpirKrrW%>ZKN26w&F=g2pU(XiAWj; zMgVW#ycwYElVcWmSPh}P7@6562;OI6aML|vzi+E@;o<}#3e5DRMB9ASz;y!!iDzJn$+`O zoKA5LC%!2-e;bPdDm`ta(3TBTjP??1wB%1B1^U<7XeWwHaa*OgLhjfB+wO6!dlV zeJ_ycR&jC3)#)fIlEr{p?&my&?bHp|FmMN9P|wA3;;o{h;+DvFMh{sRu`(*Ki+;Vm zhL(UvUWulfh42Yx<+f)SyaGuiHWT>XLF7sSARAM%a&yn^;Zk+8d6CgB@!uL+*3;L{ zG@Rn>C|>Up0hL;I`l}-XM!|0_Xkw27Xp}8^7$lv9F^! 
z8Acic!y6&@9u|p^pO9*6TjO>MjInz91pMU45`9&Hq@<@$!;wd5{2~rA$HLBz%=8SSjJWY)g zZY~aImD?gps2R!-d)2A*h+eeK&$6GebGhmF%Ape3z zX0g9X>X}*yU?hGfb^Wgta*l+$R7Z$zveHmPr^vtblU9Cl#;SZYqdgiW2B`E&Z0x}l zLIsc->z|m_4VvU1mQiWN)F6qpgR2lFtXUkf$wH^;jv^I4?Z7jJ*?6E-xcdWodTeL} zOq(|cK7YOoT4B#{-nWrkY9}2IDi_buJ#aA-1gc*PHA79mCQdRsb2jFG8;q#lGtrf^ z#_%fp=FQUq&K;Zcz65UENz5Y1YE_>KDi+AH59z&N}v`xKfP^@9w z;nxe$C`q3rANydh{lMEi%OIQk2t;CFBDQ({6`3CR^~>2xm*ET)FN|-RKYf(+E=qYp4`^sT&_fdBXt?MH6H6B7>|~ez{>jI1ULDP^W6EBb7lS5)I0ZdwG}$8H zJ46E~u9+sn4yp(peEs_MQ_Ep>^$HAzr>n`%Rdn7&HL?Ofl!E9M+2I#DX#$0TOmx8Y z7>9ySLqx9ve&t8#&U$CwAcpmwjD_HXAP~|bEn2;_jrlOfISlHc?2;)J3}|eXljDLe z9^e6snW%YqTPON=dk%hKG8{HTbC)gVdz{r%**3*Wqa`Esov)7D*x zQjV<4_r(}U6$anP2^PpnA=Lz>w3tj8j1zwm8Z11~wEAwRSEk`!vKY}h_A9dhv$mEK z{SCH*OM_-h6__i-F$KvJ7y#*S)=#yPGsC5jIT(ngiM7PkkBIOFRlIxmZnC|mGWwzq z+Oy`GR;|;g1@Hz};ffGHz^hQ~4>e#=Q8p;OE=OqLUnt}lAZC?GINx9w&Un-wi6l(9 zlB5A5jQ;q@YCA|4*mzNU?W5bK@Kz9j^?_1z+3Vd^X;61E7t-6yhr}4^f`$9yG5Uh| zTykcnFLX&HDTs88{j0*q#lb<#_2^jfUfy{R%tneMbqRCfJ5sn8E??eAG zPG;Z9Fcn@rxw6W`e&l?PfKnmg{ap~=FfNpuu3-I^<)OSg2ZVcHcqga=PiOMrJQ!qI zuIAZy+E&y!pAirS-R%&JTJ*2G=kH=j@${Uo9?)r z{`fS;wS9mh$fPKUHO8L2;p?QN2x}~rPL1pldA4sOSwU=_UO@u96NodUyZ8~;yj3ddG=xs zEkb%_kbItB9yKMxJ)-6pTwKmKIULv~I^9#f?%bCbs}V50iRni)(^fH;dkfsGyy_&C zzSFG4OO5{){uu1=G1zekvJ+4U8AZb2=f2M#SmvNKH<3rh`2iMi=0rtBp;K4jbQ3W5 zgO~{781jJRf<)%K;ARreEhvz2mF0%`Y;pcPJr~!bHS>YhmRLNWUVZHQ^B~(iGj_48 zFy$?#1W-QW%NJMPq@egad@ET!HsV1&`fZe`S;VkxIMItpHv(RzmAggr zcDA;Pm};Utfq#rwW~ibJQ~}R}94P}8pPX!f_YQFi0dX&cE}_Z9CMK?ix$^n*=hu%_ z)s9npE0WPhGzmO{UKx6AX5e|e>l(yaci}h@mr}jJ07jjiu)J&7p_rqgG{AE@bcS!_ zTWJUyvrOx^IyIQ8dkRcrV;3n;l9GJTbN9~uR?0GB|vptGmJrv5Bl4DSAlf&dN zB1YyvUWbvRQDS0Z&Y95gK2sElAd40zkP5OTR4Z2?0+dYQRSyYZY#ebO__TaD2hzjj zw1^`|j=-u495nFSgKkEPV)xv(StQLchc<*;t^#u_`N1k`;aW_Jua}_j_-F}LLD=p_ zJ>=nuIi$(4&FcshD{6XrAIxY%#~L5&R)SwIITtLdcz1m|4Q~QBnNor;&u%4kp^K8F z8S~MZU-1-z-dM56kiNBdMWaf#t4@S#ti0wFLDKDJEDqX z&$7cEg6zTwj0un-a=`Cx+9wVJ;}0^Z*Zs;A0V>JvrFg 
z)G(&}vq6KJmq*;bZ4WMh2<9N$W8>oof%RLn)b>M;x(PFo2ps6uBxnO03lGqK$oK8puE>HZKstxEz^au%7kvxL#jnKWmcm_7HgRPlX2*HM`@e{8Oj>fBC z<+dBZbYubqGaY#3HSnzSgO&X%v`!vXTZjDQYQsVdn8f0>{20R6 zD={^pCZVXV#e&V)%Cybt=UY}{@FTVX^*>fpCBcNe#2@c*Rtl_EGWhwgz#R-q+{88% zmJrxO6(2sVho{X*>y0Pnf<3IrQdVEj2w_zjM?H|UZ!9cs!WPG`1cTnvBPCOgzc2i1 z-1bn;jV&oD30Ld2Ti1&*4uJv}*WW*Ahz9^%T%V@4KLV$!Ae2FJT6jyaKNBMPm{*D@ zQfo^=BEwVX6%nBYDIjAP9|E$sj-FA#DG3(_TSPf!T^XSj;lq`Mu~*c#Z^Ent{#_Sh z5BP9%D9^|v_QFik{xG`bT(E-5bJ&16D`Sr>@WoPGOY;Pe^+(yhO$cI_TefcX95hJq$2E3g2n*+uSin+to#}ta>sCnhu*O-Q}%7yG*H&uJ*U6pfG7-(Q>|IU zw`B_j(ktvf*X;uT{*3ta^xMdt5Y-zf!Qp})KYj|R9>h->kV@62{V_(dwg5M$>ZL2< zB!P6d|E|j5#Ec7f?=dzXQlaKmf7{l^5yGQT6TrHi)N4=)IE^bPsI$=GC=_f7jAV_d zmKN~xOb;|~fB$IzMpe#aah>|#NiU$>fpOJ1@!$rc#(*lm!ZbEUSP;tI1O-66_tX_hchn>Ux;U1O*1NwkaoyTq zx~00S^{MvL6yTU$nc{+w+ps)=(=D|3G(bc}sKRIGlVR z2A*K_&_k`w#zn47sAcIf^1Eb#krMyD#krJp2O`i?JuYowJ?tTqJ)3z}85d;|Q$mPqJsA7larGm9iaw>?M-2}~l0+2)0CzNMiw!m`s zawn2C?B+&&8+jwb!Def*_WvJez z?uylae`$IEhS$JVUxh=PfMXrG0)_l^MXGUYnz#?Q-KIHkZoQCtN7_G|0ecXC0NNt=%VDn z5tt2qGR)nt@pm_$Qgw*vNIT{!WoULKhwWg)l*8`T2 zSOUsje`oQ2{L^k0QY&9$Llas8rDYWc=eh`F1>DD(44BeA;N-LiAyKq?>N96A)A7=h zR3SNV5G*%#rPn7*oL)jCmfgEIAlcC;L(ltgxtXSs#177h0F9>SlBsg$j7zAftVD7s z7<)d5M}Zr*k#}zHUJ`rYRr0CtIngMFSMB$ZRM#bBX-HfJC+A(@@)gR$yaNu);ev)k z+7?D{Fak=UprMjO%*e`wu1JzZZEbCwf&$7Vq*4UKXa%m;MBHpW^qrr$t4nB~#MFb1 zxQiGLB-Zy>!6O)X$?wo}K=70e4LP!Na>$GoiQAx0ipm&1X0%p9>AjSVh#t|gK#H9h zH`y@bA;U4?laP2(z85kHe!U6-j*Cd)fPY9yNs+Tl#-^r{zDdTtSiT}$DPb;P2SI~E z$|_jR&%e;UlH~R-{ZuF12wWpQG>UOHwnN(98CfMTIY&95tE(H{W@lql4uFCz&;f{CID96+q|n6z-Y1EU9p0(LFXD|l{DU9+ zl^phgTuQ69A1xVeM@E>Dhv(Z+2_ER@68HV4#aF$NF62eC!WuxL*L&q1W|WjO$jHVaVuP>t6|6STDS4M--z?_1{@@hO~=rXRz8Lt zKbR3Oqzms2#~3m|amt9#-UNh;gBoUk^wHrQCrkpO%dU}?l?68t@OAPMqQ~-lO=CqT z%N;VvPFu^4s_(sA_|42pJBqiea$EI|yN0%MuRw<;ryyb>R^tJV_cv^PB^$}8qSYru z;Z@E-rCHv}wOej?jk}Vd5F8_NSPZpy#a;y^RoxAM)`|c;eSa3t)Q(U?mCMjf)y+A$*qemPXB32m`9^Wn}avKsci%Pm4JZx3Q9GK%AXu-?8njKa=8*9=J5-_}3ps`bI`RQg#8evoSH1 z!=L&B4LM#&a;iUBR4^%5}4xNM-PDj}fnJGPH 
z+*jf6%@yVl$)7pP?bs27k*dAh>wN7IB(a6cwff`0b)zCc~xP1(W67x%{Mya)jHUT7kkR}Ue&r!(;3sGkI>vC^}j_RLtW1}T3`A`$goSniz`1|T=6_<2;sB?e)O?+s`um1jBSq3`__aUY0 z@^@zZ^v~JnM;!w~dxOYSaT+^?ZS!WM!8zcfO_bB{Q3|I@822M-lAa4g0LiiQ8kkMS z6-pqt-}IE>WmZ|xG0`F#ScpN zAmV9a^zPu^dK|-nc?w&Tm)Y6M(5l9MyxH(&cJpXn>ahoV)Afw7wXpP%ec@crXG51b zu!Yea?9g?r>*mn*C`iS~F2;V@^a_%l4>Yn2D@L8Qq_5D($ra1A;>Btk8$Ksd*&l z6z5UvbLWglCyzanSwX?!IMxbgMn;>#ew^E|jW$tEc^hF#letzf46g=PbzT-H4sg{Rh94jY1nTML49>zJSBy^`nDT$q0QOZM)n$)F2MC>+Bs7*>y z^hc)Tu~Td2nX*z@yE5xUY^~jv9@q)1_WIZ#+h4eU{QU0yeZQaY`}6+1-|ySf(sKFm zLeDAEf;DhFOLz-PVDM{6^!hWJL5lXZA@3X~BR)zZC2eR*1it|V#KR2BQAdF%lP*cv zc6!suo1M4keK^bf6F-bw-b?L~IZl1xKC zNXwgw5*9Ut9@pLu|{%Hrw=(+E<#{ltYfU-k4BLe(fYd;3fZqCFmE4N$|()bz#mp3P~5JZk}1 z6dLngQBh&6PTA@mot@=*`-cjb4+AvIF?32Xv%@(P7825cOQC}ge`{rGYHBb7$L*{; z?^Uag9na3TLpl>h9dwS*wV1Ta&$di&r%DydB+mC*x&ENPOH*%uov!ldD-@bwR>N*v z{}~g3MF770`GJlt4MZU%*Lk-AVrgph~!UWW~&|*eZd0ANr z_DU#-R0EUz3TePi#tSeZcVq)TN25#EZ`^2L-a>MNgsD|-bXJ}aCZN}YSh1Cb{yQ#) ze136%e2QnB1wk&e1|w^p;PyF-6)+z6iOyR`v;Gp5-~{S6GHl2xFs zibgaf{sm%H6G0o5X5^esOtTlbR#&G>i}|25ywgc0Kk%yI7hWB#m>@Xi5YkmvX~f$% zaRSYbWM!qWr3vBrPNRbBg+}#;B?CaI%<_T!Jsen)2oCWj;&Z{EAH@$EzKR|4neS`V zpR+`L^4VZ456cn(Byce~;QLr}mJ&H#Uk4f*8#goe5tE)>6M0qI{-d5YukeP8aj=Vk zD;~GQKF*L6ck~BCb}t8p0aa6=U5Qq?Qu|n?bVb{rjN-kE+&mIcldG%Um~PcPda0e| z>e|rre1*UN-QV|_D!($9_vTCgF99Y5G?NgNsGaL^{ghk$RJecjp+gcwNL@-2zl+V0 z>2B7DuaK@7czdZIjzD^*B(zW8`VMgx$^w{l@$AFNO29fMA(qG1d<)uk9 zoifW&q0lLi?+B=;390C>pa>rTvAV*qo1Zpn@sG#IUKRTKDw00F2-$7?^Np?w9SAAmNPT{wRXM4ezVTuY3J5Vs0WXVM+3eSL(3Vj zzAr7$UMP?*l)IV=8(?_AJz-(eJ2E=$KXNt72K50aS~FS}nKVm=BhTSKK2eeZsKs`G z7jYWPpNtF0y>C=Gi(w0`t*mtEq}Uc7?(Pbao@x*16$+S$!1h0i>Ca-m`ykh4 zZzAylQWAnQ&Du1Y#V8)^P@;&$0hr5I8;ol&)$`rT4p&mc-m8F!;g^#F_5S~J#n_0$ hS`&F~OZ*$-fzq9ihC@=e^Cb@ouSLr}ij_f${{Y_|uBHG0 diff --git a/pyproject.toml b/pyproject.toml index ceff5445..ebcb755c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "burr" -version = "0.29.2" +version = 
"0.30.0rc4" dependencies = [] # yes, there are none requires-python = ">=3.9" authors = [ diff --git a/tests/core/test_application.py b/tests/core/test_application.py index 53daa0e6..b6fbb967 100644 --- a/tests/core/test_application.py +++ b/tests/core/test_application.py @@ -41,6 +41,7 @@ ) from burr.core.graph import Graph, GraphBuilder, Transition from burr.core.persistence import BaseStatePersister, DevNullPersister, PersistedStateData +from burr.core.typing import TypingSystem from burr.lifecycle import ( PostRunStepHook, PostRunStepHookAsync, @@ -3184,3 +3185,44 @@ def recursive_action(state: State) -> State: len(hook.pre_called) == 62 ) # 63 - the initial one from the call to recursive_action outside the application assert len(hook.post_called) == 62 # ditto + + +class CounterState(State): + count: int + + +class SimpleTypingSystem(TypingSystem[CounterState]): + def state_type(self) -> type[CounterState]: + return CounterState + + def state_pre_action_run_type(self, action: Action, graph: Graph) -> type[Any]: + raise NotImplementedError + + def state_post_action_run_type(self, action: Action, graph: Graph) -> type[Any]: + raise NotImplementedError + + def construct_data(self, state: State[Any]) -> CounterState: + return CounterState({"count": state["count"]}) + + def construct_state(self, data: Any) -> State[Any]: + raise NotImplementedError + + +def test_builder_captures_typing_system(): + """Tests that the typing system is captured correctly""" + counter_action = base_counter_action.with_name("counter") + result_action = Result("count").with_name("result") + app = ( + ApplicationBuilder() + .with_actions(counter_action, result_action) + .with_transitions(("counter", "counter", expr("count < 10"))) + .with_transitions(("counter", "result", default)) + .with_entrypoint("counter") + .with_state(count=0) + .with_typing(SimpleTypingSystem()) + .build() + ) + assert isinstance(app.state.data, CounterState) + _, _, state = app.run(halt_after=["result"]) + assert 
isinstance(state.data, CounterState) + assert state.data["count"] == 10 diff --git a/tests/core/test_state.py b/tests/core/test_state.py index 4572f8e4..30374d2a 100644 --- a/tests/core/test_state.py +++ b/tests/core/test_state.py @@ -1,6 +1,10 @@ +from typing import Any + import pytest +from burr.core import Action, Graph from burr.core.state import State, register_field_serde +from burr.core.typing import TypingSystem def test_state_access(): @@ -158,3 +162,26 @@ def my_field_serializer(value: str, **kwargs) -> dict: with pytest.raises(ValueError): # deserializer still bad register_field_serde("my_field", my_field_serializer, my_field_deserializer) + + +class SimpleTypingSystem(TypingSystem[Any]): + def state_type(self) -> type[Any]: + raise NotImplementedError + + def state_pre_action_run_type(self, action: Action, graph: Graph) -> type[Any]: + raise NotImplementedError + + def state_post_action_run_type(self, action: Action, graph: Graph) -> type[Any]: + raise NotImplementedError + + def construct_data(self, state: State[Any]) -> Any: + raise NotImplementedError + + def construct_state(self, data: State[Any]) -> State[Any]: + raise NotImplementedError + + +def test_state_apply_keeps_typing_system(): + state = State({"foo": "bar"}, typing_system=SimpleTypingSystem()) + assert state.update(foo="baz").typing_system is state.typing_system + assert state.subset("foo").typing_system is state.typing_system diff --git a/tests/core/test_validation.py b/tests/core/test_validation.py index 21866be6..8303edf9 100644 --- a/tests/core/test_validation.py +++ b/tests/core/test_validation.py @@ -8,5 +8,5 @@ def test__assert_set(): def test__assert_set_unset(): - with pytest.raises(ValueError, match="foo"): + with pytest.raises(ValueError, match="bar"): assert_set(None, "foo", "bar") diff --git a/tests/integrations/test_burr_opentelemetry.py b/tests/integrations/test_burr_opentelemetry.py index d19ed6ea..e3789894 100644 --- a/tests/integrations/test_burr_opentelemetry.py +++ 
b/tests/integrations/test_burr_opentelemetry.py @@ -7,7 +7,7 @@ from burr.integrations.opentelemetry import convert_to_otel_attribute -class TestModel(pydantic.BaseModel): +class SampleModel(pydantic.BaseModel): foo: int bar: bool @@ -21,7 +21,7 @@ class TestModel(pydantic.BaseModel): ((1.0, 1.0), [1.0, 1.0]), ((True, True), [True, True]), (("hello", "hello"), ["hello", "hello"]), - (TestModel(foo=1, bar=True), json.dumps(serde.serialize(TestModel(foo=1, bar=True)))), + (SampleModel(foo=1, bar=True), json.dumps(serde.serialize(SampleModel(foo=1, bar=True)))), ], ) def test_convert_to_otel_attribute(value, expected): From 361601f521c0b98ae30e23c11342db3634f218b1 Mon Sep 17 00:00:00 2001 From: elijahbenizzy Date: Mon, 9 Sep 2024 14:56:48 -0700 Subject: [PATCH 02/10] Fixes issue in which streaming actions were not expandable in the UI --- .../ui/src/components/routes/app/StepList.tsx | 53 +++++++------------ 1 file changed, 19 insertions(+), 34 deletions(-) diff --git a/telemetry/ui/src/components/routes/app/StepList.tsx b/telemetry/ui/src/components/routes/app/StepList.tsx index b42911b4..2d297c0d 100644 --- a/telemetry/ui/src/components/routes/app/StepList.tsx +++ b/telemetry/ui/src/components/routes/app/StepList.tsx @@ -123,8 +123,7 @@ const CommonTableRow = (props: { } else { props.setCurrentSelectedIndex(props.sequenceID); } - }} - > + }}> {props.children} ); @@ -176,8 +175,7 @@ const ActionTableRow = (props: { currentSelectedIndex={currentSelectedIndex} step={props.step} setCurrentHoverIndex={setCurrentHoverIndex} - setCurrentSelectedIndex={setCurrentSelectedIndex} - > + setCurrentSelectedIndex={setCurrentSelectedIndex}> {sequenceID}

@@ -189,8 +187,7 @@ const ActionTableRow = (props: { />
+ className={`${props.minimized ? 'w-32' : 'w-72 max-w-72'} flex flex-row justify-start gap-1 items-center`}> + setCurrentSelectedIndex={setCurrentSelectedIndex}> + className={` ${normalText} w-48 min-w-48 max-w-48 truncate pl-9`}>
{ @@ -311,8 +306,7 @@ const LinkSubTable = (props: { `/project/${props.projectId}/${subApp.child.partition_key || 'null'}/${subApp.child.app_id}` ); e.stopPropagation(); - }} - > + }}> {subApp.child.app_id}
@@ -394,25 +388,21 @@ const StepSubTableRow = (props: { currentSelectedIndex={currentSelectedIndex} step={props.step} setCurrentHoverIndex={setCurrentHoverIndex} - setCurrentSelectedIndex={setCurrentSelectedIndex} - > + setCurrentSelectedIndex={setCurrentSelectedIndex}> + className={` ${lightText} w-10 min-w-10 ${props.displaySpanID ? '' : 'text-opacity-0'}`}> {spanIDUniqueToAction} {!props.minimized ? ( <> + className={`${normalText} ${props.minimized ? 'w-32 min-w-32' : 'w-72 max-w-72'} flex flex-col`}>
{[...Array(depth).keys()].map((i) => ( + className={`${i === depth - 1 ? 'opacity-0' : 'opacity-0'} text-lg text-gray-600 w-4 flex-shrink-0`}> ))} { e.stopPropagation(); - }} - > + }}>
+ }}>
) : (
+ }}> {
+ }}> {hoverItem}
} @@ -999,9 +985,7 @@ const ActionSubTable = (props: { isExpanded={isTraceExpanded} setExpanded={setTraceExpanded} allowExpand={ - step.spans.length > 0 || - // step.streaming_events.length > 0 || - step.attributes.length > 0 + step.spans.length > 0 || step.streaming_events.length > 0 || step.attributes.length > 0 } latestTimeSeen={latestTimeSeen} expandNonSpanAttributes={expandNonSpanAttributes} @@ -1159,7 +1143,9 @@ export const StepList = (props: { : new Date(); const MinimizeTableIcon = props.minimized ? ChevronRightIcon : ChevronLeftIcon; const FullScreenIcon = props.fullScreen ? AiOutlineFullscreenExit : AiOutlineFullscreen; - const displaySpansCol = stepsWithEllapsedTime.some((step) => step.spans.length > 0); + const displaySpansCol = stepsWithEllapsedTime.some( + (step) => step.spans.length > 0 || step.streaming_events.length > 0 + ); const displayLinksCol = props.links.length > 0; const linksBySequenceID = props.links.reduce((acc, child) => { const existing = acc.get(child.sequence_id || -1) || []; @@ -1381,8 +1367,7 @@ const ParentLink = (props: {
+ to={`/project/${props.projectId}/${props.parentPointer.partition_key}/${props.parentPointer.app_id}`}> {props.parentPointer.app_id} @ From ef9906ffbb579df94bd5a4851bad042d671c95ac Mon Sep 17 00:00:00 2001 From: elijahbenizzy Date: Mon, 9 Sep 2024 16:27:20 -0700 Subject: [PATCH 03/10] Adds schema tracking to action This is just a Quick wrapper class to represent a schema. Note that this is currently used internally, just to store the appropriate information. This does not validate or do conversion, currently that is done within the pydantic model state typing system (which is also internal in its implementation). We will likely centralize that logic at some point when we get more -- it would look something like this: 1. Action is passed an ActionSchema 2. Action is parameterized on the ActionSchema types 3. Action takes state, validates the type and converts to StateInputType 4. Action runs, returns intermediate result + state 5. Action validates intermediate result type (or converts to dict? Probably just keeps it 6. Action converts StateOutputType to State Also we don't have this split out into two classes (input/output schema), but we will likely do that shortly. 
--- burr/core/action.py | 34 +++++++++++++++++++++ burr/core/application.py | 39 ++++++++++++++---------- burr/core/typing.py | 41 ++++++++++++++++++++++++++ burr/integrations/pydantic.py | 54 ++++++++++++++++++++++++++++++++-- pyproject.toml | 2 +- tests/core/test_application.py | 12 +++++--- 6 files changed, 158 insertions(+), 24 deletions(-) diff --git a/burr/core/action.py b/burr/core/action.py index 9ab00a86..598d2263 100644 --- a/burr/core/action.py +++ b/burr/core/action.py @@ -29,6 +29,7 @@ from typing import Self from burr.core.state import State +from burr.core.typing import ActionSchema class Function(abc.ABC): @@ -130,6 +131,20 @@ def update(self, result: dict, state: State) -> State: pass +class DefaultSchema(ActionSchema): + def state_input_type(self) -> type[State]: + raise NotImplementedError + + def state_output_type(self) -> type[State]: + raise NotImplementedError + + def intermediate_result_type(self) -> type[dict]: + return dict + + +DEFAULT_SCHEMA = DefaultSchema() + + class Action(Function, Reducer, abc.ABC): def __init__(self): """Represents an action in a state machine. This is the base class from which @@ -173,6 +188,10 @@ def single_step(self) -> bool: def streaming(self) -> bool: return False + @property + def schema(self) -> ActionSchema: + return DEFAULT_SCHEMA + def get_source(self) -> str: """Returns the source code of the action. This will default to the source code of the class in which the action is implemented, @@ -524,6 +543,7 @@ def __init__( bound_params: Optional[dict] = None, input_spec: Optional[tuple[list[str], list[str]]] = None, originating_fn: Optional[Callable] = None, + schema: ActionSchema = DEFAULT_SCHEMA, ): """Instantiates a function-based action with the given function, reads, and writes. The function must take in a state and return a tuple of (result, new_state). 
@@ -548,6 +568,7 @@ def __init__( [item for item in input_spec[1] if item not in self._bound_params], ) ) + self._schema = schema @property def fn(self) -> Callable: @@ -565,6 +586,10 @@ def writes(self) -> list[str]: def inputs(self) -> tuple[list[str], list[str]]: return self._inputs + @property + def schema(self) -> ActionSchema: + return self._schema + def with_params(self, **kwargs: Any) -> "FunctionBasedAction": """Binds parameters to the function. Note that there is no reason to call this by the user. This *could* @@ -580,6 +605,8 @@ def with_params(self, **kwargs: Any) -> "FunctionBasedAction": self._writes, {**self._bound_params, **kwargs}, input_spec=self._inputs, + originating_fn=self._originating_fn, + schema=self._schema, ) def run_and_update(self, state: State, **run_kwargs) -> tuple[dict, State]: @@ -981,6 +1008,7 @@ def __init__( bound_params: Optional[dict] = None, input_spec: Optional[tuple[list[str], list[str]]] = None, originating_fn: Optional[Callable] = None, + schema: ActionSchema = DEFAULT_SCHEMA, ): """Instantiates a function-based streaming action with the given function, reads, and writes. The function must take in a state (and inputs) and return a generator of (result, new_state). 
@@ -1003,6 +1031,7 @@ def __init__( ) ) self._originating_fn = originating_fn if originating_fn is not None else fn + self._schema = schema async def _a_stream_run_and_update( self, state: State, **run_kwargs @@ -1046,6 +1075,7 @@ def with_params(self, **kwargs: Any) -> "FunctionBasedStreamingAction": {**self._bound_params, **kwargs}, input_spec=self._inputs, originating_fn=self._originating_fn, + schema=self._schema, ) @property @@ -1056,6 +1086,10 @@ def inputs(self) -> tuple[list[str], list[str]]: def fn(self) -> Union[StreamingFn, StreamingFnAsync]: return self._fn + @property + def schema(self) -> ActionSchema: + return self._schema + def is_async(self) -> bool: return inspect.isasyncgenfunction(self._fn) diff --git a/burr/core/application.py b/burr/core/application.py index dd13d968..0b4c8c77 100644 --- a/burr/core/application.py +++ b/burr/core/application.py @@ -30,6 +30,7 @@ from burr.common import types as burr_types from burr.core import persistence, validation from burr.core.action import ( + DEFAULT_SCHEMA, Action, AsyncStreamingAction, AsyncStreamingResultContainer, @@ -44,7 +45,7 @@ from burr.core.graph import Graph, GraphBuilder from burr.core.persistence import BaseStateLoader, BaseStateSaver from burr.core.state import State -from burr.core.typing import DictBasedTypingSystem +from burr.core.typing import ActionSchema, DictBasedTypingSystem, TypingSystem from burr.core.validation import BASE_ERROR_MESSAGE from burr.lifecycle.base import ExecuteMethod, LifecycleAdapter, PostRunStepHook, PreRunStepHook from burr.lifecycle.internal import LifecycleAdapterSet @@ -52,7 +53,8 @@ from burr.visibility.tracing import tracer_factory_context_var if TYPE_CHECKING: - from burr.core.typing import TypingSystem + # TODO -- push type-checking check back from here + # OR just put everything under type-chekcing... 
from burr.tracking.base import TrackingClient logger = logging.getLogger(__name__) @@ -64,8 +66,13 @@ StateTypeToSet = TypeVar("StateTypeToSet") -def _validate_result(result: dict, name: str) -> None: - if not isinstance(result, dict): +def _validate_result(result: Any, name: str, schema: ActionSchema = DEFAULT_SCHEMA) -> None: + # TODO -- validate the output type is action schema's output type... + # TODO -- split out the action schema into input/output schema types + # Then action schema will have both + # we'll just need to ensure we pass the right ones + result_type = schema.intermediate_result_type() + if not isinstance(result, result_type): raise ValueError( f"Action {name} returned a non-dict result: {result}. " f"All results must be dictionaries." @@ -80,13 +87,15 @@ def _raise_fn_return_validation_error(output: Any, action_name: str): ) -def _adjust_single_step_output(output: Union[State, Tuple[dict, State]], action_name: str): +def _adjust_single_step_output( + output: Union[State, Tuple[dict, State]], action_name: str, action_schema: ActionSchema +): """Adjusts the output of a single step action to be a tuple of (result, state) or just state""" if isinstance(output, tuple): if not len(output) == 2: _raise_fn_return_validation_error(output, action_name) - _validate_result(output[0], action_name) + _validate_result(output[0], action_name, action_schema) if not isinstance(output[1], State): _raise_fn_return_validation_error(output, action_name) return output @@ -242,11 +251,10 @@ def _run_single_step_action( # TODO -- guard all reads/writes with a subset of the state action.validate_inputs(inputs) result, new_state = _adjust_single_step_output( - action.run_and_update(state, **inputs), action.name + action.run_and_update(state, **inputs), action.name, action.schema ) - _validate_result(result, action.name) + _validate_result(result, action.name, action.schema) out = result, _state_update(state, new_state) - _validate_result(result, action.name) 
_validate_reducer_writes(action, new_state, action.name) return out @@ -300,8 +308,7 @@ def _run_single_step_streaming_action( f"Action {action.name} did not return a state update. For streaming actions, the last yield " f"statement must be a tuple of (result, state_update). For example, yield dict(foo='bar'), state.update(foo='bar')" ) - # TODO -- get this back in and use the action's schema (still not set) to validate the result... - # _validate_result(result, action.name) + _validate_result(result, action.name, action.schema) _validate_reducer_writes(action, state_update, action.name) yield result, state_update @@ -354,7 +361,7 @@ async def _arun_single_step_streaming_action( f"statement must be a tuple of (result, state_update). For example, yield dict(foo='bar'), state.update(foo='bar')" ) # TODO -- add back in validation when we have a schema - # _validate_result(result, action.name) + _validate_result(result, action.name, action.schema) _validate_reducer_writes(action, state_update, action.name) # TODO -- add guard against zero-length stream yield result, state_update @@ -405,7 +412,7 @@ def _run_multi_step_streaming_action( count += 1 yield next_result, None state_update = _run_reducer(action, state, result, action.name) - _validate_result(result, action.name) + _validate_result(result, action.name, action.schema) _validate_reducer_writes(action, state_update, action.name) yield result, state_update @@ -449,7 +456,7 @@ async def _arun_multi_step_streaming_action( count += 1 yield next_result, None state_update = _run_reducer(action, state, result, action.name) - _validate_result(result, action.name) + _validate_result(result, action.name, action.schema) _validate_reducer_writes(action, state_update, action.name) yield result, state_update @@ -461,9 +468,9 @@ async def _arun_single_step_action( state_to_use = state action.validate_inputs(inputs) result, new_state = _adjust_single_step_output( - await action.run_and_update(state_to_use, **inputs), action.name 
+ await action.run_and_update(state_to_use, **inputs), action.name, action.schema ) - _validate_result(result, action.name) + _validate_result(result, action.name, action.schema) _validate_reducer_writes(action, new_state, action.name) return result, _state_update(state, new_state) diff --git a/burr/core/typing.py b/burr/core/typing.py index c0c9cdb3..39de1757 100644 --- a/burr/core/typing.py +++ b/burr/core/typing.py @@ -69,6 +69,47 @@ def construct_state(self, data: BaseType) -> State[BaseType]: """ +StateInputType = TypeVar("StateInputType") +StateOutputType = TypeVar("StateOutputType") +IntermediateResultType = TypeVar("IntermediateResultType") + + +class ActionSchema( + abc.ABC, + Generic[ + StateInputType, + StateOutputType, + IntermediateResultType, + ], +): + """Quick wrapper class to represent a schema. Note that this is currently used internally, + just to store the appropriate information. This does not validate or do conversion, currently that + is done within the pydantic model state typing system (which is also internal in its implementation). + + + + We will likely centralize that logic at some point when we get more -- it would look something like this: + 1. Action is passed an ActionSchema + 2. Action is parameterized on the ActionSchema types + 3. Action takes state, validates the type and converts to StateInputType + 4. Action runs, returns intermediate result + state + 5. Action validates intermediate result type (or converts to dict? Probably just keeps it + 6. Action converts StateOutputType to State + """ + + @abc.abstractmethod + def state_input_type() -> Type[StateInputType]: + pass + + @abc.abstractmethod + def state_output_type() -> Type[StateOutputType]: + pass + + @abc.abstractmethod + def intermediate_result_type() -> Type[IntermediateResultType]: + pass + + class DictBasedTypingSystem(TypingSystem[dict]): """Effectively a no-op. State is backed by a dictionary, which allows every state item to... 
be a dictionary.""" diff --git a/burr/integrations/pydantic.py b/burr/integrations/pydantic.py index 22a82a48..6665ab1a 100644 --- a/burr/integrations/pydantic.py +++ b/burr/integrations/pydantic.py @@ -22,7 +22,7 @@ from burr.core import Action, Graph, State from burr.core.action import FunctionBasedAction, FunctionBasedStreamingAction, bind, get_inputs -from burr.core.typing import TypingSystem +from burr.core.typing import ActionSchema, TypingSystem Inputs = ParamSpec("Inputs") @@ -133,8 +133,37 @@ def _validate_keys(model: Type[pydantic.BaseModel], keys: List[str], fn: Callabl ) +StateInputType = TypeVar("StateInputType", bound=pydantic.BaseModel) +StateOutputType = TypeVar("StateOutputType", bound=pydantic.BaseModel) +IntermediateResultType = TypeVar("IntermediateResultType", bound=Union[pydantic.BaseModel, dict]) + + +class PydanticActionSchema(ActionSchema[StateInputType, StateOutputType, IntermediateResultType]): + def __init__( + self, + input_type: Type[StateInputType], + output_type: Type[StateOutputType], + intermediate_result_type: Type[IntermediateResultType], + ): + self._input_type = input_type + self._output_type = output_type + self._intermediate_result_type = intermediate_result_type + + def state_input_type(self) -> Type[StateInputType]: + return self._input_type + + def state_output_type(self) -> Type[StateOutputType]: + return self._output_type + + def intermediate_result_type(self) -> type[IntermediateResultType]: + return self._intermediate_result_type + + def pydantic_action( - reads: List[str], writes: List[str] + reads: List[str], + writes: List[str], + state_input_type: Optional[Type[pydantic.BaseModel]] = None, + state_output_type: Optional[Type[pydantic.BaseModel]] = None, ) -> Callable[[PydanticActionFunction], PydanticActionFunction]: """Action that specifies inputs/outputs using pydantic models. This should make it easier to develop with guardrails. 
@@ -147,7 +176,15 @@ def pydantic_action( """ def decorator(fn: PydanticActionFunction) -> PydanticActionFunction: - itype, otype = _validate_and_extract_signature_types(fn) + if state_input_type is None and state_output_type is None: + itype, otype = _validate_and_extract_signature_types(fn) + + elif state_input_type is not None and state_output_type is not None: + itype, otype = state_input_type, state_output_type + else: + raise ValueError( + "If you specify state_input_type or state_output_type, you must specify both." + ) _validate_keys(model=itype, keys=reads, fn=fn) _validate_keys(model=otype, keys=writes, fn=fn) SubsetInputType = subset_model( @@ -162,6 +199,7 @@ def decorator(fn: PydanticActionFunction) -> PydanticActionFunction: force_optional_fields=[], model_name_suffix=f"{fn.__name__}_input", ) + # TODO -- figure out def action_function(state: State, **kwargs) -> State: model_to_use = model_from_state(model=SubsetInputType, state=state) @@ -191,6 +229,11 @@ async def async_action_function(state: State, **kwargs) -> State: writes, input_spec=get_inputs({}, fn), originating_fn=fn, + schema=PydanticActionSchema( + input_type=SubsetInputType, + output_type=SubsetOutputType, + intermediate_result_type=dict, + ), ), ) setattr(fn, "bind", types.MethodType(bind, fn)) @@ -316,6 +359,11 @@ async def async_action_generator( writes, input_spec=get_inputs({}, fn), originating_fn=fn, + schema=PydanticActionSchema( + input_type=SubsetInputType, + output_type=SubsetOutputType, + intermediate_result_type=dict, + ), ), ) setattr(fn, "bind", types.MethodType(bind, fn)) diff --git a/pyproject.toml b/pyproject.toml index ebcb755c..6834037c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "burr" -version = "0.30.0rc4" +version = "0.30.0rc5" dependencies = [] # yes, there are none requires-python = ">=3.9" authors = [ diff --git a/tests/core/test_application.py b/tests/core/test_application.py index 
b6fbb967..11256f16 100644 --- a/tests/core/test_application.py +++ b/tests/core/test_application.py @@ -9,6 +9,7 @@ from burr.core import State from burr.core.action import ( + DEFAULT_SCHEMA, Action, AsyncGenerator, AsyncStreamingAction, @@ -2426,25 +2427,28 @@ def test__validate_start_not_found(): def test__adjust_single_step_output_result_and_state(): state = State({"count": 1}) result = {"count": 1} - assert _adjust_single_step_output((result, state), "test_action") == (result, state) + assert _adjust_single_step_output((result, state), "test_action", DEFAULT_SCHEMA) == ( + result, + state, + ) def test__adjust_single_step_output_just_state(): state = State({"count": 1}) - assert _adjust_single_step_output(state, "test_action") == ({}, state) + assert _adjust_single_step_output(state, "test_action", DEFAULT_SCHEMA) == ({}, state) def test__adjust_single_step_output_errors_incorrect_type(): state = "foo" with pytest.raises(ValueError, match="must return either"): - _adjust_single_step_output(state, "test_action") + _adjust_single_step_output(state, "test_action", DEFAULT_SCHEMA) def test__adjust_single_step_output_errors_incorrect_result_type(): state = State() result = "bar" with pytest.raises(ValueError, match="non-dict"): - _adjust_single_step_output((state, result), "test_action") + _adjust_single_step_output((state, result), "test_action", DEFAULT_SCHEMA) def test_application_builder_unset(): From 73dc3cf7c2567fa7100e91ba3f71178f3f8caff2 Mon Sep 17 00:00:00 2001 From: elijahbenizzy Date: Mon, 9 Sep 2024 21:28:12 -0700 Subject: [PATCH 04/10] Fixes up typing for streaming result containers --- burr/core/action.py | 48 +++++++++++++------ burr/core/application.py | 11 +++-- burr/integrations/pydantic.py | 2 +- .../application.py | 9 +++- 4 files changed, 47 insertions(+), 23 deletions(-) diff --git a/burr/core/action.py b/burr/core/action.py index 598d2263..288422ba 100644 --- a/burr/core/action.py +++ b/burr/core/action.py @@ -6,6 +6,7 @@ import sys import 
types import typing +from collections.abc import AsyncIterator from typing import ( Any, AsyncGenerator, @@ -199,6 +200,11 @@ def get_source(self) -> str: to display a different source""" return inspect.getsource(self.__class__) + def input_schema(self) -> Any: + """Returns the input schema for the action. + The input schema is a type that can be used to validate the input to the action""" + return None + def __repr__(self): read_repr = ", ".join(self.reads) if self.reads else "{}" write_repr = ", ".join(self.writes) if self.writes else "{}" @@ -732,7 +738,10 @@ def is_async(self) -> bool: return True -class StreamingResultContainer(Iterator[dict], Generic[StateType]): +StreamResultType = TypeVar("StreamResultType") + + +class StreamingResultContainer(Generic[StateType, StreamResultType], Iterator[StreamResultType]): """Container for a streaming result. This allows you to: 1. Iterate over the result as it comes in @@ -758,13 +767,15 @@ class StreamingResultContainer(Iterator[dict], Generic[StateType]): @staticmethod def pass_through( - results: dict, final_state: State[StateType] - ) -> "StreamingResultContainer[StateType]": + results: StreamResultType, final_state: State[StateType] + ) -> "StreamingResultContainer[StreamResultType, StateType]": """Instantiates a streaming result container that just passes through the given results This is to be used internally -- it allows us to wrap non-streaming action results in a streaming result container.""" - def empty_generator() -> Generator[Tuple[dict, Optional[State[StateType]]], None, None]: + def empty_generator() -> ( + Generator[Tuple[StreamResultType, Optional[State[StateType]]], None, None] + ): yield results, final_state return StreamingResultContainer( @@ -798,7 +809,7 @@ def __init__( self._result = None self._callback_realized = False - def __next__(self): + def __next__(self) -> StreamResultType: if self._result is not None: # we're done, and we've run through it raise StopIteration @@ -808,7 +819,7 @@ def 
__next__(self): raise StopIteration return result - def __iter__(self): + def __iter__(self) -> Iterator[StreamResultType]: def gen_fn(): try: while True: @@ -836,7 +847,10 @@ def get(self) -> StreamType: return self._result -class AsyncStreamingResultContainer(typing.AsyncIterator[dict], Generic[StateType]): +class AsyncStreamingResultContainer( + Generic[StateType, StreamResultType], + AsyncIterator[StreamResultType], +): """Container for an async streaming result. This allows you to: 1. Iterate over the result as it comes in 2. Await the final result/state at the end @@ -863,9 +877,11 @@ def __init__( self, streaming_result_generator: AsyncGeneratorReturnType, initial_state: State[StateType], - process_result: Callable[[dict, State[StateType]], tuple[dict, State[StateType]]], + process_result: Callable[ + [StreamResultType, State[StateType]], tuple[StreamResultType, State[StateType]] + ], callback: Callable[ - [Optional[dict], State[StateType], Optional[Exception]], + [Optional[StreamResultType], State[StateType], Optional[Exception]], typing.Coroutine[None, None, None], ], ): @@ -885,7 +901,7 @@ def __init__( self._result = None self._callback_realized = False - async def __anext__(self): + async def __anext__(self) -> StreamResultType: """Moves to the next state in the streaming result""" if self._result is not None: # we're done, and we've run through it @@ -896,7 +912,7 @@ async def __anext__(self): raise StopAsyncIteration return result - def __aiter__(self): + def __aiter__(self) -> AsyncIterator[StreamResultType]: """Gives the iterator. Just calls anext, assigning the result in the finally block. 
Note this may not be perfect due to the complexity of callbacks for async generators, but it works in most cases.""" @@ -918,7 +934,7 @@ async def gen_fn(): # return it as `__aiter__` cannot be async/have awaits :/ return gen_fn() - async def get(self) -> tuple[Optional[dict], State[StateType]]: + async def get(self) -> tuple[Optional[StreamResultType], State[StateType]]: # exhaust the generator async for _ in self: pass @@ -927,7 +943,7 @@ async def get(self) -> tuple[Optional[dict], State[StateType]]: @staticmethod def pass_through( - results: dict, final_state: State[StateType] + results: StreamResultType, final_state: State[StateType] ) -> "AsyncStreamingResultContainer[StateType]": """Creates a streaming result container that just passes through the given results. This is not a public facing API.""" @@ -935,10 +951,12 @@ def pass_through( async def just_results() -> AsyncGeneratorReturnType: yield results, final_state - async def empty_callback(result: Optional[dict], state: State, exc: Optional[Exception]): + async def empty_callback( + result: Optional[StreamResultType], state: State, exc: Optional[Exception] + ): pass - return AsyncStreamingResultContainer[StateType]( + return AsyncStreamingResultContainer[StateType, StreamResultType]( just_results(), final_state, lambda result, state: (result, state), empty_callback ) diff --git a/burr/core/application.py b/burr/core/application.py index 0b4c8c77..2cdd2303 100644 --- a/burr/core/application.py +++ b/burr/core/application.py @@ -74,7 +74,7 @@ def _validate_result(result: Any, name: str, schema: ActionSchema = DEFAULT_SCHE result_type = schema.intermediate_result_type() if not isinstance(result, result_type): raise ValueError( - f"Action {name} returned a non-dict result: {result}. " + f"Action {name} returned a non-{result_type.__name__} result: {result}. " f"All results must be dictionaries." 
) @@ -682,6 +682,7 @@ def post_run_step( ApplicationStateType = TypeVar("ApplicationStateType") +StreamResultType = TypeVar("StreamResultType", bound=Union[dict, Any]) class Application(Generic[ApplicationStateType]): @@ -1205,9 +1206,9 @@ async def arun( def stream_result( self, halt_after: list[str], - halt_before: list[str] = None, + halt_before: Optional[list[str]] = None, inputs: Optional[Dict[str, Any]] = None, - ) -> Tuple[Action, StreamingResultContainer[ApplicationStateType]]: + ) -> Tuple[Action, StreamingResultContainer[ApplicationStateType, Union[dict, Any]]]: """Streams a result out. :param halt_after: The list of actions to halt after execution of. It will halt on the first one. @@ -1454,9 +1455,9 @@ def callback( async def astream_result( self, halt_after: list[str], - halt_before: list[str] = None, + halt_before: Optional[list[str]] = None, inputs: Optional[Dict[str, Any]] = None, - ) -> Tuple[Action, AsyncStreamingResultContainer[ApplicationStateType]]: + ) -> Tuple[Action, AsyncStreamingResultContainer[ApplicationStateType, Union[dict, Any]]]: """Streams a result out in an asynchronous manner. :param halt_after: The list of actions to halt after execution of. It will halt on the first one. 
diff --git a/burr/integrations/pydantic.py b/burr/integrations/pydantic.py index 6665ab1a..bcc76453 100644 --- a/burr/integrations/pydantic.py +++ b/burr/integrations/pydantic.py @@ -362,7 +362,7 @@ async def async_action_generator( schema=PydanticActionSchema( input_type=SubsetInputType, output_type=SubsetOutputType, - intermediate_result_type=dict, + intermediate_result_type=stream_type_processed, ), ), ) diff --git a/examples/youtube-to-social-media-post/application.py b/examples/youtube-to-social-media-post/application.py index 3a7f80c1..a3a0a8e9 100644 --- a/examples/youtube-to-social-media-post/application.py +++ b/examples/youtube-to-social-media-post/application.py @@ -9,6 +9,7 @@ from youtube_transcript_api import YouTubeTranscriptApi from burr.core import Application, ApplicationBuilder +from burr.core.action import AsyncStreamingResultContainer, StreamingResultContainer from burr.integrations.pydantic import ( PydanticTypingSystem, pydantic_action, @@ -289,10 +290,13 @@ def build_streaming_application_async() -> Application[ApplicationState]: async def run_async(): console = Console() app = build_streaming_application_async() + action, streaming_container = await app.astream_result( halt_after=["generate_post"], inputs={"youtube_url": "https://www.youtube.com/watch?v=hqutVJyd3TI"}, - ) + ) # type: ignore + streaming_container: AsyncStreamingResultContainer[ApplicationState, SocialMediaPost] + async for post in streaming_container: obj = post.model_dump() console.clear() @@ -305,7 +309,8 @@ async def run_async(): action, streaming_container = app.stream_result( halt_after=["generate_post"], inputs={"youtube_url": "https://www.youtube.com/watch?v=hqutVJyd3TI"}, - ) + ) # type: ignore + streaming_container: StreamingResultContainer[ApplicationState, SocialMediaPost] for post in streaming_container: obj = post.model_dump() console.clear() From 5bb00629a4e2ef68e84eb9e1998fe6f84eebc5d6 Mon Sep 17 00:00:00 2001 From: elijahbenizzy Date: Wed, 11 Sep 2024 
13:31:54 -0700 Subject: [PATCH 05/10] Adds pydantic field to action for easy use This is just syntactic sugar so we can call @action.pydantic(...). This does some tricky importing stuff, but does not change the required packages in any way --- burr/core/action.py | 157 +++++++++++++++++++++++----------- burr/integrations/pydantic.py | 6 +- 2 files changed, 112 insertions(+), 51 deletions(-) diff --git a/burr/core/action.py b/burr/core/action.py index 288422ba..a08c27e5 100644 --- a/burr/core/action.py +++ b/burr/core/action.py @@ -8,6 +8,7 @@ import typing from collections.abc import AsyncIterator from typing import ( + TYPE_CHECKING, Any, AsyncGenerator, Callable, @@ -20,6 +21,7 @@ Optional, Protocol, Tuple, + Type, TypeVar, Union, ) @@ -32,6 +34,17 @@ from burr.core.state import State from burr.core.typing import ActionSchema +# This is here to make accessing the pydantic actions easier +# we just attach them to action so you can call `@action.pyddantic...` +# The IDE will like it better and thus be able to auto-complete/type-check +# TODO - come up with a better way to attach integrations to core objects +imported_pydantic = False +if TYPE_CHECKING: + try: + from pydantic import BaseModel + except ImportError: + pass + class Function(abc.ABC): """Interface to represent the 'computing' part of an action""" @@ -1161,77 +1174,125 @@ def my_action(state: State, z: int) -> tuple[dict, State]: return self -def action(reads: List[str], writes: List[str]) -> Callable[[Callable], FunctionRepresentingAction]: - """Decorator to create a function-based action. This is user-facing. - Note that, in the future, with typed state, we may not need this for - all cases. 
+class action: + @staticmethod + def pydantic( + reads: List[str], + writes: List[str], + state_input_type: Optional[Type["BaseModel"]] = None, + state_output_type: Optional[Type["BaseModel"]] = None, + ) -> Callable: + try: + from burr.integrations.pydantic import pydantic_action + except ImportError: + raise ImportError( + "Please install pydantic to use the pydantic decorator. pip install burr[pydantic]" + ) + + return pydantic_action( + reads=reads, + writes=writes, + state_input_type=state_input_type, + state_output_type=state_output_type, + ) + + def __init__(self, reads: List[str], writes: List[str]): + """Decorator to create a function-based action. This is user-facing. + Note that, in the future, with typed state, we may not need this for + all cases. - If parameters are not bound, they will be interpreted as inputs and must - be passed in at runtime. If they have default values, they will be recorded - as optional inputs. These can (optionally) be provided at runtime. + If parameters are not bound, they will be interpreted as inputs and must + be passed in at runtime. If they have default values, they will be recorded + as optional inputs. These can (optionally) be provided at runtime. 
- :param reads: Items to read from the state - :param writes: Items to write to the state - :return: The decorator to assign the function as an action - """ + :param reads: Items to read from the state + :param writes: Items to write to the state + :return: The decorator to assign the function as an action + """ + self.reads = reads + self.writes = writes - def decorator(fn) -> FunctionRepresentingAction: - setattr(fn, FunctionBasedAction.ACTION_FUNCTION, FunctionBasedAction(fn, reads, writes)) + def __call__(self, fn) -> FunctionRepresentingAction: + setattr( + fn, + FunctionBasedAction.ACTION_FUNCTION, + FunctionBasedAction(fn, self.reads, self.writes), + ) setattr(fn, "bind", types.MethodType(bind, fn)) return fn - return decorator +class streaming_action: + @staticmethod + def pydantic( + reads: List[str], + writes: List[str], + state_input_type: Type["BaseModel"], + state_output_type: Type["BaseModel"], + stream_type: Type[StreamType], + ) -> Callable: + try: + from burr.integrations.pydantic import pydantic_streaming_action + except ImportError: + raise ImportError( + "Please install pydantic to use the pydantic decorator. pip install burr[pydantic]" + ) -def streaming_action( - reads: List[str], writes: List[str] -) -> Callable[[Callable], FunctionRepresentingAction]: - """Decorator to create a streaming function-based action. This is user-facing. + return pydantic_streaming_action( + reads=reads, + writes=writes, + state_input_type=state_input_type, + state_output_type=state_output_type, + stream_type=stream_type, + ) - If parameters are not bound, they will be interpreted as inputs and must be passed in at runtime. + def __init__(self, reads: List[str], writes: List[str]): + """Decorator to create a streaming function-based action. This is user-facing. - See the following example for how to use this decorator -- this reads ``prompt`` from the state and writes - ``response`` back out, yielding all intermediate chunks. 
+ If parameters are not bound, they will be interpreted as inputs and must be passed in at runtime. - Note that this *must* return a value. If it does not, we will not know how to update the state, and - we will error out. + See the following example for how to use this decorator -- this reads ``prompt`` from the state and writes + ``response`` back out, yielding all intermediate chunks. - .. code-block:: python + Note that this *must* return a value. If it does not, we will not know how to update the state, and + we will error out. - @streaming_action(reads=["prompt"], writes=['response']) - def streaming_response(state: State) -> Generator[dict, None, tuple[dict, State]]: - response = client.chat.completions.create( - model='gpt-3.5-turbo', - messages=[{ - 'role': 'user', - 'content': state["prompt"] - }], - temperature=0, - ) - buffer = [] - for chunk in response: - delta = chunk.choices[0].delta.content - buffer.append(delta) - # yield partial results - yield {'response': delta}, None - full_response = ''.join(buffer) - # return the final result - return {'response': full_response}, state.update(response=full_response) + .. 
code-block:: python - """ + @streaming_action(reads=["prompt"], writes=['response']) + def streaming_response(state: State) -> Generator[dict, None, tuple[dict, State]]: + response = client.chat.completions.create( + model='gpt-3.5-turbo', + messages=[{ + 'role': 'user', + 'content': state["prompt"] + }], + temperature=0, + ) + buffer = [] + for chunk in response: + delta = chunk.choices[0].delta.content + buffer.append(delta) + # yield partial results + yield {'response': delta}, None + full_response = ''.join(buffer) + # return the final result + return {'response': full_response}, state.update(response=full_response) - def wrapped(fn) -> FunctionRepresentingAction: + """ + self.reads = reads + self.writes = writes + + def __call__(self, fn: Callable) -> FunctionRepresentingAction: fn = copy_func(fn) setattr( fn, FunctionBasedAction.ACTION_FUNCTION, - FunctionBasedStreamingAction(fn, reads, writes), + FunctionBasedStreamingAction(fn, self.reads, self.writes), ) setattr(fn, "bind", types.MethodType(bind, fn)) return fn - return wrapped - ActionT = TypeVar("ActionT", bound=Action) diff --git a/burr/integrations/pydantic.py b/burr/integrations/pydantic.py index bcc76453..3b237266 100644 --- a/burr/integrations/pydantic.py +++ b/burr/integrations/pydantic.py @@ -287,9 +287,9 @@ def _validate_and_extract_signature_types_streaming( def pydantic_streaming_action( reads: List[str], writes: List[str], - state_input_type: Optional[Type[pydantic.BaseModel]], - state_output_type: Optional[Type[pydantic.BaseModel]], - stream_type: Optional[PartialType], + state_input_type: Type[pydantic.BaseModel], + state_output_type: Type[pydantic.BaseModel], + stream_type: PartialType, ) -> Callable[[PydanticStreamingActionFunction], PydanticStreamingActionFunction]: """Creates a streaming action that uses pydantic models. 
From d9c6b59c3d694bbee167f2ddb46b9f4f4b6c2d1d Mon Sep 17 00:00:00 2001 From: elijahbenizzy Date: Wed, 11 Sep 2024 13:34:14 -0700 Subject: [PATCH 06/10] Updates youtube server example --- burr/core/action.py | 2 +- .../application.py | 31 ++++----- .../youtube-to-social-media-post/curls.sh | 20 ++++++ .../youtube-to-social-media-post/server.py | 63 ++++++++++++++----- 4 files changed, 85 insertions(+), 31 deletions(-) create mode 100755 examples/youtube-to-social-media-post/curls.sh diff --git a/burr/core/action.py b/burr/core/action.py index a08c27e5..ab80b2b0 100644 --- a/burr/core/action.py +++ b/burr/core/action.py @@ -1229,7 +1229,7 @@ def pydantic( writes: List[str], state_input_type: Type["BaseModel"], state_output_type: Type["BaseModel"], - stream_type: Type[StreamType], + stream_type: Union[Type["BaseModel"], Type[dict]], ) -> Callable: try: from burr.integrations.pydantic import pydantic_streaming_action diff --git a/examples/youtube-to-social-media-post/application.py b/examples/youtube-to-social-media-post/application.py index a3a0a8e9..e0b6983e 100644 --- a/examples/youtube-to-social-media-post/application.py +++ b/examples/youtube-to-social-media-post/application.py @@ -1,5 +1,5 @@ import textwrap -from typing import Any, Generator, Optional, Tuple, Union +from typing import Any, AsyncGenerator, Generator, Optional, Tuple, Union import instructor import openai @@ -8,13 +8,13 @@ from rich.console import Console from youtube_transcript_api import YouTubeTranscriptApi -from burr.core import Application, ApplicationBuilder -from burr.core.action import AsyncStreamingResultContainer, StreamingResultContainer -from burr.integrations.pydantic import ( - PydanticTypingSystem, - pydantic_action, - pydantic_streaming_action, +from burr.core import Application, ApplicationBuilder, action +from burr.core.action import ( + AsyncStreamingResultContainer, + StreamingResultContainer, + streaming_action, ) +from burr.integrations.pydantic import PydanticTypingSystem 
class Concept(BaseModel): @@ -122,7 +122,7 @@ def __copy__(self, memo: dict[int, Any] | None = None): # return new_obj -@pydantic_action(reads=[], writes=["transcript"]) +@action.pydantic(reads=[], writes=["transcript"]) def get_youtube_transcript(state: ApplicationState, youtube_url: str) -> ApplicationState: """Get the official YouTube transcript for a video given it's URL""" _, _, video_id = youtube_url.partition("?v=") @@ -134,7 +134,7 @@ def get_youtube_transcript(state: ApplicationState, youtube_url: str) -> Applica # store the transcript in state -@pydantic_action(reads=["transcript"], writes=["post"]) +@action.pydantic(reads=["transcript"], writes=["post"]) def generate_post(state: ApplicationState, llm_client) -> ApplicationState: """Use the Instructor LLM client to generate `SocialMediaPost` from the YouTube transcript.""" @@ -158,7 +158,7 @@ def generate_post(state: ApplicationState, llm_client) -> ApplicationState: return state -@pydantic_streaming_action( +@streaming_action.pydantic( reads=["transcript"], writes=["post"], state_input_type=ApplicationState, @@ -191,7 +191,7 @@ def generate_post_streaming( yield final_post, state -@pydantic_streaming_action( +@streaming_action.pydantic( reads=["transcript"], writes=["post"], state_input_type=ApplicationState, @@ -200,7 +200,7 @@ def generate_post_streaming( ) async def generate_post_streaming_async( state: ApplicationStateStream, llm_client -) -> Generator[Tuple[SocialMediaPost, Optional[ApplicationState]], None, None]: +) -> AsyncGenerator[Tuple[SocialMediaPost, Optional[ApplicationState]], None]: """Use the Instructor LLM client to generate `SocialMediaPost` from the YouTube transcript.""" transcript = state.transcript @@ -234,6 +234,7 @@ def build_application() -> Application[ApplicationState]: ) .with_transitions( ("get_youtube_transcript", "generate_post"), + ("generate_post", "get_youtube_transcript"), ) # .with_state_persister(SQLLitePersister(db_path=".burr.db", table_name="state")) 
.with_entrypoint("get_youtube_transcript") @@ -255,6 +256,7 @@ def build_streaming_application() -> Application[ApplicationState]: ) .with_transitions( ("get_youtube_transcript", "generate_post"), + ("generate_post", "get_youtube_transcript"), ) # .with_state_persister(SQLLitePersister(db_path=".burr.db", table_name="state")) .with_entrypoint("get_youtube_transcript") @@ -276,6 +278,7 @@ def build_streaming_application_async() -> Application[ApplicationState]: ) .with_transitions( ("get_youtube_transcript", "generate_post"), + ("generate_post", "get_youtube_transcript"), ) # .with_state_persister(SQLLitePersister(db_path=".burr.db", table_name="state")) .with_entrypoint("get_youtube_transcript") @@ -291,7 +294,7 @@ async def run_async(): console = Console() app = build_streaming_application_async() - action, streaming_container = await app.astream_result( + a, streaming_container = await app.astream_result( halt_after=["generate_post"], inputs={"youtube_url": "https://www.youtube.com/watch?v=hqutVJyd3TI"}, ) # type: ignore @@ -306,7 +309,7 @@ async def run_async(): if __name__ == "__main__": console = Console() app = build_streaming_application() - action, streaming_container = app.stream_result( + a, streaming_container = app.stream_result( halt_after=["generate_post"], inputs={"youtube_url": "https://www.youtube.com/watch?v=hqutVJyd3TI"}, ) # type: ignore diff --git a/examples/youtube-to-social-media-post/curls.sh b/examples/youtube-to-social-media-post/curls.sh new file mode 100755 index 00000000..fb479e56 --- /dev/null +++ b/examples/youtube-to-social-media-post/curls.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# Default to the 'social_media_post' endpoint if no argument is passed +ENDPOINT="social_media_post" +if [[ "$1" == "streaming_async" ]]; then + ENDPOINT="social_media_post_streaming_async" +elif [[ "$1" == "streaming" ]]; then + ENDPOINT="social_media_post_streaming" +fi + +# Perform the curl request to the chosen endpoint +curl -X 'GET' 
"http://localhost:7443/$ENDPOINT" \ + -s -H 'Accept: application/json' \ + --no-buffer | jq --unbuffered -c '.' | while IFS= read -r line; do + if [[ "$line" != "" ]]; then # Check for non-empty lines + clear + echo "$line" | jq --color-output . + sleep .01 # Add a small delay for visual clarity + fi +done diff --git a/examples/youtube-to-social-media-post/server.py b/examples/youtube-to-social-media-post/server.py index 4c0fd20f..906bb155 100644 --- a/examples/youtube-to-social-media-post/server.py +++ b/examples/youtube-to-social-media-post/server.py @@ -1,57 +1,88 @@ import contextlib +import json import logging import fastapi import uvicorn from application import ( ApplicationState, - ApplicationStateStream, + SocialMediaPost, build_application, - build_application_iterator_streaming, + build_streaming_application, + build_streaming_application_async, ) from fastapi.responses import StreamingResponse from burr.core import Application +from burr.core.action import AsyncStreamingResultContainer, StreamingResultContainer logger = logging.getLogger(__name__) # define a global `burr_app` variable burr_app: Application[ApplicationState] = None -# Second variant -- this uses a stream + a self-loop -# Note this will save a *lot* to the tracker, each stream! 
-burr_app_streaming_iterator: Application[ApplicationStateStream] = None +# This does streaming, in sync mode +burr_app_streaming: Application[ApplicationState] = None + +# And this does streaming, in async mode +burr_app_streaming_async: Application[ApplicationState] = None + +DEFAULT_YOUTUBE_URL = "https://www.youtube.com/watch?v=hqutVJyd3TI" @contextlib.asynccontextmanager async def lifespan(app: fastapi.FastAPI): """Instantiate the Burr application on FastAPI startup.""" # set value for the global `burr_app` variable - global burr_app, burr_app_streaming_iterator + global burr_app, burr_app_streaming, burr_app_streaming_async burr_app = build_application() - burr_app_streaming_iterator = build_application_iterator_streaming() + burr_app_streaming = build_streaming_application() + burr_app_streaming_async = build_streaming_application_async() yield app = fastapi.FastAPI(lifespan=lifespan) -@app.get("/social_media_post", response_model=ApplicationState) -def social_media_post(youtube_url: str) -> ApplicationState: +@app.get("/social_media_post", response_model=SocialMediaPost) +def social_media_post(youtube_url: str = DEFAULT_YOUTUBE_URL) -> SocialMediaPost: """Creates a completion for the chat message""" _, _, state = burr_app.run(halt_after=["generate_post"], inputs={"youtube_url": youtube_url}) - return state.data + return state.data.post + + +@app.get("/social_media_post_streaming_async", response_class=StreamingResponse) +async def social_media_post_streaming_async( + youtube_url: str = DEFAULT_YOUTUBE_URL, +) -> StreamingResponse: + """Creates a completion for the chat message""" + + async def gen(): + _, streaming_container = await burr_app_streaming_async.astream_result( + halt_after=["generate_post"], + inputs={"youtube_url": youtube_url}, + ) # type: ignore + streaming_container: AsyncStreamingResultContainer[ApplicationState, SocialMediaPost] + async for post in streaming_container: + obj = post.model_dump() + yield json.dumps(obj) + + return 
StreamingResponse(gen()) -@app.get("/social_media_post_streaming_1", response_model=StreamingResponse) -def social_media_post_streaming(youtube_url: str) -> StreamingResponse: +@app.get("/social_media_post_streaming", response_class=StreamingResponse) +def social_media_post_streaming(youtube_url: str = DEFAULT_YOUTUBE_URL) -> StreamingResponse: """Creates a completion for the chat message""" def gen(): - for action, _, state in burr_app_streaming_iterator.iterate( - halt_after=["final"], inputs={"youtube_url": youtube_url} - ): - yield state.data.model_dump_json() + _, streaming_container = burr_app_streaming.stream_result( + halt_after=["generate_post"], + inputs={"youtube_url": youtube_url}, + ) # type: ignore + streaming_container: StreamingResultContainer[ApplicationState, SocialMediaPost] + for post in streaming_container: + obj = post.model_dump() + yield json.dumps(obj) return StreamingResponse(gen()) From a6705db0d2e7e50a929ef05ff2fd3b5184039e7b Mon Sep 17 00:00:00 2001 From: elijahbenizzy Date: Wed, 11 Sep 2024 15:08:40 -0700 Subject: [PATCH 07/10] Adds more tests for pydantic typed state implementation --- burr/core/action.py | 15 ++++++++--- burr/integrations/pydantic.py | 11 +++++--- tests/core/test_action.py | 51 +++++++++++++++++++++++++++++++++++ 3 files changed, 70 insertions(+), 7 deletions(-) diff --git a/burr/core/action.py b/burr/core/action.py index ab80b2b0..7f99f48e 100644 --- a/burr/core/action.py +++ b/burr/core/action.py @@ -534,7 +534,14 @@ def is_async(self) -> bool: # the following exist to share implementation between FunctionBasedStreamingAction and FunctionBasedAction # TODO -- think through the class hierarchy to simplify, for now this is OK -def get_inputs(bound_params: dict, fn: Callable) -> tuple[list[str], list[str]]: +def derive_inputs_from_fn(bound_params: dict, fn: Callable) -> tuple[list[str], list[str]]: + """Derives inputs from the function, given the bound parameters. 
This assumes that the function + has inputs named `state`, as well as any number of other kwarg-boundable parameters. + + :param bound_params: Parameters that are already bound to the function + :param fn: Function to derive inputs from + :return: Required and optional inputs + """ sig = inspect.signature(fn) required_inputs, optional_inputs = [], [] for param_name, param in sig.parameters.items(): @@ -580,7 +587,7 @@ def __init__( self._writes = writes self._bound_params = bound_params if bound_params is not None else {} self._inputs = ( - get_inputs(self._bound_params, self._fn) + derive_inputs_from_fn(self._bound_params, self._fn) if input_spec is None else ( [item for item in input_spec[0] if item not in self._bound_params], @@ -852,7 +859,7 @@ def gen_fn(): # as the async version return gen_fn() - def get(self) -> StreamType: + def get(self) -> Tuple[StreamResultType, State[StateType]]: # exhaust the generator for _ in self: pass @@ -1054,7 +1061,7 @@ def __init__( self._writes = writes self._bound_params = bound_params if bound_params is not None else {} self._inputs = ( - get_inputs(self._bound_params, self._fn) + derive_inputs_from_fn(self._bound_params, self._fn) if input_spec is None else ( [item for item in input_spec[0] if item not in self._bound_params], diff --git a/burr/integrations/pydantic.py b/burr/integrations/pydantic.py index 3b237266..4b458e7b 100644 --- a/burr/integrations/pydantic.py +++ b/burr/integrations/pydantic.py @@ -21,7 +21,12 @@ from pydantic_core import PydanticUndefined from burr.core import Action, Graph, State -from burr.core.action import FunctionBasedAction, FunctionBasedStreamingAction, bind, get_inputs +from burr.core.action import ( + FunctionBasedAction, + FunctionBasedStreamingAction, + bind, + derive_inputs_from_fn, +) from burr.core.typing import ActionSchema, TypingSystem Inputs = ParamSpec("Inputs") @@ -227,7 +232,7 @@ async def async_action_function(state: State, **kwargs) -> State: async_action_function if is_async 
else action_function, reads, writes, - input_spec=get_inputs({}, fn), + input_spec=derive_inputs_from_fn({}, fn), originating_fn=fn, schema=PydanticActionSchema( input_type=SubsetInputType, @@ -357,7 +362,7 @@ async def async_action_generator( async_action_generator if is_async else action_generator, reads, writes, - input_spec=get_inputs({}, fn), + input_spec=derive_inputs_from_fn({}, fn), originating_fn=fn, schema=PydanticActionSchema( input_type=SubsetInputType, diff --git a/tests/core/test_action.py b/tests/core/test_action.py index 6a477973..675f5dd3 100644 --- a/tests/core/test_action.py +++ b/tests/core/test_action.py @@ -19,6 +19,7 @@ action, create_action, default, + derive_inputs_from_fn, streaming_action, ) @@ -743,3 +744,53 @@ async def callback(r: Optional[dict], s: State, e: Exception): ((result, state, error),) = called assert state["foo"] == "bar" assert result is None + + +def test_derive_inputs_from_fn_state_only(): + def fn(state): + ... + + bound_params = {} + required, optional = derive_inputs_from_fn(bound_params, fn) + assert required == [] + assert optional == [] + + +def test_derive_inputs_from_fn_state_and_required(): + def fn(state, a, b): + ... + + bound_params = {"state": 1} + required, optional = derive_inputs_from_fn(bound_params, fn) + assert required == ["a", "b"] + assert optional == [] + + +def test_derive_inputs_from_fn_state_required_and_optional(): + def fn(state, a, b=2): + ... + + bound_params = {"state": 1} + required, optional = derive_inputs_from_fn(bound_params, fn) + assert required == ["a"] + assert optional == ["b"] + + +def test_derive_inputs_from_fnh_state_and_all_bound_except_state(): + def fn(state, a, b): + ... + + bound_params = {"a": 1, "b": 2} + required, optional = derive_inputs_from_fn(bound_params, fn) + assert required == [] + assert optional == [] + + +def test_non_existent_bound_parameters(): + def fn(state, a): + ... 
+ + bound_params = {"a": 1, "non_existent": 2} + required, optional = derive_inputs_from_fn(bound_params, fn) + assert required == [] + assert optional == [] From 43a290b64091462808b8dba8dd03ed8e73e3b68d Mon Sep 17 00:00:00 2001 From: elijahbenizzy Date: Wed, 11 Sep 2024 18:06:45 -0700 Subject: [PATCH 08/10] Documentation + cleanup for @action.pydantic --- burr/core/action.py | 25 +- burr/core/application.py | 25 +- burr/integrations/pydantic.py | 23 +- docs/reference/actions.rst | 7 +- docs/reference/index.rst | 2 + docs/reference/tracking.rst | 2 +- examples/pydantic/centralized_state.py | 217 ----- examples/pydantic/decentralized_state.py | 224 ----- examples/typed-state/README.md | 263 ++++++ examples/typed-state/application.py | 292 ++++++ .../curls.sh | 0 examples/typed-state/notebook.ipynb | 836 ++++++++++++++++++ examples/typed-state/server.py | 109 +++ examples/typed-state/statemachine.png | Bin 0 -> 59709 bytes .../application.py | 240 +---- .../youtube-to-social-media-post/server.py | 73 +- .../statemachine.png | Bin 11515 -> 25106 bytes pyproject.toml | 2 +- 18 files changed, 1591 insertions(+), 749 deletions(-) delete mode 100644 examples/pydantic/centralized_state.py delete mode 100644 examples/pydantic/decentralized_state.py create mode 100644 examples/typed-state/README.md create mode 100644 examples/typed-state/application.py rename examples/{youtube-to-social-media-post => typed-state}/curls.sh (100%) create mode 100644 examples/typed-state/notebook.ipynb create mode 100644 examples/typed-state/server.py create mode 100644 examples/typed-state/statemachine.png diff --git a/burr/core/action.py b/burr/core/action.py index 7f99f48e..cb9a7ac5 100644 --- a/burr/core/action.py +++ b/burr/core/action.py @@ -1189,6 +1189,19 @@ def pydantic( state_input_type: Optional[Type["BaseModel"]] = None, state_output_type: Optional[Type["BaseModel"]] = None, ) -> Callable: + """Action that specifies inputs/outputs using pydantic models. 
+ This should make it easier to develop with guardrails. + + :param reads: keys that this model reads. Note that this will be a subset of the pydantic model with which this is decorated. + We will be validating that the keys are present in the model. + :param writes: keys that this model writes. Note that this will be a subset of the pydantic model with which this is decorated. + We will be validating that the keys are present in the model. + :param state_input_type: The pydantic model type that is used to represent the input state. + If this is None it will attempt to derive from the signature. + :param state_output_type: The pydantic model type that is used to represent the output state. + If this is None it will attempt to derive from the signature. + :return: + """ try: from burr.integrations.pydantic import pydantic_action except ImportError: @@ -1238,11 +1251,21 @@ def pydantic( state_output_type: Type["BaseModel"], stream_type: Union[Type["BaseModel"], Type[dict]], ) -> Callable: + """Creates a streaming action that uses pydantic models. + + :param reads: The fields this consumes from the state. + :param writes: The fields this writes to the state. + :param stream_type: The pydantic model or dictionary type that is used to represent the partial results. + Use a dict if you want this untyped. + :param state_input_type: The pydantic model type that is used to represent the input state. + :param state_output_type: The pydantic model type that is used to represent the output state. + :return: The same function, decorated function. + """ try: from burr.integrations.pydantic import pydantic_streaming_action except ImportError: raise ImportError( - "Please install pydantic to use the pydantic decorator. pip install burr[pydantic]" + "Please install pydantic to use the pydantic decorator. 
pip install 'burr[pydantic]'" ) return pydantic_streaming_action( diff --git a/burr/core/application.py b/burr/core/application.py index 2cdd2303..30c92b9a 100644 --- a/burr/core/application.py +++ b/burr/core/application.py @@ -53,8 +53,8 @@ from burr.visibility.tracing import tracer_factory_context_var if TYPE_CHECKING: - # TODO -- push type-checking check back from here - # OR just put everything under type-chekcing... + # TODO -- figure out whether we want to just do if TYPE_CHECKING + # for all first-class imports as Ruff suggests... from burr.tracking.base import TrackingClient logger = logging.getLogger(__name__) @@ -67,10 +67,8 @@ def _validate_result(result: Any, name: str, schema: ActionSchema = DEFAULT_SCHEMA) -> None: - # TODO -- validate the output type is action schema's output type... # TODO -- split out the action schema into input/output schema types - # Then action schema will have both - # we'll just need to ensure we pass the right ones + # Currently they're tied together, but this doesn't make as much sense for single-step actions result_type = schema.intermediate_result_type() if not isinstance(result, result_type): raise ValueError( @@ -2318,7 +2316,7 @@ def _get_built_graph(self) -> Graph: return self.graph_builder.build() return self.prebuilt_graph - # @telemetry.capture_function_usage + @telemetry.capture_function_usage def build(self) -> Application[StateType]: """Builds the application. 
@@ -2368,18 +2366,3 @@ def build(self) -> Application[StateType]: else None ), ) - - -if __name__ == "__main__": - import pydantic - - class Foo(pydantic.BaseModel): - a: int - b: str - - from burr.integrations import pydantic - - app = ApplicationBuilder().with_typing(pydantic.PydanticTypingSystem(Foo)).build() - - _, _, foo = app.run(inputs={"a": 1, "b": "hello"}) - mod = foo.data diff --git a/burr/integrations/pydantic.py b/burr/integrations/pydantic.py index 4b458e7b..ebe8a63a 100644 --- a/burr/integrations/pydantic.py +++ b/burr/integrations/pydantic.py @@ -170,15 +170,7 @@ def pydantic_action( state_input_type: Optional[Type[pydantic.BaseModel]] = None, state_output_type: Optional[Type[pydantic.BaseModel]] = None, ) -> Callable[[PydanticActionFunction], PydanticActionFunction]: - """Action that specifies inputs/outputs using pydantic models. - This should make it easier to develop with guardrails. - - :param reads: keys that this model reads. Note that this will be a subset of the pydantic model with which this is decorated. - We will be validating that the keys are present in the model. - :param writes: keys that this model writes. Note that this will be a subset of the pydantic model with which this is decorated. - We will be validating that the keys are present in the model. - :return: - """ + """See docstring for @action.pydantic""" def decorator(fn: PydanticActionFunction) -> PydanticActionFunction: if state_input_type is None and state_output_type is None: @@ -296,16 +288,7 @@ def pydantic_streaming_action( state_output_type: Type[pydantic.BaseModel], stream_type: PartialType, ) -> Callable[[PydanticStreamingActionFunction], PydanticStreamingActionFunction]: - """Creates a streaming action that uses pydantic models. - - :param reads: The fields this consumes from the state. - :param writes: The fields this writes to the state. - :param stream_type: The pydantic model or dictionary type that is used to represent the partial results. 
If this is None it will attempt to derive from the signature. - Use a dict if you want this untyped. - :param state_input_type: The pydantic model type that is used to represent the input state. If this is None it will attempt to derive from the signature. - :param state_output_type: The pydantic model type that is used to represent the output state. If this is None it will attempt to derive from the signature. - :return: The same function, decorated function. - """ + """See docstring for @streaming_action.pydantic""" def decorator(fn: PydanticStreamingActionFunctionVar) -> PydanticStreamingActionFunctionVar: itype, otype, stream_type_processed = _validate_and_extract_signature_types_streaming( @@ -353,8 +336,6 @@ async def async_action_generator( is_async = inspect.isasyncgenfunction(fn) # This recreates the @streaming_action decorator # TODO -- use the @streaming_action decorator directly - # TODO -- ensure that the function is the right one -- specifically it probably won't show code in the UI - # now setattr( fn, FunctionBasedAction.ACTION_FUNCTION, diff --git a/docs/reference/actions.rst b/docs/reference/actions.rst index ce473ac8..72d52e1d 100644 --- a/docs/reference/actions.rst +++ b/docs/reference/actions.rst @@ -22,7 +22,12 @@ Actions .. automethod:: __init__ -.. autodecorator:: burr.core.action.action +.. autoclass:: burr.core.action.action + :members: + + .. automethod:: __init__ + + .. autofunction:: burr.core.action.bind diff --git a/docs/reference/index.rst b/docs/reference/index.rst index 0613b28b..f6e96eec 100644 --- a/docs/reference/index.rst +++ b/docs/reference/index.rst @@ -1,3 +1,4 @@ + .. 
_reference: ======================== @@ -20,5 +21,6 @@ need functionality that is not publicly exposed, please open an issue and we can tracking visibility lifecycle + typing integrations/index telemetry diff --git a/docs/reference/tracking.rst b/docs/reference/tracking.rst index 73c9c4fa..ea317156 100644 --- a/docs/reference/tracking.rst +++ b/docs/reference/tracking.rst @@ -3,7 +3,7 @@ Tracking ======== Reference on the Tracking/Telemetry API. -Rather, you should use this throug/in conjunction with :py:meth:`burr.core.application.ApplicationBuilder.with_tracker`. +Rather, you should use this through/in conjunction with :py:meth:`burr.core.application.ApplicationBuilder.with_tracker`. .. autoclass:: burr.tracking.LocalTrackingClient diff --git a/examples/pydantic/centralized_state.py b/examples/pydantic/centralized_state.py deleted file mode 100644 index 5a57f891..00000000 --- a/examples/pydantic/centralized_state.py +++ /dev/null @@ -1,217 +0,0 @@ -import copy -import os -from typing import List, Optional - -import openai -import pydantic - -from burr.core import ApplicationBuilder, State, action, default, graph, when -from burr.integrations.pydantic import PydanticTypingSystem, pydantic_action -from burr.lifecycle import LifecycleAdapter - -MODES = { - "answer_question": "text", - "generate_image": "image", - "generate_code": "code", - "unknown": "text", -} - - -class ApplicationState(pydantic.BaseModel): - chat_history: List[dict[str, str]] = pydantic.Field(default_factory=list) - prompt: Optional[str] - has_openai_key: Optional[bool] - safe: Optional[bool] - mode: Optional[str] - response: dict[str, str] - - -@pydantic_action(reads=[], writes=["chat_history", "prompt"]) -def process_prompt(state: ApplicationState, prompt: str) -> ApplicationState: - state.chat_history.append({"role": "user", "content": prompt, "type": "text"}) - state.prompt = prompt - return state - - -@pydantic_action(reads=["prompt"], writes=["safe"]) -def check_safety(state: 
ApplicationState) -> ApplicationState: - state.safe = "unsafe" not in state.prompt - return state - - -def _get_openai_client(): - return openai.Client() - - -@pydantic_action(reads=["prompt"], writes=["mode"]) -def choose_mode(state: ApplicationState) -> ApplicationState: - prompt = ( - f"You are a chatbot. You've been prompted this: {state.prompt}. " - f"You have the capability of responding in the following modes: {', '.join(MODES)}. " - "Please respond with *only* a single word representing the mode that most accurately " - "corresponds to the prompt. Fr instance, if the prompt is 'draw a picture of a cat', " - "the mode would be 'generate_image'. If the prompt is 'what is the capital of France', the mode would be 'answer_question'." - "If none of these modes apply, please respond with 'unknown'." - ) - - result = _get_openai_client().chat.completions.create( - model="gpt-4", - messages=[ - {"role": "system", "content": "You are a helpful assistant"}, - {"role": "user", "content": prompt}, - ], - ) - content = result.choices[0].message.content - mode = content.lower() - if mode not in MODES: - mode = "unknown" - state.mode = mode - return state - - -@pydantic_action(reads=["prompt", "chat_history"], writes=["response"]) -def prompt_for_more(state: ApplicationState) -> ApplicationState: - state.response = { - "content": "None of the response modes I support apply to your question. 
Please clarify?", - "type": "text", - "role": "assistant", - } - return state - - -@action(reads=[], writes=["has_openai_key"]) -def check_openai_key(state: State) -> State: - result = {"has_openai_key": "OPENAI_API_KEY" in os.environ} - return state.update(**result) - - -@pydantic_action(reads=["prompt", "chat_history", "mode"], writes=["response"]) -def chat_response( - state: ApplicationState, - prepend_prompt: str, - model: str = "gpt-3.5-turbo", -) -> ApplicationState: - chat_history = copy.deepcopy(state.chat_history) - chat_history[-1]["content"] = f"{prepend_prompt}: {chat_history[-1]['content']}" - chat_history_api_format = [ - { - "role": chat["role"], - "content": chat["content"], - } - for chat in chat_history - ] - client = _get_openai_client() - result = client.chat.completions.create( - model=model, - messages=chat_history_api_format, - ) - response = result.choices[0].message.content - state.response = {"content": response, "type": MODES[state.mode], "role": "assistant"} - return state - - -@pydantic_action(reads=["prompt", "chat_history", "mode"], writes=["response"]) -def image_response(state: ApplicationState, model: str = "dall-e-2") -> ApplicationState: - client = _get_openai_client() - result = client.images.generate( - model=model, prompt=state.prompt, size="1024x1024", quality="standard", n=1 - ) - response = result.data[0].url - state.response = {"content": response, "type": MODES[state.mode], "role": "assistant"} - return state - - -@pydantic_action(reads=["response", "mode", "safe", "has_openai_key"], writes=["chat_history"]) -def response(state: ApplicationState) -> ApplicationState: - if not state.has_openai_key: - chat_item = { - "role": "assistant", - "content": "You have not set an API key for [OpenAI](https://www.openai.com). Do this " - "by setting the environment variable `OPENAI_API_KEY` to your key. " - "You can get a key at [OpenAI](https://platform.openai.com). 
" - "You can still look at chat history/examples.", - "type": "error", - } - elif not state.safe: - chat_item = { - "role": "assistant", - "content": "I'm sorry, I can't respond to that.", - "type": "error", - } - else: - chat_item = state.response - state.chat_history.append(chat_item) - return state - - -graph_object = ( - graph.GraphBuilder() - .with_actions( - prompt=process_prompt, - check_openai_key=check_openai_key, - check_safety=check_safety, - decide_mode=choose_mode, - generate_image=image_response, - generate_code=chat_response.bind( - prepend_prompt="Please respond with *only* code and no other text (at all) to the following:", - ), - answer_question=chat_response.bind( - prepend_prompt="Please answer the following question:", - ), - prompt_for_more=prompt_for_more, - response=response, - ) - .with_transitions( - ("prompt", "check_openai_key", default), - ("check_openai_key", "check_safety", when(has_openai_key=True)), - ("check_openai_key", "response", default), - ("check_safety", "decide_mode", when(safe=True)), - ("check_safety", "response", default), - ("decide_mode", "generate_image", when(mode="generate_image")), - ("decide_mode", "generate_code", when(mode="generate_code")), - ("decide_mode", "answer_question", when(mode="answer_question")), - ("decide_mode", "prompt_for_more", default), - ( - ["generate_image", "answer_question", "generate_code", "prompt_for_more"], - "response", - ), - ("response", "prompt", default), - ) - .build() -) - - -def application( - hooks: Optional[List[LifecycleAdapter]] = None, - project_id: str = "test_centralized_state", -): - if hooks is None: - hooks = [] - # we're initializing above so we can load from this as well - # we could also use `with_tracker("local", project=project_id, params={"storage_dir": storage_dir})` - return ( - ApplicationBuilder() - .with_graph(graph_object) - # initializes from the tracking log if it does not already exist - .with_hooks(*hooks) - .with_tracker("local", project=project_id) - 
.with_entrypoint("prompt") - .with_state( - ApplicationState( - chat_history=[], - ) - ) - .with_typing(PydanticTypingSystem(model_type=ApplicationState)) - .build() - ) - - -if __name__ == "__main__": - app = application() - # app.visualize( - # output_file_path="statemachine", include_conditions=False, view=True, format="png" - # ) - action, result, state = app.run( - halt_after=["response"], inputs={"prompt": "Who was Aaron Burr, sir?"} - ) - state.data diff --git a/examples/pydantic/decentralized_state.py b/examples/pydantic/decentralized_state.py deleted file mode 100644 index 5f40da57..00000000 --- a/examples/pydantic/decentralized_state.py +++ /dev/null @@ -1,224 +0,0 @@ -import copy -import os -from typing import List, Optional - -import openai - -from burr.core import Application, ApplicationBuilder, State, default, graph, when -from burr.core.action import action -from burr.lifecycle import LifecycleAdapter -from burr.tracking import LocalTrackingClient - -MODES = { - "answer_question": "text", - "generate_image": "image", - "generate_code": "code", - "unknown": "text", -} - - -@action(reads=[], writes=["chat_history", "prompt"]) -def process_prompt(state: State, prompt: str) -> State: - result = {"chat_item": {"role": "user", "content": prompt, "type": "text"}} - return ( - state.wipe(keep=["prompt", "chat_history"]) - .append(chat_history=result["chat_item"]) - .update(prompt=prompt) - ) - - -@action(reads=[], writes=["has_openai_key"]) -def check_openai_key(state: State) -> State: - result = {"has_openai_key": "OPENAI_API_KEY" in os.environ} - return state.update(**result) - - -@action(reads=["prompt"], writes=["safe"]) -def check_safety(state: State) -> State: - result = {"safe": "unsafe" not in state["prompt"]} # quick hack to demonstrate - return state.update(safe=result["safe"]) - - -def _get_openai_client(): - return openai.Client() - - -@action(reads=["prompt"], writes=["mode"]) -def choose_mode(state: State) -> State: - prompt = ( - f"You are a 
chatbot. You've been prompted this: {state['prompt']}. " - f"You have the capability of responding in the following modes: {', '.join(MODES)}. " - "Please respond with *only* a single word representing the mode that most accurately " - "corresponds to the prompt. Fr instance, if the prompt is 'draw a picture of a cat', " - "the mode would be 'generate_image'. If the prompt is 'what is the capital of France', the mode would be 'answer_question'." - "If none of these modes apply, please respond with 'unknown'." - ) - - result = _get_openai_client().chat.completions.create( - model="gpt-4", - messages=[ - {"role": "system", "content": "You are a helpful assistant"}, - {"role": "user", "content": prompt}, - ], - ) - content = result.choices[0].message.content - mode = content.lower() - if mode not in MODES: - mode = "unknown" - result = {"mode": mode} - return state.update(**result) - - -@action(reads=["prompt", "chat_history"], writes=["response"]) -def prompt_for_more(state: State) -> State: - result = { - "response": { - "content": "None of the response modes I support apply to your question. 
Please clarify?", - "type": "text", - "role": "assistant", - } - } - return state.update(**result) - - -@action(reads=["prompt", "chat_history", "mode"], writes=["response"]) -def chat_response( - state: State, prepend_prompt: str, display_type: str = "text", model: str = "gpt-3.5-turbo" -) -> State: - chat_history = copy.deepcopy(state["chat_history"]) - chat_history[-1]["content"] = f"{prepend_prompt}: {chat_history[-1]['content']}" - chat_history_api_format = [ - { - "role": chat["role"], - "content": chat["content"], - } - for chat in chat_history - ] - client = _get_openai_client() - result = client.chat.completions.create( - model=model, - messages=chat_history_api_format, - ) - response = result.choices[0].message.content - result = {"response": {"content": response, "type": MODES[state["mode"]], "role": "assistant"}} - return state.update(**result) - - -@action(reads=["prompt", "chat_history", "mode"], writes=["response"]) -def image_response(state: State, model: str = "dall-e-2") -> State: - """Generates an image response to the prompt. Optional save function to save the image to a URL.""" - client = _get_openai_client() - result = client.images.generate( - model=model, prompt=state["prompt"], size="1024x1024", quality="standard", n=1 - ) - response = result.data[0].url - result = {"response": {"content": response, "type": MODES[state["mode"]], "role": "assistant"}} - return state.update(**result) - - -@action(reads=["response", "mode", "safe", "has_openai_key"], writes=["chat_history"]) -def response(state: State) -> State: - if not state["has_openai_key"]: - result = { - "chat_item": { - "role": "assistant", - "content": "You have not set an API key for [OpenAI](https://www.openai.com). Do this " - "by setting the environment variable `OPENAI_API_KEY` to your key. " - "You can get a key at [OpenAI](https://platform.openai.com). 
" - "You can still look at chat history/examples.", - "type": "error", - } - } - elif not state["safe"]: - result = { - "chat_item": { - "role": "assistant", - "content": "I'm sorry, I can't respond to that.", - "type": "error", - } - } - else: - result = {"chat_item": state["response"]} - return state.append(chat_history=result["chat_item"]) - - -graph = ( - graph.GraphBuilder() - .with_actions( - prompt=process_prompt, - check_openai_key=check_openai_key, - check_safety=check_safety, - decide_mode=choose_mode, - generate_image=image_response, - generate_code=chat_response.bind( - prepend_prompt="Please respond with *only* code and no other text (at all) to the following:", - ), - answer_question=chat_response.bind( - prepend_prompt="Please answer the following question:", - ), - prompt_for_more=prompt_for_more, - response=response, - ) - .with_transitions( - ("prompt", "check_openai_key", default), - ("check_openai_key", "check_safety", when(has_openai_key=True)), - ("check_openai_key", "response", default), - ("check_safety", "decide_mode", when(safe=True)), - ("check_safety", "response", default), - ("decide_mode", "generate_image", when(mode="generate_image")), - ("decide_mode", "generate_code", when(mode="generate_code")), - ("decide_mode", "answer_question", when(mode="answer_question")), - ("decide_mode", "prompt_for_more", default), - ( - ["generate_image", "answer_question", "generate_code", "prompt_for_more"], - "response", - ), - ("response", "prompt", default), - ) - .build() -) - - -def base_application( - hooks: List[LifecycleAdapter], - app_id: str, - storage_dir: str, - project_id: str, -): - if hooks is None: - hooks = [] - # we're initializing above so we can load from this as well - # we could also use `with_tracker("local", project=project_id, params={"storage_dir": storage_dir})` - tracker = LocalTrackingClient(project=project_id, storage_dir=storage_dir) - return ( - ApplicationBuilder() - .with_graph(graph) - # initializes from the tracking 
log if it does not already exist - .initialize_from( - tracker, - resume_at_next_action=False, # always resume from entrypoint in the case of failure - default_state={"chat_history": []}, - default_entrypoint="prompt", - ) - .with_hooks(*hooks) - .with_tracker(tracker) - .with_identifiers(app_id=app_id) - .build() - ) - - -def application( - app_id: Optional[str] = None, - project_id: str = "demo_chatbot", - storage_dir: Optional[str] = "~/.burr", - hooks: Optional[List[LifecycleAdapter]] = None, -) -> Application: - return base_application(hooks, app_id, storage_dir, project_id=project_id) - - -if __name__ == "__main__": - app = application() - app.visualize( - output_file_path="statemachine", include_conditions=False, view=True, format="png" - ) - print(app.run(halt_after=["response"], inputs={"prompt": "Who was Aaron Burr, sir?"})) diff --git a/examples/typed-state/README.md b/examples/typed-state/README.md new file mode 100644 index 00000000..361c9fed --- /dev/null +++ b/examples/typed-state/README.md @@ -0,0 +1,263 @@ +# Typed State + +This example goes over how to use the typed state features in Burr with pydantic. + +It will cover the following concepts: + +1. Why you might want to use typing for your state in the first place +1. Define typing at the application level +1. Defining/altering state at the action level +1. Doing so in a streaming manner +1. Wiring that through to a FastAPI app + +This README will contain snippets + link out to the code. + +This adapts the [instructor + youtube example](../youtube-to-social-media-post/). This +takes in a youtube video, creates a transcript, and uses OpenAI to generate a social media post based on that transcript. + +## Why type state? + +Burr originally came without typing -- typing is an additional (optional) plugin that requires some complexity. So, why use typing at all? And why use Pydantic? 
Lots of good reasons:
+
+### Typing provides guard-rails
+
+Typing state ensures you have guarantees about what the data in your state is, and what shape it takes. This makes it easier to work with/manipulate.
+
+### Typing makes development easier
+
+IDEs have integrations for handling type annotations, enabling you to
+avoid the cognitive burden of tracking types in your head. You get auto-completion on typed classes, and errors if you attempt to access or assign to a field that does not exist.
+
+### Typing makes downstream integration easier
+
+Multiple tools use types (especially with pydantic) to make interacting with data easier. In this example we use [instructor](https://python.useinstructor.com/blog/), as well as [FastAPI](https://fastapi.tiangolo.com/) to leverage pydantic models that Burr also uses.
+
+### Typing provides a form of documentation
+
+Type-annotation in python allows you to read your code and get some sense of what it is actually doing. This can be a warm introduction to python for those who came from the world of java initially (the authors of this library included), and make reasoning about a complex codebase simpler.
+
+## Setting up your IDE
+
+VSCode (or an editor with a similar interface) is generally optimal for this. It has
+pluggable typing (e.g. pylance), which handles generics cleanly. Unfortunately pycharm
+is often behind on typing support. See issues like [this](https://youtrack.jetbrains.com/issue/PY-44364) and [this](https://youtrack.jetbrains.com/issue/PY-27627/Support-explicitly-parametrized-generic-class-instantiation-syntax).
+
+While it will still work in pycharm, you will not get some of the better auto-completion capabilities.
+
+## Defining typed state at the application level
+
+This code for this is in [application.py](application.py).
+
+First, define a pydantic model -- make it as recursive as you want. This will represent
In this case, we're going to have a transcript of a youtube video that +was given by the user, as well as the social media post. The high-level is here -- the rest is +in the code: + +```python +class ApplicationState(BaseModel): + # Make these have defaults as they are only set in actions + transcript: Optional[str] = Field( + description="The full transcript of the YouTube video.", default=None + ) + post: Optional[SocialMediaPost] = Field( + description="The generated social media post.", default=None + ) +``` + +Note that this should exactly model your state -- we need to make things optional, +as there are points in the state where the transcript/post will not have been assigned. + +Next, we add it in to the application object, both the initial value and the typing system. +The typing system is what's responsible for managing the schema: + +```python +app = ( + ApplicationBuilder() + .with_actions( + ... + ) + .with_transitions( + ... + ) + .with_entrypoint(...) + .with_typing(PydanticTypingSystem(ApplicationState)) + .with_state(ApplicationState()) + .build() + ) +``` + +That ensures that application and the application's state object are parameterized on the state type. + +To get the state, you can use `.data` on any state object returned by the application + +```python +# just from the application +print(app.state.data.transcript) + +# after execution +_, _, state = app.run(halt_after=..., inputs=...) +print(state.data.transcript) +``` + +## Defining/altering typed state at the action level + +This code for this is in [application.py](application.py). + +In addition to defining state centrally, we can define it at an action level. + +The code is simple, but the API is slightly different from standard Burr. Rather than +using the immutable state-based API, we in-place mutate pydantic models. Don't worry, it's still immutable, you're just modifying a copy and returning it. 
+ +In this case, we call to `@action.pydantic`, which tells which fields to read/write to from state. It derives the classes from the function annotations, although you can also pass it the +pydantic classes as arguments to the decorator if you prefer. + +Note that the reads/writes have to be a subset of the state object. In this case we use the global `ApplicationState` object as described above, although it can use a subset/compatible set of fields (or, if you elect not to use centralized state, it just has to be compatible with upstream/downstream versions). + +Under the hood, burr will subset the state class so it only has the relevant fields (the reads/write) fields. + +```python +@action.pydantic(reads=["transcript"], writes=["post"]) +def generate_post(state: ApplicationState, llm_client) -> ApplicationState: + """Use the Instructor LLM client to generate `SocialMediaPost` from the YouTube transcript.""" + + # read the transcript from state + transcript = state.transcript + + response = llm_client.chat.completions.create( + model="gpt-4o-mini", + response_model=SocialMediaPost, + messages=[...] + ) + # mutate in place + state.post = response + # return the state + return state +``` + +## Typed state for streaming actions + +This code for this is in [application.py](application.py). + +For streaming actions, not only do we have to type the input/output state, but we can also type the intermediate result. + +In this case, we just use the `SocialMediaPost` as we did in the application state. Instructor will be streaming that in as it gets created. + +`@streaming_action.pydantic` currently requires you to pass in all the pydantic models as classes, although we will be adding the option to derive from the function signature. 
+ +We first call out to OpenAI, then we stream through + +```python + +@streaming_action.pydantic( + reads=["transcript"], + writes=["post"], + state_input_type=ApplicationState, + state_output_type=ApplicationState, + stream_type=SocialMediaPost, +) +def generate_post_streaming( + state: ApplicationState, llm_client +) -> Generator[Tuple[SocialMediaPost, Optional[ApplicationState]], None, None]: + """Streams a post as it's getting created. This allows for interacting data on the UI side of partial + results, using instructor's streaming capabilities for partial responses: + https://python.useinstructor.com/concepts/partial/ + + :param state: input state -- of the shape `ApplicationState` + :param llm_client: the LLM client, we will bind this in the application + :yield: a tuple of the post and the state -- state will be non-null when it's done + """ + + transcript = state.transcript + response = llm_client.chat.completions.create_partial( + model="gpt-4o-mini", + response_model=SocialMediaPost, + messages=[...], + stream=True, + ) + for post in response: + yield post, None + state.post = post + yield post, state +``` + +When we call out to the application we built, we have to do a little magic to get typing to work +in the IDE, but we still have the same benefits as the non-streaming approach. + +```python +app = build_streaming_application(...) # builder left out for now +_, streaming_container = app.stream_result( + halt_after=["generate_post"], + inputs={"youtube_url": "https://www.youtube.com/watch?v=hqutVJyd3TI"}, +) +# annotate to make type-completion easier +streaming_container: StreamingResultContainer[ApplicationState, SocialMediaPost] +# post is of type SocialMediaPost +for post in streaming_container: + obj = post.model_dump() + console.clear() + console.print(obj) +``` + +## FastAPI integration + +This code for this is in [server.py](server.py). + +To integrate this with FastAPI is easy, and gets easier with the types cascading through. 
+ +### Non-streaming + +For the non-streaming case, we declare an endpoint that returns the entire state. Note you +may want a subset, but for now this is simple as it matches the pydantic models we defined above. + +```python +@app.get("/social_media_post", response_model=SocialMediaPost) +def social_media_post(youtube_url: str = DEFAULT_YOUTUBE_URL) -> SocialMediaPost: + _, _, state = burr_app.run(halt_after=["generate_post"], inputs={"youtube_url": youtube_url}) + return state.data.post +``` + +### Streaming + +The streaming case involves using FastAPI's [StreamingResponse API](https://fastapi.tiangolo.com/advanced/custom-response/#streamingresponse). We define a generator, which simply yields all +intermediate results: + +```python + +@app.get("/social_media_post_streaming", response_class=StreamingResponse) +def social_media_post_streaming(youtube_url: str = DEFAULT_YOUTUBE_URL) -> StreamingResponse: + """Creates a completion for the chat message""" + + def gen(): + _, streaming_container = burr_app_streaming.stream_result( + halt_after=["generate_post"], + inputs={"youtube_url": youtube_url}, + ) # type: ignore + for post in streaming_container: + obj = post.model_dump() + yield json.dumps(obj) + + return StreamingResponse(gen()) +``` + +Note that `StreamingResponse` is not typed, but you have access to the types with the post +object, which corresponds to the stream from above! + +Async streaming is similar. + +You can run `server.py` with `python server.py`, which will open up on port 7443. You can use the `./curls.sh` command to query the server (it will use a default video, modify to pass your own): + +```bash +./curls.sh # default, non-streaming +./curls.sh streaming # streaming endpoint +./curls.sh streaming_async # streaming async endpoint +``` + +Note you'll have to have [jq](https://jqlang.github.io/jq/) installed for this to work. + +## Caveats + next steps + +Some things we'll be building out shortly: + +1. 
The ability to derive application level schemas from individual actions +2. The ability to automatically generate a FastAPI application from state + Burr +3. Configurable validation for state -- guardrails to choose when/when not to validate in pydantic diff --git a/examples/typed-state/application.py b/examples/typed-state/application.py new file mode 100644 index 00000000..d737de97 --- /dev/null +++ b/examples/typed-state/application.py @@ -0,0 +1,292 @@ +import textwrap +from typing import AsyncGenerator, Generator, Optional, Tuple, Union + +import instructor +import openai +from pydantic import BaseModel, Field +from pydantic.json_schema import SkipJsonSchema +from rich.console import Console +from youtube_transcript_api import YouTubeTranscriptApi + +from burr.core import Application, ApplicationBuilder, action +from burr.core.action import ( + AsyncStreamingResultContainer, + StreamingResultContainer, + streaming_action, +) +from burr.integrations.pydantic import PydanticTypingSystem + + +class Concept(BaseModel): + term: str = Field(description="A key term or concept mentioned.") + definition: str = Field(description="A brief definition or explanation of the term.") + timestamp: float = Field(description="Timestamp when the concept is explained.") + + def display(self): + minutes, seconds = divmod(self.timestamp, 60) + return f"{int(minutes)}:{int(seconds)} - {self.term}: {self.definition}" + + +class SocialMediaPost(BaseModel): + """A social media post about a YouTube video generated its transcript""" + + topic: str = Field(description="Main topic discussed.") + hook: str = Field( + description="Statement to grab the attention of the reader and announce the topic." + ) + body: str = Field( + description="The body of the social media post. It should be informative and make the reader curious about viewing the video." 
+ ) + concepts: list[Concept] = Field( + description="Important concepts about Hamilton or Burr mentioned in this post -- please have at least 1", + min_items=0, + max_items=3, + validate_default=False, + ) + key_takeaways: list[str] = Field( + description="A list of informative key takeways for the reader -- please have at least 1", + min_items=0, + max_items=4, + validate_default=False, + ) + youtube_url: SkipJsonSchema[Union[str, None]] = None + + def display(self) -> str: + formatted_takeways = " ".join([t for t in self.key_takeaways]) + formatted_concepts = "CONCEPTS\n" + "\n".join([c.display() for c in self.concepts]) + link = f"link: {self.youtube_url}\n\n" if self.youtube_url else "" + + return ( + textwrap.dedent( + f"""\ + TOPIC: {self.topic} + + {self.hook} + + {self.body} + + {formatted_takeways} + + """ + ) + + link + + formatted_concepts + ) + + +class ApplicationState(BaseModel): + # Make these have defaults as they are only set in actions + transcript: Optional[str] = Field( + description="The full transcript of the YouTube video.", default=None + ) + post: Optional[SocialMediaPost] = Field( + description="The generated social media post.", default=None + ) + + +@action.pydantic(reads=[], writes=["transcript"]) +def get_youtube_transcript(state: ApplicationState, youtube_url: str) -> ApplicationState: + """Get the official YouTube transcript for a video given its URL""" + _, _, video_id = youtube_url.partition("?v=") + + transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=["en"]) + state.transcript = " ".join([f"ts={entry['start']} - {entry['text']}" for entry in transcript]) + return state + + # store the transcript in state + + +@action.pydantic(reads=["transcript"], writes=["post"]) +def generate_post(state: ApplicationState, llm_client) -> ApplicationState: + """Use the Instructor LLM client to generate `SocialMediaPost` from the YouTube transcript.""" + + # read the transcript from state + transcript = state.transcript + + 
response = llm_client.chat.completions.create( + model="gpt-4o-mini", + response_model=SocialMediaPost, + messages=[ + { + "role": "system", + "content": "Analyze the given YouTube transcript and generate a compelling social media post.", + }, + {"role": "user", "content": transcript}, + ], + ) + state.post = response + + # store the chapters in state + return state + + +@streaming_action.pydantic( + reads=["transcript"], + writes=["post"], + state_input_type=ApplicationState, + state_output_type=ApplicationState, + stream_type=SocialMediaPost, +) +def generate_post_streaming( + state: ApplicationState, llm_client +) -> Generator[Tuple[SocialMediaPost, Optional[ApplicationState]], None, None]: + """Streams a post as it's getting created. This allows for interacting data on the UI side of partial + results, using instructor's streaming capabilities for partial responses: + https://python.useinstructor.com/concepts/partial/ + + :param state: input state -- of the shape `ApplicationState` + :param llm_client: the LLM client, we will bind this in the application + :yield: a tuple of the post and the state -- state will be non-null when it's done + """ + + transcript = state.transcript + response = llm_client.chat.completions.create_partial( + model="gpt-4o-mini", + response_model=SocialMediaPost, + messages=[ + { + "role": "system", + "content": "Analyze the given YouTube transcript and generate a compelling social media post.", + }, + {"role": "user", "content": transcript}, + ], + stream=True, + ) + final_post: SocialMediaPost = None # type: ignore + for post in response: + final_post = post + yield post, None + + yield final_post, state + + +@streaming_action.pydantic( + reads=["transcript"], + writes=["post"], + state_input_type=ApplicationState, + state_output_type=ApplicationState, + stream_type=SocialMediaPost, +) +async def generate_post_streaming_async( + state: ApplicationState, llm_client +) -> AsyncGenerator[Tuple[SocialMediaPost, 
Optional[ApplicationState]], None]: + """Async implementation of the streaming action above""" + + transcript = state.transcript + response = llm_client.chat.completions.create_partial( + model="gpt-4o-mini", + response_model=SocialMediaPost, + messages=[ + { + "role": "system", + "content": "Analyze the given YouTube transcript and generate a compelling social media post.", + }, + {"role": "user", "content": transcript}, + ], + stream=True, + ) + final_post = None + async for post in response: + final_post = post + yield post, None + + yield final_post, state + + +def build_application() -> Application[ApplicationState]: + """Builds the standard application (non-streaming)""" + llm_client = instructor.from_openai(openai.OpenAI()) + app = ( + ApplicationBuilder() + .with_actions( + get_youtube_transcript, + generate_post.bind(llm_client=llm_client), + ) + .with_transitions( + ("get_youtube_transcript", "generate_post"), + ("generate_post", "get_youtube_transcript"), + ) + .with_entrypoint("get_youtube_transcript") + .with_typing(PydanticTypingSystem(ApplicationState)) + .with_state(ApplicationState()) + .with_tracker(project="youtube-post") + .build() + ) + return app + + +def build_streaming_application() -> Application[ApplicationState]: + """Builds the streaming application -- this uses the generate_post_streaming action""" + llm_client = instructor.from_openai(openai.OpenAI()) + app = ( + ApplicationBuilder() + .with_actions( + get_youtube_transcript, + generate_post=generate_post_streaming.bind(llm_client=llm_client), + ) + .with_transitions( + ("get_youtube_transcript", "generate_post"), + ("generate_post", "get_youtube_transcript"), + ) + .with_entrypoint("get_youtube_transcript") + .with_typing(PydanticTypingSystem(ApplicationState)) + .with_state(ApplicationState()) + .with_tracker(project="youtube-post") + .build() + ) + return app + + +def build_streaming_application_async() -> Application[ApplicationState]: + """Builds the async streaming application -- 
uses the generate_post_streaming_async action""" + llm_client = instructor.from_openai(openai.AsyncOpenAI()) + app = ( + ApplicationBuilder() + .with_actions( + get_youtube_transcript, + generate_post=generate_post_streaming_async.bind(llm_client=llm_client), + ) + .with_transitions( + ("get_youtube_transcript", "generate_post"), + ("generate_post", "get_youtube_transcript"), + ) + .with_entrypoint("get_youtube_transcript") + .with_typing(PydanticTypingSystem(ApplicationState)) + .with_state(ApplicationState()) + .with_tracker(project="test-youtube-post") + .build() + ) + return app + + +async def run_async(): + """quick function to run async -- this is not called in the mainline, see commented out code""" + console = Console() + app = build_streaming_application_async() + + _, streaming_container = await app.astream_result( + halt_after=["generate_post"], + inputs={"youtube_url": "https://www.youtube.com/watch?v=hqutVJyd3TI"}, + ) # type: ignore + streaming_container: AsyncStreamingResultContainer[ApplicationState, SocialMediaPost] + + async for post in streaming_container: + obj = post.model_dump() + console.clear() + console.print(obj) + + +# mainline -- runs streaming and prints to console +if __name__ == "__main__": + # asyncio.run(run_async()) + console = Console() + app = build_streaming_application() + _, streaming_container = app.stream_result( + halt_after=["generate_post"], + inputs={"youtube_url": "https://www.youtube.com/watch?v=hqutVJyd3TI"}, + ) # type: ignore + streaming_container: StreamingResultContainer[ApplicationState, SocialMediaPost] + for post in streaming_container: + obj = post.model_dump() + console.clear() + console.print(obj) diff --git a/examples/youtube-to-social-media-post/curls.sh b/examples/typed-state/curls.sh similarity index 100% rename from examples/youtube-to-social-media-post/curls.sh rename to examples/typed-state/curls.sh diff --git a/examples/typed-state/notebook.ipynb b/examples/typed-state/notebook.ipynb new file mode 
100644 index 00000000..cacaac34 --- /dev/null +++ b/examples/typed-state/notebook.ipynb @@ -0,0 +1,836 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Build trustworthy LLM agents & applications for production with Instructor + Burr\n", + "\n", + "The challenge with large language models (LLMs) is handling the 5% of the time they say crazy things. Being able to debug why an output is bad and having tools for fixing are critical requirements for making LLM features / agents trustworthy and available to users.\n", + "\n", + "In this notebook, you'll learn how `instructor` can make LLM reliability produce structured outputs, and `burr` helps you introspect and debug your application." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instructor 101\n", + "Instructor is a tool to help you prompt LLM and constraint its outputs. First, you specify the desired output using a **model** with typed fields and textual descriptions; you can think of it as a template that the LLM will fill. This greatly improves the reliability of the content and format of generated text.\n", + "\n", + "To introduce Instructor, we'll write code to generate a social media post from the transcript of a YouTube video. \n", + "\n", + "> This post on the Instructor blog is also a great introduction: [Analyzing Youtube Transcripts with Instructor](https://python.useinstructor.com/blog/2024/07/11/youtube-transcripts/)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1. Define the `response_model`\n", + "\n", + "Instructor uses [Pydantic](https://docs.pydantic.dev/latest/) to create the response model. A model needs to inherit the `BaseModel` class and we use the `Field()` object to give a textual description.\n", + "\n", + "- `Field()` objects allow to specify constraints to the generated output. 
For instance, we want \"1 to 3 concepts\" and \"1 to 4 key takeaways\" generated per `SocialMediaPost`\n", + "- Notice that you can nest models. Indeed, `SocialMediaPost.concepts` is a list of `Concept` models.\n", + "- We use `SkipJsonSchema` on the `youtube_url` field to specify that this shouldn't be generated by the LLM. Instead, we'll manually pass it.\n", + "\n", + "Tip: We're adding a `.display()` method to format the text to be more easily human-readable. " + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import textwrap\n", + "from typing import Union\n", + "\n", + "from pydantic import BaseModel, Field\n", + "from pydantic.json_schema import SkipJsonSchema\n", + "\n", + "class Concept(BaseModel):\n", + " term: str = Field(description=\"A key term or concept mentioned.\")\n", + " definition: str = Field(description=\"A brief definition or explanation of the term.\")\n", + " timestamp: float = Field(description=\"Timestamp when the concept is explained.\")\n", + "\n", + " def display(self):\n", + " minutes, seconds = divmod(self.timestamp, 60)\n", + " return f\"{int(minutes)}:{int(seconds)} - {self.term}: {self.definition}\"\n", + "\n", + "\n", + "class SocialMediaPost(BaseModel):\n", + " \"\"\"A social media post about a YouTube video generated its transcript\"\"\"\n", + "\n", + " topic: str = Field(description=\"Main topic discussed.\")\n", + " hook: str = Field(description=\"Statement to grab the attention of the reader and announce the topic.\")\n", + " body: str = Field(description=\"The body of the social media post. 
It should be informative and make the reader curious about viewing the video.\")\n", + " concepts: list[Concept] = Field(\n", + " description=\"Important concepts about Hamilton or Burr mentioned in this post.\",\n", + " min_items=1,\n", + " max_items=3,\n", + " )\n", + " key_takeaways: list[str] = Field(\n", + " description=\"A list of informative key takeways for the reader.\",\n", + " min_items=1,\n", + " max_items=4,\n", + " )\n", + " youtube_url: SkipJsonSchema[Union[str, None]] = None\n", + "\n", + " def display(self) -> str:\n", + " formatted_takeways = \" \".join([t for t in self.key_takeaways])\n", + " formatted_concepts = \"CONCEPTS\\n\" + \"\\n\".join([c.display() for c in self.concepts])\n", + " link = f\"link: {self.youtube_url}\\n\\n\" if self.youtube_url else \"\"\n", + "\n", + " return textwrap.dedent(\n", + " f\"\"\"\\\n", + " TOPIC: {self.topic}\n", + "\n", + " {self.hook}\n", + "\n", + " {self.body}\n", + "\n", + " {formatted_takeways}\n", + "\n", + " \"\"\"\n", + " ) + link + formatted_concepts" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. Write the application logic\n", + "\n", + "Instructor is not opiniated about how you write your application; it's only in contact with your LLM client. Here, we write a script in a few lines of code to retrieve a YouTube transcript." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from youtube_transcript_api import YouTubeTranscriptApi\n", + "\n", + "# get the video id from a YouTube url\n", + "youtube_url = \"https://www.youtube.com/watch?v=hqutVJyd3TI\" \n", + "_, _, video_id = youtube_url.partition(\"?v=\")\n", + "\n", + "# get the available YouTube transcript for the video\n", + "transcript = YouTubeTranscriptApi.get_transcript(video_id)\n", + "# join the transcript into a single block of text\n", + "full_transcript = \" \".join([f\"ts={entry['start']} - {entry['text']}\" for entry in transcript])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Wrap the LLM client\n", + "To use Instructor, we need to wrap the OpenAI client, creating a special client. \n", + "\n", + "> NOTE: If you have the environment variable `OPENAI_API_KEY` set, the client will be automatically created. Otherwise, you'll need to manually pass the key to `OpenAI(api_key=...)`." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "import instructor\n", + "from openai import OpenAI\n", + "\n", + "llm_client = instructor.from_openai(OpenAI())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Use the LLM client with the `response_model`\n", + "\n", + "1. Use the LLM client with `.create` to call the LLM API\n", + "2. Pass `SocialMediaPost` as the response model, enabling structured outputs.\n", + "3. The `messages` include the `system` message with the task instruction for the LLM and\n", + "the `user` message with the input content. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "response = llm_client.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " response_model=SocialMediaPost,\n", + " messages=[\n", + " {\n", + " \"role\": \"system\",\n", + " \"content\": \"Analyze the given YouTube transcript and generate a compelling social media post.\",\n", + " },\n", + " {\"role\": \"user\", \"content\": full_transcript},\n", + " ],\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`response` will have the type of the provided `response_model`, `SocialMediaPost` in this case. You can use `Model.model_dump()` to get a Python dictionary." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "{'topic': 'Understanding B: A Powerful Tool for Agent Applications', 'hook': '🚀 Debug with ease using B!', 'body': 'Ever faced frustrating issues while building agent applications? Dive into our latest video where we unveil the capabilities of B, a framework designed to enhance observability and debugging. Learn how to fix bugs without restarting your entire agent and explore B’s unique graph-building capabilities for a seamless coding experience. 
Perfect for developers looking to streamline their coding processes!', 'concepts': [{'term': 'Agent Applications', 'definition': 'Applications that utilize AI agents to perform tasks or make decisions based on input and graphs of actions.', 'timestamp': 108.0}, {'term': 'Graph Building', 'definition': 'Creating a flowchart-like structure of actions and states for better process management in applications.', 'timestamp': 179.0}, {'term': 'Local Tracker', 'definition': 'A feature within B that allows interaction with the application state for tracking and debugging purposes.', 'timestamp': 260.0}], 'key_takeaways': ['B enables easy bug fixing without restarting your agent.', 'Graph-based modeling simplifies the coding process.', 'Integrate local tracking for comprehensive observability in applications.'], 'youtube_url': None}\n" + ] + } + ], + "source": [ + "print(type(response))\n", + "print(response.model_dump())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also use the `.display()` method we've defined!" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "TOPIC: Understanding B: A Powerful Tool for Agent Applications\n", + "\n", + "🚀 Debug with ease using B!\n", + "\n", + "Ever faced frustrating issues while building agent applications? Dive into our latest video where we unveil the capabilities of B, a framework designed to enhance observability and debugging. Learn how to fix bugs without restarting your entire agent and explore B’s unique graph-building capabilities for a seamless coding experience. Perfect for developers looking to streamline their coding processes!\n", + "\n", + "B enables easy bug fixing without restarting your agent. Graph-based modeling simplifies the coding process. 
Integrate local tracking for comprehensive observability in applications.\n", + "\n", + "CONCEPTS\n", + "1:48 - Agent Applications: Applications that utilize AI agents to perform tasks or make decisions based on input and graphs of actions.\n", + "2:59 - Graph Building: Creating a flowchart-like structure of actions and states for better process management in applications.\n", + "4:20 - Local Tracker: A feature within B that allows interaction with the application state for tracking and debugging purposes.\n" + ] + } + ], + "source": [ + "print(response.display())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Burr 101\n", + "Burr is a tool to build LLM applications, solving many challenges to get to production (monitoring, persistence, streaming, and more). With the concepts of \"state\" and \"action\", you can define complex apps that are easy-to-understand and debug.\n", + "\n", + "To show this, we'll rewrite the previous application logic with Burr." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1. Define `actions`\n", + "\n", + "First, you need to define the different actions your agent can take. This is done by writing Python functions with the `@action` decorator. The decorator must specify the information that can be read from state. Also, the function needs to take a `State` object as first argument and return a `State` object." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "from burr.core import State, action\n", + "\n", + "\n", + "@action(reads=[], writes=[\"transcript\"])\n", + "def get_youtube_transcript(state: State, youtube_url: str) -> State:\n", + " \"\"\"Get the official YouTube transcript for a video given it's URL\"\"\"\n", + " _, _, video_id = youtube_url.partition(\"?v=\")\n", + " \n", + " transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=[\"en\"])\n", + " full_transcript = \" \".join([f\"ts={entry['start']} - {entry['text']}\" for entry in transcript])\n", + "\n", + " # store the transcript in state\n", + " return state.update(transcript=full_transcript, youtube_url=youtube_url)\n", + "\n", + "\n", + "@action(reads=[\"transcript\"], writes=[\"post\"])\n", + "def generate_post(state: State, llm_client) -> State:\n", + " \"\"\"Use the Instructor LLM client to generate `SocialMediaPost` from the YouTube transcript.\"\"\"\n", + "\n", + " # read the transcript from state\n", + " transcript = state[\"transcript\"]\n", + "\n", + " response = llm_client.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " response_model=SocialMediaPost,\n", + " messages=[\n", + " {\n", + " \"role\": \"system\",\n", + " \"content\": \"Analyze the given YouTube transcript and generate a compelling social media post.\",\n", + " },\n", + " {\"role\": \"user\", \"content\": transcript},\n", + " ],\n", + " )\n", + "\n", + " # add the youtube_url found in state to the SocialMediaPost\n", + " response.youtube_url = state[\"youtube_url\"]\n", + "\n", + " # store the chapters in state\n", + " return state.update(post=response)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. Assemble the `Application`\n", + "\n", + "To create a Burr agent, we need to assemble the `actions` into an `Application`. 
This requires specifying the valid `transitions` between actions using tuples of action names `(from, to)` and defining an `entrypoint` from where to begin execution. Then, we can visualize the graph of possible states and actions.\n", + "\n", + "Notice that we create the Instructor LLM client *outside* the application and pass it to the `generate_chapters` action via the `.bind()` method. This method follows the same logic as the standard library `functools.partial()`." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "%3\n", + "\n", + "\n", + "\n", + "get_youtube_transcript\n", + "\n", + "get_youtube_transcript\n", + "\n", + "\n", + "\n", + "generate_post\n", + "\n", + "generate_post\n", + "\n", + "\n", + "\n", + "get_youtube_transcript->generate_post\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "input__youtube_url\n", + "\n", + "input: youtube_url\n", + "\n", + "\n", + "\n", + "input__youtube_url->get_youtube_transcript\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from burr.core import ApplicationBuilder\n", + "\n", + "application = (\n", + " ApplicationBuilder()\n", + " .with_actions(\n", + " get_youtube_transcript,\n", + " generate_post.bind(llm_client=llm_client),\n", + " )\n", + " .with_transitions(\n", + " (\"get_youtube_transcript\", \"generate_post\"),\n", + " )\n", + " .with_entrypoint(\"get_youtube_transcript\")\n", + " .build()\n", + ")\n", + "application.visualize()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. Launch the application\n", + "\n", + "Using `application.run()` will make our application iterate through actions and state until it hits a `halt` condition. Here, we will simply halt after completing the `generate_post` action. 
This will return a tuple of (the last action take, the result of the last action, the state of the app). We also need to pass a `youtube_url` since it's a required input to the `get_youtube_transcript` action." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "TOPIC: Debugging with Burr and Graph States\n", + "\n", + "Ever struggled with debugging your AI agents?\n", + "\n", + "Dive into the world of Burr with our latest video! Learn how to leverage state tracking and condition-based flowcharts to handle failures and streamline debugging like never before. Discover how you can monitor your agents, and even resume operations from a specific point in their execution—without starting over! 🚀 Whether you’re a developer building complex applications or simply curious about AI functionality, this video will enhance your understanding of agent workflows. Don't miss out!\n", + "\n", + "Learn how to handle failures in AI agent applications effectively. Understand the relationship between actions and state in your applications. 
Discover how to resume operations from a specific state without restarting the entire application.\n", + "\n", + "link: https://www.youtube.com/watch?v=hqutVJyd3TI\n", + "\n", + "CONCEPTS\n", + "10:57 - State Tracking: State tracking allows you to monitor the state of your application at different points in time, making debugging easier.\n", + "1:50 - Flowchart: In this context, a flowchart represents the actions and their dependencies in your agent application as a directed graph.\n", + "3:36 - Modality: The condition that determines which action to take next in the flow of your agent’s tasks.\n" + ] + } + ], + "source": [ + "last_action, result, state = application.run(\n", + " halt_after=[\"generate_post\"],\n", + " inputs={\"youtube_url\": \"https://www.youtube.com/watch?v=hqutVJyd3TI\"},\n", + ")\n", + "\n", + "# print `post` stored in state\n", + "print(state[\"post\"].display())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Why create a Burr application?\n", + "\n", + "In a few lines of code, you can query an LLM API and can create powerful productivity utilities. However, user-facing features deserve much more scrutiny, which requires tooling and solving complex engineering problems.\n", + "\n", + "Building our app with Burr provides several benefits that we'll detail next:\n", + "- **Observability**: monitor in real-time and log the execution of your `Application` and view it in Burr's web user interface.\n", + "- **Persistence**: At any point, you can save the application `State`. This allows to create user sessions (e.g., the conversation history menu in ChatGPT), which helps developers investigate bugs and test potential solutions.\n", + "- **Portability**: your `Application` can run in a notebook, as a script, as a web service, or anywhere Python runs. We'll show how to use Burr with [FastAPI](https://fastapi.tiangolo.com/)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1. 
Observability\n", + "\n", + "Add the clause `.with_tracker(project=...)` to the `ApplicationBuilder()` to track execution. \n" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "from burr.core import ApplicationBuilder\n", + "\n", + "application = (\n", + " ApplicationBuilder()\n", + " .with_actions(\n", + " get_youtube_transcript,\n", + " generate_post.bind(llm_client=llm_client),\n", + " )\n", + " .with_transitions(\n", + " (\"get_youtube_transcript\", \"generate_post\"),\n", + " )\n", + " .with_entrypoint(\"get_youtube_transcript\")\n", + " .with_tracker(project=\"youtube-post\")\n", + " .build()\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "last_action, result, state = application.run(\n", + " halt_after=[\"generate_post\"],\n", + " inputs={\"youtube_url\": \"https://www.youtube.com/watch?v=hqutVJyd3TI\"},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then, you can launch the web UI via the CLI command `burr`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. Persistence\n", + "\n", + "To showcase this feature, we'll add a `rewrite()` action. It sends to the LLM the social media post and a user input to tweak its content. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "@action(reads=[\"post\"], writes=[\"post\"])\n", + "def rewrite_post(state: State, llm_client, user_prompt: str):\n", + " post = state[\"post\"]\n", + "\n", + " response = llm_client.chat.completions.create(\n", + " model=\"gpt-4o-mini\",\n", + " response_model=SocialMediaPost,\n", + " messages=[\n", + " {\n", + " \"role\": \"system\",\n", + " \"content\": f\"Take the previously generated social media post and modify it according to the following instructions: {user_prompt}\",\n", + " },\n", + " {\"role\": \"user\", \"content\": post.model_dump_json()},\n", + " ],\n", + " )\n", + "\n", + " # pass the youtube_url from the previous post version\n", + " response.youtube_url = post.youtube_url\n", + "\n", + " return state.update(post=response)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "By adding the transition `(\"rewrite_post\", \"rewrite_post\")`, we are introducing a graph cycle. Observability and persistence becomes particularly valuable to ensure that the LLM doesn't spiral into non-sense. If that's the case, it could be due to the prompts / instructions in the application code, but also user inputs.\n", + "\n", + "We also add a `.with_persister()` clause to store our results." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "%3\n", + "\n", + "\n", + "\n", + "get_youtube_transcript\n", + "\n", + "get_youtube_transcript\n", + "\n", + "\n", + "\n", + "generate_post\n", + "\n", + "generate_post\n", + "\n", + "\n", + "\n", + "get_youtube_transcript->generate_post\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "input__youtube_url\n", + "\n", + "input: youtube_url\n", + "\n", + "\n", + "\n", + "input__youtube_url->get_youtube_transcript\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "rewrite_post\n", + "\n", + "rewrite_post\n", + "\n", + "\n", + "\n", + "generate_post->rewrite_post\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "rewrite_post->rewrite_post\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "input__user_prompt\n", + "\n", + "input: user_prompt\n", + "\n", + "\n", + "\n", + "input__user_prompt->rewrite_post\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from burr.core import ApplicationBuilder\n", + "from burr.core.persistence import SQLLitePersister\n", + "\n", + "persister = SQLLitePersister(db_path=\".burr.db\", table_name=\"burr_state\")\n", + "persister.initialize() # this will create the db and table\n", + "\n", + "application = (\n", + " ApplicationBuilder()\n", + " .with_actions(\n", + " get_youtube_transcript,\n", + " generate_post.bind(llm_client=llm_client),\n", + " rewrite_post.bind(llm_client=llm_client),\n", + " )\n", + " .with_transitions(\n", + " (\"get_youtube_transcript\", \"generate_post\"),\n", + " (\"generate_post\", \"rewrite_post\"),\n", + " (\"rewrite_post\", \"rewrite_post\"),\n", + " )\n", + " .with_state_persister(persister)\n", + " .with_entrypoint(\"get_youtube_transcript\")\n", + " .with_tracker(project=\"youtube-post\")\n", + " .build()\n", + 
")\n", + "application.visualize()" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LAST ACTION : generate_post: transcript -> post \n", + "\n", + "TOPIC: Enhancing AI Agent Applications with B and Hamilton\n", + "\n", + "🚀 Dive into the world of AI agents and discover how to streamline your debugging process with B!\n", + "\n", + "Are you tired of constant debugging cycles in your AI projects? In our latest video, we explore B, a framework designed to help you monitor and debug agent applications efficiently. Learn how B allows you to visualize your application’s state, manage complex action flows, and even fork your state at any point to tackle bugs without restarting from scratch. Plus, discover its sister framework, Hamilton, perfect for managing data pipelines! Curious to learn how to simplify your debugging and enhance your application development? 🎥 Watch the full video now!\n", + "\n", + "Learn to visualize and debug agent applications effectively with B. Understand how to manage states and actions using a graphical approach. 
Explore the integration of Hamilton for complex data pipeline processes.\n", + "\n", + "link: https://www.youtube.com/watch?v=hqutVJyd3TI\n", + "\n", + "CONCEPTS\n", + "0:1 - B Framework: A framework for building and debugging AI agent applications, allowing for state tracking and action visualization.\n", + "2:25 - State Object: An object that retains the state of an application, enabling actions to read and write within that context.\n", + "2:0 - Graph Visualization: A depiction of actions and states in a flowchart-like manner to streamline application behavior and debugging.\n" + ] + } + ], + "source": [ + "# this will run \n", + "last_action, result, state = application.run(\n", + " halt_after=[\"generate_post\"],\n", + " inputs={\"youtube_url\": \"https://www.youtube.com/watch?v=hqutVJyd3TI\"},\n", + ")\n", + "print(\"LAST ACTION :\", last_action, \"\\n\")\n", + "print(state[\"post\"].display())" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "LAST ACTION : rewrite_post: post -> post \n", + "\n", + "TOPIC: Enhancing AI Agent Applications with B and Hamilton\n", + "\n", + "🚀 Dive into the world of AI agents and discover how to streamline your debugging process with B!\n", + "\n", + "Are you encountering challenges with debugging cycles in your AI projects? In our latest video, we explore B, a framework designed for monitoring and debugging agent applications effectively. With B, you can visualize your application’s state, manage complex action flows, and fork your state to tackle bugs without having to restart from scratch. Additionally, we introduce Hamilton, a framework that complements B by simplifying the management of data pipelines. If you are interested in improving your debugging process and application development, we invite you to watch the full video!\n", + "\n", + "Learn to visualize and debug agent applications effectively with B. 
Understand how to manage states and actions using a graphical approach. Explore the integration of Hamilton for complex data pipeline processes.\n", + "\n", + "link: https://www.youtube.com/watch?v=hqutVJyd3TI\n", + "\n", + "CONCEPTS\n", + "0:1 - B Framework: A framework for building and debugging AI agent applications, allowing for state tracking and action visualization.\n", + "2:25 - State Object: An object that retains the state of an application, enabling actions to read and write within that context.\n", + "2:0 - Graph Visualization: A depiction of actions and states in a flowchart-like manner to streamline application behavior and debugging.\n" + ] + } + ], + "source": [ + "last_action, result, state = application.step(\n", + " inputs={\"user_prompt\": \"Adopt a professional tone that avoids incredible claims. Stay close to the facts, but demonstrate enthusiasm\"},\n", + ")\n", + "\n", + "print(\"LAST ACTION :\", last_action, \"\\n\")\n", + "print(state[\"post\"].display())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then, you can retrieve the store app ids and resume from there. This requires adding a `.initialize_from()` with the `persister`. Also, we're getting the `app_id` from storage via `persister.list_app_ids()`. By passing it to the `.with_identifiers(app_id=...)`, this \"reloaded application\" will logged and tracked in succession of the original app (as you can see in the Burr UI).\n", + "\n", + "Below, we're building a new `Application` from the persisted state. When we print the `State`, we're able to retrieve the previously generated (you can see the matching titles)!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Enhancing AI Agent Applications with B and Hamilton\n" + ] + } + ], + "source": [ + "app_id = persister.list_app_ids(\"\")[0]\n", + "\n", + "reloaded_app = (\n", + " ApplicationBuilder()\n", + " .with_actions(\n", + " get_youtube_transcript,\n", + " generate_post.bind(llm_client=llm_client),\n", + " rewrite_post.bind(llm_client=llm_client),\n", + " )\n", + " .with_transitions(\n", + " (\"get_youtube_transcript\", \"generate_post\"),\n", + " (\"generate_post\", \"rewrite_post\"),\n", + " (\"rewrite_post\", \"rewrite_post\"),\n", + " )\n", + " .initialize_from(\n", + " persister,\n", + " resume_at_next_action=True,\n", + " default_state={},\n", + " default_entrypoint=\"get_youtube_transcript\",\n", + " )\n", + " .with_state_persister(persister)\n", + " .with_identifiers(app_id=app_id)\n", + " .with_tracker(project=\"youtube-post\")\n", + " .build()\n", + ")\n", + "\n", + "print(reloaded_app.state[\"post\"].topic)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Persistence is great for iterative development such as tuning your [Instructor model with validators and constraints](https://python.useinstructor.com/concepts/reask_validation/), but it's also a powerful tool for building [test cases and guard rails](https://burr.dagworks.io/examples/guardrails/creating_tests/). The CLI command `burr-test-case` can generate a `pytest.fixture` to resume your app from a given state." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. Portability\n", + "\n", + "In the GitHub repository, you can find the same Burr `Application` defined in `application.py`, which can be executed via `python application.py`. 
Also, we provide a boilerplate FastAPI application in `server.py` which imports the `Application` defined in `application.py`" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/typed-state/server.py b/examples/typed-state/server.py new file mode 100644 index 00000000..b5592b55 --- /dev/null +++ b/examples/typed-state/server.py @@ -0,0 +1,109 @@ +import contextlib +import json +import logging + +import fastapi +import uvicorn +from application import ( + ApplicationState, + SocialMediaPost, + build_application, + build_streaming_application, + build_streaming_application_async, +) +from fastapi.responses import StreamingResponse + +from burr.core import Application +from burr.core.action import AsyncStreamingResultContainer, StreamingResultContainer + +logger = logging.getLogger(__name__) + +# define a global `burr_app` variable +burr_app: Application[ApplicationState] = None +# This does streaming, in sync mode +burr_app_streaming: Application[ApplicationState] = None +# And this does streaming, in async mode +burr_app_streaming_async: Application[ApplicationState] = None + +DEFAULT_YOUTUBE_URL = "https://www.youtube.com/watch?v=hqutVJyd3TI" + + +@contextlib.asynccontextmanager +async def lifespan(app: fastapi.FastAPI): + """Instantiate the Burr applications on FastAPI startup.""" + global burr_app, burr_app_streaming, burr_app_streaming_async + burr_app = build_application() + burr_app_streaming = build_streaming_application() + burr_app_streaming_async = build_streaming_application_async() + yield + + +app = fastapi.FastAPI(lifespan=lifespan) + + 
+@app.get("/social_media_post", response_model=SocialMediaPost) +def social_media_post(youtube_url: str = DEFAULT_YOUTUBE_URL) -> SocialMediaPost: + """Basic, synchronous single-step API. + This just returns the social media post, no streaming response. + + + :param youtube_url: youtube URL for the transcript, defaults to DEFAULT_YOUTUBE_URL + :return: the social media post + """ + # Note that state is of type State[ApplicationState] + # This means that it has a data field of type ApplicationState + # which means that our IDE will happily auto-complete for us + # and that we can get the pydantic model + _, _, state = burr_app.run(halt_after=["generate_post"], inputs={"youtube_url": youtube_url}) + return state.data.post + + +@app.get("/social_media_post_streaming", response_class=StreamingResponse) +def social_media_post_streaming(youtube_url: str = DEFAULT_YOUTUBE_URL) -> StreamingResponse: + """Creates a completion for the chat message""" + + def gen(): + _, streaming_container = burr_app_streaming.stream_result( + halt_after=["generate_post"], + inputs={"youtube_url": youtube_url}, + ) # type: ignore + # We annotate this so we can get the right types cascaded through + streaming_container: StreamingResultContainer[ApplicationState, SocialMediaPost] + # Every post is of type SocialMediaPost, and the IDE (if you're using PyLance or an equivalent) should know + for post in streaming_container: + obj = post.model_dump() + yield json.dumps(obj) + # if we called streaming_container.get(), we would get two objects -- + # This state will have a field data of type ApplicationState, which we can use if we want + + # post, state = streaming_container.get() + # state.data.transcript # valid + auto-completion in the IDE + + # return a final streaming result -- it'll just have strings + # for certain SSE frameworks you may want to delimit with data: + return StreamingResponse(gen()) + + +@app.get("/social_media_post_streaming_async", response_class=StreamingResponse) +async 
def social_media_post_streaming_async( + youtube_url: str = DEFAULT_YOUTUBE_URL, +) -> StreamingResponse: + """Creates a completion for the chat message""" + + async def gen(): + _, streaming_container = await burr_app_streaming_async.astream_result( + halt_after=["generate_post"], + inputs={"youtube_url": youtube_url}, + ) # type: ignore + # We annotate this so we can get the right types cascaded through + streaming_container: AsyncStreamingResultContainer[ApplicationState, SocialMediaPost] + # Every post is of type SocialMediaPost, and the IDE (if you're using PyLance or an equivalent) should know + async for post in streaming_container: + obj = post.model_dump() + yield json.dumps(obj) + + return StreamingResponse(gen()) + + +if __name__ == "__main__": + uvicorn.run("server:app", host="127.0.0.1", port=7443, reload=True) diff --git a/examples/typed-state/statemachine.png b/examples/typed-state/statemachine.png new file mode 100644 index 0000000000000000000000000000000000000000..ec7a8b2490c8e56b1e831eae1cbd200a2cadffb4 GIT binary patch literal 59709 zcmb@u1yGe;7%qwpB4GeZiUA_sDH~Brq`L*AyE_yl1w@cWq#LB0Eg~V^-6`E&_ucTH zGxwe|XXehGyZ`wC;oIN()>>~q&$G6#q__~)O~RXKXlPg>!mp*#(5}d#p* zo}ly&^-W7u=r!6I>c1aVsUc`+579(kzm&C)SsryzZ5ckhvCb^oo#PVpRak&Zz$b~{ z_HUSRNxrT|E^YHS7Yv2s_v0n1%AP9WG`{yRe3M-0zp=1x9ciz|YG^bUMl3M*E*+HO z1VwIf##yPytzI#kFX${)rMma zN)x|S#CLK7uIb3X_{;x37DX>=Y&1Sk9v+_0pFbNG|94^ATubDZ80PsS;1a^@6A~U} z$neqXtI~xp-a_4y|Jt={E+>aJEfI{eHw^xpgicm=_SV+cIMR*2ONo@n-h6LWjh>z! 
z3kxe){E6XjF5HnbOn7tX#_q0qxpjJ)UYXt6*kW%cGc&Wmi{LbjB|h`>WtG(3ocSv& zgiC?#{VJ9HLtPOC?rnHqRA!mgym&0vx4psU=H}sH?#GX7TU*5>By!a%Y>_9sDoRQZ z%$5f7v|U_W$aw4-Y?l>TSy^Rdx@lF4%{hq2`b0t~$|@@Qi5<4)TC1G)axyZa*i42# zT)BZx4sRbGDYJ4=(QAu27&b$mFnpcxookC-nw$&{3uANIHHKTMYinsu{dnoo)6?@_ zM<+Wwn_i>3g{-x?*$AF@ZLDf*d)s)l+y?1-0#7M`K3Ha@*Po--+uIu(8yoWV>mt(; z4o=dCQkV+by}bNcGIj?Ch1UWYLx7Wpr{nI=ag2=g*(73>C>pO5SJG z`s0U3ot2e^PEJZnTA58jK|#W8o5jhLTX}T2vs7#{?701}hC2KB_}E_MTWF{vO&`u< zjPH$YZEcN>jV&#oakUH#{?1yijaE=nQ)lMpA}wc|5U2ZzPJ64KUv`%Iw{YIb$jGRu z4FCD_e(H}e4Zgo%A}2RDb7YHVtycgs=hr%|B7^Rf^78V&K{xw{Hn(wcac|$2D!8_T zaskP?=))k~eB(hTZ137x%goG7T3TAXd-v>BB5^Q&1q<9zkU1G-QDeQLQPG5 zw6oNpl{Z%DP>V=|XZI*!<=}|GeMC#!I@~mC>7e)PK;v5b>B)zlPi18XYsfS8f@||` zVB389{I6cotCn_5j3y^1!)AykcA}G8b5$wis@K)k85kH;X8Za1ZOymOyQFk4P|Igc zSeBKTjvD@Yos*NJ-4J*(*r!Z6^i%UyLm&wTdHeSl;C@Q5!G%Rd4$)7ZJV|*QDGg>v zO4??m$#7pZjAqh>jh&s+b9<>j7vex=ShdU19e(vHN4dR0H356-@kvczOU+Cl}JiS(KRt?J!+}24k~%p zsApgxg6?p%OS_g?T+HTQH{~)%LV{F$;>mFT7Qe8Ljt+?)!Oin9^N=nnIT>j-bGnzM zMN69>(fpmR(hG%;`YIMgr$h-=> zK=VB6@O7`Rj#flz$Vf}0lM4z8dg2lW)FTDL=Ub!M6C(Z*nG6>{^&F|N%hjE(^~S*< zzkdCC!uP70|MA}4mEowMHyF2WJ@JIl^a(d1H#hF<*WbDY4$*h++^J4a@MP^|r=g)y zq`^OvMtt}2LBH<7`0f{)RQuSw^Qid4AT1|{6da*9Q{nENJ8Z@SKlR$bGiujwuZ~9J zc7ZE5H8oMmq`ZFpnwZn_{U4v(H*elNCfV8EMkn{iCUM-H!N?U56B82@Ou2N|uab0?VeIoQ}a?tsG+y_n_WhU z*?9HQ-rBs0a-n`FOtn3R(`v5e0SQU0?_J6}{Pq%}ilMYG8%C>$Le92^~ODeJaGOwP|!`k{tf{9Ri+9o^l)a?1J*6(5j|-QCQr zta7C|)RR(jaz(|(*ccd8G&G-meLFil85tQ5QY1`GO&uM}%gP*DkMs_hn3!f}Q7<=_ zgtgh386LM&RcYxi@PE5u%i12DT`p1|G{v*?gk)iBOZWWworQaE-nkd(92!>;EZ$b>FBL#Qi*ASXhj#fuj8t|fkApk23xA{ zvlx>8|FYP{2>)xQi7H4G3IfCFGk$rIkz!T`3n*!5vULB;F;N{I9hd}klChT!KV0Os zIS7YBfyD7#*Lp6W56{QCSWR?e^ySIs+3^#($}i@RhKz@axxq z_EtQuW91hVymIT@iDTAlV|O_>O!tm|OG_#$Dre?k)?F*ZB}g-NR#v>)Q}7MD)sY7* z`d=XEa?Bj;oU%gQV459^8bdks-@kw03~u}DFEupc7HNKaAE6TU|E++yNZer;;qm|c z)ED{u|CIOt|9boHBrep`q3u*VA28W0%0iGnQkInDLyvsnRGgJXlPe%7I0q$$wWF0H z_p9^G;e(jL%*3=hj%=T`JO}{$uf$;?Z0+qWa=7Pq^*pSi*;9ze2q$*B7Wa;hRx#H4 
z+{@MtMy2)+Ej)aDLw)_9gUf5|f18`<=jOo1*j%4W{W?Tt3p8@iXH-<2oScmJKbs9b zee$IK^Mh_y0?#x#=2pmlSQ}MT3IDN3G$gZriVBELN+xwLpp(^Xvmh4k)f-j z7v9){$M9-Fm>r zbc2Gt{2l%Q<6WpV(gPnmbDWno>j)0wz~W+CTifFF^o8bddeoo(yB(P|mkZ=p&o6&N zC}m;=p8PyTne2LYszjW!ib`ryQlUY&7s{A+P{!nLX0tM6bGW_Gn!o?}{0{CEezlW4 z4x3LPOzs|R1(6}}@$qlEICH5*Q0rK8sYyxY#eVpZoILA(=}Niv0)KC8-9_Z|O;1Z3 zK!q*D*>S|zg!t_4u@?SUB>|NmAN7_Q*34SI@%Lwzks15*qPhJq+v0%5*hm@Fw5%HJ zT^twjTgGx{vG)@u!Q9LoS6QGPA*(p@l~BgrUZU`3m+R95rLT4%2p01jNbu zTkhu$9Oc#3C2hCo+n;-55Qm6Wr*o}okx-WMP);dM`bu$@n?1L6Qz+F9Y~nAWp~f)| zQmFXBclS$F6qohCj>69^$raev`K#hK#F~DArBzmcRjrr{G8J;u($iUge?FfZ-^f!+ z%Hg5iFnLMU6|%IPY*8EHNl}+Q+uE#j>9_SBY0n_!OG$X2Kh7UzDJiKS(ifSLcd8rc zg`1nCNQ7klFgL@?1A3$s{C)OSl$Di}tR@fKVErFfgJlQ?k%nl11~$G+jpE1;Irs*SLX03Wc+3(V0GT>2RVv*WvYMTe4E?jj1|+0s*euZD-K# zxJNvH_Dp}g+QVb1^e?7cDwpW=hDzD94T~Nq5+Gd*uw3gqgJJpJ9_;UTM6@t=&r6ad zNr@@{to)f~Ki?Lc5PcB>eG(HA65JDoge3M%$HGrsBt+y)2Hv;39PP@<%lqm7m-7SZ zpemiUJi@X|=0d-A{j)6;%M~_DVLtUj=l(tQPuKisnHc>8PKOK+Nj^E5Oc%-mD9QZv zoiB=pKvIP3NaydU#e1QQ#f7eS?mkYaIo}`m0kEbKay!c}bafZh+=6O;mv{vmL(WNd{UNvEb z73_#HR$?eH5ejJ=byP*%#@&1NMoOS)+`M0klv&@Ji+pB+ER(w0Ig&xIv+mW=>ZfCy zIZYn&85v9IUtv3D8h_+M!xxI!Eo0|KGtVB5={qUVYZeSn>$M{7|F!GwJuciC;tC+t zL$u5)PsAh7%A>-+Y3dI-9A;n*Ihq*7x~xSdwSotEN=ssfJNu0NgyDd|RPJVZu zR*BO_b$`~%qh5lAh4ry#l8bP0VUbF8vqt}~FU_;0BmW-PXqj+3i7Bah;4(E?g$2!f zTAjL9-N5SZWPj*+Z*EpR25G(gxyt!trhXh_@-Vjf`@}Y)C|G&HY)%kiEeli5TR6Mp6inLrWu7 zH^^tYD@ttQ)SS;Vfx9lhN-k}H;Ix}b_m~gv*T}=*J-L@k$u4?UWAmRwaXi1YjT#%S zn(7Zl;(iU@{Jtj#6Qv@rIiew_`fD_4m(u-U2Dum7qhp(Qk*uOHbXWtTG?J5&G015| zM8*^_@cohLNonr52|3;6x^A|!3jFA?HT!0Ek!JO&-BoL%n9QirgHpQbL0C(MwwdtqY#SOLV9X)nt?JMIhCo`R6 zZLh?#%iYFhl+?6+j(TPk==X9}HW(MghP-FGo$OwJ8Z9JSOc?DDDnyS^Q&aKAz2i7( z-Q{1Z!O5ptv!wyv^&^sDO*1ngXMx*{a4pX{wqhq@cKWEGO$Pt8Ej_2&_0hxqWej!y zty14^g&cyqW&El`YnGB*yiQXxmR%`#35a+;{pG4LzoNxRdSNe5!KgJjv zC@C2zm6hMxXW+4(CT?$PdgFVx`V#+97Alpv4+MvVJoa=xkskZvqC%y(y)4tv=Jz}U zziOF~LG+4l?!kV9^0#o8u(06U0vkdXxPuQlU_G3i2Qk~$5E&`hnO{L;{6E9ntHe&O 
z=*TKjpWGw5Cd0+-&`<$_^k6-=v&H1HvNGHoSr`{BCov}{7ctMBU$d4*)v~YL5ohI( z7li}ZxGVCRRwqwrquDVmTB0rrfT_m1x~JTaPXxAK!GIH;D;>Fb8iJqilP7FgoS`Zo zpLFOr<(QO~u;UWAgcGMMIWQY;F*350+U{idq+HZgKIzbZ@m7#MII-jCeM#7x-8_$| zbS=_!sf~NBNPV#y`-JSS$0y!Fd+Ny6uc1j?qT0`>sUkTW+eS?)0T5!Q%l{c z61uu`YQ`)0Cc`QXL@%Rq78f&im{ZS1HK1(5zFCfML%qKo zSCQ8C$kN-TpW+Wswc}2Pus`DCJQA*QkAPdYS64adwM3BDXa#(q6SE->p0MN)DltJ> z2INUVOz!Sj|CiFCL(0z2x2*Yi&4BFdeAal=y;haZG-)aIVwb-o18I#RloI8or7XWc zm!6_(zbG$g9H0?2EHq^{nRN`wd8F~gh*)AxfOv35W}|FosK^L9cH&D`~hNOvai+!pqCH`4%yDlqIWlq(8ibXV6(UZI z^VFyB$U!V_Z@&*k)W=u+`Ap2|fieh_?d|KF+)&6>OT_VpUcEr-#8jZCGc+{R)SSdA z(S85EC6ZaM&JPdZEj*jA(Br11rNw2>cj~i1*$N;R)DBXVw{M+iLKYQcwhFivbbwIi zd<_dT^?Xoay`ZJ1SK@MHi@{he@?g&mR*bKp2%7M4dJXkb^LPtC1iueJPhe!r%gao4 z`&ld(PyGF(hsOyZN+%})pFb0tNJF2*FKlgH7#XP~D=VAWszQHJ^R`MVD@T3$RQFS} z_VVS+gjQqQ+tzAoYW(OPH2|alwACC&Yt({fbJ5gAMMEcidUBMSoLu+kPh)d4`{pti z7uSOa4=USP2u%P>1S}OGiU8tQXfbEkztI$nYF(^J@Qkd z3wJu$)G;zznwgoYm)K;!h^tDzK0X7L4h3M+Wo4XCFF67LxpizPCnqN@{T22F@B^y9 zTQB~*_hU-RRDfcNi;Lm*Kc3e@c?q}+(jcjOL6y#Kp(`1l-rPpc>$O zfB{8*{CHW8u~an&j8#gib1(Y-EiVs`S^y|RLqiD>_sPyHKR)I>o25SF+3C^tLias< zHxzKvS7EnCMz;w#z<9Y$CRFBlcz6M1pD%iBmK)IB8}(%k=Ii(e1Yq?l3CHoe+`m=7 zw*nnlO%3_8XTopZWNgTsxZS##L`n`+WWRs^2E<{mHJXJmV3A{{Gg0V;<96EM@Apgq zdO`8OIy)=iz<*xcbeXx4q+!_ zVPVhEkn*cn(`~Wbh|2Mg{CRnKuzh`feaHCh>?I9BWRy>yNIGrZxCmHc{YfGr0P)2k z+;XzB#caZcl2cQO9z4ia%+mtL>7l4Ok5&3#14&;vF7~7Y48i6Lc9f8qSm1oH3DE|w zmx;c*gmR6J?Mj!UzNRKaz=fb5PT>-L0mgoxhzQ^!vKNk_zwcid0;;F_nU@zF8d_<$ zM(SUez$JbX!|USMl_UZV2skdL+=B~fh_HXA8@DR*7<4Al=#Q3IekFu?Kfq5HTc*dHKFFm55$h2a3wTMUl{8xfv=>!PpB zD*}cBASm;)kv#1NQZDOf?CkwrU6#;l0=k>bHAMe_;T<1D6KG|r90-X2Yg$eYclDo6=-X_y1HbAj-GC8Y(RGjeFxDCN9*bKz>5Y-*1{q$BLjR1Z;cwf z+nmE6pLVF+hWcrZcVEjlK~d3B_$K92A_e)*&u_2;-C+3F|FO-IXn!I9F^p1qOA5UsFXsShGU z!Y`v(O{W3qg@wWuf_@(2Ap}6g(!|V+^>lpz#ojS83KmDt*w`8757S|vtGrkyi+i_k zZ`4i&{e7RWm<5=nx0e^&0xFv5>vNcnlwuLFnRCR{#}HgRNaBwlKh~PLuX|{knv#-{ zApzRvs^Sb~1h*7<^QPQ>LrYuSZwr;&9@;_x2n&<9wPizo`upCpY(y?oK3l!2e8VgP 
zkIK$&Z@xVaX}^)sX=*5V-g;@xgB{4r%OB_7x?bDSvA#T504sejC33w@4%2+Rz36>q(aZbk1&Ns8^p^>l_046X@$J zqNVl7qo$*~n;Q}yK-g*GPn0z^HBkvKAOJ-!JbwFMD=Z2dK3r^Kz%o?nu;uLH5`CCC zw%9l_GSbz>#K7?L_S}C7p8n;_mxqUkZf7TZIcgQk!W%;9kdNBne>kqGBPqMYA82ocV|ffQkR58 zJ1qP0q5jA7l2u=p7@LxZ=NJH$_;^hLflr>eP~K6=;(epT_X$N< zHo_GP-wPZ#Ko`T=SwA}GR;ICUERfng=^QZi$^fLEagO==d2%$ zkYHM3x%Z9_w)$*OQfSoE`^S71Uq5^cQR%ju8fTdiW=V!&H^n$mB&^273JU#z8UjX) z$v(z@9t3C%I{_ZHRHpC~w;p(|#XqZCV8O4ET$k z(b1lsLI^&mr>6;x^&aP?mrnwO6+}gfwg}drV3kwT_U$LX= z>guWDu_*RIfQ|HGysyE*lT%Y&e}{Be>sMM1<8FGUh>kbfj|O*3Xnk*sU`(t&5|3p1 z)I9s0$3gmDjlKCP0TdNaGBPtO>`JI;sK+f2{R*>)doA2Ace;6>O|Cfnn|O(X6Xdw^ zZ(-qJqQ=A1^D1Egh2Oh#*TnU8cXuz{FHPp&OWZfgD<9;>irydIoJA7g;q@8`nVGEt z4GW>n_u2jP*fmK_L!(+|*(

ZjE&-z3sBB<(7J5ayKwLD!EmFe0623uy>ALK5xAM!Ci;5^tE!#o>7BCF|lVP zLQhREVT1qUd#z7r{^sU~4U@iBP;+cR;OJWCeXSx!ao&X z9N|e!SZ2TU2Ak=22MgNc5dB8E;7%#Y z$=7b&zL_&(o{h=8c#>FMdX(wn2kEhHo)HFE7@a*g1G-+Eco z#V={S7ElYp?Q)Oi)NPA-Ku8!B8v0~}=R8+=W@S~XN-G1D^H4D2&Yia@uYvM%u_I=B z?*pV`$J*udH2To_k)z2K7Bv>-SL)^U@0nik@C1@@_qp$(W1fR~e9k9U%>PU~bmDP{ zY^XUXHgh_>lbaH`HTU>WGep}{>?gn}p>^ud)gYLPI3M^=fA2330_;`1!=I^p#z@O; zchp-P+>qMbY`w{PGCbyLYNMB~TqovrR`k(k`ow_`@sw|xXo|Ne|9QJ4aLgB4fP{~W z_a32$jj5@xF)<^s2JyFi&OuTO056HmeedhkT5|A{6=)I*ezqlL4~ENG-Z$a`+Xd_$ zh7sRIkXox2JCb#0blBf}x9)}C*LNaO!`o4LhF3)2 z_?4t*CC%*Tw9?1%Jg1^zDPHFK)!W*Xqe0?5>wmN2f%T&wqBtt-YoCe)PeNnv{nR-b zCa}Pp)p1-2P)&8%TSWMj7&L#3G~tf2UhSJc^mQ*4l7imz#H%83GvZ}pw~<(d-Oe41 zu$hPjtINg4_B+cM9)JJ6AJwj3#FFk}s>G&GAREJ}x=pkrG24GLBxjUMW%$4y<+W znZU&zXBvZ9jrxEPIX-kh_q-ni{{OuF>#@b0&T^1T92Mk`yAe;!A~;zY*}O`MN*|u> zi6oBysT;zv=`>ArTk0BAqdv{<+RSH~4PK74y2;BaS!Ain=NC z2bVu+QwYBiAsL>muxrgPPjZQiSbs9r_Oa8$s(|V67HLjKy30}1L9FQfMsYpD^{3@x zshjC_rJ>!GH7;Iy=IQn#B-wabWXaCXRx15>o{Y7iljti1xDx%p*5L_`;@S6)=H4fb zR@sV(zl^`_haiP$6ca)yct_W1I)1b5q+&U@3B=m00?LFYKJ8*n06+Vr>N4iAmQ z9}NXNx;QffqY}%t+ZgznVQH$XH37VLX!?o^`a^qJ(a95e^bYLBIp){;ojMpuT(0m7 z>n;kl&+vZK8lbPvA=ymF&@Fqrm}C`bJ) ztP$t;M-1}aTEaEFb|@=ttgU&R_t)1aYu#K}Ecb7R)KKi8P}`ep_TzfaDG$!)nM z?g*vz>aKs=o!ylOCs<;wZ$$mJ@|h3>1Oj8Feboh=x$5Me3{Rf`=A5ZjKsItS-A0kW zanvZ>mL^)Sci6SJ+FjWn!7sd-NdQ?mVsLv34_YFMkOc!84i29@J)MvDH~PJQ zpYu}Rm=Gzr{M!Lsea6I0n$OP){JeBqr{2m}do<6=hlZOeF@zw$DcsR< zc@rG`4UcE}=#QtG1imFFG2JB7lC6l6VABnL;dxUwRw67h?z{X<(qmiIhD$}+2g^%r zD*@=_k?CaMW|+dwM;U2ZOl=% zO;$BR;?xPTjI&|pp61&;8+H%x4*wHHn(HBd&m`2P($_kIO>PAkk`xyvm9Blc4VKI6 zTYA@GT3IatF2&{Vu~F_fey`yA)N_5c_Mzvq%o7PQil?+<=$ayLZi+WAwII`HqaQ!@ zRK;G8GuE>)JzE7lpYDYTf$@-zmVsJ$%lOgchknBLk$QO%IojvT6y!6m17dXH!*>4~ zy{_pqP=s(AbR|LdhXzE0a zO~~%I-6%eD<6}uJKNMApOe)+iNE5E9C5(!q9hp*4R^5iEp{YUEtnQ*rMTHrd5#Q~F z`xW=@Xm;*nBqQT46}hLIwz00^n^rZ;u5NU4E|XylaccXqsgK8yl&GGi6Ia_vD9Z=- z+n@d-t#LYOIbV3|>xu0Xll>A`XLNK&aH`^a1#`8-bFeVisZ+$FQ7st&P>|@)nm8`P zsKppFvOdQU 
z2MC5J$Zjf@XF%ZXsAnw_G|}twr6S)ru0gO8X}m@xG2cI?p6o(Yw(swMK(Zb;Iq>_u z6uMhuaIHN$B)DzX+;P`#j=;?!^1+%0We?%Lx2yexjV~}2P^hVMTg-@!Fdu0q60#l9 zqbmH85(JQJ^no6zLdN}#-*dxSN{5t6k>hSXhpy&*Sa=gPmgt&yJ%@bSDV~33J~cOs zBGwd{C=Yu5TY5q>Kip_PI#SwR);2a!+tVZw3}h@?Bn8vSrmvTi@4r9A%X-k;e6l3( z{MIpKm;7LRj6_P4eZ{+6f2nA82Z*1uh?NZn9u~;GkPI{s4uvIKk}{dAAG>dUS18H; z5c+w!YGMhhM?L3b^P|O2b<^I>jZMJKq`C;>SLULz;#AUY+!AYr3eZ8q)8hW*(1e2< z$m@j6&CP9XcXMxjJnzo#MRS;8kdHK9y20NT_XsRFoqnu3$n9|k+KF0&TXq`y$ZkW` z7bH6Qt?LD6|29JKX{JX%NHu6eU$k5lJs0>7X|`f{Mr3zjWQ<_ZnOuumTwJ2O{IUJ} zL=i_&G>6Nqqw9?=(&FcfQPo>18)MjAWhy~Rywx@$)b52@BhKR_6$&)U2&?Dhuhn06 zF6|Y4Wn!ubYmIyHhx{3Navxa_j*HdB9?j0IW&aihp@0vPkrxxk!4VB&cLzB);IFyfzgAcZhm@dYVx&sJLP2QTkfOG}pK<8yikmq|xw_T%r%88R%MN_*cg7e7x=XIK8}bzALp z>&EVko?hMSvDj)umn&ljyJe+5&9l#``z^g~NOo37hkyN=nE4w59kI~>`bE{;QmWlx z6`m^{n?P=S1N-3H=(pY;BNv~~!Y5j;m-yS`a-PvIAsml?F=BCkh(27UCI8ddDEv%Q zpCt-b1)>4F$xzQvJ;~`q1VlA*%$qkYEi5=K)*=)9p7$#8(ZmdvFi$NWY!|Xa8JHQVq#(_NQCTur(s{7ngU`#^Og5ai(j@YLrN+tfOb`DR-Ci0 zxJ;a!WsHeJ-Z3vegc$RoJM6F=o-8TIq`bYo`V%VPD$t$^2nbx+6+i(H51j$DdE;=z zHI+-@fcFsnc6c+^NOhjw8t-TY`%I4BrTPFO&^F-X_kGdPJ?{s9>zSGYe27=N613UX zE~|Ix`H9O()A0Vh(;nNpN!?|ABctJ=A-3gv4ClQ|@W4;aE8eZVoD<4Eo*5a7(1ZV`kE?KfeK1*yS8 zgAkt-a~m5#tBjylznZAD4z_)KI1T8d;Z*ed% zvetVX;A4QG)t%093EXTDmzfS$9{lUWa+0U5S$Wtx7F{|}r|lg^Fo$s7{~;7an(*Be zs%u!}z17joe)WstOhEwv8pD7xCnK+d(G}|XGGgprU%vgDTb6dYily(5%|ozP63Gy3 zY;2~UB0D=f0EH66+T>QPpRz+x7L=BhoS2@rn)r2Tw0PzEXPdQ24oNu~|3%65Ope~( z-0ANEs>nMV{djVCy`WtRAbnBJcqL1{>hO>6U4TPa2(tq#&K*Nv76StVAZK2uT~#He z^uX_~OSU94_wGgBxg$IO(UF>-nzpg+jYEjyVcDOw#Eq5*-Kk>H6tuLXjf~LXLOWfu zzrPQFO1}lcxk2!60e}xGx$$b3it_Tq&DrM5j6DO^^g6X0aDx>T*`{vp3GHV)Cz(bso(>7G4Hn;z6V zpB!=qj4>r8JYBXBD1B(Kn)?yb~J4$1U`-Ggl zroG+lUq?I+8Sk;CH-Kdc`*-oLT=5X_#|KHrdtF^=cyniOFDoM>1`bZHw%hq=<4r9s z0L9Nh@PPuH%Vx3Abd(Fk}`*B-7iv-p3%^#z3g`^QWY^curs2 zxrv@OfK>pW2Qs6_)YPIjVPVZXK0v!LITh9LuVpu9@M?s^XjIly{Gk#0!3SPLp3qzlmj8~~~J z?29z|r%wgM#Z7;?Uy}9y7{|oQ8q4c)n6J|;7)YG{;|HHjOiDbs4QO?27JN}{zWy9Y 
z?|^b)WnrOs8v#+00jfr~(}UC>KPCXvloh)266B!2Ce8bDH6(1lP$a^;f&ZE^=P26! zv*QM!{_)Bv$N-l5!0;4B43)7-c{o7h4UFFX2M?rTlQHZMx6{9U>jf!}(TN4h>D}iZ zva;@gXcdUbrJnS=5V7^a4rysCdn*WLHvyLe97HJ|bD!NbX5#z#u87G2tR^Y> zrKH3TA|0qtE32x=5Uy`+p5anl_s9ct1Y{ZPS`tPD)e{RFTVOyyb`d2iZ~j>D1sPGE zmVmW2i+3$J(Zq}7_P##+wMqT3>)6B*At86~-u2L{=`jSUxss9+AiEzcCg;)7(8MGF zbAJ2wt#}+S$jnLZ-Ysz0(gkz4=hh04Q3WWhEkL50;lXkVpk@#-XBH8m2IPAS;Q0%; zQ|F0aA7pP_T7(^fo(KpKd7$MNX|!mWnDW8N0WJcD?sECL)b5Wkj4UkN2p-$g(gG4_ zp!%qvKTpHH)-w#qwTOssTpU^N%ReM*r>U?H7^~-y?4REdKEQ+kvcG{vXuZ@&d*g;T z5xePoJ-z8HI_NZddto;!!OK98m{H)v3WOLQ-Zuy_H&?w-mE1%86!F-mNptWyNQ;Gq zg=v9TwB`X-1x4vAuUmsiH$<<}A7z*uP%77Qza^kHu|6a&PD4&EIX?cS4X*yz%U7;c zRaQ<-O##D~-u9>(?g)50fMw5r;Gn2FcYVNfU$`7v17Qi0*r=!|*r+6~+bnXxIIngk zH$$LH$9~iY>a=yBuWDQ(0`5YhR*nMo7hvL^-rnQjPVh`5T-N5+)-j;2tg3p^+xa6s zp?d*J3$Pard;3z@#$Jm*p~1mYkf8wPb~nEIx?S+>FMdw z(Zf-!M$pFrnICu$4-a5B6ukZe$5rn#8lPGf7#6r*yZ9qs(jRdBK0bn2uWHM5^0pU$n?>09C@Oo5mSeF5fJxKWA zQRQT0An$K(ZFP5bErK|?w+zj&1SrB>4dm)zf%oD=Eg@*K;bHi0ptwr?GW&LnkdTmq zf`VzauKsix%8XZ+dTK#$0CDc*^u!6g3v4+U_~n2H%qjW#U;iF&MDwB;fkIW_I-Wmo zs;?L1=l{4!gN7xgtULtBCJqjchK2^}U2t#%YC@2?o-C^kqgJrP*rIHtw6qjbJvTQu z!M%IV;~&IRA-WUQk+G;*T3W)%1E|6!pmqaS@X5${PX2_Reoc=AHK0!swL(q|mraIx z1xS1UBRF8X@N6o@#!`}!Aj!Qg#gAs_;N(P2MFs4NFHH9;pw@gcpt@zz{Ou`~C>RSS zz3SBZ{RLi4;G_?WG*kSrcjBp- zX=f;fjXTFpOw4E=2g{c)FGaV8g@wUUmz-&(A)qME?yc+EP|5=-1F1EM3v=gc+!^#X9e2P|OT{97P6i=# zvre_U|B9}>U2meF)MkiyMe+6p?w#}@<@UbfJunmGT1Z5MzKIF+Mw{R%mjFKW{QDku z(w~NgkC{=Y>7I$n3KXf5B%Y2?DowaS$HK+UJ$?&Y$?o6GkByL@bClA6Cc0q9F2Atf zZw!8nboguq^$)BnP^%C)kq1b=)NUo3u%x6XXOZ;AY*56OZdhJn5d+5EpMaj%dEZ=6 z@OrP(Gp{2A4DdFRX|`*YTBS|37jU^n8d~9r;sCc;F30&@6F99xX4}|~deJ|7)|ear z7#ONS6a*h<_M>E>f2B zbBE&NS*=zcD(DSDBvw$V0Ga~I=Y^4ZS_jYq2@9k5zC?>Ekk3|b1>OyO;_~VTlQERi z>j@<#CC{<^8RK2(xL*s6|H^V|2OF0mx%wj#o{luSb$4$sl?$Eq$gwV)_%}he1A3d2W$C*xBii3k zRNVS3mXxFegL&d=0br`Cx(!(anr0R&I0yr$-(EQHGr)8nLT*Gq7^HfKdLRGh(VnuS z32=UAU45Xsg@-eI2N-P_id3VeQ231V>dZ)eovai|p+D@uU&dr@Gf_qZJs 
zEx-VL74#%_Yizr53_VlXXOnQE1HIvYCkOF%mj4^6h~m#iJ*f-2^8GPgq`tal%yCPCfAP4da9p)!M*F!WTx^`T>SCXD`@d5 zQ1~h;4uFTGV|UquN$}NLl&MOovOG@ETuLrX@&WnBknhlav<~!`*x1(ARuEJO*nFvf zynl#L#lc2H>`v}vK33_Ytn$g!QUD08!ht@2QM-LS>Tq=h)Z2jzdPUFQp?ZU!h?wm{8|Kuj zBnatn&4blz8sp{W#Yv%sne$o7iK`kVQ^y}(#AX@0s+126<&2Lmg)2B6A4OFFZ7DBK zve(}FP;|H&P}zAM-Q%*|y=iXtyZk_s;mOHjkQ+t~EpnBA3|aa|^ejF7E`Q+;R^j&0 zBH{gNM?$r&jGCCwg#cLprF!;b{|c@O^u3bu@~5C*gIyPf+)zg*hqg{j=WW&4FZ8i) z$a(XtJdq23JW0QFX zw{GO#x?yoAqR>LjvB`o@YVm2dN_R_Qu%?b;vP?e{#T_7*!9Qx>FH zmFn@KQm0U>zz|>E9KI#aIuCqiVwEn@{b+e-s_?JD0XHiq7kA7Y4j z&~UHe`?Ffjy#+BHKrDY7Sg)H>w7s8Ixy}E)sTr_Fs1gJm4#KW6AE-F&$oHk*d!VFLQ(G8AezT)ts!tG7`@vsOItUbsOis> zDCq8H;&pvSf0C7zvvI<)K=$?8le20#9UflsGCc9OY56hRW1yhLoR{B~{C!Jt`uZBK z5hP0hUPr5(O5nu5nL}0`iehxmEMjVI zBv-DWeS%mJ{^d(Y*Y|5DFOH4yew0zc@j5!XxfA6O`B)*zegoLsyJW+9>*lW0K;d$z z22M9JUbgM&F?ogF*=JN?KYWIG7$u_yYJ|7a%7;p_NfVkt3FDh?P}9;m-m#efnH!{& z%L{eVHKA;+0;KCfLVV&9uU+2JNZlugviXy}5e}05{=bQ}luo3>c_J@GX4kWwj)y$|zbGda0NOjw-tAKC8QQ#de*1`A!PSH2dSEcSDp91fshNL&Y&!}vElA7Hw z|59mo@T3tc!PCzK*el$#B_>M?)J53Tk12%`J7@5|@jz)>rPUc9V1#crPRU_7lK6~_ zeosJUZ+*2&Qd*8^mUFs(;aAt!shH2!bl%+sNM_`Ib90OR8P^B9VY8~QTc)@NUSKjm zriF51IW{R)2lodtug+gZ+h7C;4-{iiN!kAEfOaVq?_;jYPF_D80oz&r zONBIH%C`i(07UyX!^N3Smi#{D#n|LewtpK?hUcwtN8jcDkFdk>0m4pskBlBo(rABu z($Uco@Z9k#Cny-70j9y%2=J}6qGAN-1py^deAOBT4M>z+EMN@MSYY7L0V^Ed2H~u4 z`RM%(e-KLWqwiV%`tW5d6`dT8W%!q@O^*{%^SSkJXiz`DTykPTKSPhwqb~ztlXR`w zK3uuqLXj|l&1og2?D656O2x=oz1r#JvAranS$wnM3af+Tams+XqMuKmQqn5wj`n}R zWg53@SjF@2Yma$@9;_g_edv;%^tmNRXG;&q0{x9}x`L&@x(ip7%JsMx8&sSI-M;>n zYRlebF(Xwdn#;?S=rWqvRganoP~ZT;!frb932p8T;k(O3r zv*e_#{;{;V2V?C5pFp4SJkzMqwEw%pUl+RPG_+K62#*x?znGT!$vQd*JOBQlq+q0w zEgOd;%P#<#tGO@#%583M@9*s$Ezk>s?zdOz^;0gca_G*Hs2~8^ukSKIkz*fOm z;FSS`0JY#jue!K6HZm3L5q1M$3xKQO&@WI;KzG2)fwr|>vZHTwoKM>)CdQzr&iG?r>ee}L<60e&`Gakd@j z+zO&cefetF6M!lszkhG57zjl5eE?@AC3*G-{Cy86t^a|#+RzZfM?3&e@Dn@~KxDwR zzz-1mdPsdbicGVKLe@b8h^5JGj^2CQ_2KsMg5PHTX<`Eoarc+IrIiHUaGRUl1ZX{S 
z0G>nhadL731RV<&m=(m?HzFbj+Y2(VNoEg%N`8h%M>j9QX($$Wv9bbn0l2uaiAm8} zA{;k}kMHafuZoP02IaG_j9RM`rVC8{_io;mk)9C^~PWA z?koB?5-Qy}@agi>AD8|5=6Po|eZlc*Y24cJVba&ANM`2P`C2mOy|J=AxhU>6&b<+< zCisL6l=d0Eii*N{2fr6Nxw#F+w{PEuvtqST8!!#P$MPD$?SKGkDU%3O8yFaXHjaY+ zMRG&@GA@N^U;q;!dSWQu|vk6+J;81Km8ZPyL{4?V-CFe~6>`6_`zWwFau4YVGMN{NXIsa<8+u}4Ny(Y=Je(T4X4Ex3IVlB>Bqs+RVqM%AjXKu%lGy7hu2m|?2MjzFCvm|n3$Nr=W~G7 z1573Z2;=U3A|46&WDzwr7YGoEf&qlAhCP7!z+M6bs3|0Pm|0ZR+tRWuq;`@wta+?| ztV!|4&zI|8(F}00V`F1K3m&3xk^$H%_T~*Rex4+r-@#Ij1x-w7v*mJCYQF=80GuNP zb}z5%yLkdS&a$-`56|aIY?` zLOu+k5QC5LAmz4w$gG}EA$vxChQhglp#dd2yx<2YlmKl;b4TGrUR40*!ptqKtZLi* z8D48@#>ZB@5_37p*-{YF*RKt%o&0`P1A`~u3=ScIeLTjM%r&~IDFF5b^et#oyh%Jw zB~RFsii(Qhb4EHcs$dK_e2L}`HUs2Eaq(^hqc&=p9JlpFL^!t69{q$5OlpZ@rQSF> zQfiiORWAQZH0suhrh#xPU#{ThrrLmWZU9kxlH7RtMV$oUdJHmg;2xZvoqrZ|+{ifd z3ImsffD1V8+{_FN^G?Ci_jLk15v-e+S3R5>$fGX%8S-cyZ0kD^slPsWTz>Q@CzCzu zt<{VmvvkMby{_D#+-p2IW%0SGXM5v%-g8$I(SHlN9`6I51|MiNF*$jO8`I^-VNeK| zC>#pvj@pAf3`4-t5J zQuFrZ+iE9g%-OACz2C+$tqQvDXz`{{G&eRb!^dKECJ4MTQNMImQwBJrtlZonKfjM8 zp1hJL4N?%aAr;D;Qqt20@yDZMVj3A3bVdQHv)ysusWC1iEv*#J@!8wkTUx%cvbwv> zVf2w| zfHea1{C^Sk-tknw@%#9x6h(HFEz*z@71^sOGaAZHMInk1ak3MkWRyKhijbK-vKnS_ z>?9*IdpqCjyg#4M@8^#mJ<1uc*XzEY@68U@++Xx?htfW_~eBb7^*;kkCn#nNOZ*MN8gYo*9Jk z7RHPQB9Hm~_nF-Nb_H8$*)WYTh-eTkduQ7_^6vMksR!lc@z0jXG%~ zcI?=*urMEPM#@V=VzGOlg3>@O`F2CuJy=5s*d^wS_8vMk3hEFU2KNgu#5`kOPR@() zaNqxZq5Df2wamT7B^^!oo`MQZX?~%EDs7PJ<=|bq&I`rG^}pnfJe3mC2Kir@? 
zKnJB5Z$5KlCxL+c)D3nTTjJbDxitT~N$+2fo!pHyCkFVsdTYuqa2bD#fz*t#=FN!%B0G>3fr24;?a3T$1V|}lydxT;{a$=(obGsCQ)v0@U5RILkQi6h?J396} z?e*gu2jiJqS{l2MgQW#g2`H1CeJJ=b3n`k&PgA)}w{s_QQJl-!943(Az5~ zBl8fe2(go8zabf)Dr@)a5%>a(++coZ8O0uQVf=3fMae2qlP(56RsD1G^QwWnE56C# zO-|tApzs3g=!HsxRJ4IaF+DpAVouh5t>FE8E37l9m|($YI;_O#eFsq@B{8vO;E)VY znO(n<(6A!o^|x7hd6>i`uFM_8llc5$p4VSx2Mvvmo}RSxzxe$8MbvSatwW*u-+OGp z9`dou?~JbpS)rYxot@>Q5lg1yJo)X*m(e)lh6m+)eB5xI9}PoJ!DjRFQpT6SW}y^F ze)tmS5ud@J6^VQvm_`vX(!hY~%M|`U&>EG+J|?k8{UkyvhHMVuVslf|65>4k*qS#C}^7HfI%+(Lk5)7fd zxpycs3Qhm}htOaYVFz)}pX6~G&&+u24pdvE&eNM684JyVSB?q^oiQ=#>+2(zdu)Ni z0WCV$xC)J@XxRJr!Y?)6$nImLfvwZty?Yr2_;Kl=>-LF>F+1)kG@D<^$jfU3djPj1 zb#-BR`M%oP6a!9zC!exMTG}{P!l!}_*&F8%uu4cs6g*?3+M|nB3Hk~5jkWF4G;Wy= z>sSlYZf@&X`xv41_Vop^UZLg9M{|5~ax&-0BiwrsA9nxUbL?^CF$~7yy1h=1$sYNaI)hkn| zcMR)fN=m7EU)yF2i_-H;NXzcL8J-YT^KEn-wx=HiOl@4rjQ|g2*ZSOwde_+Xk zmHh1abIjh-NYi`rrB&D0`$t3oEeKQKD{t`Rs|yjo(=EAalSe^;Brfxj&uAI;mu8IA z&z6=MYz|9(YAQwdeSCbF`~CC3t&B30hXS16o&*P@IHlJgl%504j+QC^=J!{i+BW{4 z==1dSgwSKH&=7Sz0-vR;iwy$-N*$D@<9ByePQHqYLf)F4nYnFcb>%^j!1nF_5I$m9 zk)-Id@4x{_BvO--l0JN(m!@sWh9g)mR9*&>cYmvhi)WiixPx4@wjR&;+Pv_sBlc(H zt5@(qtkhDZK75`WBX$2a3I}(0ckqal=Hl}5@{l)~q_`eEco5dGGZ|glu`-`(SGL*6 zXHEn>d4g%T^Xs){kX`Y}J>1ReAR=_~O;SlrM6kAwzPEy4WkYq%)mKqy@ef@8m=@iw zVPT=Itqs#nQbGuslx+bOiFUsKz;-I7;CWTRYr!chIkNV77AJ1ryooG=&~aW@*E8mZ zj$MS+q>_>YQ|7moG{#$NZ&aHMHAFd$kDLd5Li!X!1ci^8c4x4M znO=P3sG>E2%%a#xlP51h_yJX)&V>ufX=!U?SL^0Y6ciq%q@-kIOkr8N7ae;?X7n~j z4huhOGClfJotD}nc&8k*n5Er)-+0~BsU!ifAfB}KWpmtqaFYo;&7ShnL_t-^Eq+rY zI6d<4>!}rnq4_>6@DEpW-^q&*X*`?GU%QqV+Da$KbiT}h;RIdN(*-*3s{X(ToT_xDYq?bJOr8?H#9QDpeDT7ImoJymw3t#h;4di8V2+FS+?09`tK>Z7U`ud6 z$+zzKXkJTyU=!nYOPBSP+%eCsdN&*UrWeD^9a0+;fv~`Zo29GEJkIA;Ilgif_LcSZx6u3|mcQja?kTE? 
z-rp-J$-DUXHhJP)o0r;E#pdB3)xg;y`>(`~P5TX7226kg{mJy2i%3Z5$8$AJ+0FW< zjfk0W#2G9rpq}OA2eVa3*eK(^J~pMPoZi@2ovLi~;S7%H=Q&B|7@nYIQTh3b`eoPx zi~$vha=V$2O{-G;#7xgHB|;E1x*@F-+{q7rZAN183Sftsw@Z zb`kG?j71qz{|8y0RCsuIO@MEVIseTCt-V#%8OA05K{docHNnx5k@%G<2`D&buxv6j zMUQvTdy4iU1QXYm%qB&*I5Pf`r=hC+e9#fD5uV&=6g*594HR}8lOeHNhiL#FC`!+3 zb6Jd3%NHL$EPjy|9sLn8UCn7F^xMbmj{pr#sTCRj1Va}2CM5;A6t_nqFrD06>NfWG z?grubn9^`BrXH{c>EL%7A2bOsyR%RWq`0?XNM6e(B<-Y!Cw>}w` z#HeevW2EMhTmz+dc=EZeZmbtYdb3hqtRg75iC`%uNrjXEYX$h*?H%VMDi|5XP=I4% z4~$PyIN&y+FA5C^kW)peg94Gq$kB&SSykOim}55h1QQL_-e*OhKJ5Wuq9YHO=K zs(?(dwM{ZS^0$Ayg>V%6XA%6K5K1tb=q9=dMzv%AgXc)i_Ro)M)Vqaot2I44x>9pBgtub-23US;bFP$r7b^n{h04zB1?%XwV7`)jb_U(eZQG=`deTv?99MLqC<_U7_4Q`8Op_xhYp>=$(Gcn@ zxa(!S_>is!4&`R5@%wl9RPHFdNcEQ?P1~T|-LFl2s63$c=!G3y{d%vdX-kf`QYEmX z^oNdSeB9bGjXd(;V-YeWzAE%%5DRM6_Ry0c=!0H3P}Ues5$q4(ZU`ANRvCP=vaZ)N zciK3iWs!y86k7wKMi}n~KhEJ{)dZc)T=bcc@fj^X{8Pt-=fmuu1Gys7!)8MG;VU2E zU}HT2>1f2^D=p^96b#p};2*Fw%EQ#r$^7`S@WHGQfif)wa04G1ebVMCOfuRdFsc9` z=B%39zu8$%4h}g~(PNzes*qnHsOcn;*5Cx(BlR3*4(<#Efxddx1!c(FY?>DBtylK= zmB>bPYIJ8g_qG}+_H2B3#mUhy)m8eX;)~7m(y3QB>a&`S+Svx(D8FH-{Pc>8fJA|$ zU~NfFxQSH3*j(ODhf^xN4U?S@?~N5c$*8t)ykmUbtiUbd>)3vVO@v^tBd(3nd8Ubz zv4eL8_nkH}(Y>ml>Eu)NKC9udTkd@JM6b%muuWxrG>xlSg?8v2%#)%}LuprZci;?` z_tjX)x#8b|z#N)4h~^*VM%MWN*rB5thkhH)#!zD%ar*aZv6mXF&mxW(0Zo#CN)59o z)@Cpi!)Ag$4@@4O;|3lY?h90dTL^%0DU%G>KGWdApwNIV2y%t|x@j@o&To=7^9eU+t_GkxY4&D9-y+!s{Q--FErqkD30D3trG$;sy@`u znYkD!z!l_XXOE4Hz#3>PFK-_b(d9IKk95ud$ko4v!}wj4dpmp*KYTbMAh3m?bNMna z8pgbn=;-jvvS-d|YX=7g9+!|HE>6b4YZc`j-W2e95)+Y@4|O9TDZE`fNk@7j9ko z_s)~%6*{dX8t$(x49>pjnEl2b{q@NXhBU+IsBk($ow|1YxpQ1={3{FlB=(E&x7FHQ zGIn{=;O)<=7pk@5JZ03+%%<##VKF;;cgZLxmN9n zljsF9^>?uW?K^Jhuu<-B?z|kP?BHaYjW!qbvg%MTahRn&s7%Pc}#1+}f-`^i{ zb*!ECv@0we9GxvKn+f{4x5J=j!d-*?5gr80e9SPAM(+EEbR;7yi+#3AW@f9X$tURj zO(-cb0@+?y_ckfX>xlF?)5#5Way~ESH-PU>93zz1*7_+W=%ii{dueTI$_KCQ^y(Te zDO|w93k@Oo!~V7_`19xbp|E!e3CAFA{oqLJ6k8XBM5^78Y|GtxhNc%z$&4jL%kI{C)laVyVf&4H)C#tpfR z4a9K 
zK}(;Rlas8Q3Vaw40D5V#EEf))plq{DoataKj`cqNgli!ca!Go^zxnysx~Yi1!~OlK z>FL#ZXh6e&?N&qbN=+bjcH$0Plc!nmhvRIZO~}iaFnQidSi+p3fdS_;HPmHNBC%y< z@)Pmv(|=p{6BK}l%I8~fqXk2)n5hR!cy$rO8_$|{x^j2I1{2_AP#AS&{hRicz zBf=A$(&G&47g_>RhVJFdwc+~u`UFDea=b2bj-Fn6S{lxe$jVjOdl!rVE*-TxCV0Np z)lnAo$B$xpmozoM1MP4%)YG$HU9iERRp($gX zeCGD~URBq2AKiO9NX%FE?GjG8LK>)9DI}!?|IE3s`ghb^okk^bg6p!b#c)^Mu+OKC z(+_x`rlov2aLC+h@S>{AWOv4gB<&34xnb2`l>-(cM*gywxYIWgnohh@iQ3S2^*lz1 zpPR+K?5|C$489_DCiJr)dE$;XA7fgKky=d5E!*F_V^}=uy1JzK`OV6YT0GNcAt)fE z`T5m?-Glww=c=kLgr??ZkgW)CTL}MiiU#5ZfdKJ6fdDLsK!D($Q0}{v6G$OAw@f3@ z4UZo`2GQ)$Lq^L5Q|6rnHFb6HDgWl?F6io3Ub3)u?7+-@fJyJ%zU?W! zjmsXXH#{sXYv6FXLjhR;LtWtSFuZnnLtX?Xsr4-Vf@Tjs< zsjxOAEUdazd=nv4CTE78@ThiR69Es3Ku8ndTi6#^Q(lhg5v(mNDZ-=JSmX9wed~W^ zwnj*sMvdGgPSei#%MN*+YnzI+H!7bUK%)dQMpQ5H^I3h=}{x z<(9U*<0m!~N+Z~2r)ReiUZ_5uT)FDEHQ=xFFj9M}@%G4bhr z?}jblI*btx$2uj}Oo{$MKr&K5CM`&#)l2@7m7$ zu#m1NpO`X}lJ49@K+BwDzjr*~w45eNVw9M&GBV&!z!TVX-UF72`v>7tZA}e=3lIuW zH|7&W)EN{K{v=4R$l~HPtXO0`lmy80Z4*M5(iERL9wsQ{FLzi<1P}<2hfgE&03m@y z+NjvM8AnZEE=S(0`J*)GWkNYL1g6w~75nD&92nm~OaGtr)S^4E7QTm4ZSK-Zs>tPp8a!aclsPl&!L`6Mt zexTR)c{5=~y!G37jHOi)ub=K)R!$4{LN3oB`GW+7b6R+p&^5DSpPe>lcdqUrC|p&% zTaZi_@JEG2+bXA7g8^_ZE(En%cm|-i(R?Ykb!^na++10Z8TbXhBMQq+1i-nFreohD zqzgMvFQQS#>_}Kh2yi$?R@ToHq_?XpD>M8t@`S#4; zj_lq<00UfDQnCkRVO&(yD*6EfX{r}kCZ+g!Rii0hQDk`jtH z_8b!75gJS&d5Lll)p&p8IM7fj-Tckj$t?Dgb#?C8g8rqmr}eF!Zz~-JiEU#yHPdHb zYxVGphF*KyAkWFeNnfqX+&3Ihdi5c8NSg9i65hKaPU~LW+G)hN1^V3>FZ8Y6{&* z0Gyeia{wfC@-8A^8&u9E+$fd;6!;w0Whg`4h@zRbh{y<#I1HmA7A=c;Pxq?$o`cjn z7vbk}Xl30|qxS6zxau^+Pc7K{U`V(0ZXe?dtp{)HJU4P)ORE9vAAA>{tfwZU9LC5% zU0@^{gd5eiZSay)hR$z6DQ$sbQUCxd53Zt~M{KMyT8NL=Qdc*~y?qhE1VA%39(odt zAR%d>tVh>@UkX2Bgm?`N7LI2chfiV3zQLg(%4m(UvR71;!R_0lSUB1sZ!uU2(kOdC zG>oS8@z=V}qxu)t4mJ{N-yo6s@Mbe)%v;}C%U755P%}jRsIE8Q|J`bb{LPCGMci2( z4*u~h+fd0qw#|2M)xKz%aCGyrUD>&O(&8(3N1C4+JIlCt$~d`jdzQ+lMJB)S>iS(7 z+%ddpbM#;x`O*dFv2lNiqp??{8eUw~HH)D%SQ{zz7@!3ji1AuY&OblF-WLt*9#cFb 
zB=qLLlX0%yzI})fFxBo8w_iMZqyX;&(EcD)P(S0?jRS&$xCuYZL*Wt<4Gho$@cxo6KNuiO*&WI(jGrIFv31y=R_EYBccBPOBw zi9UAF2LuazD6lEBTskT%f1{2ky@OJ*6m%z`2CzZlP)JQ?Rp0qRP0)yy>8Pt4K}(N0 zHvl5r2nZw}O7?CWfc)UPSO;hn{2%Fg*~SWOQ3lV~Ca`6UjCS**7spj4?T6?l*nWZA z0W%Hcb8A#yv;yB@gS>AL;q+wu5D3h(99YO}3uj-vQ})n~oe*zj{rl5wxxWaRUQPPO z!rDS!Uc5Q=K%Cze!awKA7pG=r+g449mnVCpHpR;o+?#$-rwPw#?>}z_h^><$ufOt- ztX171o#79SqcVZNQJ~1nlZ`$0weFX``orjw`0AHk0lI(qKCUAbA@!jNL;&xC_XLCx z>9w(d0MDJ*Uk%Uq3$)neZZ&}~|A8)XWojsz@cE#^=bv2Gv1OIIdVMKKFVOSYMd1n)a ze?iIfcXV{Y`5yXjOo}F-NN#OMJJf=sw49xRL}qe+FFXXJ*)Lx>#Bb8InPf(x-jy-bw4n_>y~<5y@@^P4qBJoV%?Q4XNW zFLb8@?Kd_yhM@pCmpPg{Cr7YR?^sw|HZeIWC`cv!l~|0}2O<>3PFWt8hlddv=h-pr z*g*lB!~^81q|Q9Nd?r!>)3je+-6kCzQ3gUT*Y@OG<`q|EWh0S!qWBrUZ#ax-BLPjpsnlS5%q5T0&{Q74N`vO;g#4i* zEiaEu&u^b4o}4?^(o(kP7^j&4*V?VUhYybrQ+_Bn2-A*x^4(i}Fhoi3_dYGnn>$(B z`bgG?0cWRq3o=oV`4$ukSRR(lqEu$=C_IZFKD0GCQ%>)>Mjdv}ZBeZ9)`|nDt3Qp9 zQziY*5Z?O?!on|}^CqsY()4};3-?%hmAR7oQ8Zmt+E+NWz6qAS&&=LEj_e0x;;(B{W2RvkouGc>YINWrQ z)06L8CQ5{l&lBsYoy@2eH%{}1W^gaYh*h&iv29lLEdVXy?BarNT?vpK?M_<9MtfJa z$v;&J38nzv6ncP>XSezCU<%?iX{fKN%D4bY-AJxg8&#Z4i@t>58}n;!N0p{n+5fDr z#`yy9*H;3Mj4I{*yLTW+_`+?-Ny_W(&CPe0{+R)YQVimk+5JDwZDvpqV2OlJULEZK z`5rv>R;PB_X6=kS}6?0tAm%#=L=f7x_+aF+@I}ZnX+Y zy;dS=D=3sx=-m{BZEE`ZOV|yzdj%$ARc*9HbB~``aS`E8Nc>17yOXZ?eu$5c$3b(X zgNzNpgDO0qM(C5vdfOJ>ZzSoaqR+6ixA!6~`SI29QMALd_CpJO`%Ps3g!B_Sv<%d< zr$%2Vw+%s1k~lrf7l-KBw-(M?(cR3UXU&C+n4s zbcc#pPos}N*+2IF-GRQc+jsTm8{yW3@ka`|oxq9eVrb|l>0n_})qEdjr%7F1*sU!` zcX{&JL9-_hqFM3RgR=X?pBJ;cx6*2bEeJUd7^)h}Orpqxo+fjEk48y5?Ft!Nc>dVF zo~f}iC@#Idy;0vbpTN7&Ma0Cop*({C#lfHy7PWQkN1xb>ZM?d@AUPuDuj0vU`}pnB z)!#)fe?PpYJ^cN9Z;9LcVH!+cayD~+wb5;$? 
z`S>x>>JU^}iP_oW&;tXMfULNwtqr59M{#K4eQ{da01!>{W5u?yr}D1Ne3l;8+C^0A z>HnKrl0L8e?R^sGrSy;aX~IV#(;ZxQU4#=7u}2rjIDP({oKHSTwPy!F>?6j-ICy|Y zx{hGh^WaMQ_DryC!Jwkn$*8`tAU;_%@?09(?|^GWVPX=2y2c3dI-ACk2`0A7IGW0= z%9mo<^5KJ)fq^TA$WaD@dYQ6$15b-mKru&+3Ms>@pq9(-FL;MMev9L3%0g%oB9=Wv zHY{!Kyh~j(UU?m&A%urUy=_}TVIeb(Y@GE$)-S+it12teM`gLMyPz+H9St%e4ulXr zb<*=n?(LuO*d-ddLoJIQHb>SgkDZx$W^QinzPO{}domCPQ;TIH5~-WRi4DzlC(s4> zZaAl*Mf#^g=G7bPDX;+QBasdW32npWBSuWMt6!!W15tC!(h>v5anJ-jd2&u3$G{{z zAYJ33PzH(jQ7X~4pFhpAeOtHst5DRr;x_}q*_AAuJqj2BicB!`z)2mt)8k<^D=2{e z5<)iM)}<0dzklCzqWsueSy2u-zz{2hY2d?e!jGJVMY;BZ+GXgOP-QQ*UUq{93IZ7* z9}r30PYP8H07cT3nHpMCJa2?P@=cM| z0JqH4qceimhw$jfj|(Kl4Uh*DknonIPrN6aoIP6)Xbsrah;is%nwb=6t6<`kb;=pt zo>}d33EDLr+zK*9MO8JJQ-aLH5)}o!1jtf+rA_F_MJcn3B(WZb!AXhJ%&TFz8n=1nE*zOg$hE$S%$~)L+n9e z;n4c)bNe^3hgIKf=W?bjGj1qS_l|B4;tPQ_6YP%ejN4*?%V^2{1Q4R^c;8%0N5+VXCvp49ncY* zhV_B1?RYzr1Wk<6Jnv!s^;%t`D4+vs0u;~ldAK=ovaUBRENXS-={)V8L+Y;VN56-Y z^Xr!{NAxq}L3%nSR-jm;FkG17N0aKelXIJN18-Ho9ddi7-r6yM39_;!JeP4yn+R+m z3JYJ=KO?E%yeV{Pd2o2x&)XZ?XuR6*HOIXbsD-!wB*Oh^-Xv{Z8!kKfJ6#&C0xyBm z1MBh3pi)S}At9lgAcXU2wfD)f@bu4f2FCoXW`T0*k9>`nG~8GJ4S8N%)RR= z!p<~(ei0EHm?Tb31wZk+1AR93G1TRlb6{m=evav)H(|VI@W|HLt14f$Q-m9AH z+HzDml&yE}+-;yRhYlTrEDA*%L@=J*JH^swTHD)e50&NQ9MAi(CJGd7ga{39@dg4y zM{Tu9Q`Z_V&i??{dZS``g~_|e$Jh7cd17hb-q3nfY0xv^(=k}#!%h8H3-l9Kcy+ap zhEN&WR@5T+e^>>r$MO4C2?`0}ukgULhL0T7q|gYD8!t2?twEvzp@V(P9$%wOD1}PL z%3`yDq9XDf6JqwX6BB#M#mid;k_wuy`GQURV_r#1NkK#g$tfE%b7g8Z#nuU4G*(u^ z5YYJS6rPxxBIAq$YHDiIT|S_vF-Q?;3^1>!ufKcYh4~YC0@R*jK1zzp1f>ekb|T~( z&8oxlP3G(9J3^zPP12ypCz-=gt z{rh+9>gB<&?4*Qkw8Ha3nyl|BY{;ufe7)LRs1t7i|-Qg@zA-G62vMw6kWlW|AOOP?O=T z3h4Dvpi~4^9Y7(EJGl-77C`{e2x6!tm}ofvU`y_DjoV+Gc*&m@kkkbWKbead}cbMW`A!5s2Lz5&z9Te0{4!Q+bJU&%L}5j!?Ye zEJr$N`sBiyD07x6o$3fIY$u}ZMUKWGf^R`!ART11l*x>N|s~zR$2RwdUt~E`Di`294F1`P!1;9H3iCQJSab1V_n3@1b zW>eE6RAx3RMq_MoWvf2(N@r@FnQ^amyxbg6nV+1=@`CitL*?pdf%ViSb*~ zTh$}s`2u(cWf$;9kaOt9F6ro$ukF@P^}Fbf1*n`iQw6jcpO~MUTR|`O2j4II>Z(a! 
z47ogGz7LNa0$iPZa9T9VBx56^ozkphiMpwP@F_wxh>n7}PHmQ^AFF)h z&hh)7b1@c0N{U6W6`l`59yRd=q#7uopf_Qr@rEK9mySNBVk+DmfDWFZ`8=p-{v%3W|(zNgP>I9#mC` zdk*g*dM-(VG>tli>cPar$xTK`T7-p17u}&aX8Ui=K)N*Oy`5zwHkAAPffqomL05Dm zgrYX4+Fa4zepyk`1Gd$GcCpLA+c=(SxU8$2Na?kz%TTF+9zZE)o0qf^frApU0^ice zQc+VcPDi1QfgbQt%-y&8P(cCk1^w_mG!%10R)5FGi|-Ad%l8482Z83$zyQ+lo);I! z2LV+g=L2@b@vBA=IP4mw0^6lS;kuASPxZ>8;}{>}-?{TC_&hW*i^I8~rMcwYWeDXp zHA86A0)xId0uAsDXXTOYj%x>$$STOqzAGQV}L+Y8sQ*^Uln;BF`u1uR*{nx*x zRq0~{f;JlaUYG^yQS*6WpHLChG&H<%m@KB7P~2`z-0T^XNBfD}AtuzKa~!3?-e;tN zF0qp**&L-Bn!CFLcQ$`CtJ+4oLc$3+XTE8!8~E zonxY-0sEa5e2#A1LxGQx23=ZaQr2E(e^%&?(yLz#0hZK+(etSAF=B3kPMTdEhteNAabhb0Q^h%5hj#d2RfVF0 zgdr^Y+|3^4-@nWLe_h+gUk~&gF?+M0#z;FYkf@fHXDvw0l6@4RK@it=Y(fkber{~+ zLrU)wAETBRy^k(-$SH6d9SeXnR~Za#)F3H@jKs#pzyd&FI=dDle{N^w>H{{2HD zMq4g|$;8v8u341I7|FCH_FB~fER*3XS4msFfHj{vxsbCD_Z`cFv@0+R>wf^P`Lv>KnX*Mq{z#tj!=)mbPjur2=%x1xz)Zzodd|akW$gD zp4V^21Q5|?Gpc;02$jf3ueWJorV+7j%5^B8cwb~&*6aqt<)?;zb|s z9im$O2B9t-3q8F$pEx8;!dSf(ZntDfM-Aq}Q?Ov5Q<~nQx zCex@WYQRnsGi{^nqkjx^SgQ^}dPikmfwbEkzuu`F&)odp<+ zCaZ#es~$e%-Np9=0^%J+Hehth?&#_3!|L}IoC>keOoKjg}@;KADTdUPIAfy)X&lcJ|Q9BV->zfFevxo zX%?7!6a=XKF+bWdAF2Q+N`upTpCO`Soq-0~MA!{p6Vp79Pa2z;Fi40GQw$k+doY%@ z_oVmX@yPMy^mH3Ep^{u5^51a;*FufVP~ndG)B5V_*R^F5kj#JsqTgV|qb`G}^Zxx4 z3#?IF(#IlR@))q4YC&8tiZvoH5A%}zCE=U9Yi#XZFt@bO!Gj%*v)_oLWnhPz^coGX zhoG6vauT5TYyJ5Xeb+0zA2$DZJ3}4NT}%=Gs@Os(2Z;c13y%uXWW1Q`eXW_log^1e zFo1oNB1aoa(hjty7(L=%;oMN!h0nG9c4^{DhwU**jCy=gf31Id3ELK57)2=>{r;gL zAx0XMY7hyIIr44xK)Fh3gO<#*a9H~xbc3CJ2Kd_JD$z#|9Rkq-9ho#u*tyrBckW}6 zt#>cUQ_6;l0eI2%-cWd#KtINHj*k(2%XVp6Dj2W>$f?#k+tbjXvUeJh>cXL16qWJu z1V`P5h}hWP3F0QyO-a_iyeM?oZpMVbY^D6?b+0fO>y+`Xt+=z>wr)ic z@B?VG{pzGGW;FYKeSPJ6v+BwZ>t{YX_xe?2B=q1F4V5y;KNzkfkqQ8x5r+u`#C(7jMUgO%Gw6tSI)lAatK%wE$&J?xmD*%_-^Pphr>S!Dp8&vl`Ll#AqYf8Nd>LIi! 
z`ttAHN8QgNX;4tb52+zUSbEw~Fj76yvzSzXZU7)(hs`OT;M$>=<*~XO`%zY@?W)y5 zBcg9;h`>J#knnA8a6M-GQGe#)%E04*(%z>Y4yij5Gb5#MXo$vPsFZUZE?>U9!He9F zp?CM4>e4ze#81{a0Pr_Q-Zm|Uq3C7WSU8$36yJE9Lv;@%@q8uV-@fDY_F6PAK)lU; z7Rs8MboM?Q`Sq)b9Pz8LL>R}hhQ`L?C{FDu>n(~2I-nJy2U*xy_vHzKKf4{py0H3JKqJ7I7V>U@7e)>Nx{f~)6EUESzhDKS7{FifC;kp#4Znc@g?iLS z=@Z05)Y#s=`8nTZL4F2K0hrfArWQWR7ym@n15vIvF{ zbl%m#%IIffVsY?Gf7{#d+&MV4yNjF}UmVoqJELU5+*a#p!9bC-UWV%xVgvt80s$)y z`c1qk6m*N^i#=ml7ERa_Cr+MhsbRcUibDoYShc$!S+XAceOsbW+02_GO;F7iTYYt; zE;C+z`Q=}XZD5|+wEdJ;WIRliAqv~DXgKinf|Kz~JI18CGt1ohiGN)Gwi73N9y*S_ z%E>`eO@Y9p;ST|}8mPUV`uiWtjgsqetEbTZ^pA87zWBy4Rq2%>ct%x9N((;ge;zXl zL`H9lIZI-i%cgz`I{N9;?*Q3Mx0YLX=KKY_isLCtp33(nP0pL#x%0u-_srF+udtj; zST=ZGYV<-Ii^aNMU(-16esEx-*sM6;y6S6P^EkOu_uq_RDu8zRTXD2-N{Y0UmaX+I z$JyR%(ynrsb;?wr%si|1+ zJ}RzGz4KR@9p*=|t6w@AItZCrWj62}N9F^MS%#6S?$~vx?;wx{LQ-)DH0C5_ES#B< zk%o@W!QMW}sWr8cCMxMCv0^1lQ^HB6>NcZ5~d@DOYeM*9yBhH zvt>UW>8l}RB<;R5o%%;*0w-~2BYEF zgjyIr+{O95xlWy&b}TU;Pt{0@e@$}YVGn}Dw_#*yk3~8zEd4}9gra|E{t}37JdD|Y|CU;RGpt>O zDKUKJ!ODy*Gn)7MlN z_q|_S&8mzsq2WFlbYZb*z2HOEUBpDlIq9UnV1e<m>&qg({Xw+Mj%Z=L+M_-j}`Cp zP(U`{AFua)vaiL-1y+4#6Vo+;BzYxvmi^kZ`epaw3i9mPGyIEGsGZ=MI9d0LSFXGa z3j=|6eTeIW1PY{R9GHr-aofMn$#oo0f$azUPorq7ps?^)bbh_jCbhrFIgI*fJU1~D#&6_itnr_c~ z92#S&M+ywQ6@%`~9cs{O{QCX%sv0YvTg7gNg^^}Rv@in(FhqopFu`z8OsoZ^4IDUt zyKixRz%W4s>xpCmh9vHF9KWV)Ej)TP)66K$$Y@s=tz*{WlbymRlZClc_cAK&WOUJ1n_0i>G@C_} zAjtn>SyoO?qQ1mJYlwb0Hu~kM{Wv)KS4F9|rNiWX%~!|&0%Tgx44T2LYTkh1{5Pr) zE5=HBw)8$-jEn;>h*blMyBhoacmG5c%dHTP;3sbu3ijA<){6NZR*w4pVCvhaai@YW zZ{}YwpW7$w^KS_=wg1lX|~%cxf=rFC(lYxwWqddZn?{UZ$K!WY!$6Akx4f-Fii#JVNO`OAV z^*=65o<6oaaCMQlpuvb{|DJ8^N|luZH@}s*PVr<=DXEKT{7RS~d4iEe2;PHs^Jo6P zj`_Gl2M&vH5`=`NtrQYHZ0z<>ZWOo2bIC@;=`klylqiZDW0X zzpfV8Ju^j*el`85`_3LpNve@o(DmOvdWfH2(NKvwoQXGZu6N$1j2cv0BR}4ZM#%e8+P_|--=PN$)ZE89bi(l8Y zuaNWkW5>5)4yWpMGzcLAp*!ys9aBUIaF;=2EgPHkx88z__xLs|Y8ZODif@kJx98@+ zFD`2+!wqn{4@`aKy`9wU?Pq~7V3y*fEf0fZ_Wi%)1f93YD)9Y{Ao?imrrWlyt#?l% z5DPkvDz_cGRW@^Msjbi4^0dEVY18AF3e$p!Elf1K4~Oq> 
zW|W=tmsVC_hHL_ObbpJa&12*dsJ3x@)bLsL5V?QzdTO7$B0SqRqZM{h*$Dpj%iuES z=n}Jw2c5C+x9o9D$-*M1t-1Lx`3-Uoy6hlc`41QdVDX4vlLuDH$r*(xFq{0kU-_o) znip-#?L)!^?DPLBQ#tns+a0AAV;uQe{x&taA*<=WxQ@~AHv6ASQXYPezJAI3KO0Mp z8yKBGVtJZx^P8$AE{;aALtz|TT;qd-KgpVqJi5BL1o6ne@=JFLWwe5@0!$1l{o>oU zk|8|2uERg3-`(TUSIFNdJe0l?<9aH@Lm^|^nVZ*S(qG@`XzLj^aU06N7E(dOFhNIE zX)hJnm@s{~2w9#d7$Ef&`3+hI9l$y3EBATs+!(7N8x+_j;F&XsXDYi?FI}0`V{_SY zHK2cUKs{^D4zjkg5n8==(x3KVs`tZ>M)%R!nq}!9W_$ltyFcA;F6=|>4KGWbEd4ZK z!7k3uAQ=|yTd~!z(B)h&k^0NnvMeff5YxV-C3^|t;OT&k520!iMF_Ge&UKlKioPxB56j|q# z@jS8eR=UWUOL=cjI)-+$Cmo9vUv&~xJxw5Jt!I6&Yq|No*DKn`Ug+PH+xMqWqk`8M zmUlQvxiuGl4?Y^B9=Mj6mT4L$6iZNe+0G+V5T7A71?VC{8UI zyty&5vy-~7@YT$Tb62%`ZwwWGDoa0oTi(G%nAV0wI5MklTahhXHZ&7P8Xo##|#+`lB{uUQTO| z`YkL7gygh9VO54T4b59gt;gqgvfGkwPRP(NKdwrrCg5MU7v7WGFB=yB=FXJV_D5u@ z{oCe6f0@dk>a)CRH8!Abq3ji>bMW;Jm0kAIetnLUEKDq_E=w6-^PjaoPZF4nnU%=R zn(Z#$a%;LiY<9Xof%-#t_4e)aj&JRR_OGwT=+)Y{=$)9ODDJ- z-jTaL`PW}x;gYYCTb_Pxf7dK7sN2Q6!gNz@6zQkL`u&G_#Bh(vv?Igvdle~WW>7iE zxXitc>-yf%fFm)!Lzafz`Q4zt$JX~MPHmw8>as35m`?A_>oI8gUApWd!oF*p~p3v4%+IviFJfklnqw^CSEG`lr6@Qxp z^&A;I_2R($|E)q#CTnqI3qjIEedXB|<@6={?=5S*FV!aEr}`2XKNt!M z9XVEGlb0N*anIrNCwV8m9kiFM$;zkoqN3N6(h790=taFv_?d3kaDCtYAL~)8u-uM0 zcTh7Zh%w@Y80QJ8HBGzQ)*7Vu7H7{hwV4~TUcB(T^x=y6a-~(=HAWLZujhu2Lxka~ zP-^WD(GuK)q7O<{buW;fhWe@32~L#-do|p-aYku;Tnv}6yxU81Abgjhm z$X7KrB-hj|xrjUq34bYRYi91|Hn-YNO*H9u+Fxgvd}7!jj)QY{c=R3&GF2JL;{;Qv=zx%(^X-{0Y7EyBd;fiCty4{Ug0Ulm2Fpy?cLWqfz^e zVwUKlg_XTHfe&kTxsT6q`^y*47{6ZoNO%+V|F4|;R`*ZMr4zQ(&;U<9CN4hKcm}96 z7L1LhB}O~VVri#($vD!(1iEfWuP9~*Y=)aO*+Ta?Tk>kD9r#txdHM#X!^Xp`PBz9= z%JtXOSKX-g+0R@0LxOiZ;U8&aU@Z5~iof!a?4h%2XSWc(JfR(akZzE7Ut)*PwPP=% zhBeifIXbGW0@OZ*2bnuAo9O&(+e+{*aSy66{qXQHTj&ca!nfJ0WnX@77>xMs>LN#G z-xCR~_;yaqZt-e{jb%4Wzii5@tNouI)OzNEh9AC_yu ziH5o%!Nq^#1s7vZg&Y*&%jg`Gw)(_QD7SB2kiT?z?cbk@AE#prYi|B3%YTj{g=YqP zle>&;clwTW>dpB2P|0i9Q3&#hoexwZ=BM{0q z&r9rL{y!~1b`|49cq}nPE=SI}wox=k{IJk$iYk{RY}S6RjmK1{;Vuo-14^^D#tA_DrJxnT z{_wJ7%FYa<`iFp+8)I*?L#|qPZgLzHHvdw&@A3I<1k2GqlG^Jj1QRzKP15^e0gdm^ 
zj_n|n|F%PB$Z!7JJk_^%il%=$K*CzNO)Igu>zw`Vn+Y-!6Q<|Yfc<%qgMI<=6@B576G zA9XLJgT=MwV&8&_g&c-rA@8H;cpIPg0`5gjM+3MP(wB5S5^^LvCntHkU*$l_sdjBX zEW)>ia3Nv}vtW&e|Tt}CbiUt8}1 zj`iP$58q@}GD;cQG*lv^?46KNN<>+yWR@gEl&m5vvPV`+M)oWcimYs9uSoXhe$V@P zp8tEi?|Z!W@fF`<)4|npp^hIPHIz)TgEI zawoq}YJ2GWVvWXq=AS+lu`K++DnG&VO~%#r@Am?=$Os2kU24A@FS-RSYIdqeyJ4)9<4rAW>5;tS)IFAwt1DPhCAM9m~FESCDc#6Gfosa9W(Kz<3 zUj1e1LVl;&C!=87bK7Xq%UU;|biHnF|8YD*`&zKKSNo44xe8~c^yZeK@*IO>FQev_ zIDehp+$V~Iz(F%5cDLg9Plu06CKV`qZ#!VIo1ie%xBfWM){=dFZJdBX$&>xteA4+KLY89HV;qrj?XUStGY**;H zi@Iy_l>dVJFkr4Y+6tMF8jV+V4pCk0dqA9AjBN?(K$B{`koYUl*6qg^>)LjH|7svF z!3CoHjT7B>zB798osH3MoA#fHpPwW*5PSPwx>oT&&zA4qOae8(u?hv@Q~trf_Sdg! z?IkG8_Kj1L$F=Vun8sxN-_O;5a%4*3|M^_I|7Z|AV|kNAvt#Ytm9=)QET@z2hcbTp z|9+l?PY9kb`_2enDY(ocG&lPBZ$yQI%WG*hmjH2K=e}bwjB7;O`Zl{SU4lXyi^%RFl#r$sb#&5u zF1C>oP@DZdb>akT!01<1Ge*WAp${I|+Uku2hMh^V7(QMR#DIM3x0~~b3cMY-* zt8WZF2%XQ3nmQ#fPj8+R8Ean_%L^$n8}l`JGm-VyoI#~&z1UiHnmaHNOv z5o=5Jx^D+mee#4>m&aSWDDzdj`obj6PXEmtIZ%D<;N68U0yZTIf`WD5HOBmJ8-Kp6 zzIAU@Au;lWuciyPoAVmOM#-0qZBnWS4cL!MuE{@}*S&VFi6tO)$l(9`^kgsG<&lva zv1VG2dmL*+M1E;r;ZVNFr&*8SyX?d)!eVQd8Gko3^wWgi*nMssoa9EySsg;bFGc2d z+||#ZgunLbFJA&P&bY5Lg}w|oyK#z*V0zL@Y)4@E-8G7RzRFA9Gx?K9&YM^?&4-zt zHr{7{I4(YYbe-aWULIbJlM$t4S1Qp^_($~Zt+cB=2G;G=s5b$lz^bKHzJWv!O^~~) zs5pRB+2EMRcfI9GV(ZVJ;4=&*cx+w990+;uVkK>V;cFB2MXYbOO7g$>`cn0kk4`QU zS1q$7dUG;LqS^lF*!4xmgcuqI6Q^AJt9GWVK6g4fW&Sm#vE!}Pt)bJ^yuT@sRUYOE z1N~z7qh;FbNawQ`E-(r}Mf&kMFQn}^3R+v#M94*X>LA+kgP7;LF@J_pV0k(!>^x6qjaEW(@71Rfa=E`Bnl5M@ zlwTM&ne=kqpjDRU6HFJrJMk8Y_n6!LP0jVKRhOo0+sP9w^)I~Rv8H9tyF}N_ZA`tt z;IT4%BW3hP3dgK*tAR<4yUF%gJL!aNsuAri4s*+mtLb0AinVu)rFc|0b}|z@zbp|5 zNV^}Vxlw{)TZyulf-d3JiF;NdFiRf#>YwTj|XEbZt0I|XSKgC03QV-LalJj zp}n@*bOj6wOg;eXMp;P-G}7VoJTz3u&UkfU zBl}5iec{B%YW~#egn4Ghmjr@lY{Bci)RDE>iV)sb7Fs7@W~oL_ODP5#nIHBt?&)(| zvG^2kOa0J(##(Oy3Y;`CRa!yvSpx%gt{}jp82c{bwdQ zs8ln*%w5qb|3CRgd`$kfPbl{WdwlN`2st~R${qT_Ya}Pxw>HwVlq+`1Tc&td^qY+a`KL;dmop_C$Ai%yhSFO;_jGzIy3GkMUNA8-f==2? 
zFZjx}ByA2{BoGNevKJ8cG$mc^0V<(Ba;hpF(C)vy-rCJC0~tspZUh_^}Fk1=UyKXLQQ>5 zhwFB-LBhl(^-l#qP#c_VD|h;Pf~IUPjPp`Edht8 z6!)%{*$KUQlMbobK=i4Sj*hF%$jirKQS&8#m#1I_F{W^y%5d)|U4=xjwD7UpRYq?&HUwj*j6M zr7u@hj|g|l+??CEEK0{A^|NQEbQAU2mOblTdou(ZQn;e_9;gl#$zkJLNF5noPZYIJ zU=t7jiCBr0(uI8X^QxzGy3Q46j<=*+&&_|ElN0djy6Mk1&*~KS{OEh(RW$TYe8_(h z&IV{UgHcyO{IUG4`oD8X!I*MeU&_E-=ZYBR@x`e*WQw4hd_;2vM26!YUz3_G4|+9M zh20Vqd=SylQINIJy?IFF67Fa>Uxnr7sdZ1k$U=XuL!$1oN%|ccYTvo7#k(u`n(cZ_ zeZ_Xg?T{ySaMEcN*2h|#nzbpryQW&mJ02u>)|ul3Cm-MLN3ri+hd6KbfmB4p>47?g z4?d2s3;F)#dCzGB%73kK{;5m9)7E1==(HgmB~zF0shS)m`{9IC>|a`AmE3oaKCiZa z-xDtBe5jXh8dn#Da7y&O+EKNBRPa#a=Uw5U;bEcS zlU}6Ikv{R1!P)L!Pt~5uY^Xdz@zZ;^V*J79vndq)$0Q_H&^_54-FPIo;$bN!B`n<3 z-MxmMA3*uxG+P1nH)&tvZ!9oY4ppb7n*1*MZD3+A{Ua|ae{`tzO!#%Ce-%2nlf6t& z>=%!&N*CGj)Xl1ZOX_1@YE*Ok!BzXEpK@MmmUcGd#M{g!1CtrdKMMy18+PrIxlG0f z($ajrS?lY15T($_xVhK70a1>mLo59rA4+hyJ9R>1BHfg70 zCOv-?Oq^cAgxtO}%M3ylcm+Ov>ds|^i8?3Le=&c!@p1D@(V0g#LzAoIWgqY#<9%yw zlS3TbW%uy%4)&V5&1lV1*^OyU4Kp&#%Z}^W+yM29;Mqc+Lw-Kh_odpmT8A#ngb=-R zQ)ZKHWsBtO`Z7;+FD52^eiN)ffBwRS$>0~hqPTAMaWO+m@5d};$0xiFupV0&*uvVc z6=0OONyh&3_AOb1V-XOofL@PYuFGq^^j+v(O#><*QJL@D8L&30XqjB}pn)IntR#nNEVIT)qcLa-Tu_m>u_F?DJ=MqAF=+{8=~x1 z8GRn_Q0^+YA9G=e{n6 zMho@vj2bvcV8SZ=T1@SJKYOK7MtNroJulc}eS}PFE;H%Fc=D}1%*>JoU+w4zM1?yTVOY zn`hIp_g|BX27e6>sxlZYgTxW;D^pwKTc6r8d+-$AzIh{fIVWYTXHqK5tjedhK&YhQ z^>)@PGDQxGy1EWzLA1+YOF;Dez`$+02*vBShl#*X*6vL@>)E;kVK0iBkQH#IXO(bs z-70+hD%STNGa46kxB#dvw{6=7-kRXi@0s_8u_&;rLBs~Y4Oe@IW#*Y(AL@^C27wm% z6;cA?as_cws}f2Dcv&6aZ1}sItM*e;E;n)U*M48R&4zbY{w%bk=sYGQP+#Z&GN;0A z;dyuH3(sZNC~Wt6jYJUkAWYP!BdVu2J0I9nsgL7kI##M2I(80@4^}q|s0~902&dYU z;jIBa6p7rYH#4XY5*+etC^ahFRz+OJ(gS-o2?g?*AX33duMp@M#9GshTX`;G-FnWwnYbTjs^~0WP3|)U;obg4P*jqg!TMn9GTLAB_q?Zo@_9G%z#i>M8>Bk(al! 
zzCJ)FV}YGhGZSLtD%j{i#|$32dtonzI#fU1OIA{_Z**(enAOj;=Z_ za2GVMIIkE6p?iwvEO_D2ILsu=-Gdwt8VRVdU>Un=(Yr?QIYj0p!%1?l>;FW?7TE5~ z?!!WR1Nxg&AFi%148F;mFHCxw;4U#U-TX^*-N|NowyWswp@e9b6Xs9gYLQ*BWOhgC z>!o}*D@tC5w{P}(xVp6Ky#V?7yzZZz8%!@X(LWyY4GQ{TES8%Seu!c9=Z=&{{?pzZ zh80$oU$%o9HCl6xh0HLNhibX~+Tx!!^dccW-7R~Z{@2~(Cgr8DZoo-EO-bqhY&msK zI-x(1NKL>n*-N^*-O$xy$3Ud8%)G%2W;TQg%#HECD;%}^dNt0U4nJh5xJI_`**#(` zG4Gm({qn3NrZ*GcpCp(@Lv^PH(oTvC>Qk}{QU%@TJX;x ziZqsaSQx5nWomYyK6zIP!`7^*c(r|N^y}b`Qy)zFitkxJ7z?U4hY|;5)BLO~1_ERa z&N+vhuvsB)==)cj&K$JeitSspVej`RG*8Kc`OJ2Bf#m<6pCc>V_k2LA2g)i@aDf^Z`HU@K9S zb^6_@;vexs2L(c=0MQqyw=C$< zKxzjqbVdeD9LLh`!5_rXn6~!z1Kvh^H{Y1;_=)1Q8 zg0`Ax0^3_og@%Uj9h|P&{zOzzt3f<`FWP=DB5S99T9Ahn+W#|#RZ;w*UE`97!fq>O zvJrwtOHk26_=Pj*_pKf9lEoI_fhY+&pWhmzCyvPlzkHdFxi~NtLwvRy1`Qd8G`35z zl1IuCw0vf^<6w>|S_Np!$IH9s1XWH8awITcf`fT5{8L7rKQGY zu7EcVj`PgWldc_n3n;N3t0woAJ*Wx$Tr~>R4XQpctiVGk4X?lJ%tcgI)7EwZ|6Ww| z!Ny89RQ}84X#c=2^p>!Dg#yIx!4VOo1ApL3`xN;ImM8wilVA(P?4`jOA@Iq|p8gSK z2>=?6plkewdg3YgQ)u{~g>v=#cWXGV$;L{7LW&b&qtmSBC>$3czm18Bq0CUzfm{mv zOB;ryToG4x?f^>|W2%npd(MQ%8-+sZ2YN5^bC54FzD6@nDv#^u{`i@8*WR)R9XOkz z8-$obhLMHr1| z0s#)1)or99BRIL7rh8do5}>Ddz><=Oh<97l(_;kT#rgBi9Ub>QDdv6lBj?;kSP;BT zH{gRH>FG(p`#{Ksl?_Puiq;ep0w81IMj>z;NUDdMC1_IQk7j0v&Tu{*IqJ4RG&pw2 z#iag;t-BD%8ce2@->f5dT*uK#l_R+=Rwp1wexX1P0LQ>QfD4s-WvXyhdrYNC`hqjH1z$%KeFdLpVG3Kz()AO!(aWwu1JHu#BB5+r>vQD}wvBNXBU z2pa%3f+Y_9Oux>~M_yl)AUg;#P;+%HP84T5u0YI`uWM*n#8ic$S+)9-;$p)lHbvJO z(8BLX^uh{a!cj_x0=!v}`SG$L{S;=Qjjx?LwK7fQ86F+|0gG&cFf>rer?ze}iSUz# zL;3lE%?-9(Sa>)k0X5RcbpNE&`?wJzah$__6(bjrG|mW>ad~c;5QAU57@nB$|7@C@ zm$!|ays)>MhLHwomvYiMkBwPsofNo~!Qd0bnqR+wN;va=Oqe)Cq#r;$1Amj?34a!7 zu@`Q(1}AB(-!dmCtlt{k*l<&{KE;lpjwx`EyAcS~)V>%gW!&&l;UVRk#|9}0Bia0# z+S$Q^U(2{Ukb6@d)23j_)?}Frx8LO!$Ys$jI|Na9QW7IOdwrRLVsqveS0&DV80XOt zvhwpEVxYsPt_^zDDJ=16ZnNSGADrH;lB zlf)K?D-?60H=t>qxmj;I1`r0*RG{!d1R2p~iqod2rR8U!0dj`?v7w8h0zSWA#IA{E;E;$Se3y0Y@N2hj#F+#+!E@C@LZCnwhjs;DL0TkYM5Tfd-;^sT}~ z11PC6%2m-?EX8w+t5GrH1Pcob$W|b#f!Ih|{L(ngB0@uRFv@Ap9`c{RV5I<{fG8Fx 
z5Nx7lbk7CT)Dx94is3gb^F!2BlABRvInVTy(e74oh8GV!tqo006EScF;z9!9S+(Ki z%b13NQ41dv6BU|v(78kAkJS&uMliqd1NDSM6d4|dGfhq)$*Q93D<+%>SO#*t`)Z^f zH_WB+R?4_q5)q@o5vPrE!Ur^GDldpcpaU653VnR~(-s$boHk0SqL`TdR9eNG01u9NI1%*vv45 z9Sep&1^EE#DffLX-QBM68Uc%$ovr|GwiqJ7X(hqH!mz~^gD?NDbH_FUhGhJ!Aa23E zw@?B6LOlPj)-P(T=GZMuVzZ_uzfTAwCKe$?fZH9snunLX-7r~^c7a%k@nQ7p*WhJE zXpN*NLxhGPhJs*F?074oRS{Ma>#M66&c_qdpES10Lwk%Eq@}9*6(=Ls1Q`L;YUq>@ zz_6_zx9=2rk<>bY5I4WPoFIMwc!(MW0nVb3!|;F9gvcauFS{=HDUnSxKU&xZfdq%so&p`IU1I@-hv029+VOW=6LC^ft(G*&GR=`y z!)t$xy$^JiyEQ z_-DuC=l2Q_Tn{F*b@lXktj?wCcOW2u=~Ii52i{6N>f=6U zmtB~iRg-(|gRlepkE9y+^}X=sA}$O@RH&#)6DMJS8THXiVMPPhX^?1PhXQTK2evza z*0CB;BcW=8Y&a)_qUA2Myd>Kyh!v&c%5kP}adF{|D;^bB@DB{+5VvCTS80I_YVPrE zAqexR;^JjoLY0z^LE8gWrCL8hk^@Wl&xN&F* z(GGI@aAcu$*f2x0&We)~O#o+M`hlYlc0SSY2m(qK%r?kCF;3jnc=)iqqJkKHRA1%v zX8uiSwUsTCREgt^5A{SwMgaXB9$?=D{#E{+yu`@KaI}At-aZd26S85VvHZ??p(1 z4@GHd?B>TW-@aKveumfaxcyFlrG*Gc@1)0tGr*!pi#~%GOpWg)y>4Xepig7U1||dz zj*J|@msqB9UxD@hZ9GRDy$JJs9_#qw-_lzo!btS+fzJz^7GUTjbUBAK67j&B*K zV)y@iW4~MjZi3_ym1O3tz+4Fd3!-Y&V9Zu1e!tMSP)|v z>g$utv;e#9#gfU@eT>LOGxf;K#-9H*E^bQ$4+Ks5ubTL@vmHQvj_#-hP%sr3}e7D=f?ii7ZMEpT7rV z%%+JTf+9JCBp2eus!uabK+IuKJco1al~7UjQW`@uDwX zasr|Or69%yJg#^pDeAUaL5Iu2zl50%hxqyNC;0ow$Pfw;a*%yN9rWFQJRN4pc z-RURocQFwRZPh{2rh{hu36B+pNR%5)onTx59s^|J*uxj_4NV%euu6@<@6R#8WMlx5 zV*tQ#&eTdDX0k#dp!_hnX<`A~A}*lYe0zx4LNu zIUHacu!6D3uu+pR?id<;fFjb8lAmiF=^t&Gi~)g!z7GjKDa}erft^p&gd_B$Ha0R) z<S8w$u(nrGvcsqyRTAA%_ia*u^{jZIBtdf1Cw04HJh(hw&NnF+JqJ1!{ie!{fy zSE-Arh={JT@-;wls9&b6K00BtHz|aayYJ`Z)J|^3co2pH`h(Qtq_c)mj7<>v#E1zb z)~LMT&Vk2>90O4VB`-dZ3Cw8nQA|*GA9{&v$J$m1{hoD->0{V=dSwI<_#-&O2Nf2~ zfV8Kf$ZT)Fgtzew3!FpN^@Vmiag7mSVtKhQ%S@SQ-9``B2v9A6(BwmjhUJcTf?+^= zb9IB|l#-hPCITjmL!8-0xKSUO?J)8UHaZ(@#demKD**XP7R-o+2?>~$DvFvHl3tyd zle=!r_fw4%7a1|?xaB$6^9REOAmGG1!6{>I{&*u3lS*v3>KMv(uUuhAam2x~35*tV zGf_<9-4TU=ZdKhLq~>urx*a%>`tXu}-?_uEqV~ic zmDz2Lcl3C#{CLP6qFxq(naio-=sQK1PugUAxxcq6FU~SNgL)_LD)Xy0%?vsl9{r776#bLbqA2l6zlRPxfq(=kKDm)_*5jfpI9358o 
zW#Be+(>C{!3z+EZzpQe)Pm?m1HLuObX@;QVtLLRGH+CK0i7f;G_So^`q#gkr5$HUw zAjSaSprsmuagSFuCG1^)HZ_f#mSLJMw8e=uL3;Z7C545E@R?VNNU@ni?z*o%R>djX z?%{!f6NRrn2IIM7|HB1v1GO=p170K|Gsd@F``4i4R*nS)MJWc(Vco+(64GUSA_o)u zJhu|Kkr~K)Y}Mo6`TqSZ00;a~UvFb&MNYs^mPE{3+*3GAS$ROsl3|uI`PD0o55*+| ztztB3`uy4K_14T`At8Lj2BPPLqpPn~7JPzeXlN2!Ck`|H!H6*gDW>;tWxrq^Fve}y zS?~(s+=+?l!4{)BGImRLv#@wFTF7)TgeNPW57rT-Ir*;eB9T|H^pp>su0z}myy z?5`FgzzGpIPTU>U#4|t&7Zt^d-M(+%5>8X>V%*~Y(owX#kUlh3|MN&Bp2`C5$QM?J zyhfgQ0Ct-4@?2e$fYlP;y^|g^3BDHTMb&cZ;jeSF1eWe1D!59+_09=c4_+`tDoEol z;ErJjoSn@mN#?Eeze%T$+ddwGt>y2Yo|w<3+(PFM|AgfeqT5k}jN;VNQj(7eo{WkO z8=Y?tmKn+;MAMb!K98}{QIcH)acdJMJ&)U&TKKRAFjFgJdoKVmWR5UD$MbBAmAs&- ziJ6Y}ScwP$IIS=k3A2+hSLBSF95TJg*E(4WM0*A>{9xJ@ylevVa}>LuJovn8r>~7O zDXkH_m0+l;tmIWO%9dMDz$J2B%~Grz>jX|uA#rgTFyfDkYgl^4fHBB`K#m<=bcLG) z<^bC?0Q7;ShyG4>Y8v2CI3SVLStpcI`I53asFTrBuB@p!1M#URo06*_H;1Ug5kivVZgmDK8d18Bt-sLH>)>zx*2wvWr_uVj=9M5?2XQgJH0_xzYb>nw&{ z0J?YA&|n3l!t<$n>p`>3our7^KzCgYjtZ)nJ>7Xq5a1GE2@I^uYE^NihM2p+-UJ8F zQN$v|u0vE_D9=!>Is!XUwEnVvhxg#rRDFDdsh1Ll?M)%w@=;!mP}IWi!ucevGJF77cV8A zdyPTP5XLW7T#_YGpAnYMEYNS?9wl+-RigNxP|W7RLWb{;c9|^_R~#;6oTcpXfW}cc zV@PYc2T@K^lGV3LQbNK2zw_HSIPCy%M6Uyd-w`kay1IDwP@)lgJAEmUu;!thfHi{i zGa*5TH>8_rVr=|8mXsKyw}(nTHMLp}LBjKqYi*`5O8{U;Crja@N1xSMz(IF-tQ=Jv zfS1I>B|j`g;Ji?4;h=x=c7KT{;wFS#Vr(e?)3D+{eMp1gPa(E~Zo74-scFqb`n4sN7b*{6^5Z^W{))Y1hI;$LiyDs z<@5t5Fd}QsDmw&v;5MU7TL{u{{{$fH@L@|-t#I|;mwP-8mNb*Y!z!t|(t#V<=kZk` zYEAhxPqF`TA_G7~4R-LrffG@h~1y9Hb61!6mSj~g5ZI>hqchx4Ibp=Kp}<&L`?NPkTra*t|lWOP2TtHx=2)< z)L~08498uBLke4@izT8StURai`!T;|r{PO&KwM4^4mgF@#OHj0v$ANe+mTE_(d1me zS~GAKhBLdXgJ9IGsR<+#&lK1yX*=cR5j>YXN|9s3o^Vg@HwI(!O?fpP0Sx}?XAZ3! z#;$-j_%H>w{1;{mX>VlzsKiuw8AqMie2|ua`tx_P0zUwyHvpc1wWT-F$-1(}weQ(! z3|L{(z;E_e5+%D21I6R)06CMfv}f-35#38uU0se05agY7evZ- zxOZy9w-*lx_7J!=MDaQK0OD*o3Q8$61k}nY`4us?HG_ewvSYr z+@Yv&Uq@Q})l?YQ!FZ+#M?b1jh11LcD6p{@DzXX-BbAagBZZTC9KQh8K}f?S8C(OpcXRUHXy^jaA7LMbUzbs4XU)-w9^oN;&;$# zCt-_Y^Pt{JZ1bIYhD;BxZ7I89b)o_*ruDYdUdI>3fIlJtBZq~J@`Ph?KrCqvZj)!! 
z#3&EgV&ZsmZF-f?rIXGbZSTt9Ttd|x$3}n?dYKjC0kDWtycH*2j|y!0mh@6^g=-ymhp1s96zqbq%79hZKIN70KJj`&Y|(LjLuA{{JwSS^?JF`| zS4BGSwB$k#bWT@~IahU4OHq4iZ$$+pi9q{fgUltyC%mw zI&^rLx4M+3&wx|XL1v64puiv^QX;w*r zOo-*T*TN1wJs%nvLFMK$`7^0GMD?;xNUV zd)&?}Qut_#%VJzPxf!;0gG`tXh7aF)W&Ukyu*O=~L^?_S$a94R+u=XJpLpB2T*R|| z=Qz*h_NnY76~&uw(n4eKvg$>J~6o*#!=)sxv`OS?ohOBYyI9KQcG5 z(1t>^tas$J06H*Z$~wf|GlW!&(LOUPD_A2n=N+SYwgEB-jKsjyMs3u5b#>c+Tvc%1 zi7isaVPRNQTpTU?v3HAzbKU8oAr>aC(L(7mj?JnKk=QHyj$O6oop`G@i-tI+h6c*m zAxkE)L9W#RKax1q0*{USWL5vVBkZM4Tv8q!mba{WCr+H9$%1t|Jozayeh|=&1!RDB zcRD)7hYsDmejU~ZK0Z`S+bzWmQ2=8mXl122SUpHOQG0F^PUo{YOo}oo`-3muMplnw z7}-GW6Op+b1U%a>h_R>|M8X5uVNpp*NxAgs@niDd{9bv-68|qal)=F%OQs=s%2;EI zqp+zr(;9$9r)Z7`r^k$v5^756M}~$uwx1US!Vdl}zjwB&n1)I!PWxckT^vn!?_zve zJTe@Nu)2KNU&bor;xQIl9|dmG(6jDtAST_2`xh?kaeWZ{kZR3?fkq#gOm=!GV+Z1K zA@DkQJt-vtUWV6Tjtyg^NC=V8*>e_P^USP@jt=!|R{ zZuH|sg5%_D35SJ^>{}Bit`}~bE7MJ?c$vc35{i?RCO|&a`E@bb3jj4P79Hk?TN{@z zUGkv_Z?+V3@T?lZ3PtjRt_2K_F%cI-naMnMC{fVig)?lm^!C=rUzz^n=`O+dGHVUq9RR`Dtdy zZdG?JC|ZIAHvCeKoeu59tHt$=4G-hQ{Zgg8gO#sJ9qSrnyDvKf=ud>L5iBQ(H+Z%& zU`s=?jse>o$EFq%H|h4mQ-Z9c(;~6WP1}Chb)l;OV6@A>^&#c<-J+Ymzh-AcgH}`V0v(mt`HL4-tj@q83fcdI zH=fAE!s=9x7-2Uc7$eodb^#|3ryB|Pn_(0&K#z|?goH=`l~o$XOC))~y}39X8Mgpc zQ_T?L{d349Btd#O#6UAgRN`jvad~SjEnR{$^Nt+#<+liE7__p5K8vK3y(ry>{)z% zSi$&*wM1oya8fRd`sTqV7hB+0^K4qGZR)u`s>%5MSg^ZjX>m^D2Yiq*;IB*Z6*r{Y z^@88hb1HH6Rk%5O90*4|?fzSbemW2hbse1w$BEsI;<%GTVRhJe_~2BVoky1v>;}w{ z7c*2*p$Jq(UxV)hC!OzwOP4so9>bA@)C0KzJ_rHF?$WZF(<%Hay9md|feizLCI!@Sd~TZlhc-jetJT)cWU4wIe} zVvx@qMKXvHM~#hVN@d|B#~Xt0V>knkLfDifH(TPFJ)eQ!7!WVKqYNho|Jx6_M2`Ju zp4zs`$v`$S!q@;k{Ti58a6X zSV2j6fNnQ3>=H|81B<2D0)48@L#$+R`M7qqu6WQa5W6v!UXIJR3cC+gGQ~WBKx%qm zYh82m?)u5O+D-*j=0%NK60-1``=OYw*3m1Zz*^ zusjO{>W|cBx3mqH;*@7>?3F$x2`qIM+B6s@UtYS8v;~0vb1;~p-T>~irFR2{Rl&lV z4V}_YPOD&Tbyw_RWpx403{ue2&f{HK2FHN#0b|QQODe>x%^|x*l$%o~_pK5*as=rT zJ{XA0mwLaGdUL6bJD7(+3cNHGSTJ}j`233RLIQ)!_(j@2cnMR7ulXJ~uH+Z!uB^!^R9m$(R3hlAO)j>S7IdM+{K@ThGl$jPK!a7T}3b 
zBnM4HVBjR`&gIKITp%N*_1&8iKX!1B__=f~mjxY>KbykTrT4C8f5dxID|KDdZk&H8kRa?Fie(6Ab}9?zfRW zk{kJV6Z|422>s&`eR;G2nznzvFz9%d?SSB+e5 zlDA)^^7X$E67*xL=lP!92<<~{$<(5|hlC9R`~x?;-Kl4JU&Zbpc`x`97CC6qf+PTv zK4#0#&2^S@2jH=7l8^(2+N@-fT{O3#pe%aWq|N~VItA{Tb8-`BZtTh%Y!p8`?_u$W zC5oKz`bA3|=UuCJN3Pyz*b^44_V0tsCV@Z`PvQau(5_D|kGyl5PAIb3OX%M)c_3Fo z;}CU1zo+(zZZL6rwhXeoJ@m!|Lg+Q#yDb&+MGn6mUsMgRa;l#2jeRY#)R~%RUrKjSfj4(7QKwOVlHU|DoGj z)|+y>#;EWAxm$b1oD=94vJ=h$oVvS;AadcqOOJyT0{}!yI`9*hqV|P22y)a+%~d46 z&A=4*J9D=$-x;X$6CN2?+PhO;#zpvENc!`TTm3w6MQEB`Z@j)YoIrpM zbpGndY=5Od`Ww+;>+k=6juG1liLI9GY$J&oPhN;6xY81uYnH0A%xmp~+n7E{@6H0@N`<|Z72MikY%RK#}; zr$(mmOV9WyC68rxfvkYa@~)^Z{?{7Z>T7U20^isg;A*~?>ntvgo;rq?3p0hKDIgp~|h1O=lijO*$Rz4?Zl|`xH*HdOD-FM=O&uk}8&eBie~Q-#o|M*QLKQ)Vg?1l>jfWe&dOR|&=G#S|6<&=JoY9Pn<+)gAdzuTH9a0fpk z`|l(6IN#m`e(%`;TI+=AmFCosJ8c4UDh{Q(1sa-KGcB!A&Xot0R`%*ylM!~^Bii@~ z9vle|CdwNv`li#95xS{V|EF@6Ds?w{lYPRH_G`iPk#J*kbk#i!WAZIlNQ~;l= zm7V*1;bNi>kBYj##E##;s>Sa4-eI@*&dV30mGsp1D}18(MwlY?eWJuwb1@pD0B!ZO zs6F{ALvB(3ez2t7<~e^tHL$heNdph_AjMVjFBUs#3`F;;q$yYa+#7MO`0Webi2M@M zQ^)gCcf{^4PxyX!O}`&diyzI@ZM&ILpobia2cYn39GBCVQzw+Xu*0*{Fp7&GZ zNzy*9lklpXKIwin7s*h{4;OpWqSlGd0;hZKiz8>~AId*?pE6fse^aPF{-mg&nEBwV zpi6mqVq8Z%SF;wk68_rWe9m6zG=d;q9hTg;z+R_(zVVjtx-l6G>lBgvQ&vm`f5-2R z`^C;Cf#n*!A>iizFo`a47&`gp;#(SB+v9Q$x7U3*j=rSZFgW-!v$s%i@fUHeg8$bs z9&_F1HJz*4=_y#ganUCW9!dvL^GW^V)D{6M(j*7?^(T{)qoIwJ|8|ucCJVZtQ-Q#*{)BJa-{rD;2-wC_1+59!No8s8 z+)H~mTpe7~dM@xN89026eOddTi}o@r=z5_sa#o01_`UNMJ)x9=V!*%1Xo@L0pt@I( z=5eP0q3{fZ5XP2bY(rH`Tjk{;LL6+Ij50iq!;|H_ttN4e@dn_SSJRPyl3ns3`?NDo zd(2THns!lCbnN-7Dp- z?YLmM2fu!sXMbENsmyyP7#9@7wtw3;moGFk7wU=ok^4q>k9iH_h510AD(Q+~ z>gOenv#(T>8Kvnw70tG&339Dgc~n)4ul)yNB92StJecfs6Ef9Lze7wD@6kV|uB|D2 zr9yk2I)E3?sQ*CGl@CZA(Cl}N()_s3qR}`qE;21FNZ&8c+PUk!Y{8_J^+Nb!+sowS zbb#RC!1#0Q@U$Pte;U3(8m@q z49fBJvyF{#4Q&XovoIec?-yN@C$O-g%?junMbC|oT{k}ZbKD#UvZ@Hn%-!gOuY?H+ zzio!|DM>LNm7soFTjhh2>Er(kF+5>d literal 0 HcmV?d00001 diff --git a/examples/youtube-to-social-media-post/application.py 
b/examples/youtube-to-social-media-post/application.py index e0b6983e..683cec3e 100644 --- a/examples/youtube-to-social-media-post/application.py +++ b/examples/youtube-to-social-media-post/application.py @@ -1,20 +1,14 @@ import textwrap -from typing import Any, AsyncGenerator, Generator, Optional, Tuple, Union +from typing import Union import instructor import openai from pydantic import BaseModel, Field from pydantic.json_schema import SkipJsonSchema -from rich.console import Console from youtube_transcript_api import YouTubeTranscriptApi -from burr.core import Application, ApplicationBuilder, action -from burr.core.action import ( - AsyncStreamingResultContainer, - StreamingResultContainer, - streaming_action, -) -from burr.integrations.pydantic import PydanticTypingSystem +from burr.core import Application, ApplicationBuilder, State, action +from burr.core.persistence import SQLLitePersister class Concept(BaseModel): @@ -38,16 +32,14 @@ class SocialMediaPost(BaseModel): description="The body of the social media post. It should be informative and make the reader curious about viewing the video." 
) concepts: list[Concept] = Field( - description="Important concepts about Hamilton or Burr mentioned in this post -- please have at least 1", - min_items=0, + description="Important concepts about Hamilton or Burr mentioned in this post.", + min_items=1, max_items=3, - validate_default=False, ) key_takeaways: list[str] = Field( - description="A list of informative key takeways for the reader -- please have at least 1", - min_items=0, + description="A list of informative key takeways for the reader.", + min_items=1, max_items=4, - validate_default=False, ) youtube_url: SkipJsonSchema[Union[str, None]] = None @@ -74,72 +66,24 @@ def display(self) -> str: ) -class ApplicationState(BaseModel): - # Make these have defaults as they are only set in actions - transcript: Optional[str] = Field( - description="The full transcript of the YouTube video.", default=None - ) - post: Optional[SocialMediaPost] = Field( - description="The generated social media post.", default=None - ) - - -class ApplicationStateStream(ApplicationState): - # Make these have defaults as they are only set in actions - post_generator: Optional[Generator[SocialMediaPost, None, None]] = None - - class Config: - arbitrary_types_allowed = True - - def __copy__(self, memo: dict[int, Any] | None = None): - post_generator = self.post_generator - self.post_generator = None - out = self.model_copy(deep=True, update={"post_generator": post_generator}) - self.post_generator = post_generator - return out - # # TODO -- ensure that post_generator is copied by reference, not value... 
- # # Ignore this for now -- this is specifically dealing with a copy() issue - # # then delegate to the superclass - # if memo is None: - # memo = {} - # if id(self) in memo: - # return memo[id(self)] - - # # Create a shallow copy to modify - # new_obj = copy.copy(self) - - # # Copy each attribute except the generator which should be shared - # for k, v in self.__dict__.items(): - # if k != "post_generator": - # setattr(new_obj, k, copy.deepcopy(v, memo)) - - # # Reference the same generator instance - # new_obj.post_generator = self.post_generator - - # # Store in memoization dictionary - # memo[id(self)] = new_obj - - # return new_obj - - -@action.pydantic(reads=[], writes=["transcript"]) -def get_youtube_transcript(state: ApplicationState, youtube_url: str) -> ApplicationState: +@action(reads=[], writes=["transcript"]) +def get_youtube_transcript(state: State, youtube_url: str) -> State: """Get the official YouTube transcript for a video given it's URL""" _, _, video_id = youtube_url.partition("?v=") transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=["en"]) - state.transcript = " ".join([f"ts={entry['start']} - {entry['text']}" for entry in transcript]) - return state + full_transcript = " ".join([f"ts={entry['start']} - {entry['text']}" for entry in transcript]) # store the transcript in state + return state.update(transcript=full_transcript) -@action.pydantic(reads=["transcript"], writes=["post"]) -def generate_post(state: ApplicationState, llm_client) -> ApplicationState: +@action(reads=["transcript"], writes=["post"]) +def generate_post(state: State, llm_client) -> State: """Use the Instructor LLM client to generate `SocialMediaPost` from the YouTube transcript.""" # read the transcript from state - transcript = state.transcript + transcript = state["transcript"] response = llm_client.chat.completions.create( model="gpt-4o-mini", @@ -152,169 +96,59 @@ def generate_post(state: ApplicationState, llm_client) -> ApplicationState: {"role": 
"user", "content": transcript}, ], ) - state.post = response # store the chapters in state - return state - - -@streaming_action.pydantic( - reads=["transcript"], - writes=["post"], - state_input_type=ApplicationState, - state_output_type=ApplicationState, - stream_type=SocialMediaPost, -) -def generate_post_streaming( - state: ApplicationStateStream, llm_client -) -> Generator[Tuple[SocialMediaPost, Optional[ApplicationState]], None, None]: - """Use the Instructor LLM client to generate `SocialMediaPost` from the YouTube transcript.""" + return state.update(post=response) - transcript = state.transcript - response = llm_client.chat.completions.create_partial( - model="gpt-4o-mini", - response_model=SocialMediaPost, - messages=[ - { - "role": "system", - "content": "Analyze the given YouTube transcript and generate a compelling social media post.", - }, - {"role": "user", "content": transcript}, - ], - stream=True, - ) - final_post = None - for post in response: - final_post = post - yield post, None - - yield final_post, state - - -@streaming_action.pydantic( - reads=["transcript"], - writes=["post"], - state_input_type=ApplicationState, - state_output_type=ApplicationState, - stream_type=SocialMediaPost, -) -async def generate_post_streaming_async( - state: ApplicationStateStream, llm_client -) -> AsyncGenerator[Tuple[SocialMediaPost, Optional[ApplicationState]], None]: - """Use the Instructor LLM client to generate `SocialMediaPost` from the YouTube transcript.""" - transcript = state.transcript - response = llm_client.chat.completions.create_partial( +@action(reads=["post"], writes=["post"]) +def rewrite_post(state: State, llm_client, user_prompt: str): + post = state["post"] + + response = llm_client.chat.completions.create( model="gpt-4o-mini", response_model=SocialMediaPost, messages=[ { "role": "system", - "content": "Analyze the given YouTube transcript and generate a compelling social media post.", + "content": f"Take the previously generated social media 
post and modify it according to the following instructions: {user_prompt}", }, - {"role": "user", "content": transcript}, + {"role": "user", "content": post.model_dump_json()}, ], - stream=True, ) - final_post = None - async for post in response: - final_post = post - yield post, None - - yield final_post, state + # pass the youtube_url from the previous post version + response.youtube_url = post.youtube_url -def build_application() -> Application[ApplicationState]: - llm_client = instructor.from_openai(openai.OpenAI()) - app = ( - ApplicationBuilder() - .with_actions( - get_youtube_transcript, - generate_post.bind(llm_client=llm_client), - ) - .with_transitions( - ("get_youtube_transcript", "generate_post"), - ("generate_post", "get_youtube_transcript"), - ) - # .with_state_persister(SQLLitePersister(db_path=".burr.db", table_name="state")) - .with_entrypoint("get_youtube_transcript") - .with_typing(PydanticTypingSystem(ApplicationState)) - .with_state(ApplicationState()) - .with_tracker(project="youtube-post") - .build() - ) - return app + return state.update(post=response) -def build_streaming_application() -> Application[ApplicationState]: +def build_application() -> Application: llm_client = instructor.from_openai(openai.OpenAI()) - app = ( + return ( ApplicationBuilder() .with_actions( get_youtube_transcript, - generate_post=generate_post_streaming.bind(llm_client=llm_client), + generate_post.bind(llm_client=llm_client), + rewrite_post.bind(llm_client=llm_client), ) .with_transitions( ("get_youtube_transcript", "generate_post"), - ("generate_post", "get_youtube_transcript"), + ("generate_post", "rewrite_post"), + ("rewrite_post", "rewrite_post"), ) - # .with_state_persister(SQLLitePersister(db_path=".burr.db", table_name="state")) + .with_state_persister(SQLLitePersister(db_path=".burr.db", table_name="state")) .with_entrypoint("get_youtube_transcript") - .with_typing(PydanticTypingSystem(ApplicationState)) - .with_state(ApplicationState()) 
.with_tracker(project="youtube-post") .build() ) - return app - - -def build_streaming_application_async() -> Application[ApplicationState]: - llm_client = instructor.from_openai(openai.AsyncOpenAI()) - app = ( - ApplicationBuilder() - .with_actions( - get_youtube_transcript, - generate_post=generate_post_streaming_async.bind(llm_client=llm_client), - ) - .with_transitions( - ("get_youtube_transcript", "generate_post"), - ("generate_post", "get_youtube_transcript"), - ) - # .with_state_persister(SQLLitePersister(db_path=".burr.db", table_name="state")) - .with_entrypoint("get_youtube_transcript") - .with_typing(PydanticTypingSystem(ApplicationState)) - .with_state(ApplicationState()) - .with_tracker(project="test-youtube-post") - .build() - ) - return app - - -async def run_async(): - console = Console() - app = build_streaming_application_async() - - a, streaming_container = await app.astream_result( - halt_after=["generate_post"], - inputs={"youtube_url": "https://www.youtube.com/watch?v=hqutVJyd3TI"}, - ) # type: ignore - streaming_container: AsyncStreamingResultContainer[ApplicationState, SocialMediaPost] - - async for post in streaming_container: - obj = post.model_dump() - console.clear() - console.print(obj) if __name__ == "__main__": - console = Console() - app = build_streaming_application() - a, streaming_container = app.stream_result( + app = build_application() + app.visualize(output_file_path="statemachine.png") + + _, _, state = app.run( halt_after=["generate_post"], inputs={"youtube_url": "https://www.youtube.com/watch?v=hqutVJyd3TI"}, - ) # type: ignore - streaming_container: StreamingResultContainer[ApplicationState, SocialMediaPost] - for post in streaming_container: - obj = post.model_dump() - console.clear() - console.print(obj) + ) diff --git a/examples/youtube-to-social-media-post/server.py b/examples/youtube-to-social-media-post/server.py index 906bb155..75b8a4d4 100644 --- a/examples/youtube-to-social-media-post/server.py +++ 
b/examples/youtube-to-social-media-post/server.py @@ -1,90 +1,45 @@ import contextlib -import json import logging +from typing import Optional +import application import fastapi import uvicorn -from application import ( - ApplicationState, - SocialMediaPost, - build_application, - build_streaming_application, - build_streaming_application_async, -) -from fastapi.responses import StreamingResponse from burr.core import Application -from burr.core.action import AsyncStreamingResultContainer, StreamingResultContainer logger = logging.getLogger(__name__) # define a global `burr_app` variable -burr_app: Application[ApplicationState] = None -# This does streaming, in sync mode -burr_app_streaming: Application[ApplicationState] = None +burr_app: Optional[Application] = None -# And this does streaming, in async mode -burr_app_streaming_async: Application[ApplicationState] = None -DEFAULT_YOUTUBE_URL = "https://www.youtube.com/watch?v=hqutVJyd3TI" +def get_burr_app() -> Application: + """Retrieve the global Burr app.""" + if burr_app is None: + raise RuntimeError("Burr app wasn't instantiated.") + return burr_app @contextlib.asynccontextmanager async def lifespan(app: fastapi.FastAPI): """Instantiate the Burr application on FastAPI startup.""" # set value for the global `burr_app` variable - global burr_app, burr_app_streaming, burr_app_streaming_async - burr_app = build_application() - burr_app_streaming = build_streaming_application() - burr_app_streaming_async = build_streaming_application_async() + global burr_app + burr_app = application.build_application() yield app = fastapi.FastAPI(lifespan=lifespan) -@app.get("/social_media_post", response_model=SocialMediaPost) -def social_media_post(youtube_url: str = DEFAULT_YOUTUBE_URL) -> SocialMediaPost: +@app.get("/social_media_post") +def social_media_post(youtube_url: str, burr_app: Application = fastapi.Depends(get_burr_app)): """Creates a completion for the chat message""" _, _, state = 
burr_app.run(halt_after=["generate_post"], inputs={"youtube_url": youtube_url}) - return state.data.post - - -@app.get("/social_media_post_streaming_async", response_class=StreamingResponse) -async def social_media_post_streaming_async( - youtube_url: str = DEFAULT_YOUTUBE_URL, -) -> StreamingResponse: - """Creates a completion for the chat message""" - - async def gen(): - _, streaming_container = await burr_app_streaming_async.astream_result( - halt_after=["generate_post"], - inputs={"youtube_url": youtube_url}, - ) # type: ignore - streaming_container: AsyncStreamingResultContainer[ApplicationState, SocialMediaPost] - async for post in streaming_container: - obj = post.model_dump() - yield json.dumps(obj) - - return StreamingResponse(gen()) - - -@app.get("/social_media_post_streaming", response_class=StreamingResponse) -def social_media_post_streaming(youtube_url: str = DEFAULT_YOUTUBE_URL) -> StreamingResponse: - """Creates a completion for the chat message""" - - def gen(): - _, streaming_container = burr_app_streaming.stream_result( - halt_after=["generate_post"], - inputs={"youtube_url": youtube_url}, - ) # type: ignore - streaming_container: StreamingResultContainer[ApplicationState, SocialMediaPost] - for post in streaming_container: - obj = post.model_dump() - yield json.dumps(obj) - - return StreamingResponse(gen()) + post = state["post"] + return {"formatted_post": post.display(), "post": post.model_dump()} if __name__ == "__main__": diff --git a/examples/youtube-to-social-media-post/statemachine.png b/examples/youtube-to-social-media-post/statemachine.png index e9ae43146bf50bebad32d3b98a3ed6a224b0c708..bdf72f90c624b630570a76bc5c0fcea6f18910a6 100644 GIT binary patch literal 25106 zcmdSBcRbgB|3CV!R7h4xG9p<;Q3@GlL{dgV_DF@KGE$Pt9vMlbAv3a4R`%AmLJ^@t zwz7ra{poXE=Xj5mjZ%68Z0w;-}#h$`;CD<%7De4~IIg80%UuDJ-^9 zDkWCY1ya8jpw18M*fGLudCNcUZ0P+XKO1}Odut0nNEGf)Z%^AHe7x{oVNj97I$FmM z=`8D&Rg_Pf9B>JB=W$(H9DDbd(|5(1o4d>AXIZ_LS(&Pa 
z-6|;Se9~4YN|1OB```m98Vmf7s4f5gd%}zU2L8W`?b)*jtgW|+Y~S8cUw>n6?yByI z6T+q&1FP93ZSS^Cy?XUZ%CT1?Fmn?JM@45RcZlq7zqYBLKYywnIpSGTBA=imtjx#s z{{4FjrKYBaF|eA8b5j%BL$6&m20J6}-}m?Pqt?~ek1r_T!0!m{+^H|iv-cc(u6=t_ z?!x%@T54BWc&R}~dkDk>@M|GGHz<)!Q2@!IARZgzG*C13iSxy8}a zCHYeloA{Y&@TK@4LAP)7hN*Q83gp^@Jd(C7Q={K9a~Fo&^RgWKj;ST;sPQvLMMaf;`otn}pDs5ymtEXaIdydO z`*%;P+TeqHVP$1yl&e>-?h+T*E;?^(+h#joR(2qH|B9D!aVz=4lAEVWN=jb8etqER z(e<`=b`76D`~CiXi84KP$H2%augz2}auwypix*laPIwOt*xtQ&FYehh+7l;Ej7?8F zNYc#ysZ+?Zs9a-UU{Ln;D<>Np+w<&fuZRfNw6rvGSLL2-^Eg)7J@V6Runx+~XvXV# z_>l9IwRQK@aQo{IA5_kt7u&I8$Mf?YGC9Ly#`!n8y37p>4Mi3h_b%-}^_tPk%ggDz z+T1-aFRG!>S*tFOcAk(82n^IVF!00G$j<%<_w)19YL}Om-XtzAe&ND}8^y(P4tIZu>f!eiuW92|P~iFX z>zA6Qrs|nq2#mq)&qCmarcer@(r&hW=a`+ae*bR2uAJ=d(i6(7HKV&ZD+4%%ai z>{c4L0|IC@9?NdR?IfqAQBm00*~_Y{SD!zBo?YS;wfx^PrJM1mikk&vuHEuj{1YQ2 zD!TfD!_MfJFLxgbV!vro>0^Oq#V&U>yG&c2lYFAPj*X3Q&-d@*)^G3X396M$wisQw zbm>-V>h?A3IjGjI_1!C|E75Wv51iGlOXv9Uu)BA?5)$}}7Uu>Hvd@OqcgwMmk100A z<`Xk1@LIP~tgFI%onK&}($%Xne$@NUf4%YY<;!|riI!icuHCCoojzUB-oDw?)O76k z?_0NT->$b1?#7dBN%i*f>S{IfP_eSwIyXIvY{t%D;Ns$Py>g|zp+UR<0{aH#Ki#$Y z`BGKY)!py!?^e^$P+nSGSbaMX%fcY!`0?YO-#nJMN*Cw0|NZ-S?9*{g8ZKt5;$sI7 z9*h(ezV?{iDos^r$BwFf->!K|JN7;?E4>~i=eC!5!v+g;^DRzigC=GC{r#WkUHF{oZ4cKl4om}o}JCIT|YxvO^tfx%9UFp z)V=Bbv*Z&~QfT@#MQE4T~SSU*5{MM^aLFNcLI`=g{l)2!qgI^G8W8d(~a^N z+1S?X-n|<|icd(08W$39o%^9I`^JrHJv=;6h!3FR*3~hbse63l)3^MK@o|d_zP$82 z@@Yb&t(h_EODLUEZc}M-!H*w5uCWpm6+OZ6K#jkm=Jw_lluNISQ?7LuT?ufhuC6wC zd3rtBhVJg}xE8CqiC&+f&??Ef5P+soJ2s-wX9 zbKzx&cdY*6H3e@p6&92wB_$Po8WO>Kf0a^Mz$W8BZ~8Sj*pzbo>D_@Ni8|4{EN-bQ;4V+u*re!WWL)2^prG*L z`ST}^eIIkAdvDyJG&VHEPE0sOwBqJf-KIxQ$ku)Mu$4=SXA1z&&+I&J+ zx4t=5|4nOaB;ES;!U6(^?2R9>I{b0z9UMF&sQEbI*)tItk44#E0|Po17O|EZE-o${ zwR~PDQuV^_-`~W_!jfE6g9RKU_avR4Uq;4o`K?eL7+=-!zOvGjQlEGv^6uMA<5!B5 z*Oiq@DMc?m((szzeEnKvqVP~Pn3I<>(G$2+BqB!I(PwCAD7mD>R6F<78|IYR*Et;B|ITxMv ziaxY|K`U*Z`VxL86c;bCZ{Jh0bc$o^MDHLgGX%Xi{9pOVE-08Y| zFa1!XW8X!gp`k&q9eyFD`AEjQ@bakT*Zhn2Esyrva~ulh^miy-V8eO~_$_Lb+coO3 
zw7<$SD{hB`1T$rqRSjLDJRKcf*XU?SbTkL2^reLGLk-e+sN$#JvnRf|*P^7W%i`qZ zqVto{qX+1;ZXeSNW$vXs~>Ti)TWyjb2K+M&*14%C@3aU7kjAg?KI0sZn}0+T1iO> z8!UOk!rp#;S10Ed`$pN!o`chwzP(vkvIhmiQ{G3savkTRY!MYrH?kZ#nkO{#Jw#z_ zq;T}vT}SfeCdmL)pDH@Uqr*ey9U?N~<2`{EBMPuHM5j6$Lk21ho|~^oOHa>opLhAx z+-xz}l5SAsn$^f}xoXv_*QX>twY0>=#VIC+{QY|k7>RSw;1HH14<$$cXR(JIX~^&2 z(*bRrJ$shY@!-Xi-@qMqk|iJg^73Y9`(h#l9QJ)D(4j!f-d?n)r|0flbd#{~@KxAM ziThWyx3%%@-*1$19#BPM_im#hBfc=9h5Ys0vg~-KLl@H4tXbnaUcFIUOX~(|k>zM- zvFq&5$P)t}?(bep8Ebba6^>|aZtg}u+9e^Oc;LW+<$7_$O~2x(nwpk~osAq2)d8`{ zJlB@@zC72dRaI4A|Gbnl+H@zls{Mg+>2yPHsXg81h z{QY_50(d%L9{ikGS-vt=4sSW1h4^un)t;BuU^d8r3{SKF@ zuBfXORiCgme*gZ>w{z!CPy($hk4T@-JACBGRtE=%g6j(f^|9GP5)yYPLUL?73=W`y z9v3#tJI}=wrap^$%CaE2q-9*_avP2IbybxYsdOkIXkhWb7qLXLoqp-O$jRBZb?eq> zsf&ErGht7kK8;&yHFZ0xtspIZU*Kr8vQElfPEJlCSy|(hQegU=`Jt$JY^)$k{ja8{ z%`L(rB5^29;Q(d+)t1-`ER2i?YD0KNe_zbWE5#2AJ#PQj9ka8uVOd!hQwL-7 z^P^hQPAZ9siSe@qzIppLAnE9XLwE*FXQgd99FDYq@tmg};c4=0kIPMY_ACyS^+k4e z!jB(6*arRl45bX-VBtS0D&kzRVujkVV>Q#?JTfo2s5tDtI&qeTmDRcZ3(KRQ4;)Gk zq5%VfZ*dYz^DVgYN1*rPqd>evPWfx>Ky0Ed-=%rM#;j~@1HuJ0EpbIr(vAZ6#7x?r z3iM5~>~|R|i-_1nXZ_^drp)oq{DOmh3Jdq= zI(!TE@mUkAH2BhGwDUFrST9~M<6{mSK1>4wqR?)W^>rx1v^ZUDnFA`m?|YC*7~g9>_vZE>qBdu&H80bGy*z8SQY^wu}T&}IZE2vOmE-4YyaBR zz}C5;f_=lo@bC?STY_v}B_#=<4pPO&#sc5h;`%ZLSPH8QOU+ym zY76In7dpTVx-oH3|fzC=}5Y5UQJzvUsfCcPi3@C2-*#ay+ zeDuhhJ|xRD1duV*B-C-RUw^i}S68HICE<*-V($da{mk7MNnt;duP~M$GHG~`BbM-#} z&oWIlIV0nch6dfS82Q^zA77=#C$a8eTL$`_u}Z$7QBl;`V6IoMDhh-Xz}yiR8@mEP zn81F#E5hPH5>Qt13kvvTWjULYj&9f?|IhD(G(Wd~$5t)5|7%A__wev~d{PjHBrRaV zE=fsN8I2m54m_M-28F+j0s;bK6BAyMksFJOi^Z@1b^YAlj$0u&T(VVAa4pzCnfJPl zEFP?EtgPK0OY=OvA07xgxd!DNF)`sH@Y}96qr9<^8SCD5u(QN{Gdc^N+Ab%j>`^ZR zazXBf;GY^A8s_KDrR9E1%`6FMY}CPJS2Q=X68;4yl%Qj?@6g-1xU{q#f9D1>3D*O+ z0U5gZy=FabvVU;U>Q`eTH3P#&nfLVH2Y*Y{%iwWtj^d+broWRpy z$4E``ex6Yxf2>Da;@!GcX*|*foO7>z+oyq3t=8}E5vE^Wz5+0wu;82=$#dt<8Fs9y zyjpVS4)e98h3S09K6)Z6U`vC4^Wp)nU%!5AY^?0(yGU>PP6lwafL5p#sH*0cmT|f6>gu$REkN1ZoR9BndF4lq#jb)4`!7;{>()xt9m;{! 
zeS$^KgKXEXT{CxZU~qGD!?F`z9PRI?78Deel96#~)yk$PQb)_}AD5(o<-#H(NwH@E zlpXJ>wjJEJcdw$E8PCz9M}Y%$X&oIMAu$Loc9eQ}q=wUHnikXgE3d>qRZDD?k9f_< zq<-XxHY5r(09`#jyey4zW3kBZzn;eEwabI0@7=pscDqjw&+-!s1lZxlW&x8xNjT{2Ecxu&Gc9fHo6v1u|NO0&x!1_VNx!oc(A2^rdp%X=rV&jZ+DH5?`sH z5X4Xm1!0g+nF?DM8(JvBtiXvTUNwYGNF?c~6st8h?lx$qY+}aAPHHvP)!rKzlgbEV z$`W&2;@y;G5VY;cJ?hPjO(NH%eDJs;FF86AO^87A^%)r%D^;tqS1;GImPEFlrgRj@ zQhNlqXwZC9i4<@7PPjODxSE>UfrAHEh=_=I(fjAbp$(Q5`#(eHU8!1WXm$21H3|S7 zk31*Df$C)K_=1Ur8MM?i)zEi8@uJFtx%OWe`6}K2>zBprP~PV+UwWcT)puv@eJFKt z<@cJ~o?&5O^|83!Ry9MK`47T4VPV;Gn9u)(VozOz#7SddQuZJFl$WSv;Zh z)bxA|-egn@U`g2v>vzu`zRACTD7}Hl3(Ms{`~h#8YM`y4u}Ue?J6;`^1AmCb8{HZgPv#&A`#HxXTmefMcP zKWcQ6J}NJ9(XOGPAiO`mJ$qQn%gaAQ%2b+ua|UlYTOC{jYEIY4Nbst)^kWIfYqI^Y zn@2hdySut>Y&5&Z1a{1jqv}Q0MD`)@7Kmk9ARy4q)-B9UZjj-xiQ<3|C3}6Qdo7fs zq)JESZbM(R9iz_P(7`Ctlj)RVFhQ@;KI;zF9tgEq}ejjxcI1i6mNnc+dDxcMGHss@L zphap&k2cTTI!%5~ARnl!tE=2|W~`DydWA)r@0IVj_dR*~)HkzaPW^i>>2_4p0-jdU5YBG$-Y@2z%9)F_=DED(@9EW-N}{}Za{S~;ntl8BQLSE0!$k{*u5DzLs=d6$hgP8TMM+(Z ztG93p=-k8hWcqfW1rrm{X8os66ZP&iS?6Xo8Hi%&Dp|>x zK^i&No_q9HXpQFrB(QZL61#Tq_DoFNhRvWmb%3yctZt&L1OEWdkU_89$t)PL?7%2 z>)wK1d%GN9$S4f2djy;z}|zccMw? zXlMlKxTolPKIn>Y0xZW8pIZ&&6xU9}0s# z_DIHAU0c8DXD7G)Z9yriC`u@<wVWKH1+vsahADWSf;zHu7lfejy3r016X+%wqF(69&U zZN%@VG1sH3EX5-tB62>Pfr#kL7G3#6eS?o_3*RLdmn7JUsT0E^BN=zS_GJFSl1=%v zo{sLEIF0tc$Mb{Pckw+(BE^Iw^0G7hybdcax4T;DYcsnjTefUDkn;8ITUsR_I_(r5 zR@T+9LL!>j?a!Utb#3MX%H?-RERWc}q1KVbe{Fs^Prdc%=NH!N78j>Wm3FXgf6KRZ z>x!?1fny8*Dfu;74$}Gf_@JQ#>3jj55jQWR@bmL;*|%@wf$5#o^RqKE*#_J!ED26g zsrT_dU^{#=b@>XB_}44=Uz9C^f~wkV@IbKuc9@lLq9^Hd-a@<8F5tWHM;K>_o6#L-@9 z`#5s!$>PHNs{G5s*JJOw?M~5cv%GTh>S03TkCkW;Vrgzp z!37Y!@#2Nf&&jo=x_G|8CLx0Y=z(r`xE-mlxLPmnz4PZ}6jzx%QjphFfb#zfhQ(7dUkfG$K`agU~t; zC{xqWl*19Fdopb4RBQk5EP!+87t{zN&ZdRIfyJ4^&4iY7KfSWg<&VYv(|ES{>=P+G z%*-oa8F%)4+01i&hL@9z3$T~f8r$!e@0GDG0z;l266auN@2cKtrVON)d%h!}*loJ! 
zXQY{4%DyunIVokRtx3A3`ue`#YeOnQ$9_EzPfr(p@$zNFK=03=yt{Y5td?ry2uxa7 zF`dzPKQ#1-?!BleKbXv}uC8S*Eo?V>-xbSopYGVRcW)2c1cATLPNbS!TPMzRr>CSe z2nFAZx_@7I#%HS#;g6SIc`WSB`~zI@Y-0TN){w3UebX!b2GPweEqr2P$5PS~ zomV@3bji%3uRCA?PpLl4KLsxEDL4sEE*h4We5;{>VzVV5~f2CX|9>E@O#$?Y7z=fTnY65f>h(#6T0q&&mimVvr1EhEEBk#QM{ z(s6fQD4uGw6A72Mme@2qH>ZOj1MXSp`gM8x&(CjC>PaIeMH$bZOX|W6$Ya9Ww+9kq zml!rs4D#G$)0=JN!{2=SR%&vlR(8mxjzG+-58-bpD+4+2ahp0PAt|}dUs(~a^J&vl z%_g9x!-ye#+9@WMBx%&H(`dXD0`me+&(2SY|~5qO7K(sz)VV-U*G3iAUbFwA@3_KigB~ z&$4l&Dk?qI4NARTW^v0%R`!3Vx{3pHhLJ8O?RaRL>Ct;`ZpoYLfah4oJC`87JaFQHgo^%IZM?yHjV*sJ3R9#6!~sG_!FwksEUP z7RIlvJi)lOjNNL8R|UYoyTY4e^q&41kQ{twIiex^6H9$3KkWOq1@aqYGDaDemf9e0 zIS$ywCN66lJt6I`LkRl{k*StD_mB?Bsw2*n3V2Yw07cES+->S}Wn?8>svlcz>bC%Uw1Xmb2 zI7AF&gKTya!g)(#&mLtftN25{R*yIbOO4`YnUA?_NpFbPq=jS^ePGc|~ zj9Zp!8PUwZ^sd3_n1MrE*vHK&XW$mO-7cort(-KX{rfk=!FDK~)Qc>2fS!SOy~noF zk$l-444|&T!9Wy*#HQ7otmFbiLKwK+C$z6kG^h@rU*^ZR$h)C*LC{&L3LQ5YO$AO! zado>u_CE*$VU}HI>z}ZV_LZ{_Zo>U)pFHUg{m_N zLjeqqblux+gxhcUN?6*bPrAr3==N*D0|N}IdMxKgjQ+D8Yi};GRwl*(u z*4c{YM+!d|yY1uSl#tJ4%&_QykjrCOnn-R!F9BFdJI*6 znYA`A3tQ>C0ZaZ)wwggWVc+kv1E%>7!gH?>o(t46_HAi#W9i@D>tF~j{-e1!sY4e> zwu|nXyE}#FXZ`O@R0a{iK!f64)6HR_O;(_gW5JVSTk?3&aweu4rTbg#;EinWWl#_f zb~$ufmCif3&1TTmZo2^|@by zu=S|9=$rUt!F|ExZgmlt@&5;ZL+7C3qJnPJX$7{02cR!2$T!rU$FM>0m{)Oeu}%lp z^+w3I9b9}(%w$!u59$Kp)xb%BK+xP`bM9Q#jTNg9sGOpgOFg z5G*PWx%2ti2EJnl=A?+1W0lslo>Pi3&*9tooMY(MNJljsWfHfq`bR7)07frpt`rB5 z?xynkN1Xd3YVe!_ydlgqy^ZdF6V zRA+5%S6u$U_C*V99LGb{IepgArVGA-f5h#^+YXd z?{Z4bATkRAa!d>lsGpOwvp7&ytdgan7Zx9UL0dGBEqo9S%CSlNFGR6*`==CZxw!0w z;!oBrEUdc`5d>?3;1YQZA0#U$*PQz;$8O`|{I{i*6sV6~ciY5Un1F9;v7;_5{=LG#Z5y$? z@O-fI3kVK=|KUUT&=4Ks6I4`G=E%~CxEA!3?=RjM!Di-_pP zzC7`D%d>6h-PH)t#p$;E^|XcR4ScB=&IsKVJ&BTk*;)w-5#`t{n^u3l}?RdzF=PuU9sM71LWLx^5G9F4e=5^j`h3Pok*8VYiq zFc6WUywdmR@^j=3svYs!xZ|kkXgF&Y5GA0vfHW&?hD)X5w4)8JS9! 
z7TvCmm9Ncr8Rh`j@9yjKeez@rf=N?fcpl?HGuO#Yb0QGrO`q5 z=`!558bUtdVh%;)hhjSSW9b(hz*~n(poMH@Pmg6p?Qu;xZ)^oa`+4WV7K{0*;Q*ej zUY^ok53q&9ASo&KW>5j&zE#IH!v(8E`enP%7rPGBQ@|&6w~**1(VaV0wX_%kN0W0G zi0BDukSK9q!v{?!7I8XPkWj31x@B*|{OsA!1Md5xT)-n$BR{mI^bc|_L4ZnK zk1ltXLpr8GKn#^l^CJ7J%r5?f)kq6h;rRyqW5@~jDpRJ#vj33AjnA=dkHofvu?jM$ z?#s(=7XnX}Ur>-TO_^o*TPlt7FRXwZ=c6H@1brrEZfkt2ATP?$>-X=Kq0u)sHWKuQ zylD5UzQ@PgdjZjr5X0aW>9jr)mcci({xL^w0hsIh@q-G95^ReV z#59(A+j&qd(yI6UeX{7_u=@hWqV~w?Vx^t_5baH5+^VXH`^pLW0=(nn<1>8A?#CzdxkaIdL#_G-@JhZP+?I4ZLba^r$YWys>BeQgo z7F&q;x8>zGr+ahA9ajKnT|fcQ6Lj0%!VOG1HaV#R5!=$za;_@&K6j)S+|XFa|I4Xx zBAkCNy0Y8Y!~}{pJ#+^0X^o99HgB~~JHr7n5IclIVfLpY{yNb@fbxiejrifNUAq{i z)Vo1YtdPe03>0W~BhWb-HwkFBX6vD#GKjuc7kE|}KJY?7y&F+4H6&8efFS$@pr^%Q zb%S*tx^(FUiOkv|heqrY?0@1!a@3HctHrPr%h(j*4VwDpkCBFARwOZ+h|Z=)z}G9O+b z4~lR1$;;aZVRx5PzZL+fig(t*kFr{t^pI>Aq@pF>ZQBy{dG0as)8Nu< zcKiDU1XO%moZ5$_=W<*(D9hIwm-jwKfd`_DG6(`-VE3Oet-3dk5`tRRAbtGqoRP#=VFy=7bqa>_plQiz%s^xY|tK-o>own1a#JoL} zSILV$vC`Jo7PZ5lw6k}SJ2%R3w!HiK;|DR{A!`=9WEw?o{6nXXk*`}bSHh=@W~w&knGHhFn+8tcz%92`l_jWf2LC8 zBBGvW|Ky7yv?6{q-S`zVAqvoT5McGeL&vvNynD9}u`*?Q`}8N{L<2{X{8mlb92=H> zHCMUY*4uYNvD?y&^5_Ax@m=yiE?v>D#*I`00+5Oe!Hoo~+2yZEA&Ed0n<=EFr`M9I z8F{L$7D2e>lyQS~Tg38uk6HRFh(N0sdieLH#nGkEyLYMNipBOvH(6j?BIlr_r4L#1r_9)wLtSovN7+uS5tE+{copLab!lu`OHPn(hvMUka8ozWFi_NqP>C8 z1R#mToQ><2L&mY-*Zo>;r_P-V!iu2f@-a6w(sy_N;uQHf`{8VDY2V=CcB3=D0@1ol1zdkcv;_=>$IiiExfSUO7r2e!2t1^xm>+zFO8VxQI9G_PQG!6 z-Plz#mxN_h^!x30aAfu-t<=MQF!B-iDcpX~7aHAa#D`t?)FpU4{|iP(y>6W+bpIWq zqD+uXCpONi^0+4?+c`LdAVfPeKVOM_GwKT%*>X;Jaana4{Wk1E?2jv@*B5l>GM_!G z`10}$tFuf&;cWSMVnV_ez#DXmfERIbe3S!dVtVO@-atnrL!qaqM_f( zxF$$xX7MmP{&W@C+J8AK5j@rCWRfSD3so9^fdIO3KyYvp22RuuAC5)(0-|9yx&qt$ zIvSc-%wv$$_5#`}FJ%w-F)dse_y^i5DyxS-XCcVEJ+tU{{r9tWn}-f}dMvq1NJzvX za4+T-f{(iT`;DNaZTP)=hXA>V5QUuBiF29%XNM~D*HrqOX+za{y}UU;=P z;+p_-`@fpDPrmt&uD&S++4hCH;xv~T8>Yrs_J#wU@{$%``h(c63H=FJr_ zxjRNu7k~z@c!x=I-Tcu`2X#bz^1T7o}9!bP{JdtuVAr$Eq2=ou-laUC?rHP@F&n=J%(bW zx3$#Ntf};6P{n8k8ptXNM87ghTqyTg 
z#+eqz8b@`Lc65-^2CkE>12t@$HqlU`;KJ&>!h?nZqK#kJsv4Y}{Rd$Cs=RsG4Yqf~ zLqmpNP4~+Lca|Ys1Q>KcT|J;aR_H%mt7p~o{Z3Bv=j;Bpn82V?IG<4{@Fmfbw+IWT zRJ({W%{ivOIkOiq3;WifW(O&vg6pP$=Krr@27f zy`Ul>XRr~4aWw^c=@txqz)c?xei|=>%{cJ7;Jp3G%?M6aRVn`@!G4Ms|K;i3l9DH` zB3-@;v>%ly@e=;dc^W%iWuN;JJ8Ap9%)Ll~DPbFeMjHLSChMBw>HDRmea_S^s-)T7 z=iS@4&#y0B4dRq$M!^jzSsV}XpirKrrW%>ZKN26w&F=g2pU(XiAWj; zMgVW#ycwYElVcWmSPh}P7@6562;OI6aML|vzi+E@;o<}#3e5DRMB9ASz;y!!iDzJn$+`O zoKA5LC%!2-e;bPdDm`ta(3TBTjP??1wB%1B1^U<7XeWwHaa*OgLhjfB+wO6!dlV zeJ_ycR&jC3)#)fIlEr{p?&my&?bHp|FmMN9P|wA3;;o{h;+DvFMh{sRu`(*Ki+;Vm zhL(UvUWulfh42Yx<+f)SyaGuiHWT>XLF7sSARAM%a&yn^;Zk+8d6CgB@!uL+*3;L{ zG@Rn>C|>Up0hL;I`l}-XM!|0_Xkw27Xp}8^7$lv9F^! z8Acic!y6&@9u|p^pO9*6TjO>MjInz91pMU45`9&Hq@<@$!;wd5{2~rA$HLBz%=8SSjJWY)g zZY~aImD?gps2R!-d)2A*h+eeK&$6GebGhmF%Ape3z zX0g9X>X}*yU?hGfb^Wgta*l+$R7Z$zveHmPr^vtblU9Cl#;SZYqdgiW2B`E&Z0x}l zLIsc->z|m_4VvU1mQiWN)F6qpgR2lFtXUkf$wH^;jv^I4?Z7jJ*?6E-xcdWodTeL} zOq(|cK7YOoT4B#{-nWrkY9}2IDi_buJ#aA-1gc*PHA79mCQdRsb2jFG8;q#lGtrf^ z#_%fp=FQUq&K;Zcz65UENz5Y1YE_>KDi+AH59z&N}v`xKfP^@9w z;nxe$C`q3rANydh{lMEi%OIQk2t;CFBDQ({6`3CR^~>2xm*ET)FN|-RKYf(+E=qYp4`^sT&_fdBXt?MH6H6B7>|~ez{>jI1ULDP^W6EBb7lS5)I0ZdwG}$8H zJ46E~u9+sn4yp(peEs_MQ_Ep>^$HAzr>n`%Rdn7&HL?Ofl!E9M+2I#DX#$0TOmx8Y z7>9ySLqx9ve&t8#&U$CwAcpmwjD_HXAP~|bEn2;_jrlOfISlHc?2;)J3}|eXljDLe z9^e6snW%YqTPON=dk%hKG8{HTbC)gVdz{r%**3*Wqa`Esov)7D*x zQjV<4_r(}U6$anP2^PpnA=Lz>w3tj8j1zwm8Z11~wEAwRSEk`!vKY}h_A9dhv$mEK z{SCH*OM_-h6__i-F$KvJ7y#*S)=#yPGsC5jIT(ngiM7PkkBIOFRlIxmZnC|mGWwzq z+Oy`GR;|;g1@Hz};ffGHz^hQ~4>e#=Q8p;OE=OqLUnt}lAZC?GINx9w&Un-wi6l(9 zlB5A5jQ;q@YCA|4*mzNU?W5bK@Kz9j^?_1z+3Vd^X;61E7t-6yhr}4^f`$9yG5Uh| zTykcnFLX&HDTs88{j0*q#lb<#_2^jfUfy{R%tneMbqRCfJ5sn8E??eAG zPG;Z9Fcn@rxw6W`e&l?PfKnmg{ap~=FfNpuu3-I^<)OSg2ZVcHcqga=PiOMrJQ!qI zuIAZy+E&y!pAirS-R%&JTJ*2G=kH=j@${Uo9?)r z{`fS;wS9mh$fPKUHO8L2;p?QN2x}~rPL1pldA4sOSwU=_UO@u96NodUyZ8~;yj3ddG=xs zEkb%_kbItB9yKMxJ)-6pTwKmKIULv~I^9#f?%bCbs}V50iRni)(^fH;dkfsGyy_&C zzSFG4OO5{){uu1=G1zekvJ+4U8AZb2=f2M#SmvNKH<3rh`2iMi=0rtBp;K4jbQ3W5 
zgO~{781jJRf<)%K;ARreEhvz2mF0%`Y;pcPJr~!bHS>YhmRLNWUVZHQ^B~(iGj_48 zFy$?#1W-QW%NJMPq@egad@ET!HsV1&`fZe`S;VkxIMItpHv(RzmAggr zcDA;Pm};Utfq#rwW~ibJQ~}R}94P}8pPX!f_YQFi0dX&cE}_Z9CMK?ix$^n*=hu%_ z)s9npE0WPhGzmO{UKx6AX5e|e>l(yaci}h@mr}jJ07jjiu)J&7p_rqgG{AE@bcS!_ zTWJUyvrOx^IyIQ8dkRcrV;3n;l9GJTbN9~uR?0GB|vptGmJrv5Bl4DSAlf&dN zB1YyvUWbvRQDS0Z&Y95gK2sElAd40zkP5OTR4Z2?0+dYQRSyYZY#ebO__TaD2hzjj zw1^`|j=-u495nFSgKkEPV)xv(StQLchc<*;t^#u_`N1k`;aW_Jua}_j_-F}LLD=p_ zJ>=nuIi$(4&FcshD{6XrAIxY%#~L5&R)SwIITtLdcz1m|4Q~QBnNor;&u%4kp^K8F z8S~MZU-1-z-dM56kiNBdMWaf#t4@S#ti0wFLDKDJEDqX z&$7cEg6zTwj0un-a=`Cx+9wVJ;}0^Z*Zs;A0V>JvrFg z)G(&}vq6KJmq*;bZ4WMh2<9N$W8>oof%RLn)b>M;x(PFo2ps6uBxnO03lGqK$oK8puE>HZKstxEz^au%7kvxL#jnKWmcm_7HgRPlX2*HM`@e{8Oj>fBC z<+dBZbYubqGaY#3HSnzSgO&X%v`!vXTZjDQYQsVdn8f0>{20R6 zD={^pCZVXV#e&V)%Cybt=UY}{@FTVX^*>fpCBcNe#2@c*Rtl_EGWhwgz#R-q+{88% zmJrxO6(2sVho{X*>y0Pnf<3IrQdVEj2w_zjM?H|UZ!9cs!WPG`1cTnvBPCOgzc2i1 z-1bn;jV&oD30Ld2Ti1&*4uJv}*WW*Ahz9^%T%V@4KLV$!Ae2FJT6jyaKNBMPm{*D@ zQfo^=BEwVX6%nBYDIjAP9|E$sj-FA#DG3(_TSPf!T^XSj;lq`Mu~*c#Z^Ent{#_Sh z5BP9%D9^|v_QFik{xG`bT(E-5bJ&16D`Sr>@WoPGOY;Pe^+(yhO$cI_TefcX95hJq$2E3g2n*+uSin+to#}ta>sCnhu*O-Q}%7yG*H&uJ*U6pfG7-(Q>|IU zw`B_j(ktvf*X;uT{*3ta^xMdt5Y-zf!Qp})KYj|R9>h->kV@62{V_(dwg5M$>ZL2< zB!P6d|E|j5#Ec7f?=dzXQlaKmf7{l^5yGQT6TrHi)N4=)IE^bPsI$=GC=_f7jAV_d zmKN~xOb;|~fB$IzMpe#aah>|#NiU$>fpOJ1@!$rc#(*lm!ZbEUSP;tI1O-66_tX_hchn>Ux;U1O*1NwkaoyTq zx~00S^{MvL6yTU$nc{+w+ps)=(=D|3G(bc}sKRIGlVR z2A*K_&_k`w#zn47sAcIf^1Eb#krMyD#krJp2O`i?JuYowJ?tTqJ)3z}85d;|Q$mPqJsA7larGm9iaw>?M-2}~l0+2)0CzNMiw!m`s zawn2C?B+&&8+jwb!Def*_WvJez z?uylae`$IEhS$JVUxh=PfMXrG0)_l^MXGUYnz#?Q-KIHkZoQCtN7_G|0ecXC0NNt=%VDn z5tt2qGR)nt@pm_$Qgw*vNIT{!WoULKhwWg)l*8`T2 zSOUsje`oQ2{L^k0QY&9$Llas8rDYWc=eh`F1>DD(44BeA;N-LiAyKq?>N96A)A7=h zR3SNV5G*%#rPn7*oL)jCmfgEIAlcC;L(ltgxtXSs#177h0F9>SlBsg$j7zAftVD7s z7<)d5M}Zr*k#}zHUJ`rYRr0CtIngMFSMB$ZRM#bBX-HfJC+A(@@)gR$yaNu);ev)k z+7?D{Fak=UprMjO%*e`wu1JzZZEbCwf&$7Vq*4UKXa%m;MBHpW^qrr$t4nB~#MFb1 
zxQiGLB-Zy>!6O)X$?wo}K=70e4LP!Na>$GoiQAx0ipm&1X0%p9>AjSVh#t|gK#H9h zH`y@bA;U4?laP2(z85kHe!U6-j*Cd)fPY9yNs+Tl#-^r{zDdTtSiT}$DPb;P2SI~E z$|_jR&%e;UlH~R-{ZuF12wWpQG>UOHwnN(98CfMTIY&95tE(H{W@lql4uFCz&;f{CID96+q|n6z-Y1EU9p0(LFXD|l{DU9+ zl^phgTuQ69A1xVeM@E>Dhv(Z+2_ER@68HV4#aF$NF62eC!WuxL*L&q1W|WjO$jHVaVuP>t6|6STDS4M--z?_1{@@hO~=rXRz8Lt zKbR3Oqzms2#~3m|amt9#-UNh;gBoUk^wHrQCrkpO%dU}?l?68t@OAPMqQ~-lO=CqT z%N;VvPFu^4s_(sA_|42pJBqiea$EI|yN0%MuRw<;ryyb>R^tJV_cv^PB^$}8qSYru z;Z@E-rCHv}wOej?jk}Vd5F8_NSPZpy#a;y^RoxAM)`|c;eSa3t)Q(U?mCMjf)y+A$*qemPXB32m`9^Wn}avKsci%Pm4JZx3Q9GK%AXu-?8njKa=8*9=J5-_}3ps`bI`RQg#8evoSH1 z!=L&B4LM#&a;iUBR4^%5}4xNM-PDj}fnJGPH z+*jf6%@yVl$)7pP?bs27k*dAh>wN7IB(a6cwff`0b)zCc~xP1(W67x%{Mya)jHUT7kkR}Ue&r!(;3sGkI>vC^}j_RLtW1}T3`A`$goSniz`1|T=6_<2;sB?e)O?+s`um1jBSq3`__aUY0 z@^@zZ^v~JnM;!w~dxOYSaT+^?ZS!WM!8zcfO_bB{Q3|I@822M-lAa4g0LiiQ8kkMS z6-pqt-}IE>WmZ|xG0`F#ScpN zAmV9a^zPu^dK|-nc?w&Tm)Y6M(5l9MyxH(&cJpXn>ahoV)Afw7wXpP%ec@crXG51b zu!Yea?9g?r>*mn*C`iS~F2;V@^a_%l4>Yn2D@L8Qq_5D($ra1A;>Btk8$Ksd*&l z6z5UvbLWglCyzanSwX?!IMxbgMn;>#ew^E|jW$tEc^hF#letzf46g=PbzT-H4sg{Rh94jY1nTML49>zJSBy^`nDT$q0QOZM)n$)F2MC>+Bs7*>y z^hc)Tu~Td2nX*z@yE5xUY^~jv9@q)1_WIZ#+h4eU{QU0yeZQaY`}6+1-|ySf(sKFm zLeDAEf;DhFOLz-PVDM{6^!hWJL5lXZA@3X~BR)zZC2eR*1it|V#KR2BQAdF%lP*cv zc6!suo1M4keK^bf6F-bw-b?L~IZl1xKC zNXwgw5*9Ut9@pLu|{%Hrw=(+E<#{ltYfU-k4BLe(fYd;3fZqCFmE4N$|()bz#mp3P~5JZk}1 z6dLngQBh&6PTA@mot@=*`-cjb4+AvIF?32Xv%@(P7825cOQC}ge`{rGYHBb7$L*{; z?^Uag9na3TLpl>h9dwS*wV1Ta&$di&r%DydB+mC*x&ENPOH*%uov!ldD-@bwR>N*v z{}~g3MF770`GJlt4MZU%*Lk-AVrgph~!UWW~&|*eZd0ANr z_DU#-R0EUz3TePi#tSeZcVq)TN25#EZ`^2L-a>MNgsD|-bXJ}aCZN}YSh1Cb{yQ#) ze136%e2QnB1wk&e1|w^p;PyF-6)+z6iOyR`v;Gp5-~{S6GHl2xFs zibgaf{sm%H6G0o5X5^esOtTlbR#&G>i}|25ywgc0Kk%yI7hWB#m>@Xi5YkmvX~f$% zaRSYbWM!qWr3vBrPNRbBg+}#;B?CaI%<_T!Jsen)2oCWj;&Z{EAH@$EzKR|4neS`V zpR+`L^4VZ456cn(Byce~;QLr}mJ&H#Uk4f*8#goe5tE)>6M0qI{-d5YukeP8aj=Vk zD;~GQKF*L6ck~BCb}t8p0aa6=U5Qq?Qu|n?bVb{rjN-kE+&mIcldG%Um~PcPda0e| 
z>e|rre1*UN-QV|_D!($9_vTCgF99Y5G?NgNsGaL^{ghk$RJecjp+gcwNL@-2zl+V0 z>2B7DuaK@7czdZIjzD^*B(zW8`VMgx$^w{l@$AFNO29fMA(qG1d<)uk9 zoifW&q0lLi?+B=;390C>pa>rTvAV*qo1Zpn@sG#IUKRTKDw00F2-$7?^Np?w9SAAmNPT{wRXM4ezVTuY3J5Vs0WXVM+3eSL(3Vj zzAr7$UMP?*l)IV=8(?_AJz-(eJ2E=$KXNt72K50aS~FS}nKVm=BhTSKK2eeZsKs`G z7jYWPpNtF0y>C=Gi(w0`t*mtEq}Uc7?(Pbao@x*16$+S$!1h0i>Ca-m`ykh4 zZzAylQWAnQ&Du1Y#V8)^P@;&$0hr5I8;ol&)$`rT4p&mc-m8F!;g^#F_5S~J#n_0$ hS`&F~OZ*$-fzq9ihC@=e^Cb@ouSLr}ij_f${{Y_|uBHG0 literal 11515 zcmb`tbyQVR*Dk&(X^;@41qCD|q`SMjBqXH;q#Hy)x*Mbs>25??y1To(>n`7KeBUp| zxc}U7*BP9{+2icB_nvdExt{qvGgwhx0v&}A1%e=SDM?Xf@VN;7c9CGgyYQiv4)}Ox zC?g>XJ^g$AXwHv;pjVKTsF12l>Oq>bCXN(Yw>No!9-JhOkl^>MKfd`{qpG6x5?DDp zg5}wU0(6F4?i%AN^wdaBLRhG@K@>rwFDN7fMcfo57X>Cc@L@!JBq{p0PEKahlG1Jt zH~abz_xI7l+YBJsHgBY3Z3uRRxhENkA3+PJE&sWfatjs~mZPKNLA$k;m8h7Q((Pk> z3lEbQ#_sMe37$lzqT;PrVhhpo%WY(mHp+AX@AQu`4oJ;gu2Y-#;#Q*;NJ17WA?-v9GflmyK!(H+Z#HBZt{J*zxU%!6s z=y;K#D{W;})Y|GTCDl8`5TrxcV_f0y-#!%Ift;6@M@vhq%WZrxTm9|Zw|VT}l_$wE z2@JZ8PVZ`^pRgG9TjrZw9qjB*z==a__ur4Ln)34V^CKcKYo(9mw!d(fQl6u_IF;6W z+?>`sY#GiiE-r%e-_g?A+u0GYev16|t*goPYg<(?(x{QF#d7yJQYqJ$F8x8Y=<; zuluc`3@tc5KEC3jqED=Zkg>6`Y#O(kyu7@s>iG2ZbypZszp?JwUz;JkTwl#fucXb5rQX-5nE+a$0{c_(I4pFb}b6a~mT!`oCdhe3=6QASmczk?3 z0xBNxLy>-~SDPP#rj_BpL18(lrKQEp%>0GhvnP`L`}gk2)Gsdol#12b8}eV=QubC(mMfD$C7a0Pd^iur&R9Mfzei#W>R`+K z`uh6xv?dWzN({9^VPT7|>S8#gz15tmV%p@Rd9f7@>x@0jT5#By+2 zTU*<~#rVWTaBwgI0RgiJ1~DfDT^%lDkL`eHd;k7@KtO<|s*FrXKmbCp=$;rvZRgH=N;zf0JwVb46&%i*Zu&{`TiHQjauEk~$p$j~ysHl{bl=R;v-wY

qA9GW_Hfa&4uydatw}-$J^8n%@hfQ zg#v_EN}h3W)b=GX1_lJ&+}vzzZMC|dnVzh6ic&Ds(V;?RZUMoWndFei?Rjt`Atb1w zk>EJ>=MOAoZ*N~l*icduexdLyFYj617aHL9(6BJ~Xptr2QW|>tdH%?gCncrn140}t zbrN;DN@XRb5k7Pf!YC*xA3uJCp5x(lU3*--Y78Vvo!6_*zFi=GI)d!K34r@0E z2_E+HX{uBoMo>{PvIjOv*2&Sa^HIG~I>jCV0iiLfcgWwedQ*Xi z@@Ze0y^kjo@Y>g-apCp4+@FyMC+-~@!bC@(7#@DfVu}VCoZU5n6);<6-roUy4>7qM zFPB(sv)Y-OQtg3&M^k^U4(@U?j!q-7ytud+_>F>sA}yT+pHcPbceS>PzW&peT-5Q- z7Z7YEMn>el@Ol1`feF~7(4#opB6;i2XaUtgGD zQ40qh0uqw2?ls{5hQiFTj-CIE{spt4^p($?mX?-a;c{`+_w-0^mP2LkS2pJ6JJ;9O z@(N*%_=OzHlV_?L=4W3QH&^WX;@?4$>FI+8Og+Z$F#^ZN#u|b5EG%}zNqFGl;fYB| z%42z)_8|x#KLO01#^!zf=Ao>t{GNq{*)=^qePQ@FKXZ~S9xg8Ndt*sO#o@VHTj%|0-}aTn zQih>49z-ZSJRB@Wkc~h>!^g!)h$XlY4zy#fYl@_M)h)?z7jado}f9?32jDLWq>9eslx999N4CD`BVy)jyF zAW}4nG}~^e57yTFUyP`s@8iVu8r$01Iy*b>K*nI_$Pfk{H zwhG4Vqu3hZl4R50o1pl&6%-V>fFEdUYu~YgL2HcSn$2y|x6~s3FI`KnmE78ydJ785uWGZ}o$;Doszc zVH3ax4}gO0QcKu^mP0~pEN;ox47q-b$4;8x^HiVBDbZt^`_n9J-%oHARDKFVli+*n zZyfGb4VZyafF5o)UF6`i&_8(rIB|;FBqDLHpy(gItP;tFhANq zckjTJV_RBtk4ObNlffqBd4Fwp_j$9s(eu5hgM)*LN(@Nl8QUuv=L>Lr&+NipON8$) zG}aar1WgIwB5msdjP(CO9sjuo%KzNe{{X4~gC7ll`LKYfqaRYA5*MeYr4{_HBZS%G z`WU3k82r&&*zJ(NhhyX8Z@}KWi8|FDh*_(JgxbbcAaukpEG&$Hf$;|WXS*gb#?+O1 z?zUVcoBP+05E(G;KJ${=nwsY2w5rTdBb z@Awz6={P!;ThqqvuCD&k;q!zumOETtS@C_B$v{bIVs74R$>j+Hjxhl!F*&JDjIp)9 z-|w~*yv^Q|JTD|CH!wX7;`qm8s?Luni4OpIWY^VA19*`(xwx4KA zl4bV!uO=raXJuz+@O$1qJ_JUOMuSwFk+ZMyRX!gT>4$rc?ekz!04H=wNQ{h(h=_>T z*s2*w77;8#8a{Z%M`sp&J-_Hz2o*=8E2ou3>T6OZM$z`(hg zIieKplG{C`LVBzZZyvSA`CD`I)~M&3-M1tT?=$})b2-_ru=3=Fd2ccdK1Yf=IdzLu zaZE%^=c}=N1?R*cM*plqNE8vRL|XSSueLwI1l#7QClEYe-G2vA4~duaY&o0m!hCwO ziz^eo@CR)43GsVTVYmo=9=o@E@8m>8vgK3}Cp|89C+h6h9(nB^okMz?bDxhWWY*T) z&gx)Jj5fJn$)^<5Od2!26c-og#5y_HD!I%O7Jb?f`Ou$NUzwei1rSdU;o{}ZCP1A> zi_K@6)h+nd12*f0-c;;m`aDRV48ja1@uGCD2HnN0;jW;q(5}%mZLwKFW zR#{mIBWUt@uG#V_+Qen|J%N&fs-zd>>d_!3)A80lyD-+aeZ>-;W)m-$T2=~-uFvMD0kD(XB zDb-s%wxgoJi^ffY{_fz}G$+bs293_+$lTfJ)p&9}8A}PNz%dt}oScsHzOiLR(;xpx zI+@xdtc2)iKDWuqKSbP>ms(1G7LBvr5BzQG>=}oJ!mYLL_%A6T;+ZP#p<$Pmj7O{Q 
zb@pGpPg690T@tImyk26BN1VD-f4n=Y`TdGFJyM~UyVbcnKNeFC{ldShiUg0IEs6I+ zr^1nL`%A-8i%~zE2%JKb=cDme?jCimtyQ+KoFVVyT!0(?+5K#=NCwO3dTgwmwJG;4 zy^;9Cw(-o^qjKs@ef`h!5yzR`;oobFWb=2_3Y5?xqQZC2k`;aJ424^ZXw?$Wna-6TGx8&kMq(Jnu}a$3m;Vx->6^cD{L|aBzKgJ~&Vw zwK5PE1}W+(H_cD(?i`eT?8>H|;fa5NXQj`&B|ppsDXJ6EzoW*4q%HhkApWdm6f-F^ zbu;wnZ@=r0tjO}quWbjRb>brIW_T}+nBpN_Qyr5smh}SG<^FI3)lY%6OW8C(*Em17 zY*YQ5lA4b>UC69z3RHGADd{g;+q+fdrDkkeBIIs}iElm|w3g z%vq!I{dq!;R#AQCv(8bJU5D7W;vhadw@poFHgm6| z%-Uv}SL8RkmO_p=Lf2tS&}3LNedQvX$~w@bA|^J1&a|Q>9m@5qW7!y^h+c-=(Fx-?Zn){pGPP}fOvoN zW>{w@=?KTnm64!`C)SfD`yDlEa8Q@{qVus{Xh75VPzX9Ba6)^wCi`2RBDm^k)|%i3 z1~NFRcKMZHdHnPBTK*t#Nk7#bgZb|Q`X}f1C>APpc4)b2+Bu|lt}m#Og|GRG_{$1l z#rATZSfxl2W@593$6=bdwq)p-_nw_JLID3lT2&YpB5q1>kt06a)Eb{>ODQ|g4Yz@`hF&)s*nl+D5L1}xr*nPwu53khJk z{$API^n-6V0M~!pmQCb_n=2_Uar1hriGM`e&xeIzqdu|q1&HUixSjuy$wfjA2~A_A zq+_eEVj@Egc6HGg*6#Y8NQcOXCTI8-LfzNl*{PjJZM8+uiHqk8F8*m8PYJpATf@_)962&_J-u$&0{tpH z{5e|KuBBz4%=evhbx1NeB0DGFkS4!z5LxCnhx7Au%GYevES^;xxcv#xkN27mD;YfB z&^WdgMTkTx-NW5**@kfL_v&_WCXBt=jG%(hwb*I;0_@UIU3pfMF?JRTmdXRmp|FOx?$ zSfAKXdMU-o`jBP|8;gPjHN`J_4ZEi+t?JWq1O=IF_gCfOIP_@UDJaV)o%;4Lf^v6_ zKT-1P2OpxovA;!EHuJPno)~@Rv)aVCz&S%UGPb3KOk+N&yFY{rSrpR300wUb8Gcj1 zj>f!lr$+Y9elTccEyrZT@O3-PE2{UYZ3c;QwFo}Rg(y(u(<7X#g$xU15P@{?v$$ln zX`VfJXUQAOhkE&6b?R_sTwbKLj|92q}_(TgQ^6S@GGe16O zo*R0BySGl4hWbL}D?!VBbDq5TbDFC0Pdd6cFQA@Nfz*J2Yk#$MCgb&IWrrxpsBq9; zhQ!d0d2!O?;lbN8seWD4_RJqyD3G-OxE7hqPH}PA#f6`4#?}WYBEx{?>@fEXBp7DQ zBf3J2QGy#H2u)EP!Kdo$wUX&m*us#cG(5Dq#_KO{Hc-eE_$TkxWiTqgS><6KbxSA8 z>S1q9X2a@UCtM?CZm$_xvoXI`U@HIL-f~vj>iTj!pNj8u_5!sVUU3^w-%`2Vo^}J= z6P_lSuhAiXDr&j1q+Z#lbElVSLK&a-uGi>Wl~NP>8g$e(p@ZaM($Q>foCxYSoh`

&V6qBtNhzwcD9OOoAo`<{$Z0MD#B6R~fkkXtTyTTxT*)D+HLGW0so!tbA^j zD_g$ptx#51?fEg*Enj1LThUav?O;ks-RuBv6ahr4*P0x6>KjwR z8i=7mP!#Ogt@*g(gYdTpGjWqWAv)a+qKcVe=?!B#H3dUke`VOS%U~ENupNQ^!p)6`o{o*{dVK}tej{oHHKMGso!*MT zhg?M^GWs_Zx9?YN?kfwj7`Y;Bb*~)KX((tX$A=Yv*C0R;pU7*Piv4PwKww~yK`O_h z;he1zAR`qSnu>W@%+mP?0_vKQA!#`;H=g~{37KP;rVFd^NyeNp9cP9lGW-iB#ygec z-gaONzD2iN#q1>>K9a#5C4c^ITmOQu9ce!t6l?;&%Sfdt)V_#|Y;szSEKB5sWSu_S z*jO}_*;2F9LlM-n=^Ojy_9RYsRtE2d%)Wl?sFt~jEm}wr=As$&__nao?oE)Gh@p5> zdgM7+h<-jf*_bdW@uBf+faZ{se%;R8`wg&_nYJi|ws`a0K3bnb2q8(A&~ri;*bwV% zUr$4*?7H1|cHYA{M&FVS9qN)xs<9ZV#ZbMZ?jyZS5Y|-fcb@WXC(=05WW|R%D07=Q ztZ;C#xT*%7ds+K<%m=%}bEY?hPFq4y>7 z#X1@)@2JR;q5KDq)-W%RM=4^K!t$2z==8~P#Zq=r3H9;=>Dm`2xqq^)70sqEVKS6^OjLpt5^W`=zD-Vb9|u_Ol|SsqH6-sAjbj?SBB%|J>vE@EewfCOJmcqQ zCif*c)h(u>qIw1mgrN&lWThH!ZLx~D=VV@evrZ&RCL~(y6#eDBG(t;H%ie!|MYiE{ z;gdSw5+z52$8t~}m@dIl5{3Bo-iG<7Cm~Hmk#F!$gneWzd2Ou{5jruLRmtuIz&@VP z-D6`e-bVKKv#%H^`Rwb8JYN_`6Ws@ zhzaaCR))D7>mu@G58B$%$M-vb1e}?cw4P|cxH&cFea)a@WEdyC*5x@{7%D0*mcz#F zmzUcdPPV+vsPo#KzSCe#pKoAzOP|H{WjcNge`Y*Nz;HfRo=VBGw z#fqG?^aaMi(A1`{RcQDupl}0F-gHLxBf1q&d4ET6Oo1PTC1(1*^5g|zA+xeR2@+FGb@z#^%<(1eN9P4fg>Il>~ zG+-llfE<1(_A$syZer>*zg8Ik=DKPJ+uTwP897)kh5cI^pOU)jaLDQNhXk3xOKHg< zMy-k*ITys@%*E^I3ek0{4Cq8~4K2)Zq z-Y4`oy6@Z%DFtmu#Nw?NJKSVPp3>0nVx_!*;jtNR?OZuwF&o6i!h5uw+Z-Y=7de;s zE>%OqLZj--Q?o5(WCDgOTYAj4u~MLD-V=(sSMn5xylNuoGVAIl{Gb!zm1z#Tv*WgJgc@@ z{m@x0WUSTHNGa*wtqi14N12EPsN~NQ^ST)HJP#ND1ZC5)va-S>APn5EVFo)HQD6mO zw5^@ApI-ohFET1>gtF33+p>llkn>elfGVmv%X#pZh#*~P^?uq`3g>^jhxuQe3ot4c zR+J6#;zM}^V9~)?;BPvLijP-UQ=2f=r4p?bY7cns@qdS6`$q`=KQpyXH*I@}fgwNW z-Ny~215ktjDa;gbmO&*Y42(G!2n|nSlzzgQpUln6>wQr18F)Q!zt~Fe%KUiNeD~?o zr)RogdcMN86#~AMf_{>(~AL{UA{aX!sK=kYx1ec2NN{fryWfPaF5WeWfd$qiNk2qiFu!AdyjTmQIk9VSbL$Q#tu8BDt}c06er@^qaJ6JPRbsUK zxrA;~`*B6^6}D?}K`Y=b8e9yfQu39YE-$+yNK{l+*Q+TY{&K1?aiFAX0sRV1fpM#o zPOP&N{9YjX`aSpOEz=Kl7eM#|)bM&WF@Kq?*HvyAdAsY9-z%n=Ji3k!0ts3bM z(#h1Y&r_G{J|{uGpSOp}(_;sTET17DAfq5FRAdaqjA9W}(9@c3tUDdvyKZh#HY3{> 
zx$aFKFvk=j1blT>=Js=t&3{l**CxcrpSlw}`<)yX7It!CizZ8T?llrrl9g4G`Qtqk zH+RET$fhqFkcD(~bP)G24j!_w76f(8K7j{0O6Y^LjE{|lAXgvZBmA)KXX`L1ot>1; z`V5#wGsD9Wv@L$7A*868QxlFTQtxUYxBq~XVornQfl%u8_K|QpRFFw%a-9(H5pDV5 zs-6!CQ^HgUcRcq60o0=Mj zCz*}8y>yf_$t_v2RmWfu&s*Ye<^JCjRNg6kAp_-RP5@(CebKGqW<9$=3is~AIlu-EK7uuCtwR87$1!|*;%$5$U2>BIhxlKXXm7KZ=0GsN$3xvyIs>4_Dl@pt;&S#I{MhX^DCti0V7x(};w|%?YPvsGP0`JR zWqo<}g(m%-kpsV}ag%$ab*99%fRO$7zfk1;<^GOjN`v**w-ibH>j=wJUyHgK(%y>-atU*N@0yP!Iy@GO#m`x~ud{S#Q3v z4o^MTJ8g2G?jyaHSJ&$w=`YtW!^ra^FjxrrY!6J0R6_EdZLQLeW;2EP1D7o2o~3zF z8L&i&a(lcM){{bjthU~rS3lVXZGoAkQO_{H&X19PgrJhX`x>`K-Dxs|lR*Ev99$?C zZmR+x{c}cw)cH;`Z!b6-c zj3>#Qn*)^v^_J{21814Fqby6*gQt3a&D*=I4wtxFo+sL^7ZF^G&~l66z(4xi242C@ zX=->(0H-WYlH#DzNk`lZf~MWyE$(g*dhGUG|72N@Q_VVBWMZ#AU+{e1Cn2Qvj>$mg z301L_6iT`fW$mrgn??0P#*4psP-dcd?^5yhVGK{WJ69(OQ9I($R>3=V=R7BPi0+QFGn_mDeMb;(*DR{^5fg& z@lrRTTp1IwE$y*?XV^6Xu;!jcA&lVa9agvI0tB(C#X*8^qK@HV&ob&_t7g9FS+C zD|p*jo83>%43|_K=l7i!0#A1}=Nw8PV{2_)REm##xK84!5m8)n?RN6pM#}tjMuIWm zNdQr$XD9nZZxdgqYkg6pNdN-$qo{;gQ0oOWioGh4Bxl@Z`i2tL=Z~NoVchKeG^e0e zr*(bCoPF!xQ*RM@l4kFVKh1sK?nNKErRwkDz^#Bf43O2z`!c4IQeKO9b+(Ow0HQSM zIWkNi`^0MMx98iPCcfh-EpM3Npb_zoj@Ed1$B0^o#Zyb~!#NjA7+q3rKGT-NNWxP* zdUyi?EadNa4wyzD)LfP7l~KtDN8tQkL+?)V{BLH2g#LrJ`ru+8;^Wfwz4mb-&|?%EpAJ#odFhP|D}Q>Axm9#wSQMD?_8xl~WVkL0m+tNT7Ae4?hU0 z%pDl1jUxiCxFxiX=!w2?TV`-gXX+u=xte z?|TGP+VMi4cSLPE|M<2upp?hvHn!P@mnqscN~kg%EwXy#CRhhwy3c_B^T|q-Q-EiVME3 z1McH%X9tWyg+Gw1#{xWg^!e)$As60{r}SnKaeJHuGvm2NAa{E>lW;cKU4)A{zzBX& z{_Eb(E`${&vgeT?TiFy7Z~6ZtF5D^5An{NFiFl3aYD1XI774J0bm};JHop4BKCGj2 zq{mHWcYuv&$T#P?l1Gv#+3MMiQF67g%#0dBfsEo-^uf%^6dAI{wWsh8JU00Lb#JPa zhl^`rh<R01nNCsr%=$F&a%`3)d#)H=^iT%$V0z7s@mWFcU{k|_olLak%3)N zM;l(tZOILtR8vJ<{MkQh*JOR2M@|+S8#^Yt!Ffa2(;9H6!HyGZ5w?0y+S(N!TaOu~ z=Vr|RRnBwO(Kk!UnII@o6>RykfxaG?SB!hFsVCum*zOcQ2srFN{K8kT(Z$Eb8zJVl z>e~rfD0zM3Yp24UM5ZV%uP3aX=iKo{8ivv`uUZY@r<9$#@$m@>g`}0k#u^!kiK$5t z>8saPe@|NEtR3T0dV4IN?pZ7+i}V60 zC@mY*Z>4yDKKtO!kj~}c z_S>iAV#onYPg_M+cK`9=ZosknPMy)l++sBo8=ej@5Arrk%ST5?pv6FhfcG$jfb;Y6 
zLZkCr8ZrnMFNrNf`!>loxxP+MZz!I&d!UGY{@f%O9yq^?cL|_q6(lnaOiNc z>C0zlOPRiU2eX6yeV1Y+AMc`IyS>R`wO{IS@o`D=ckLeEnoE2~O0SzI7(1p$qwBgc zUzh3+^gUJ?w846Nd#8u4Z$E#mpn_Q0*g%Wa<@nAR!Ol*W;p#58+j}_NVYlshAoNVC zX>6$Qzu54^!ox~BSYO}U`&C*xWU0H<<~(V93i==O6*9c8k268%Ps8ohT~$B^IkpsN z4h5|eU0q$9o14NUo?retTu&tjd0`x741 Date: Thu, 12 Sep 2024 16:24:41 -0700 Subject: [PATCH 09/10] Adds notebook to typed state prototpye --- examples/typed-state/__init__.py | 0 examples/typed-state/application.py | 29 - examples/typed-state/notebook.ipynb | 892 +++++++++------------------- 3 files changed, 267 insertions(+), 654 deletions(-) create mode 100644 examples/typed-state/__init__.py diff --git a/examples/typed-state/__init__.py b/examples/typed-state/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/examples/typed-state/application.py b/examples/typed-state/application.py index d737de97..88f1fad8 100644 --- a/examples/typed-state/application.py +++ b/examples/typed-state/application.py @@ -1,4 +1,3 @@ -import textwrap from typing import AsyncGenerator, Generator, Optional, Tuple, Union import instructor @@ -22,10 +21,6 @@ class Concept(BaseModel): definition: str = Field(description="A brief definition or explanation of the term.") timestamp: float = Field(description="Timestamp when the concept is explained.") - def display(self): - minutes, seconds = divmod(self.timestamp, 60) - return f"{int(minutes)}:{int(seconds)} - {self.term}: {self.definition}" - class SocialMediaPost(BaseModel): """A social media post about a YouTube video generated its transcript""" @@ -51,28 +46,6 @@ class SocialMediaPost(BaseModel): ) youtube_url: SkipJsonSchema[Union[str, None]] = None - def display(self) -> str: - formatted_takeways = " ".join([t for t in self.key_takeaways]) - formatted_concepts = "CONCEPTS\n" + "\n".join([c.display() for c in self.concepts]) - link = f"link: {self.youtube_url}\n\n" if self.youtube_url else "" - - return ( - textwrap.dedent( - f"""\ - TOPIC: {self.topic} - - 
{self.hook} - - {self.body} - - {formatted_takeways} - - """ - ) - + link - + formatted_concepts - ) - class ApplicationState(BaseModel): # Make these have defaults as they are only set in actions @@ -93,8 +66,6 @@ def get_youtube_transcript(state: ApplicationState, youtube_url: str) -> Applica state.transcript = " ".join([f"ts={entry['start']} - {entry['text']}" for entry in transcript]) return state - # store the transcript in state - @action.pydantic(reads=["transcript"], writes=["post"]) def generate_post(state: ApplicationState, llm_client) -> ApplicationState: diff --git a/examples/typed-state/notebook.ipynb b/examples/typed-state/notebook.ipynb index cacaac34..bd14801e 100644 --- a/examples/typed-state/notebook.ipynb +++ b/examples/typed-state/notebook.ipynb @@ -2,290 +2,241 @@ "cells": [ { "cell_type": "markdown", + "id": "8419f04e-f945-491d-9526-6aebbabbad6a", "metadata": {}, "source": [ - "# Build trustworthy LLM agents & applications for production with Instructor + Burr\n", + "# Typed State\n", "\n", - "The challenge with large language models (LLMs) is handling the 5% of the time they say crazy things. Being able to debug why an output is bad and having tools for fixing are critical requirements for making LLM features / agents trustworthy and available to users.\n", + "In this example we're going to be using state-typing with instructor + Burr to generate social media posts from youtube videos.\n", "\n", - "In this notebook, you'll learn how `instructor` can make LLM reliability produce structured outputs, and `burr` helps you introspect and debug your application." + "First, let's define some pydantic models. Note you'll need the env var `OPENAI_API_KEY` set. 
" ] }, { - "cell_type": "markdown", - "metadata": {}, + "cell_type": "code", + "execution_count": 22, + "id": "d62b0737-683c-4748-8b55-c15402aa1b2f", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: burr[pydantic] in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (0.24.0)\n", + "Requirement already satisfied: instructor in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (1.4.1)\n", + "Requirement already satisfied: openai in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (1.44.0)\n", + "Requirement already satisfied: rich in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (13.7.1)\n", + "\u001b[33mWARNING: burr 0.24.0 does not provide the extra 'pydantic'\u001b[0m\u001b[33m\n", + "\u001b[0mRequirement already satisfied: aiohttp<4.0.0,>=3.9.1 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from instructor) (3.9.5)\n", + "Requirement already satisfied: docstring-parser<0.17,>=0.16 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from instructor) (0.16)\n", + "Requirement already satisfied: jiter<0.6.0,>=0.5.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from instructor) (0.5.0)\n", + "Requirement already satisfied: pydantic<3.0.0,>=2.8.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from instructor) (2.8.2)\n", + "Requirement already satisfied: pydantic-core<3.0.0,>=2.18.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from instructor) (2.20.1)\n", + "Requirement already satisfied: tenacity<9.0.0,>=8.4.1 in 
/Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from instructor) (8.5.0)\n", + "Requirement already satisfied: typer<1.0.0,>=0.9.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from instructor) (0.12.3)\n", + "Requirement already satisfied: anyio<5,>=3.5.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from openai) (4.4.0)\n", + "Requirement already satisfied: distro<2,>=1.7.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from openai) (1.9.0)\n", + "Requirement already satisfied: httpx<1,>=0.23.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from openai) (0.27.0)\n", + "Requirement already satisfied: sniffio in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from openai) (1.3.1)\n", + "Requirement already satisfied: tqdm>4 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from openai) (4.66.4)\n", + "Requirement already satisfied: typing-extensions<5,>=4.11 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from openai) (4.12.2)\n", + "Requirement already satisfied: markdown-it-py>=2.2.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from rich) (3.0.0)\n", + "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from rich) (2.18.0)\n", + "Requirement already satisfied: aiosignal>=1.1.2 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor) (1.3.1)\n", + "Requirement already satisfied: attrs>=17.3.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from 
aiohttp<4.0.0,>=3.9.1->instructor) (23.2.0)\n", + "Requirement already satisfied: frozenlist>=1.1.1 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor) (1.4.1)\n", + "Requirement already satisfied: multidict<7.0,>=4.5 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor) (6.0.5)\n", + "Requirement already satisfied: yarl<2.0,>=1.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from aiohttp<4.0.0,>=3.9.1->instructor) (1.9.4)\n", + "Requirement already satisfied: idna>=2.8 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from anyio<5,>=3.5.0->openai) (3.7)\n", + "Requirement already satisfied: certifi in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from httpx<1,>=0.23.0->openai) (2024.7.4)\n", + "Requirement already satisfied: httpcore==1.* in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from httpx<1,>=0.23.0->openai) (1.0.5)\n", + "Requirement already satisfied: h11<0.15,>=0.13 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from httpcore==1.*->httpx<1,>=0.23.0->openai) (0.14.0)\n", + "Requirement already satisfied: mdurl~=0.1 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from markdown-it-py>=2.2.0->rich) (0.1.2)\n", + "Requirement already satisfied: annotated-types>=0.4.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from pydantic<3.0.0,>=2.8.0->instructor) (0.7.0)\n", + "Requirement already satisfied: click>=8.0.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from typer<1.0.0,>=0.9.0->instructor) (8.1.7)\n", + "Requirement already satisfied: 
shellingham>=1.3.0 in /Users/elijahbenizzy/.pyenv/versions/3.12.0/envs/burr-3-12/lib/python3.12/site-packages (from typer<1.0.0,>=0.9.0->instructor) (1.5.4)\n", + "\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.2\u001b[0m\n", + "\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython3.12 -m pip install --upgrade pip\u001b[0m\n", + "Note: you may need to restart the kernel to use updated packages.\n" + ] + } + ], "source": [ - "## Instructor 101\n", - "Instructor is a tool to help you prompt LLM and constraint its outputs. First, you specify the desired output using a **model** with typed fields and textual descriptions; you can think of it as a template that the LLM will fill. This greatly improves the reliability of the content and format of generated text.\n", - "\n", - "To introduce Instructor, we'll write code to generate a social media post from the transcript of a YouTube video. \n", - "\n", - "> This post on the Instructor blog is also a great introduction: [Analyzing Youtube Transcripts with Instructor](https://python.useinstructor.com/blog/2024/07/11/youtube-transcripts/)." + "%pip install 'burr[pydantic]' instructor openai rich" ] }, { "cell_type": "markdown", + "id": "66fe6bbc-3c75-4ff6-9974-4d82109dc47a", "metadata": {}, "source": [ - "### 1. Define the `response_model`\n", - "\n", - "Instructor uses [Pydantic](https://docs.pydantic.dev/latest/) to create the response model. A model needs to inherit the `BaseModel` class and we use the `Field()` object to give a textual description.\n", - "\n", - "- `Field()` objects allow to specify constraints to the generated output. For instance, we want \"1 to 3 concepts\" and \"1 to 4 key takeaways\" generated per `SocialMediaPost`\n", - "- Notice that you can nest models. 
Indeed, `SocialMediaPost.concepts` is a list of `Concept` models.\n", - "- We use `SkipJsonSchema` on the `youtube_url` field to specify that this shouldn't be generated by the LLM. Instead, we'll manually pass it.\n", - "\n", - "Tip: We're adding a `.display()` method to format the text to be more easily human-readable. " + "# Imports/setup" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 26, + "id": "c3da83a6-0047-4599-aa0c-c4d7a0cc7e78", "metadata": {}, "outputs": [], "source": [ - "import textwrap\n", - "from typing import Union\n", + "from typing import AsyncGenerator, Generator, Optional, Tuple, Union\n", "\n", + "import instructor\n", + "import openai\n", "from pydantic import BaseModel, Field\n", "from pydantic.json_schema import SkipJsonSchema\n", + "from rich.console import Console\n", + "from youtube_transcript_api import YouTubeTranscriptApi\n", "\n", - "class Concept(BaseModel):\n", - " term: str = Field(description=\"A key term or concept mentioned.\")\n", - " definition: str = Field(description=\"A brief definition or explanation of the term.\")\n", - " timestamp: float = Field(description=\"Timestamp when the concept is explained.\")\n", - "\n", - " def display(self):\n", - " minutes, seconds = divmod(self.timestamp, 60)\n", - " return f\"{int(minutes)}:{int(seconds)} - {self.term}: {self.definition}\"\n", - "\n", - "\n", - "class SocialMediaPost(BaseModel):\n", - " \"\"\"A social media post about a YouTube video generated its transcript\"\"\"\n", - "\n", - " topic: str = Field(description=\"Main topic discussed.\")\n", - " hook: str = Field(description=\"Statement to grab the attention of the reader and announce the topic.\")\n", - " body: str = Field(description=\"The body of the social media post. 
It should be informative and make the reader curious about viewing the video.\")\n", - " concepts: list[Concept] = Field(\n", - " description=\"Important concepts about Hamilton or Burr mentioned in this post.\",\n", - " min_items=1,\n", - " max_items=3,\n", - " )\n", - " key_takeaways: list[str] = Field(\n", - " description=\"A list of informative key takeways for the reader.\",\n", - " min_items=1,\n", - " max_items=4,\n", - " )\n", - " youtube_url: SkipJsonSchema[Union[str, None]] = None\n", - "\n", - " def display(self) -> str:\n", - " formatted_takeways = \" \".join([t for t in self.key_takeaways])\n", - " formatted_concepts = \"CONCEPTS\\n\" + \"\\n\".join([c.display() for c in self.concepts])\n", - " link = f\"link: {self.youtube_url}\\n\\n\" if self.youtube_url else \"\"\n", - "\n", - " return textwrap.dedent(\n", - " f\"\"\"\\\n", - " TOPIC: {self.topic}\n", - "\n", - " {self.hook}\n", - "\n", - " {self.body}\n", - "\n", - " {formatted_takeways}\n", - "\n", - " \"\"\"\n", - " ) + link + formatted_concepts" + "from burr.core import Application, ApplicationBuilder, action\n", + "from burr.core.action import (\n", + " AsyncStreamingResultContainer,\n", + " StreamingResultContainer,\n", + " streaming_action,\n", + ")\n", + "from burr.integrations.pydantic import PydanticTypingSystem\n", + "import json\n", + "import time\n", + "from rich import print_json\n", + "from IPython.display import clear_output" ] }, { "cell_type": "markdown", + "id": "1c5d6d06-3a82-44df-b4fe-82c95b3d3d05", "metadata": {}, "source": [ - "### 2. Write the application logic\n", + "# Constructs\n", "\n", - "Instructor is not opiniated about how you write your application; it's only in contact with your LLM client. Here, we write a script in a few lines of code to retrieve a YouTube transcript." 
+ "Let's define some pydantic models to use -- these will help shape our application" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 11, + "id": "90258678-b33e-4e84-9a22-c7a6d29aca5d", "metadata": {}, "outputs": [], "source": [ - "from youtube_transcript_api import YouTubeTranscriptApi\n", + "class Concept(BaseModel):\n", + " term: str = Field(description=\"A key term or concept mentioned.\")\n", + " definition: str = Field(description=\"A brief definition or explanation of the term.\")\n", + " timestamp: float = Field(description=\"Timestamp when the concept is explained.\")\n", "\n", - "# get the video id from a YouTube url\n", - "youtube_url = \"https://www.youtube.com/watch?v=hqutVJyd3TI\" \n", - "_, _, video_id = youtube_url.partition(\"?v=\")\n", + "class SocialMediaPost(BaseModel):\n", + " \"\"\"A social media post about a YouTube video generated its transcript\"\"\"\n", "\n", - "# get the available YouTube transcript for the video\n", - "transcript = YouTubeTranscriptApi.get_transcript(video_id)\n", - "# join the transcript into a single block of text\n", - "full_transcript = \" \".join([f\"ts={entry['start']} - {entry['text']}\" for entry in transcript])" + " topic: str = Field(description=\"Main topic discussed.\")\n", + " hook: str = Field(\n", + " description=\"Statement to grab the attention of the reader and announce the topic.\"\n", + " )\n", + " body: str = Field(\n", + " description=\"The body of the social media post. 
It should be informative and make the reader curious about viewing the video.\"\n", + " )\n", + " concepts: list[Concept] = Field(\n", + " description=\"Important concepts about Hamilton or Burr mentioned in this post -- please have at least 1\",\n", + " min_items=0,\n", + " max_items=3,\n", + " validate_default=False,\n", + " )\n", + " key_takeaways: list[str] = Field(\n", + " description=\"A list of informative key takeways for the reader -- please have at least 1\",\n", + " min_items=0,\n", + " max_items=4,\n", + " validate_default=False,\n", + " )\n", + " youtube_url: SkipJsonSchema[Union[str, None]] = None" ] }, { "cell_type": "markdown", + "id": "03534c74-aa1f-4a1d-91c7-7c6328228c8d", "metadata": {}, "source": [ - "### Wrap the LLM client\n", - "To use Instructor, we need to wrap the OpenAI client, creating a special client. \n", + "# State Type\n", "\n", - "> NOTE: If you have the environment variable `OPENAI_API_KEY` set, the client will be automatically created. Otherwise, you'll need to manually pass the key to `OpenAI(api_key=...)`." + "Using those, we'll define a core pydantic model that sets up the central schema for our application. Note these are optional, they won't be set when our application starts!" 
] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 12, + "id": "5158c112-f054-4881-89dd-12ca7f1e0e79", "metadata": {}, "outputs": [], "source": [ - "import instructor\n", - "from openai import OpenAI\n", - "\n", - "llm_client = instructor.from_openai(OpenAI())" + "class ApplicationState(BaseModel):\n", + " # Make these have defaults as they are only set in actions\n", + " transcript: Optional[str] = Field(\n", + " description=\"The full transcript of the YouTube video.\", default=None\n", + " )\n", + " post: Optional[SocialMediaPost] = Field(\n", + " description=\"The generated social media post.\", default=None\n", + " )" ] }, { "cell_type": "markdown", + "id": "f8e0d745-6561-4331-9a5d-86948fee706d", "metadata": {}, "source": [ - "#### Use the LLM client with the `response_model`\n", + "# Write an action to transcribe a youtube URL\n", "\n", - "1. Use the LLM client with `.create` to call the LLM API\n", - "2. Pass `SocialMediaPost` as the response model, enabling structured outputs.\n", - "3. The `messages` include the `system` message with the task instruction for the LLM and\n", - "the `user` message with the input content. " + "Note we take in a youtube URL + the state in the format we want, and write to `transcript`. We actually read nothing, as the transcript is an input.\n", + "Different than normal Burr, we actually mutate the model we send in (this allows us to leverage pydantic validation)." 
] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 16, + "id": "df35e278-3578-4611-a9de-c3948911ff70", "metadata": {}, "outputs": [], "source": [ - "response = llm_client.chat.completions.create(\n", - " model=\"gpt-4o-mini\",\n", - " response_model=SocialMediaPost,\n", - " messages=[\n", - " {\n", - " \"role\": \"system\",\n", - " \"content\": \"Analyze the given YouTube transcript and generate a compelling social media post.\",\n", - " },\n", - " {\"role\": \"user\", \"content\": full_transcript},\n", - " ],\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`response` will have the type of the provided `response_model`, `SocialMediaPost` in this case. You can use `Model.model_dump()` to get a Python dictionary." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "{'topic': 'Understanding B: A Powerful Tool for Agent Applications', 'hook': '🚀 Debug with ease using B!', 'body': 'Ever faced frustrating issues while building agent applications? Dive into our latest video where we unveil the capabilities of B, a framework designed to enhance observability and debugging. Learn how to fix bugs without restarting your entire agent and explore B’s unique graph-building capabilities for a seamless coding experience. 
Perfect for developers looking to streamline their coding processes!', 'concepts': [{'term': 'Agent Applications', 'definition': 'Applications that utilize AI agents to perform tasks or make decisions based on input and graphs of actions.', 'timestamp': 108.0}, {'term': 'Graph Building', 'definition': 'Creating a flowchart-like structure of actions and states for better process management in applications.', 'timestamp': 179.0}, {'term': 'Local Tracker', 'definition': 'A feature within B that allows interaction with the application state for tracking and debugging purposes.', 'timestamp': 260.0}], 'key_takeaways': ['B enables easy bug fixing without restarting your agent.', 'Graph-based modeling simplifies the coding process.', 'Integrate local tracking for comprehensive observability in applications.'], 'youtube_url': None}\n" - ] - } - ], - "source": [ - "print(type(response))\n", - "print(response.model_dump())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also use the `.display()` method we've defined!" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "TOPIC: Understanding B: A Powerful Tool for Agent Applications\n", - "\n", - "🚀 Debug with ease using B!\n", - "\n", - "Ever faced frustrating issues while building agent applications? Dive into our latest video where we unveil the capabilities of B, a framework designed to enhance observability and debugging. Learn how to fix bugs without restarting your entire agent and explore B’s unique graph-building capabilities for a seamless coding experience. Perfect for developers looking to streamline their coding processes!\n", - "\n", - "B enables easy bug fixing without restarting your agent. Graph-based modeling simplifies the coding process. 
Integrate local tracking for comprehensive observability in applications.\n", - "\n", - "CONCEPTS\n", - "1:48 - Agent Applications: Applications that utilize AI agents to perform tasks or make decisions based on input and graphs of actions.\n", - "2:59 - Graph Building: Creating a flowchart-like structure of actions and states for better process management in applications.\n", - "4:20 - Local Tracker: A feature within B that allows interaction with the application state for tracking and debugging purposes.\n" - ] - } - ], - "source": [ - "print(response.display())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Burr 101\n", - "Burr is a tool to build LLM applications, solving many challenges to get to production (monitoring, persistence, streaming, and more). With the concepts of \"state\" and \"action\", you can define complex apps that are easy-to-understand and debug.\n", + "@action.pydantic(reads=[], writes=[\"transcript\"])\n", + "def get_youtube_transcript(state: ApplicationState, youtube_url: str) -> ApplicationState:\n", + " \"\"\"Get the official YouTube transcript for a video given its URL\"\"\"\n", + " _, _, video_id = youtube_url.partition(\"?v=\")\n", "\n", - "To show this, we'll rewrite the previous application logic with Burr." + " transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=[\"en\"])\n", + " state.transcript = \" \".join([f\"ts={entry['start']} - {entry['text']}\" for entry in transcript])\n", + " return state" ] }, { "cell_type": "markdown", + "id": "f5dbcaf5-5471-44b0-82e3-c4b32f01a115", "metadata": {}, "source": [ - "### 1. Define `actions`\n", + "# Write an action to stream back pydantic models\n", "\n", - "First, you need to define the different actions your agent can take. This is done by writing Python functions with the `@action` decorator. The decorator must specify the information that can be read from state. 
Also, the function needs to take a `State` object as first argument and return a `State` object." + "We specify the state input type, state output type, and the stream type, streaming it all back using [instructors streaming capability](https://python.useinstructor.com/concepts/partial/)" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 18, + "id": "4737d8b1-e1c1-472c-bc01-a47d328797d8", "metadata": {}, "outputs": [], "source": [ - "from burr.core import State, action\n", - "\n", - "\n", - "@action(reads=[], writes=[\"transcript\"])\n", - "def get_youtube_transcript(state: State, youtube_url: str) -> State:\n", - " \"\"\"Get the official YouTube transcript for a video given it's URL\"\"\"\n", - " _, _, video_id = youtube_url.partition(\"?v=\")\n", - " \n", - " transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=[\"en\"])\n", - " full_transcript = \" \".join([f\"ts={entry['start']} - {entry['text']}\" for entry in transcript])\n", - "\n", - " # store the transcript in state\n", - " return state.update(transcript=full_transcript, youtube_url=youtube_url)\n", - "\n", - "\n", - "@action(reads=[\"transcript\"], writes=[\"post\"])\n", - "def generate_post(state: State, llm_client) -> State:\n", - " \"\"\"Use the Instructor LLM client to generate `SocialMediaPost` from the YouTube transcript.\"\"\"\n", - "\n", - " # read the transcript from state\n", - " transcript = state[\"transcript\"]\n", + "@streaming_action.pydantic(\n", + " reads=[\"transcript\"],\n", + " writes=[\"post\"],\n", + " state_input_type=ApplicationState,\n", + " state_output_type=ApplicationState,\n", + " stream_type=SocialMediaPost,\n", + ")\n", + "def generate_post(\n", + " state: ApplicationState, llm_client\n", + ") -> Generator[Tuple[SocialMediaPost, Optional[ApplicationState]], None, None]:\n", "\n", - " response = llm_client.chat.completions.create(\n", + " transcript = state.transcript\n", + " response = llm_client.chat.completions.create_partial(\n", " 
model=\"gpt-4o-mini\",\n", " response_model=SocialMediaPost,\n", " messages=[\n", @@ -295,29 +246,30 @@ " },\n", " {\"role\": \"user\", \"content\": transcript},\n", " ],\n", + " stream=True,\n", " )\n", + " final_post: SocialMediaPost = None # type: ignore\n", + " for post in response:\n", + " final_post = post\n", + " yield post, None\n", "\n", - " # add the youtube_url found in state to the SocialMediaPost\n", - " response.youtube_url = state[\"youtube_url\"]\n", - "\n", - " # store the chapters in state\n", - " return state.update(post=response)" + " yield final_post, state" ] }, { "cell_type": "markdown", + "id": "968daa7a-c83f-4890-b775-513dbc347068", "metadata": {}, "source": [ - "### 2. Assemble the `Application`\n", - "\n", - "To create a Burr agent, we need to assemble the `actions` into an `Application`. This requires specifying the valid `transitions` between actions using tuples of action names `(from, to)` and defining an `entrypoint` from where to begin execution. Then, we can visualize the graph of possible states and actions.\n", + "# Wire together in an application\n", "\n", - "Notice that we create the Instructor LLM client *outside* the application and pass it to the `generate_chapters` action via the `.bind()` method. This method follows the same logic as the standard library `functools.partial()`." 
+ "We specify the application to have type `ApplicationState` as the state, and pass it an initial value" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 24, + "id": "11bdf44a-5ff2-4cd3-87b9-175eb68de8e4", "metadata": {}, "outputs": [ { @@ -326,151 +278,64 @@ "\n", "\n", - "\n", - "\n", - "\n", - "\n", - "%3\n", - "\n", + "\n", + "\n", + "\n", + "\n", "\n", "\n", "get_youtube_transcript\n", - "\n", - "get_youtube_transcript\n", + "\n", + "get_youtube_transcript\n", "\n", "\n", "\n", "generate_post\n", - "\n", - "generate_post\n", + "\n", + "generate_post\n", "\n", "\n", "\n", "get_youtube_transcript->generate_post\n", - "\n", - "\n", + "\n", + "\n", "\n", "\n", "\n", "input__youtube_url\n", - "\n", - "input: youtube_url\n", + "\n", + "input: youtube_url\n", "\n", "\n", "\n", "input__youtube_url->get_youtube_transcript\n", - "\n", - "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "generate_post->get_youtube_transcript\n", + "\n", + "\n", "\n", "\n", "\n" ], "text/plain": [ - "" + "" ] }, - "execution_count": 10, + "execution_count": 24, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from burr.core import ApplicationBuilder\n", - "\n", - "application = (\n", - " ApplicationBuilder()\n", - " .with_actions(\n", - " get_youtube_transcript,\n", - " generate_post.bind(llm_client=llm_client),\n", - " )\n", - " .with_transitions(\n", - " (\"get_youtube_transcript\", \"generate_post\"),\n", - " )\n", - " .with_entrypoint(\"get_youtube_transcript\")\n", - " .build()\n", - ")\n", - "application.visualize()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 3. Launch the application\n", - "\n", - "Using `application.run()` will make our application iterate through actions and state until it hits a `halt` condition. Here, we will simply halt after completing the `generate_post` action. This will return a tuple of (the last action take, the result of the last action, the state of the app). 
We also need to pass a `youtube_url` since it's a required input to the `get_youtube_transcript` action." - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "TOPIC: Debugging with Burr and Graph States\n", - "\n", - "Ever struggled with debugging your AI agents?\n", - "\n", - "Dive into the world of Burr with our latest video! Learn how to leverage state tracking and condition-based flowcharts to handle failures and streamline debugging like never before. Discover how you can monitor your agents, and even resume operations from a specific point in their execution—without starting over! 🚀 Whether you’re a developer building complex applications or simply curious about AI functionality, this video will enhance your understanding of agent workflows. Don't miss out!\n", - "\n", - "Learn how to handle failures in AI agent applications effectively. Understand the relationship between actions and state in your applications. 
Discover how to resume operations from a specific state without restarting the entire application.\n", - "\n", - "link: https://www.youtube.com/watch?v=hqutVJyd3TI\n", - "\n", - "CONCEPTS\n", - "10:57 - State Tracking: State tracking allows you to monitor the state of your application at different points in time, making debugging easier.\n", - "1:50 - Flowchart: In this context, a flowchart represents the actions and their dependencies in your agent application as a directed graph.\n", - "3:36 - Modality: The condition that determines which action to take next in the flow of your agent’s tasks.\n" - ] - } - ], - "source": [ - "last_action, result, state = application.run(\n", - " halt_after=[\"generate_post\"],\n", - " inputs={\"youtube_url\": \"https://www.youtube.com/watch?v=hqutVJyd3TI\"},\n", - ")\n", - "\n", - "# print `post` stored in state\n", - "print(state[\"post\"].display())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Why create a Burr application?\n", - "\n", - "In a few lines of code, you can query an LLM API and can create powerful productivity utilities. However, user-facing features deserve much more scrutiny, which requires tooling and solving complex engineering problems.\n", - "\n", - "Building our app with Burr provides several benefits that we'll detail next:\n", - "- **Observability**: monitor in real-time and log the execution of your `Application` and view it in Burr's web user interface.\n", - "- **Persistence**: At any point, you can save the application `State`. This allows to create user sessions (e.g., the conversation history menu in ChatGPT), which helps developers investigate bugs and test potential solutions.\n", - "- **Portability**: your `Application` can run in a notebook, as a script, as a web service, or anywhere Python runs. We'll show how to use Burr with [FastAPI](https://fastapi.tiangolo.com/)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1. 
Observability\n", - "\n", - "Add the clause `.with_tracker(project=...)` to the `ApplicationBuilder()` to track execution. \n" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "from burr.core import ApplicationBuilder\n", - "\n", - "application = (\n", + "llm_client = instructor.from_openai(openai.OpenAI())\n", + "app = (\n", " ApplicationBuilder()\n", " .with_actions(\n", " get_youtube_transcript,\n", @@ -478,337 +343,114 @@ " )\n", " .with_transitions(\n", " (\"get_youtube_transcript\", \"generate_post\"),\n", + " (\"generate_post\", \"get_youtube_transcript\"),\n", " )\n", " .with_entrypoint(\"get_youtube_transcript\")\n", + " .with_typing(PydanticTypingSystem(ApplicationState))\n", + " .with_state(ApplicationState())\n", " .with_tracker(project=\"youtube-post\")\n", " .build()\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "last_action, result, state = application.run(\n", - " halt_after=[\"generate_post\"],\n", - " inputs={\"youtube_url\": \"https://www.youtube.com/watch?v=hqutVJyd3TI\"},\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then, you can launch the web UI via the CLI command `burr`" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2. Persistence\n", - "\n", - "To showcase this feature, we'll add a `rewrite()` action. It sends to the LLM the social media post and a user input to tweak its content. 
" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "@action(reads=[\"post\"], writes=[\"post\"])\n", - "def rewrite_post(state: State, llm_client, user_prompt: str):\n", - " post = state[\"post\"]\n", - "\n", - " response = llm_client.chat.completions.create(\n", - " model=\"gpt-4o-mini\",\n", - " response_model=SocialMediaPost,\n", - " messages=[\n", - " {\n", - " \"role\": \"system\",\n", - " \"content\": f\"Take the previously generated social media post and modify it according to the following instructions: {user_prompt}\",\n", - " },\n", - " {\"role\": \"user\", \"content\": post.model_dump_json()},\n", - " ],\n", - " )\n", - "\n", - " # pass the youtube_url from the previous post version\n", - " response.youtube_url = post.youtube_url\n", - "\n", - " return state.update(post=response)" + ")\n", + "# in case we want to access the state\n", + "assert isinstance(app.state.data, ApplicationState)\n", + "app" ] }, { "cell_type": "markdown", + "id": "29eb49bc-f1c0-4172-831d-bd748ac90548", "metadata": {}, "source": [ - "By adding the transition `(\"rewrite_post\", \"rewrite_post\")`, we are introducing a graph cycle. Observability and persistence becomes particularly valuable to ensure that the LLM doesn't spiral into non-sense. If that's the case, it could be due to the prompts / instructions in the application code, but also user inputs.\n", + "# Run it!\n", "\n", - "We also add a `.with_persister()` clause to store our results." + "Now we can run it!" 
] }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 32, + "id": "ba1486aa-f79f-4e02-959c-ffc0ba58b6ac", "metadata": {}, "outputs": [ { "data": { - "image/svg+xml": [ - "\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "%3\n", - "\n", - "\n", - "\n", - "get_youtube_transcript\n", - "\n", - "get_youtube_transcript\n", - "\n", - "\n", - "\n", - "generate_post\n", - "\n", - "generate_post\n", - "\n", - "\n", - "\n", - "get_youtube_transcript->generate_post\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "input__youtube_url\n", - "\n", - "input: youtube_url\n", - "\n", - "\n", - "\n", - "input__youtube_url->get_youtube_transcript\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "rewrite_post\n", - "\n", - "rewrite_post\n", - "\n", - "\n", - "\n", - "generate_post->rewrite_post\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "rewrite_post->rewrite_post\n", - "\n", - "\n", - "\n", - "\n", - "\n", - "input__user_prompt\n", - "\n", - "input: user_prompt\n", - "\n", - "\n", - "\n", - "input__user_prompt->rewrite_post\n", - "\n", - "\n", - "\n", - "\n", - "\n" + "text/html": [ + "

{\n",
+       "  \"topic\": \"Burr Framework Overview\",\n",
+       "  \"hook\": \"Ever faced challenges while debugging AI applications? Here's a solution!\",\n",
+       "  \"body\": \"Dive into the world of agent applications with Burr! In this quick overview, we explore how Burr helps you debug failing AI calls and track state effectively. Learn to build a graph that connects actions and states, allowing you to resume your application exactly where you left off. Whether it’s fixing an error mid-run or replaying past actions, Burr's observability features enhance your development workflow. Ready to optimize your debugging process? Check out the full video to unravel the potential of Burr!\",\n",
+       "  \"concepts\": [\n",
+       "    {\n",
+       "      \"term\": \"Agent Application\",\n",
+       "      \"definition\": \"A system that models states and actions to create decision-making processes.\",\n",
+       "      \"timestamp\": 105.479\n",
+       "    },\n",
+       "    {\n",
+       "      \"term\": \"State Object\",\n",
+       "      \"definition\": \"An object that holds the state information for actions to read and write during execution.\",\n",
+       "      \"timestamp\": 145.56\n",
+       "    },\n",
+       "    {\n",
+       "      \"term\": \"Graph Representation\",\n",
+       "      \"definition\": \"A structural representation of actions and their interconnections in an agent system, depicted as nodes and edges.\",\n",
+       "      \"timestamp\": 179.28\n",
+       "    }\n",
+       "  ],\n",
+       "  \"key_takeaways\": [\n",
+       "    \"Burr allows near-instantaneous debugging without restarting from scratch.\",\n",
+       "    \"The framework promotes building a stateful graph structure for actions.\",\n",
+       "    \"Use local tracking to effortlessly monitor and interact with your agent's state.\"\n",
+       "  ],\n",
+       "  \"youtube_url\": null\n",
+       "}\n",
+       "
\n" ], "text/plain": [ - "" + "\u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"topic\"\u001b[0m: \u001b[32m\"Burr Framework Overview\"\u001b[0m,\n", + " \u001b[1;34m\"hook\"\u001b[0m: \u001b[32m\"Ever faced challenges while debugging AI applications? Here's a solution!\"\u001b[0m,\n", + " \u001b[1;34m\"body\"\u001b[0m: \u001b[32m\"Dive into the world of agent applications with Burr! In this quick overview, we explore how Burr helps you debug failing AI calls and track state effectively. Learn to build a graph that connects actions and states, allowing you to resume your application exactly where you left off. Whether it’s fixing an error mid-run or replaying past actions, Burr's observability features enhance your development workflow. Ready to optimize your debugging process? Check out the full video to unravel the potential of Burr!\"\u001b[0m,\n", + " \u001b[1;34m\"concepts\"\u001b[0m: \u001b[1m[\u001b[0m\n", + " \u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"term\"\u001b[0m: \u001b[32m\"Agent Application\"\u001b[0m,\n", + " \u001b[1;34m\"definition\"\u001b[0m: \u001b[32m\"A system that models states and actions to create decision-making processes.\"\u001b[0m,\n", + " \u001b[1;34m\"timestamp\"\u001b[0m: \u001b[1;36m105.479\u001b[0m\n", + " \u001b[1m}\u001b[0m,\n", + " \u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"term\"\u001b[0m: \u001b[32m\"State Object\"\u001b[0m,\n", + " \u001b[1;34m\"definition\"\u001b[0m: \u001b[32m\"An object that holds the state information for actions to read and write during execution.\"\u001b[0m,\n", + " \u001b[1;34m\"timestamp\"\u001b[0m: \u001b[1;36m145.56\u001b[0m\n", + " \u001b[1m}\u001b[0m,\n", + " \u001b[1m{\u001b[0m\n", + " \u001b[1;34m\"term\"\u001b[0m: \u001b[32m\"Graph Representation\"\u001b[0m,\n", + " \u001b[1;34m\"definition\"\u001b[0m: \u001b[32m\"A structural representation of actions and their interconnections in an agent system, depicted as nodes and edges.\"\u001b[0m,\n", + " \u001b[1;34m\"timestamp\"\u001b[0m: 
\u001b[1;36m179.28\u001b[0m\n", + " \u001b[1m}\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[1;34m\"key_takeaways\"\u001b[0m: \u001b[1m[\u001b[0m\n", + " \u001b[32m\"Burr allows near-instantaneous debugging without restarting from scratch.\"\u001b[0m,\n", + " \u001b[32m\"The framework promotes building a stateful graph structure for actions.\"\u001b[0m,\n", + " \u001b[32m\"Use local tracking to effortlessly monitor and interact with your agent's state.\"\u001b[0m\n", + " \u001b[1m]\u001b[0m,\n", + " \u001b[1;34m\"youtube_url\"\u001b[0m: \u001b[3;35mnull\u001b[0m\n", + "\u001b[1m}\u001b[0m\n" ] }, - "execution_count": 22, "metadata": {}, - "output_type": "execute_result" + "output_type": "display_data" } ], "source": [ - "from burr.core import ApplicationBuilder\n", - "from burr.core.persistence import SQLLitePersister\n", - "\n", - "persister = SQLLitePersister(db_path=\".burr.db\", table_name=\"burr_state\")\n", - "persister.initialize() # this will create the db and table\n", - "\n", - "application = (\n", - " ApplicationBuilder()\n", - " .with_actions(\n", - " get_youtube_transcript,\n", - " generate_post.bind(llm_client=llm_client),\n", - " rewrite_post.bind(llm_client=llm_client),\n", - " )\n", - " .with_transitions(\n", - " (\"get_youtube_transcript\", \"generate_post\"),\n", - " (\"generate_post\", \"rewrite_post\"),\n", - " (\"rewrite_post\", \"rewrite_post\"),\n", - " )\n", - " .with_state_persister(persister)\n", - " .with_entrypoint(\"get_youtube_transcript\")\n", - " .with_tracker(project=\"youtube-post\")\n", - " .build()\n", - ")\n", - "application.visualize()" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "LAST ACTION : generate_post: transcript -> post \n", - "\n", - "TOPIC: Enhancing AI Agent Applications with B and Hamilton\n", - "\n", - "🚀 Dive into the world of AI agents and discover how to streamline your debugging process 
with B!\n", - "\n", - "Are you tired of constant debugging cycles in your AI projects? In our latest video, we explore B, a framework designed to help you monitor and debug agent applications efficiently. Learn how B allows you to visualize your application’s state, manage complex action flows, and even fork your state at any point to tackle bugs without restarting from scratch. Plus, discover its sister framework, Hamilton, perfect for managing data pipelines! Curious to learn how to simplify your debugging and enhance your application development? 🎥 Watch the full video now!\n", - "\n", - "Learn to visualize and debug agent applications effectively with B. Understand how to manage states and actions using a graphical approach. Explore the integration of Hamilton for complex data pipeline processes.\n", - "\n", - "link: https://www.youtube.com/watch?v=hqutVJyd3TI\n", - "\n", - "CONCEPTS\n", - "0:1 - B Framework: A framework for building and debugging AI agent applications, allowing for state tracking and action visualization.\n", - "2:25 - State Object: An object that retains the state of an application, enabling actions to read and write within that context.\n", - "2:0 - Graph Visualization: A depiction of actions and states in a flowchart-like manner to streamline application behavior and debugging.\n" - ] - } - ], - "source": [ - "# this will run \n", - "last_action, result, state = application.run(\n", + "_, streaming_container = app.stream_result(\n", " halt_after=[\"generate_post\"],\n", " inputs={\"youtube_url\": \"https://www.youtube.com/watch?v=hqutVJyd3TI\"},\n", ")\n", - "print(\"LAST ACTION :\", last_action, \"\\n\")\n", - "print(state[\"post\"].display())" - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "LAST ACTION : rewrite_post: post -> post \n", - "\n", - "TOPIC: Enhancing AI Agent Applications with B and Hamilton\n", - "\n", - "🚀 Dive 
into the world of AI agents and discover how to streamline your debugging process with B!\n", - "\n", - "Are you encountering challenges with debugging cycles in your AI projects? In our latest video, we explore B, a framework designed for monitoring and debugging agent applications effectively. With B, you can visualize your application’s state, manage complex action flows, and fork your state to tackle bugs without having to restart from scratch. Additionally, we introduce Hamilton, a framework that complements B by simplifying the management of data pipelines. If you are interested in improving your debugging process and application development, we invite you to watch the full video!\n", - "\n", - "Learn to visualize and debug agent applications effectively with B. Understand how to manage states and actions using a graphical approach. Explore the integration of Hamilton for complex data pipeline processes.\n", - "\n", - "link: https://www.youtube.com/watch?v=hqutVJyd3TI\n", - "\n", - "CONCEPTS\n", - "0:1 - B Framework: A framework for building and debugging AI agent applications, allowing for state tracking and action visualization.\n", - "2:25 - State Object: An object that retains the state of an application, enabling actions to read and write within that context.\n", - "2:0 - Graph Visualization: A depiction of actions and states in a flowchart-like manner to streamline application behavior and debugging.\n" - ] - } - ], - "source": [ - "last_action, result, state = application.step(\n", - " inputs={\"user_prompt\": \"Adopt a professional tone that avoids incredible claims. Stay close to the facts, but demonstrate enthusiasm\"},\n", - ")\n", - "\n", - "print(\"LAST ACTION :\", last_action, \"\\n\")\n", - "print(state[\"post\"].display())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Then, you can retrieve the store app ids and resume from there. This requires adding a `.initialize_from()` with the `persister`. 
Also, we're getting the `app_id` from storage via `persister.list_app_ids()`. By passing it to the `.with_identifiers(app_id=...)`, this \"reloaded application\" will logged and tracked in succession of the original app (as you can see in the Burr UI).\n", - "\n", - "Below, we're building a new `Application` from the persisted state. When we print the `State`, we're able to retrieve the previously generated (you can see the matching titles)!" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Enhancing AI Agent Applications with B and Hamilton\n" - ] - } - ], - "source": [ - "app_id = persister.list_app_ids(\"\")[0]\n", - "\n", - "reloaded_app = (\n", - " ApplicationBuilder()\n", - " .with_actions(\n", - " get_youtube_transcript,\n", - " generate_post.bind(llm_client=llm_client),\n", - " rewrite_post.bind(llm_client=llm_client),\n", - " )\n", - " .with_transitions(\n", - " (\"get_youtube_transcript\", \"generate_post\"),\n", - " (\"generate_post\", \"rewrite_post\"),\n", - " (\"rewrite_post\", \"rewrite_post\"),\n", - " )\n", - " .initialize_from(\n", - " persister,\n", - " resume_at_next_action=True,\n", - " default_state={},\n", - " default_entrypoint=\"get_youtube_transcript\",\n", - " )\n", - " .with_state_persister(persister)\n", - " .with_identifiers(app_id=app_id)\n", - " .with_tracker(project=\"youtube-post\")\n", - " .build()\n", - ")\n", - "\n", - "print(reloaded_app.state[\"post\"].topic)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Persistence is great for iterative development such as tuning your [Instructor model with validators and constraints](https://python.useinstructor.com/concepts/reask_validation/), but it's also a powerful tool for building [test cases and guard rails](https://burr.dagworks.io/examples/guardrails/creating_tests/). 
The CLI command `burr-test-case` can generate a `pytest.fixture` to resume your app from a given state." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 3. Portability\n", - "\n", - "In the GitHub repository, you can find the same Burr `Application` defined in `application.py`, which can be executed via `python application.py`. Also, we provide a boilerplate FastAPI application in `server.py` which imports the `Application` defined in `application.py`" + "for post in streaming_container:\n", + " assert isinstance(post, SocialMediaPost)\n", + " clear_output(wait=True)\n", + " obj = post.model_dump()\n", + " json_str = json.dumps(obj, indent=2)\n", + " print_json(json_str)" ] } ], @@ -828,9 +470,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.9" + "version": "3.12.0" } }, "nbformat": 4, - "nbformat_minor": 4 + "nbformat_minor": 5 } From 8048275104d57db232ed41bedbd4fb4fae034a66 Mon Sep 17 00:00:00 2001 From: elijahbenizzy Date: Sun, 15 Sep 2024 12:27:31 -0700 Subject: [PATCH 10/10] Typo + wording changes for comments/docs --- examples/typed-state/README.md | 41 +++++++++++++++++----------------- pyproject.toml | 2 +- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/examples/typed-state/README.md b/examples/typed-state/README.md index 361c9fed..fde60cce 100644 --- a/examples/typed-state/README.md +++ b/examples/typed-state/README.md @@ -5,9 +5,9 @@ This example goes over how to use the typed state features in Burr with pydantic It will cover the following concepts: 1. Why you might want to use typing for your state in the first place -1. Define typing at the application level +1. IDE setup to make use of typing. 1. Defining/altering state at the action level -1. Doing so in a streaming manner +1. Defining types for streaming contexts 1. Wiring that through to a FastAPI app This README will contain snippets + link out to the code. 
@@ -39,16 +39,16 @@ Type-annotation in python allows you to read your code and get some sense of wha ## Setting up your IDE VSCode (or an editor with a similar interface) is generally optimal for this. It has -pluggable typing (e.g. pylance), which handles generics cleanly. Unfortunately pycharm +pluggable typing (e.g. pylance), which handles generics cleanly. Note: pycharm is often behind on typing support. See issues like [this](https://youtrack.jetbrains.com/issue/PY-44364) and [this](https://youtrack.jetbrains.com/issue/PY-27627/Support-explicitly-parametrized-generic-class-instantiation-syntax). -While it will still work in pycharm, you will not get some of the beter auto-completion capabilities. +While it will still work in pycharm, you will not get some of the better auto-completion capabilities until those issues are resolved. ## Defining typed state at the application level -This code for this is in [application.py](application.py). +The code for this is in [application.py](application.py). -First, define a pydantic model -- make it as recursive as you want. This will represent +First, define a pydantic model -- make it as nested/recursive as you want. This will represent your entire state. In this case, we're going to have a transcript of a youtube video that was given by the user, as well as the social media post. The high-level is here -- the rest is in the code: @@ -65,9 +65,9 @@ class ApplicationState(BaseModel): ``` Note that this should exactly model your state -- we need to make things optional, -as there are points in the state where the transcript/post will not have been assigned. +as there are points in time when running your application where we have not populated all fields in the application state. For our example the transcript/post will not have been assigned. -Next, we add it in to the application object, both the initial value and the typing system. 
+Next, we add it in to the application object, both the initial value(s) and the typing system. The typing system is what's responsible for managing the schema: ```python @@ -88,7 +88,7 @@ app = ( That ensures that application and the application's state object are parameterized on the state type. -To get the state, you can use `.data` on any state object returned by the application +To get the typed state object you defined, you can use `.data` on any state object returned by the application: ```python # just from the application @@ -101,17 +101,18 @@ print(state.data.transcript) ## Defining/altering typed state at the action level -This code for this is in [application.py](application.py). +The code for this is in [application.py](application.py). In addition to defining state centrally, we can define it at an action level. -The code is simple, but the API is slightly different from standard Burr. Rather than -using the immutable state-based API, we in-place mutate pydantic models. Don't worry, it's still immutable, you're just modifying a copy and returning it. +The code is straightforward, but the API is slightly different from standard Burr. Rather than +using the immutable state-based API, we in-place mutate pydantic models in the action function. Don't worry, the state object you are modifying is still immutable, you're just modifying a copy and returning it. -In this case, we call to `@action.pydantic`, which tells which fields to read/write to from state. It derives the classes from the function annotations, although you can also pass it the -pydantic classes as arguments to the decorator if you prefer. +In this case, we use `@action.pydantic`, which tells that we're using typed state and which fields to read/write to from state. It derives your typed state class(es) from the function annotations, although you can also pass it the pydantic classes as arguments to the decorator if you prefer. 
-Note that the reads/writes have to be a subset of the state object. In this case we use the global `ApplicationState` object as described above, although it can use a subset/compatible set of fields (or, if you elect not to use centralized state, it just has to be compatible with upstream/downstream versions). +Note that the reads/writes have to be a subset of your defined state object -- fields that exist in your pydantic model. +This is because the action will pull all its data from state -- it listens to specific sub-fields. +In this case we use the global `ApplicationState` object as described above, although it can use a subset/compatible set of fields (or, if you elect not to use centralized state, it just has to be compatible with upstream/downstream versions). Under the hood, burr will subset the state class so it only has the relevant fields (the reads/write) fields. @@ -136,9 +137,9 @@ def generate_post(state: ApplicationState, llm_client) -> ApplicationState: ## Typed state for streaming actions -This code for this is in [application.py](application.py). +The code for this is in [application.py](application.py). -For streaming actions, not only do we have to type the input/output state, but we can also type the intermediate result. +For streaming actions, not only can we type the input/output state, but we can also type the intermediate result. In this case, we just use the `SocialMediaPost` as we did in the application state. Instructor will be streaming that in as it gets created. @@ -180,8 +181,8 @@ def generate_post_streaming( yield post, state ``` -When we call out to the application we built, we have to do a little magic to get typing to work -in the IDE, but we still have the same benefits as the non-streaming approach. 
+When we call out to the application we built, we have to add a type-hint to get typing to work +in the IDE (see the line `streaming_container: StreamingResultContainer[...]`), but we still have the same benefits as the non-streaming approach. ```python app = build_streaming_application(...) # builder left out for now @@ -200,7 +201,7 @@ for post in streaming_container: ## FastAPI integration -This code for this is in [server.py](server.py). +The code for this is in [server.py](server.py). To integrate this with FastAPI is easy, and gets easier with the types cascading through. diff --git a/pyproject.toml b/pyproject.toml index 2305a50b..ceff5445 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "burr" -version = "0.30.0rc6" +version = "0.29.2" dependencies = [] # yes, there are none requires-python = ">=3.9" authors = [