From aedba240371a627a6839566a5ae7dbec0b42db4e Mon Sep 17 00:00:00 2001 From: Diwank Singh Tomer Date: Sun, 8 Sep 2024 12:48:36 -0400 Subject: [PATCH] feat: New python client Signed-off-by: Diwank Singh Tomer --- agents-api/agents_api/autogen/Chat.py | 468 ++++++++++-------- agents-api/agents_api/autogen/Docs.py | 32 +- agents-api/agents_api/autogen/Entries.py | 23 +- agents-api/agents_api/autogen/Executions.py | 49 +- agents-api/agents_api/autogen/Tasks.py | 19 +- agents-api/agents_api/autogen/Tools.py | 64 +-- .../agents_api/autogen/openapi_model.py | 4 + sdks/python/config.yaml | 191 ++++++- sdks/python/julep/__init__.py | 110 ++++ .../sdk/api/default/chat_route_generate.py | 194 ++++++++ sdks/python/julep/sdk/models/__init__.py | 166 +++++-- sdks/python/julep/sdk/models/agent.py | 15 +- sdks/python/julep/sdk/models/base_agent.py | 15 +- .../julep/sdk/models/base_chat_output.py | 24 +- ...content_type_8_item_type_0_item_type_0.py} | 24 +- ...nt_type_8_item_type_0_item_type_0_type.py} | 2 +- ...content_type_8_item_type_0_item_type_1.py} | 42 +- ...pe_8_item_type_0_item_type_1_image_url.py} | 10 +- ...nt_type_8_item_type_0_item_type_1_type.py} | 2 +- sdks/python/julep/sdk/models/chat_input.py | 432 ++++++++++++++++ ...essages_item_content_type_2_item_type_1.py | 102 ---- .../julep/sdk/models/chat_input_logit_bias.py | 56 +++ .../sdk/models/chat_input_messages_item.py | 227 +++++++++ ...ssages_item_content_type_2_item_type_0.py} | 24 +- ...s_item_content_type_2_item_type_0_type.py} | 2 +- ...essages_item_content_type_2_item_type_1.py | 102 ++++ ...m_content_type_2_item_type_1_image_url.py} | 10 +- ...s_item_content_type_2_item_type_1_type.py} | 2 +- ..._0.py => chat_input_tool_choice_type_0.py} | 2 +- .../julep/sdk/models/chat_output_chunk.py | 122 +++++ .../sdk/models/chat_output_chunk_delta.py | 228 +++++++++ ..._chunk_delta_content_type_2_item_type_0.py | 85 ++++ ...k_delta_content_type_2_item_type_0_type.py | 8 + ..._chunk_delta_content_type_2_item_type_1.py | 102 ++++ ...ta_content_type_2_item_type_1_image_url.py | 81 +++ ...k_delta_content_type_2_item_type_1_type.py | 8 + sdks/python/julep/sdk/models/chat_settings.py | 276 +++++++++++ .../sdk/models/chat_settings_logit_bias.py | 56 +++ .../julep/sdk/models/chat_token_log_prob.py | 116 +++++ .../julep/sdk/models/chosen_api_call.py | 88 ++++ .../julep/sdk/models/chosen_api_call_type.py | 8 + .../julep/sdk/models/chosen_function_call.py | 97 ++++ .../sdk/models/chosen_function_call_type.py | 8 + .../sdk/models/chosen_integration_call.py | 88 ++++ .../models/chosen_integration_call_type.py | 8 + .../julep/sdk/models/chosen_system_call.py | 88 ++++ .../sdk/models/chosen_system_call_type.py | 8 + .../julep/sdk/models/chosen_tool_call.py | 134 ----- ...hat_response.py => chunk_chat_response.py} | 43 +- ..._competion_usage.py => competion_usage.py} | 10 +- .../julep/sdk/models/create_agent_request.py | 15 +- .../julep/sdk/models/create_task_request.py | 12 +- ...i_settings.py => default_chat_settings.py} | 43 +- .../models/docs_search_route_search_body.py | 36 +- ...chat_finish_reason.py => finish_reason.py} | 2 +- .../sdk/models/hybrid_doc_search_request.py | 117 +++++ ...g.py => hybrid_doc_search_request_lang.py} | 2 +- ..._prob_response.py => log_prob_response.py} | 26 +- .../julep/sdk/models/message_chat_response.py | 174 +++++++ .../julep/sdk/models/patch_agent_request.py | 15 +- .../julep/sdk/models/patch_task_request.py | 14 +- sdks/python/julep/sdk/models/prompt_step.py | 14 +- .../route_list_response_200_results_item.py | 12 +- 
sdks/python/julep/sdk/models/search_step.py | 36 +- sdks/python/julep/sdk/models/task.py | 12 +- .../{create_tool_request.py => task_tool.py} | 23 +- ...or_update_route_create_or_update_accept.py | 3 - .../sdk/models/tasks_route_create_accept.py | 3 - ...est.py => text_only_doc_search_request.py} | 28 +- .../text_only_doc_search_request_lang.py | 8 + sdks/python/julep/sdk/models/transition.py | 159 ++++++ .../julep/sdk/models/transition_metadata.py | 56 +++ .../julep/sdk/models/transition_type.py | 16 + .../julep/sdk/models/update_agent_request.py | 15 +- .../julep/sdk/models/update_task_request.py | 12 +- .../user_docs_search_route_search_body.py | 36 +- .../sdk/models/vector_doc_search_request.py | 99 ++++ .../models/vector_doc_search_request_lang.py | 8 + sdks/python/pyproject.toml | 2 +- sdks/ts/src/api/index.ts | 18 +- .../src/api/models/Chat_BaseChatResponse.ts | 26 - sdks/ts/src/api/models/Chat_ChatInput.ts | 100 ++-- sdks/ts/src/api/models/Chat_ChatInputData.ts | 47 -- .../ts/src/api/models/Chat_ChatOutputChunk.ts | 14 +- sdks/ts/src/api/models/Chat_ChatSettings.ts | 31 +- .../src/api/models/Chat_ChunkChatResponse.ts | 23 +- .../api/models/Chat_DefaultChatSettings.ts | 19 +- .../api/models/Chat_MessageChatResponse.ts | 23 +- .../src/api/models/Chat_MultipleChatOutput.ts | 12 +- sdks/ts/src/api/models/Chat_OpenAISettings.ts | 22 - .../src/api/models/Chat_SingleChatOutput.ts | 12 +- sdks/ts/src/api/models/Chat_TokenLogProb.ts | 8 +- .../api/models/Docs_BaseDocSearchRequest.ts | 11 - .../api/models/Docs_HybridDocSearchRequest.ts | 8 +- .../models/Docs_TextOnlyDocSearchRequest.ts | 8 +- .../api/models/Docs_VectorDocSearchRequest.ts | 8 +- sdks/ts/src/api/models/Entries_BaseEntry.ts | 14 +- .../src/api/models/Executions_Transition.ts | 22 +- sdks/ts/src/api/models/Tasks_TaskTool.ts | 21 +- sdks/ts/src/api/models/Tools_ChosenApiCall.ts | 10 + .../api/models/Tools_ChosenFunctionCall.ts | 6 +- .../api/models/Tools_ChosenIntegrationCall.ts | 10 + .../src/api/models/Tools_ChosenSystemCall.ts | 10 + .../ts/src/api/models/Tools_ChosenToolCall.ts | 21 - .../src/api/models/Tools_CreateToolRequest.ts | 27 - .../src/api/schemas/$Chat_BaseChatResponse.ts | 50 -- sdks/ts/src/api/schemas/$Chat_ChatInput.ts | 300 ++++++----- .../ts/src/api/schemas/$Chat_ChatInputData.ts | 78 --- .../src/api/schemas/$Chat_ChatOutputChunk.ts | 96 ++-- sdks/ts/src/api/schemas/$Chat_ChatSettings.ts | 166 ++++--- .../api/schemas/$Chat_ChunkChatResponse.ts | 60 ++- .../api/schemas/$Chat_DefaultChatSettings.ts | 70 ++- .../api/schemas/$Chat_MessageChatResponse.ts | 76 ++- .../api/schemas/$Chat_MultipleChatOutput.ts | 16 +- .../src/api/schemas/$Chat_OpenAISettings.ts | 34 -- .../src/api/schemas/$Chat_SingleChatOutput.ts | 16 +- sdks/ts/src/api/schemas/$Chat_TokenLogProb.ts | 39 +- .../api/schemas/$Docs_BaseDocSearchRequest.ts | 19 - .../schemas/$Docs_HybridDocSearchRequest.ts | 65 +-- .../schemas/$Docs_TextOnlyDocSearchRequest.ts | 29 +- .../schemas/$Docs_VectorDocSearchRequest.ts | 43 +- sdks/ts/src/api/schemas/$Entries_BaseEntry.ts | 11 +- .../src/api/schemas/$Executions_Transition.ts | 118 +++-- sdks/ts/src/api/schemas/$Tasks_TaskTool.ts | 55 +- .../src/api/schemas/$Tools_ChosenApiCall.ts | 26 + .../api/schemas/$Tools_ChosenFunctionCall.ts | 44 +- .../schemas/$Tools_ChosenIntegrationCall.ts | 26 + .../api/schemas/$Tools_ChosenSystemCall.ts | 26 + .../src/api/schemas/$Tools_ChosenToolCall.ts | 41 -- .../api/schemas/$Tools_CreateToolRequest.ts | 48 -- sdks/ts/src/api/services/DefaultService.ts | 12 +- 
typespec/chat/models.tsp | 23 +- typespec/docs/models.tsp | 12 +- typespec/executions/models.tsp | 4 +- typespec/tasks/endpoints.tsp | 8 +- typespec/tasks/models.tsp | 4 +- typespec/tools/models.tsp | 31 +- 137 files changed, 5457 insertions(+), 1793 deletions(-) create mode 100644 sdks/python/julep/sdk/api/default/chat_route_generate.py rename sdks/python/julep/sdk/models/{base_entry_content_type_5_item_type_0_item_type_0.py => base_entry_content_type_8_item_type_0_item_type_0.py} (68%) rename sdks/python/julep/sdk/models/{base_entry_content_type_5_item_type_0_item_type_0_type.py => base_entry_content_type_8_item_type_0_item_type_0_type.py} (62%) rename sdks/python/julep/sdk/models/{base_entry_content_type_5_item_type_0_item_type_1.py => base_entry_content_type_8_item_type_0_item_type_1.py} (57%) rename sdks/python/julep/sdk/models/{base_entry_content_type_5_item_type_0_item_type_1_image_url.py => base_entry_content_type_8_item_type_0_item_type_1_image_url.py} (85%) rename sdks/python/julep/sdk/models/{base_entry_content_type_5_item_type_0_item_type_1_type.py => base_entry_content_type_8_item_type_0_item_type_1_type.py} (64%) create mode 100644 sdks/python/julep/sdk/models/chat_input.py delete mode 100644 sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_1.py create mode 100644 sdks/python/julep/sdk/models/chat_input_logit_bias.py create mode 100644 sdks/python/julep/sdk/models/chat_input_messages_item.py rename sdks/python/julep/sdk/models/{chat_input_data_messages_item_content_type_2_item_type_0.py => chat_input_messages_item_content_type_2_item_type_0.py} (62%) rename sdks/python/julep/sdk/models/{chat_input_data_messages_item_content_type_2_item_type_0_type.py => chat_input_messages_item_content_type_2_item_type_0_type.py} (60%) create mode 100644 sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_1.py rename sdks/python/julep/sdk/models/{chat_input_data_messages_item_content_type_2_item_type_1_image_url.py => chat_input_messages_item_content_type_2_item_type_1_image_url.py} (81%) rename sdks/python/julep/sdk/models/{chat_input_data_messages_item_content_type_2_item_type_1_type.py => chat_input_messages_item_content_type_2_item_type_1_type.py} (62%) rename sdks/python/julep/sdk/models/{chat_input_data_tool_choice_type_0.py => chat_input_tool_choice_type_0.py} (72%) create mode 100644 sdks/python/julep/sdk/models/chat_output_chunk.py create mode 100644 sdks/python/julep/sdk/models/chat_output_chunk_delta.py create mode 100644 sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_0.py create mode 100644 sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_0_type.py create mode 100644 sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_1.py create mode 100644 sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_1_image_url.py create mode 100644 sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_1_type.py create mode 100644 sdks/python/julep/sdk/models/chat_settings.py create mode 100644 sdks/python/julep/sdk/models/chat_settings_logit_bias.py create mode 100644 sdks/python/julep/sdk/models/chat_token_log_prob.py create mode 100644 sdks/python/julep/sdk/models/chosen_api_call.py create mode 100644 sdks/python/julep/sdk/models/chosen_api_call_type.py create mode 100644 sdks/python/julep/sdk/models/chosen_function_call.py create mode 100644 sdks/python/julep/sdk/models/chosen_function_call_type.py create 
mode 100644 sdks/python/julep/sdk/models/chosen_integration_call.py create mode 100644 sdks/python/julep/sdk/models/chosen_integration_call_type.py create mode 100644 sdks/python/julep/sdk/models/chosen_system_call.py create mode 100644 sdks/python/julep/sdk/models/chosen_system_call_type.py delete mode 100644 sdks/python/julep/sdk/models/chosen_tool_call.py rename sdks/python/julep/sdk/models/{base_chat_response.py => chunk_chat_response.py} (69%) rename sdks/python/julep/sdk/models/{chat_competion_usage.py => competion_usage.py} (92%) rename sdks/python/julep/sdk/models/{chat_open_ai_settings.py => default_chat_settings.py} (67%) rename sdks/python/julep/sdk/models/{chat_finish_reason.py => finish_reason.py} (84%) create mode 100644 sdks/python/julep/sdk/models/hybrid_doc_search_request.py rename sdks/python/julep/sdk/models/{base_doc_search_request_lang.py => hybrid_doc_search_request_lang.py} (70%) rename sdks/python/julep/sdk/models/{chat_log_prob_response.py => log_prob_response.py} (76%) create mode 100644 sdks/python/julep/sdk/models/message_chat_response.py rename sdks/python/julep/sdk/models/{create_tool_request.py => task_tool.py} (84%) rename sdks/python/julep/sdk/models/{base_doc_search_request.py => text_only_doc_search_request.py} (64%) create mode 100644 sdks/python/julep/sdk/models/text_only_doc_search_request_lang.py create mode 100644 sdks/python/julep/sdk/models/transition.py create mode 100644 sdks/python/julep/sdk/models/transition_metadata.py create mode 100644 sdks/python/julep/sdk/models/transition_type.py create mode 100644 sdks/python/julep/sdk/models/vector_doc_search_request.py create mode 100644 sdks/python/julep/sdk/models/vector_doc_search_request_lang.py delete mode 100644 sdks/ts/src/api/models/Chat_BaseChatResponse.ts delete mode 100644 sdks/ts/src/api/models/Chat_ChatInputData.ts delete mode 100644 sdks/ts/src/api/models/Chat_OpenAISettings.ts delete mode 100644 sdks/ts/src/api/models/Docs_BaseDocSearchRequest.ts create mode 100644 sdks/ts/src/api/models/Tools_ChosenApiCall.ts create mode 100644 sdks/ts/src/api/models/Tools_ChosenIntegrationCall.ts create mode 100644 sdks/ts/src/api/models/Tools_ChosenSystemCall.ts delete mode 100644 sdks/ts/src/api/models/Tools_ChosenToolCall.ts delete mode 100644 sdks/ts/src/api/models/Tools_CreateToolRequest.ts delete mode 100644 sdks/ts/src/api/schemas/$Chat_BaseChatResponse.ts delete mode 100644 sdks/ts/src/api/schemas/$Chat_ChatInputData.ts delete mode 100644 sdks/ts/src/api/schemas/$Chat_OpenAISettings.ts delete mode 100644 sdks/ts/src/api/schemas/$Docs_BaseDocSearchRequest.ts create mode 100644 sdks/ts/src/api/schemas/$Tools_ChosenApiCall.ts create mode 100644 sdks/ts/src/api/schemas/$Tools_ChosenIntegrationCall.ts create mode 100644 sdks/ts/src/api/schemas/$Tools_ChosenSystemCall.ts delete mode 100644 sdks/ts/src/api/schemas/$Tools_ChosenToolCall.ts delete mode 100644 sdks/ts/src/api/schemas/$Tools_CreateToolRequest.ts diff --git a/agents-api/agents_api/autogen/Chat.py b/agents-api/agents_api/autogen/Chat.py index 21cefc069..e059b529f 100644 --- a/agents-api/agents_api/autogen/Chat.py +++ b/agents-api/agents_api/autogen/Chat.py @@ -11,7 +11,10 @@ from .Common import LogitBias from .Docs import DocReference from .Tools import ( - ChosenToolCall, + ChosenApiCall, + ChosenFunctionCall, + ChosenIntegrationCall, + ChosenSystemCall, NamedApiCallChoice, NamedFunctionChoice, NamedIntegrationChoice, @@ -35,29 +38,6 @@ class BaseChatOutput(BaseModel): """ -class BaseChatResponse(BaseModel): - model_config = ConfigDict( - 
populate_by_name=True, - ) - usage: CompetionUsage | None = None - """ - Usage statistics for the completion request - """ - jobs: Annotated[list[UUID], Field([], json_schema_extra={"readOnly": True})] - """ - Background job IDs that may have been spawned from this interaction. - """ - docs: Annotated[list[DocReference], Field([], json_schema_extra={"readOnly": True})] - """ - Documents referenced for this request (for citation purposes). - """ - created_at: Annotated[AwareDatetime, Field(json_schema_extra={"readOnly": True})] - """ - When this resource was created as UTC date-time - """ - id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})] - - class BaseTokenLogProb(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -70,7 +50,7 @@ class BaseTokenLogProb(BaseModel): bytes: list[int] | None = None -class ChatInputData(BaseModel): +class ChatInput(BaseModel): model_config = ConfigDict( populate_by_name=True, ) @@ -93,9 +73,90 @@ class ChatInputData(BaseModel): """ Can be one of existing tools given to the agent earlier or the ones provided in this request. """ + remember: Annotated[StrictBool, Field(False, json_schema_extra={"readOnly": True})] + """ + DISABLED: Whether this interaction should form new memories or not (will be enabled in a future release) + """ + recall: StrictBool = True + """ + Whether previous memories and docs should be recalled or not + """ + save: StrictBool = True + """ + Whether this interaction should be stored in the session history or not + """ + frequency_penalty: Annotated[float | None, Field(None, ge=-2.0, le=2.0)] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + """ + presence_penalty: Annotated[float | None, Field(None, ge=-2.0, le=2.0)] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + """ + temperature: Annotated[float | None, Field(None, ge=0.0, le=5.0)] + """ + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + """ + top_p: Annotated[float | None, Field(None, ge=0.0, le=1.0)] + """ + Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. + """ + repetition_penalty: Annotated[float | None, Field(None, ge=0.0, le=2.0)] + """ + Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + """ + length_penalty: Annotated[float | None, Field(None, ge=0.0, le=2.0)] + """ + Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize number of tokens generated. 
+ """ + min_p: Annotated[float | None, Field(None, ge=0.0, le=1.0)] + """ + Minimum probability compared to leading token to be considered + """ + model: Annotated[ + str | None, + Field( + None, + max_length=120, + pattern="^[\\p{L}\\p{Nl}\\p{Pattern_Syntax}\\p{Pattern_White_Space}]+[\\p{ID_Start}\\p{Mn}\\p{Mc}\\p{Nd}\\p{Pc}\\p{Pattern_Syntax}\\p{Pattern_White_Space}]*$", + ), + ] + """ + Identifier of the model to be used + """ + stream: StrictBool = False + """ + Indicates if the server should stream the response as it's generated + """ + stop: Annotated[list[str], Field([], max_length=4)] + """ + Up to 4 sequences where the API will stop generating further tokens. + """ + seed: Annotated[int | None, Field(None, ge=-1, le=1000)] + """ + If specified, the system will make a best effort to sample deterministically for that particular seed value + """ + max_tokens: Annotated[int | None, Field(None, ge=1)] + """ + The maximum number of tokens to generate in the chat completion + """ + logit_bias: dict[str, LogitBias] | None = None + """ + Modify the likelihood of specified tokens appearing in the completion + """ + response_format: ( + SimpleCompletionResponseFormat | SchemaCompletionResponseFormat | None + ) = None + """ + Response format (set to `json_object` to restrict output to JSON) + """ + agent: UUID | None = None + """ + Agent ID of the agent to use for this interaction. (Only applicable for multi-agent sessions) + """ -class ChatOutputChunk(BaseChatOutput): +class ChatOutputChunk(BaseModel): """ Streaming chat completion output """ @@ -103,13 +164,97 @@ class ChatOutputChunk(BaseChatOutput): model_config = ConfigDict( populate_by_name=True, ) + index: int + finish_reason: Literal["stop", "length", "content_filter", "tool_calls"] = "stop" + """ + The reason the model stopped generating tokens + """ + logprobs: LogProbResponse | None = None + """ + The log probabilities of tokens + """ delta: Delta """ The message generated by the model """ -class ChunkChatResponse(BaseChatResponse): +class ChatSettings(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + frequency_penalty: Annotated[float | None, Field(None, ge=-2.0, le=2.0)] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + """ + presence_penalty: Annotated[float | None, Field(None, ge=-2.0, le=2.0)] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + """ + temperature: Annotated[float | None, Field(None, ge=0.0, le=5.0)] + """ + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + """ + top_p: Annotated[float | None, Field(None, ge=0.0, le=1.0)] + """ + Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. + """ + repetition_penalty: Annotated[float | None, Field(None, ge=0.0, le=2.0)] + """ + Number between 0 and 2.0. 
1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + """ + length_penalty: Annotated[float | None, Field(None, ge=0.0, le=2.0)] + """ + Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize number of tokens generated. + """ + min_p: Annotated[float | None, Field(None, ge=0.0, le=1.0)] + """ + Minimum probability compared to leading token to be considered + """ + model: Annotated[ + str | None, + Field( + None, + max_length=120, + pattern="^[\\p{L}\\p{Nl}\\p{Pattern_Syntax}\\p{Pattern_White_Space}]+[\\p{ID_Start}\\p{Mn}\\p{Mc}\\p{Nd}\\p{Pc}\\p{Pattern_Syntax}\\p{Pattern_White_Space}]*$", + ), + ] + """ + Identifier of the model to be used + """ + stream: StrictBool = False + """ + Indicates if the server should stream the response as it's generated + """ + stop: Annotated[list[str], Field([], max_length=4)] + """ + Up to 4 sequences where the API will stop generating further tokens. + """ + seed: Annotated[int | None, Field(None, ge=-1, le=1000)] + """ + If specified, the system will make a best effort to sample deterministically for that particular seed value + """ + max_tokens: Annotated[int | None, Field(None, ge=1)] + """ + The maximum number of tokens to generate in the chat completion + """ + logit_bias: dict[str, LogitBias] | None = None + """ + Modify the likelihood of specified tokens appearing in the completion + """ + response_format: ( + SimpleCompletionResponseFormat | SchemaCompletionResponseFormat | None + ) = None + """ + Response format (set to `json_object` to restrict output to JSON) + """ + agent: UUID | None = None + """ + Agent ID of the agent to use for this interaction. (Only applicable for multi-agent sessions) + """ + + +class ChunkChatResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) @@ -117,6 +262,23 @@ class ChunkChatResponse(BaseChatResponse): """ The deltas generated by the model """ + usage: CompetionUsage | None = None + """ + Usage statistics for the completion request + """ + jobs: Annotated[list[UUID], Field([], json_schema_extra={"readOnly": True})] + """ + Background job IDs that may have been spawned from this interaction. + """ + docs: Annotated[list[DocReference], Field([], json_schema_extra={"readOnly": True})] + """ + Documents referenced for this request (for citation purposes). + """ + created_at: Annotated[AwareDatetime, Field(json_schema_extra={"readOnly": True})] + """ + When this resource was created as UTC date-time + """ + id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})] class CompetionUsage(BaseModel): @@ -172,6 +334,44 @@ class ContentModel(BaseModel): """ +class DefaultChatSettings(BaseModel): + """ + Default settings for the chat session (also used by the agent) + """ + + model_config = ConfigDict( + populate_by_name=True, + ) + frequency_penalty: Annotated[float | None, Field(None, ge=-2.0, le=2.0)] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + """ + presence_penalty: Annotated[float | None, Field(None, ge=-2.0, le=2.0)] + """ + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 
+ """ + temperature: Annotated[float | None, Field(None, ge=0.0, le=5.0)] + """ + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + """ + top_p: Annotated[float | None, Field(None, ge=0.0, le=1.0)] + """ + Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. + """ + repetition_penalty: Annotated[float | None, Field(None, ge=0.0, le=2.0)] + """ + Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + """ + length_penalty: Annotated[float | None, Field(None, ge=0.0, le=2.0)] + """ + Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize number of tokens generated. + """ + min_p: Annotated[float | None, Field(None, ge=0.0, le=1.0)] + """ + Minimum probability compared to leading token to be considered + """ + + class Delta(BaseModel): """ The message generated by the model @@ -264,7 +464,7 @@ class Message(BaseModel): """ -class MessageChatResponse(BaseChatResponse): +class MessageChatResponse(BaseModel): model_config = ConfigDict( populate_by_name=True, ) @@ -272,6 +472,23 @@ class MessageChatResponse(BaseChatResponse): """ The deltas generated by the model """ + usage: CompetionUsage | None = None + """ + Usage statistics for the completion request + """ + jobs: Annotated[list[UUID], Field([], json_schema_extra={"readOnly": True})] + """ + Background job IDs that may have been spawned from this interaction. + """ + docs: Annotated[list[DocReference], Field([], json_schema_extra={"readOnly": True})] + """ + Documents referenced for this request (for citation purposes). + """ + created_at: Annotated[AwareDatetime, Field(json_schema_extra={"readOnly": True})] + """ + When this resource was created as UTC date-time + """ + id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})] class MessageModel(BaseModel): @@ -299,7 +516,14 @@ class MessageModel(BaseModel): Name """ tool_calls: Annotated[ - list[ChosenToolCall] | None, Field([], json_schema_extra={"readOnly": True}) + list[ + ChosenFunctionCall + | ChosenIntegrationCall + | ChosenSystemCall + | ChosenApiCall + ] + | None, + Field([], json_schema_extra={"readOnly": True}), ] """ Tool calls generated by the model. @@ -326,28 +550,6 @@ class MultipleChatOutput(BaseChatOutput): ] -class OpenAISettings(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - frequency_penalty: Annotated[float | None, Field(None, ge=-2.0, le=2.0)] - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - """ - presence_penalty: Annotated[float | None, Field(None, ge=-2.0, le=2.0)] - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - """ - temperature: Annotated[float | None, Field(None, ge=0.0, le=5.0)] - """ - What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - """ - top_p: Annotated[float | None, Field(None, ge=0.0, le=1.0)] - """ - Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. - """ - - class SchemaCompletionResponseFormat(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -383,10 +585,16 @@ class SingleChatOutput(BaseChatOutput): message: MessageModel -class TokenLogProb(BaseTokenLogProb): +class TokenLogProb(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + token: str + logprob: float + """ + The log probability of the token + """ + bytes: list[int] | None = None top_logprobs: Annotated[ list[BaseTokenLogProb], Field(json_schema_extra={"readOnly": True}, min_length=1), @@ -394,159 +602,3 @@ class TokenLogProb(BaseTokenLogProb): """ The log probabilities of the tokens """ - - -class ChatInput(ChatInputData): - model_config = ConfigDict( - populate_by_name=True, - ) - remember: Annotated[StrictBool, Field(False, json_schema_extra={"readOnly": True})] - """ - DISABLED: Whether this interaction should form new memories or not (will be enabled in a future release) - """ - recall: StrictBool = True - """ - Whether previous memories and docs should be recalled or not - """ - save: StrictBool = True - """ - Whether this interaction should be stored in the session history or not - """ - model: Annotated[ - str | None, - Field( - None, - max_length=120, - pattern="^[\\p{L}\\p{Nl}\\p{Pattern_Syntax}\\p{Pattern_White_Space}]+[\\p{ID_Start}\\p{Mn}\\p{Mc}\\p{Nd}\\p{Pc}\\p{Pattern_Syntax}\\p{Pattern_White_Space}]*$", - ), - ] - """ - Identifier of the model to be used - """ - stream: StrictBool = False - """ - Indicates if the server should stream the response as it's generated - """ - stop: Annotated[list[str], Field([], max_length=4)] - """ - Up to 4 sequences where the API will stop generating further tokens. - """ - seed: Annotated[int | None, Field(None, ge=-1, le=1000)] - """ - If specified, the system will make a best effort to sample deterministically for that particular seed value - """ - max_tokens: Annotated[int | None, Field(None, ge=1)] - """ - The maximum number of tokens to generate in the chat completion - """ - logit_bias: dict[str, LogitBias] | None = None - """ - Modify the likelihood of specified tokens appearing in the completion - """ - response_format: ( - SimpleCompletionResponseFormat | SchemaCompletionResponseFormat | None - ) = None - """ - Response format (set to `json_object` to restrict output to JSON) - """ - agent: UUID | None = None - """ - Agent ID of the agent to use for this interaction. (Only applicable for multi-agent sessions) - """ - repetition_penalty: Annotated[float | None, Field(None, ge=0.0, le=2.0)] - """ - Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - """ - length_penalty: Annotated[float | None, Field(None, ge=0.0, le=2.0)] - """ - Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize number of tokens generated. 
- """ - min_p: Annotated[float | None, Field(None, ge=0.0, le=1.0)] - """ - Minimum probability compared to leading token to be considered - """ - frequency_penalty: Annotated[float | None, Field(None, ge=-2.0, le=2.0)] - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - """ - presence_penalty: Annotated[float | None, Field(None, ge=-2.0, le=2.0)] - """ - Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - """ - temperature: Annotated[float | None, Field(None, ge=0.0, le=5.0)] - """ - What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - """ - top_p: Annotated[float | None, Field(None, ge=0.0, le=1.0)] - """ - Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. - """ - - -class DefaultChatSettings(OpenAISettings): - """ - Default settings for the chat session (also used by the agent) - """ - - model_config = ConfigDict( - populate_by_name=True, - ) - repetition_penalty: Annotated[float | None, Field(None, ge=0.0, le=2.0)] - """ - Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - """ - length_penalty: Annotated[float | None, Field(None, ge=0.0, le=2.0)] - """ - Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize number of tokens generated. - """ - min_p: Annotated[float | None, Field(None, ge=0.0, le=1.0)] - """ - Minimum probability compared to leading token to be considered - """ - - -class ChatSettings(DefaultChatSettings): - model_config = ConfigDict( - populate_by_name=True, - ) - model: Annotated[ - str | None, - Field( - None, - max_length=120, - pattern="^[\\p{L}\\p{Nl}\\p{Pattern_Syntax}\\p{Pattern_White_Space}]+[\\p{ID_Start}\\p{Mn}\\p{Mc}\\p{Nd}\\p{Pc}\\p{Pattern_Syntax}\\p{Pattern_White_Space}]*$", - ), - ] - """ - Identifier of the model to be used - """ - stream: StrictBool = False - """ - Indicates if the server should stream the response as it's generated - """ - stop: Annotated[list[str], Field([], max_length=4)] - """ - Up to 4 sequences where the API will stop generating further tokens. - """ - seed: Annotated[int | None, Field(None, ge=-1, le=1000)] - """ - If specified, the system will make a best effort to sample deterministically for that particular seed value - """ - max_tokens: Annotated[int | None, Field(None, ge=1)] - """ - The maximum number of tokens to generate in the chat completion - """ - logit_bias: dict[str, LogitBias] | None = None - """ - Modify the likelihood of specified tokens appearing in the completion - """ - response_format: ( - SimpleCompletionResponseFormat | SchemaCompletionResponseFormat | None - ) = None - """ - Response format (set to `json_object` to restrict output to JSON) - """ - agent: UUID | None = None - """ - Agent ID of the agent to use for this interaction. 
(Only applicable for multi-agent sessions) - """ diff --git a/agents-api/agents_api/autogen/Docs.py b/agents-api/agents_api/autogen/Docs.py index a7023ddfc..dc65802ed 100644 --- a/agents-api/agents_api/autogen/Docs.py +++ b/agents-api/agents_api/autogen/Docs.py @@ -9,17 +9,6 @@ from pydantic import AwareDatetime, BaseModel, ConfigDict, Field -class BaseDocSearchRequest(BaseModel): - model_config = ConfigDict( - populate_by_name=True, - ) - limit: Annotated[int, Field(10, ge=1, le=100)] - lang: Literal["en-US"] = "en-US" - """ - The language to be used for text-only search. Support for other languages coming soon. - """ - - class CreateDocRequest(BaseModel): """ Payload for creating a doc @@ -118,10 +107,15 @@ class EmbedQueryResponse(BaseModel): """ -class HybridDocSearchRequest(BaseDocSearchRequest): +class HybridDocSearchRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + limit: Annotated[int, Field(10, ge=1, le=100)] + lang: Literal["en-US"] = "en-US" + """ + The language to be used for text-only search. Support for other languages coming soon. + """ confidence: Annotated[float, Field(0.5, ge=0.0, le=1.0)] """ The confidence cutoff level @@ -148,20 +142,30 @@ class Snippet(BaseModel): content: str -class TextOnlyDocSearchRequest(BaseDocSearchRequest): +class TextOnlyDocSearchRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + limit: Annotated[int, Field(10, ge=1, le=100)] + lang: Literal["en-US"] = "en-US" + """ + The language to be used for text-only search. Support for other languages coming soon. + """ text: str """ Text to use in the search. """ -class VectorDocSearchRequest(BaseDocSearchRequest): +class VectorDocSearchRequest(BaseModel): model_config = ConfigDict( populate_by_name=True, ) + limit: Annotated[int, Field(10, ge=1, le=100)] + lang: Literal["en-US"] = "en-US" + """ + The language to be used for text-only search. Support for other languages coming soon. 
+ """ confidence: Annotated[float, Field(0.5, ge=0.0, le=1.0)] """ The confidence cutoff level diff --git a/agents-api/agents_api/autogen/Entries.py b/agents-api/agents_api/autogen/Entries.py index 1f4286eb9..406ab8a4c 100644 --- a/agents-api/agents_api/autogen/Entries.py +++ b/agents-api/agents_api/autogen/Entries.py @@ -8,7 +8,14 @@ from pydantic import AwareDatetime, BaseModel, ConfigDict, Field, RootModel -from .Tools import ChosenToolCall, Tool, ToolResponse +from .Tools import ( + ChosenApiCall, + ChosenFunctionCall, + ChosenIntegrationCall, + ChosenSystemCall, + Tool, + ToolResponse, +) class BaseEntry(BaseModel): @@ -31,11 +38,21 @@ class BaseEntry(BaseModel): content: ( list[Content | ContentModel] | Tool - | ChosenToolCall + | ChosenFunctionCall + | ChosenIntegrationCall + | ChosenSystemCall + | ChosenApiCall | str | ToolResponse | list[ - list[Content | ContentModel] | Tool | ChosenToolCall | str | ToolResponse + list[Content | ContentModel] + | Tool + | ChosenFunctionCall + | ChosenIntegrationCall + | ChosenSystemCall + | ChosenApiCall + | str + | ToolResponse ] ) source: Literal[ diff --git a/agents-api/agents_api/autogen/Executions.py b/agents-api/agents_api/autogen/Executions.py index a4621c0ff..1e1c3e88b 100644 --- a/agents-api/agents_api/autogen/Executions.py +++ b/agents-api/agents_api/autogen/Executions.py @@ -90,6 +90,42 @@ class TaskTokenResumeExecutionRequest(BaseModel): """ +class Transition(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Annotated[ + Literal[ + "init", + "init_branch", + "finish", + "finish_branch", + "wait", + "resume", + "error", + "step", + "cancelled", + ], + Field(json_schema_extra={"readOnly": True}), + ] + output: Annotated[Any, Field(json_schema_extra={"readOnly": True})] + created_at: Annotated[AwareDatetime, Field(json_schema_extra={"readOnly": True})] + """ + When this resource was created as UTC date-time + """ + updated_at: Annotated[AwareDatetime, Field(json_schema_extra={"readOnly": True})] + """ + When this resource was updated as UTC date-time + """ + execution_id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})] + current: Annotated[TransitionTarget, Field(json_schema_extra={"readOnly": True})] + next: Annotated[ + TransitionTarget | None, Field(json_schema_extra={"readOnly": True}) + ] + id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})] + metadata: dict[str, Any] | None = None + + class TransitionEvent(BaseModel): model_config = ConfigDict( populate_by_name=True, @@ -173,16 +209,3 @@ class StopExecutionRequest(UpdateExecutionRequest): """ The reason for stopping the execution """ - - -class Transition(TransitionEvent): - model_config = ConfigDict( - populate_by_name=True, - ) - execution_id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})] - current: Annotated[TransitionTarget, Field(json_schema_extra={"readOnly": True})] - next: Annotated[ - TransitionTarget | None, Field(json_schema_extra={"readOnly": True}) - ] - id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})] - metadata: dict[str, Any] | None = None diff --git a/agents-api/agents_api/autogen/Tasks.py b/agents-api/agents_api/autogen/Tasks.py index c1e69d492..8653d81e7 100644 --- a/agents-api/agents_api/autogen/Tasks.py +++ b/agents-api/agents_api/autogen/Tasks.py @@ -15,7 +15,7 @@ TextOnlyDocSearchRequest, VectorDocSearchRequest, ) -from .Tools import CreateToolRequest +from .Tools import FunctionDef class CaseThen(BaseModel): @@ -686,7 +686,7 @@ class Task(BaseModel): metadata: dict[str, Any] 
| None = None -class TaskTool(CreateToolRequest): +class TaskTool(BaseModel): model_config = ConfigDict( populate_by_name=True, ) @@ -694,6 +694,21 @@ class TaskTool(CreateToolRequest): """ Read-only: Whether the tool was inherited or not. Only applies within tasks. """ + type: Literal["function", "integration", "system", "api_call"] = "function" + """ + Whether this tool is a `function`, `api_call`, `system` etc. (Only `function` tool supported right now)The type of the tool + """ + name: Annotated[str, Field(max_length=40, pattern="^[^\\W0-9]\\w*$")] + """ + Name of the tool (must be unique for this agent and a valid python identifier string ) + """ + function: FunctionDef + """ + The function to call + """ + integration: Any | None = None + system: Any | None = None + api_call: Any | None = None class ToolCallStep(BaseModel): diff --git a/agents-api/agents_api/autogen/Tools.py b/agents-api/agents_api/autogen/Tools.py index a45d03734..0c7d8aa05 100644 --- a/agents-api/agents_api/autogen/Tools.py +++ b/agents-api/agents_api/autogen/Tools.py @@ -9,48 +9,43 @@ from pydantic import AwareDatetime, BaseModel, ConfigDict, Field -class ChosenToolCall(BaseModel): - """ - The response tool value generated by the model - """ +class ChosenApiCall(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["api_call"] = "api_call" + api_call: Any + id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})] + +class ChosenFunctionCall(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Literal["function", "integration", "system", "api_call"] + type: Literal["function"] = "function" + function: FunctionCallOption """ - Whether this tool is a `function`, `api_call`, `system` etc. (Only `function` tool supported right now) + The function to call """ - function: FunctionCallOption | None = None - integration: Any | None = None - system: Any | None = None - api_call: Any | None = None id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})] -class CreateToolRequest(BaseModel): - """ - Payload for creating a tool - """ +class ChosenIntegrationCall(BaseModel): + model_config = ConfigDict( + populate_by_name=True, + ) + type: Literal["integration"] = "integration" + integration: Any + id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})] + +class ChosenSystemCall(BaseModel): model_config = ConfigDict( populate_by_name=True, ) - type: Literal["function", "integration", "system", "api_call"] = "function" - """ - Whether this tool is a `function`, `api_call`, `system` etc. 
(Only `function` tool supported right now)The type of the tool - """ - name: Annotated[str, Field(max_length=40, pattern="^[^\\W0-9]\\w*$")] - """ - Name of the tool (must be unique for this agent and a valid python identifier string ) - """ - function: FunctionDef - """ - The function to call - """ - integration: Any | None = None - system: Any | None = None - api_call: Any | None = None + type: Literal["system"] = "system" + system: Any + id: Annotated[UUID, Field(json_schema_extra={"readOnly": True})] class FunctionCallOption(BaseModel): @@ -212,14 +207,3 @@ class UpdateToolRequest(BaseModel): integration: Any | None = None system: Any | None = None api_call: Any | None = None - - -class ChosenFunctionCall(ChosenToolCall): - model_config = ConfigDict( - populate_by_name=True, - ) - type: Literal["function"] = "function" - function: FunctionCallOption - """ - The function to call - """ diff --git a/agents-api/agents_api/autogen/openapi_model.py b/agents-api/agents_api/autogen/openapi_model.py index 04be4c28b..d52b703b6 100644 --- a/agents-api/agents_api/autogen/openapi_model.py +++ b/agents-api/agents_api/autogen/openapi_model.py @@ -68,6 +68,10 @@ class InputChatMLMessage(Message): pass +ChosenToolCall = ( + ChosenIntegrationCall | ChosenApiCall | ChosenFunctionCall | ChosenSystemCall +) + # Custom types (not generated correctly) # -------------------------------------- diff --git a/sdks/python/config.yaml b/sdks/python/config.yaml index f8eb19b98..94588a019 100644 --- a/sdks/python/config.yaml +++ b/sdks/python/config.yaml @@ -905,4 +905,193 @@ class_overrides: ChatSimpleCompletionResponseFormatType: class_name: SimpleCompletionResponseFormatType - module_name: simple_completion_response_format_type \ No newline at end of file + module_name: simple_completion_response_format_type + + DocsHybridDocSearchRequest: + class_name: HybridDocSearchRequest + module_name: hybrid_doc_search_request + + DocsHybridDocSearchRequestLang: + class_name: HybridDocSearchRequestLang + module_name: hybrid_doc_search_request_lang + + DocsTextOnlyDocSearchRequest: + class_name: TextOnlyDocSearchRequest + module_name: text_only_doc_search_request + + DocsTextOnlyDocSearchRequestLang: + class_name: TextOnlyDocSearchRequestLang + module_name: text_only_doc_search_request_lang + + DocsVectorDocSearchRequest: + class_name: VectorDocSearchRequest + module_name: vector_doc_search_request + + DocsVectorDocSearchRequestLang: + class_name: VectorDocSearchRequestLang + module_name: vector_doc_search_request_lang + + EntriesBaseEntryContentType8ItemType0ItemType0: + class_name: BaseEntryContentType8ItemType0ItemType0 + module_name: base_entry_content_type_8_item_type_0_item_type_0 + + EntriesBaseEntryContentType8ItemType0ItemType0Type: + class_name: BaseEntryContentType8ItemType0ItemType0Type + module_name: base_entry_content_type_8_item_type_0_item_type_0_type + + EntriesBaseEntryContentType8ItemType0ItemType1: + class_name: BaseEntryContentType8ItemType0ItemType1 + module_name: base_entry_content_type_8_item_type_0_item_type_1 + + EntriesBaseEntryContentType8ItemType0ItemType1ImageUrl: + class_name: BaseEntryContentType8ItemType0ItemType1ImageUrl + module_name: base_entry_content_type_8_item_type_0_item_type_1_image_url + + EntriesBaseEntryContentType8ItemType0ItemType1Type: + class_name: BaseEntryContentType8ItemType0ItemType1Type + module_name: base_entry_content_type_8_item_type_0_item_type_1_type + + ExecutionsTransition: + class_name: Transition + module_name: transition + + ExecutionsTransitionMetadata: + 
class_name: TransitionMetadata + module_name: transition_metadata + + ExecutionsTransitionType: + class_name: TransitionType + module_name: transition_type + + # New overrides for TODO items + ChatChatInput: + class_name: ChatInput + module_name: chat_input + + ChatChatInputLogitBias: + class_name: ChatInputLogitBias + module_name: chat_input_logit_bias + + ChatChatInputMessagesItem: + class_name: ChatInputMessagesItem + module_name: chat_input_messages_item + + ChatChatInputMessagesItemContentType2ItemType0: + class_name: ChatInputMessagesItemContentType2ItemType0 + module_name: chat_input_messages_item_content_type_2_item_type_0 + + ChatChatInputMessagesItemContentType2ItemType0Type: + class_name: ChatInputMessagesItemContentType2ItemType0Type + module_name: chat_input_messages_item_content_type_2_item_type_0_type + + ChatChatInputMessagesItemContentType2ItemType1: + class_name: ChatInputMessagesItemContentType2ItemType1 + module_name: chat_input_messages_item_content_type_2_item_type_1 + + ChatChatInputMessagesItemContentType2ItemType1ImageUrl: + class_name: ChatInputMessagesItemContentType2ItemType1ImageUrl + module_name: chat_input_messages_item_content_type_2_item_type_1_image_url + + ChatChatInputMessagesItemContentType2ItemType1Type: + class_name: ChatInputMessagesItemContentType2ItemType1Type + module_name: chat_input_messages_item_content_type_2_item_type_1_type + + ChatChatInputToolChoiceType0: + class_name: ChatInputToolChoiceType0 + module_name: chat_input_tool_choice_type_0 + + ChatChatOutputChunk: + class_name: ChatOutputChunk + module_name: chat_output_chunk + + ChatChatOutputChunkDelta: + class_name: ChatOutputChunkDelta + module_name: chat_output_chunk_delta + + ChatChatOutputChunkDeltaContentType2ItemType0: + class_name: ChatOutputChunkDeltaContentType2ItemType0 + module_name: chat_output_chunk_delta_content_type_2_item_type_0 + + ChatChatOutputChunkDeltaContentType2ItemType0Type: + class_name: ChatOutputChunkDeltaContentType2ItemType0Type + module_name: chat_output_chunk_delta_content_type_2_item_type_0_type + + ChatChatOutputChunkDeltaContentType2ItemType1: + class_name: ChatOutputChunkDeltaContentType2ItemType1 + module_name: chat_output_chunk_delta_content_type_2_item_type_1 + + ChatChatOutputChunkDeltaContentType2ItemType1ImageUrl: + class_name: ChatOutputChunkDeltaContentType2ItemType1ImageUrl + module_name: chat_output_chunk_delta_content_type_2_item_type_1_image_url + + ChatChatOutputChunkDeltaContentType2ItemType1Type: + class_name: ChatOutputChunkDeltaContentType2ItemType1Type + module_name: chat_output_chunk_delta_content_type_2_item_type_1_type + + ChatChatSettings: + class_name: ChatSettings + module_name: chat_settings + + ChatChatSettingsLogitBias: + class_name: ChatSettingsLogitBias + module_name: chat_settings_logit_bias + + ChatChunkChatResponse: + class_name: ChunkChatResponse + module_name: chunk_chat_response + + ChatCompetionUsage: + class_name: CompetionUsage + module_name: competion_usage + + ChatDefaultChatSettings: + class_name: DefaultChatSettings + module_name: default_chat_settings + + ChatFinishReason: + class_name: FinishReason + module_name: finish_reason + + ChatLogProbResponse: + class_name: LogProbResponse + module_name: log_prob_response + + ChatMessageChatResponse: + class_name: MessageChatResponse + module_name: message_chat_response + + TasksTaskTool: + class_name: TaskTool + module_name: task_tool + + ToolsChosenApiCall: + class_name: ChosenApiCall + module_name: chosen_api_call + + ToolsChosenApiCallType: + class_name: 
ChosenApiCallType + module_name: chosen_api_call_type + + ToolsChosenFunctionCall: + class_name: ChosenFunctionCall + module_name: chosen_function_call + + ToolsChosenFunctionCallType: + class_name: ChosenFunctionCallType + module_name: chosen_function_call_type + + ToolsChosenIntegrationCall: + class_name: ChosenIntegrationCall + module_name: chosen_integration_call + + ToolsChosenIntegrationCallType: + class_name: ChosenIntegrationCallType + module_name: chosen_integration_call_type + + ToolsChosenSystemCall: + class_name: ChosenSystemCall + module_name: chosen_system_call + + ToolsChosenSystemCallType: + class_name: ChosenSystemCallType + module_name: chosen_system_call_type \ No newline at end of file diff --git a/sdks/python/julep/__init__.py b/sdks/python/julep/__init__.py index e69de29bb..c51793c62 100644 --- a/sdks/python/julep/__init__.py +++ b/sdks/python/julep/__init__.py @@ -0,0 +1,110 @@ +import json +import os +from functools import partial, wraps +from typing import TypeVar, Callable, Any, ParamSpec + +from .sdk.client import AuthenticatedClient +import julep.sdk.api.default as ops +from .sdk import errors + + +# Need to get all files from the ops module and import them +from importlib import import_module + +# Get all the files from the ops module +# (os.path.dirname gives the package directory; str.rstrip would strip a +# trailing character set rather than the "__init__.py" suffix) +ops_files = [ + file + for file in os.listdir(os.path.dirname(ops.__file__)) + if file.endswith(".py") and file != "__init__.py" +] + +# Import all the files and attach them as attributes of the ops module +for file in ops_files: + file_name = file[:-3] + setattr(ops, file_name, import_module(f"julep.sdk.api.default.{file_name}")) + + +T = TypeVar('T', bound=Callable[..., Any]) + +P = ParamSpec('P') +R = TypeVar('R') + +def parse_response(fn: Callable[P, R]) -> Callable[P, dict[str, Any]]: + @wraps(fn) + def wrapper(*args: P.args, **kwargs: P.kwargs) -> dict[str, Any]: + response = fn(*args, **kwargs) + if response.status_code >= 400: # client or server error (HTTPStatus is an IntEnum) + raise errors.UnexpectedStatus(response.status_code, response.content) + else: + result = json.loads(response.content) + return result # type: ignore + + return wrapper + + +class JulepNamespace: + def __init__(self, name: str, client: AuthenticatedClient): + self.name = name + self.client = client + + +class Julep: + def __init__(self, *, api_key: str, base_url: str = "https://api.julep.ai/api", **client_kwargs): + self.client = AuthenticatedClient(token=api_key, base_url=base_url, **client_kwargs) + + # Get a list of all the available operations (attributes of the ops object) + op_names: list[str] = [attr for attr in dir(ops) if not attr.startswith("_") and "route" in attr] + + # These look like: agents_route_create / agents_route_list etc. + # The conventions are: + # - The first part of the name is the resource name (agents, chat, etc.) + # - The second part of the name is the method (route_create, route_list, etc.) + # - The `_route_` prefix can be omitted.
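+ # For example: `agents_route_create` is exposed as `client.agents.create(...)` + # (with an "a"-prefixed `client.agents.acreate(...)` as the async variant), + # and the new `chat_route_generate` operation becomes `client.chat.generate(...)`.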
+ + # We want to create a method on the Julep class for each of these that proxies to the ops object + # But also ensures that the first argument (self.client) is passed through + + namespaces, operations = list(zip(*[name.split("_route_") for name in op_names])) + + # Some namespaces have aliases + # This is because the API is organized by resource, but the SDK is organized by namespace + # And we want to allow the user to use the resource name as the namespace name + namespace_aliases = { + "agents_docs_search": "agents_docs", + "user_docs_search": "user_docs", + "execution_transitions": "transitions", + "execution_transitions_stream": "transitions", + "individual_docs": "docs", + "job": "jobs", + "task_executions": "executions", + "tasks_create_or_update": "tasks", + } + + # First let's add the namespaces to the Julep class as attributes + for namespace in namespaces: + namespace = namespace_aliases.get(namespace, namespace) + if not hasattr(self, namespace): + setattr(self, namespace, JulepNamespace(namespace, self.client)) + + # Now let's add the operations to the Julep class as attributes + async_prefix = {"a": "asyncio_detailed", "": "sync_detailed"} + + for prefix, async_op in async_prefix.items(): + for namespace, operation in zip(namespaces, operations): + op = getattr(ops, f"{namespace}_route_{operation}") + op = getattr(op, async_op) + op = partial(op, client=self.client) + op = parse_response(op) + op_name = prefix + operation + + namespace = namespace_aliases.get(namespace, namespace) + ns = getattr(self, namespace) + + if not hasattr(ns, op_name): + setattr(ns, op_name, op) \ No newline at end of file diff --git a/sdks/python/julep/sdk/api/default/chat_route_generate.py b/sdks/python/julep/sdk/api/default/chat_route_generate.py new file mode 100644 index 000000000..86bb01de9 --- /dev/null +++ b/sdks/python/julep/sdk/api/default/chat_route_generate.py @@ -0,0 +1,194 @@ +from http import HTTPStatus +from typing import Any, Dict, List, Optional, Union, cast + +import httpx + +from ... 
import errors +from ...client import AuthenticatedClient, Client +from ...models.chat_input import ChatInput +from ...models.chunk_chat_response import ChunkChatResponse +from ...models.message_chat_response import MessageChatResponse +from ...types import UNSET, Response + + +def _get_kwargs( + id: str, + *, + body: ChatInput, +) -> Dict[str, Any]: + headers: Dict[str, Any] = {} + + _kwargs: Dict[str, Any] = { + "method": "post", + "url": "/sessions/{id}/chat".format( + id=id, + ), + } + + _body = body.to_dict() + + _kwargs["json"] = _body + headers["Content-Type"] = "application/json" + + _kwargs["headers"] = headers + return _kwargs + + +def _parse_response( + *, client: Union[AuthenticatedClient, Client], response: httpx.Response +) -> Optional[Union["ChunkChatResponse", "MessageChatResponse"]]: + if response.status_code == HTTPStatus.OK: + + def _parse_response_200( + data: object, + ) -> Union["ChunkChatResponse", "MessageChatResponse"]: + try: + if not isinstance(data, dict): + raise TypeError() + response_200_type_0 = ChunkChatResponse.from_dict(data) + + return response_200_type_0 + except: # noqa: E722 + pass + if not isinstance(data, dict): + raise TypeError() + response_200_type_1 = MessageChatResponse.from_dict(data) + + return response_200_type_1 + + response_200 = _parse_response_200(response.json()) + + return response_200 + if client.raise_on_unexpected_status: + raise errors.UnexpectedStatus(response.status_code, response.content) + else: + return None + + +def _build_response( + *, client: Union[AuthenticatedClient, Client], response: httpx.Response +) -> Response[Union["ChunkChatResponse", "MessageChatResponse"]]: + return Response( + status_code=HTTPStatus(response.status_code), + content=response.content, + headers=response.headers, + parsed=_parse_response(client=client, response=response), + ) + + +def sync_detailed( + id: str, + *, + client: Union[AuthenticatedClient, Client], + body: ChatInput, +) -> Response[Union["ChunkChatResponse", "MessageChatResponse"]]: + """Generate a response from the model + + Args: + id (str): + body (ChatInput): + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Response[Union['ChunkChatResponse', 'MessageChatResponse']] + """ + + kwargs = _get_kwargs( + id=id, + body=body, + ) + + response = client.get_httpx_client().request( + **kwargs, + ) + + return _build_response(client=client, response=response) + + +def sync( + id: str, + *, + client: Union[AuthenticatedClient, Client], + body: ChatInput, +) -> Optional[Union["ChunkChatResponse", "MessageChatResponse"]]: + """Generate a response from the model + + Args: + id (str): + body (ChatInput): + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. 
+ + Returns: + Union['ChunkChatResponse', 'MessageChatResponse'] + """ + + return sync_detailed( + id=id, + client=client, + body=body, + ).parsed + + +async def asyncio_detailed( + id: str, + *, + client: Union[AuthenticatedClient, Client], + body: ChatInput, +) -> Response[Union["ChunkChatResponse", "MessageChatResponse"]]: + """Generate a response from the model + + Args: + id (str): + body (ChatInput): + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Response[Union['ChunkChatResponse', 'MessageChatResponse']] + """ + + kwargs = _get_kwargs( + id=id, + body=body, + ) + + response = await client.get_async_httpx_client().request(**kwargs) + + return _build_response(client=client, response=response) + + +async def asyncio( + id: str, + *, + client: Union[AuthenticatedClient, Client], + body: ChatInput, +) -> Optional[Union["ChunkChatResponse", "MessageChatResponse"]]: + """Generate a response from the model + + Args: + id (str): + body (ChatInput): + + Raises: + errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True. + httpx.TimeoutException: If the request takes longer than Client.timeout. + + Returns: + Union['ChunkChatResponse', 'MessageChatResponse'] + """ + + return ( + await asyncio_detailed( + id=id, + client=client, + body=body, + ) + ).parsed diff --git a/sdks/python/julep/sdk/models/__init__.py b/sdks/python/julep/sdk/models/__init__.py index 14a6efce3..d44bfb6c9 100644 --- a/sdks/python/julep/sdk/models/__init__.py +++ b/sdks/python/julep/sdk/models/__init__.py @@ -11,9 +11,6 @@ from .base_agent import BaseAgent from .base_agent_metadata import BaseAgentMetadata from .base_chat_output import BaseChatOutput -from .base_chat_response import BaseChatResponse -from .base_doc_search_request import BaseDocSearchRequest -from .base_doc_search_request_lang import BaseDocSearchRequestLang from .base_entry_content_type_0_item_type_0 import BaseEntryContentType0ItemType0 from .base_entry_content_type_0_item_type_0_type import ( BaseEntryContentType0ItemType0Type, @@ -25,46 +22,74 @@ from .base_entry_content_type_0_item_type_1_type import ( BaseEntryContentType0ItemType1Type, ) -from .base_entry_content_type_5_item_type_0_item_type_0 import ( - BaseEntryContentType5ItemType0ItemType0, +from .base_entry_content_type_8_item_type_0_item_type_0 import ( + BaseEntryContentType8ItemType0ItemType0, ) -from .base_entry_content_type_5_item_type_0_item_type_0_type import ( - BaseEntryContentType5ItemType0ItemType0Type, +from .base_entry_content_type_8_item_type_0_item_type_0_type import ( + BaseEntryContentType8ItemType0ItemType0Type, ) -from .base_entry_content_type_5_item_type_0_item_type_1 import ( - BaseEntryContentType5ItemType0ItemType1, +from .base_entry_content_type_8_item_type_0_item_type_1 import ( + BaseEntryContentType8ItemType0ItemType1, ) -from .base_entry_content_type_5_item_type_0_item_type_1_image_url import ( - BaseEntryContentType5ItemType0ItemType1ImageUrl, +from .base_entry_content_type_8_item_type_0_item_type_1_image_url import ( + BaseEntryContentType8ItemType0ItemType1ImageUrl, ) -from .base_entry_content_type_5_item_type_0_item_type_1_type import ( - BaseEntryContentType5ItemType0ItemType1Type, +from .base_entry_content_type_8_item_type_0_item_type_1_type import ( + BaseEntryContentType8ItemType0ItemType1Type, ) from .base_entry_source 
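Stepping back: each generated endpoint module like chat_route_generate above exposes four entry points (sync_detailed, sync, asyncio_detailed, asyncio). A hedged usage sketch follows; the session id and body are placeholders, and the AuthenticatedClient constructor is assumed to follow the usual openapi-python-client shape:

    from julep.sdk.client import AuthenticatedClient
    from julep.sdk.api.default import chat_route_generate
    from julep.sdk.models.chat_input import ChatInput
    from julep.sdk.models.chunk_chat_response import ChunkChatResponse

    client = AuthenticatedClient(base_url="https://api.example.com", token="...")

    response = chat_route_generate.sync_detailed(
        id="<session-id>",  # placeholder
        client=client,
        body=ChatInput(messages=[], tools=[], stop=[]),  # minimal placeholder body
    )

    # _parse_response tries ChunkChatResponse before MessageChatResponse, so
    # callers should branch on the parsed type:
    if isinstance(response.parsed, ChunkChatResponse):
        ...  # streamed chunks
    else:
        ...  # a full MessageChatResponse (or None on an unexpected status)
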
import BaseEntrySource from .base_token_log_prob import BaseTokenLogProb from .case_then import CaseThen from .case_then_case_type_1 import CaseThenCaseType1 -from .chat_competion_usage import ChatCompetionUsage -from .chat_finish_reason import ChatFinishReason -from .chat_input_data_messages_item_content_type_2_item_type_0 import ( - ChatInputDataMessagesItemContentType2ItemType0, +from .chat_input import ChatInput +from .chat_input_logit_bias import ChatInputLogitBias +from .chat_input_messages_item import ChatInputMessagesItem +from .chat_input_messages_item_content_type_2_item_type_0 import ( + ChatInputMessagesItemContentType2ItemType0, ) -from .chat_input_data_messages_item_content_type_2_item_type_0_type import ( - ChatInputDataMessagesItemContentType2ItemType0Type, +from .chat_input_messages_item_content_type_2_item_type_0_type import ( + ChatInputMessagesItemContentType2ItemType0Type, ) -from .chat_input_data_messages_item_content_type_2_item_type_1 import ( - ChatInputDataMessagesItemContentType2ItemType1, +from .chat_input_messages_item_content_type_2_item_type_1 import ( + ChatInputMessagesItemContentType2ItemType1, ) -from .chat_input_data_messages_item_content_type_2_item_type_1_image_url import ( - ChatInputDataMessagesItemContentType2ItemType1ImageUrl, +from .chat_input_messages_item_content_type_2_item_type_1_image_url import ( + ChatInputMessagesItemContentType2ItemType1ImageUrl, ) -from .chat_input_data_messages_item_content_type_2_item_type_1_type import ( - ChatInputDataMessagesItemContentType2ItemType1Type, +from .chat_input_messages_item_content_type_2_item_type_1_type import ( + ChatInputMessagesItemContentType2ItemType1Type, ) -from .chat_input_data_tool_choice_type_0 import ChatInputDataToolChoiceType0 -from .chat_log_prob_response import ChatLogProbResponse -from .chat_open_ai_settings import ChatOpenAISettings -from .chosen_tool_call import ChosenToolCall +from .chat_input_tool_choice_type_0 import ChatInputToolChoiceType0 +from .chat_output_chunk import ChatOutputChunk +from .chat_output_chunk_delta import ChatOutputChunkDelta +from .chat_output_chunk_delta_content_type_2_item_type_0 import ( + ChatOutputChunkDeltaContentType2ItemType0, +) +from .chat_output_chunk_delta_content_type_2_item_type_0_type import ( + ChatOutputChunkDeltaContentType2ItemType0Type, +) +from .chat_output_chunk_delta_content_type_2_item_type_1 import ( + ChatOutputChunkDeltaContentType2ItemType1, +) +from .chat_output_chunk_delta_content_type_2_item_type_1_image_url import ( + ChatOutputChunkDeltaContentType2ItemType1ImageUrl, +) +from .chat_output_chunk_delta_content_type_2_item_type_1_type import ( + ChatOutputChunkDeltaContentType2ItemType1Type, +) +from .chat_settings import ChatSettings +from .chat_settings_logit_bias import ChatSettingsLogitBias +from .chat_token_log_prob import ChatTokenLogProb +from .chosen_api_call import ChosenApiCall +from .chosen_api_call_type import ChosenApiCallType +from .chosen_function_call import ChosenFunctionCall +from .chosen_function_call_type import ChosenFunctionCallType +from .chosen_integration_call import ChosenIntegrationCall +from .chosen_integration_call_type import ChosenIntegrationCallType +from .chosen_system_call import ChosenSystemCall +from .chosen_system_call_type import ChosenSystemCallType +from .chunk_chat_response import ChunkChatResponse +from .competion_usage import CompetionUsage from .context_overflow_type import ContextOverflowType from .create_agent_request import CreateAgentRequest from .create_agent_request_metadata 
import CreateAgentRequestMetadata @@ -88,9 +113,9 @@ CreateTaskRequestMainItemType17Kind, ) from .create_task_request_metadata import CreateTaskRequestMetadata -from .create_tool_request import CreateToolRequest from .create_user_request import CreateUserRequest from .create_user_request_metadata import CreateUserRequestMetadata +from .default_chat_settings import DefaultChatSettings from .doc import Doc from .doc_metadata import DocMetadata from .doc_owner import DocOwner @@ -114,6 +139,7 @@ from .execution_input import ExecutionInput from .execution_metadata import ExecutionMetadata from .execution_status import ExecutionStatus +from .finish_reason import FinishReason from .foreach_do import ForeachDo from .foreach_step import ForeachStep from .foreach_step_kind import ForeachStepKind @@ -122,12 +148,16 @@ from .function_def_parameters import FunctionDefParameters from .get_step import GetStep from .get_step_kind import GetStepKind +from .hybrid_doc_search_request import HybridDocSearchRequest +from .hybrid_doc_search_request_lang import HybridDocSearchRequestLang from .if_else_workflow_step import IfElseWorkflowStep from .if_else_workflow_step_kind import IfElseWorkflowStepKind from .job_state import JobState from .job_status import JobStatus +from .log_prob_response import LogProbResponse from .log_step import LogStep from .log_step_kind import LogStepKind +from .message_chat_response import MessageChatResponse from .named_api_call_choice import NamedApiCallChoice from .named_function_choice import NamedFunctionChoice from .named_integration_choice import NamedIntegrationChoice @@ -214,10 +244,13 @@ from .task_token_resume_execution_request_status import ( TaskTokenResumeExecutionRequestStatus, ) +from .task_tool import TaskTool from .tasks_create_or_update_route_create_or_update_accept import ( TasksCreateOrUpdateRouteCreateOrUpdateAccept, ) from .tasks_route_create_accept import TasksRouteCreateAccept +from .text_only_doc_search_request import TextOnlyDocSearchRequest +from .text_only_doc_search_request_lang import TextOnlyDocSearchRequestLang from .tool import Tool from .tool_call_step import ToolCallStep from .tool_call_step_arguments_type_0 import ToolCallStepArgumentsType0 @@ -226,9 +259,12 @@ from .tool_response import ToolResponse from .tool_response_output import ToolResponseOutput from .tool_type import ToolType +from .transition import Transition from .transition_event import TransitionEvent from .transition_event_type import TransitionEventType +from .transition_metadata import TransitionMetadata from .transition_target import TransitionTarget +from .transition_type import TransitionType from .update_agent_request import UpdateAgentRequest from .update_agent_request_metadata import UpdateAgentRequestMetadata from .update_execution_request import UpdateExecutionRequest @@ -257,6 +293,8 @@ from .user_docs_route_list_sort_by import UserDocsRouteListSortBy from .user_docs_search_route_search_body import UserDocsSearchRouteSearchBody from .user_metadata import UserMetadata +from .vector_doc_search_request import VectorDocSearchRequest +from .vector_doc_search_request_lang import VectorDocSearchRequestLang from .wait_for_input_info import WaitForInputInfo from .wait_for_input_info_info import WaitForInputInfoInfo from .wait_for_input_step import WaitForInputStep @@ -278,34 +316,49 @@ "BaseAgent", "BaseAgentMetadata", "BaseChatOutput", - "BaseChatResponse", - "BaseDocSearchRequest", - "BaseDocSearchRequestLang", "BaseEntryContentType0ItemType0", 
"BaseEntryContentType0ItemType0Type", "BaseEntryContentType0ItemType1", "BaseEntryContentType0ItemType1ImageUrl", "BaseEntryContentType0ItemType1Type", - "BaseEntryContentType5ItemType0ItemType0", - "BaseEntryContentType5ItemType0ItemType0Type", - "BaseEntryContentType5ItemType0ItemType1", - "BaseEntryContentType5ItemType0ItemType1ImageUrl", - "BaseEntryContentType5ItemType0ItemType1Type", + "BaseEntryContentType8ItemType0ItemType0", + "BaseEntryContentType8ItemType0ItemType0Type", + "BaseEntryContentType8ItemType0ItemType1", + "BaseEntryContentType8ItemType0ItemType1ImageUrl", + "BaseEntryContentType8ItemType0ItemType1Type", "BaseEntrySource", "BaseTokenLogProb", "CaseThen", "CaseThenCaseType1", - "ChatCompetionUsage", - "ChatFinishReason", - "ChatInputDataMessagesItemContentType2ItemType0", - "ChatInputDataMessagesItemContentType2ItemType0Type", - "ChatInputDataMessagesItemContentType2ItemType1", - "ChatInputDataMessagesItemContentType2ItemType1ImageUrl", - "ChatInputDataMessagesItemContentType2ItemType1Type", - "ChatInputDataToolChoiceType0", - "ChatLogProbResponse", - "ChatOpenAISettings", - "ChosenToolCall", + "ChatInput", + "ChatInputLogitBias", + "ChatInputMessagesItem", + "ChatInputMessagesItemContentType2ItemType0", + "ChatInputMessagesItemContentType2ItemType0Type", + "ChatInputMessagesItemContentType2ItemType1", + "ChatInputMessagesItemContentType2ItemType1ImageUrl", + "ChatInputMessagesItemContentType2ItemType1Type", + "ChatInputToolChoiceType0", + "ChatOutputChunk", + "ChatOutputChunkDelta", + "ChatOutputChunkDeltaContentType2ItemType0", + "ChatOutputChunkDeltaContentType2ItemType0Type", + "ChatOutputChunkDeltaContentType2ItemType1", + "ChatOutputChunkDeltaContentType2ItemType1ImageUrl", + "ChatOutputChunkDeltaContentType2ItemType1Type", + "ChatSettings", + "ChatSettingsLogitBias", + "ChatTokenLogProb", + "ChosenApiCall", + "ChosenApiCallType", + "ChosenFunctionCall", + "ChosenFunctionCallType", + "ChosenIntegrationCall", + "ChosenIntegrationCallType", + "ChosenSystemCall", + "ChosenSystemCallType", + "ChunkChatResponse", + "CompetionUsage", "ContextOverflowType", "CreateAgentRequest", "CreateAgentRequestMetadata", @@ -323,9 +376,9 @@ "CreateTaskRequestMainItemType17", "CreateTaskRequestMainItemType17Kind", "CreateTaskRequestMetadata", - "CreateToolRequest", "CreateUserRequest", "CreateUserRequestMetadata", + "DefaultChatSettings", "Doc", "DocMetadata", "DocOwner", @@ -349,6 +402,7 @@ "ExecutionInput", "ExecutionMetadata", "ExecutionStatus", + "FinishReason", "ForeachDo", "ForeachStep", "ForeachStepKind", @@ -357,12 +411,16 @@ "FunctionDefParameters", "GetStep", "GetStepKind", + "HybridDocSearchRequest", + "HybridDocSearchRequestLang", "IfElseWorkflowStep", "IfElseWorkflowStepKind", "JobState", "JobStatus", + "LogProbResponse", "LogStep", "LogStepKind", + "MessageChatResponse", "NamedApiCallChoice", "NamedFunctionChoice", "NamedIntegrationChoice", @@ -431,6 +489,9 @@ "TaskTokenResumeExecutionRequest", "TaskTokenResumeExecutionRequestInput", "TaskTokenResumeExecutionRequestStatus", + "TaskTool", + "TextOnlyDocSearchRequest", + "TextOnlyDocSearchRequestLang", "Tool", "ToolCallStep", "ToolCallStepArgumentsType0", @@ -439,9 +500,12 @@ "ToolResponse", "ToolResponseOutput", "ToolType", + "Transition", "TransitionEvent", "TransitionEventType", + "TransitionMetadata", "TransitionTarget", + "TransitionType", "UpdateAgentRequest", "UpdateAgentRequestMetadata", "UpdateExecutionRequest", @@ -464,6 +528,8 @@ "UserDocsRouteListSortBy", "UserDocsSearchRouteSearchBody", "UserMetadata", + 
"VectorDocSearchRequest", + "VectorDocSearchRequestLang", "WaitForInputInfo", "WaitForInputInfoInfo", "WaitForInputStep", diff --git a/sdks/python/julep/sdk/models/agent.py b/sdks/python/julep/sdk/models/agent.py index 1ea0c1fb7..1dddcb814 100644 --- a/sdks/python/julep/sdk/models/agent.py +++ b/sdks/python/julep/sdk/models/agent.py @@ -22,7 +22,7 @@ if TYPE_CHECKING: from ..models.agent_metadata import AgentMetadata - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.default_chat_settings import DefaultChatSettings T = TypeVar("T", bound="Agent") @@ -42,7 +42,8 @@ class Agent: created_at (datetime.datetime): When this resource was created as UTC date-time updated_at (datetime.datetime): When this resource was updated as UTC date-time metadata (Union[Unset, AgentMetadata]): - default_settings (Union[Unset, ChatOpenAISettings]): + default_settings (Union[Unset, DefaultChatSettings]): Default settings for the chat session (also used by the + agent) """ id: str @@ -53,12 +54,12 @@ class Agent: model: str = "" instructions: Union[List[str], str] = "[]" metadata: Union[Unset, "AgentMetadata"] = UNSET - default_settings: Union[Unset, "ChatOpenAISettings"] = UNSET + default_settings: Union[Unset, "DefaultChatSettings"] = UNSET additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: from ..models.agent_metadata import AgentMetadata - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.default_chat_settings import DefaultChatSettings name = self.name @@ -110,7 +111,7 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.agent_metadata import AgentMetadata - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.default_chat_settings import DefaultChatSettings d = src_dict.copy() name = d.pop("name") @@ -146,11 +147,11 @@ def _parse_instructions(data: object) -> Union[List[str], str]: metadata = AgentMetadata.from_dict(_metadata) _default_settings = d.pop("default_settings", UNSET) - default_settings: Union[Unset, ChatOpenAISettings] + default_settings: Union[Unset, DefaultChatSettings] if isinstance(_default_settings, Unset): default_settings = UNSET else: - default_settings = ChatOpenAISettings.from_dict(_default_settings) + default_settings = DefaultChatSettings.from_dict(_default_settings) agent = cls( name=name, diff --git a/sdks/python/julep/sdk/models/base_agent.py b/sdks/python/julep/sdk/models/base_agent.py index f3298dace..ba805f871 100644 --- a/sdks/python/julep/sdk/models/base_agent.py +++ b/sdks/python/julep/sdk/models/base_agent.py @@ -20,7 +20,7 @@ if TYPE_CHECKING: from ..models.base_agent_metadata import BaseAgentMetadata - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.default_chat_settings import DefaultChatSettings T = TypeVar("T", bound="BaseAgent") @@ -37,7 +37,8 @@ class BaseAgent: model (str): Model name to use (gpt-4-turbo, gemini-nano etc) Default: ''. instructions (Union[List[str], str]): Instructions for the agent Default: '[]'. 
metadata (Union[Unset, BaseAgentMetadata]): - default_settings (Union[Unset, ChatOpenAISettings]): + default_settings (Union[Unset, DefaultChatSettings]): Default settings for the chat session (also used by the + agent) """ name: str = "" @@ -45,12 +46,12 @@ class BaseAgent: model: str = "" instructions: Union[List[str], str] = "[]" metadata: Union[Unset, "BaseAgentMetadata"] = UNSET - default_settings: Union[Unset, "ChatOpenAISettings"] = UNSET + default_settings: Union[Unset, "DefaultChatSettings"] = UNSET additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: from ..models.base_agent_metadata import BaseAgentMetadata - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.default_chat_settings import DefaultChatSettings name = self.name @@ -93,7 +94,7 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.base_agent_metadata import BaseAgentMetadata - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.default_chat_settings import DefaultChatSettings d = src_dict.copy() name = d.pop("name") @@ -123,11 +124,11 @@ def _parse_instructions(data: object) -> Union[List[str], str]: metadata = BaseAgentMetadata.from_dict(_metadata) _default_settings = d.pop("default_settings", UNSET) - default_settings: Union[Unset, ChatOpenAISettings] + default_settings: Union[Unset, DefaultChatSettings] if isinstance(_default_settings, Unset): default_settings = UNSET else: - default_settings = ChatOpenAISettings.from_dict(_default_settings) + default_settings = DefaultChatSettings.from_dict(_default_settings) base_agent = cls( name=name, diff --git a/sdks/python/julep/sdk/models/base_chat_output.py b/sdks/python/julep/sdk/models/base_chat_output.py index ccac75987..773818c13 100644 --- a/sdks/python/julep/sdk/models/base_chat_output.py +++ b/sdks/python/julep/sdk/models/base_chat_output.py @@ -16,11 +16,11 @@ from attrs import define as _attrs_define from attrs import field as _attrs_field -from ..models.chat_finish_reason import ChatFinishReason +from ..models.finish_reason import FinishReason from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.chat_log_prob_response import ChatLogProbResponse + from ..models.log_prob_response import LogProbResponse T = TypeVar("T", bound="BaseChatOutput") @@ -31,21 +31,21 @@ class BaseChatOutput: """ Attributes: index (int): - finish_reason (ChatFinishReason): The reason the model stopped generating tokens. This will be `stop` + finish_reason (FinishReason): The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag - from our content filters, `tool_calls` if the model called a tool. Default: ChatFinishReason.STOP. - logprobs (Union[Unset, ChatLogProbResponse]): + from our content filters, `tool_calls` if the model called a tool. Default: FinishReason.STOP. 
+ logprobs (Union[Unset, LogProbResponse]): """ index: int - finish_reason: ChatFinishReason = ChatFinishReason.STOP - logprobs: Union[Unset, "ChatLogProbResponse"] = UNSET + finish_reason: FinishReason = FinishReason.STOP + logprobs: Union[Unset, "LogProbResponse"] = UNSET additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.chat_log_prob_response import ChatLogProbResponse + from ..models.log_prob_response import LogProbResponse index = self.index @@ -70,19 +70,19 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.chat_log_prob_response import ChatLogProbResponse + from ..models.log_prob_response import LogProbResponse d = src_dict.copy() index = d.pop("index") - finish_reason = ChatFinishReason(d.pop("finish_reason")) + finish_reason = FinishReason(d.pop("finish_reason")) _logprobs = d.pop("logprobs", UNSET) - logprobs: Union[Unset, ChatLogProbResponse] + logprobs: Union[Unset, LogProbResponse] if isinstance(_logprobs, Unset): logprobs = UNSET else: - logprobs = ChatLogProbResponse.from_dict(_logprobs) + logprobs = LogProbResponse.from_dict(_logprobs) base_chat_output = cls( index=index, diff --git a/sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_0.py b/sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_0.py similarity index 68% rename from sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_0.py rename to sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_0.py index f1587a813..830d9b616 100644 --- a/sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_0.py +++ b/sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_0.py @@ -14,26 +14,26 @@ from attrs import define as _attrs_define from attrs import field as _attrs_field -from ..models.base_entry_content_type_5_item_type_0_item_type_0_type import ( - BaseEntryContentType5ItemType0ItemType0Type, +from ..models.base_entry_content_type_8_item_type_0_item_type_0_type import ( + BaseEntryContentType8ItemType0ItemType0Type, ) from ..types import UNSET, Unset -T = TypeVar("T", bound="BaseEntryContentType5ItemType0ItemType0") +T = TypeVar("T", bound="BaseEntryContentType8ItemType0ItemType0") @_attrs_define -class BaseEntryContentType5ItemType0ItemType0: +class BaseEntryContentType8ItemType0ItemType0: """ Attributes: text (str): - type (BaseEntryContentType5ItemType0ItemType0Type): The type (fixed to 'text') Default: - BaseEntryContentType5ItemType0ItemType0Type.TEXT. + type (BaseEntryContentType8ItemType0ItemType0Type): The type (fixed to 'text') Default: + BaseEntryContentType8ItemType0ItemType0Type.TEXT. 
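An aside on the finish_reason semantics documented for BaseChatOutput above: downstream code typically only needs to branch on the enum members. A minimal sketch; STOP is confirmed by the default above, while TOOL_CALLS is assumed from the quoted wire value `tool_calls`:

    from julep.sdk.models.finish_reason import FinishReason

    def stopped_for_tools(output) -> bool:
        # True when the model paused to invoke a tool rather than hitting a
        # natural stop, the token limit, or a content filter
        return output.finish_reason == FinishReason.TOOL_CALLS
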
""" text: str - type: BaseEntryContentType5ItemType0ItemType0Type = ( - BaseEntryContentType5ItemType0ItemType0Type.TEXT + type: BaseEntryContentType8ItemType0ItemType0Type = ( + BaseEntryContentType8ItemType0ItemType0Type.TEXT ) additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) @@ -58,15 +58,15 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() text = d.pop("text") - type = BaseEntryContentType5ItemType0ItemType0Type(d.pop("type")) + type = BaseEntryContentType8ItemType0ItemType0Type(d.pop("type")) - base_entry_content_type_5_item_type_0_item_type_0 = cls( + base_entry_content_type_8_item_type_0_item_type_0 = cls( text=text, type=type, ) - base_entry_content_type_5_item_type_0_item_type_0.additional_properties = d - return base_entry_content_type_5_item_type_0_item_type_0 + base_entry_content_type_8_item_type_0_item_type_0.additional_properties = d + return base_entry_content_type_8_item_type_0_item_type_0 @property def additional_keys(self) -> List[str]: diff --git a/sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_0_type.py b/sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_0_type.py similarity index 62% rename from sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_0_type.py rename to sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_0_type.py index c09f08376..3cd19dcff 100644 --- a/sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_0_type.py +++ b/sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_0_type.py @@ -1,7 +1,7 @@ from enum import Enum -class BaseEntryContentType5ItemType0ItemType0Type(str, Enum): +class BaseEntryContentType8ItemType0ItemType0Type(str, Enum): TEXT = "text" def __str__(self) -> str: diff --git a/sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_1.py b/sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_1.py similarity index 57% rename from sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_1.py rename to sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_1.py index fac5b3c69..8ef804382 100644 --- a/sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_1.py +++ b/sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_1.py @@ -15,38 +15,38 @@ from attrs import define as _attrs_define from attrs import field as _attrs_field -from ..models.base_entry_content_type_5_item_type_0_item_type_1_type import ( - BaseEntryContentType5ItemType0ItemType1Type, +from ..models.base_entry_content_type_8_item_type_0_item_type_1_type import ( + BaseEntryContentType8ItemType0ItemType1Type, ) from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.base_entry_content_type_5_item_type_0_item_type_1_image_url import ( - BaseEntryContentType5ItemType0ItemType1ImageUrl, + from ..models.base_entry_content_type_8_item_type_0_item_type_1_image_url import ( + BaseEntryContentType8ItemType0ItemType1ImageUrl, ) -T = TypeVar("T", bound="BaseEntryContentType5ItemType0ItemType1") +T = TypeVar("T", bound="BaseEntryContentType8ItemType0ItemType1") @_attrs_define -class BaseEntryContentType5ItemType0ItemType1: +class BaseEntryContentType8ItemType0ItemType1: """ Attributes: - image_url (BaseEntryContentType5ItemType0ItemType1ImageUrl): The image URL - type (BaseEntryContentType5ItemType0ItemType1Type): The type (fixed to 
'image_url') Default: - BaseEntryContentType5ItemType0ItemType1Type.IMAGE_URL. + image_url (BaseEntryContentType8ItemType0ItemType1ImageUrl): The image URL + type (BaseEntryContentType8ItemType0ItemType1Type): The type (fixed to 'image_url') Default: + BaseEntryContentType8ItemType0ItemType1Type.IMAGE_URL. """ - image_url: "BaseEntryContentType5ItemType0ItemType1ImageUrl" - type: BaseEntryContentType5ItemType0ItemType1Type = ( - BaseEntryContentType5ItemType0ItemType1Type.IMAGE_URL + image_url: "BaseEntryContentType8ItemType0ItemType1ImageUrl" + type: BaseEntryContentType8ItemType0ItemType1Type = ( + BaseEntryContentType8ItemType0ItemType1Type.IMAGE_URL ) additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.base_entry_content_type_5_item_type_0_item_type_1_image_url import ( - BaseEntryContentType5ItemType0ItemType1ImageUrl, + from ..models.base_entry_content_type_8_item_type_0_item_type_1_image_url import ( + BaseEntryContentType8ItemType0ItemType1ImageUrl, ) image_url = self.image_url.to_dict() @@ -66,24 +66,24 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.base_entry_content_type_5_item_type_0_item_type_1_image_url import ( - BaseEntryContentType5ItemType0ItemType1ImageUrl, + from ..models.base_entry_content_type_8_item_type_0_item_type_1_image_url import ( + BaseEntryContentType8ItemType0ItemType1ImageUrl, ) d = src_dict.copy() - image_url = BaseEntryContentType5ItemType0ItemType1ImageUrl.from_dict( + image_url = BaseEntryContentType8ItemType0ItemType1ImageUrl.from_dict( d.pop("image_url") ) - type = BaseEntryContentType5ItemType0ItemType1Type(d.pop("type")) + type = BaseEntryContentType8ItemType0ItemType1Type(d.pop("type")) - base_entry_content_type_5_item_type_0_item_type_1 = cls( + base_entry_content_type_8_item_type_0_item_type_1 = cls( image_url=image_url, type=type, ) - base_entry_content_type_5_item_type_0_item_type_1.additional_properties = d - return base_entry_content_type_5_item_type_0_item_type_1 + base_entry_content_type_8_item_type_0_item_type_1.additional_properties = d + return base_entry_content_type_8_item_type_0_item_type_1 @property def additional_keys(self) -> List[str]: diff --git a/sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_1_image_url.py b/sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_1_image_url.py similarity index 85% rename from sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_1_image_url.py rename to sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_1_image_url.py index 8a27b3abb..d93890a16 100644 --- a/sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_1_image_url.py +++ b/sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_1_image_url.py @@ -17,11 +17,11 @@ from ..models.entries_image_detail import EntriesImageDetail from ..types import UNSET, Unset -T = TypeVar("T", bound="BaseEntryContentType5ItemType0ItemType1ImageUrl") +T = TypeVar("T", bound="BaseEntryContentType8ItemType0ItemType1ImageUrl") @_attrs_define -class BaseEntryContentType5ItemType0ItemType1ImageUrl: +class BaseEntryContentType8ItemType0ItemType1ImageUrl: """The image URL Attributes: @@ -56,13 +56,13 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: detail = EntriesImageDetail(d.pop("detail")) - 
base_entry_content_type_5_item_type_0_item_type_1_image_url = cls( + base_entry_content_type_8_item_type_0_item_type_1_image_url = cls( url=url, detail=detail, ) - base_entry_content_type_5_item_type_0_item_type_1_image_url.additional_properties = d - return base_entry_content_type_5_item_type_0_item_type_1_image_url + base_entry_content_type_8_item_type_0_item_type_1_image_url.additional_properties = d + return base_entry_content_type_8_item_type_0_item_type_1_image_url @property def additional_keys(self) -> List[str]: diff --git a/sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_1_type.py b/sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_1_type.py similarity index 64% rename from sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_1_type.py rename to sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_1_type.py index 2e8bd64ac..c46f23dc0 100644 --- a/sdks/python/julep/sdk/models/base_entry_content_type_5_item_type_0_item_type_1_type.py +++ b/sdks/python/julep/sdk/models/base_entry_content_type_8_item_type_0_item_type_1_type.py @@ -1,7 +1,7 @@ from enum import Enum -class BaseEntryContentType5ItemType0ItemType1Type(str, Enum): +class BaseEntryContentType8ItemType0ItemType1Type(str, Enum): IMAGE_URL = "image_url" def __str__(self) -> str: diff --git a/sdks/python/julep/sdk/models/chat_input.py b/sdks/python/julep/sdk/models/chat_input.py new file mode 100644 index 000000000..7a1b6af54 --- /dev/null +++ b/sdks/python/julep/sdk/models/chat_input.py @@ -0,0 +1,432 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.chat_input_tool_choice_type_0 import ChatInputToolChoiceType0 +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.chat_input_logit_bias import ChatInputLogitBias + from ..models.chat_input_messages_item import ChatInputMessagesItem + from ..models.named_api_call_choice import NamedApiCallChoice + from ..models.named_function_choice import NamedFunctionChoice + from ..models.named_integration_choice import NamedIntegrationChoice + from ..models.named_system_choice import NamedSystemChoice + from ..models.schema_completion_response_format import ( + SchemaCompletionResponseFormat, + ) + from ..models.simple_completion_response_format import ( + SimpleCompletionResponseFormat, + ) + from ..models.tool import Tool + + +T = TypeVar("T", bound="ChatInput") + + +@_attrs_define +class ChatInput: + """ + Attributes: + messages (List['ChatInputMessagesItem']): A list of new input messages comprising the conversation so far. + tools (List['Tool']): (Advanced) List of tools that are provided in addition to agent's default set of tools. + remember (bool): DISABLED: Whether this interaction should form new memories or not (will be enabled in a future + release) Default: False. + recall (bool): Whether previous memories and docs should be recalled or not Default: True. + save (bool): Whether this interaction should be stored in the session history or not Default: True. + stream (bool): Indicates if the server should stream the response as it's generated Default: False. + stop (List[str]): Up to 4 sequences where the API will stop generating further tokens. 
+
+        tool_choice (Union['NamedApiCallChoice', 'NamedFunctionChoice', 'NamedIntegrationChoice', 'NamedSystemChoice',
+            ChatInputToolChoiceType0, Unset]): Can be one of the existing tools given to the agent earlier or one of the
+            tools provided in this request.
+        frequency_penalty (Union[Unset, float]): Number between -2.0 and 2.0. Positive values penalize new tokens based
+            on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line
+            verbatim.
+        presence_penalty (Union[Unset, float]): Number between -2.0 and 2.0. Positive values penalize new tokens based
+            on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+        temperature (Union[Unset, float]): What sampling temperature to use, between 0 and 2. Higher values like 0.8
+            will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+        top_p (Union[Unset, float]): Defaults to 1. An alternative to sampling with temperature, called nucleus
+            sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only
+            the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or
+            temperature but not both.
+        repetition_penalty (Union[Unset, float]): Number between 0 and 2.0. 1.0 is neutral; values larger than that
+            penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood
+            to repeat the same line verbatim.
+        length_penalty (Union[Unset, float]): Number between 0 and 2.0. 1.0 is neutral; values larger than that
+            penalize the number of tokens generated.
+        min_p (Union[Unset, float]): Minimum probability (relative to the most likely token) for a token to be
+            considered.
+        model (Union[Unset, str]): Model name to use; restricted to identifier-safe Unicode characters.
+            See: https://unicode.org/reports/tr31/
+            See: https://www.unicode.org/reports/tr39/#Identifier_Characters
+        seed (Union[Unset, int]): If specified, the system will make a best effort to sample deterministically for that
+            particular seed value.
+        max_tokens (Union[Unset, int]): The maximum number of tokens to generate in the chat completion.
+        logit_bias (Union[Unset, ChatInputLogitBias]): Modify the likelihood of specified tokens appearing in the
+            completion.
+        response_format (Union['SchemaCompletionResponseFormat', 'SimpleCompletionResponseFormat', Unset]): Response
+            format (set to `json_object` to restrict output to JSON).
+        agent (Union[Unset, str]):
+    """
+
+    messages: List["ChatInputMessagesItem"]
+    tools: List["Tool"]
+    stop: List[str]
+    remember: bool = False
+    recall: bool = True
+    save: bool = True
+    stream: bool = False
+    tool_choice: Union[
+        "NamedApiCallChoice",
+        "NamedFunctionChoice",
+        "NamedIntegrationChoice",
+        "NamedSystemChoice",
+        ChatInputToolChoiceType0,
+        Unset,
+    ] = UNSET
+    frequency_penalty: Union[Unset, float] = UNSET
+    presence_penalty: Union[Unset, float] = UNSET
+    temperature: Union[Unset, float] = UNSET
+    top_p: Union[Unset, float] = UNSET
+    repetition_penalty: Union[Unset, float] = UNSET
+    length_penalty: Union[Unset, float] = UNSET
+    min_p: Union[Unset, float] = UNSET
+    model: Union[Unset, str] = UNSET
+    seed: Union[Unset, int] = UNSET
+    max_tokens: Union[Unset, int] = UNSET
+    logit_bias: Union[Unset, "ChatInputLogitBias"] = UNSET
+    response_format: Union[
+        "SchemaCompletionResponseFormat", "SimpleCompletionResponseFormat", Unset
+    ] = UNSET
+    agent: Union[Unset, str] = UNSET
+    additional_properties: Dict[str, Any] = _attrs_field(init=False,
factory=dict) + + def to_dict(self) -> Dict[str, Any]: + from ..models.chat_input_logit_bias import ChatInputLogitBias + from ..models.chat_input_messages_item import ChatInputMessagesItem + from ..models.named_api_call_choice import NamedApiCallChoice + from ..models.named_function_choice import NamedFunctionChoice + from ..models.named_integration_choice import NamedIntegrationChoice + from ..models.named_system_choice import NamedSystemChoice + from ..models.schema_completion_response_format import ( + SchemaCompletionResponseFormat, + ) + from ..models.simple_completion_response_format import ( + SimpleCompletionResponseFormat, + ) + from ..models.tool import Tool + + messages = [] + for messages_item_data in self.messages: + messages_item = messages_item_data.to_dict() + messages.append(messages_item) + + tools = [] + for tools_item_data in self.tools: + tools_item = tools_item_data.to_dict() + tools.append(tools_item) + + remember = self.remember + + recall = self.recall + + save = self.save + + stream = self.stream + + stop = self.stop + + tool_choice: Union[Dict[str, Any], Unset, str] + if isinstance(self.tool_choice, Unset): + tool_choice = UNSET + elif isinstance(self.tool_choice, ChatInputToolChoiceType0): + tool_choice = self.tool_choice.value + elif isinstance(self.tool_choice, NamedFunctionChoice): + tool_choice = self.tool_choice.to_dict() + elif isinstance(self.tool_choice, NamedIntegrationChoice): + tool_choice = self.tool_choice.to_dict() + elif isinstance(self.tool_choice, NamedSystemChoice): + tool_choice = self.tool_choice.to_dict() + else: + tool_choice = self.tool_choice.to_dict() + + frequency_penalty = self.frequency_penalty + + presence_penalty = self.presence_penalty + + temperature = self.temperature + + top_p = self.top_p + + repetition_penalty = self.repetition_penalty + + length_penalty = self.length_penalty + + min_p = self.min_p + + model = self.model + + seed = self.seed + + max_tokens = self.max_tokens + + logit_bias: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.logit_bias, Unset): + logit_bias = self.logit_bias.to_dict() + + response_format: Union[Dict[str, Any], Unset] + if isinstance(self.response_format, Unset): + response_format = UNSET + elif isinstance(self.response_format, SimpleCompletionResponseFormat): + response_format = self.response_format.to_dict() + else: + response_format = self.response_format.to_dict() + + agent = self.agent + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "messages": messages, + "tools": tools, + "remember": remember, + "recall": recall, + "save": save, + "stream": stream, + "stop": stop, + } + ) + if tool_choice is not UNSET: + field_dict["tool_choice"] = tool_choice + if frequency_penalty is not UNSET: + field_dict["frequency_penalty"] = frequency_penalty + if presence_penalty is not UNSET: + field_dict["presence_penalty"] = presence_penalty + if temperature is not UNSET: + field_dict["temperature"] = temperature + if top_p is not UNSET: + field_dict["top_p"] = top_p + if repetition_penalty is not UNSET: + field_dict["repetition_penalty"] = repetition_penalty + if length_penalty is not UNSET: + field_dict["length_penalty"] = length_penalty + if min_p is not UNSET: + field_dict["min_p"] = min_p + if model is not UNSET: + field_dict["model"] = model + if seed is not UNSET: + field_dict["seed"] = seed + if max_tokens is not UNSET: + field_dict["max_tokens"] = max_tokens + if logit_bias is not UNSET: + field_dict["logit_bias"] = logit_bias + 
if response_format is not UNSET: + field_dict["response_format"] = response_format + if agent is not UNSET: + field_dict["agent"] = agent + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.chat_input_logit_bias import ChatInputLogitBias + from ..models.chat_input_messages_item import ChatInputMessagesItem + from ..models.named_api_call_choice import NamedApiCallChoice + from ..models.named_function_choice import NamedFunctionChoice + from ..models.named_integration_choice import NamedIntegrationChoice + from ..models.named_system_choice import NamedSystemChoice + from ..models.schema_completion_response_format import ( + SchemaCompletionResponseFormat, + ) + from ..models.simple_completion_response_format import ( + SimpleCompletionResponseFormat, + ) + from ..models.tool import Tool + + d = src_dict.copy() + messages = [] + _messages = d.pop("messages") + for messages_item_data in _messages: + messages_item = ChatInputMessagesItem.from_dict(messages_item_data) + + messages.append(messages_item) + + tools = [] + _tools = d.pop("tools") + for tools_item_data in _tools: + tools_item = Tool.from_dict(tools_item_data) + + tools.append(tools_item) + + remember = d.pop("remember") + + recall = d.pop("recall") + + save = d.pop("save") + + stream = d.pop("stream") + + stop = cast(List[str], d.pop("stop")) + + def _parse_tool_choice( + data: object, + ) -> Union[ + "NamedApiCallChoice", + "NamedFunctionChoice", + "NamedIntegrationChoice", + "NamedSystemChoice", + ChatInputToolChoiceType0, + Unset, + ]: + if isinstance(data, Unset): + return data + try: + if not isinstance(data, str): + raise TypeError() + tool_choice_type_0 = ChatInputToolChoiceType0(data) + + return tool_choice_type_0 + except: # noqa: E722 + pass + try: + if not isinstance(data, dict): + raise TypeError() + tool_choice_type_1 = NamedFunctionChoice.from_dict(data) + + return tool_choice_type_1 + except: # noqa: E722 + pass + try: + if not isinstance(data, dict): + raise TypeError() + tool_choice_type_2 = NamedIntegrationChoice.from_dict(data) + + return tool_choice_type_2 + except: # noqa: E722 + pass + try: + if not isinstance(data, dict): + raise TypeError() + tool_choice_type_3 = NamedSystemChoice.from_dict(data) + + return tool_choice_type_3 + except: # noqa: E722 + pass + if not isinstance(data, dict): + raise TypeError() + tool_choice_type_4 = NamedApiCallChoice.from_dict(data) + + return tool_choice_type_4 + + tool_choice = _parse_tool_choice(d.pop("tool_choice", UNSET)) + + frequency_penalty = d.pop("frequency_penalty", UNSET) + + presence_penalty = d.pop("presence_penalty", UNSET) + + temperature = d.pop("temperature", UNSET) + + top_p = d.pop("top_p", UNSET) + + repetition_penalty = d.pop("repetition_penalty", UNSET) + + length_penalty = d.pop("length_penalty", UNSET) + + min_p = d.pop("min_p", UNSET) + + model = d.pop("model", UNSET) + + seed = d.pop("seed", UNSET) + + max_tokens = d.pop("max_tokens", UNSET) + + _logit_bias = d.pop("logit_bias", UNSET) + logit_bias: Union[Unset, ChatInputLogitBias] + if isinstance(_logit_bias, Unset): + logit_bias = UNSET + else: + logit_bias = ChatInputLogitBias.from_dict(_logit_bias) + + def _parse_response_format( + data: object, + ) -> Union[ + "SchemaCompletionResponseFormat", "SimpleCompletionResponseFormat", Unset + ]: + if isinstance(data, Unset): + return data + try: + if not isinstance(data, dict): + raise TypeError() + response_format_type_0 = SimpleCompletionResponseFormat.from_dict(data) + + return 
response_format_type_0 + except: # noqa: E722 + pass + if not isinstance(data, dict): + raise TypeError() + response_format_type_1 = SchemaCompletionResponseFormat.from_dict(data) + + return response_format_type_1 + + response_format = _parse_response_format(d.pop("response_format", UNSET)) + + agent = d.pop("agent", UNSET) + + chat_input = cls( + messages=messages, + tools=tools, + remember=remember, + recall=recall, + save=save, + stream=stream, + stop=stop, + tool_choice=tool_choice, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + temperature=temperature, + top_p=top_p, + repetition_penalty=repetition_penalty, + length_penalty=length_penalty, + min_p=min_p, + model=model, + seed=seed, + max_tokens=max_tokens, + logit_bias=logit_bias, + response_format=response_format, + agent=agent, + ) + + chat_input.additional_properties = d + return chat_input + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_1.py b/sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_1.py deleted file mode 100644 index 0eba8e7c9..000000000 --- a/sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_1.py +++ /dev/null @@ -1,102 +0,0 @@ -from typing import ( - TYPE_CHECKING, - Any, - BinaryIO, - Dict, - List, - Optional, - TextIO, - Tuple, - Type, - TypeVar, - cast, -) - -from attrs import define as _attrs_define -from attrs import field as _attrs_field - -from ..models.chat_input_data_messages_item_content_type_2_item_type_1_type import ( - ChatInputDataMessagesItemContentType2ItemType1Type, -) -from ..types import UNSET, Unset - -if TYPE_CHECKING: - from ..models.chat_input_data_messages_item_content_type_2_item_type_1_image_url import ( - ChatInputDataMessagesItemContentType2ItemType1ImageUrl, - ) - - -T = TypeVar("T", bound="ChatInputDataMessagesItemContentType2ItemType1") - - -@_attrs_define -class ChatInputDataMessagesItemContentType2ItemType1: - """ - Attributes: - image_url (ChatInputDataMessagesItemContentType2ItemType1ImageUrl): The image URL - type (ChatInputDataMessagesItemContentType2ItemType1Type): The type (fixed to 'image_url') Default: - ChatInputDataMessagesItemContentType2ItemType1Type.IMAGE_URL. 
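Returning to the new ChatInput model defined above: only messages, tools, and stop are required; every other field defaults to UNSET and is omitted by to_dict. An illustrative sketch; the USER member of EntriesChatMLRole is assumed from the role list quoted in its docstring:

    from julep.sdk.models.chat_input import ChatInput
    from julep.sdk.models.chat_input_messages_item import ChatInputMessagesItem
    from julep.sdk.models.entries_chat_ml_role import EntriesChatMLRole

    body = ChatInput(
        messages=[
            ChatInputMessagesItem(
                role=EntriesChatMLRole.USER,  # assumed member for the "user" role
                content="Summarize our last conversation.",
            )
        ],
        tools=[],
        stop=[],
        temperature=0.2,
    )

    payload = body.to_dict()
    assert "seed" not in payload  # UNSET optionals never reach the wire payload
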
- """ - - image_url: "ChatInputDataMessagesItemContentType2ItemType1ImageUrl" - type: ChatInputDataMessagesItemContentType2ItemType1Type = ( - ChatInputDataMessagesItemContentType2ItemType1Type.IMAGE_URL - ) - additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) - - def to_dict(self) -> Dict[str, Any]: - from ..models.chat_input_data_messages_item_content_type_2_item_type_1_image_url import ( - ChatInputDataMessagesItemContentType2ItemType1ImageUrl, - ) - - image_url = self.image_url.to_dict() - - type = self.type.value - - field_dict: Dict[str, Any] = {} - field_dict.update(self.additional_properties) - field_dict.update( - { - "image_url": image_url, - "type": type, - } - ) - - return field_dict - - @classmethod - def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.chat_input_data_messages_item_content_type_2_item_type_1_image_url import ( - ChatInputDataMessagesItemContentType2ItemType1ImageUrl, - ) - - d = src_dict.copy() - image_url = ChatInputDataMessagesItemContentType2ItemType1ImageUrl.from_dict( - d.pop("image_url") - ) - - type = ChatInputDataMessagesItemContentType2ItemType1Type(d.pop("type")) - - chat_input_data_messages_item_content_type_2_item_type_1 = cls( - image_url=image_url, - type=type, - ) - - chat_input_data_messages_item_content_type_2_item_type_1.additional_properties = d - return chat_input_data_messages_item_content_type_2_item_type_1 - - @property - def additional_keys(self) -> List[str]: - return list(self.additional_properties.keys()) - - def __getitem__(self, key: str) -> Any: - return self.additional_properties[key] - - def __setitem__(self, key: str, value: Any) -> None: - self.additional_properties[key] = value - - def __delitem__(self, key: str) -> None: - del self.additional_properties[key] - - def __contains__(self, key: str) -> bool: - return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chat_input_logit_bias.py b/sdks/python/julep/sdk/models/chat_input_logit_bias.py new file mode 100644 index 000000000..9f1bb46e2 --- /dev/null +++ b/sdks/python/julep/sdk/models/chat_input_logit_bias.py @@ -0,0 +1,56 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..types import UNSET, Unset + +T = TypeVar("T", bound="ChatInputLogitBias") + + +@_attrs_define +class ChatInputLogitBias: + """Modify the likelihood of specified tokens appearing in the completion""" + + additional_properties: Dict[str, float] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + d = src_dict.copy() + chat_input_logit_bias = cls() + + chat_input_logit_bias.additional_properties = d + return chat_input_logit_bias + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> float: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: float) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git 
a/sdks/python/julep/sdk/models/chat_input_messages_item.py b/sdks/python/julep/sdk/models/chat_input_messages_item.py
new file mode 100644
index 000000000..9ff788635
--- /dev/null
+++ b/sdks/python/julep/sdk/models/chat_input_messages_item.py
@@ -0,0 +1,227 @@
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    BinaryIO,
+    Dict,
+    List,
+    Optional,
+    TextIO,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    cast,
+)
+
+from attrs import define as _attrs_define
+from attrs import field as _attrs_field
+
+from ..models.entries_chat_ml_role import EntriesChatMLRole
+from ..types import UNSET, Unset
+
+if TYPE_CHECKING:
+    from ..models.chat_input_messages_item_content_type_2_item_type_0 import (
+        ChatInputMessagesItemContentType2ItemType0,
+    )
+    from ..models.chat_input_messages_item_content_type_2_item_type_1 import (
+        ChatInputMessagesItemContentType2ItemType1,
+    )
+
+
+T = TypeVar("T", bound="ChatInputMessagesItem")
+
+
+@_attrs_define
+class ChatInputMessagesItem:
+    """
+    Attributes:
+        role (EntriesChatMLRole): ChatML role (system|assistant|user|function_call|function|function_response|auto)
+        content (Union[List[Union['ChatInputMessagesItemContentType2ItemType0',
+            'ChatInputMessagesItemContentType2ItemType1']], List[str], str]): The content parts of the message
+        name (Union[Unset, str]): Name
+        continue_ (Union[Unset, bool]): Whether to continue this message or return a new one
+    """
+
+    role: EntriesChatMLRole
+    content: Union[
+        List[
+            Union[
+                "ChatInputMessagesItemContentType2ItemType0",
+                "ChatInputMessagesItemContentType2ItemType1",
+            ]
+        ],
+        List[str],
+        str,
+    ]
+    name: Union[Unset, str] = UNSET
+    continue_: Union[Unset, bool] = UNSET
+    additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+    def to_dict(self) -> Dict[str, Any]:
+        role = self.role.value
+
+        # Distinguish a list of plain strings from a list of content-part
+        # models; only the latter needs to_dict() conversion
+        content: Union[List[Dict[str, Any]], List[str], str]
+        if isinstance(self.content, str):
+            content = self.content
+        elif all(isinstance(item, str) for item in self.content):
+            content = cast(List[str], self.content)
+        else:
+            content = []
+            for content_type_2_item_data in self.content:
+                # Both content-part model types serialize through to_dict()
+                content_type_2_item = content_type_2_item_data.to_dict()
+
+                content.append(content_type_2_item)
+
+        name = self.name
+
+        continue_ = self.continue_
+
+        field_dict: Dict[str, Any] = {}
+        field_dict.update(self.additional_properties)
+        field_dict.update(
+            {
+                "role": role,
+                "content": content,
+            }
+        )
+        if name is not UNSET:
+            field_dict["name"] = name
+        if continue_ is not UNSET:
+            field_dict["continue"] = continue_
+
+        return field_dict
+
+    @classmethod
+    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
+        from ..models.chat_input_messages_item_content_type_2_item_type_0 import (
+            ChatInputMessagesItemContentType2ItemType0,
+        )
+        from ..models.chat_input_messages_item_content_type_2_item_type_1 import (
+            ChatInputMessagesItemContentType2ItemType1,
+        )
+
+        d = src_dict.copy()
+        role = EntriesChatMLRole(d.pop("role"))
+
+        def _parse_content(
+            data: object,
+        ) -> Union[
+            List[
+                Union[
+                    "ChatInputMessagesItemContentType2ItemType0",
+                    "ChatInputMessagesItemContentType2ItemType1",
+                ]
+            ],
+
List[str],
+            str,
+        ]:
+            try:
+                # Only treat the value as List[str] when every element really
+                # is a string; otherwise fall through to the content-part models
+                if not isinstance(data, list) or not all(
+                    isinstance(item, str) for item in data
+                ):
+                    raise TypeError()
+                content_type_1 = cast(List[str], data)
+
+                return content_type_1
+            except:  # noqa: E722
+                pass
+            try:
+                if not isinstance(data, list):
+                    raise TypeError()
+                content_type_2 = []
+                _content_type_2 = data
+                for content_type_2_item_data in _content_type_2:
+
+                    def _parse_content_type_2_item(
+                        data: object,
+                    ) -> Union[
+                        "ChatInputMessagesItemContentType2ItemType0",
+                        "ChatInputMessagesItemContentType2ItemType1",
+                    ]:
+                        try:
+                            if not isinstance(data, dict):
+                                raise TypeError()
+                            content_type_2_item_type_0 = (
+                                ChatInputMessagesItemContentType2ItemType0.from_dict(
+                                    data
+                                )
+                            )
+
+                            return content_type_2_item_type_0
+                        except:  # noqa: E722
+                            pass
+                        if not isinstance(data, dict):
+                            raise TypeError()
+                        content_type_2_item_type_1 = (
+                            ChatInputMessagesItemContentType2ItemType1.from_dict(data)
+                        )
+
+                        return content_type_2_item_type_1
+
+                    content_type_2_item = _parse_content_type_2_item(
+                        content_type_2_item_data
+                    )
+
+                    content_type_2.append(content_type_2_item)
+
+                return content_type_2
+            except:  # noqa: E722
+                pass
+            return cast(
+                Union[
+                    List[
+                        Union[
+                            "ChatInputMessagesItemContentType2ItemType0",
+                            "ChatInputMessagesItemContentType2ItemType1",
+                        ]
+                    ],
+                    List[str],
+                    str,
+                ],
+                data,
+            )
+
+        content = _parse_content(d.pop("content"))
+
+        name = d.pop("name", UNSET)
+
+        continue_ = d.pop("continue", UNSET)
+
+        chat_input_messages_item = cls(
+            role=role,
+            content=content,
+            name=name,
+            continue_=continue_,
+        )
+
+        chat_input_messages_item.additional_properties = d
+        return chat_input_messages_item
+
+    @property
+    def additional_keys(self) -> List[str]:
+        return list(self.additional_properties.keys())
+
+    def __getitem__(self, key: str) -> Any:
+        return self.additional_properties[key]
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        self.additional_properties[key] = value
+
+    def __delitem__(self, key: str) -> None:
+        del self.additional_properties[key]
+
+    def __contains__(self, key: str) -> bool:
+        return key in self.additional_properties
diff --git a/sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_0.py b/sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_0.py
similarity index 62%
rename from sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_0.py
rename to sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_0.py
index b6a381191..be8afad42 100644
--- a/sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_0.py
+++ b/sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_0.py
@@ -14,26 +14,26 @@
 from attrs import define as _attrs_define
 from attrs import field as _attrs_field
 
-from ..models.chat_input_data_messages_item_content_type_2_item_type_0_type import (
-    ChatInputDataMessagesItemContentType2ItemType0Type,
+from ..models.chat_input_messages_item_content_type_2_item_type_0_type import (
+    ChatInputMessagesItemContentType2ItemType0Type,
 )
 from ..types import UNSET, Unset
 
-T = TypeVar("T", bound="ChatInputDataMessagesItemContentType2ItemType0")
+T = TypeVar("T", bound="ChatInputMessagesItemContentType2ItemType0")
 
 
 @_attrs_define
-class ChatInputDataMessagesItemContentType2ItemType0:
+class ChatInputMessagesItemContentType2ItemType0:
     """
     Attributes:
         text (str):
-        type (ChatInputDataMessagesItemContentType2ItemType0Type): The type (fixed to 'text') Default:
-            ChatInputDataMessagesItemContentType2ItemType0Type.TEXT.
+ type (ChatInputMessagesItemContentType2ItemType0Type): The type (fixed to 'text') Default: + ChatInputMessagesItemContentType2ItemType0Type.TEXT. """ text: str - type: ChatInputDataMessagesItemContentType2ItemType0Type = ( - ChatInputDataMessagesItemContentType2ItemType0Type.TEXT + type: ChatInputMessagesItemContentType2ItemType0Type = ( + ChatInputMessagesItemContentType2ItemType0Type.TEXT ) additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) @@ -58,15 +58,15 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() text = d.pop("text") - type = ChatInputDataMessagesItemContentType2ItemType0Type(d.pop("type")) + type = ChatInputMessagesItemContentType2ItemType0Type(d.pop("type")) - chat_input_data_messages_item_content_type_2_item_type_0 = cls( + chat_input_messages_item_content_type_2_item_type_0 = cls( text=text, type=type, ) - chat_input_data_messages_item_content_type_2_item_type_0.additional_properties = d - return chat_input_data_messages_item_content_type_2_item_type_0 + chat_input_messages_item_content_type_2_item_type_0.additional_properties = d + return chat_input_messages_item_content_type_2_item_type_0 @property def additional_keys(self) -> List[str]: diff --git a/sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_0_type.py b/sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_0_type.py similarity index 60% rename from sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_0_type.py rename to sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_0_type.py index dc472d903..732083108 100644 --- a/sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_0_type.py +++ b/sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_0_type.py @@ -1,7 +1,7 @@ from enum import Enum -class ChatInputDataMessagesItemContentType2ItemType0Type(str, Enum): +class ChatInputMessagesItemContentType2ItemType0Type(str, Enum): TEXT = "text" def __str__(self) -> str: diff --git a/sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_1.py b/sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_1.py new file mode 100644 index 000000000..b7bb96ca2 --- /dev/null +++ b/sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_1.py @@ -0,0 +1,102 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, + cast, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.chat_input_messages_item_content_type_2_item_type_1_type import ( + ChatInputMessagesItemContentType2ItemType1Type, +) +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.chat_input_messages_item_content_type_2_item_type_1_image_url import ( + ChatInputMessagesItemContentType2ItemType1ImageUrl, + ) + + +T = TypeVar("T", bound="ChatInputMessagesItemContentType2ItemType1") + + +@_attrs_define +class ChatInputMessagesItemContentType2ItemType1: + """ + Attributes: + image_url (ChatInputMessagesItemContentType2ItemType1ImageUrl): The image URL + type (ChatInputMessagesItemContentType2ItemType1Type): The type (fixed to 'image_url') Default: + ChatInputMessagesItemContentType2ItemType1Type.IMAGE_URL. 
+ """ + + image_url: "ChatInputMessagesItemContentType2ItemType1ImageUrl" + type: ChatInputMessagesItemContentType2ItemType1Type = ( + ChatInputMessagesItemContentType2ItemType1Type.IMAGE_URL + ) + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + from ..models.chat_input_messages_item_content_type_2_item_type_1_image_url import ( + ChatInputMessagesItemContentType2ItemType1ImageUrl, + ) + + image_url = self.image_url.to_dict() + + type = self.type.value + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "image_url": image_url, + "type": type, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.chat_input_messages_item_content_type_2_item_type_1_image_url import ( + ChatInputMessagesItemContentType2ItemType1ImageUrl, + ) + + d = src_dict.copy() + image_url = ChatInputMessagesItemContentType2ItemType1ImageUrl.from_dict( + d.pop("image_url") + ) + + type = ChatInputMessagesItemContentType2ItemType1Type(d.pop("type")) + + chat_input_messages_item_content_type_2_item_type_1 = cls( + image_url=image_url, + type=type, + ) + + chat_input_messages_item_content_type_2_item_type_1.additional_properties = d + return chat_input_messages_item_content_type_2_item_type_1 + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_1_image_url.py b/sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_1_image_url.py similarity index 81% rename from sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_1_image_url.py rename to sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_1_image_url.py index 1512a32ee..ee8cde1f8 100644 --- a/sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_1_image_url.py +++ b/sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_1_image_url.py @@ -17,11 +17,11 @@ from ..models.entries_image_detail import EntriesImageDetail from ..types import UNSET, Unset -T = TypeVar("T", bound="ChatInputDataMessagesItemContentType2ItemType1ImageUrl") +T = TypeVar("T", bound="ChatInputMessagesItemContentType2ItemType1ImageUrl") @_attrs_define -class ChatInputDataMessagesItemContentType2ItemType1ImageUrl: +class ChatInputMessagesItemContentType2ItemType1ImageUrl: """The image URL Attributes: @@ -56,13 +56,13 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: detail = EntriesImageDetail(d.pop("detail")) - chat_input_data_messages_item_content_type_2_item_type_1_image_url = cls( + chat_input_messages_item_content_type_2_item_type_1_image_url = cls( url=url, detail=detail, ) - chat_input_data_messages_item_content_type_2_item_type_1_image_url.additional_properties = d - return chat_input_data_messages_item_content_type_2_item_type_1_image_url + chat_input_messages_item_content_type_2_item_type_1_image_url.additional_properties = d + return 
chat_input_messages_item_content_type_2_item_type_1_image_url @property def additional_keys(self) -> List[str]: diff --git a/sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_1_type.py b/sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_1_type.py similarity index 62% rename from sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_1_type.py rename to sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_1_type.py index b849f3bd5..2a03edf29 100644 --- a/sdks/python/julep/sdk/models/chat_input_data_messages_item_content_type_2_item_type_1_type.py +++ b/sdks/python/julep/sdk/models/chat_input_messages_item_content_type_2_item_type_1_type.py @@ -1,7 +1,7 @@ from enum import Enum -class ChatInputDataMessagesItemContentType2ItemType1Type(str, Enum): +class ChatInputMessagesItemContentType2ItemType1Type(str, Enum): IMAGE_URL = "image_url" def __str__(self) -> str: diff --git a/sdks/python/julep/sdk/models/chat_input_data_tool_choice_type_0.py b/sdks/python/julep/sdk/models/chat_input_tool_choice_type_0.py similarity index 72% rename from sdks/python/julep/sdk/models/chat_input_data_tool_choice_type_0.py rename to sdks/python/julep/sdk/models/chat_input_tool_choice_type_0.py index f8d2e612a..42a9d7fa6 100644 --- a/sdks/python/julep/sdk/models/chat_input_data_tool_choice_type_0.py +++ b/sdks/python/julep/sdk/models/chat_input_tool_choice_type_0.py @@ -1,7 +1,7 @@ from enum import Enum -class ChatInputDataToolChoiceType0(str, Enum): +class ChatInputToolChoiceType0(str, Enum): AUTO = "auto" NONE = "none" diff --git a/sdks/python/julep/sdk/models/chat_output_chunk.py b/sdks/python/julep/sdk/models/chat_output_chunk.py new file mode 100644 index 000000000..64c0ba4a1 --- /dev/null +++ b/sdks/python/julep/sdk/models/chat_output_chunk.py @@ -0,0 +1,122 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.finish_reason import FinishReason +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.chat_output_chunk_delta import ChatOutputChunkDelta + from ..models.log_prob_response import LogProbResponse + + +T = TypeVar("T", bound="ChatOutputChunk") + + +@_attrs_define +class ChatOutputChunk: + """Streaming chat completion output + + Attributes: + index (int): + finish_reason (FinishReason): The reason the model stopped generating tokens. This will be `stop` + if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request + was reached, `content_filter` if content was omitted due to a flag + from our content filters, `tool_calls` if the model called a tool. Default: FinishReason.STOP. 
+ delta (ChatOutputChunkDelta): The message generated by the model + logprobs (Union[Unset, LogProbResponse]): + """ + + index: int + delta: "ChatOutputChunkDelta" + finish_reason: FinishReason = FinishReason.STOP + logprobs: Union[Unset, "LogProbResponse"] = UNSET + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + from ..models.chat_output_chunk_delta import ChatOutputChunkDelta + from ..models.log_prob_response import LogProbResponse + + index = self.index + + finish_reason = self.finish_reason.value + + delta = self.delta.to_dict() + + logprobs: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.logprobs, Unset): + logprobs = self.logprobs.to_dict() + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "index": index, + "finish_reason": finish_reason, + "delta": delta, + } + ) + if logprobs is not UNSET: + field_dict["logprobs"] = logprobs + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.chat_output_chunk_delta import ChatOutputChunkDelta + from ..models.log_prob_response import LogProbResponse + + d = src_dict.copy() + index = d.pop("index") + + finish_reason = FinishReason(d.pop("finish_reason")) + + delta = ChatOutputChunkDelta.from_dict(d.pop("delta")) + + _logprobs = d.pop("logprobs", UNSET) + logprobs: Union[Unset, LogProbResponse] + if isinstance(_logprobs, Unset): + logprobs = UNSET + else: + logprobs = LogProbResponse.from_dict(_logprobs) + + chat_output_chunk = cls( + index=index, + finish_reason=finish_reason, + delta=delta, + logprobs=logprobs, + ) + + chat_output_chunk.additional_properties = d + return chat_output_chunk + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chat_output_chunk_delta.py b/sdks/python/julep/sdk/models/chat_output_chunk_delta.py new file mode 100644 index 000000000..1a65910d0 --- /dev/null +++ b/sdks/python/julep/sdk/models/chat_output_chunk_delta.py @@ -0,0 +1,228 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.entries_chat_ml_role import EntriesChatMLRole +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.chat_output_chunk_delta_content_type_2_item_type_0 import ( + ChatOutputChunkDeltaContentType2ItemType0, + ) + from ..models.chat_output_chunk_delta_content_type_2_item_type_1 import ( + ChatOutputChunkDeltaContentType2ItemType1, + ) + + +T = TypeVar("T", bound="ChatOutputChunkDelta") + + +@_attrs_define +class ChatOutputChunkDelta: + """The message generated by the model + + Attributes: + role (EntriesChatMLRole): ChatML role (system|assistant|user|function_call|function|function_response|auto) + content (Union[List[Union['ChatOutputChunkDeltaContentType2ItemType0', + 'ChatOutputChunkDeltaContentType2ItemType1']], List[str], str]): The content parts of the message + name 
(Union[Unset, str]): Name
+        continue_ (Union[Unset, bool]): Whether to continue this message or return a new one
+    """
+
+    role: EntriesChatMLRole
+    content: Union[
+        List[
+            Union[
+                "ChatOutputChunkDeltaContentType2ItemType0",
+                "ChatOutputChunkDeltaContentType2ItemType1",
+            ]
+        ],
+        List[str],
+        str,
+    ]
+    name: Union[Unset, str] = UNSET
+    continue_: Union[Unset, bool] = UNSET
+    additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+    def to_dict(self) -> Dict[str, Any]:
+        from ..models.chat_output_chunk_delta_content_type_2_item_type_0 import (
+            ChatOutputChunkDeltaContentType2ItemType0,
+        )
+        from ..models.chat_output_chunk_delta_content_type_2_item_type_1 import (
+            ChatOutputChunkDeltaContentType2ItemType1,
+        )
+
+        role = self.role.value
+
+        content: Union[List[Dict[str, Any]], List[str], str]
+        # Plain list-of-strings content is passed through unchanged; lists of
+        # structured content parts are serialized item by item below.
+        if isinstance(self.content, list) and all(
+            isinstance(content_item, str) for content_item in self.content
+        ):
+            content = self.content
+
+        elif isinstance(self.content, list):
+            content = []
+            for content_type_2_item_data in self.content:
+                content_type_2_item: Dict[str, Any]
+                if isinstance(
+                    content_type_2_item_data, ChatOutputChunkDeltaContentType2ItemType0
+                ):
+                    content_type_2_item = content_type_2_item_data.to_dict()
+                else:
+                    content_type_2_item = content_type_2_item_data.to_dict()
+
+                content.append(content_type_2_item)
+
+        else:
+            content = self.content
+
+        name = self.name
+
+        continue_ = self.continue_
+
+        field_dict: Dict[str, Any] = {}
+        field_dict.update(self.additional_properties)
+        field_dict.update(
+            {
+                "role": role,
+                "content": content,
+            }
+        )
+        if name is not UNSET:
+            field_dict["name"] = name
+        if continue_ is not UNSET:
+            field_dict["continue"] = continue_
+
+        return field_dict
+
+    @classmethod
+    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
+        from ..models.chat_output_chunk_delta_content_type_2_item_type_0 import (
+            ChatOutputChunkDeltaContentType2ItemType0,
+        )
+        from ..models.chat_output_chunk_delta_content_type_2_item_type_1 import (
+            ChatOutputChunkDeltaContentType2ItemType1,
+        )
+
+        d = src_dict.copy()
+        role = EntriesChatMLRole(d.pop("role"))
+
+        def _parse_content(
+            data: object,
+        ) -> Union[
+            List[
+                Union[
+                    "ChatOutputChunkDeltaContentType2ItemType0",
+                    "ChatOutputChunkDeltaContentType2ItemType1",
+                ]
+            ],
+            List[str],
+            str,
+        ]:
+            try:
+                if not isinstance(data, list):
+                    raise TypeError()
+                # Only treat the value as List[str] when every item is a
+                # string; otherwise fall through to the structured parser.
+                if not all(isinstance(content_item, str) for content_item in data):
+                    raise TypeError()
+                content_type_1 = cast(List[str], data)
+
+                return content_type_1
+            except:  # noqa: E722
+                pass
+            try:
+                if not isinstance(data, list):
+                    raise TypeError()
+                content_type_2 = []
+                _content_type_2 = data
+                for content_type_2_item_data in _content_type_2:
+
+                    def _parse_content_type_2_item(
+                        data: object,
+                    ) -> Union[
+                        "ChatOutputChunkDeltaContentType2ItemType0",
+                        "ChatOutputChunkDeltaContentType2ItemType1",
+                    ]:
+                        try:
+                            if not isinstance(data, dict):
+                                raise TypeError()
+                            content_type_2_item_type_0 = (
+                                ChatOutputChunkDeltaContentType2ItemType0.from_dict(
+                                    data
+                                )
+                            )
+
+                            return content_type_2_item_type_0
+                        except:  # noqa: E722
+                            pass
+                        if not isinstance(data, dict):
+                            raise TypeError()
+                        content_type_2_item_type_1 = (
+                            ChatOutputChunkDeltaContentType2ItemType1.from_dict(data)
+                        )
+
+                        return content_type_2_item_type_1
+
+                    content_type_2_item = _parse_content_type_2_item(
+                        content_type_2_item_data
+                    )
+
+                    content_type_2.append(content_type_2_item)
+
+                return content_type_2
+            except:  # noqa: E722
+                pass
+            return cast(
+                Union[
+                    List[
+                        Union[
+                            "ChatOutputChunkDeltaContentType2ItemType0",
+                            "ChatOutputChunkDeltaContentType2ItemType1",
+                        ]
+                    ],
+                    List[str],
+                    str,
+                ],
+                data,
+            )
+
+        
content = _parse_content(d.pop("content")) + + name = d.pop("name", UNSET) + + continue_ = d.pop("continue", UNSET) + + chat_output_chunk_delta = cls( + role=role, + content=content, + name=name, + continue_=continue_, + ) + + chat_output_chunk_delta.additional_properties = d + return chat_output_chunk_delta + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_0.py b/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_0.py new file mode 100644 index 000000000..3c0992bab --- /dev/null +++ b/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_0.py @@ -0,0 +1,85 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.chat_output_chunk_delta_content_type_2_item_type_0_type import ( + ChatOutputChunkDeltaContentType2ItemType0Type, +) +from ..types import UNSET, Unset + +T = TypeVar("T", bound="ChatOutputChunkDeltaContentType2ItemType0") + + +@_attrs_define +class ChatOutputChunkDeltaContentType2ItemType0: + """ + Attributes: + text (str): + type (ChatOutputChunkDeltaContentType2ItemType0Type): The type (fixed to 'text') Default: + ChatOutputChunkDeltaContentType2ItemType0Type.TEXT. 
+ """ + + text: str + type: ChatOutputChunkDeltaContentType2ItemType0Type = ( + ChatOutputChunkDeltaContentType2ItemType0Type.TEXT + ) + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + text = self.text + + type = self.type.value + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "text": text, + "type": type, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + d = src_dict.copy() + text = d.pop("text") + + type = ChatOutputChunkDeltaContentType2ItemType0Type(d.pop("type")) + + chat_output_chunk_delta_content_type_2_item_type_0 = cls( + text=text, + type=type, + ) + + chat_output_chunk_delta_content_type_2_item_type_0.additional_properties = d + return chat_output_chunk_delta_content_type_2_item_type_0 + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_0_type.py b/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_0_type.py new file mode 100644 index 000000000..f4d50d874 --- /dev/null +++ b/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_0_type.py @@ -0,0 +1,8 @@ +from enum import Enum + + +class ChatOutputChunkDeltaContentType2ItemType0Type(str, Enum): + TEXT = "text" + + def __str__(self) -> str: + return str(self.value) diff --git a/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_1.py b/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_1.py new file mode 100644 index 000000000..3d677164c --- /dev/null +++ b/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_1.py @@ -0,0 +1,102 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, + cast, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.chat_output_chunk_delta_content_type_2_item_type_1_type import ( + ChatOutputChunkDeltaContentType2ItemType1Type, +) +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.chat_output_chunk_delta_content_type_2_item_type_1_image_url import ( + ChatOutputChunkDeltaContentType2ItemType1ImageUrl, + ) + + +T = TypeVar("T", bound="ChatOutputChunkDeltaContentType2ItemType1") + + +@_attrs_define +class ChatOutputChunkDeltaContentType2ItemType1: + """ + Attributes: + image_url (ChatOutputChunkDeltaContentType2ItemType1ImageUrl): The image URL + type (ChatOutputChunkDeltaContentType2ItemType1Type): The type (fixed to 'image_url') Default: + ChatOutputChunkDeltaContentType2ItemType1Type.IMAGE_URL. 
+ """ + + image_url: "ChatOutputChunkDeltaContentType2ItemType1ImageUrl" + type: ChatOutputChunkDeltaContentType2ItemType1Type = ( + ChatOutputChunkDeltaContentType2ItemType1Type.IMAGE_URL + ) + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + from ..models.chat_output_chunk_delta_content_type_2_item_type_1_image_url import ( + ChatOutputChunkDeltaContentType2ItemType1ImageUrl, + ) + + image_url = self.image_url.to_dict() + + type = self.type.value + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "image_url": image_url, + "type": type, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.chat_output_chunk_delta_content_type_2_item_type_1_image_url import ( + ChatOutputChunkDeltaContentType2ItemType1ImageUrl, + ) + + d = src_dict.copy() + image_url = ChatOutputChunkDeltaContentType2ItemType1ImageUrl.from_dict( + d.pop("image_url") + ) + + type = ChatOutputChunkDeltaContentType2ItemType1Type(d.pop("type")) + + chat_output_chunk_delta_content_type_2_item_type_1 = cls( + image_url=image_url, + type=type, + ) + + chat_output_chunk_delta_content_type_2_item_type_1.additional_properties = d + return chat_output_chunk_delta_content_type_2_item_type_1 + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_1_image_url.py b/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_1_image_url.py new file mode 100644 index 000000000..acfdd7838 --- /dev/null +++ b/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_1_image_url.py @@ -0,0 +1,81 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.entries_image_detail import EntriesImageDetail +from ..types import UNSET, Unset + +T = TypeVar("T", bound="ChatOutputChunkDeltaContentType2ItemType1ImageUrl") + + +@_attrs_define +class ChatOutputChunkDeltaContentType2ItemType1ImageUrl: + """The image URL + + Attributes: + url (str): Image URL or base64 data url (e.g. `data:image/jpeg;base64,`) + detail (EntriesImageDetail): Image detail level Default: EntriesImageDetail.AUTO. 
+    """
+
+    url: str
+    detail: EntriesImageDetail = EntriesImageDetail.AUTO
+    additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+    def to_dict(self) -> Dict[str, Any]:
+        url = self.url
+
+        detail = self.detail.value
+
+        field_dict: Dict[str, Any] = {}
+        field_dict.update(self.additional_properties)
+        field_dict.update(
+            {
+                "url": url,
+                "detail": detail,
+            }
+        )
+
+        return field_dict
+
+    @classmethod
+    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
+        d = src_dict.copy()
+        url = d.pop("url")
+
+        detail = EntriesImageDetail(d.pop("detail"))
+
+        chat_output_chunk_delta_content_type_2_item_type_1_image_url = cls(
+            url=url,
+            detail=detail,
+        )
+
+        chat_output_chunk_delta_content_type_2_item_type_1_image_url.additional_properties = d
+        return chat_output_chunk_delta_content_type_2_item_type_1_image_url
+
+    @property
+    def additional_keys(self) -> List[str]:
+        return list(self.additional_properties.keys())
+
+    def __getitem__(self, key: str) -> Any:
+        return self.additional_properties[key]
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        self.additional_properties[key] = value
+
+    def __delitem__(self, key: str) -> None:
+        del self.additional_properties[key]
+
+    def __contains__(self, key: str) -> bool:
+        return key in self.additional_properties
diff --git a/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_1_type.py b/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_1_type.py
new file mode 100644
index 000000000..3d39148f5
--- /dev/null
+++ b/sdks/python/julep/sdk/models/chat_output_chunk_delta_content_type_2_item_type_1_type.py
@@ -0,0 +1,8 @@
+from enum import Enum
+
+
+class ChatOutputChunkDeltaContentType2ItemType1Type(str, Enum):
+    IMAGE_URL = "image_url"
+
+    def __str__(self) -> str:
+        return str(self.value)
diff --git a/sdks/python/julep/sdk/models/chat_settings.py b/sdks/python/julep/sdk/models/chat_settings.py
new file mode 100644
index 000000000..d390a1ace
--- /dev/null
+++ b/sdks/python/julep/sdk/models/chat_settings.py
@@ -0,0 +1,276 @@
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    BinaryIO,
+    Dict,
+    List,
+    Optional,
+    TextIO,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+    cast,
+)
+
+from attrs import define as _attrs_define
+from attrs import field as _attrs_field
+
+from ..types import UNSET, Unset
+
+if TYPE_CHECKING:
+    from ..models.chat_settings_logit_bias import ChatSettingsLogitBias
+    from ..models.schema_completion_response_format import (
+        SchemaCompletionResponseFormat,
+    )
+    from ..models.simple_completion_response_format import (
+        SimpleCompletionResponseFormat,
+    )
+
+
+T = TypeVar("T", bound="ChatSettings")
+
+
+@_attrs_define
+class ChatSettings:
+    """
+    Attributes:
+        stream (bool): Indicates if the server should stream the response as it's generated Default: False.
+        stop (List[str]): Up to 4 sequences where the API will stop generating further tokens.
+        frequency_penalty (Union[Unset, float]): Number between -2.0 and 2.0. Positive values penalize new tokens based
+            on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line
+            verbatim.
+        presence_penalty (Union[Unset, float]): Number between -2.0 and 2.0. Positive values penalize new tokens based
+            on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
+        temperature (Union[Unset, float]): What sampling temperature to use, between 0 and 2. 
Higher values like 0.8
+            will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
+        top_p (Union[Unset, float]): Defaults to 1. An alternative to sampling with temperature, called nucleus sampling,
+            where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens
+            comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but
+            not both.
+        repetition_penalty (Union[Unset, float]): Number between 0 and 2.0. 1.0 is neutral and values larger than that
+            penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to
+            repeat the same line verbatim.
+        length_penalty (Union[Unset, float]): Number between 0 and 2.0. 1.0 is neutral and values larger than that
+            penalize number of tokens generated.
+        min_p (Union[Unset, float]): Minimum probability compared to leading token to be considered
+        model (Union[Unset, str]): Model name to use. Must be identifier-safe Unicode.
+            See: https://unicode.org/reports/tr31/
+            See: https://www.unicode.org/reports/tr39/#Identifier_Characters
+        seed (Union[Unset, int]): If specified, the system will make a best effort to sample deterministically for that
+            particular seed value
+        max_tokens (Union[Unset, int]): The maximum number of tokens to generate in the chat completion
+        logit_bias (Union[Unset, ChatSettingsLogitBias]): Modify the likelihood of specified tokens appearing in the
+            completion
+        response_format (Union['SchemaCompletionResponseFormat', 'SimpleCompletionResponseFormat', Unset]): Response
+            format (set to `json_object` to restrict output to JSON)
+        agent (Union[Unset, str]):
+    """
+
+    stop: List[str]
+    stream: bool = False
+    frequency_penalty: Union[Unset, float] = UNSET
+    presence_penalty: Union[Unset, float] = UNSET
+    temperature: Union[Unset, float] = UNSET
+    top_p: Union[Unset, float] = UNSET
+    repetition_penalty: Union[Unset, float] = UNSET
+    length_penalty: Union[Unset, float] = UNSET
+    min_p: Union[Unset, float] = UNSET
+    model: Union[Unset, str] = UNSET
+    seed: Union[Unset, int] = UNSET
+    max_tokens: Union[Unset, int] = UNSET
+    logit_bias: Union[Unset, "ChatSettingsLogitBias"] = UNSET
+    response_format: Union[
+        "SchemaCompletionResponseFormat", "SimpleCompletionResponseFormat", Unset
+    ] = UNSET
+    agent: Union[Unset, str] = UNSET
+    additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)
+
+    def to_dict(self) -> Dict[str, Any]:
+        from ..models.chat_settings_logit_bias import ChatSettingsLogitBias
+        from ..models.schema_completion_response_format import (
+            SchemaCompletionResponseFormat,
+        )
+        from ..models.simple_completion_response_format import (
+            SimpleCompletionResponseFormat,
+        )
+
+        stream = self.stream
+
+        stop = self.stop
+
+        frequency_penalty = self.frequency_penalty
+
+        presence_penalty = self.presence_penalty
+
+        temperature = self.temperature
+
+        top_p = self.top_p
+
+        repetition_penalty = self.repetition_penalty
+
+        length_penalty = self.length_penalty
+
+        min_p = self.min_p
+
+        model = self.model
+
+        seed = self.seed
+
+        max_tokens = self.max_tokens
+
+        logit_bias: Union[Unset, Dict[str, Any]] = UNSET
+        if not isinstance(self.logit_bias, Unset):
+            logit_bias = self.logit_bias.to_dict()
+
+        response_format: Union[Dict[str, Any], Unset]
+        if isinstance(self.response_format, Unset):
+            response_format = UNSET
+        elif isinstance(self.response_format, SimpleCompletionResponseFormat):
+            response_format = self.response_format.to_dict()
+        else:
+            
response_format = self.response_format.to_dict() + + agent = self.agent + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "stream": stream, + "stop": stop, + } + ) + if frequency_penalty is not UNSET: + field_dict["frequency_penalty"] = frequency_penalty + if presence_penalty is not UNSET: + field_dict["presence_penalty"] = presence_penalty + if temperature is not UNSET: + field_dict["temperature"] = temperature + if top_p is not UNSET: + field_dict["top_p"] = top_p + if repetition_penalty is not UNSET: + field_dict["repetition_penalty"] = repetition_penalty + if length_penalty is not UNSET: + field_dict["length_penalty"] = length_penalty + if min_p is not UNSET: + field_dict["min_p"] = min_p + if model is not UNSET: + field_dict["model"] = model + if seed is not UNSET: + field_dict["seed"] = seed + if max_tokens is not UNSET: + field_dict["max_tokens"] = max_tokens + if logit_bias is not UNSET: + field_dict["logit_bias"] = logit_bias + if response_format is not UNSET: + field_dict["response_format"] = response_format + if agent is not UNSET: + field_dict["agent"] = agent + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.chat_settings_logit_bias import ChatSettingsLogitBias + from ..models.schema_completion_response_format import ( + SchemaCompletionResponseFormat, + ) + from ..models.simple_completion_response_format import ( + SimpleCompletionResponseFormat, + ) + + d = src_dict.copy() + stream = d.pop("stream") + + stop = cast(List[str], d.pop("stop")) + + frequency_penalty = d.pop("frequency_penalty", UNSET) + + presence_penalty = d.pop("presence_penalty", UNSET) + + temperature = d.pop("temperature", UNSET) + + top_p = d.pop("top_p", UNSET) + + repetition_penalty = d.pop("repetition_penalty", UNSET) + + length_penalty = d.pop("length_penalty", UNSET) + + min_p = d.pop("min_p", UNSET) + + model = d.pop("model", UNSET) + + seed = d.pop("seed", UNSET) + + max_tokens = d.pop("max_tokens", UNSET) + + _logit_bias = d.pop("logit_bias", UNSET) + logit_bias: Union[Unset, ChatSettingsLogitBias] + if isinstance(_logit_bias, Unset): + logit_bias = UNSET + else: + logit_bias = ChatSettingsLogitBias.from_dict(_logit_bias) + + def _parse_response_format( + data: object, + ) -> Union[ + "SchemaCompletionResponseFormat", "SimpleCompletionResponseFormat", Unset + ]: + if isinstance(data, Unset): + return data + try: + if not isinstance(data, dict): + raise TypeError() + response_format_type_0 = SimpleCompletionResponseFormat.from_dict(data) + + return response_format_type_0 + except: # noqa: E722 + pass + if not isinstance(data, dict): + raise TypeError() + response_format_type_1 = SchemaCompletionResponseFormat.from_dict(data) + + return response_format_type_1 + + response_format = _parse_response_format(d.pop("response_format", UNSET)) + + agent = d.pop("agent", UNSET) + + chat_settings = cls( + stream=stream, + stop=stop, + frequency_penalty=frequency_penalty, + presence_penalty=presence_penalty, + temperature=temperature, + top_p=top_p, + repetition_penalty=repetition_penalty, + length_penalty=length_penalty, + min_p=min_p, + model=model, + seed=seed, + max_tokens=max_tokens, + logit_bias=logit_bias, + response_format=response_format, + agent=agent, + ) + + chat_settings.additional_properties = d + return chat_settings + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + 
return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chat_settings_logit_bias.py b/sdks/python/julep/sdk/models/chat_settings_logit_bias.py new file mode 100644 index 000000000..be3aee2cf --- /dev/null +++ b/sdks/python/julep/sdk/models/chat_settings_logit_bias.py @@ -0,0 +1,56 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..types import UNSET, Unset + +T = TypeVar("T", bound="ChatSettingsLogitBias") + + +@_attrs_define +class ChatSettingsLogitBias: + """Modify the likelihood of specified tokens appearing in the completion""" + + additional_properties: Dict[str, float] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + d = src_dict.copy() + chat_settings_logit_bias = cls() + + chat_settings_logit_bias.additional_properties = d + return chat_settings_logit_bias + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> float: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: float) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chat_token_log_prob.py b/sdks/python/julep/sdk/models/chat_token_log_prob.py new file mode 100644 index 000000000..69659947b --- /dev/null +++ b/sdks/python/julep/sdk/models/chat_token_log_prob.py @@ -0,0 +1,116 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.base_token_log_prob import BaseTokenLogProb + + +T = TypeVar("T", bound="ChatTokenLogProb") + + +@_attrs_define +class ChatTokenLogProb: + """ + Attributes: + token (str): + logprob (float): The log probability of the token + top_logprobs (List['BaseTokenLogProb']): The log probabilities of the tokens + bytes_ (Union[Unset, List[int]]): + """ + + token: str + logprob: float + top_logprobs: List["BaseTokenLogProb"] + bytes_: Union[Unset, List[int]] = UNSET + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + from ..models.base_token_log_prob import BaseTokenLogProb + + token = self.token + + logprob = self.logprob + + top_logprobs = [] + for top_logprobs_item_data in self.top_logprobs: + top_logprobs_item = top_logprobs_item_data.to_dict() + top_logprobs.append(top_logprobs_item) + + bytes_: Union[Unset, List[int]] = UNSET + if not isinstance(self.bytes_, Unset): + bytes_ = self.bytes_ + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + 
field_dict.update( + { + "token": token, + "logprob": logprob, + "top_logprobs": top_logprobs, + } + ) + if bytes_ is not UNSET: + field_dict["bytes"] = bytes_ + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.base_token_log_prob import BaseTokenLogProb + + d = src_dict.copy() + token = d.pop("token") + + logprob = d.pop("logprob") + + top_logprobs = [] + _top_logprobs = d.pop("top_logprobs") + for top_logprobs_item_data in _top_logprobs: + top_logprobs_item = BaseTokenLogProb.from_dict(top_logprobs_item_data) + + top_logprobs.append(top_logprobs_item) + + bytes_ = cast(List[int], d.pop("bytes", UNSET)) + + chat_token_log_prob = cls( + token=token, + logprob=logprob, + top_logprobs=top_logprobs, + bytes_=bytes_, + ) + + chat_token_log_prob.additional_properties = d + return chat_token_log_prob + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chosen_api_call.py b/sdks/python/julep/sdk/models/chosen_api_call.py new file mode 100644 index 000000000..233537c80 --- /dev/null +++ b/sdks/python/julep/sdk/models/chosen_api_call.py @@ -0,0 +1,88 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.chosen_api_call_type import ChosenApiCallType +from ..types import UNSET, Unset + +T = TypeVar("T", bound="ChosenApiCall") + + +@_attrs_define +class ChosenApiCall: + """ + Attributes: + type (ChosenApiCallType): + api_call (Any): + id (str): + """ + + type: ChosenApiCallType + api_call: Any + id: str + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + type = self.type.value + + api_call = self.api_call + + id = self.id + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "type": type, + "api_call": api_call, + "id": id, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + d = src_dict.copy() + type = ChosenApiCallType(d.pop("type")) + + api_call = d.pop("api_call") + + id = d.pop("id") + + chosen_api_call = cls( + type=type, + api_call=api_call, + id=id, + ) + + chosen_api_call.additional_properties = d + return chosen_api_call + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chosen_api_call_type.py b/sdks/python/julep/sdk/models/chosen_api_call_type.py new file mode 100644 index 000000000..a18cc91d0 --- /dev/null +++ b/sdks/python/julep/sdk/models/chosen_api_call_type.py @@ -0,0 +1,8 @@ 
+from enum import Enum + + +class ChosenApiCallType(str, Enum): + API_CALL = "api_call" + + def __str__(self) -> str: + return str(self.value) diff --git a/sdks/python/julep/sdk/models/chosen_function_call.py b/sdks/python/julep/sdk/models/chosen_function_call.py new file mode 100644 index 000000000..d604bba1e --- /dev/null +++ b/sdks/python/julep/sdk/models/chosen_function_call.py @@ -0,0 +1,97 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, + cast, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.chosen_function_call_type import ChosenFunctionCallType +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.function_call_option import FunctionCallOption + + +T = TypeVar("T", bound="ChosenFunctionCall") + + +@_attrs_define +class ChosenFunctionCall: + """ + Attributes: + type (ChosenFunctionCallType): + function (FunctionCallOption): + id (str): + """ + + type: ChosenFunctionCallType + function: "FunctionCallOption" + id: str + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + from ..models.function_call_option import FunctionCallOption + + type = self.type.value + + function = self.function.to_dict() + + id = self.id + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "type": type, + "function": function, + "id": id, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.function_call_option import FunctionCallOption + + d = src_dict.copy() + type = ChosenFunctionCallType(d.pop("type")) + + function = FunctionCallOption.from_dict(d.pop("function")) + + id = d.pop("id") + + chosen_function_call = cls( + type=type, + function=function, + id=id, + ) + + chosen_function_call.additional_properties = d + return chosen_function_call + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chosen_function_call_type.py b/sdks/python/julep/sdk/models/chosen_function_call_type.py new file mode 100644 index 000000000..e795f1364 --- /dev/null +++ b/sdks/python/julep/sdk/models/chosen_function_call_type.py @@ -0,0 +1,8 @@ +from enum import Enum + + +class ChosenFunctionCallType(str, Enum): + FUNCTION = "function" + + def __str__(self) -> str: + return str(self.value) diff --git a/sdks/python/julep/sdk/models/chosen_integration_call.py b/sdks/python/julep/sdk/models/chosen_integration_call.py new file mode 100644 index 000000000..49131a7b4 --- /dev/null +++ b/sdks/python/julep/sdk/models/chosen_integration_call.py @@ -0,0 +1,88 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.chosen_integration_call_type import ChosenIntegrationCallType +from ..types import UNSET, Unset + +T = TypeVar("T", bound="ChosenIntegrationCall") + + +@_attrs_define 
+class ChosenIntegrationCall: + """ + Attributes: + type (ChosenIntegrationCallType): + integration (Any): + id (str): + """ + + type: ChosenIntegrationCallType + integration: Any + id: str + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + type = self.type.value + + integration = self.integration + + id = self.id + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "type": type, + "integration": integration, + "id": id, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + d = src_dict.copy() + type = ChosenIntegrationCallType(d.pop("type")) + + integration = d.pop("integration") + + id = d.pop("id") + + chosen_integration_call = cls( + type=type, + integration=integration, + id=id, + ) + + chosen_integration_call.additional_properties = d + return chosen_integration_call + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chosen_integration_call_type.py b/sdks/python/julep/sdk/models/chosen_integration_call_type.py new file mode 100644 index 000000000..33ddd46fd --- /dev/null +++ b/sdks/python/julep/sdk/models/chosen_integration_call_type.py @@ -0,0 +1,8 @@ +from enum import Enum + + +class ChosenIntegrationCallType(str, Enum): + INTEGRATION = "integration" + + def __str__(self) -> str: + return str(self.value) diff --git a/sdks/python/julep/sdk/models/chosen_system_call.py b/sdks/python/julep/sdk/models/chosen_system_call.py new file mode 100644 index 000000000..9195c8a4b --- /dev/null +++ b/sdks/python/julep/sdk/models/chosen_system_call.py @@ -0,0 +1,88 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.chosen_system_call_type import ChosenSystemCallType +from ..types import UNSET, Unset + +T = TypeVar("T", bound="ChosenSystemCall") + + +@_attrs_define +class ChosenSystemCall: + """ + Attributes: + type (ChosenSystemCallType): + system (Any): + id (str): + """ + + type: ChosenSystemCallType + system: Any + id: str + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + type = self.type.value + + system = self.system + + id = self.id + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "type": type, + "system": system, + "id": id, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + d = src_dict.copy() + type = ChosenSystemCallType(d.pop("type")) + + system = d.pop("system") + + id = d.pop("id") + + chosen_system_call = cls( + type=type, + system=system, + id=id, + ) + + chosen_system_call.additional_properties = d + return chosen_system_call + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return 
self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/chosen_system_call_type.py b/sdks/python/julep/sdk/models/chosen_system_call_type.py new file mode 100644 index 000000000..f947cfb4b --- /dev/null +++ b/sdks/python/julep/sdk/models/chosen_system_call_type.py @@ -0,0 +1,8 @@ +from enum import Enum + + +class ChosenSystemCallType(str, Enum): + SYSTEM = "system" + + def __str__(self) -> str: + return str(self.value) diff --git a/sdks/python/julep/sdk/models/chosen_tool_call.py b/sdks/python/julep/sdk/models/chosen_tool_call.py deleted file mode 100644 index 2e367aa55..000000000 --- a/sdks/python/julep/sdk/models/chosen_tool_call.py +++ /dev/null @@ -1,134 +0,0 @@ -from typing import ( - TYPE_CHECKING, - Any, - BinaryIO, - Dict, - List, - Optional, - TextIO, - Tuple, - Type, - TypeVar, - Union, - cast, -) - -from attrs import define as _attrs_define -from attrs import field as _attrs_field - -from ..models.tool_type import ToolType -from ..types import UNSET, Unset - -if TYPE_CHECKING: - from ..models.function_call_option import FunctionCallOption - - -T = TypeVar("T", bound="ChosenToolCall") - - -@_attrs_define -class ChosenToolCall: - """The response tool value generated by the model - - Attributes: - type (ToolType): - id (str): - function (Union[Unset, FunctionCallOption]): - integration (Union[Unset, Any]): - system (Union[Unset, Any]): - api_call (Union[Unset, Any]): - """ - - type: ToolType - id: str - function: Union[Unset, "FunctionCallOption"] = UNSET - integration: Union[Unset, Any] = UNSET - system: Union[Unset, Any] = UNSET - api_call: Union[Unset, Any] = UNSET - additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) - - def to_dict(self) -> Dict[str, Any]: - from ..models.function_call_option import FunctionCallOption - - type = self.type.value - - id = self.id - - function: Union[Unset, Dict[str, Any]] = UNSET - if not isinstance(self.function, Unset): - function = self.function.to_dict() - - integration = self.integration - - system = self.system - - api_call = self.api_call - - field_dict: Dict[str, Any] = {} - field_dict.update(self.additional_properties) - field_dict.update( - { - "type": type, - "id": id, - } - ) - if function is not UNSET: - field_dict["function"] = function - if integration is not UNSET: - field_dict["integration"] = integration - if system is not UNSET: - field_dict["system"] = system - if api_call is not UNSET: - field_dict["api_call"] = api_call - - return field_dict - - @classmethod - def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.function_call_option import FunctionCallOption - - d = src_dict.copy() - type = ToolType(d.pop("type")) - - id = d.pop("id") - - _function = d.pop("function", UNSET) - function: Union[Unset, FunctionCallOption] - if isinstance(_function, Unset): - function = UNSET - else: - function = FunctionCallOption.from_dict(_function) - - integration = d.pop("integration", UNSET) - - system = d.pop("system", UNSET) - - api_call = d.pop("api_call", UNSET) - - chosen_tool_call = cls( - type=type, - id=id, - function=function, - integration=integration, - system=system, - api_call=api_call, - ) - - chosen_tool_call.additional_properties = d - return chosen_tool_call - - @property - def 
additional_keys(self) -> List[str]: - return list(self.additional_properties.keys()) - - def __getitem__(self, key: str) -> Any: - return self.additional_properties[key] - - def __setitem__(self, key: str, value: Any) -> None: - self.additional_properties[key] = value - - def __delitem__(self, key: str) -> None: - del self.additional_properties[key] - - def __contains__(self, key: str) -> bool: - return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/base_chat_response.py b/sdks/python/julep/sdk/models/chunk_chat_response.py similarity index 69% rename from sdks/python/julep/sdk/models/base_chat_response.py rename to sdks/python/julep/sdk/models/chunk_chat_response.py index 837bcbeb8..6f24ee71e 100644 --- a/sdks/python/julep/sdk/models/base_chat_response.py +++ b/sdks/python/julep/sdk/models/chunk_chat_response.py @@ -21,35 +21,44 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.chat_competion_usage import ChatCompetionUsage + from ..models.chat_output_chunk import ChatOutputChunk + from ..models.competion_usage import CompetionUsage from ..models.doc_reference import DocReference -T = TypeVar("T", bound="BaseChatResponse") +T = TypeVar("T", bound="ChunkChatResponse") @_attrs_define -class BaseChatResponse: +class ChunkChatResponse: """ Attributes: + choices (List['ChatOutputChunk']): The deltas generated by the model jobs (List[str]): Background job IDs that may have been spawned from this interaction. docs (List['DocReference']): Documents referenced for this request (for citation purposes). created_at (datetime.datetime): When this resource was created as UTC date-time id (str): - usage (Union[Unset, ChatCompetionUsage]): Usage statistics for the completion request + usage (Union[Unset, CompetionUsage]): Usage statistics for the completion request """ + choices: List["ChatOutputChunk"] jobs: List[str] docs: List["DocReference"] created_at: datetime.datetime id: str - usage: Union[Unset, "ChatCompetionUsage"] = UNSET + usage: Union[Unset, "CompetionUsage"] = UNSET additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.chat_competion_usage import ChatCompetionUsage + from ..models.chat_output_chunk import ChatOutputChunk + from ..models.competion_usage import CompetionUsage from ..models.doc_reference import DocReference + choices = [] + for choices_item_data in self.choices: + choices_item = choices_item_data.to_dict() + choices.append(choices_item) + jobs = self.jobs docs = [] @@ -69,6 +78,7 @@ def to_dict(self) -> Dict[str, Any]: field_dict.update(self.additional_properties) field_dict.update( { + "choices": choices, "jobs": jobs, "docs": docs, "created_at": created_at, @@ -82,10 +92,18 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.chat_competion_usage import ChatCompetionUsage + from ..models.chat_output_chunk import ChatOutputChunk + from ..models.competion_usage import CompetionUsage from ..models.doc_reference import DocReference d = src_dict.copy() + choices = [] + _choices = d.pop("choices") + for choices_item_data in _choices: + choices_item = ChatOutputChunk.from_dict(choices_item_data) + + choices.append(choices_item) + jobs = cast(List[str], d.pop("jobs")) docs = [] @@ -100,13 +118,14 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: id = d.pop("id") _usage = d.pop("usage", UNSET) - usage: Union[Unset, ChatCompetionUsage] + usage: Union[Unset, 
CompetionUsage] if isinstance(_usage, Unset): usage = UNSET else: - usage = ChatCompetionUsage.from_dict(_usage) + usage = CompetionUsage.from_dict(_usage) - base_chat_response = cls( + chunk_chat_response = cls( + choices=choices, jobs=jobs, docs=docs, created_at=created_at, @@ -114,8 +133,8 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: usage=usage, ) - base_chat_response.additional_properties = d - return base_chat_response + chunk_chat_response.additional_properties = d + return chunk_chat_response @property def additional_keys(self) -> List[str]: diff --git a/sdks/python/julep/sdk/models/chat_competion_usage.py b/sdks/python/julep/sdk/models/competion_usage.py similarity index 92% rename from sdks/python/julep/sdk/models/chat_competion_usage.py rename to sdks/python/julep/sdk/models/competion_usage.py index 40840f0c5..606665efc 100644 --- a/sdks/python/julep/sdk/models/chat_competion_usage.py +++ b/sdks/python/julep/sdk/models/competion_usage.py @@ -17,11 +17,11 @@ from ..types import UNSET, Unset -T = TypeVar("T", bound="ChatCompetionUsage") +T = TypeVar("T", bound="CompetionUsage") @_attrs_define -class ChatCompetionUsage: +class CompetionUsage: """Usage statistics for the completion request Attributes: @@ -63,14 +63,14 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: total_tokens = d.pop("total_tokens", UNSET) - chat_competion_usage = cls( + competion_usage = cls( completion_tokens=completion_tokens, prompt_tokens=prompt_tokens, total_tokens=total_tokens, ) - chat_competion_usage.additional_properties = d - return chat_competion_usage + competion_usage.additional_properties = d + return competion_usage @property def additional_keys(self) -> List[str]: diff --git a/sdks/python/julep/sdk/models/create_agent_request.py b/sdks/python/julep/sdk/models/create_agent_request.py index 9f98784fb..b228c926d 100644 --- a/sdks/python/julep/sdk/models/create_agent_request.py +++ b/sdks/python/julep/sdk/models/create_agent_request.py @@ -19,8 +19,8 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.chat_open_ai_settings import ChatOpenAISettings from ..models.create_agent_request_metadata import CreateAgentRequestMetadata + from ..models.default_chat_settings import DefaultChatSettings T = TypeVar("T", bound="CreateAgentRequest") @@ -38,7 +38,8 @@ class CreateAgentRequest: model (str): Model name to use (gpt-4-turbo, gemini-nano etc) Default: ''. instructions (Union[List[str], str]): Instructions for the agent Default: '[]'. 
metadata (Union[Unset, CreateAgentRequestMetadata]): - default_settings (Union[Unset, ChatOpenAISettings]): + default_settings (Union[Unset, DefaultChatSettings]): Default settings for the chat session (also used by the + agent) """ name: str = "" @@ -46,12 +47,12 @@ class CreateAgentRequest: model: str = "" instructions: Union[List[str], str] = "[]" metadata: Union[Unset, "CreateAgentRequestMetadata"] = UNSET - default_settings: Union[Unset, "ChatOpenAISettings"] = UNSET + default_settings: Union[Unset, "DefaultChatSettings"] = UNSET additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.chat_open_ai_settings import ChatOpenAISettings from ..models.create_agent_request_metadata import CreateAgentRequestMetadata + from ..models.default_chat_settings import DefaultChatSettings name = self.name @@ -93,8 +94,8 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.chat_open_ai_settings import ChatOpenAISettings from ..models.create_agent_request_metadata import CreateAgentRequestMetadata + from ..models.default_chat_settings import DefaultChatSettings d = src_dict.copy() name = d.pop("name") @@ -124,11 +125,11 @@ def _parse_instructions(data: object) -> Union[List[str], str]: metadata = CreateAgentRequestMetadata.from_dict(_metadata) _default_settings = d.pop("default_settings", UNSET) - default_settings: Union[Unset, ChatOpenAISettings] + default_settings: Union[Unset, DefaultChatSettings] if isinstance(_default_settings, Unset): default_settings = UNSET else: - default_settings = ChatOpenAISettings.from_dict(_default_settings) + default_settings = DefaultChatSettings.from_dict(_default_settings) create_agent_request = cls( name=name, diff --git a/sdks/python/julep/sdk/models/create_task_request.py b/sdks/python/julep/sdk/models/create_task_request.py index 2f8bc786d..22e4d6f64 100644 --- a/sdks/python/julep/sdk/models/create_task_request.py +++ b/sdks/python/julep/sdk/models/create_task_request.py @@ -29,7 +29,6 @@ CreateTaskRequestMainItemType17, ) from ..models.create_task_request_metadata import CreateTaskRequestMetadata - from ..models.create_tool_request import CreateToolRequest from ..models.embed_step import EmbedStep from ..models.error_workflow_step import ErrorWorkflowStep from ..models.evaluate_step import EvaluateStep @@ -44,6 +43,7 @@ from ..models.set_step import SetStep from ..models.sleep_step import SleepStep from ..models.switch_step import SwitchStep + from ..models.task_tool import TaskTool from ..models.tool_call_step import ToolCallStep from ..models.wait_for_input_step import WaitForInputStep from ..models.yield_step import YieldStep @@ -65,7 +65,7 @@ class CreateTaskRequest: entrypoint of the task. input_schema (Union['CreateTaskRequestInputSchemaType0', None]): The schema for the input to the task. `null` means all inputs are valid. - tools (List['CreateToolRequest']): Tools defined specifically for this task not included in the Agent itself. + tools (List['TaskTool']): Tools defined specifically for this task not included in the Agent itself. inherit_tools (bool): Whether to inherit tools from the parent agent or not. Defaults to true. Default: True. 
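# Reviewer note (illustrative): `tools` entries on task payloads now
# round-trip through TaskTool rather than CreateToolRequest, mirroring the
# from_dict loop further down; `payload` is a hypothetical decoded JSON body.
from typing import Any, Dict, List

from julep.sdk.models.task_tool import TaskTool

def parse_task_tools(payload: Dict[str, Any]) -> List[TaskTool]:
    # Same shape as CreateTaskRequest.from_dict: each item becomes a TaskTool.
    return [TaskTool.from_dict(item) for item in payload["tools"]]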
metadata (Union[Unset, CreateTaskRequestMetadata]): """ @@ -94,7 +94,7 @@ class CreateTaskRequest: ] ] input_schema: Union["CreateTaskRequestInputSchemaType0", None] - tools: List["CreateToolRequest"] + tools: List["TaskTool"] description: str = "" inherit_tools: bool = True metadata: Union[Unset, "CreateTaskRequestMetadata"] = UNSET @@ -135,7 +135,6 @@ def to_dict(self) -> Dict[str, Any]: CreateTaskRequestMainItemType17, ) from ..models.create_task_request_metadata import CreateTaskRequestMetadata - from ..models.create_tool_request import CreateToolRequest from ..models.embed_step import EmbedStep from ..models.error_workflow_step import ErrorWorkflowStep from ..models.evaluate_step import EvaluateStep @@ -150,6 +149,7 @@ def to_dict(self) -> Dict[str, Any]: from ..models.set_step import SetStep from ..models.sleep_step import SleepStep from ..models.switch_step import SwitchStep + from ..models.task_tool import TaskTool from ..models.tool_call_step import ToolCallStep from ..models.wait_for_input_step import WaitForInputStep from ..models.yield_step import YieldStep @@ -288,7 +288,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: CreateTaskRequestMainItemType17, ) from ..models.create_task_request_metadata import CreateTaskRequestMetadata - from ..models.create_tool_request import CreateToolRequest from ..models.embed_step import EmbedStep from ..models.error_workflow_step import ErrorWorkflowStep from ..models.evaluate_step import EvaluateStep @@ -303,6 +302,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.set_step import SetStep from ..models.sleep_step import SleepStep from ..models.switch_step import SwitchStep + from ..models.task_tool import TaskTool from ..models.tool_call_step import ToolCallStep from ..models.wait_for_input_step import WaitForInputStep from ..models.yield_step import YieldStep @@ -504,7 +504,7 @@ def _parse_input_schema( tools = [] _tools = d.pop("tools") for tools_item_data in _tools: - tools_item = CreateToolRequest.from_dict(tools_item_data) + tools_item = TaskTool.from_dict(tools_item_data) tools.append(tools_item) diff --git a/sdks/python/julep/sdk/models/chat_open_ai_settings.py b/sdks/python/julep/sdk/models/default_chat_settings.py similarity index 67% rename from sdks/python/julep/sdk/models/chat_open_ai_settings.py rename to sdks/python/julep/sdk/models/default_chat_settings.py index b70d55431..5b6e47d02 100644 --- a/sdks/python/julep/sdk/models/chat_open_ai_settings.py +++ b/sdks/python/julep/sdk/models/default_chat_settings.py @@ -17,12 +17,13 @@ from ..types import UNSET, Unset -T = TypeVar("T", bound="ChatOpenAISettings") +T = TypeVar("T", bound="DefaultChatSettings") @_attrs_define -class ChatOpenAISettings: - """ +class DefaultChatSettings: + """Default settings for the chat session (also used by the agent) + Attributes: frequency_penalty (Union[Unset, float]): Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line @@ -36,12 +37,21 @@ class ChatOpenAISettings: where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. + repetition_penalty (Union[Unset, float]): Number between 0 and 2.0. 
1.0 is neutral and values larger than that + penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to + repeat the same line verbatim. + length_penalty (Union[Unset, float]): Number between 0 and 2.0. 1.0 is neutral and values larger than that + penalize number of tokens generated. + min_p (Union[Unset, float]): Minimum probability compared to leading token to be considered """ frequency_penalty: Union[Unset, float] = UNSET presence_penalty: Union[Unset, float] = UNSET temperature: Union[Unset, float] = UNSET top_p: Union[Unset, float] = UNSET + repetition_penalty: Union[Unset, float] = UNSET + length_penalty: Union[Unset, float] = UNSET + min_p: Union[Unset, float] = UNSET additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: @@ -53,6 +63,12 @@ def to_dict(self) -> Dict[str, Any]: top_p = self.top_p + repetition_penalty = self.repetition_penalty + + length_penalty = self.length_penalty + + min_p = self.min_p + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update({}) @@ -64,6 +80,12 @@ def to_dict(self) -> Dict[str, Any]: field_dict["temperature"] = temperature if top_p is not UNSET: field_dict["top_p"] = top_p + if repetition_penalty is not UNSET: + field_dict["repetition_penalty"] = repetition_penalty + if length_penalty is not UNSET: + field_dict["length_penalty"] = length_penalty + if min_p is not UNSET: + field_dict["min_p"] = min_p return field_dict @@ -78,15 +100,24 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: top_p = d.pop("top_p", UNSET) - chat_open_ai_settings = cls( + repetition_penalty = d.pop("repetition_penalty", UNSET) + + length_penalty = d.pop("length_penalty", UNSET) + + min_p = d.pop("min_p", UNSET) + + default_chat_settings = cls( frequency_penalty=frequency_penalty, presence_penalty=presence_penalty, temperature=temperature, top_p=top_p, + repetition_penalty=repetition_penalty, + length_penalty=length_penalty, + min_p=min_p, ) - chat_open_ai_settings.additional_properties = d - return chat_open_ai_settings + default_chat_settings.additional_properties = d + return default_chat_settings @property def additional_keys(self) -> List[str]: diff --git a/sdks/python/julep/sdk/models/docs_search_route_search_body.py b/sdks/python/julep/sdk/models/docs_search_route_search_body.py index f2b8d200d..c40997f53 100644 --- a/sdks/python/julep/sdk/models/docs_search_route_search_body.py +++ b/sdks/python/julep/sdk/models/docs_search_route_search_body.py @@ -19,7 +19,9 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.base_doc_search_request import BaseDocSearchRequest + from ..models.hybrid_doc_search_request import HybridDocSearchRequest + from ..models.text_only_doc_search_request import TextOnlyDocSearchRequest + from ..models.vector_doc_search_request import VectorDocSearchRequest T = TypeVar("T", bound="DocsSearchRouteSearchBody") @@ -29,19 +31,23 @@ class DocsSearchRouteSearchBody: """ Attributes: - body ('BaseDocSearchRequest'): + body (Union['HybridDocSearchRequest', 'TextOnlyDocSearchRequest', 'VectorDocSearchRequest']): """ - body: "BaseDocSearchRequest" + body: Union[ + "HybridDocSearchRequest", "TextOnlyDocSearchRequest", "VectorDocSearchRequest" + ] additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.base_doc_search_request import BaseDocSearchRequest + from ..models.hybrid_doc_search_request import 
HybridDocSearchRequest + from ..models.text_only_doc_search_request import TextOnlyDocSearchRequest + from ..models.vector_doc_search_request import VectorDocSearchRequest body: Dict[str, Any] - if isinstance(self.body, BaseDocSearchRequest): + if isinstance(self.body, VectorDocSearchRequest): body = self.body.to_dict() - elif isinstance(self.body, BaseDocSearchRequest): + elif isinstance(self.body, TextOnlyDocSearchRequest): body = self.body.to_dict() else: body = self.body.to_dict() @@ -58,15 +64,23 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.base_doc_search_request import BaseDocSearchRequest + from ..models.hybrid_doc_search_request import HybridDocSearchRequest + from ..models.text_only_doc_search_request import TextOnlyDocSearchRequest + from ..models.vector_doc_search_request import VectorDocSearchRequest d = src_dict.copy() - def _parse_body(data: object) -> "BaseDocSearchRequest": + def _parse_body( + data: object, + ) -> Union[ + "HybridDocSearchRequest", + "TextOnlyDocSearchRequest", + "VectorDocSearchRequest", + ]: try: if not isinstance(data, dict): raise TypeError() - body_type_0 = BaseDocSearchRequest.from_dict(data) + body_type_0 = VectorDocSearchRequest.from_dict(data) return body_type_0 except: # noqa: E722 @@ -74,14 +88,14 @@ def _parse_body(data: object) -> "BaseDocSearchRequest": try: if not isinstance(data, dict): raise TypeError() - body_type_1 = BaseDocSearchRequest.from_dict(data) + body_type_1 = TextOnlyDocSearchRequest.from_dict(data) return body_type_1 except: # noqa: E722 pass if not isinstance(data, dict): raise TypeError() - body_type_2 = BaseDocSearchRequest.from_dict(data) + body_type_2 = HybridDocSearchRequest.from_dict(data) return body_type_2 diff --git a/sdks/python/julep/sdk/models/chat_finish_reason.py b/sdks/python/julep/sdk/models/finish_reason.py similarity index 84% rename from sdks/python/julep/sdk/models/chat_finish_reason.py rename to sdks/python/julep/sdk/models/finish_reason.py index 465ff5661..a899ad033 100644 --- a/sdks/python/julep/sdk/models/chat_finish_reason.py +++ b/sdks/python/julep/sdk/models/finish_reason.py @@ -1,7 +1,7 @@ from enum import Enum -class ChatFinishReason(str, Enum): +class FinishReason(str, Enum): CONTENT_FILTER = "content_filter" LENGTH = "length" STOP = "stop" diff --git a/sdks/python/julep/sdk/models/hybrid_doc_search_request.py b/sdks/python/julep/sdk/models/hybrid_doc_search_request.py new file mode 100644 index 000000000..4307ee41c --- /dev/null +++ b/sdks/python/julep/sdk/models/hybrid_doc_search_request.py @@ -0,0 +1,117 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, + cast, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.hybrid_doc_search_request_lang import HybridDocSearchRequestLang +from ..types import UNSET, Unset + +T = TypeVar("T", bound="HybridDocSearchRequest") + + +@_attrs_define +class HybridDocSearchRequest: + """ + Attributes: + limit (int): Default: 10. + lang (HybridDocSearchRequestLang): The language to be used for text-only search. Support for other languages + coming soon. Default: HybridDocSearchRequestLang.EN_US. + confidence (float): The confidence cutoff level Default: 0.5. + alpha (float): The weight to apply to BM25 vs Vector search results. 0 => pure BM25; 1 => pure vector; Default: + 0.75. + text (str): Text to use in the search. 
In `hybrid` search mode, either `text` or both `text` and `vector` fields + are required. + vector (List[float]): Vector to use in the search. Must be the same dimensions as the embedding model or else an + error will be thrown. + """ + + text: str + vector: List[float] + limit: int = 10 + lang: HybridDocSearchRequestLang = HybridDocSearchRequestLang.EN_US + confidence: float = 0.5 + alpha: float = 0.75 + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + limit = self.limit + + lang = self.lang.value + + confidence = self.confidence + + alpha = self.alpha + + text = self.text + + vector = self.vector + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "limit": limit, + "lang": lang, + "confidence": confidence, + "alpha": alpha, + "text": text, + "vector": vector, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + d = src_dict.copy() + limit = d.pop("limit") + + lang = HybridDocSearchRequestLang(d.pop("lang")) + + confidence = d.pop("confidence") + + alpha = d.pop("alpha") + + text = d.pop("text") + + vector = cast(List[float], d.pop("vector")) + + hybrid_doc_search_request = cls( + limit=limit, + lang=lang, + confidence=confidence, + alpha=alpha, + text=text, + vector=vector, + ) + + hybrid_doc_search_request.additional_properties = d + return hybrid_doc_search_request + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/base_doc_search_request_lang.py b/sdks/python/julep/sdk/models/hybrid_doc_search_request_lang.py similarity index 70% rename from sdks/python/julep/sdk/models/base_doc_search_request_lang.py rename to sdks/python/julep/sdk/models/hybrid_doc_search_request_lang.py index f078e74dc..92f756add 100644 --- a/sdks/python/julep/sdk/models/base_doc_search_request_lang.py +++ b/sdks/python/julep/sdk/models/hybrid_doc_search_request_lang.py @@ -1,7 +1,7 @@ from enum import Enum -class BaseDocSearchRequestLang(str, Enum): +class HybridDocSearchRequestLang(str, Enum): EN_US = "en-US" def __str__(self) -> str: diff --git a/sdks/python/julep/sdk/models/chat_log_prob_response.py b/sdks/python/julep/sdk/models/log_prob_response.py similarity index 76% rename from sdks/python/julep/sdk/models/chat_log_prob_response.py rename to sdks/python/julep/sdk/models/log_prob_response.py index 86973c563..b0c76ba3c 100644 --- a/sdks/python/julep/sdk/models/chat_log_prob_response.py +++ b/sdks/python/julep/sdk/models/log_prob_response.py @@ -19,24 +19,24 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.base_token_log_prob import BaseTokenLogProb + from ..models.chat_token_log_prob import ChatTokenLogProb -T = TypeVar("T", bound="ChatLogProbResponse") +T = TypeVar("T", bound="LogProbResponse") @_attrs_define -class ChatLogProbResponse: +class LogProbResponse: """ Attributes: - content (Union[List['BaseTokenLogProb'], None]): The log probabilities of the tokens + content (Union[List['ChatTokenLogProb'], None]): The log probabilities of the tokens """ - 
content: Union[List["BaseTokenLogProb"], None] + content: Union[List["ChatTokenLogProb"], None] additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.base_token_log_prob import BaseTokenLogProb + from ..models.chat_token_log_prob import ChatTokenLogProb content: Union[List[Dict[str, Any]], None] if isinstance(self.content, list): @@ -60,11 +60,11 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.base_token_log_prob import BaseTokenLogProb + from ..models.chat_token_log_prob import ChatTokenLogProb d = src_dict.copy() - def _parse_content(data: object) -> Union[List["BaseTokenLogProb"], None]: + def _parse_content(data: object) -> Union[List["ChatTokenLogProb"], None]: if data is None: return data try: @@ -73,7 +73,7 @@ def _parse_content(data: object) -> Union[List["BaseTokenLogProb"], None]: content_type_0 = [] _content_type_0 = data for content_type_0_item_data in _content_type_0: - content_type_0_item = BaseTokenLogProb.from_dict( + content_type_0_item = ChatTokenLogProb.from_dict( content_type_0_item_data ) @@ -82,16 +82,16 @@ def _parse_content(data: object) -> Union[List["BaseTokenLogProb"], None]: return content_type_0 except: # noqa: E722 pass - return cast(Union[List["BaseTokenLogProb"], None], data) + return cast(Union[List["ChatTokenLogProb"], None], data) content = _parse_content(d.pop("content")) - chat_log_prob_response = cls( + log_prob_response = cls( content=content, ) - chat_log_prob_response.additional_properties = d - return chat_log_prob_response + log_prob_response.additional_properties = d + return log_prob_response @property def additional_keys(self) -> List[str]: diff --git a/sdks/python/julep/sdk/models/message_chat_response.py b/sdks/python/julep/sdk/models/message_chat_response.py new file mode 100644 index 000000000..3a9d70a5b --- /dev/null +++ b/sdks/python/julep/sdk/models/message_chat_response.py @@ -0,0 +1,174 @@ +import datetime +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field +from dateutil.parser import isoparse + +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.base_chat_output import BaseChatOutput + from ..models.competion_usage import CompetionUsage + from ..models.doc_reference import DocReference + + +T = TypeVar("T", bound="MessageChatResponse") + + +@_attrs_define +class MessageChatResponse: + """ + Attributes: + choices (List['BaseChatOutput']): The deltas generated by the model + jobs (List[str]): Background job IDs that may have been spawned from this interaction. + docs (List['DocReference']): Documents referenced for this request (for citation purposes). 
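# Reviewer note (illustrative): the new MessageChatResponse keeps the UNSET
# convention of the other generated models -- `usage` is only serialized when
# present. A minimal sketch; an empty `choices` list avoids assuming the
# BaseChatOutput shape, which is defined elsewhere in this PR.
import datetime

from julep.sdk.models.message_chat_response import MessageChatResponse

resp = MessageChatResponse(
    choices=[],
    jobs=[],
    docs=[],
    created_at=datetime.datetime.now(datetime.timezone.utc),
    id="chat_123",
)
body = resp.to_dict()
assert "usage" not in body  # UNSET fields are dropped on serialization
# Also worth noting: the docstring above reuses "deltas" from the chunked
# response; for a full message response these are completed choices.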
+ created_at (datetime.datetime): When this resource was created as UTC date-time + id (str): + usage (Union[Unset, CompetionUsage]): Usage statistics for the completion request + """ + + choices: List["BaseChatOutput"] + jobs: List[str] + docs: List["DocReference"] + created_at: datetime.datetime + id: str + usage: Union[Unset, "CompetionUsage"] = UNSET + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + from ..models.base_chat_output import BaseChatOutput + from ..models.competion_usage import CompetionUsage + from ..models.doc_reference import DocReference + + choices = [] + for choices_item_data in self.choices: + choices_item: Dict[str, Any] + if isinstance(choices_item_data, BaseChatOutput): + choices_item = choices_item_data.to_dict() + else: + choices_item = choices_item_data.to_dict() + + choices.append(choices_item) + + jobs = self.jobs + + docs = [] + for docs_item_data in self.docs: + docs_item = docs_item_data.to_dict() + docs.append(docs_item) + + created_at = self.created_at.isoformat() + + id = self.id + + usage: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.usage, Unset): + usage = self.usage.to_dict() + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "choices": choices, + "jobs": jobs, + "docs": docs, + "created_at": created_at, + "id": id, + } + ) + if usage is not UNSET: + field_dict["usage"] = usage + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.base_chat_output import BaseChatOutput + from ..models.competion_usage import CompetionUsage + from ..models.doc_reference import DocReference + + d = src_dict.copy() + choices = [] + _choices = d.pop("choices") + for choices_item_data in _choices: + + def _parse_choices_item(data: object) -> "BaseChatOutput": + try: + if not isinstance(data, dict): + raise TypeError() + choices_item_type_0 = BaseChatOutput.from_dict(data) + + return choices_item_type_0 + except: # noqa: E722 + pass + if not isinstance(data, dict): + raise TypeError() + choices_item_type_1 = BaseChatOutput.from_dict(data) + + return choices_item_type_1 + + choices_item = _parse_choices_item(choices_item_data) + + choices.append(choices_item) + + jobs = cast(List[str], d.pop("jobs")) + + docs = [] + _docs = d.pop("docs") + for docs_item_data in _docs: + docs_item = DocReference.from_dict(docs_item_data) + + docs.append(docs_item) + + created_at = isoparse(d.pop("created_at")) + + id = d.pop("id") + + _usage = d.pop("usage", UNSET) + usage: Union[Unset, CompetionUsage] + if isinstance(_usage, Unset): + usage = UNSET + else: + usage = CompetionUsage.from_dict(_usage) + + message_chat_response = cls( + choices=choices, + jobs=jobs, + docs=docs, + created_at=created_at, + id=id, + usage=usage, + ) + + message_chat_response.additional_properties = d + return message_chat_response + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/patch_agent_request.py b/sdks/python/julep/sdk/models/patch_agent_request.py 
index b9811bd1d..bfd2ccdd3 100644 --- a/sdks/python/julep/sdk/models/patch_agent_request.py +++ b/sdks/python/julep/sdk/models/patch_agent_request.py @@ -19,7 +19,7 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.default_chat_settings import DefaultChatSettings from ..models.patch_agent_request_metadata import PatchAgentRequestMetadata @@ -38,7 +38,8 @@ class PatchAgentRequest: about (Union[Unset, str]): About the agent Default: ''. model (Union[Unset, str]): Model name to use (gpt-4-turbo, gemini-nano etc) Default: ''. instructions (Union[List[str], Unset, str]): Instructions for the agent Default: '[]'. - default_settings (Union[Unset, ChatOpenAISettings]): + default_settings (Union[Unset, DefaultChatSettings]): Default settings for the chat session (also used by the + agent) """ metadata: Union[Unset, "PatchAgentRequestMetadata"] = UNSET @@ -46,11 +47,11 @@ class PatchAgentRequest: about: Union[Unset, str] = "" model: Union[Unset, str] = "" instructions: Union[List[str], Unset, str] = "[]" - default_settings: Union[Unset, "ChatOpenAISettings"] = UNSET + default_settings: Union[Unset, "DefaultChatSettings"] = UNSET additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.default_chat_settings import DefaultChatSettings from ..models.patch_agent_request_metadata import PatchAgentRequestMetadata metadata: Union[Unset, Dict[str, Any]] = UNSET @@ -96,7 +97,7 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.default_chat_settings import DefaultChatSettings from ..models.patch_agent_request_metadata import PatchAgentRequestMetadata d = src_dict.copy() @@ -129,11 +130,11 @@ def _parse_instructions(data: object) -> Union[List[str], Unset, str]: instructions = _parse_instructions(d.pop("instructions", UNSET)) _default_settings = d.pop("default_settings", UNSET) - default_settings: Union[Unset, ChatOpenAISettings] + default_settings: Union[Unset, DefaultChatSettings] if isinstance(_default_settings, Unset): default_settings = UNSET else: - default_settings = ChatOpenAISettings.from_dict(_default_settings) + default_settings = DefaultChatSettings.from_dict(_default_settings) patch_agent_request = cls( metadata=metadata, diff --git a/sdks/python/julep/sdk/models/patch_task_request.py b/sdks/python/julep/sdk/models/patch_task_request.py index 8b18439d7..83823a949 100644 --- a/sdks/python/julep/sdk/models/patch_task_request.py +++ b/sdks/python/julep/sdk/models/patch_task_request.py @@ -19,7 +19,6 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.create_tool_request import CreateToolRequest from ..models.embed_step import EmbedStep from ..models.error_workflow_step import ErrorWorkflowStep from ..models.evaluate_step import EvaluateStep @@ -44,6 +43,7 @@ from ..models.set_step import SetStep from ..models.sleep_step import SleepStep from ..models.switch_step import SwitchStep + from ..models.task_tool import TaskTool from ..models.tool_call_step import ToolCallStep from ..models.wait_for_input_step import WaitForInputStep from ..models.yield_step import YieldStep @@ -64,8 +64,8 @@ class PatchTaskRequest: entrypoint of the task. 
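# Reviewer note (illustrative): sketch of a partial agent update using the
# renamed DefaultChatSettings; only the settings are overridden here.
from julep.sdk.models.default_chat_settings import DefaultChatSettings
from julep.sdk.models.patch_agent_request import PatchAgentRequest

patch = PatchAgentRequest(
    default_settings=DefaultChatSettings(length_penalty=1.1),
)
body = patch.to_dict()
# Caveat worth double-checking: the string fields default to '' / '[]' rather
# than UNSET, so an otherwise-empty PATCH body may still serialize them and
# overwrite name/about/model on the server.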
input_schema (Union['PatchTaskRequestInputSchemaType0', None, Unset]): The schema for the input to the task. `null` means all inputs are valid. - tools (Union[Unset, List['CreateToolRequest']]): Tools defined specifically for this task not included in the - Agent itself. + tools (Union[Unset, List['TaskTool']]): Tools defined specifically for this task not included in the Agent + itself. inherit_tools (Union[Unset, bool]): Whether to inherit tools from the parent agent or not. Defaults to true. Default: True. metadata (Union[Unset, PatchTaskRequestMetadata]): @@ -98,7 +98,7 @@ class PatchTaskRequest: ], ] = UNSET input_schema: Union["PatchTaskRequestInputSchemaType0", None, Unset] = UNSET - tools: Union[Unset, List["CreateToolRequest"]] = UNSET + tools: Union[Unset, List["TaskTool"]] = UNSET inherit_tools: Union[Unset, bool] = True metadata: Union[Unset, "PatchTaskRequestMetadata"] = UNSET additional_properties: Dict[ @@ -128,7 +128,6 @@ class PatchTaskRequest: ] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.create_tool_request import CreateToolRequest from ..models.embed_step import EmbedStep from ..models.error_workflow_step import ErrorWorkflowStep from ..models.evaluate_step import EvaluateStep @@ -153,6 +152,7 @@ def to_dict(self) -> Dict[str, Any]: from ..models.set_step import SetStep from ..models.sleep_step import SleepStep from ..models.switch_step import SwitchStep + from ..models.task_tool import TaskTool from ..models.tool_call_step import ToolCallStep from ..models.wait_for_input_step import WaitForInputStep from ..models.yield_step import YieldStep @@ -286,7 +286,6 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.create_tool_request import CreateToolRequest from ..models.embed_step import EmbedStep from ..models.error_workflow_step import ErrorWorkflowStep from ..models.evaluate_step import EvaluateStep @@ -311,6 +310,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.set_step import SetStep from ..models.sleep_step import SleepStep from ..models.switch_step import SwitchStep + from ..models.task_tool import TaskTool from ..models.tool_call_step import ToolCallStep from ..models.wait_for_input_step import WaitForInputStep from ..models.yield_step import YieldStep @@ -512,7 +512,7 @@ def _parse_input_schema( tools = [] _tools = d.pop("tools", UNSET) for tools_item_data in _tools or []: - tools_item = CreateToolRequest.from_dict(tools_item_data) + tools_item = TaskTool.from_dict(tools_item_data) tools.append(tools_item) diff --git a/sdks/python/julep/sdk/models/prompt_step.py b/sdks/python/julep/sdk/models/prompt_step.py index 10f3b12ed..1d3498636 100644 --- a/sdks/python/julep/sdk/models/prompt_step.py +++ b/sdks/python/julep/sdk/models/prompt_step.py @@ -20,7 +20,7 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.chat_settings import ChatSettings from ..models.prompt_step_prompt_type_1_item import PromptStepPromptType1Item @@ -33,16 +33,16 @@ class PromptStep: Attributes: kind (PromptStepKind): The kind of step prompt (Union[List['PromptStepPromptType1Item'], str]): The prompt to run - settings (Union[Unset, ChatOpenAISettings]): + settings (Union[Unset, ChatSettings]): """ kind: PromptStepKind prompt: Union[List["PromptStepPromptType1Item"], str] - settings: Union[Unset, "ChatOpenAISettings"] = UNSET + settings: Union[Unset, 
"ChatSettings"] = UNSET additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.chat_settings import ChatSettings from ..models.prompt_step_prompt_type_1_item import PromptStepPromptType1Item kind = self.kind.value @@ -76,7 +76,7 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.chat_settings import ChatSettings from ..models.prompt_step_prompt_type_1_item import PromptStepPromptType1Item d = src_dict.copy() @@ -105,11 +105,11 @@ def _parse_prompt( prompt = _parse_prompt(d.pop("prompt")) _settings = d.pop("settings", UNSET) - settings: Union[Unset, ChatOpenAISettings] + settings: Union[Unset, ChatSettings] if isinstance(_settings, Unset): settings = UNSET else: - settings = ChatOpenAISettings.from_dict(_settings) + settings = ChatSettings.from_dict(_settings) prompt_step = cls( kind=kind, diff --git a/sdks/python/julep/sdk/models/route_list_response_200_results_item.py b/sdks/python/julep/sdk/models/route_list_response_200_results_item.py index 31a5d09dd..182115af7 100644 --- a/sdks/python/julep/sdk/models/route_list_response_200_results_item.py +++ b/sdks/python/julep/sdk/models/route_list_response_200_results_item.py @@ -18,7 +18,7 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.transition_event import TransitionEvent + from ..models.transition import Transition T = TypeVar("T", bound="RouteListResponse200ResultsItem") @@ -28,14 +28,14 @@ class RouteListResponse200ResultsItem: """ Attributes: - transitions (List['TransitionEvent']): + transitions (List['Transition']): """ - transitions: List["TransitionEvent"] + transitions: List["Transition"] additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.transition_event import TransitionEvent + from ..models.transition import Transition transitions = [] for transitions_item_data in self.transitions: @@ -54,13 +54,13 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.transition_event import TransitionEvent + from ..models.transition import Transition d = src_dict.copy() transitions = [] _transitions = d.pop("transitions") for transitions_item_data in _transitions: - transitions_item = TransitionEvent.from_dict(transitions_item_data) + transitions_item = Transition.from_dict(transitions_item_data) transitions.append(transitions_item) diff --git a/sdks/python/julep/sdk/models/search_step.py b/sdks/python/julep/sdk/models/search_step.py index 0d82502bd..0feb73165 100644 --- a/sdks/python/julep/sdk/models/search_step.py +++ b/sdks/python/julep/sdk/models/search_step.py @@ -20,7 +20,9 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.base_doc_search_request import BaseDocSearchRequest + from ..models.hybrid_doc_search_request import HybridDocSearchRequest + from ..models.text_only_doc_search_request import TextOnlyDocSearchRequest + from ..models.vector_doc_search_request import VectorDocSearchRequest T = TypeVar("T", bound="SearchStep") @@ -31,22 +33,26 @@ class SearchStep: """ Attributes: kind (SearchStepKind): The kind of step - search ('BaseDocSearchRequest'): The search query + search (Union['HybridDocSearchRequest', 'TextOnlyDocSearchRequest', 'VectorDocSearchRequest']): 
The search query """ kind: SearchStepKind - search: "BaseDocSearchRequest" + search: Union[ + "HybridDocSearchRequest", "TextOnlyDocSearchRequest", "VectorDocSearchRequest" + ] additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.base_doc_search_request import BaseDocSearchRequest + from ..models.hybrid_doc_search_request import HybridDocSearchRequest + from ..models.text_only_doc_search_request import TextOnlyDocSearchRequest + from ..models.vector_doc_search_request import VectorDocSearchRequest kind = self.kind.value search: Dict[str, Any] - if isinstance(self.search, BaseDocSearchRequest): + if isinstance(self.search, VectorDocSearchRequest): search = self.search.to_dict() - elif isinstance(self.search, BaseDocSearchRequest): + elif isinstance(self.search, TextOnlyDocSearchRequest): search = self.search.to_dict() else: search = self.search.to_dict() @@ -64,16 +70,24 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.base_doc_search_request import BaseDocSearchRequest + from ..models.hybrid_doc_search_request import HybridDocSearchRequest + from ..models.text_only_doc_search_request import TextOnlyDocSearchRequest + from ..models.vector_doc_search_request import VectorDocSearchRequest d = src_dict.copy() kind = SearchStepKind(d.pop("kind_")) - def _parse_search(data: object) -> "BaseDocSearchRequest": + def _parse_search( + data: object, + ) -> Union[ + "HybridDocSearchRequest", + "TextOnlyDocSearchRequest", + "VectorDocSearchRequest", + ]: try: if not isinstance(data, dict): raise TypeError() - search_type_0 = BaseDocSearchRequest.from_dict(data) + search_type_0 = VectorDocSearchRequest.from_dict(data) return search_type_0 except: # noqa: E722 @@ -81,14 +95,14 @@ def _parse_search(data: object) -> "BaseDocSearchRequest": try: if not isinstance(data, dict): raise TypeError() - search_type_1 = BaseDocSearchRequest.from_dict(data) + search_type_1 = TextOnlyDocSearchRequest.from_dict(data) return search_type_1 except: # noqa: E722 pass if not isinstance(data, dict): raise TypeError() - search_type_2 = BaseDocSearchRequest.from_dict(data) + search_type_2 = HybridDocSearchRequest.from_dict(data) return search_type_2 diff --git a/sdks/python/julep/sdk/models/task.py b/sdks/python/julep/sdk/models/task.py index 5f0dec121..a969661e3 100644 --- a/sdks/python/julep/sdk/models/task.py +++ b/sdks/python/julep/sdk/models/task.py @@ -21,7 +21,6 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.create_tool_request import CreateToolRequest from ..models.embed_step import EmbedStep from ..models.error_workflow_step import ErrorWorkflowStep from ..models.evaluate_step import EvaluateStep @@ -42,6 +41,7 @@ from ..models.task_input_schema_type_0 import TaskInputSchemaType0 from ..models.task_main_item_type_17 import TaskMainItemType17 from ..models.task_metadata import TaskMetadata + from ..models.task_tool import TaskTool from ..models.tool_call_step import ToolCallStep from ..models.wait_for_input_step import WaitForInputStep from ..models.yield_step import YieldStep @@ -63,7 +63,7 @@ class Task: entrypoint of the task. input_schema (Union['TaskInputSchemaType0', None]): The schema for the input to the task. `null` means all inputs are valid. - tools (List['CreateToolRequest']): Tools defined specifically for this task not included in the Agent itself. 
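# Reviewer note on the doc-search unions above (illustrative): _parse_search /
# _parse_body try VectorDocSearchRequest first, then TextOnlyDocSearchRequest,
# then HybridDocSearchRequest. A text-only payload falls through correctly:
from julep.sdk.models.docs_search_route_search_body import DocsSearchRouteSearchBody
from julep.sdk.models.text_only_doc_search_request import TextOnlyDocSearchRequest

parsed = DocsSearchRouteSearchBody.from_dict(
    {"body": {"text": "quarterly report", "limit": 10, "lang": "en-US"}}
)
assert isinstance(parsed.body, TextOnlyDocSearchRequest)
# Possible follow-up: a hybrid payload is a superset of the vector fields, so
# the vector branch may claim it before the hybrid branch is tried -- worth a
# test against VectorDocSearchRequest.from_dict's required keys.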
+ tools (List['TaskTool']): Tools defined specifically for this task not included in the Agent itself. inherit_tools (bool): Whether to inherit tools from the parent agent or not. Defaults to true. Default: True. id (str): created_at (datetime.datetime): When this resource was created as UTC date-time @@ -95,7 +95,7 @@ class Task: ] ] input_schema: Union["TaskInputSchemaType0", None] - tools: List["CreateToolRequest"] + tools: List["TaskTool"] id: str created_at: datetime.datetime updated_at: datetime.datetime @@ -129,7 +129,6 @@ class Task: ] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.create_tool_request import CreateToolRequest from ..models.embed_step import EmbedStep from ..models.error_workflow_step import ErrorWorkflowStep from ..models.evaluate_step import EvaluateStep @@ -150,6 +149,7 @@ def to_dict(self) -> Dict[str, Any]: from ..models.task_input_schema_type_0 import TaskInputSchemaType0 from ..models.task_main_item_type_17 import TaskMainItemType17 from ..models.task_metadata import TaskMetadata + from ..models.task_tool import TaskTool from ..models.tool_call_step import ToolCallStep from ..models.wait_for_input_step import WaitForInputStep from ..models.yield_step import YieldStep @@ -287,7 +287,6 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.create_tool_request import CreateToolRequest from ..models.embed_step import EmbedStep from ..models.error_workflow_step import ErrorWorkflowStep from ..models.evaluate_step import EvaluateStep @@ -308,6 +307,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.task_input_schema_type_0 import TaskInputSchemaType0 from ..models.task_main_item_type_17 import TaskMainItemType17 from ..models.task_metadata import TaskMetadata + from ..models.task_tool import TaskTool from ..models.tool_call_step import ToolCallStep from ..models.wait_for_input_step import WaitForInputStep from ..models.yield_step import YieldStep @@ -507,7 +507,7 @@ def _parse_input_schema(data: object) -> Union["TaskInputSchemaType0", None]: tools = [] _tools = d.pop("tools") for tools_item_data in _tools: - tools_item = CreateToolRequest.from_dict(tools_item_data) + tools_item = TaskTool.from_dict(tools_item_data) tools.append(tools_item) diff --git a/sdks/python/julep/sdk/models/create_tool_request.py b/sdks/python/julep/sdk/models/task_tool.py similarity index 84% rename from sdks/python/julep/sdk/models/create_tool_request.py rename to sdks/python/julep/sdk/models/task_tool.py index 2a7c7ddea..6bb088d90 100644 --- a/sdks/python/julep/sdk/models/create_tool_request.py +++ b/sdks/python/julep/sdk/models/task_tool.py @@ -23,17 +23,18 @@ from ..models.function_def import FunctionDef -T = TypeVar("T", bound="CreateToolRequest") +T = TypeVar("T", bound="TaskTool") @_attrs_define -class CreateToolRequest: - """Payload for creating a tool - +class TaskTool: + """ Attributes: type (ToolType): Default: ToolType.FUNCTION. name (str): Valid python identifier names function (FunctionDef): Function definition + inherited (Union[Unset, bool]): Read-only: Whether the tool was inherited or not. Only applies within tasks. + Default: False. 
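# Reviewer note (illustrative): the new read-only `inherited` flag follows the
# UNSET convention -- a key absent on input stays UNSET and is omitted on
# output, rather than falling back to the documented False default.
# Assumptions: ToolType.FUNCTION serializes as "function" and FunctionDef
# tolerates an empty definition; neither is shown in this hunk.
from julep.sdk.models.task_tool import TaskTool

tool = TaskTool.from_dict(
    {
        "type": "function",   # assumed ToolType.FUNCTION wire value
        "name": "fetch_weather",
        "function": {},       # assumed-permissive FunctionDef payload
    }
)
assert "inherited" not in tool.to_dict()  # stays UNSET, so it is not emitted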
integration (Union[Unset, Any]): system (Union[Unset, Any]): api_call (Union[Unset, Any]): @@ -42,6 +43,7 @@ class CreateToolRequest: name: str function: "FunctionDef" type: ToolType = ToolType.FUNCTION + inherited: Union[Unset, bool] = False integration: Union[Unset, Any] = UNSET system: Union[Unset, Any] = UNSET api_call: Union[Unset, Any] = UNSET @@ -56,6 +58,8 @@ def to_dict(self) -> Dict[str, Any]: function = self.function.to_dict() + inherited = self.inherited + integration = self.integration system = self.system @@ -71,6 +75,8 @@ def to_dict(self) -> Dict[str, Any]: "function": function, } ) + if inherited is not UNSET: + field_dict["inherited"] = inherited if integration is not UNSET: field_dict["integration"] = integration if system is not UNSET: @@ -91,23 +97,26 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: function = FunctionDef.from_dict(d.pop("function")) + inherited = d.pop("inherited", UNSET) + integration = d.pop("integration", UNSET) system = d.pop("system", UNSET) api_call = d.pop("api_call", UNSET) - create_tool_request = cls( + task_tool = cls( type=type, name=name, function=function, + inherited=inherited, integration=integration, system=system, api_call=api_call, ) - create_tool_request.additional_properties = d - return create_tool_request + task_tool.additional_properties = d + return task_tool @property def additional_keys(self) -> List[str]: diff --git a/sdks/python/julep/sdk/models/tasks_create_or_update_route_create_or_update_accept.py b/sdks/python/julep/sdk/models/tasks_create_or_update_route_create_or_update_accept.py index fe93f83ad..8a4a27b84 100644 --- a/sdks/python/julep/sdk/models/tasks_create_or_update_route_create_or_update_accept.py +++ b/sdks/python/julep/sdk/models/tasks_create_or_update_route_create_or_update_accept.py @@ -3,9 +3,6 @@ class TasksCreateOrUpdateRouteCreateOrUpdateAccept(str, Enum): APPLICATIONJSON = "application/json" - APPLICATIONYAML = "application/yaml" - TEXTX_YAML = "text/x-yaml" - TEXTYAML = "text/yaml" def __str__(self) -> str: return str(self.value) diff --git a/sdks/python/julep/sdk/models/tasks_route_create_accept.py b/sdks/python/julep/sdk/models/tasks_route_create_accept.py index 1a905a374..e91a4afd6 100644 --- a/sdks/python/julep/sdk/models/tasks_route_create_accept.py +++ b/sdks/python/julep/sdk/models/tasks_route_create_accept.py @@ -3,9 +3,6 @@ class TasksRouteCreateAccept(str, Enum): APPLICATIONJSON = "application/json" - APPLICATIONYAML = "application/yaml" - TEXTX_YAML = "text/x-yaml" - TEXTYAML = "text/yaml" def __str__(self) -> str: return str(self.value) diff --git a/sdks/python/julep/sdk/models/base_doc_search_request.py b/sdks/python/julep/sdk/models/text_only_doc_search_request.py similarity index 64% rename from sdks/python/julep/sdk/models/base_doc_search_request.py rename to sdks/python/julep/sdk/models/text_only_doc_search_request.py index 3ded6ffa8..d3de3145a 100644 --- a/sdks/python/julep/sdk/models/base_doc_search_request.py +++ b/sdks/python/julep/sdk/models/text_only_doc_search_request.py @@ -14,23 +14,25 @@ from attrs import define as _attrs_define from attrs import field as _attrs_field -from ..models.base_doc_search_request_lang import BaseDocSearchRequestLang +from ..models.text_only_doc_search_request_lang import TextOnlyDocSearchRequestLang from ..types import UNSET, Unset -T = TypeVar("T", bound="BaseDocSearchRequest") +T = TypeVar("T", bound="TextOnlyDocSearchRequest") @_attrs_define -class BaseDocSearchRequest: +class TextOnlyDocSearchRequest: """ Attributes: limit (int): 
Default: 10. - lang (BaseDocSearchRequestLang): The language to be used for text-only search. Support for other languages - coming soon. Default: BaseDocSearchRequestLang.EN_US. + lang (TextOnlyDocSearchRequestLang): The language to be used for text-only search. Support for other languages + coming soon. Default: TextOnlyDocSearchRequestLang.EN_US. + text (str): Text to use in the search. """ + text: str limit: int = 10 - lang: BaseDocSearchRequestLang = BaseDocSearchRequestLang.EN_US + lang: TextOnlyDocSearchRequestLang = TextOnlyDocSearchRequestLang.EN_US additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: @@ -38,12 +40,15 @@ def to_dict(self) -> Dict[str, Any]: lang = self.lang.value + text = self.text + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( { "limit": limit, "lang": lang, + "text": text, } ) @@ -54,15 +59,18 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() limit = d.pop("limit") - lang = BaseDocSearchRequestLang(d.pop("lang")) + lang = TextOnlyDocSearchRequestLang(d.pop("lang")) + + text = d.pop("text") - base_doc_search_request = cls( + text_only_doc_search_request = cls( limit=limit, lang=lang, + text=text, ) - base_doc_search_request.additional_properties = d - return base_doc_search_request + text_only_doc_search_request.additional_properties = d + return text_only_doc_search_request @property def additional_keys(self) -> List[str]: diff --git a/sdks/python/julep/sdk/models/text_only_doc_search_request_lang.py b/sdks/python/julep/sdk/models/text_only_doc_search_request_lang.py new file mode 100644 index 000000000..1c10f35da --- /dev/null +++ b/sdks/python/julep/sdk/models/text_only_doc_search_request_lang.py @@ -0,0 +1,8 @@ +from enum import Enum + + +class TextOnlyDocSearchRequestLang(str, Enum): + EN_US = "en-US" + + def __str__(self) -> str: + return str(self.value) diff --git a/sdks/python/julep/sdk/models/transition.py b/sdks/python/julep/sdk/models/transition.py new file mode 100644 index 000000000..7a12cf19a --- /dev/null +++ b/sdks/python/julep/sdk/models/transition.py @@ -0,0 +1,159 @@ +import datetime +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field +from dateutil.parser import isoparse + +from ..models.transition_type import TransitionType +from ..types import UNSET, Unset + +if TYPE_CHECKING: + from ..models.transition_metadata import TransitionMetadata + from ..models.transition_target import TransitionTarget + + +T = TypeVar("T", bound="Transition") + + +@_attrs_define +class Transition: + """ + Attributes: + type (TransitionType): + output (Any): + created_at (datetime.datetime): When this resource was created as UTC date-time + updated_at (datetime.datetime): When this resource was updated as UTC date-time + execution_id (str): + current (TransitionTarget): + next_ (TransitionTarget): + id (str): + metadata (Union[Unset, TransitionMetadata]): + """ + + type: TransitionType + output: Any + created_at: datetime.datetime + updated_at: datetime.datetime + execution_id: str + current: "TransitionTarget" + next_: "TransitionTarget" + id: str + metadata: Union[Unset, "TransitionMetadata"] = UNSET + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + from 
..models.transition_metadata import TransitionMetadata + from ..models.transition_target import TransitionTarget + + type = self.type.value + + output = self.output + + created_at = self.created_at.isoformat() + + updated_at = self.updated_at.isoformat() + + execution_id = self.execution_id + + current = self.current.to_dict() + + next_ = self.next_.to_dict() + + id = self.id + + metadata: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.metadata, Unset): + metadata = self.metadata.to_dict() + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "type": type, + "output": output, + "created_at": created_at, + "updated_at": updated_at, + "execution_id": execution_id, + "current": current, + "next": next_, + "id": id, + } + ) + if metadata is not UNSET: + field_dict["metadata"] = metadata + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.transition_metadata import TransitionMetadata + from ..models.transition_target import TransitionTarget + + d = src_dict.copy() + type = TransitionType(d.pop("type")) + + output = d.pop("output") + + created_at = isoparse(d.pop("created_at")) + + updated_at = isoparse(d.pop("updated_at")) + + execution_id = d.pop("execution_id") + + current = TransitionTarget.from_dict(d.pop("current")) + + next_ = TransitionTarget.from_dict(d.pop("next")) + + id = d.pop("id") + + _metadata = d.pop("metadata", UNSET) + metadata: Union[Unset, TransitionMetadata] + if isinstance(_metadata, Unset): + metadata = UNSET + else: + metadata = TransitionMetadata.from_dict(_metadata) + + transition = cls( + type=type, + output=output, + created_at=created_at, + updated_at=updated_at, + execution_id=execution_id, + current=current, + next_=next_, + id=id, + metadata=metadata, + ) + + transition.additional_properties = d + return transition + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/transition_metadata.py b/sdks/python/julep/sdk/models/transition_metadata.py new file mode 100644 index 000000000..1dcd9dd15 --- /dev/null +++ b/sdks/python/julep/sdk/models/transition_metadata.py @@ -0,0 +1,56 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..types import UNSET, Unset + +T = TypeVar("T", bound="TransitionMetadata") + + +@_attrs_define +class TransitionMetadata: + """ """ + + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + d = src_dict.copy() + transition_metadata = cls() + + transition_metadata.additional_properties = d + return transition_metadata + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: 
str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/transition_type.py b/sdks/python/julep/sdk/models/transition_type.py new file mode 100644 index 000000000..3bfcc31e8 --- /dev/null +++ b/sdks/python/julep/sdk/models/transition_type.py @@ -0,0 +1,16 @@ +from enum import Enum + + +class TransitionType(str, Enum): + CANCELLED = "cancelled" + ERROR = "error" + FINISH = "finish" + FINISH_BRANCH = "finish_branch" + INIT = "init" + INIT_BRANCH = "init_branch" + RESUME = "resume" + STEP = "step" + WAIT = "wait" + + def __str__(self) -> str: + return str(self.value) diff --git a/sdks/python/julep/sdk/models/update_agent_request.py b/sdks/python/julep/sdk/models/update_agent_request.py index cc6330c83..b48913a41 100644 --- a/sdks/python/julep/sdk/models/update_agent_request.py +++ b/sdks/python/julep/sdk/models/update_agent_request.py @@ -19,7 +19,7 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.default_chat_settings import DefaultChatSettings from ..models.update_agent_request_metadata import UpdateAgentRequestMetadata @@ -38,7 +38,8 @@ class UpdateAgentRequest: model (str): Model name to use (gpt-4-turbo, gemini-nano etc) Default: ''. instructions (Union[List[str], str]): Instructions for the agent Default: '[]'. metadata (Union[Unset, UpdateAgentRequestMetadata]): - default_settings (Union[Unset, ChatOpenAISettings]): + default_settings (Union[Unset, DefaultChatSettings]): Default settings for the chat session (also used by the + agent) """ name: str = "" @@ -46,11 +47,11 @@ class UpdateAgentRequest: model: str = "" instructions: Union[List[str], str] = "[]" metadata: Union[Unset, "UpdateAgentRequestMetadata"] = UNSET - default_settings: Union[Unset, "ChatOpenAISettings"] = UNSET + default_settings: Union[Unset, "DefaultChatSettings"] = UNSET additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.default_chat_settings import DefaultChatSettings from ..models.update_agent_request_metadata import UpdateAgentRequestMetadata name = self.name @@ -93,7 +94,7 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.chat_open_ai_settings import ChatOpenAISettings + from ..models.default_chat_settings import DefaultChatSettings from ..models.update_agent_request_metadata import UpdateAgentRequestMetadata d = src_dict.copy() @@ -124,11 +125,11 @@ def _parse_instructions(data: object) -> Union[List[str], str]: metadata = UpdateAgentRequestMetadata.from_dict(_metadata) _default_settings = d.pop("default_settings", UNSET) - default_settings: Union[Unset, ChatOpenAISettings] + default_settings: Union[Unset, DefaultChatSettings] if isinstance(_default_settings, Unset): default_settings = UNSET else: - default_settings = ChatOpenAISettings.from_dict(_default_settings) + default_settings = DefaultChatSettings.from_dict(_default_settings) update_agent_request = cls( name=name, diff --git a/sdks/python/julep/sdk/models/update_task_request.py b/sdks/python/julep/sdk/models/update_task_request.py index 
5118fad6f..e4ddc2d51 100644 --- a/sdks/python/julep/sdk/models/update_task_request.py +++ b/sdks/python/julep/sdk/models/update_task_request.py @@ -19,7 +19,6 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.create_tool_request import CreateToolRequest from ..models.embed_step import EmbedStep from ..models.error_workflow_step import ErrorWorkflowStep from ..models.evaluate_step import EvaluateStep @@ -34,6 +33,7 @@ from ..models.set_step import SetStep from ..models.sleep_step import SleepStep from ..models.switch_step import SwitchStep + from ..models.task_tool import TaskTool from ..models.tool_call_step import ToolCallStep from ..models.update_task_request_additional_property_item_type_17 import ( UpdateTaskRequestAdditionalPropertyItemType17, @@ -64,7 +64,7 @@ class UpdateTaskRequest: 'YieldStep']]): The entrypoint of the task. input_schema (Union['UpdateTaskRequestInputSchemaType0', None]): The schema for the input to the task. `null` means all inputs are valid. - tools (List['CreateToolRequest']): Tools defined specifically for this task not included in the Agent itself. + tools (List['TaskTool']): Tools defined specifically for this task not included in the Agent itself. inherit_tools (bool): Whether to inherit tools from the parent agent or not. Defaults to true. Default: True. metadata (Union[Unset, UpdateTaskRequestMetadata]): """ @@ -92,7 +92,7 @@ class UpdateTaskRequest: ] ] input_schema: Union["UpdateTaskRequestInputSchemaType0", None] - tools: List["CreateToolRequest"] + tools: List["TaskTool"] description: str = "" inherit_tools: bool = True metadata: Union[Unset, "UpdateTaskRequestMetadata"] = UNSET @@ -123,7 +123,6 @@ class UpdateTaskRequest: ] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.create_tool_request import CreateToolRequest from ..models.embed_step import EmbedStep from ..models.error_workflow_step import ErrorWorkflowStep from ..models.evaluate_step import EvaluateStep @@ -138,6 +137,7 @@ def to_dict(self) -> Dict[str, Any]: from ..models.set_step import SetStep from ..models.sleep_step import SleepStep from ..models.switch_step import SwitchStep + from ..models.task_tool import TaskTool from ..models.tool_call_step import ToolCallStep from ..models.update_task_request_additional_property_item_type_17 import ( UpdateTaskRequestAdditionalPropertyItemType17, @@ -273,7 +273,6 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.create_tool_request import CreateToolRequest from ..models.embed_step import EmbedStep from ..models.error_workflow_step import ErrorWorkflowStep from ..models.evaluate_step import EvaluateStep @@ -288,6 +287,7 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: from ..models.set_step import SetStep from ..models.sleep_step import SleepStep from ..models.switch_step import SwitchStep + from ..models.task_tool import TaskTool from ..models.tool_call_step import ToolCallStep from ..models.update_task_request_additional_property_item_type_17 import ( UpdateTaskRequestAdditionalPropertyItemType17, @@ -497,7 +497,7 @@ def _parse_input_schema( tools = [] _tools = d.pop("tools") for tools_item_data in _tools: - tools_item = CreateToolRequest.from_dict(tools_item_data) + tools_item = TaskTool.from_dict(tools_item_data) tools.append(tools_item) diff --git a/sdks/python/julep/sdk/models/user_docs_search_route_search_body.py b/sdks/python/julep/sdk/models/user_docs_search_route_search_body.py index 
486b148a9..f5b770fe7 100644 --- a/sdks/python/julep/sdk/models/user_docs_search_route_search_body.py +++ b/sdks/python/julep/sdk/models/user_docs_search_route_search_body.py @@ -19,7 +19,9 @@ from ..types import UNSET, Unset if TYPE_CHECKING: - from ..models.base_doc_search_request import BaseDocSearchRequest + from ..models.hybrid_doc_search_request import HybridDocSearchRequest + from ..models.text_only_doc_search_request import TextOnlyDocSearchRequest + from ..models.vector_doc_search_request import VectorDocSearchRequest T = TypeVar("T", bound="UserDocsSearchRouteSearchBody") @@ -29,19 +31,23 @@ class UserDocsSearchRouteSearchBody: """ Attributes: - body ('BaseDocSearchRequest'): + body (Union['HybridDocSearchRequest', 'TextOnlyDocSearchRequest', 'VectorDocSearchRequest']): """ - body: "BaseDocSearchRequest" + body: Union[ + "HybridDocSearchRequest", "TextOnlyDocSearchRequest", "VectorDocSearchRequest" + ] additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.base_doc_search_request import BaseDocSearchRequest + from ..models.hybrid_doc_search_request import HybridDocSearchRequest + from ..models.text_only_doc_search_request import TextOnlyDocSearchRequest + from ..models.vector_doc_search_request import VectorDocSearchRequest body: Dict[str, Any] - if isinstance(self.body, BaseDocSearchRequest): + if isinstance(self.body, VectorDocSearchRequest): body = self.body.to_dict() - elif isinstance(self.body, BaseDocSearchRequest): + elif isinstance(self.body, TextOnlyDocSearchRequest): body = self.body.to_dict() else: body = self.body.to_dict() @@ -58,15 +64,23 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.base_doc_search_request import BaseDocSearchRequest + from ..models.hybrid_doc_search_request import HybridDocSearchRequest + from ..models.text_only_doc_search_request import TextOnlyDocSearchRequest + from ..models.vector_doc_search_request import VectorDocSearchRequest d = src_dict.copy() - def _parse_body(data: object) -> "BaseDocSearchRequest": + def _parse_body( + data: object, + ) -> Union[ + "HybridDocSearchRequest", + "TextOnlyDocSearchRequest", + "VectorDocSearchRequest", + ]: try: if not isinstance(data, dict): raise TypeError() - body_type_0 = BaseDocSearchRequest.from_dict(data) + body_type_0 = VectorDocSearchRequest.from_dict(data) return body_type_0 except: # noqa: E722 @@ -74,14 +88,14 @@ def _parse_body(data: object) -> "BaseDocSearchRequest": try: if not isinstance(data, dict): raise TypeError() - body_type_1 = BaseDocSearchRequest.from_dict(data) + body_type_1 = TextOnlyDocSearchRequest.from_dict(data) return body_type_1 except: # noqa: E722 pass if not isinstance(data, dict): raise TypeError() - body_type_2 = BaseDocSearchRequest.from_dict(data) + body_type_2 = HybridDocSearchRequest.from_dict(data) return body_type_2 diff --git a/sdks/python/julep/sdk/models/vector_doc_search_request.py b/sdks/python/julep/sdk/models/vector_doc_search_request.py new file mode 100644 index 000000000..62115cf82 --- /dev/null +++ b/sdks/python/julep/sdk/models/vector_doc_search_request.py @@ -0,0 +1,99 @@ +from typing import ( + TYPE_CHECKING, + Any, + BinaryIO, + Dict, + List, + Optional, + TextIO, + Tuple, + Type, + TypeVar, + cast, +) + +from attrs import define as _attrs_define +from attrs import field as _attrs_field + +from ..models.vector_doc_search_request_lang import VectorDocSearchRequestLang +from ..types import UNSET, 
Unset + +T = TypeVar("T", bound="VectorDocSearchRequest") + + +@_attrs_define +class VectorDocSearchRequest: + """ + Attributes: + limit (int): Default: 10. + lang (VectorDocSearchRequestLang): The language to be used for text-only search. Support for other languages + coming soon. Default: VectorDocSearchRequestLang.EN_US. + confidence (float): The confidence cutoff level Default: 0.5. + vector (List[float]): Vector to use in the search. Must be the same dimensions as the embedding model or else an + error will be thrown. + """ + + vector: List[float] + limit: int = 10 + lang: VectorDocSearchRequestLang = VectorDocSearchRequestLang.EN_US + confidence: float = 0.5 + additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) + + def to_dict(self) -> Dict[str, Any]: + limit = self.limit + + lang = self.lang.value + + confidence = self.confidence + + vector = self.vector + + field_dict: Dict[str, Any] = {} + field_dict.update(self.additional_properties) + field_dict.update( + { + "limit": limit, + "lang": lang, + "confidence": confidence, + "vector": vector, + } + ) + + return field_dict + + @classmethod + def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + d = src_dict.copy() + limit = d.pop("limit") + + lang = VectorDocSearchRequestLang(d.pop("lang")) + + confidence = d.pop("confidence") + + vector = cast(List[float], d.pop("vector")) + + vector_doc_search_request = cls( + limit=limit, + lang=lang, + confidence=confidence, + vector=vector, + ) + + vector_doc_search_request.additional_properties = d + return vector_doc_search_request + + @property + def additional_keys(self) -> List[str]: + return list(self.additional_properties.keys()) + + def __getitem__(self, key: str) -> Any: + return self.additional_properties[key] + + def __setitem__(self, key: str, value: Any) -> None: + self.additional_properties[key] = value + + def __delitem__(self, key: str) -> None: + del self.additional_properties[key] + + def __contains__(self, key: str) -> bool: + return key in self.additional_properties diff --git a/sdks/python/julep/sdk/models/vector_doc_search_request_lang.py b/sdks/python/julep/sdk/models/vector_doc_search_request_lang.py new file mode 100644 index 000000000..894c033ac --- /dev/null +++ b/sdks/python/julep/sdk/models/vector_doc_search_request_lang.py @@ -0,0 +1,8 @@ +from enum import Enum + + +class VectorDocSearchRequestLang(str, Enum): + EN_US = "en-US" + + def __str__(self) -> str: + return str(self.value) diff --git a/sdks/python/pyproject.toml b/sdks/python/pyproject.toml index 6d5ec9304..d219f0fa4 100644 --- a/sdks/python/pyproject.toml +++ b/sdks/python/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "julep" -version = "0.4.0-rc0" +version = "0.4.0a0" description = "Julep is a platform for creating \"persistent\" agents which can do long-horizon tasks" authors = ["Julep Developers "] license = "ISC" diff --git a/sdks/ts/src/api/index.ts b/sdks/ts/src/api/index.ts index abf11890a..942bd8c55 100644 --- a/sdks/ts/src/api/index.ts +++ b/sdks/ts/src/api/index.ts @@ -17,10 +17,8 @@ export type { Agents_CreateOrUpdateAgentRequest_id } from "./models/Agents_Creat export type { Agents_PatchAgentRequest } from "./models/Agents_PatchAgentRequest"; export type { Agents_UpdateAgentRequest } from "./models/Agents_UpdateAgentRequest"; export type { Chat_BaseChatOutput } from "./models/Chat_BaseChatOutput"; -export type { Chat_BaseChatResponse } from "./models/Chat_BaseChatResponse"; export type { Chat_BaseTokenLogProb } from "./models/Chat_BaseTokenLogProb"; 
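Taken together, the two Python hunks above replace the old single `BaseDocSearchRequest` body with a three-way union and introduce the vector request model. A short sketch grounded in the attrs definitions above; the one assumption is that `UserDocsSearchRouteSearchBody.from_dict` pops the same `body` key that `to_dict` writes:

from julep.sdk.models.user_docs_search_route_search_body import (
    UserDocsSearchRouteSearchBody,
)
from julep.sdk.models.vector_doc_search_request import VectorDocSearchRequest
from julep.sdk.models.vector_doc_search_request_lang import VectorDocSearchRequestLang

# Only `vector` is required; limit, lang and confidence default to 10,
# en-US and 0.5 respectively.
req = VectorDocSearchRequest(vector=[0.12, -0.34, 0.56])
assert req.limit == 10
assert req.lang is VectorDocSearchRequestLang.EN_US
assert req.confidence == 0.5

# _parse_body tries the vector variant first; a payload carrying all four
# vector keys parses as VectorDocSearchRequest, while a text-only payload
# raises KeyError inside from_dict and falls through to the next variant.
parsed = UserDocsSearchRouteSearchBody.from_dict({"body": req.to_dict()})
assert isinstance(parsed.body, VectorDocSearchRequest)
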
export type { Chat_ChatInput } from "./models/Chat_ChatInput"; -export type { Chat_ChatInputData } from "./models/Chat_ChatInputData"; export type { Chat_ChatOutputChunk } from "./models/Chat_ChatOutputChunk"; export type { Chat_ChatSettings } from "./models/Chat_ChatSettings"; export type { Chat_ChunkChatResponse } from "./models/Chat_ChunkChatResponse"; @@ -30,7 +28,6 @@ export type { Chat_FinishReason } from "./models/Chat_FinishReason"; export type { Chat_LogProbResponse } from "./models/Chat_LogProbResponse"; export type { Chat_MessageChatResponse } from "./models/Chat_MessageChatResponse"; export type { Chat_MultipleChatOutput } from "./models/Chat_MultipleChatOutput"; -export type { Chat_OpenAISettings } from "./models/Chat_OpenAISettings"; export type { Chat_SchemaCompletionResponseFormat } from "./models/Chat_SchemaCompletionResponseFormat"; export type { Chat_SimpleCompletionResponseFormat } from "./models/Chat_SimpleCompletionResponseFormat"; export type { Chat_SingleChatOutput } from "./models/Chat_SingleChatOutput"; @@ -52,7 +49,6 @@ export type { Common_ResourceUpdatedResponse } from "./models/Common_ResourceUpd export type { Common_toolRef } from "./models/Common_toolRef"; export type { Common_uuid } from "./models/Common_uuid"; export type { Common_validPythonIdentifier } from "./models/Common_validPythonIdentifier"; -export type { Docs_BaseDocSearchRequest } from "./models/Docs_BaseDocSearchRequest"; export type { Docs_CreateDocRequest } from "./models/Docs_CreateDocRequest"; export type { Docs_Doc } from "./models/Docs_Doc"; export type { Docs_DocOwner } from "./models/Docs_DocOwner"; @@ -120,9 +116,10 @@ export type { Tasks_UpdateTaskRequest } from "./models/Tasks_UpdateTaskRequest"; export type { Tasks_WaitForInputInfo } from "./models/Tasks_WaitForInputInfo"; export type { Tasks_WaitForInputStep } from "./models/Tasks_WaitForInputStep"; export type { Tasks_YieldStep } from "./models/Tasks_YieldStep"; +export type { Tools_ChosenApiCall } from "./models/Tools_ChosenApiCall"; export type { Tools_ChosenFunctionCall } from "./models/Tools_ChosenFunctionCall"; -export type { Tools_ChosenToolCall } from "./models/Tools_ChosenToolCall"; -export type { Tools_CreateToolRequest } from "./models/Tools_CreateToolRequest"; +export type { Tools_ChosenIntegrationCall } from "./models/Tools_ChosenIntegrationCall"; +export type { Tools_ChosenSystemCall } from "./models/Tools_ChosenSystemCall"; export type { Tools_FunctionCallOption } from "./models/Tools_FunctionCallOption"; export type { Tools_FunctionDef } from "./models/Tools_FunctionDef"; export type { Tools_NamedApiCallChoice } from "./models/Tools_NamedApiCallChoice"; @@ -147,10 +144,8 @@ export { $Agents_CreateOrUpdateAgentRequest_id } from "./schemas/$Agents_CreateO export { $Agents_PatchAgentRequest } from "./schemas/$Agents_PatchAgentRequest"; export { $Agents_UpdateAgentRequest } from "./schemas/$Agents_UpdateAgentRequest"; export { $Chat_BaseChatOutput } from "./schemas/$Chat_BaseChatOutput"; -export { $Chat_BaseChatResponse } from "./schemas/$Chat_BaseChatResponse"; export { $Chat_BaseTokenLogProb } from "./schemas/$Chat_BaseTokenLogProb"; export { $Chat_ChatInput } from "./schemas/$Chat_ChatInput"; -export { $Chat_ChatInputData } from "./schemas/$Chat_ChatInputData"; export { $Chat_ChatOutputChunk } from "./schemas/$Chat_ChatOutputChunk"; export { $Chat_ChatSettings } from "./schemas/$Chat_ChatSettings"; export { $Chat_ChunkChatResponse } from "./schemas/$Chat_ChunkChatResponse"; @@ -160,7 +155,6 @@ export { 
$Chat_FinishReason } from "./schemas/$Chat_FinishReason"; export { $Chat_LogProbResponse } from "./schemas/$Chat_LogProbResponse"; export { $Chat_MessageChatResponse } from "./schemas/$Chat_MessageChatResponse"; export { $Chat_MultipleChatOutput } from "./schemas/$Chat_MultipleChatOutput"; -export { $Chat_OpenAISettings } from "./schemas/$Chat_OpenAISettings"; export { $Chat_SchemaCompletionResponseFormat } from "./schemas/$Chat_SchemaCompletionResponseFormat"; export { $Chat_SimpleCompletionResponseFormat } from "./schemas/$Chat_SimpleCompletionResponseFormat"; export { $Chat_SingleChatOutput } from "./schemas/$Chat_SingleChatOutput"; @@ -182,7 +176,6 @@ export { $Common_ResourceUpdatedResponse } from "./schemas/$Common_ResourceUpdat export { $Common_toolRef } from "./schemas/$Common_toolRef"; export { $Common_uuid } from "./schemas/$Common_uuid"; export { $Common_validPythonIdentifier } from "./schemas/$Common_validPythonIdentifier"; -export { $Docs_BaseDocSearchRequest } from "./schemas/$Docs_BaseDocSearchRequest"; export { $Docs_CreateDocRequest } from "./schemas/$Docs_CreateDocRequest"; export { $Docs_Doc } from "./schemas/$Docs_Doc"; export { $Docs_DocOwner } from "./schemas/$Docs_DocOwner"; @@ -250,9 +243,10 @@ export { $Tasks_UpdateTaskRequest } from "./schemas/$Tasks_UpdateTaskRequest"; export { $Tasks_WaitForInputInfo } from "./schemas/$Tasks_WaitForInputInfo"; export { $Tasks_WaitForInputStep } from "./schemas/$Tasks_WaitForInputStep"; export { $Tasks_YieldStep } from "./schemas/$Tasks_YieldStep"; +export { $Tools_ChosenApiCall } from "./schemas/$Tools_ChosenApiCall"; export { $Tools_ChosenFunctionCall } from "./schemas/$Tools_ChosenFunctionCall"; -export { $Tools_ChosenToolCall } from "./schemas/$Tools_ChosenToolCall"; -export { $Tools_CreateToolRequest } from "./schemas/$Tools_CreateToolRequest"; +export { $Tools_ChosenIntegrationCall } from "./schemas/$Tools_ChosenIntegrationCall"; +export { $Tools_ChosenSystemCall } from "./schemas/$Tools_ChosenSystemCall"; export { $Tools_FunctionCallOption } from "./schemas/$Tools_FunctionCallOption"; export { $Tools_FunctionDef } from "./schemas/$Tools_FunctionDef"; export { $Tools_NamedApiCallChoice } from "./schemas/$Tools_NamedApiCallChoice"; diff --git a/sdks/ts/src/api/models/Chat_BaseChatResponse.ts b/sdks/ts/src/api/models/Chat_BaseChatResponse.ts deleted file mode 100644 index 412fd72c9..000000000 --- a/sdks/ts/src/api/models/Chat_BaseChatResponse.ts +++ /dev/null @@ -1,26 +0,0 @@ -/* generated using openapi-typescript-codegen -- do no edit */ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ -import type { Chat_CompetionUsage } from "./Chat_CompetionUsage"; -import type { Common_uuid } from "./Common_uuid"; -import type { Docs_DocReference } from "./Docs_DocReference"; -export type Chat_BaseChatResponse = { - /** - * Usage statistics for the completion request - */ - usage?: Chat_CompetionUsage; - /** - * Background job IDs that may have been spawned from this interaction. - */ - readonly jobs: Array; - /** - * Documents referenced for this request (for citation purposes). 
- */ - readonly docs: Array; - /** - * When this resource was created as UTC date-time - */ - readonly created_at: string; - readonly id: Common_uuid; -}; diff --git a/sdks/ts/src/api/models/Chat_ChatInput.ts b/sdks/ts/src/api/models/Chat_ChatInput.ts index c8f568be5..3a1fa6a36 100644 --- a/sdks/ts/src/api/models/Chat_ChatInput.ts +++ b/sdks/ts/src/api/models/Chat_ChatInput.ts @@ -2,13 +2,53 @@ /* istanbul ignore file */ /* tslint:disable */ /* eslint-disable */ -import type { Chat_ChatInputData } from "./Chat_ChatInputData"; import type { Chat_SchemaCompletionResponseFormat } from "./Chat_SchemaCompletionResponseFormat"; import type { Chat_SimpleCompletionResponseFormat } from "./Chat_SimpleCompletionResponseFormat"; import type { Common_identifierSafeUnicode } from "./Common_identifierSafeUnicode"; import type { Common_logit_bias } from "./Common_logit_bias"; import type { Common_uuid } from "./Common_uuid"; -export type Chat_ChatInput = Chat_ChatInputData & { +import type { Entries_ChatMLRole } from "./Entries_ChatMLRole"; +import type { Tools_NamedApiCallChoice } from "./Tools_NamedApiCallChoice"; +import type { Tools_NamedFunctionChoice } from "./Tools_NamedFunctionChoice"; +import type { Tools_NamedIntegrationChoice } from "./Tools_NamedIntegrationChoice"; +import type { Tools_NamedSystemChoice } from "./Tools_NamedSystemChoice"; +import type { Tools_Tool } from "./Tools_Tool"; +export type Chat_ChatInput = { + /** + * A list of new input messages comprising the conversation so far. + */ + messages: Array<{ + /** + * The role of the message + */ + role: Entries_ChatMLRole; + /** + * The content parts of the message + */ + content: string | Array; + /** + * Name + */ + name?: string; + /** + * Whether to continue this message or return a new one + */ + continue?: boolean; + }>; + /** + * (Advanced) List of tools that are provided in addition to agent's default set of tools. + */ + tools: Array; + /** + * Can be one of existing tools given to the agent earlier or the ones provided in this request. + */ + tool_choice?: + | "auto" + | "none" + | Tools_NamedFunctionChoice + | Tools_NamedIntegrationChoice + | Tools_NamedSystemChoice + | Tools_NamedApiCallChoice; /** * DISABLED: Whether this interaction should form new memories or not (will be enabled in a future release) */ @@ -21,6 +61,34 @@ export type Chat_ChatInput = Chat_ChatInputData & { * Whether this interaction should be stored in the session history or not */ save: boolean; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + */ + frequency_penalty?: number; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + */ + presence_penalty?: number; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + */ + temperature?: number; + /** + * Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. + */ + top_p?: number; + /** + * Number between 0 and 2.0. 
1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + */ + repetition_penalty?: number; + /** + * Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize number of tokens generated. + */ + length_penalty?: number; + /** + * Minimum probability compared to leading token to be considered + */ + min_p?: number; /** * Identifier of the model to be used */ @@ -55,32 +123,4 @@ export type Chat_ChatInput = Chat_ChatInputData & { * Agent ID of the agent to use for this interaction. (Only applicable for multi-agent sessions) */ agent?: Common_uuid; - /** - * Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - */ - repetition_penalty?: number; - /** - * Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize number of tokens generated. - */ - length_penalty?: number; - /** - * Minimum probability compared to leading token to be considered - */ - min_p?: number; - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - */ - frequency_penalty?: number; - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - */ - presence_penalty?: number; - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - */ - temperature?: number; - /** - * Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. - */ - top_p?: number; }; diff --git a/sdks/ts/src/api/models/Chat_ChatInputData.ts b/sdks/ts/src/api/models/Chat_ChatInputData.ts deleted file mode 100644 index b1569fc65..000000000 --- a/sdks/ts/src/api/models/Chat_ChatInputData.ts +++ /dev/null @@ -1,47 +0,0 @@ -/* generated using openapi-typescript-codegen -- do no edit */ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ -import type { Entries_ChatMLRole } from "./Entries_ChatMLRole"; -import type { Tools_NamedApiCallChoice } from "./Tools_NamedApiCallChoice"; -import type { Tools_NamedFunctionChoice } from "./Tools_NamedFunctionChoice"; -import type { Tools_NamedIntegrationChoice } from "./Tools_NamedIntegrationChoice"; -import type { Tools_NamedSystemChoice } from "./Tools_NamedSystemChoice"; -import type { Tools_Tool } from "./Tools_Tool"; -export type Chat_ChatInputData = { - /** - * A list of new input messages comprising the conversation so far. - */ - messages: Array<{ - /** - * The role of the message - */ - role: Entries_ChatMLRole; - /** - * The content parts of the message - */ - content: string | Array; - /** - * Name - */ - name?: string; - /** - * Whether to continue this message or return a new one - */ - continue?: boolean; - }>; - /** - * (Advanced) List of tools that are provided in addition to agent's default set of tools. 
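With `Chat_ChatInputData` deleted, the full request shape now lives directly on `Chat_ChatInput`. A sketch of a complete request body under the flattened type; every key below is taken from the type definition above, while the message text and parameter values are placeholders:

# Flattened chat request body: messages, tools, tool_choice and the sampling
# knobs are all declared on Chat_ChatInput itself now.
chat_request = {
    "messages": [
        {"role": "user", "content": "Summarize our last conversation."},
    ],
    "tools": [],            # extra tools on top of the agent's default set
    "tool_choice": "auto",  # or one of the Tools_Named*Choice objects
    "remember": False,      # still required, though DISABLED server-side
    "recall": True,
    "save": True,
    "stream": False,
    "stop": [],
    "temperature": 0.7,
    "top_p": 0.95,
}
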
- */ - tools: Array; - /** - * Can be one of existing tools given to the agent earlier or the ones provided in this request. - */ - tool_choice?: - | "auto" - | "none" - | Tools_NamedFunctionChoice - | Tools_NamedIntegrationChoice - | Tools_NamedSystemChoice - | Tools_NamedApiCallChoice; -}; diff --git a/sdks/ts/src/api/models/Chat_ChatOutputChunk.ts b/sdks/ts/src/api/models/Chat_ChatOutputChunk.ts index af379458a..996b18d9a 100644 --- a/sdks/ts/src/api/models/Chat_ChatOutputChunk.ts +++ b/sdks/ts/src/api/models/Chat_ChatOutputChunk.ts @@ -2,12 +2,22 @@ /* istanbul ignore file */ /* tslint:disable */ /* eslint-disable */ -import type { Chat_BaseChatOutput } from "./Chat_BaseChatOutput"; +import type { Chat_FinishReason } from "./Chat_FinishReason"; +import type { Chat_LogProbResponse } from "./Chat_LogProbResponse"; import type { Entries_ChatMLRole } from "./Entries_ChatMLRole"; /** * Streaming chat completion output */ -export type Chat_ChatOutputChunk = Chat_BaseChatOutput & { +export type Chat_ChatOutputChunk = { + index: number; + /** + * The reason the model stopped generating tokens + */ + finish_reason: Chat_FinishReason; + /** + * The log probabilities of tokens + */ + logprobs?: Chat_LogProbResponse; /** * The message generated by the model */ diff --git a/sdks/ts/src/api/models/Chat_ChatSettings.ts b/sdks/ts/src/api/models/Chat_ChatSettings.ts index 5f55c45e3..ec47f3621 100644 --- a/sdks/ts/src/api/models/Chat_ChatSettings.ts +++ b/sdks/ts/src/api/models/Chat_ChatSettings.ts @@ -2,13 +2,40 @@ /* istanbul ignore file */ /* tslint:disable */ /* eslint-disable */ -import type { Chat_DefaultChatSettings } from "./Chat_DefaultChatSettings"; import type { Chat_SchemaCompletionResponseFormat } from "./Chat_SchemaCompletionResponseFormat"; import type { Chat_SimpleCompletionResponseFormat } from "./Chat_SimpleCompletionResponseFormat"; import type { Common_identifierSafeUnicode } from "./Common_identifierSafeUnicode"; import type { Common_logit_bias } from "./Common_logit_bias"; import type { Common_uuid } from "./Common_uuid"; -export type Chat_ChatSettings = Chat_DefaultChatSettings & { +export type Chat_ChatSettings = { + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + */ + frequency_penalty?: number; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + */ + presence_penalty?: number; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + */ + temperature?: number; + /** + * Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. + */ + top_p?: number; + /** + * Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + */ + repetition_penalty?: number; + /** + * Number between 0 and 2.0. 
1.0 is neutral and values larger than that penalize number of tokens generated. + */ + length_penalty?: number; + /** + * Minimum probability compared to leading token to be considered + */ + min_p?: number; /** * Identifier of the model to be used */ diff --git a/sdks/ts/src/api/models/Chat_ChunkChatResponse.ts b/sdks/ts/src/api/models/Chat_ChunkChatResponse.ts index 86e614b6f..7ba639ccd 100644 --- a/sdks/ts/src/api/models/Chat_ChunkChatResponse.ts +++ b/sdks/ts/src/api/models/Chat_ChunkChatResponse.ts @@ -2,11 +2,30 @@ /* istanbul ignore file */ /* tslint:disable */ /* eslint-disable */ -import type { Chat_BaseChatResponse } from "./Chat_BaseChatResponse"; import type { Chat_ChatOutputChunk } from "./Chat_ChatOutputChunk"; -export type Chat_ChunkChatResponse = Chat_BaseChatResponse & { +import type { Chat_CompetionUsage } from "./Chat_CompetionUsage"; +import type { Common_uuid } from "./Common_uuid"; +import type { Docs_DocReference } from "./Docs_DocReference"; +export type Chat_ChunkChatResponse = { /** * The deltas generated by the model */ choices: Array; + /** + * Usage statistics for the completion request + */ + usage?: Chat_CompetionUsage; + /** + * Background job IDs that may have been spawned from this interaction. + */ + readonly jobs: Array; + /** + * Documents referenced for this request (for citation purposes). + */ + readonly docs: Array; + /** + * When this resource was created as UTC date-time + */ + readonly created_at: string; + readonly id: Common_uuid; }; diff --git a/sdks/ts/src/api/models/Chat_DefaultChatSettings.ts b/sdks/ts/src/api/models/Chat_DefaultChatSettings.ts index 5bfeb6bc5..7bd8aef6d 100644 --- a/sdks/ts/src/api/models/Chat_DefaultChatSettings.ts +++ b/sdks/ts/src/api/models/Chat_DefaultChatSettings.ts @@ -2,11 +2,26 @@ /* istanbul ignore file */ /* tslint:disable */ /* eslint-disable */ -import type { Chat_OpenAISettings } from "./Chat_OpenAISettings"; /** * Default settings for the chat session (also used by the agent) */ -export type Chat_DefaultChatSettings = Chat_OpenAISettings & { +export type Chat_DefaultChatSettings = { + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + */ + frequency_penalty?: number; + /** + * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + */ + presence_penalty?: number; + /** + * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + */ + temperature?: number; + /** + * Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. + */ + top_p?: number; /** * Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 
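Since `Chat_BaseChatResponse` was dissolved, each response type now carries the bookkeeping fields itself. A reading-side sketch for a streamed chunk; the payload is illustrative and the keys come from the inlined `Chat_ChunkChatResponse` and `Chat_ChatOutputChunk` definitions above:

# Illustrative streamed chunk; jobs, docs, created_at and id are now declared
# on Chat_ChunkChatResponse directly instead of a shared base type.
chunk = {
    "choices": [
        {
            "index": 0,
            "finish_reason": "stop",  # placeholder Chat_FinishReason value
            "delta": {"role": "assistant", "content": "Hello"},
        }
    ],
    "jobs": [],
    "docs": [],
    "created_at": "2024-09-08T12:48:36Z",
    "id": "7c9e6679-7425-40de-944b-e07fc1f90ae7",
}

for choice in chunk["choices"]:
    print(choice["delta"]["content"], end="")
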
*/ diff --git a/sdks/ts/src/api/models/Chat_MessageChatResponse.ts b/sdks/ts/src/api/models/Chat_MessageChatResponse.ts index 4ed0e111f..13b79f232 100644 --- a/sdks/ts/src/api/models/Chat_MessageChatResponse.ts +++ b/sdks/ts/src/api/models/Chat_MessageChatResponse.ts @@ -2,12 +2,31 @@ /* istanbul ignore file */ /* tslint:disable */ /* eslint-disable */ -import type { Chat_BaseChatResponse } from "./Chat_BaseChatResponse"; +import type { Chat_CompetionUsage } from "./Chat_CompetionUsage"; import type { Chat_MultipleChatOutput } from "./Chat_MultipleChatOutput"; import type { Chat_SingleChatOutput } from "./Chat_SingleChatOutput"; -export type Chat_MessageChatResponse = Chat_BaseChatResponse & { +import type { Common_uuid } from "./Common_uuid"; +import type { Docs_DocReference } from "./Docs_DocReference"; +export type Chat_MessageChatResponse = { /** * The deltas generated by the model */ choices: Array; + /** + * Usage statistics for the completion request + */ + usage?: Chat_CompetionUsage; + /** + * Background job IDs that may have been spawned from this interaction. + */ + readonly jobs: Array; + /** + * Documents referenced for this request (for citation purposes). + */ + readonly docs: Array; + /** + * When this resource was created as UTC date-time + */ + readonly created_at: string; + readonly id: Common_uuid; }; diff --git a/sdks/ts/src/api/models/Chat_MultipleChatOutput.ts b/sdks/ts/src/api/models/Chat_MultipleChatOutput.ts index 72d9a7333..fc20566f2 100644 --- a/sdks/ts/src/api/models/Chat_MultipleChatOutput.ts +++ b/sdks/ts/src/api/models/Chat_MultipleChatOutput.ts @@ -5,7 +5,10 @@ import type { Chat_BaseChatOutput } from "./Chat_BaseChatOutput"; import type { Common_uuid } from "./Common_uuid"; import type { Entries_ChatMLRole } from "./Entries_ChatMLRole"; -import type { Tools_ChosenToolCall } from "./Tools_ChosenToolCall"; +import type { Tools_ChosenApiCall } from "./Tools_ChosenApiCall"; +import type { Tools_ChosenFunctionCall } from "./Tools_ChosenFunctionCall"; +import type { Tools_ChosenIntegrationCall } from "./Tools_ChosenIntegrationCall"; +import type { Tools_ChosenSystemCall } from "./Tools_ChosenSystemCall"; /** * The output returned by the model. Note that, depending on the model provider, they might return more than one message. */ @@ -26,7 +29,12 @@ export type Chat_MultipleChatOutput = Chat_BaseChatOutput & { /** * Tool calls generated by the model. */ - readonly tool_calls?: Array | null; + readonly tool_calls?: Array< + | Tools_ChosenFunctionCall + | Tools_ChosenIntegrationCall + | Tools_ChosenSystemCall + | Tools_ChosenApiCall + > | null; /** * When this resource was created as UTC date-time */ diff --git a/sdks/ts/src/api/models/Chat_OpenAISettings.ts b/sdks/ts/src/api/models/Chat_OpenAISettings.ts deleted file mode 100644 index 2ab230a6f..000000000 --- a/sdks/ts/src/api/models/Chat_OpenAISettings.ts +++ /dev/null @@ -1,22 +0,0 @@ -/* generated using openapi-typescript-codegen -- do no edit */ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ -export type Chat_OpenAISettings = { - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. - */ - frequency_penalty?: number; - /** - * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. 
- */ - presence_penalty?: number; - /** - * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. - */ - temperature?: number; - /** - * Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. - */ - top_p?: number; -}; diff --git a/sdks/ts/src/api/models/Chat_SingleChatOutput.ts b/sdks/ts/src/api/models/Chat_SingleChatOutput.ts index 87f7472cd..8f18320f9 100644 --- a/sdks/ts/src/api/models/Chat_SingleChatOutput.ts +++ b/sdks/ts/src/api/models/Chat_SingleChatOutput.ts @@ -5,7 +5,10 @@ import type { Chat_BaseChatOutput } from "./Chat_BaseChatOutput"; import type { Common_uuid } from "./Common_uuid"; import type { Entries_ChatMLRole } from "./Entries_ChatMLRole"; -import type { Tools_ChosenToolCall } from "./Tools_ChosenToolCall"; +import type { Tools_ChosenApiCall } from "./Tools_ChosenApiCall"; +import type { Tools_ChosenFunctionCall } from "./Tools_ChosenFunctionCall"; +import type { Tools_ChosenIntegrationCall } from "./Tools_ChosenIntegrationCall"; +import type { Tools_ChosenSystemCall } from "./Tools_ChosenSystemCall"; /** * The output returned by the model. Note that, depending on the model provider, they might return more than one message. */ @@ -26,7 +29,12 @@ export type Chat_SingleChatOutput = Chat_BaseChatOutput & { /** * Tool calls generated by the model. */ - readonly tool_calls?: Array | null; + readonly tool_calls?: Array< + | Tools_ChosenFunctionCall + | Tools_ChosenIntegrationCall + | Tools_ChosenSystemCall + | Tools_ChosenApiCall + > | null; /** * When this resource was created as UTC date-time */ diff --git a/sdks/ts/src/api/models/Chat_TokenLogProb.ts b/sdks/ts/src/api/models/Chat_TokenLogProb.ts index eca12f731..86098572a 100644 --- a/sdks/ts/src/api/models/Chat_TokenLogProb.ts +++ b/sdks/ts/src/api/models/Chat_TokenLogProb.ts @@ -3,7 +3,13 @@ /* tslint:disable */ /* eslint-disable */ import type { Chat_BaseTokenLogProb } from "./Chat_BaseTokenLogProb"; -export type Chat_TokenLogProb = Chat_BaseTokenLogProb & { +export type Chat_TokenLogProb = { + token: string; + /** + * The log probability of the token + */ + logprob: number; + bytes?: Array; /** * The log probabilities of the tokens */ diff --git a/sdks/ts/src/api/models/Docs_BaseDocSearchRequest.ts b/sdks/ts/src/api/models/Docs_BaseDocSearchRequest.ts deleted file mode 100644 index b6dd20f99..000000000 --- a/sdks/ts/src/api/models/Docs_BaseDocSearchRequest.ts +++ /dev/null @@ -1,11 +0,0 @@ -/* generated using openapi-typescript-codegen -- do no edit */ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ -export type Docs_BaseDocSearchRequest = { - limit: number; - /** - * The language to be used for text-only search. Support for other languages coming soon. 
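`Tools_ChosenToolCall` gives way to four concrete variants, so `tool_calls` entries are now discriminated by a literal `type`. A dispatch sketch over raw payloads, using only the discriminators and payload keys introduced above; the function name and id are placeholders:

# Each chosen-call variant pairs a literal `type` with a payload key of the
# same name ("function", "integration", "system" or "api_call").
def describe_tool_call(call: dict) -> str:
    kind = call["type"]
    if kind in ("function", "integration", "system", "api_call"):
        return f"{kind} call: {call[kind]!r}"
    raise ValueError(f"unknown chosen tool call type: {kind!r}")

print(describe_tool_call({
    "type": "function",
    "function": {"name": "get_weather"},  # Tools_FunctionCallOption payload
    "id": "7c9e6679-7425-40de-944b-e07fc1f90ae7",
}))
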
- */ - lang: "en-US"; -}; diff --git a/sdks/ts/src/api/models/Docs_HybridDocSearchRequest.ts b/sdks/ts/src/api/models/Docs_HybridDocSearchRequest.ts index a1ba32811..644da1efd 100644 --- a/sdks/ts/src/api/models/Docs_HybridDocSearchRequest.ts +++ b/sdks/ts/src/api/models/Docs_HybridDocSearchRequest.ts @@ -2,8 +2,12 @@ /* istanbul ignore file */ /* tslint:disable */ /* eslint-disable */ -import type { Docs_BaseDocSearchRequest } from "./Docs_BaseDocSearchRequest"; -export type Docs_HybridDocSearchRequest = Docs_BaseDocSearchRequest & { +export type Docs_HybridDocSearchRequest = { + limit: number; + /** + * The language to be used for text-only search. Support for other languages coming soon. + */ + lang: "en-US"; /** * The confidence cutoff level */ diff --git a/sdks/ts/src/api/models/Docs_TextOnlyDocSearchRequest.ts b/sdks/ts/src/api/models/Docs_TextOnlyDocSearchRequest.ts index 2d05e9f9b..2a6678c7d 100644 --- a/sdks/ts/src/api/models/Docs_TextOnlyDocSearchRequest.ts +++ b/sdks/ts/src/api/models/Docs_TextOnlyDocSearchRequest.ts @@ -2,8 +2,12 @@ /* istanbul ignore file */ /* tslint:disable */ /* eslint-disable */ -import type { Docs_BaseDocSearchRequest } from "./Docs_BaseDocSearchRequest"; -export type Docs_TextOnlyDocSearchRequest = Docs_BaseDocSearchRequest & { +export type Docs_TextOnlyDocSearchRequest = { + limit: number; + /** + * The language to be used for text-only search. Support for other languages coming soon. + */ + lang: "en-US"; /** * Text to use in the search. */ diff --git a/sdks/ts/src/api/models/Docs_VectorDocSearchRequest.ts b/sdks/ts/src/api/models/Docs_VectorDocSearchRequest.ts index 7a720c46a..5273cb8ea 100644 --- a/sdks/ts/src/api/models/Docs_VectorDocSearchRequest.ts +++ b/sdks/ts/src/api/models/Docs_VectorDocSearchRequest.ts @@ -2,8 +2,12 @@ /* istanbul ignore file */ /* tslint:disable */ /* eslint-disable */ -import type { Docs_BaseDocSearchRequest } from "./Docs_BaseDocSearchRequest"; -export type Docs_VectorDocSearchRequest = Docs_BaseDocSearchRequest & { +export type Docs_VectorDocSearchRequest = { + limit: number; + /** + * The language to be used for text-only search. Support for other languages coming soon. 
+ */ + lang: "en-US"; /** * The confidence cutoff level */ diff --git a/sdks/ts/src/api/models/Entries_BaseEntry.ts b/sdks/ts/src/api/models/Entries_BaseEntry.ts index d397d851e..f9813d2f4 100644 --- a/sdks/ts/src/api/models/Entries_BaseEntry.ts +++ b/sdks/ts/src/api/models/Entries_BaseEntry.ts @@ -3,13 +3,23 @@ /* tslint:disable */ /* eslint-disable */ import type { Entries_ChatMLRole } from "./Entries_ChatMLRole"; -import type { Tools_ChosenToolCall } from "./Tools_ChosenToolCall"; +import type { Tools_ChosenApiCall } from "./Tools_ChosenApiCall"; +import type { Tools_ChosenFunctionCall } from "./Tools_ChosenFunctionCall"; +import type { Tools_ChosenIntegrationCall } from "./Tools_ChosenIntegrationCall"; +import type { Tools_ChosenSystemCall } from "./Tools_ChosenSystemCall"; import type { Tools_Tool } from "./Tools_Tool"; import type { Tools_ToolResponse } from "./Tools_ToolResponse"; export type Entries_BaseEntry = { role: Entries_ChatMLRole; name: string | null; - content: Tools_Tool | Tools_ChosenToolCall | string | Tools_ToolResponse; + content: + | Tools_Tool + | Tools_ChosenFunctionCall + | Tools_ChosenIntegrationCall + | Tools_ChosenSystemCall + | Tools_ChosenApiCall + | string + | Tools_ToolResponse; source: | "api_request" | "api_response" diff --git a/sdks/ts/src/api/models/Executions_Transition.ts b/sdks/ts/src/api/models/Executions_Transition.ts index f6d4e9372..99b73f7d1 100644 --- a/sdks/ts/src/api/models/Executions_Transition.ts +++ b/sdks/ts/src/api/models/Executions_Transition.ts @@ -3,9 +3,27 @@ /* tslint:disable */ /* eslint-disable */ import type { Common_uuid } from "./Common_uuid"; -import type { Executions_TransitionEvent } from "./Executions_TransitionEvent"; import type { Executions_TransitionTarget } from "./Executions_TransitionTarget"; -export type Executions_Transition = Executions_TransitionEvent & { +export type Executions_Transition = { + readonly type: + | "init" + | "init_branch" + | "finish" + | "finish_branch" + | "wait" + | "resume" + | "error" + | "step" + | "cancelled"; + readonly output: any; + /** + * When this resource was created as UTC date-time + */ + readonly created_at: string; + /** + * When this resource was updated as UTC date-time + */ + readonly updated_at: string; readonly execution_id: Common_uuid; readonly current: Executions_TransitionTarget; readonly next: Executions_TransitionTarget | null; diff --git a/sdks/ts/src/api/models/Tasks_TaskTool.ts b/sdks/ts/src/api/models/Tasks_TaskTool.ts index 8f53afa9e..79b79a94e 100644 --- a/sdks/ts/src/api/models/Tasks_TaskTool.ts +++ b/sdks/ts/src/api/models/Tasks_TaskTool.ts @@ -2,10 +2,27 @@ /* istanbul ignore file */ /* tslint:disable */ /* eslint-disable */ -import type { Tools_CreateToolRequest } from "./Tools_CreateToolRequest"; -export type Tasks_TaskTool = Tools_CreateToolRequest & { +import type { Common_validPythonIdentifier } from "./Common_validPythonIdentifier"; +import type { Tools_FunctionDef } from "./Tools_FunctionDef"; +import type { Tools_ToolType } from "./Tools_ToolType"; +export type Tasks_TaskTool = { /** * Read-only: Whether the tool was inherited or not. Only applies within tasks. */ readonly inherited?: boolean; + /** + * Whether this tool is a `function`, `api_call`, `system` etc. 
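`Executions_Transition` now spells out its nine-state union instead of extending `Executions_TransitionEvent`; the Python side ships the same states as the new `TransitionType` enum (`transition_type.py` in the file list). A sketch assuming this generator's usual upper-cased member naming:

from julep.sdk.models.transition_type import TransitionType

# Member names assumed from the generator's naming convention; the string
# values come from the inlined union above.
terminal = {TransitionType.FINISH, TransitionType.ERROR, TransitionType.CANCELLED}

def is_terminal(raw: str) -> bool:
    return TransitionType(raw) in terminal

assert is_terminal("cancelled")
assert not is_terminal("step")
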
(Only `function` tool supported right now)The type of the tool + */ + type: Tools_ToolType; + /** + * Name of the tool (must be unique for this agent and a valid python identifier string ) + */ + name: Common_validPythonIdentifier; + /** + * The function to call + */ + function: Tools_FunctionDef; + integration?: any; + system?: any; + api_call?: any; }; diff --git a/sdks/ts/src/api/models/Tools_ChosenApiCall.ts b/sdks/ts/src/api/models/Tools_ChosenApiCall.ts new file mode 100644 index 000000000..2a129782c --- /dev/null +++ b/sdks/ts/src/api/models/Tools_ChosenApiCall.ts @@ -0,0 +1,10 @@ +/* generated using openapi-typescript-codegen -- do no edit */ +/* istanbul ignore file */ +/* tslint:disable */ +/* eslint-disable */ +import type { Common_uuid } from "./Common_uuid"; +export type Tools_ChosenApiCall = { + type: "api_call"; + api_call: any; + readonly id: Common_uuid; +}; diff --git a/sdks/ts/src/api/models/Tools_ChosenFunctionCall.ts b/sdks/ts/src/api/models/Tools_ChosenFunctionCall.ts index dcd3267ad..16d43f542 100644 --- a/sdks/ts/src/api/models/Tools_ChosenFunctionCall.ts +++ b/sdks/ts/src/api/models/Tools_ChosenFunctionCall.ts @@ -2,13 +2,13 @@ /* istanbul ignore file */ /* tslint:disable */ /* eslint-disable */ -import type { Tools_ChosenToolCall } from "./Tools_ChosenToolCall"; +import type { Common_uuid } from "./Common_uuid"; import type { Tools_FunctionCallOption } from "./Tools_FunctionCallOption"; -export type Tools_ChosenFunctionCall = Tools_ChosenToolCall & { - function: Tools_FunctionCallOption; +export type Tools_ChosenFunctionCall = { type: "function"; /** * The function to call */ function: Tools_FunctionCallOption; + readonly id: Common_uuid; }; diff --git a/sdks/ts/src/api/models/Tools_ChosenIntegrationCall.ts b/sdks/ts/src/api/models/Tools_ChosenIntegrationCall.ts new file mode 100644 index 000000000..408bfb04b --- /dev/null +++ b/sdks/ts/src/api/models/Tools_ChosenIntegrationCall.ts @@ -0,0 +1,10 @@ +/* generated using openapi-typescript-codegen -- do no edit */ +/* istanbul ignore file */ +/* tslint:disable */ +/* eslint-disable */ +import type { Common_uuid } from "./Common_uuid"; +export type Tools_ChosenIntegrationCall = { + type: "integration"; + integration: any; + readonly id: Common_uuid; +}; diff --git a/sdks/ts/src/api/models/Tools_ChosenSystemCall.ts b/sdks/ts/src/api/models/Tools_ChosenSystemCall.ts new file mode 100644 index 000000000..5beb61298 --- /dev/null +++ b/sdks/ts/src/api/models/Tools_ChosenSystemCall.ts @@ -0,0 +1,10 @@ +/* generated using openapi-typescript-codegen -- do no edit */ +/* istanbul ignore file */ +/* tslint:disable */ +/* eslint-disable */ +import type { Common_uuid } from "./Common_uuid"; +export type Tools_ChosenSystemCall = { + type: "system"; + system: any; + readonly id: Common_uuid; +}; diff --git a/sdks/ts/src/api/models/Tools_ChosenToolCall.ts b/sdks/ts/src/api/models/Tools_ChosenToolCall.ts deleted file mode 100644 index d44bad5c5..000000000 --- a/sdks/ts/src/api/models/Tools_ChosenToolCall.ts +++ /dev/null @@ -1,21 +0,0 @@ -/* generated using openapi-typescript-codegen -- do no edit */ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ -import type { Common_uuid } from "./Common_uuid"; -import type { Tools_FunctionCallOption } from "./Tools_FunctionCallOption"; -import type { Tools_ToolType } from "./Tools_ToolType"; -/** - * The response tool value generated by the model - */ -export type Tools_ChosenToolCall = { - /** - * Whether this tool is a `function`, `api_call`, `system` etc. 
(Only `function` tool supported right now) - */ - type: Tools_ToolType; - function?: Tools_FunctionCallOption; - integration?: any; - system?: any; - api_call?: any; - readonly id: Common_uuid; -}; diff --git a/sdks/ts/src/api/models/Tools_CreateToolRequest.ts b/sdks/ts/src/api/models/Tools_CreateToolRequest.ts deleted file mode 100644 index f234a6ab6..000000000 --- a/sdks/ts/src/api/models/Tools_CreateToolRequest.ts +++ /dev/null @@ -1,27 +0,0 @@ -/* generated using openapi-typescript-codegen -- do no edit */ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ -import type { Common_validPythonIdentifier } from "./Common_validPythonIdentifier"; -import type { Tools_FunctionDef } from "./Tools_FunctionDef"; -import type { Tools_ToolType } from "./Tools_ToolType"; -/** - * Payload for creating a tool - */ -export type Tools_CreateToolRequest = { - /** - * Whether this tool is a `function`, `api_call`, `system` etc. (Only `function` tool supported right now)The type of the tool - */ - type: Tools_ToolType; - /** - * Name of the tool (must be unique for this agent and a valid python identifier string ) - */ - name: Common_validPythonIdentifier; - /** - * The function to call - */ - function: Tools_FunctionDef; - integration?: any; - system?: any; - api_call?: any; -}; diff --git a/sdks/ts/src/api/schemas/$Chat_BaseChatResponse.ts b/sdks/ts/src/api/schemas/$Chat_BaseChatResponse.ts deleted file mode 100644 index 764a0127b..000000000 --- a/sdks/ts/src/api/schemas/$Chat_BaseChatResponse.ts +++ /dev/null @@ -1,50 +0,0 @@ -/* generated using openapi-typescript-codegen -- do no edit */ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ -export const $Chat_BaseChatResponse = { - properties: { - usage: { - type: "all-of", - description: `Usage statistics for the completion request`, - contains: [ - { - type: "Chat_CompetionUsage", - }, - ], - }, - jobs: { - type: "array", - contains: { - type: "Common_uuid", - }, - isReadOnly: true, - isRequired: true, - }, - docs: { - type: "array", - contains: { - type: "Docs_DocReference", - }, - isReadOnly: true, - isRequired: true, - }, - created_at: { - type: "string", - description: `When this resource was created as UTC date-time`, - isReadOnly: true, - isRequired: true, - format: "date-time", - }, - id: { - type: "all-of", - contains: [ - { - type: "Common_uuid", - }, - ], - isReadOnly: true, - isRequired: true, - }, - }, -} as const; diff --git a/sdks/ts/src/api/schemas/$Chat_ChatInput.ts b/sdks/ts/src/api/schemas/$Chat_ChatInput.ts index e42c5cdfa..571969e92 100644 --- a/sdks/ts/src/api/schemas/$Chat_ChatInput.ts +++ b/sdks/ts/src/api/schemas/$Chat_ChatInput.ts @@ -3,135 +3,197 @@ /* tslint:disable */ /* eslint-disable */ export const $Chat_ChatInput = { - type: "all-of", - contains: [ - { - type: "Chat_ChatInputData", - }, - { - properties: { - remember: { - type: "boolean", - description: `DISABLED: Whether this interaction should form new memories or not (will be enabled in a future release)`, - isReadOnly: true, - isRequired: true, - }, - recall: { - type: "boolean", - description: `Whether previous memories and docs should be recalled or not`, - isRequired: true, - }, - save: { - type: "boolean", - description: `Whether this interaction should be stored in the session history or not`, - isRequired: true, - }, - model: { - type: "all-of", - description: `Identifier of the model to be used`, - contains: [ - { - type: "Common_identifierSafeUnicode", - }, - ], - }, - stream: { - type: "boolean", - description: 
`Indicates if the server should stream the response as it's generated`, - isRequired: true, - }, - stop: { - type: "array", - contains: { + properties: { + messages: { + type: "array", + contains: { + properties: { + role: { + type: "all-of", + description: `The role of the message`, + contains: [ + { + type: "Entries_ChatMLRole", + }, + ], + isRequired: true, + }, + content: { + type: "any-of", + description: `The content parts of the message`, + contains: [ + { + type: "string", + }, + { + type: "array", + contains: { + type: "string", + }, + }, + ], + isRequired: true, + }, + name: { type: "string", + description: `Name`, }, - isRequired: true, - }, - seed: { - type: "number", - description: `If specified, the system will make a best effort to sample deterministically for that particular seed value`, - format: "int16", - maximum: 1000, - minimum: -1, - }, - max_tokens: { - type: "number", - description: `The maximum number of tokens to generate in the chat completion`, - format: "uint32", - minimum: 1, - }, - logit_bias: { - type: "dictionary", - contains: { - type: "Common_logit_bias", + continue: { + type: "boolean", + description: `Whether to continue this message or return a new one`, }, }, - response_format: { - type: "any-of", - description: `Response format (set to \`json_object\` to restrict output to JSON)`, - contains: [ - { - type: "Chat_SimpleCompletionResponseFormat", - }, - { - type: "Chat_SchemaCompletionResponseFormat", - }, - ], - }, - agent: { - type: "all-of", - description: `Agent ID of the agent to use for this interaction. (Only applicable for multi-agent sessions)`, - contains: [ - { - type: "Common_uuid", - }, - ], - }, - repetition_penalty: { - type: "number", - description: `Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, - format: "float", - maximum: 2, + }, + isRequired: true, + }, + tools: { + type: "array", + contains: { + type: "Tools_Tool", + }, + isRequired: true, + }, + tool_choice: { + type: "any-of", + description: `Can be one of existing tools given to the agent earlier or the ones provided in this request.`, + contains: [ + { + type: "Enum", }, - length_penalty: { - type: "number", - description: `Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize number of tokens generated.`, - format: "float", - maximum: 2, + { + type: "Tools_NamedFunctionChoice", }, - min_p: { - type: "number", - description: `Minimum probability compared to leading token to be considered`, - format: "float", - maximum: 1, + { + type: "Tools_NamedIntegrationChoice", }, - frequency_penalty: { - type: "number", - description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, - format: "float", - maximum: 2, - minimum: -2, + { + type: "Tools_NamedSystemChoice", }, - presence_penalty: { - type: "number", - description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, - format: "float", - maximum: 2, - minimum: -2, + { + type: "Tools_NamedApiCallChoice", }, - temperature: { - type: "number", - description: `What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.`, - format: "float", - maximum: 5, + ], + }, + remember: { + type: "boolean", + description: `DISABLED: Whether this interaction should form new memories or not (will be enabled in a future release)`, + isReadOnly: true, + isRequired: true, + }, + recall: { + type: "boolean", + description: `Whether previous memories and docs should be recalled or not`, + isRequired: true, + }, + save: { + type: "boolean", + description: `Whether this interaction should be stored in the session history or not`, + isRequired: true, + }, + frequency_penalty: { + type: "number", + description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, + format: "float", + maximum: 2, + minimum: -2, + }, + presence_penalty: { + type: "number", + description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, + format: "float", + maximum: 2, + minimum: -2, + }, + temperature: { + type: "number", + description: `What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.`, + format: "float", + maximum: 5, + }, + top_p: { + type: "number", + description: `Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.`, + format: "float", + maximum: 1, + }, + repetition_penalty: { + type: "number", + description: `Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, + format: "float", + maximum: 2, + }, + length_penalty: { + type: "number", + description: `Number between 0 and 2.0. 
1.0 is neutral and values larger than that penalize number of tokens generated.`, + format: "float", + maximum: 2, + }, + min_p: { + type: "number", + description: `Minimum probability compared to leading token to be considered`, + format: "float", + maximum: 1, + }, + model: { + type: "all-of", + description: `Identifier of the model to be used`, + contains: [ + { + type: "Common_identifierSafeUnicode", + }, + ], + }, + stream: { + type: "boolean", + description: `Indicates if the server should stream the response as it's generated`, + isRequired: true, + }, + stop: { + type: "array", + contains: { + type: "string", + }, + isRequired: true, + }, + seed: { + type: "number", + description: `If specified, the system will make a best effort to sample deterministically for that particular seed value`, + format: "int16", + maximum: 1000, + minimum: -1, + }, + max_tokens: { + type: "number", + description: `The maximum number of tokens to generate in the chat completion`, + format: "uint32", + minimum: 1, + }, + logit_bias: { + type: "dictionary", + contains: { + type: "Common_logit_bias", + }, + }, + response_format: { + type: "any-of", + description: `Response format (set to \`json_object\` to restrict output to JSON)`, + contains: [ + { + type: "Chat_SimpleCompletionResponseFormat", }, - top_p: { - type: "number", - description: `Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.`, - format: "float", - maximum: 1, + { + type: "Chat_SchemaCompletionResponseFormat", }, - }, + ], + }, + agent: { + type: "all-of", + description: `Agent ID of the agent to use for this interaction. 
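The regenerated `$Chat_ChatInput` constants above pin numeric bounds on every sampling knob. A client-side pre-check sketch: the explicit bounds are read off the schema's `maximum`/`minimum`, while the zero lower bounds are an inference from the field descriptions, not explicit schema minimums:

# Bounds taken from the $Chat_ChatInput schema constants; zero lower bounds
# are inferred from the descriptions rather than an explicit `minimum`.
BOUNDS = {
    "frequency_penalty": (-2.0, 2.0),
    "presence_penalty": (-2.0, 2.0),
    "temperature": (0.0, 5.0),
    "top_p": (0.0, 1.0),
    "repetition_penalty": (0.0, 2.0),
    "length_penalty": (0.0, 2.0),
    "min_p": (0.0, 1.0),
    "seed": (-1, 1000),
}

def validate_sampling(settings: dict) -> None:
    for key, (low, high) in BOUNDS.items():
        value = settings.get(key)
        if value is not None and not low <= value <= high:
            raise ValueError(f"{key}={value} outside [{low}, {high}]")

validate_sampling({"temperature": 0.7, "top_p": 0.95})
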
(Only applicable for multi-agent sessions)`, + contains: [ + { + type: "Common_uuid", + }, + ], }, - ], + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Chat_ChatInputData.ts b/sdks/ts/src/api/schemas/$Chat_ChatInputData.ts deleted file mode 100644 index de1776578..000000000 --- a/sdks/ts/src/api/schemas/$Chat_ChatInputData.ts +++ /dev/null @@ -1,78 +0,0 @@ -/* generated using openapi-typescript-codegen -- do no edit */ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ -export const $Chat_ChatInputData = { - properties: { - messages: { - type: "array", - contains: { - properties: { - role: { - type: "all-of", - description: `The role of the message`, - contains: [ - { - type: "Entries_ChatMLRole", - }, - ], - isRequired: true, - }, - content: { - type: "any-of", - description: `The content parts of the message`, - contains: [ - { - type: "string", - }, - { - type: "array", - contains: { - type: "string", - }, - }, - ], - isRequired: true, - }, - name: { - type: "string", - description: `Name`, - }, - continue: { - type: "boolean", - description: `Whether to continue this message or return a new one`, - }, - }, - }, - isRequired: true, - }, - tools: { - type: "array", - contains: { - type: "Tools_Tool", - }, - isRequired: true, - }, - tool_choice: { - type: "any-of", - description: `Can be one of existing tools given to the agent earlier or the ones provided in this request.`, - contains: [ - { - type: "Enum", - }, - { - type: "Tools_NamedFunctionChoice", - }, - { - type: "Tools_NamedIntegrationChoice", - }, - { - type: "Tools_NamedSystemChoice", - }, - { - type: "Tools_NamedApiCallChoice", - }, - ], - }, - }, -} as const; diff --git a/sdks/ts/src/api/schemas/$Chat_ChatOutputChunk.ts b/sdks/ts/src/api/schemas/$Chat_ChatOutputChunk.ts index 869be9d62..b43dbf0b8 100644 --- a/sdks/ts/src/api/schemas/$Chat_ChatOutputChunk.ts +++ b/sdks/ts/src/api/schemas/$Chat_ChatOutputChunk.ts @@ -3,55 +3,71 @@ /* tslint:disable */ /* eslint-disable */ export const $Chat_ChatOutputChunk = { - type: "all-of", description: `Streaming chat completion output`, - contains: [ - { - type: "Chat_BaseChatOutput", + properties: { + index: { + type: "number", + isRequired: true, + format: "uint32", }, - { + finish_reason: { + type: "all-of", + description: `The reason the model stopped generating tokens`, + contains: [ + { + type: "Chat_FinishReason", + }, + ], + isRequired: true, + }, + logprobs: { + type: "all-of", + description: `The log probabilities of tokens`, + contains: [ + { + type: "Chat_LogProbResponse", + }, + ], + }, + delta: { + description: `The message generated by the model`, properties: { - delta: { - description: `The message generated by the model`, - properties: { - role: { - type: "all-of", - description: `The role of the message`, - contains: [ - { - type: "Entries_ChatMLRole", - }, - ], - isRequired: true, - }, - content: { - type: "any-of", - description: `The content parts of the message`, - contains: [ - { - type: "string", - }, - { - type: "array", - contains: { - type: "string", - }, - }, - ], - isRequired: true, + role: { + type: "all-of", + description: `The role of the message`, + contains: [ + { + type: "Entries_ChatMLRole", }, - name: { + ], + isRequired: true, + }, + content: { + type: "any-of", + description: `The content parts of the message`, + contains: [ + { type: "string", - description: `Name`, }, - continue: { - type: "boolean", - description: `Whether to continue this message or return a new one`, + { + type: "array", + contains: { + type: "string", 
+ }, }, - }, + ], isRequired: true, }, + name: { + type: "string", + description: `Name`, + }, + continue: { + type: "boolean", + description: `Whether to continue this message or return a new one`, + }, }, + isRequired: true, }, - ], + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Chat_ChatSettings.ts b/sdks/ts/src/api/schemas/$Chat_ChatSettings.ts index 27e08e729..6e5c62bbb 100644 --- a/sdks/ts/src/api/schemas/$Chat_ChatSettings.ts +++ b/sdks/ts/src/api/schemas/$Chat_ChatSettings.ts @@ -3,75 +3,111 @@ /* tslint:disable */ /* eslint-disable */ export const $Chat_ChatSettings = { - type: "all-of", - contains: [ - { - type: "Chat_DefaultChatSettings", + properties: { + frequency_penalty: { + type: "number", + description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, + format: "float", + maximum: 2, + minimum: -2, }, - { - properties: { - model: { - type: "all-of", - description: `Identifier of the model to be used`, - contains: [ - { - type: "Common_identifierSafeUnicode", - }, - ], - }, - stream: { - type: "boolean", - description: `Indicates if the server should stream the response as it's generated`, - isRequired: true, - }, - stop: { - type: "array", - contains: { - type: "string", - }, - isRequired: true, - }, - seed: { - type: "number", - description: `If specified, the system will make a best effort to sample deterministically for that particular seed value`, - format: "int16", - maximum: 1000, - minimum: -1, - }, - max_tokens: { - type: "number", - description: `The maximum number of tokens to generate in the chat completion`, - format: "uint32", - minimum: 1, + presence_penalty: { + type: "number", + description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.`, + format: "float", + maximum: 2, + minimum: -2, + }, + temperature: { + type: "number", + description: `What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.`, + format: "float", + maximum: 5, + }, + top_p: { + type: "number", + description: `Defaults to 1. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.`, + format: "float", + maximum: 1, + }, + repetition_penalty: { + type: "number", + description: `Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, + format: "float", + maximum: 2, + }, + length_penalty: { + type: "number", + description: `Number between 0 and 2.0. 
1.0 is neutral and values larger than that penalize the number of tokens generated.`, + format: "float", + maximum: 2, + }, + min_p: { + type: "number", + description: `Minimum probability compared to leading token to be considered`, + format: "float", + maximum: 1, + }, + model: { + type: "all-of", + description: `Identifier of the model to be used`, + contains: [ + { + type: "Common_identifierSafeUnicode", }, - logit_bias: { - type: "dictionary", - contains: { - type: "Common_logit_bias", - }, + ], + }, + stream: { + type: "boolean", + description: `Indicates if the server should stream the response as it's generated`, + isRequired: true, + }, + stop: { + type: "array", + contains: { + type: "string", + }, + isRequired: true, + }, + seed: { + type: "number", + description: `If specified, the system will make a best effort to sample deterministically for that particular seed value`, + format: "int16", + maximum: 1000, + minimum: -1, + }, + max_tokens: { + type: "number", + description: `The maximum number of tokens to generate in the chat completion`, + format: "uint32", + minimum: 1, + }, + logit_bias: { + type: "dictionary", + contains: { + type: "Common_logit_bias", + }, + }, + response_format: { + type: "any-of", + description: `Response format (set to \`json_object\` to restrict output to JSON)`, + contains: [ + { + type: "Chat_SimpleCompletionResponseFormat", }, - response_format: { - type: "any-of", - description: `Response format (set to \`json_object\` to restrict output to JSON)`, - contains: [ - { - type: "Chat_SimpleCompletionResponseFormat", - }, - { - type: "Chat_SchemaCompletionResponseFormat", - }, - ], + { + type: "Chat_SchemaCompletionResponseFormat", }, - agent: { - type: "all-of", - description: `Agent ID of the agent to use for this interaction. (Only applicable for multi-agent sessions)`, - contains: [ - { - type: "Common_uuid", - }, - ], + ], + }, + agent: { + type: "all-of", + description: `Agent ID of the agent to use for this interaction. 
(Only applicable for multi-agent sessions)`, + contains: [ + { + type: "Common_uuid", }, - }, + ], }, - ], + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Chat_ChunkChatResponse.ts b/sdks/ts/src/api/schemas/$Chat_ChunkChatResponse.ts index 07b83d11e..03246e8fc 100644 --- a/sdks/ts/src/api/schemas/$Chat_ChunkChatResponse.ts +++ b/sdks/ts/src/api/schemas/$Chat_ChunkChatResponse.ts @@ -3,21 +3,55 @@ /* tslint:disable */ /* eslint-disable */ export const $Chat_ChunkChatResponse = { - type: "all-of", - contains: [ - { - type: "Chat_BaseChatResponse", + properties: { + choices: { + type: "array", + contains: { + type: "Chat_ChatOutputChunk", + }, + isRequired: true, }, - { - properties: { - choices: { - type: "array", - contains: { - type: "Chat_ChatOutputChunk", - }, - isRequired: true, + usage: { + type: "all-of", + description: `Usage statistics for the completion request`, + contains: [ + { + type: "Chat_CompetionUsage", }, + ], + }, + jobs: { + type: "array", + contains: { + type: "Common_uuid", + }, + isReadOnly: true, + isRequired: true, + }, + docs: { + type: "array", + contains: { + type: "Docs_DocReference", }, + isReadOnly: true, + isRequired: true, + }, + created_at: { + type: "string", + description: `When this resource was created as UTC date-time`, + isReadOnly: true, + isRequired: true, + format: "date-time", + }, + id: { + type: "all-of", + contains: [ + { + type: "Common_uuid", + }, + ], + isReadOnly: true, + isRequired: true, }, - ], + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Chat_DefaultChatSettings.ts b/sdks/ts/src/api/schemas/$Chat_DefaultChatSettings.ts index 573bf31bd..978aa99da 100644 --- a/sdks/ts/src/api/schemas/$Chat_DefaultChatSettings.ts +++ b/sdks/ts/src/api/schemas/$Chat_DefaultChatSettings.ts @@ -3,33 +3,51 @@ /* tslint:disable */ /* eslint-disable */ export const $Chat_DefaultChatSettings = { - type: "all-of", description: `Default settings for the chat session (also used by the agent)`, - contains: [ - { - type: "Chat_OpenAISettings", + properties: { + frequency_penalty: { + type: "number", + description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, + format: "float", + maximum: 2, + minimum: -2, }, - { - properties: { - repetition_penalty: { - type: "number", - description: `Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, - format: "float", - maximum: 2, - }, - length_penalty: { - type: "number", - description: `Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize number of tokens generated.`, - format: "float", - maximum: 2, - }, - min_p: { - type: "number", - description: `Minimum probability compared to leading token to be considered`, - format: "float", - maximum: 1, - }, - }, + presence_penalty: { + type: "number", + description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.`, + format: "float", + maximum: 2, + minimum: -2, }, - ], + temperature: { + type: "number", + description: `What sampling temperature to use, between 0 and 2. 
Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.`, + format: "float", + maximum: 5, + }, + top_p: { + type: "number", + description: `Defaults to 1. An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.`, + format: "float", + maximum: 1, + }, + repetition_penalty: { + type: "number", + description: `Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, + format: "float", + maximum: 2, + }, + length_penalty: { + type: "number", + description: `Number between 0 and 2.0. 1.0 is neutral and values larger than that penalize the number of tokens generated.`, + format: "float", + maximum: 2, + }, + min_p: { + type: "number", + description: `Minimum probability compared to leading token to be considered`, + format: "float", + maximum: 1, + }, + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Chat_MessageChatResponse.ts b/sdks/ts/src/api/schemas/$Chat_MessageChatResponse.ts index 8b80d8698..c92f47a26 100644 --- a/sdks/ts/src/api/schemas/$Chat_MessageChatResponse.ts +++ b/sdks/ts/src/api/schemas/$Chat_MessageChatResponse.ts @@ -3,29 +3,63 @@ /* tslint:disable */ /* eslint-disable */ export const $Chat_MessageChatResponse = { - type: "all-of", - contains: [ - { - type: "Chat_BaseChatResponse", - }, - { - properties: { - choices: { - type: "array", - contains: { - type: "any-of", - contains: [ - { - type: "Chat_SingleChatOutput", - }, - { - type: "Chat_MultipleChatOutput", - }, - ], + properties: { + choices: { + type: "array", + contains: { + type: "any-of", + contains: [ + { + type: "Chat_SingleChatOutput", + }, + { + type: "Chat_MultipleChatOutput", }, - isRequired: true, + ], + }, + isRequired: true, + }, + usage: { + type: "all-of", + description: `Usage statistics for the completion request`, + contains: [ + { + type: "Chat_CompetionUsage", }, + ], + }, + jobs: { + type: "array", + contains: { + type: "Common_uuid", }, + isReadOnly: true, + isRequired: true, + }, + docs: { + type: "array", + contains: { + type: "Docs_DocReference", + }, + isReadOnly: true, + isRequired: true, + }, + created_at: { + type: "string", + description: `When this resource was created as UTC date-time`, + isReadOnly: true, + isRequired: true, + format: "date-time", + }, + id: { + type: "all-of", + contains: [ + { + type: "Common_uuid", + }, + ], + isReadOnly: true, + isRequired: true, }, - ], + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Chat_MultipleChatOutput.ts b/sdks/ts/src/api/schemas/$Chat_MultipleChatOutput.ts index 6b75cf9be..dec80f7db 100644 --- a/sdks/ts/src/api/schemas/$Chat_MultipleChatOutput.ts +++ b/sdks/ts/src/api/schemas/$Chat_MultipleChatOutput.ts @@ -48,7 +48,21 @@ export const $Chat_MultipleChatOutput = { tool_calls: { type: "array", contains: { - type: "Tools_ChosenToolCall", + type: "any-of", + contains: [ + { + type: "Tools_ChosenFunctionCall", + }, + { + type: "Tools_ChosenIntegrationCall", + }, + { + type: "Tools_ChosenSystemCall", + }, + { + type: "Tools_ChosenApiCall", + }, + ], }, isReadOnly: true, isNullable: true, diff --git a/sdks/ts/src/api/schemas/$Chat_OpenAISettings.ts 
b/sdks/ts/src/api/schemas/$Chat_OpenAISettings.ts deleted file mode 100644 index 61a12ed7e..000000000 --- a/sdks/ts/src/api/schemas/$Chat_OpenAISettings.ts +++ /dev/null @@ -1,34 +0,0 @@ -/* generated using openapi-typescript-codegen -- do no edit */ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ -export const $Chat_OpenAISettings = { - properties: { - frequency_penalty: { - type: "number", - description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, - format: "float", - maximum: 2, - minimum: -2, - }, - presence_penalty: { - type: "number", - description: `Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.`, - format: "float", - maximum: 2, - minimum: -2, - }, - temperature: { - type: "number", - description: `What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.`, - format: "float", - maximum: 5, - }, - top_p: { - type: "number", - description: `Defaults to 1 An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.`, - format: "float", - maximum: 1, - }, - }, -} as const; diff --git a/sdks/ts/src/api/schemas/$Chat_SingleChatOutput.ts b/sdks/ts/src/api/schemas/$Chat_SingleChatOutput.ts index 1947955a3..b45f09d34 100644 --- a/sdks/ts/src/api/schemas/$Chat_SingleChatOutput.ts +++ b/sdks/ts/src/api/schemas/$Chat_SingleChatOutput.ts @@ -46,7 +46,21 @@ export const $Chat_SingleChatOutput = { tool_calls: { type: "array", contains: { - type: "Tools_ChosenToolCall", + type: "any-of", + contains: [ + { + type: "Tools_ChosenFunctionCall", + }, + { + type: "Tools_ChosenIntegrationCall", + }, + { + type: "Tools_ChosenSystemCall", + }, + { + type: "Tools_ChosenApiCall", + }, + ], }, isReadOnly: true, isNullable: true, diff --git a/sdks/ts/src/api/schemas/$Chat_TokenLogProb.ts b/sdks/ts/src/api/schemas/$Chat_TokenLogProb.ts index 5ca3aa5f3..32b9a6903 100644 --- a/sdks/ts/src/api/schemas/$Chat_TokenLogProb.ts +++ b/sdks/ts/src/api/schemas/$Chat_TokenLogProb.ts @@ -3,22 +3,31 @@ /* tslint:disable */ /* eslint-disable */ export const $Chat_TokenLogProb = { - type: "all-of", - contains: [ - { - type: "Chat_BaseTokenLogProb", + properties: { + token: { + type: "string", + isRequired: true, }, - { - properties: { - top_logprobs: { - type: "array", - contains: { - type: "Chat_BaseTokenLogProb", - }, - isReadOnly: true, - isRequired: true, - }, + logprob: { + type: "number", + description: `The log probability of the token`, + isRequired: true, + format: "float", + }, + bytes: { + type: "array", + contains: { + type: "number", + format: "uint16", + }, + }, + top_logprobs: { + type: "array", + contains: { + type: "Chat_BaseTokenLogProb", }, + isReadOnly: true, + isRequired: true, }, - ], + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Docs_BaseDocSearchRequest.ts b/sdks/ts/src/api/schemas/$Docs_BaseDocSearchRequest.ts deleted file mode 100644 index 99188755e..000000000 --- a/sdks/ts/src/api/schemas/$Docs_BaseDocSearchRequest.ts +++ 
/dev/null @@ -1,19 +0,0 @@ -/* generated using openapi-typescript-codegen -- do no edit */ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ -export const $Docs_BaseDocSearchRequest = { - properties: { - limit: { - type: "number", - isRequired: true, - format: "uint16", - maximum: 100, - minimum: 1, - }, - lang: { - type: "Enum", - isRequired: true, - }, - }, -} as const; diff --git a/sdks/ts/src/api/schemas/$Docs_HybridDocSearchRequest.ts b/sdks/ts/src/api/schemas/$Docs_HybridDocSearchRequest.ts index 2bc5005fb..39e0bcf2b 100644 --- a/sdks/ts/src/api/schemas/$Docs_HybridDocSearchRequest.ts +++ b/sdks/ts/src/api/schemas/$Docs_HybridDocSearchRequest.ts @@ -3,38 +3,41 @@ /* tslint:disable */ /* eslint-disable */ export const $Docs_HybridDocSearchRequest = { - type: "all-of", - contains: [ - { - type: "Docs_BaseDocSearchRequest", + properties: { + limit: { + type: "number", + isRequired: true, + format: "uint16", + maximum: 100, + minimum: 1, }, - { - properties: { - confidence: { - type: "number", - description: `The confidence cutoff level`, - isRequired: true, - maximum: 1, - }, - alpha: { - type: "number", - description: `The weight to apply to BM25 vs Vector search results. 0 => pure BM25; 1 => pure vector;`, - isRequired: true, - maximum: 1, - }, - text: { - type: "string", - description: `Text to use in the search. In \`hybrid\` search mode, either \`text\` or both \`text\` and \`vector\` fields are required.`, - isRequired: true, - }, - vector: { - type: "array", - contains: { - type: "number", - }, - isRequired: true, - }, + lang: { + type: "Enum", + isRequired: true, + }, + confidence: { + type: "number", + description: `The confidence cutoff level`, + isRequired: true, + maximum: 1, + }, + alpha: { + type: "number", + description: `The weight to apply to BM25 vs Vector search results. 0 => pure BM25; 1 => pure vector;`, + isRequired: true, + maximum: 1, + }, + text: { + type: "string", + description: `Text to use in the search. 
In \`hybrid\` search mode, either \`text\` or both \`text\` and \`vector\` fields are required.`, + isRequired: true, + }, + vector: { + type: "array", + contains: { + type: "number", }, + isRequired: true, }, - ], + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Docs_TextOnlyDocSearchRequest.ts b/sdks/ts/src/api/schemas/$Docs_TextOnlyDocSearchRequest.ts index b9711dbc4..e1184f8ae 100644 --- a/sdks/ts/src/api/schemas/$Docs_TextOnlyDocSearchRequest.ts +++ b/sdks/ts/src/api/schemas/$Docs_TextOnlyDocSearchRequest.ts @@ -3,19 +3,22 @@ /* tslint:disable */ /* eslint-disable */ export const $Docs_TextOnlyDocSearchRequest = { - type: "all-of", - contains: [ - { - type: "Docs_BaseDocSearchRequest", + properties: { + limit: { + type: "number", + isRequired: true, + format: "uint16", + maximum: 100, + minimum: 1, }, - { - properties: { - text: { - type: "string", - description: `Text to use in the search.`, - isRequired: true, - }, - }, + lang: { + type: "Enum", + isRequired: true, }, - ], + text: { + type: "string", + description: `Text to use in the search.`, + isRequired: true, + }, + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Docs_VectorDocSearchRequest.ts b/sdks/ts/src/api/schemas/$Docs_VectorDocSearchRequest.ts index af6de0b12..98e977899 100644 --- a/sdks/ts/src/api/schemas/$Docs_VectorDocSearchRequest.ts +++ b/sdks/ts/src/api/schemas/$Docs_VectorDocSearchRequest.ts @@ -3,27 +3,30 @@ /* tslint:disable */ /* eslint-disable */ export const $Docs_VectorDocSearchRequest = { - type: "all-of", - contains: [ - { - type: "Docs_BaseDocSearchRequest", + properties: { + limit: { + type: "number", + isRequired: true, + format: "uint16", + maximum: 100, + minimum: 1, }, - { - properties: { - confidence: { - type: "number", - description: `The confidence cutoff level`, - isRequired: true, - maximum: 1, - }, - vector: { - type: "array", - contains: { - type: "number", - }, - isRequired: true, - }, + lang: { + type: "Enum", + isRequired: true, + }, + confidence: { + type: "number", + description: `The confidence cutoff level`, + isRequired: true, + maximum: 1, + }, + vector: { + type: "array", + contains: { + type: "number", }, + isRequired: true, }, - ], + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Entries_BaseEntry.ts b/sdks/ts/src/api/schemas/$Entries_BaseEntry.ts index bcdb7122e..2faa79caa 100644 --- a/sdks/ts/src/api/schemas/$Entries_BaseEntry.ts +++ b/sdks/ts/src/api/schemas/$Entries_BaseEntry.ts @@ -20,7 +20,16 @@ export const $Entries_BaseEntry = { type: "Tools_Tool", }, { - type: "Tools_ChosenToolCall", + type: "Tools_ChosenFunctionCall", + }, + { + type: "Tools_ChosenIntegrationCall", + }, + { + type: "Tools_ChosenSystemCall", + }, + { + type: "Tools_ChosenApiCall", }, { type: "string", diff --git a/sdks/ts/src/api/schemas/$Executions_Transition.ts b/sdks/ts/src/api/schemas/$Executions_Transition.ts index 1fbdf61e0..c6067922e 100644 --- a/sdks/ts/src/api/schemas/$Executions_Transition.ts +++ b/sdks/ts/src/api/schemas/$Executions_Transition.ts @@ -3,61 +3,77 @@ /* tslint:disable */ /* eslint-disable */ export const $Executions_Transition = { - type: "all-of", - contains: [ - { - type: "Executions_TransitionEvent", - }, - { - properties: { - execution_id: { - type: "all-of", - contains: [ - { - type: "Common_uuid", - }, - ], - isReadOnly: true, - isRequired: true, - }, - current: { - type: "all-of", - contains: [ - { - type: "Executions_TransitionTarget", - }, - ], - isReadOnly: true, - isRequired: true, + properties: { + type: { + type: "Enum", + isReadOnly: true, + 
isRequired: true, + }, + output: { + properties: {}, + isReadOnly: true, + isRequired: true, + }, + created_at: { + type: "string", + description: `When this resource was created as UTC date-time`, + isReadOnly: true, + isRequired: true, + format: "date-time", + }, + updated_at: { + type: "string", + description: `When this resource was updated as UTC date-time`, + isReadOnly: true, + isRequired: true, + format: "date-time", + }, + execution_id: { + type: "all-of", + contains: [ + { + type: "Common_uuid", }, - next: { - type: "all-of", - contains: [ - { - type: "Executions_TransitionTarget", - }, - ], - isReadOnly: true, - isRequired: true, - isNullable: true, + ], + isReadOnly: true, + isRequired: true, + }, + current: { + type: "all-of", + contains: [ + { + type: "Executions_TransitionTarget", }, - id: { - type: "all-of", - contains: [ - { - type: "Common_uuid", - }, - ], - isReadOnly: true, - isRequired: true, + ], + isReadOnly: true, + isRequired: true, + }, + next: { + type: "all-of", + contains: [ + { + type: "Executions_TransitionTarget", }, - metadata: { - type: "dictionary", - contains: { - properties: {}, - }, + ], + isReadOnly: true, + isRequired: true, + isNullable: true, + }, + id: { + type: "all-of", + contains: [ + { + type: "Common_uuid", }, + ], + isReadOnly: true, + isRequired: true, + }, + metadata: { + type: "dictionary", + contains: { + properties: {}, }, }, - ], + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Tasks_TaskTool.ts b/sdks/ts/src/api/schemas/$Tasks_TaskTool.ts index f329deb7c..e2e924543 100644 --- a/sdks/ts/src/api/schemas/$Tasks_TaskTool.ts +++ b/sdks/ts/src/api/schemas/$Tasks_TaskTool.ts @@ -3,19 +3,50 @@ /* tslint:disable */ /* eslint-disable */ export const $Tasks_TaskTool = { - type: "all-of", - contains: [ - { - type: "Tools_CreateToolRequest", + properties: { + inherited: { + type: "boolean", + description: `Read-only: Whether the tool was inherited or not. Only applies within tasks.`, + isReadOnly: true, }, - { - properties: { - inherited: { - type: "boolean", - description: `Read-only: Whether the tool was inherited or not. Only applies within tasks.`, - isReadOnly: true, + type: { + type: "all-of", + description: `Whether this tool is a \`function\`, \`api_call\`, \`system\` etc. 
(Only \`function\` tool supported right now). The type of the tool`, + contains: [ + { + type: "Tools_ToolType", }, - }, + ], + isRequired: true, }, - ], + name: { + type: "all-of", + description: `Name of the tool (must be unique for this agent and a valid Python identifier string)`, + contains: [ + { + type: "Common_validPythonIdentifier", + }, + ], + isRequired: true, + }, + function: { + type: "all-of", + description: `The function to call`, + contains: [ + { + type: "Tools_FunctionDef", + }, + ], + isRequired: true, + }, + integration: { + properties: {}, + }, + system: { + properties: {}, + }, + api_call: { + properties: {}, + }, + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Tools_ChosenApiCall.ts b/sdks/ts/src/api/schemas/$Tools_ChosenApiCall.ts new file mode 100644 index 000000000..178481bec --- /dev/null +++ b/sdks/ts/src/api/schemas/$Tools_ChosenApiCall.ts @@ -0,0 +1,26 @@ +/* generated using openapi-typescript-codegen -- do no edit */ +/* istanbul ignore file */ +/* tslint:disable */ +/* eslint-disable */ +export const $Tools_ChosenApiCall = { + properties: { + type: { + type: "Enum", + isRequired: true, + }, + api_call: { + properties: {}, + isRequired: true, + }, + id: { + type: "all-of", + contains: [ + { + type: "Common_uuid", + }, + ], + isReadOnly: true, + isRequired: true, + }, + }, +} as const; diff --git a/sdks/ts/src/api/schemas/$Tools_ChosenFunctionCall.ts b/sdks/ts/src/api/schemas/$Tools_ChosenFunctionCall.ts index 3d550db11..a00a3eeb4 100644 --- a/sdks/ts/src/api/schemas/$Tools_ChosenFunctionCall.ts +++ b/sdks/ts/src/api/schemas/$Tools_ChosenFunctionCall.ts @@ -3,32 +3,30 @@ /* tslint:disable */ /* eslint-disable */ export const $Tools_ChosenFunctionCall = { - type: "all-of", - contains: [ - { - type: "Tools_ChosenToolCall", + properties: { + type: { + type: "Enum", + isRequired: true, }, - { - properties: { - function: { + function: { + type: "all-of", + description: `The function to call`, + contains: [ + { type: "Tools_FunctionCallOption", - isRequired: true, }, - type: { - type: "Enum", - isRequired: true, - }, - function: { - type: "all-of", - description: `The function to call`, - contains: [ - { - type: "Tools_FunctionCallOption", - }, - ], - isRequired: true, + ], + isRequired: true, + }, + id: { + type: "all-of", + contains: [ + { + type: "Common_uuid", }, - }, + ], + isReadOnly: true, + isRequired: true, }, - ], + }, } as const; diff --git a/sdks/ts/src/api/schemas/$Tools_ChosenIntegrationCall.ts b/sdks/ts/src/api/schemas/$Tools_ChosenIntegrationCall.ts new file mode 100644 index 000000000..35f2f7871 --- /dev/null +++ b/sdks/ts/src/api/schemas/$Tools_ChosenIntegrationCall.ts @@ -0,0 +1,26 @@ +/* generated using openapi-typescript-codegen -- do no edit */ +/* istanbul ignore file */ +/* tslint:disable */ +/* eslint-disable */ +export const $Tools_ChosenIntegrationCall = { + properties: { + type: { + type: "Enum", + isRequired: true, + }, + integration: { + properties: {}, + isRequired: true, + }, + id: { + type: "all-of", + contains: [ + { + type: "Common_uuid", + }, + ], + isReadOnly: true, + isRequired: true, + }, + }, +} as const; diff --git a/sdks/ts/src/api/schemas/$Tools_ChosenSystemCall.ts b/sdks/ts/src/api/schemas/$Tools_ChosenSystemCall.ts new file mode 100644 index 000000000..32e5c6f2f --- /dev/null +++ b/sdks/ts/src/api/schemas/$Tools_ChosenSystemCall.ts @@ -0,0 +1,26 @@ +/* generated using openapi-typescript-codegen -- do no edit */ +/* istanbul ignore file */ +/* tslint:disable */ +/* eslint-disable */ +export const 
$Tools_ChosenSystemCall = { + properties: { + type: { + type: "Enum", + isRequired: true, + }, + system: { + properties: {}, + isRequired: true, + }, + id: { + type: "all-of", + contains: [ + { + type: "Common_uuid", + }, + ], + isReadOnly: true, + isRequired: true, + }, + }, +} as const; diff --git a/sdks/ts/src/api/schemas/$Tools_ChosenToolCall.ts b/sdks/ts/src/api/schemas/$Tools_ChosenToolCall.ts deleted file mode 100644 index 9db118f36..000000000 --- a/sdks/ts/src/api/schemas/$Tools_ChosenToolCall.ts +++ /dev/null @@ -1,41 +0,0 @@ -/* generated using openapi-typescript-codegen -- do no edit */ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ -export const $Tools_ChosenToolCall = { - description: `The response tool value generated by the model`, - properties: { - type: { - type: "all-of", - description: `Whether this tool is a \`function\`, \`api_call\`, \`system\` etc. (Only \`function\` tool supported right now)`, - contains: [ - { - type: "Tools_ToolType", - }, - ], - isRequired: true, - }, - function: { - type: "Tools_FunctionCallOption", - }, - integration: { - properties: {}, - }, - system: { - properties: {}, - }, - api_call: { - properties: {}, - }, - id: { - type: "all-of", - contains: [ - { - type: "Common_uuid", - }, - ], - isReadOnly: true, - isRequired: true, - }, - }, -} as const; diff --git a/sdks/ts/src/api/schemas/$Tools_CreateToolRequest.ts b/sdks/ts/src/api/schemas/$Tools_CreateToolRequest.ts deleted file mode 100644 index 14e86d6ec..000000000 --- a/sdks/ts/src/api/schemas/$Tools_CreateToolRequest.ts +++ /dev/null @@ -1,48 +0,0 @@ -/* generated using openapi-typescript-codegen -- do no edit */ -/* istanbul ignore file */ -/* tslint:disable */ -/* eslint-disable */ -export const $Tools_CreateToolRequest = { - description: `Payload for creating a tool`, - properties: { - type: { - type: "all-of", - description: `Whether this tool is a \`function\`, \`api_call\`, \`system\` etc. 
(Only \`function\` tool supported right now)The type of the tool`, - contains: [ - { - type: "Tools_ToolType", - }, - ], - isRequired: true, - }, - name: { - type: "all-of", - description: `Name of the tool (must be unique for this agent and a valid python identifier string )`, - contains: [ - { - type: "Common_validPythonIdentifier", - }, - ], - isRequired: true, - }, - function: { - type: "all-of", - description: `The function to call`, - contains: [ - { - type: "Tools_FunctionDef", - }, - ], - isRequired: true, - }, - integration: { - properties: {}, - }, - system: { - properties: {}, - }, - api_call: { - properties: {}, - }, - }, -} as const; diff --git a/sdks/ts/src/api/services/DefaultService.ts b/sdks/ts/src/api/services/DefaultService.ts index 70b811466..8c3c332ac 100644 --- a/sdks/ts/src/api/services/DefaultService.ts +++ b/sdks/ts/src/api/services/DefaultService.ts @@ -431,11 +431,7 @@ export class DefaultService { id, requestBody, }: { - accept: - | "application/yaml" - | "text/x-yaml" - | "text/yaml" - | "application/json"; + accept: "application/json"; /** * ID of parent resource */ @@ -724,11 +720,7 @@ export class DefaultService { id, requestBody, }: { - accept: - | "application/yaml" - | "text/x-yaml" - | "text/yaml" - | "application/json"; + accept: "application/json"; /** * ID of the agent */ diff --git a/typespec/chat/models.tsp b/typespec/chat/models.tsp index 2b2fea34f..5cd66f004 100644 --- a/typespec/chat/models.tsp +++ b/typespec/chat/models.tsp @@ -130,11 +130,13 @@ model vLLMSettings { } /** Default settings for the chat session (also used by the agent) */ -model DefaultChatSettings extends OpenAISettings { +model DefaultChatSettings { + ...OpenAISettings; ...vLLMSettings; } -model ChatSettings extends DefaultChatSettings { +model ChatSettings { + ...DefaultChatSettings; ...CommonChatSettings; } @@ -165,7 +167,8 @@ model ChatInputData { tool_choice?: ToolChoiceOption; } -model ChatInput extends ChatInputData { +model ChatInput { + ...ChatInputData; ...MemoryAccessOptions; ...ChatSettings; } @@ -179,7 +182,9 @@ model BaseTokenLogProb { bytes?: uint16[]; } -model TokenLogProb extends BaseTokenLogProb { +model TokenLogProb { + ...BaseTokenLogProb; + /** The log probabilities of the tokens */ @visibility("read") @minItems(1) @@ -217,7 +222,9 @@ model MultipleChatOutput extends BaseChatOutput { alias ChatOutput = SingleChatOutput | MultipleChatOutput; /** Streaming chat completion output */ -model ChatOutputChunk extends BaseChatOutput { +model ChatOutputChunk { + ...BaseChatOutput; + /** The message generated by the model */ delta: InputChatMLMessage; } @@ -238,18 +245,20 @@ model BaseChatResponse { ...HasId; } -model ChunkChatResponse extends BaseChatResponse { +model ChunkChatResponse { @header contentType: eventStream; /** The deltas generated by the model */ choices: ChatOutputChunk[]; + ...BaseChatResponse; } -model MessageChatResponse extends BaseChatResponse { +model MessageChatResponse { @header contentType: json; /** The deltas generated by the model */ choices: ChatOutput[]; + ...BaseChatResponse; } alias ChatResponse = ChunkChatResponse | MessageChatResponse; diff --git a/typespec/docs/models.tsp b/typespec/docs/models.tsp index ee283fe02..b6f3ac3c1 100644 --- a/typespec/docs/models.tsp +++ b/typespec/docs/models.tsp @@ -74,7 +74,9 @@ model BaseDocSearchRequest { lang: "en-US" = "en-US"; } -model VectorDocSearchRequest extends BaseDocSearchRequest { +model VectorDocSearchRequest { + ...BaseDocSearchRequest; + /** The confidence cutoff level */ @minValue(0) 
@maxValue(1) @@ -86,14 +88,18 @@ model VectorDocSearchRequest extends BaseDocSearchRequest { text?: never; } -model TextOnlyDocSearchRequest extends BaseDocSearchRequest { +model TextOnlyDocSearchRequest { + ...BaseDocSearchRequest; + /** Text to use in the search. */ text: string; vector?: never; } -model HybridDocSearchRequest extends BaseDocSearchRequest { +model HybridDocSearchRequest { + ...BaseDocSearchRequest; + /** The confidence cutoff level */ @minValue(0) @maxValue(1) diff --git a/typespec/executions/models.tsp b/typespec/executions/models.tsp index 45d3e752e..778eca72f 100644 --- a/typespec/executions/models.tsp +++ b/typespec/executions/models.tsp @@ -132,7 +132,9 @@ model TransitionEvent { ...HasTimestamps; } -model Transition extends TransitionEvent { +model Transition { + ...TransitionEvent; + @visibility("read") execution_id: Execution.id; diff --git a/typespec/tasks/endpoints.tsp b/typespec/tasks/endpoints.tsp index 3eed2b36a..e42d40aee 100644 --- a/typespec/tasks/endpoints.tsp +++ b/typespec/tasks/endpoints.tsp @@ -19,8 +19,8 @@ interface CreateOrUpdateEndpoints { @post @doc("Create or update a task") createOrUpdate( - @header contentType: json, - @header accept: yaml | json, + @header contentType: yaml | json, + @header accept: json, @path @doc("ID of the agent") @@ -50,8 +50,8 @@ interface Endpoints @post @doc("Create a new task") create( - @header contentType: json, - @header accept: yaml | json, + @header contentType: yaml | json, + @header accept: json, @path @doc("ID of parent resource") diff --git a/typespec/tasks/models.tsp b/typespec/tasks/models.tsp index 4ec23f31b..c2d48bbcc 100644 --- a/typespec/tasks/models.tsp +++ b/typespec/tasks/models.tsp @@ -24,10 +24,12 @@ model Workflow { steps: WorkflowStep[]; } -model TaskTool extends CreateToolRequest { +model TaskTool { /** Read-only: Whether the tool was inherited or not. Only applies within tasks. */ @visibility("read") inherited?: boolean = false; + + ...CreateToolRequest; } /** Object describing a Task */ diff --git a/typespec/tools/models.tsp b/typespec/tools/models.tsp index 61964bafc..ac755f7e0 100644 --- a/typespec/tools/models.tsp +++ b/typespec/tools/models.tsp @@ -108,22 +108,29 @@ model UpdateToolRequest { model PatchToolRequest is UpdateToolRequest {} /** The response tool value generated by the model */ -@discriminator("type") -model ChosenToolCall { - /** Whether this tool is a `function`, `api_call`, `system` etc. (Only `function` tool supported right now) */ - type: ToolType; +alias ChosenToolCall = ChosenFunctionCall | ChosenIntegrationCall | ChosenSystemCall | ChosenApiCall; - function?: FunctionCallOption; - integration?: unknown; - system?: unknown; - api_call?: unknown; +model ChosenFunctionCall { + type: ToolType.function; + /** The function to call */ + function: FunctionCallOption; + ...HasId; +} +model ChosenIntegrationCall { + type: ToolType.integration; + integration: unknown; ...HasId; } -model ChosenFunctionCall extends ChosenToolCall { - type: ToolType.function; +model ChosenSystemCall { + type: ToolType.system; + system: unknown; + ...HasId; +} - /** The function to call */ - function: FunctionCallOption; +model ChosenApiCall { + type: ToolType.api_call; + api_call: unknown; + ...HasId; }
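
Taken together, the TypeSpec changes above replace `extends` with the spread operator, so composed models are emitted as flat property lists rather than `all-of` wrappers around a base schema. A rough TypeScript analogy of what that means for the generated types (an illustration, not generated output):

type BaseChatResponse = {
  usage?: unknown; // Chat_CompetionUsage in the generated models
  jobs: string[];
  docs: unknown[];
  created_at: string;
  id: string;
};

// Before: `model ChunkChatResponse extends BaseChatResponse` composed by
// reference, the way the old all-of schemas did.
type ChunkViaAllOf = BaseChatResponse & { choices: unknown[] };

// After: `model ChunkChatResponse { ...BaseChatResponse }` copies the fields
// in, so every property appears directly on the emitted schema.
type ChunkFlattened = {
  choices: unknown[];
  usage?: unknown;
  jobs: string[];
  docs: unknown[];
  created_at: string;
  id: string;
};

The two types are mutually assignable; the change affects the emitted schema shape, not the data on the wire.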
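
With `ChosenToolCall` now an alias for a union of four concrete call models, consumers branch on the `type` discriminant instead of probing optional fields. A minimal TypeScript sketch; the field shapes are assumptions read off the schemas above, with `FunctionCallOption` reduced to a bare `name`:

type ChosenFunctionCall = {
  type: "function";
  function: { name: string }; // FunctionCallOption shape assumed
  id: string;
};
type ChosenIntegrationCall = { type: "integration"; integration: unknown; id: string };
type ChosenSystemCall = { type: "system"; system: unknown; id: string };
type ChosenApiCall = { type: "api_call"; api_call: unknown; id: string };
type ChosenToolCall =
  | ChosenFunctionCall
  | ChosenIntegrationCall
  | ChosenSystemCall
  | ChosenApiCall;

function describeToolCall(call: ChosenToolCall): string {
  // Each case narrows `call` to one concrete variant.
  switch (call.type) {
    case "function":
      return `call ${call.id} invokes function ${call.function.name}`;
    case "integration":
      return `call ${call.id} runs an integration`;
    case "system":
      return `call ${call.id} runs a system operation`;
    case "api_call":
      return `call ${call.id} performs a raw API call`;
  }
}

Under strict compiler settings the exhaustive switch is the payoff of the refactor: adding a fifth variant to the alias turns the missing case into a compile-time error, since the function can no longer be proven to return a string.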
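
`Chat_MessageChatResponse.choices` likewise carries an inline any-of of `Chat_SingleChatOutput` and `Chat_MultipleChatOutput`. A sketch of pulling text out of either shape; the `message`/`messages` field names and the string-or-parts content type are assumptions based on the output model names, not code from this patch:

type ChatMessage = { role: string; content: string | string[] };
type SingleChatOutput = { index: number; message: ChatMessage };
type MultipleChatOutput = { index: number; messages: ChatMessage[] };
type ChatOutput = SingleChatOutput | MultipleChatOutput;

function contentToText(content: string | string[]): string {
  // Content may be a single string or an array of string parts.
  return Array.isArray(content) ? content.join("\n") : content;
}

function choiceToText(choice: ChatOutput): string {
  // The `in` check narrows the union to the single- or multi-output shape.
  return "message" in choice
    ? contentToText(choice.message.content)
    : choice.messages.map((m) => contentToText(m.content)).join("\n");
}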
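
`Chat_TokenLogProb` is flattened the same way: `token`, `logprob`, and `bytes` now sit next to `top_logprobs` instead of behind a `Chat_BaseTokenLogProb` reference. Since the values are natural-log probabilities, a small sketch of rendering them as percentages (shapes assumed from the schema above):

type BaseTokenLogProb = { token: string; logprob: number; bytes?: number[] };
type TokenLogProb = BaseTokenLogProb & { top_logprobs: BaseTokenLogProb[] };

// exp(logprob) recovers the probability from a natural-log value.
function probability(entry: BaseTokenLogProb): number {
  return Math.exp(entry.logprob);
}

function topAlternatives(entry: TokenLogProb): string[] {
  return entry.top_logprobs.map(
    (alt) => `${alt.token}: ${(probability(alt) * 100).toFixed(2)}%`,
  );
}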
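
The endpoint changes in typespec/tasks/endpoints.tsp invert the old content negotiation: task create and create-or-update now accept YAML (or JSON) request bodies and always answer in JSON, which is why `DefaultService` narrows `accept` to `application/json`. A sketch of the resulting HTTP contract; the base URL, route, and IDs below are placeholders, not values from this patch:

// Hypothetical endpoint values; substitute your deployment's base URL and IDs.
const baseUrl = "https://example.com/api";
const agentId = "00000000-0000-0000-0000-000000000000";

async function createTaskFromYaml(yamlBody: string): Promise<unknown> {
  const res = await fetch(`${baseUrl}/agents/${agentId}/tasks`, {
    method: "POST",
    headers: {
      "Content-Type": "text/yaml", // YAML request bodies are accepted after this change
      Accept: "application/json", // responses are JSON only
    },
    body: yamlBody,
  });
  if (!res.ok) throw new Error(`Task creation failed: ${res.status}`);
  return res.json();
}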