

chore: auto-gen by protobufs
triggered by commit: instill-ai/protobufs@039399f
droplet-bot committed Jan 19, 2024
1 parent 40b28f2 commit 59c3b36
Showing 31 changed files with 1,138 additions and 1,013 deletions.
58 changes: 29 additions & 29 deletions common/task/v1alpha/task_pb2.pyi
@@ -22,63 +22,63 @@ class _Task:
class _TaskEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_Task.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
TASK_UNSPECIFIED: _Task.ValueType # 0
"""Task: UNSPECIFIED"""
"""Unspecified."""
TASK_CLASSIFICATION: _Task.ValueType # 1
"""Task: CLASSIFICATION"""
"""Image Classification - classify images into predefined categories."""
TASK_DETECTION: _Task.ValueType # 2
"""Task: DETECTION"""
"""Object Detection - detect and localize multiple objects in images."""
TASK_KEYPOINT: _Task.ValueType # 3
"""Task: KEYPOINT"""
"""Keypoint Detection - detect and localize multiple keypoints of objects in images."""
TASK_OCR: _Task.ValueType # 4
"""Task: OCR"""
"""OCR (Optical Character Recognition) - detect and recognize text in images."""
TASK_INSTANCE_SEGMENTATION: _Task.ValueType # 5
"""Task: INSTANCE SEGMENTATION"""
"""Instance Segmentation - detect, localize and delineate multiple objects in images."""
TASK_SEMANTIC_SEGMENTATION: _Task.ValueType # 6
"""Task: SEMANTIC SEGMENTATION"""
"""Semantic Segmentation - classify image pixels into predefined categories."""
TASK_TEXT_TO_IMAGE: _Task.ValueType # 7
"""Task: TEXT TO IMAGE"""
"""Text to Image - generate images from input text prompts."""
TASK_TEXT_GENERATION: _Task.ValueType # 8
"""Task: TEXT GENERATION"""
"""Text Generation - generate texts from input text prompts."""
TASK_TEXT_GENERATION_CHAT: _Task.ValueType # 9
"""Task: TEXT GENERATION CHAT"""
"""Conversational Text Generation - generate text as responses to a dialog input."""
TASK_VISUAL_QUESTION_ANSWERING: _Task.ValueType # 10
"""Task: VISUAL QUESTION ANSWERING"""
"""Visual Question Answering - generate text as a response to a visual prompt."""
TASK_IMAGE_TO_IMAGE: _Task.ValueType # 11
"""Task: IMAGE TO IMAGE"""
"""Image to Image - generate an image from another image."""
TASK_TEXT_EMBEDDINGS: _Task.ValueType # 12
"""Task: TEXT EMBEDDINGS"""
"""Text Embeddings - generate an embedding (a representation as coordinates) from a text input."""
TASK_SPEECH_RECOGNITION: _Task.ValueType # 13
"""Task: SPEECH RECOGNITION"""
"""Speech Recognition - transcribe the words in an audio input."""

class Task(_Task, metaclass=_TaskEnumTypeWrapper):
"""Task enumerates the AI task type"""
"""Task enumerates the AI task that a model is designed to solve."""

TASK_UNSPECIFIED: Task.ValueType # 0
"""Task: UNSPECIFIED"""
"""Unspecified."""
TASK_CLASSIFICATION: Task.ValueType # 1
"""Task: CLASSIFICATION"""
"""Image Classification - classify images into predefined categories."""
TASK_DETECTION: Task.ValueType # 2
"""Task: DETECTION"""
"""Object Detection - detect and localize multiple objects in images."""
TASK_KEYPOINT: Task.ValueType # 3
"""Task: KEYPOINT"""
"""Keypoint Detection - detect and localize multiple keypoints of objects in images."""
TASK_OCR: Task.ValueType # 4
"""Task: OCR"""
"""OCR (Optical Character Recognition) - detect and recognize text in images."""
TASK_INSTANCE_SEGMENTATION: Task.ValueType # 5
"""Task: INSTANCE SEGMENTATION"""
"""Instance Segmentation - detect, localize and delineate multiple objects in images."""
TASK_SEMANTIC_SEGMENTATION: Task.ValueType # 6
"""Task: SEMANTIC SEGMENTATION"""
"""Semantic Segmentation - classify image pixels into predefined categories."""
TASK_TEXT_TO_IMAGE: Task.ValueType # 7
"""Task: TEXT TO IMAGE"""
"""Text to Image - generate images from input text prompts."""
TASK_TEXT_GENERATION: Task.ValueType # 8
"""Task: TEXT GENERATION"""
"""Text Generation - generate texts from input text prompts."""
TASK_TEXT_GENERATION_CHAT: Task.ValueType # 9
"""Task: TEXT GENERATION CHAT"""
"""Conversational Text Generation - generate text as responses to a dialog input."""
TASK_VISUAL_QUESTION_ANSWERING: Task.ValueType # 10
"""Task: VISUAL QUESTION ANSWERING"""
"""Visual Question Answering - generate text as a response to a visual prompt."""
TASK_IMAGE_TO_IMAGE: Task.ValueType # 11
"""Task: IMAGE TO IMAGE"""
"""Image to Image - generate an image from another image."""
TASK_TEXT_EMBEDDINGS: Task.ValueType # 12
"""Task: TEXT EMBEDDINGS"""
"""Text Embeddings - generate an embedding (a representation as coordinates) from a text input."""
TASK_SPEECH_RECOGNITION: Task.ValueType # 13
"""Task: SPEECH RECOGNITION"""
"""Speech Recognition - transcribe the words in an audio input."""
global___Task = Task
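
A minimal usage sketch of the generated Task enum, assuming the module is importable under a path that mirrors the stub location (common/task/v1alpha/task_pb2.pyi); the exact package prefix may differ in the published SDK:

# Sketch only: the import path below is an assumption based on the stub path.
from common.task.v1alpha import task_pb2

# Enum members are exposed both on the Task wrapper and at module level.
task = task_pb2.TASK_TEXT_GENERATION_CHAT

# The EnumTypeWrapper supports lookups in both directions.
print(task_pb2.Task.Name(task))         # "TASK_TEXT_GENERATION_CHAT"
print(task_pb2.Task.Value("TASK_OCR"))  # 4
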
26 changes: 0 additions & 26 deletions core/mgmt/v1beta/openapi_pb2.py

This file was deleted.

7 changes: 0 additions & 7 deletions core/mgmt/v1beta/openapi_pb2.pyi

This file was deleted.

4 changes: 0 additions & 4 deletions core/mgmt/v1beta/openapi_pb2_grpc.py

This file was deleted.

17 changes: 0 additions & 17 deletions core/mgmt/v1beta/openapi_pb2_grpc.pyi

This file was deleted.

20 changes: 9 additions & 11 deletions model/model/v1alpha/common_pb2.py

Some generated files are not rendered by default.

72 changes: 30 additions & 42 deletions model/model/v1alpha/common_pb2.pyi
@@ -18,7 +18,9 @@ DESCRIPTOR: google.protobuf.descriptor.FileDescriptor

@typing_extensions.final
class BoundingBox(google.protobuf.message.Message):
"""BoundingBox represents the bounding box data structure"""
"""BoundingBox represents a frame in an image that can be drawn when detecting
patterns in objects.
"""

DESCRIPTOR: google.protobuf.descriptor.Descriptor

@@ -27,13 +29,13 @@ class BoundingBox(google.protobuf.message.Message):
WIDTH_FIELD_NUMBER: builtins.int
HEIGHT_FIELD_NUMBER: builtins.int
top: builtins.float
"""Bounding box top y-axis value"""
"""Top Y-axis."""
left: builtins.float
"""Bounding box left x-axis value"""
"""Left X-axis."""
width: builtins.float
"""Bounding box width value"""
"""Width."""
height: builtins.float
"""Bounding box height value"""
"""Height."""
def __init__(
self,
*,
@@ -46,42 +48,18 @@

global___BoundingBox = BoundingBox
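
As an illustration, a hedged construction sketch for this message; the import path model.model.v1alpha.common_pb2 is assumed from the file location:

# Sketch only: import path assumed from model/model/v1alpha/common_pb2.pyi.
from model.model.v1alpha import common_pb2

# top/left locate the box origin in the image; width/height give its size.
box = common_pb2.BoundingBox(top=12.0, left=34.0, width=200.0, height=150.0)
assert box.width == 200.0
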

@typing_extensions.final
class ExtraParamObject(google.protobuf.message.Message):
"""Additional hyperparameters for model inferences
or other configuration not listsed in protobuf
"""

DESCRIPTOR: google.protobuf.descriptor.Descriptor

PARAM_NAME_FIELD_NUMBER: builtins.int
PARAM_VALUE_FIELD_NUMBER: builtins.int
param_name: builtins.str
"""Name of the hyperparameter"""
param_value: builtins.str
"""Value of the hyperparameter"""
def __init__(
self,
*,
param_name: builtins.str = ...,
param_value: builtins.str = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["param_name", b"param_name", "param_value", b"param_value"]) -> None: ...

global___ExtraParamObject = ExtraParamObject

@typing_extensions.final
class PromptImage(google.protobuf.message.Message):
"""Prompt Image for text generation model"""
"""PromptImage is an image input for model inference."""

DESCRIPTOR: google.protobuf.descriptor.Descriptor

PROMPT_IMAGE_URL_FIELD_NUMBER: builtins.int
PROMPT_IMAGE_BASE64_FIELD_NUMBER: builtins.int
prompt_image_url: builtins.str
"""Image URL"""
"""Image URL."""
prompt_image_base64: builtins.str
"""Base64 encoded Image"""
"""Base64-encoded image."""
def __init__(
self,
*,
@@ -96,17 +74,21 @@ global___PromptImage = PromptImage
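
For the base64 variant of PromptImage, a hypothetical sketch (same assumed import path; the file name is illustrative):

# Sketch only: the file name and import path are illustrative assumptions.
import base64

from model.model.v1alpha import common_pb2

with open("prompt.jpg", "rb") as f:
    image = common_pb2.PromptImage(
        prompt_image_base64=base64.b64encode(f.read()).decode("utf-8")
    )
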

@typing_extensions.final
class ImageContent(google.protobuf.message.Message):
"""Image content for Message Content"""
"""ImageContent holds an image with some details in plain text."""

DESCRIPTOR: google.protobuf.descriptor.Descriptor

IMAGE_URL_FIELD_NUMBER: builtins.int
DETAIL_FIELD_NUMBER: builtins.int
@property
def image_url(self) -> global___PromptImage:
"""Image url or base64 code of Message Content"""
"""Image as URL or base64 code.
NOTE: the `image_url` name comes from a convention from OpenAI, it doesn't
determine the format of the image.
"""
detail: builtins.str
"""Additinoal information for Image Content"""
"""Additional information."""
def __init__(
self,
*,
@@ -121,20 +103,26 @@ global___ImageContent = ImageContent

@typing_extensions.final
class MessageContent(google.protobuf.message.Message):
"""Content used for chat history message in text generation model"""
"""MessageContent is a message in a chat history message, used in text
generation models.
"""

DESCRIPTOR: google.protobuf.descriptor.Descriptor

TYPE_FIELD_NUMBER: builtins.int
IMAGE_URL_FIELD_NUMBER: builtins.int
TEXT_FIELD_NUMBER: builtins.int
type: builtins.str
"""Type of Content"""
"""Content type."""
@property
def image_url(self) -> global___ImageContent:
"""Image Url is the naming convention by openAi but not necessarily a url"""
"""Image content.
NOTE: the `image_url` name comes from a convention from OpenAI, it doesn't
determine the format of the image.
"""
text: builtins.str
"""Field for text"""
"""Text content."""
def __init__(
self,
*,
@@ -150,17 +138,17 @@ global___MessageContent = MessageContent

@typing_extensions.final
class Message(google.protobuf.message.Message):
"""Message used for chat history in text generation model"""
"""Message is used in chat history in text generation models."""

DESCRIPTOR: google.protobuf.descriptor.Descriptor

ROLE_FIELD_NUMBER: builtins.int
CONTENT_FIELD_NUMBER: builtins.int
role: builtins.str
"""The Role of a message"""
"""The Role of a message, e.g. `system`, `user`, etc. Defines the way the model answers questions."""
@property
def content(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___MessageContent]:
"""The context of the message"""
"""Message content."""
def __init__(
self,
*,

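Tying the message types above together, a hedged usage sketch that builds a single chat-history entry with one text part and one image part (the import path and literal values are assumptions):

# Sketch only: the import path and the literal values are assumptions.
from model.model.v1alpha import common_pb2

image_part = common_pb2.MessageContent(
    type="image_url",
    image_url=common_pb2.ImageContent(
        image_url=common_pb2.PromptImage(prompt_image_url="https://example.com/cat.jpg"),
        detail="high",
    ),
)
text_part = common_pb2.MessageContent(type="text", text="What is in this image?")

# `role` follows the common chat convention (`system`, `user`, ...);
# `content` is a repeated field, so a list initializes it.
chat_message = common_pb2.Message(role="user", content=[text_part, image_part])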