diff --git a/Makefile b/Makefile index f36630f5..551d67d5 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,8 @@ generate: install-generator ## Generate the SDK from our public openapi spec -g python \ -o ./generated \ --additional-properties=packageName=groundlight_openapi_client - poetry run datamodel-codegen --input spec/public-api.yaml --output generated/model.py +# strict-nullable makes nullable fields Optional in the generated Pydantic classes: https://github.com/koxudaxi/datamodel-code-generator/issues/327 + poetry run datamodel-codegen --input spec/public-api.yaml --output generated/model.py --strict-nullable poetry run black . PYTEST=poetry run pytest -v diff --git a/generated/.openapi-generator/FILES b/generated/.openapi-generator/FILES index 4d3ebbad..ac70605c 100644 --- a/generated/.openapi-generator/FILES +++ b/generated/.openapi-generator/FILES @@ -3,38 +3,42 @@ .travis.yml README.md docs/Action.md -docs/ClassificationResult.md +docs/ActionRequest.md +docs/ActionsApi.md +docs/AllNotes.md +docs/BinaryClassificationResult.md +docs/ChannelEnum.md docs/Condition.md +docs/ConditionRequest.md +docs/CountingResult.md docs/Detector.md -docs/DetectorCreationInput.md +docs/DetectorCreationInputRequest.md docs/DetectorTypeEnum.md docs/DetectorsApi.md docs/ImageQueriesApi.md docs/ImageQuery.md docs/ImageQueryTypeEnum.md -docs/ImagesApi.md docs/InlineResponse200.md +docs/ModeEnum.md docs/Note.md -docs/NoteCreationInput.md +docs/NoteRequest.md docs/NotesApi.md docs/PaginatedDetectorList.md docs/PaginatedImageQueryList.md docs/PaginatedRuleList.md docs/ResultTypeEnum.md docs/Rule.md -docs/RuleBase.md -docs/RuleCreationInput.md -docs/RulesApi.md -docs/User.md +docs/RuleRequest.md +docs/SnoozeTimeUnitEnum.md docs/UserApi.md +docs/VerbEnum.md git_push.sh groundlight_openapi_client/__init__.py groundlight_openapi_client/api/__init__.py +groundlight_openapi_client/api/actions_api.py groundlight_openapi_client/api/detectors_api.py groundlight_openapi_client/api/image_queries_api.py -groundlight_openapi_client/api/images_api.py groundlight_openapi_client/api/notes_api.py -groundlight_openapi_client/api/rules_api.py groundlight_openapi_client/api/user_api.py groundlight_openapi_client/api_client.py groundlight_openapi_client/apis/__init__.py @@ -42,24 +46,30 @@ groundlight_openapi_client/configuration.py groundlight_openapi_client/exceptions.py groundlight_openapi_client/model/__init__.py groundlight_openapi_client/model/action.py -groundlight_openapi_client/model/classification_result.py +groundlight_openapi_client/model/action_request.py +groundlight_openapi_client/model/all_notes.py +groundlight_openapi_client/model/binary_classification_result.py +groundlight_openapi_client/model/channel_enum.py groundlight_openapi_client/model/condition.py +groundlight_openapi_client/model/condition_request.py +groundlight_openapi_client/model/counting_result.py groundlight_openapi_client/model/detector.py -groundlight_openapi_client/model/detector_creation_input.py +groundlight_openapi_client/model/detector_creation_input_request.py groundlight_openapi_client/model/detector_type_enum.py groundlight_openapi_client/model/image_query.py groundlight_openapi_client/model/image_query_type_enum.py groundlight_openapi_client/model/inline_response200.py +groundlight_openapi_client/model/mode_enum.py groundlight_openapi_client/model/note.py -groundlight_openapi_client/model/note_creation_input.py +groundlight_openapi_client/model/note_request.py groundlight_openapi_client/model/paginated_detector_list.py 
groundlight_openapi_client/model/paginated_image_query_list.py groundlight_openapi_client/model/paginated_rule_list.py groundlight_openapi_client/model/result_type_enum.py groundlight_openapi_client/model/rule.py -groundlight_openapi_client/model/rule_base.py -groundlight_openapi_client/model/rule_creation_input.py -groundlight_openapi_client/model/user.py +groundlight_openapi_client/model/rule_request.py +groundlight_openapi_client/model/snooze_time_unit_enum.py +groundlight_openapi_client/model/verb_enum.py groundlight_openapi_client/model_utils.py groundlight_openapi_client/models/__init__.py groundlight_openapi_client/rest.py diff --git a/generated/README.md b/generated/README.md index efc43630..d5a3c167 100644 --- a/generated/README.md +++ b/generated/README.md @@ -1,9 +1,9 @@ # groundlight-openapi-client -Easy Computer Vision powered by Natural Language +Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. This Python package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project: -- API version: 0.6.0 +- API version: 0.15.3 - Package version: 1.0.0 - Build package: org.openapitools.codegen.languages.PythonClientCodegen @@ -49,10 +49,10 @@ Please follow the [installation procedure](#installation--usage) and then run th import time import groundlight_openapi_client from pprint import pprint -from groundlight_openapi_client.api import detectors_api -from groundlight_openapi_client.model.detector import Detector -from groundlight_openapi_client.model.detector_creation_input import DetectorCreationInput -from groundlight_openapi_client.model.paginated_detector_list import PaginatedDetectorList +from groundlight_openapi_client.api import actions_api +from groundlight_openapi_client.model.paginated_rule_list import PaginatedRuleList +from groundlight_openapi_client.model.rule import Rule +from groundlight_openapi_client.model.rule_request import RuleRequest # Defining the host is optional and defaults to https://api.groundlight.ai/device-api # See configuration.py for a list of all supported configuration parameters. 
configuration = groundlight_openapi_client.Configuration( @@ -74,21 +74,33 @@ configuration.api_key['ApiToken'] = 'YOUR_API_KEY' # Enter a context with an instance of the API client with groundlight_openapi_client.ApiClient(configuration) as api_client: # Create an instance of the API class - api_instance = detectors_api.DetectorsApi(api_client) - detector_creation_input = DetectorCreationInput( + api_instance = actions_api.ActionsApi(api_client) + detector_id = "detector_id_example" # str | +rule_request = RuleRequest( name="name_example", - query="query_example", - group_name="group_name_example", - confidence_threshold=0.75, - pipeline_config="pipeline_config_example", - metadata="metadata_example", - ) # DetectorCreationInput | + enabled=True, + snooze_time_enabled=False, + snooze_time_value=0, + snooze_time_unit=None, + human_review_required=False, + condition=ConditionRequest( + verb=VerbEnum("ANSWERED_CONSECUTIVELY"), + parameters={ + "key": None, + }, + ), + action=ActionRequest( + channel=ChannelEnum("EMAIL"), + recipient="recipient_example", + include_image=True, + ), + ) # RuleRequest | try: - api_response = api_instance.create_detector(detector_creation_input) + api_response = api_instance.create_rule(detector_id, rule_request) pprint(api_response) except groundlight_openapi_client.ApiException as e: - print("Exception when calling DetectorsApi->create_detector: %s\n" % e) + print("Exception when calling ActionsApi->create_rule: %s\n" % e) ``` ## Documentation for API Endpoints @@ -97,43 +109,51 @@ All URIs are relative to *https://api.groundlight.ai/device-api* Class | Method | HTTP request | Description ------------ | ------------- | ------------- | ------------- +*ActionsApi* | [**create_rule**](docs/ActionsApi.md#create_rule) | **POST** /v1/actions/detector/{detector_id}/rules | +*ActionsApi* | [**delete_rule**](docs/ActionsApi.md#delete_rule) | **DELETE** /v1/actions/rules/{id} | +*ActionsApi* | [**get_rule**](docs/ActionsApi.md#get_rule) | **GET** /v1/actions/rules/{id} | +*ActionsApi* | [**list_detector_rules**](docs/ActionsApi.md#list_detector_rules) | **GET** /v1/actions/detector/{detector_id}/rules | +*ActionsApi* | [**list_rules**](docs/ActionsApi.md#list_rules) | **GET** /v1/actions/rules | *DetectorsApi* | [**create_detector**](docs/DetectorsApi.md#create_detector) | **POST** /v1/detectors | +*DetectorsApi* | [**delete_detector**](docs/DetectorsApi.md#delete_detector) | **DELETE** /v1/detectors/{id} | *DetectorsApi* | [**get_detector**](docs/DetectorsApi.md#get_detector) | **GET** /v1/detectors/{id} | *DetectorsApi* | [**list_detectors**](docs/DetectorsApi.md#list_detectors) | **GET** /v1/detectors | +*ImageQueriesApi* | [**get_image**](docs/ImageQueriesApi.md#get_image) | **GET** /v1/image-queries/{id}/image | *ImageQueriesApi* | [**get_image_query**](docs/ImageQueriesApi.md#get_image_query) | **GET** /v1/image-queries/{id} | *ImageQueriesApi* | [**list_image_queries**](docs/ImageQueriesApi.md#list_image_queries) | **GET** /v1/image-queries | *ImageQueriesApi* | [**submit_image_query**](docs/ImageQueriesApi.md#submit_image_query) | **POST** /v1/image-queries | -*ImagesApi* | [**get_image**](docs/ImagesApi.md#get_image) | **GET** /v1/image-queries/{id}/image | *NotesApi* | [**create_note**](docs/NotesApi.md#create_note) | **POST** /v1/notes | *NotesApi* | [**get_notes**](docs/NotesApi.md#get_notes) | **GET** /v1/notes | -*RulesApi* | [**create_rule**](docs/RulesApi.md#create_rule) | **POST** /v1/actions/detector/{detector_id}/rules | -*RulesApi* | 
[**delete_rule**](docs/RulesApi.md#delete_rule) | **DELETE** /v1/actions/rules/{id} | -*RulesApi* | [**get_rule**](docs/RulesApi.md#get_rule) | **GET** /v1/actions/rules/{id} | -*RulesApi* | [**list_rules**](docs/RulesApi.md#list_rules) | **GET** /v1/actions/rules | -*UserApi* | [**who_am_i**](docs/UserApi.md#who_am_i) | **GET** /me | +*UserApi* | [**who_am_i**](docs/UserApi.md#who_am_i) | **GET** /v1/me | ## Documentation For Models - [Action](docs/Action.md) - - [ClassificationResult](docs/ClassificationResult.md) + - [ActionRequest](docs/ActionRequest.md) + - [AllNotes](docs/AllNotes.md) + - [BinaryClassificationResult](docs/BinaryClassificationResult.md) + - [ChannelEnum](docs/ChannelEnum.md) - [Condition](docs/Condition.md) + - [ConditionRequest](docs/ConditionRequest.md) + - [CountingResult](docs/CountingResult.md) - [Detector](docs/Detector.md) - - [DetectorCreationInput](docs/DetectorCreationInput.md) + - [DetectorCreationInputRequest](docs/DetectorCreationInputRequest.md) - [DetectorTypeEnum](docs/DetectorTypeEnum.md) - [ImageQuery](docs/ImageQuery.md) - [ImageQueryTypeEnum](docs/ImageQueryTypeEnum.md) - [InlineResponse200](docs/InlineResponse200.md) + - [ModeEnum](docs/ModeEnum.md) - [Note](docs/Note.md) - - [NoteCreationInput](docs/NoteCreationInput.md) + - [NoteRequest](docs/NoteRequest.md) - [PaginatedDetectorList](docs/PaginatedDetectorList.md) - [PaginatedImageQueryList](docs/PaginatedImageQueryList.md) - [PaginatedRuleList](docs/PaginatedRuleList.md) - [ResultTypeEnum](docs/ResultTypeEnum.md) - [Rule](docs/Rule.md) - - [RuleBase](docs/RuleBase.md) - - [RuleCreationInput](docs/RuleCreationInput.md) - - [User](docs/User.md) + - [RuleRequest](docs/RuleRequest.md) + - [SnoozeTimeUnitEnum](docs/SnoozeTimeUnitEnum.md) + - [VerbEnum](docs/VerbEnum.md) ## Documentation For Authorization diff --git a/generated/docs/Action.md b/generated/docs/Action.md index e52bc34a..0eee9350 100644 --- a/generated/docs/Action.md +++ b/generated/docs/Action.md @@ -1,13 +1,12 @@ # Action -What action should be taken when the rule is triggered? ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**channel** | **str** | The channel to send the action to. | [optional] -**include_image** | **bool** | Should the image be included in the action? | [optional] -**recipient** | **str** | The recipient of the action. 
| [optional] +**channel** | [**ChannelEnum**](ChannelEnum.md) | | +**recipient** | **str** | | +**include_image** | **bool** | | **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/ActionRequest.md b/generated/docs/ActionRequest.md new file mode 100644 index 00000000..ad8e15a9 --- /dev/null +++ b/generated/docs/ActionRequest.md @@ -0,0 +1,14 @@ +# ActionRequest + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**channel** | [**ChannelEnum**](ChannelEnum.md) | | +**recipient** | **str** | | +**include_image** | **bool** | | +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/ActionsApi.md b/generated/docs/ActionsApi.md new file mode 100644 index 00000000..24045150 --- /dev/null +++ b/generated/docs/ActionsApi.md @@ -0,0 +1,420 @@ +# groundlight_openapi_client.ActionsApi + +All URIs are relative to *https://api.groundlight.ai/device-api* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**create_rule**](ActionsApi.md#create_rule) | **POST** /v1/actions/detector/{detector_id}/rules | +[**delete_rule**](ActionsApi.md#delete_rule) | **DELETE** /v1/actions/rules/{id} | +[**get_rule**](ActionsApi.md#get_rule) | **GET** /v1/actions/rules/{id} | +[**list_detector_rules**](ActionsApi.md#list_detector_rules) | **GET** /v1/actions/detector/{detector_id}/rules | +[**list_rules**](ActionsApi.md#list_rules) | **GET** /v1/actions/rules | + + +# **create_rule** +> Rule create_rule(detector_id, rule_request) + + + +Create a new rule for a detector + +### Example + +* Api Key Authentication (ApiToken): + +```python +import time +import groundlight_openapi_client +from groundlight_openapi_client.api import actions_api +from groundlight_openapi_client.model.rule import Rule +from groundlight_openapi_client.model.rule_request import RuleRequest +from pprint import pprint +# Defining the host is optional and defaults to https://api.groundlight.ai/device-api +# See configuration.py for a list of all supported configuration parameters. +configuration = groundlight_openapi_client.Configuration( + host = "https://api.groundlight.ai/device-api" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: ApiToken +configuration.api_key['ApiToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. 
Bearer) for API key, if needed +# configuration.api_key_prefix['ApiToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with groundlight_openapi_client.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = actions_api.ActionsApi(api_client) + detector_id = "detector_id_example" # str | + rule_request = RuleRequest( + name="name_example", + enabled=True, + snooze_time_enabled=False, + snooze_time_value=0, + snooze_time_unit=None, + human_review_required=False, + condition=ConditionRequest( + verb=VerbEnum("ANSWERED_CONSECUTIVELY"), + parameters={ + "key": None, + }, + ), + action=ActionRequest( + channel=ChannelEnum("EMAIL"), + recipient="recipient_example", + include_image=True, + ), + ) # RuleRequest | + + # example passing only required values which don't have defaults set + try: + api_response = api_instance.create_rule(detector_id, rule_request) + pprint(api_response) + except groundlight_openapi_client.ApiException as e: + print("Exception when calling ActionsApi->create_rule: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **detector_id** | **str**| | + **rule_request** | [**RuleRequest**](RuleRequest.md)| | + +### Return type + +[**Rule**](Rule.md) + +### Authorization + +[ApiToken](../README.md#ApiToken) + +### HTTP request headers + + - **Content-Type**: application/json, application/x-www-form-urlencoded, multipart/form-data + - **Accept**: application/json + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**201** | | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **delete_rule** +> delete_rule(id) + + + +Delete a rule + +### Example + +* Api Key Authentication (ApiToken): + +```python +import time +import groundlight_openapi_client +from groundlight_openapi_client.api import actions_api +from pprint import pprint +# Defining the host is optional and defaults to https://api.groundlight.ai/device-api +# See configuration.py for a list of all supported configuration parameters. +configuration = groundlight_openapi_client.Configuration( + host = "https://api.groundlight.ai/device-api" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: ApiToken +configuration.api_key['ApiToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. 
Bearer) for API key, if needed +# configuration.api_key_prefix['ApiToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with groundlight_openapi_client.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = actions_api.ActionsApi(api_client) + id = 1 # int | + + # example passing only required values which don't have defaults set + try: + api_instance.delete_rule(id) + except groundlight_openapi_client.ApiException as e: + print("Exception when calling ActionsApi->delete_rule: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **int**| | + +### Return type + +void (empty response body) + +### Authorization + +[ApiToken](../README.md#ApiToken) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: Not defined + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**204** | No response body | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **get_rule** +> Rule get_rule(id) + + + +Retrieve a rule + +### Example + +* Api Key Authentication (ApiToken): + +```python +import time +import groundlight_openapi_client +from groundlight_openapi_client.api import actions_api +from groundlight_openapi_client.model.rule import Rule +from pprint import pprint +# Defining the host is optional and defaults to https://api.groundlight.ai/device-api +# See configuration.py for a list of all supported configuration parameters. +configuration = groundlight_openapi_client.Configuration( + host = "https://api.groundlight.ai/device-api" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: ApiToken +configuration.api_key['ApiToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. 
Bearer) for API key, if needed +# configuration.api_key_prefix['ApiToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with groundlight_openapi_client.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = actions_api.ActionsApi(api_client) + id = 1 # int | + + # example passing only required values which don't have defaults set + try: + api_response = api_instance.get_rule(id) + pprint(api_response) + except groundlight_openapi_client.ApiException as e: + print("Exception when calling ActionsApi->get_rule: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **int**| | + +### Return type + +[**Rule**](Rule.md) + +### Authorization + +[ApiToken](../README.md#ApiToken) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **list_detector_rules** +> PaginatedRuleList list_detector_rules(detector_id) + + + +List all rules for a detector + +### Example + +* Api Key Authentication (ApiToken): + +```python +import time +import groundlight_openapi_client +from groundlight_openapi_client.api import actions_api +from groundlight_openapi_client.model.paginated_rule_list import PaginatedRuleList +from pprint import pprint +# Defining the host is optional and defaults to https://api.groundlight.ai/device-api +# See configuration.py for a list of all supported configuration parameters. +configuration = groundlight_openapi_client.Configuration( + host = "https://api.groundlight.ai/device-api" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: ApiToken +configuration.api_key['ApiToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. 
Bearer) for API key, if needed +# configuration.api_key_prefix['ApiToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with groundlight_openapi_client.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = actions_api.ActionsApi(api_client) + detector_id = "detector_id_example" # str | + + # example passing only required values which don't have defaults set + try: + api_response = api_instance.list_detector_rules(detector_id) + pprint(api_response) + except groundlight_openapi_client.ApiException as e: + print("Exception when calling ActionsApi->list_detector_rules: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **detector_id** | **str**| | + +### Return type + +[**PaginatedRuleList**](PaginatedRuleList.md) + +### Authorization + +[ApiToken](../README.md#ApiToken) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **list_rules** +> PaginatedRuleList list_rules() + + + +Lists all rules over all detectors owned by the requester. + +### Example + +* Api Key Authentication (ApiToken): + +```python +import time +import groundlight_openapi_client +from groundlight_openapi_client.api import actions_api +from groundlight_openapi_client.model.paginated_rule_list import PaginatedRuleList +from pprint import pprint +# Defining the host is optional and defaults to https://api.groundlight.ai/device-api +# See configuration.py for a list of all supported configuration parameters. +configuration = groundlight_openapi_client.Configuration( + host = "https://api.groundlight.ai/device-api" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: ApiToken +configuration.api_key['ApiToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['ApiToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with groundlight_openapi_client.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = actions_api.ActionsApi(api_client) + page = 1 # int | A page number within the paginated result set. (optional) + page_size = 1 # int | Number of results to return per page. (optional) + + # example passing only required values which don't have defaults set + # and optional values + try: + api_response = api_instance.list_rules(page=page, page_size=page_size) + pprint(api_response) + except groundlight_openapi_client.ApiException as e: + print("Exception when calling ActionsApi->list_rules: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **page** | **int**| A page number within the paginated result set. | [optional] + **page_size** | **int**| Number of results to return per page. 
| [optional] + +### Return type + +[**PaginatedRuleList**](PaginatedRuleList.md) + +### Authorization + +[ApiToken](../README.md#ApiToken) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/generated/docs/AllNotes.md b/generated/docs/AllNotes.md new file mode 100644 index 00000000..f14d4ea6 --- /dev/null +++ b/generated/docs/AllNotes.md @@ -0,0 +1,14 @@ +# AllNotes + +Serializes all notes for a given detector, grouped by type as listed in UserProfile.NoteCategoryChoices The fields must match whats in USERPROFILE.NoteCategoryChoices + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**customer** | [**[Note]**](Note.md) | | +**gl** | [**[Note]**](Note.md) | | +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/BinaryClassificationResult.md b/generated/docs/BinaryClassificationResult.md new file mode 100644 index 00000000..ce36dcd4 --- /dev/null +++ b/generated/docs/BinaryClassificationResult.md @@ -0,0 +1,13 @@ +# BinaryClassificationResult + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**confidence** | **float** | | +**label** | **str** | | +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/ChannelEnum.md b/generated/docs/ChannelEnum.md new file mode 100644 index 00000000..de03cb18 --- /dev/null +++ b/generated/docs/ChannelEnum.md @@ -0,0 +1,12 @@ +# ChannelEnum + +* `EMAIL` - EMAIL * `TEXT` - TEXT + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | **str** | * `EMAIL` - EMAIL * `TEXT` - TEXT | must be one of ["EMAIL", "TEXT", ] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/ClassificationResult.md b/generated/docs/ClassificationResult.md index 2af929bd..c4dfae48 100644 --- a/generated/docs/ClassificationResult.md +++ b/generated/docs/ClassificationResult.md @@ -1,12 +1,11 @@ # ClassificationResult -Our classification result. This result can come from the detector, or a human reviewer. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**label** | **str** | What is the predicted label? | -**confidence** | **float, none_type** | On a scale of 0 to 1, how confident are we in the predicted label? 
| [optional] +**confidence** | **float** | | +**label** | **str** | | **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/Condition.md b/generated/docs/Condition.md index 34a702f0..edfe01a8 100644 --- a/generated/docs/Condition.md +++ b/generated/docs/Condition.md @@ -1,12 +1,11 @@ # Condition -What condition should trigger the rule? ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**verb** | **str** | The verb to use in the condition. | [optional] -**parameters** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}** | The parameters to use in the condition. | [optional] +**verb** | [**VerbEnum**](VerbEnum.md) | | +**parameters** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}** | | **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/ConditionRequest.md b/generated/docs/ConditionRequest.md new file mode 100644 index 00000000..a9a2cead --- /dev/null +++ b/generated/docs/ConditionRequest.md @@ -0,0 +1,13 @@ +# ConditionRequest + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**verb** | [**VerbEnum**](VerbEnum.md) | | +**parameters** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}** | | +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/CountingResult.md b/generated/docs/CountingResult.md new file mode 100644 index 00000000..bdc002b5 --- /dev/null +++ b/generated/docs/CountingResult.md @@ -0,0 +1,13 @@ +# CountingResult + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**confidence** | **float** | | +**value** | **int** | | +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/Detector.md b/generated/docs/Detector.md index 67ce424c..590349f1 100644 --- a/generated/docs/Detector.md +++ b/generated/docs/Detector.md @@ -11,8 +11,10 @@ Name | Type | Description | Notes **name** | **str** | A short, descriptive name for the detector. | **query** | **str** | A question about the image. | [readonly] **group_name** | **str** | Which group should this detector be part of? 
| [readonly] -**confidence_threshold** | **float** | If the detector's prediction is below this confidence threshold, send the image query for human review. | [optional] if omitted the server will use the default value of 0.75 -**metadata** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | A dictionary of custom key/value metadata to associate with the detector (limited to 1KB). This is encoded as a URL-safe, base64-encoded JSON string. | [optional] +**metadata** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | Metadata about the detector. | [readonly] +**mode** | **str** | | [readonly] +**mode_configuration** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | | [readonly] +**confidence_threshold** | **float** | If the detector's prediction is below this confidence threshold, send the image query for human review. | [optional] if omitted the server will use the default value of 0.9 **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/DetectorCreationInputRequest.md b/generated/docs/DetectorCreationInputRequest.md new file mode 100644 index 00000000..0dd6e3ab --- /dev/null +++ b/generated/docs/DetectorCreationInputRequest.md @@ -0,0 +1,21 @@ +# DetectorCreationInputRequest + +Helper serializer for validating POST /detectors input. + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**name** | **str** | A short, descriptive name for the detector. | +**query** | **str** | A question about the image. | +**group_name** | **str** | Which group should this detector be part of? | [optional] +**confidence_threshold** | **float** | If the detector's prediction is below this confidence threshold, send the image query for human review. | [optional] if omitted the server will use the default value of 0.9 +**patience_time** | **float** | How long Groundlight will attempt to generate a confident prediction | [optional] if omitted the server will use the default value of 30.0 +**pipeline_config** | **str, none_type** | (Advanced usage) Configuration needed to instantiate a prediction pipeline. | [optional] +**metadata** | **str** | Base64-encoded metadata for the detector. This should be a JSON object with string keys. The size after encoding should not exceed 1362 bytes, corresponding to 1KiB before encoding. | [optional] +**mode** | **bool, date, datetime, dict, float, int, list, str, none_type** | Mode in which this detector will work. * `BINARY` - BINARY * `COUNT` - COUNT | [optional] +**mode_configuration** | **bool, date, datetime, dict, float, int, list, str, none_type** | Configuration for each detector mode. 
| [optional] +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/DetectorsApi.md b/generated/docs/DetectorsApi.md index c9a22ff2..6a748263 100644 --- a/generated/docs/DetectorsApi.md +++ b/generated/docs/DetectorsApi.md @@ -5,12 +5,13 @@ All URIs are relative to *https://api.groundlight.ai/device-api* Method | HTTP request | Description ------------- | ------------- | ------------- [**create_detector**](DetectorsApi.md#create_detector) | **POST** /v1/detectors | +[**delete_detector**](DetectorsApi.md#delete_detector) | **DELETE** /v1/detectors/{id} | [**get_detector**](DetectorsApi.md#get_detector) | **GET** /v1/detectors/{id} | [**list_detectors**](DetectorsApi.md#list_detectors) | **GET** /v1/detectors | # **create_detector** -> Detector create_detector(detector_creation_input) +> Detector create_detector(detector_creation_input_request) @@ -24,8 +25,8 @@ Create a new detector. import time import groundlight_openapi_client from groundlight_openapi_client.api import detectors_api +from groundlight_openapi_client.model.detector_creation_input_request import DetectorCreationInputRequest from groundlight_openapi_client.model.detector import Detector -from groundlight_openapi_client.model.detector_creation_input import DetectorCreationInput from pprint import pprint # Defining the host is optional and defaults to https://api.groundlight.ai/device-api # See configuration.py for a list of all supported configuration parameters. @@ -48,18 +49,21 @@ configuration.api_key['ApiToken'] = 'YOUR_API_KEY' with groundlight_openapi_client.ApiClient(configuration) as api_client: # Create an instance of the API class api_instance = detectors_api.DetectorsApi(api_client) - detector_creation_input = DetectorCreationInput( + detector_creation_input_request = DetectorCreationInputRequest( name="name_example", query="query_example", group_name="group_name_example", - confidence_threshold=0.75, + confidence_threshold=0.9, + patience_time=30.0, pipeline_config="pipeline_config_example", metadata="metadata_example", - ) # DetectorCreationInput | + mode=None, + mode_configuration=None, + ) # DetectorCreationInputRequest | # example passing only required values which don't have defaults set try: - api_response = api_instance.create_detector(detector_creation_input) + api_response = api_instance.create_detector(detector_creation_input_request) pprint(api_response) except groundlight_openapi_client.ApiException as e: print("Exception when calling DetectorsApi->create_detector: %s\n" % e) @@ -70,7 +74,7 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - **detector_creation_input** | [**DetectorCreationInput**](DetectorCreationInput.md)| | + **detector_creation_input_request** | [**DetectorCreationInputRequest**](DetectorCreationInputRequest.md)| | ### Return type @@ -94,6 +98,81 @@ Name | Type | Description | Notes [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) +# **delete_detector** +> delete_detector(id) + + + +Delete a detector by its ID. 
+ +### Example + +* Api Key Authentication (ApiToken): + +```python +import time +import groundlight_openapi_client +from groundlight_openapi_client.api import detectors_api +from pprint import pprint +# Defining the host is optional and defaults to https://api.groundlight.ai/device-api +# See configuration.py for a list of all supported configuration parameters. +configuration = groundlight_openapi_client.Configuration( + host = "https://api.groundlight.ai/device-api" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: ApiToken +configuration.api_key['ApiToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['ApiToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with groundlight_openapi_client.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = detectors_api.DetectorsApi(api_client) + id = "id_example" # str | Choose a detector by its ID. + + # example passing only required values which don't have defaults set + try: + api_instance.delete_detector(id) + except groundlight_openapi_client.ApiException as e: + print("Exception when calling DetectorsApi->delete_detector: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| Choose a detector by its ID. | + +### Return type + +void (empty response body) + +### Authorization + +[ApiToken](../README.md#ApiToken) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: Not defined + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**204** | No response body | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + # **get_detector** > Detector get_detector(id) @@ -210,7 +289,7 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: # Create an instance of the API class api_instance = detectors_api.DetectorsApi(api_client) page = 1 # int | A page number within the paginated result set. (optional) - page_size = 1 # int | Number of results to return per page. (optional) + page_size = 1 # int | Number of items to return per page. (optional) # example passing only required values which don't have defaults set # and optional values @@ -227,7 +306,7 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- **page** | **int**| A page number within the paginated result set. | [optional] - **page_size** | **int**| Number of results to return per page. | [optional] + **page_size** | **int**| Number of items to return per page. 
| [optional] ### Return type diff --git a/generated/docs/ImageQueriesApi.md b/generated/docs/ImageQueriesApi.md index 207ddadd..4dcb25a8 100644 --- a/generated/docs/ImageQueriesApi.md +++ b/generated/docs/ImageQueriesApi.md @@ -4,11 +4,88 @@ All URIs are relative to *https://api.groundlight.ai/device-api* Method | HTTP request | Description ------------- | ------------- | ------------- +[**get_image**](ImageQueriesApi.md#get_image) | **GET** /v1/image-queries/{id}/image | [**get_image_query**](ImageQueriesApi.md#get_image_query) | **GET** /v1/image-queries/{id} | [**list_image_queries**](ImageQueriesApi.md#list_image_queries) | **GET** /v1/image-queries | [**submit_image_query**](ImageQueriesApi.md#submit_image_query) | **POST** /v1/image-queries | +# **get_image** +> file_type get_image(id) + + + +Retrieve an image by its ID. + +### Example + +* Api Key Authentication (ApiToken): + +```python +import time +import groundlight_openapi_client +from groundlight_openapi_client.api import image_queries_api +from pprint import pprint +# Defining the host is optional and defaults to https://api.groundlight.ai/device-api +# See configuration.py for a list of all supported configuration parameters. +configuration = groundlight_openapi_client.Configuration( + host = "https://api.groundlight.ai/device-api" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: ApiToken +configuration.api_key['ApiToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['ApiToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with groundlight_openapi_client.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = image_queries_api.ImageQueriesApi(api_client) + id = "id_example" # str | Retrieve the image associated with the image query ID. + + # example passing only required values which don't have defaults set + try: + api_response = api_instance.get_image(id) + pprint(api_response) + except groundlight_openapi_client.ApiException as e: + print("Exception when calling ImageQueriesApi->get_image: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **id** | **str**| Retrieve the image associated with the image query ID. | + +### Return type + +**file_type** + +### Authorization + +[ApiToken](../README.md#ApiToken) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: image/jpeg + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + # **get_image_query** > ImageQuery get_image_query(id) @@ -125,7 +202,7 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: # Create an instance of the API class api_instance = image_queries_api.ImageQueriesApi(api_client) page = 1 # int | A page number within the paginated result set. (optional) - page_size = 1 # int | Number of results to return per page. (optional) + page_size = 1 # int | Number of items to return per page. 
(optional) # example passing only required values which don't have defaults set # and optional values @@ -142,7 +219,7 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- **page** | **int**| A page number within the paginated result set. | [optional] - **page_size** | **int**| Number of results to return per page. | [optional] + **page_size** | **int**| Number of items to return per page. | [optional] ### Return type @@ -205,12 +282,12 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: # Create an instance of the API class api_instance = image_queries_api.ImageQueriesApi(api_client) detector_id = "detector_id_example" # str | Choose a detector by its ID. + human_review = "human_review_example" # str | If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident. (optional) inspection_id = "inspection_id_example" # str | Associate the image query with an inspection. (optional) - human_review = "human_review_example" # str | If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident. (optional) + metadata = "metadata_example" # str | A dictionary of custom key/value metadata to associate with the image query (limited to 1KB). (optional) patience_time = 3.14 # float | How long to wait for a confident response. (optional) want_async = "want_async_example" # str | If \"true\" then submitting an image query returns immediately without a result. The result will be computed asynchronously and can be retrieved later. (optional) - metadata = "metadata_example" # str | A dictionary of custom key/value metadata to associate with the image query (limited to 1KB). (optional) - body = open('@path/to/image.jpeg', 'rb') # file_type | (optional) + body = open('/path/to/file', 'rb') # file_type | (optional) # example passing only required values which don't have defaults set try: @@ -222,7 +299,7 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: # example passing only required values which don't have defaults set # and optional values try: - api_response = api_instance.submit_image_query(detector_id, inspection_id=inspection_id, human_review=human_review, patience_time=patience_time, want_async=want_async, metadata=metadata, body=body) + api_response = api_instance.submit_image_query(detector_id, human_review=human_review, inspection_id=inspection_id, metadata=metadata, patience_time=patience_time, want_async=want_async, body=body) pprint(api_response) except groundlight_openapi_client.ApiException as e: print("Exception when calling ImageQueriesApi->submit_image_query: %s\n" % e) @@ -234,11 +311,11 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- **detector_id** | **str**| Choose a detector by its ID. 
| + **human_review** | **str**| If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident. | [optional] **inspection_id** | **str**| Associate the image query with an inspection. | [optional] - **human_review** | **str**| If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident. | [optional] + **metadata** | **str**| A dictionary of custom key/value metadata to associate with the image query (limited to 1KB). | [optional] **patience_time** | **float**| How long to wait for a confident response. | [optional] **want_async** | **str**| If \"true\" then submitting an image query returns immediately without a result. The result will be computed asynchronously and can be retrieved later. | [optional] - **metadata** | **str**| A dictionary of custom key/value metadata to associate with the image query (limited to 1KB). | [optional] **body** | **file_type**| | [optional] ### Return type diff --git a/generated/docs/ImageQuery.md b/generated/docs/ImageQuery.md index 5db66277..adbe840f 100644 --- a/generated/docs/ImageQuery.md +++ b/generated/docs/ImageQuery.md @@ -5,14 +5,16 @@ Spec for serializing a image-query object in the public API. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- +**metadata** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | Metadata about the image query. | [readonly] **id** | **str** | A unique ID for this object. | [readonly] **type** | **bool, date, datetime, dict, float, int, list, str, none_type** | The type of this object. | [readonly] **created_at** | **datetime** | When was this detector created? | [readonly] **query** | **str** | A question about the image. | [readonly] **detector_id** | **str** | Which detector was used on this image query? | [readonly] **result_type** | **bool, date, datetime, dict, float, int, list, str, none_type** | What type of result are we returning? | [readonly] -**result** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [optional] [readonly] -**metadata** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | A dictionary of custom key/value metadata to associate with the image query (limited to 1KB). | [optional] [readonly] +**result** | **bool, date, datetime, dict, float, int, list, str, none_type** | The result of the image query. | [readonly] +**patience_time** | **float** | How long to wait for a confident response. | [readonly] +**confidence_threshold** | **float** | Min confidence needed to accept the response of the image query. 
| [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/InlineResponse200.md b/generated/docs/InlineResponse200.md index 57958321..020894d5 100644 --- a/generated/docs/InlineResponse200.md +++ b/generated/docs/InlineResponse200.md @@ -4,8 +4,7 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**customer** | [**[Note]**](Note.md) | | -**gl** | [**[Note]**](Note.md) | | +**username** | **str** | The user's username | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/ModeEnum.md b/generated/docs/ModeEnum.md new file mode 100644 index 00000000..82b90c28 --- /dev/null +++ b/generated/docs/ModeEnum.md @@ -0,0 +1,12 @@ +# ModeEnum + +* `BINARY` - BINARY * `COUNT` - COUNT + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | **str** | * `BINARY` - BINARY * `COUNT` - COUNT | must be one of ["BINARY", "COUNT", ] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/Note.md b/generated/docs/Note.md index 99493d5e..baaad6ad 100644 --- a/generated/docs/Note.md +++ b/generated/docs/Note.md @@ -4,8 +4,8 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**content** | **str** | The text inside the note | [optional] -**note_type** | **str** | The type of note | [optional] +**detector_id** | **str** | | [readonly] +**content** | **str** | Text content of the note. | **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/NoteRequest.md b/generated/docs/NoteRequest.md new file mode 100644 index 00000000..730546a8 --- /dev/null +++ b/generated/docs/NoteRequest.md @@ -0,0 +1,12 @@ +# NoteRequest + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**content** | **str** | Text content of the note. 
| +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/NotesApi.md b/generated/docs/NotesApi.md index 844701d4..28e9f1da 100644 --- a/generated/docs/NotesApi.md +++ b/generated/docs/NotesApi.md @@ -9,11 +9,11 @@ Method | HTTP request | Description # **create_note** -> [Note] create_note(detector_id, note_creation_input) +> create_note(detector_id, note_request) -Create a new note. +Create a new note ### Example @@ -23,8 +23,7 @@ Create a new note. import time import groundlight_openapi_client from groundlight_openapi_client.api import notes_api -from groundlight_openapi_client.model.note_creation_input import NoteCreationInput -from groundlight_openapi_client.model.note import Note +from groundlight_openapi_client.model.note_request import NoteRequest from pprint import pprint # Defining the host is optional and defaults to https://api.groundlight.ai/device-api # See configuration.py for a list of all supported configuration parameters. @@ -47,15 +46,14 @@ configuration.api_key['ApiToken'] = 'YOUR_API_KEY' with groundlight_openapi_client.ApiClient(configuration) as api_client: # Create an instance of the API class api_instance = notes_api.NotesApi(api_client) - detector_id = "detector_id_example" # str | the detector to associate the note with - note_creation_input = NoteCreationInput( + detector_id = "detector_id_example" # str | the detector to associate the new note with + note_request = NoteRequest( content="content_example", - ) # NoteCreationInput | + ) # NoteRequest | # example passing only required values which don't have defaults set try: - api_response = api_instance.create_note(detector_id, note_creation_input) - pprint(api_response) + api_instance.create_note(detector_id, note_request) except groundlight_openapi_client.ApiException as e: print("Exception when calling NotesApi->create_note: %s\n" % e) ``` @@ -65,12 +63,12 @@ with groundlight_openapi_client.ApiClient(configuration) as api_client: Name | Type | Description | Notes ------------- | ------------- | ------------- | ------------- - **detector_id** | **str**| the detector to associate the note with | - **note_creation_input** | [**NoteCreationInput**](NoteCreationInput.md)| | + **detector_id** | **str**| the detector to associate the new note with | + **note_request** | [**NoteRequest**](NoteRequest.md)| | ### Return type -[**[Note]**](Note.md) +void (empty response body) ### Authorization @@ -78,24 +76,24 @@ Name | Type | Description | Notes ### HTTP request headers - - **Content-Type**: application/json - - **Accept**: application/json + - **Content-Type**: application/json, application/x-www-form-urlencoded, multipart/form-data + - **Accept**: Not defined ### HTTP response details | Status code | Description | Response headers | |-------------|-------------|------------------| -**201** | | - | +**204** | No response body | - | [[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) # **get_notes** -> InlineResponse200 get_notes(detector_id) +> AllNotes get_notes(detector_id) -Retrieve notes for a detector +Get all the notes from a given detector and return the answer in lists, one for 
each note_category ### Example @@ -105,7 +103,7 @@ Retrieve notes for a detector import time import groundlight_openapi_client from groundlight_openapi_client.api import notes_api -from groundlight_openapi_client.model.inline_response200 import InlineResponse200 +from groundlight_openapi_client.model.all_notes import AllNotes from pprint import pprint # Defining the host is optional and defaults to https://api.groundlight.ai/device-api # See configuration.py for a list of all supported configuration parameters. @@ -147,7 +145,7 @@ Name | Type | Description | Notes ### Return type -[**InlineResponse200**](InlineResponse200.md) +[**AllNotes**](AllNotes.md) ### Authorization diff --git a/generated/docs/PaginatedAllNotesList.md b/generated/docs/PaginatedAllNotesList.md new file mode 100644 index 00000000..80217d1e --- /dev/null +++ b/generated/docs/PaginatedAllNotesList.md @@ -0,0 +1,15 @@ +# PaginatedAllNotesList + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**count** | **int** | | +**results** | [**[AllNotes]**](AllNotes.md) | | +**next** | **str, none_type** | | [optional] +**previous** | **str, none_type** | | [optional] +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/PaginatedDetectorList.md b/generated/docs/PaginatedDetectorList.md index a850fc59..52c596a2 100644 --- a/generated/docs/PaginatedDetectorList.md +++ b/generated/docs/PaginatedDetectorList.md @@ -4,10 +4,10 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**count** | **int** | | [optional] +**count** | **int** | | +**results** | [**[Detector]**](Detector.md) | | **next** | **str, none_type** | | [optional] **previous** | **str, none_type** | | [optional] -**results** | [**[Detector]**](Detector.md) | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/PaginatedImageQueryList.md b/generated/docs/PaginatedImageQueryList.md index 156ed9c5..f5711b9d 100644 --- a/generated/docs/PaginatedImageQueryList.md +++ b/generated/docs/PaginatedImageQueryList.md @@ -4,10 +4,10 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**count** | **int** | | [optional] +**count** | **int** | | +**results** | [**[ImageQuery]**](ImageQuery.md) | | **next** | **str, none_type** | | [optional] **previous** | **str, none_type** | | [optional] -**results** | [**[ImageQuery]**](ImageQuery.md) | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/PaginatedNoteList.md b/generated/docs/PaginatedNoteList.md new file mode 100644 
index 00000000..fc25bd7d --- /dev/null +++ b/generated/docs/PaginatedNoteList.md @@ -0,0 +1,15 @@ +# PaginatedNoteList + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**count** | **int** | | +**results** | [**[Note]**](Note.md) | | +**next** | **str, none_type** | | [optional] +**previous** | **str, none_type** | | [optional] +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/PaginatedRuleList.md b/generated/docs/PaginatedRuleList.md index a713a18a..e0d1942b 100644 --- a/generated/docs/PaginatedRuleList.md +++ b/generated/docs/PaginatedRuleList.md @@ -4,10 +4,10 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**count** | **int** | | [optional] +**count** | **int** | | +**results** | [**[Rule]**](Rule.md) | | **next** | **str, none_type** | | [optional] **previous** | **str, none_type** | | [optional] -**results** | [**[Rule]**](Rule.md) | | [optional] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/ResultTypeEnum.md b/generated/docs/ResultTypeEnum.md index afe52f6e..d5292681 100644 --- a/generated/docs/ResultTypeEnum.md +++ b/generated/docs/ResultTypeEnum.md @@ -4,7 +4,7 @@ ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**value** | **str** | | defaults to "binary_classification", must be one of ["binary_classification", ] +**value** | **str** | | must be one of ["binary_classification", "counting", ] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/Rule.md b/generated/docs/Rule.md index 064d4ec5..28da4c6a 100644 --- a/generated/docs/Rule.md +++ b/generated/docs/Rule.md @@ -1,20 +1,20 @@ # Rule -Spec for serializing a rule object in the public API. ## Properties Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- -**id** | **int** | A unique ID for this object. | [optional] [readonly] -**detector_name** | **str** | The name of the detector this rule is associated with. | [optional] [readonly] -**detector_id** | **str** | Which detector should this rule be associated with? | [optional] -**name** | **str** | A short, descriptive name for the rule. | [optional] -**enabled** | **bool** | Is this rule enabled? | [optional] if omitted the server will use the default value of True -**snooze_time_enabled** | **bool** | Is this rule snooze time enabled? | [optional] if omitted the server will use the default value of False -**snooze_time_value** | **int** | How long to snooze the rule for (in seconds). | [optional] if omitted the server will use the default value of 1 -**snooze_time_unit** | **str** | What unit of time to use for the snooze time. 
| [optional] if omitted the server will use the default value of "DAYS"
-**action** | [**Action**](Action.md) | | [optional]
-**condition** | [**Condition**](Condition.md) | | [optional]
+**id** | **int** | | [readonly]
+**detector_id** | **str** | | [readonly]
+**detector_name** | **str** | | [readonly]
+**name** | **str** | |
+**condition** | [**Condition**](Condition.md) | |
+**action** | [**Action**](Action.md) | |
+**enabled** | **bool** | | [optional] if omitted the server will use the default value of True
+**snooze_time_enabled** | **bool** | | [optional] if omitted the server will use the default value of False
+**snooze_time_value** | **int** | | [optional] if omitted the server will use the default value of 0
+**snooze_time_unit** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [optional]
+**human_review_required** | **bool** | | [optional] if omitted the server will use the default value of False
 **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]

[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)

diff --git a/generated/docs/RuleRequest.md b/generated/docs/RuleRequest.md
new file mode 100644
index 00000000..2fae4e8f
--- /dev/null
+++ b/generated/docs/RuleRequest.md
@@ -0,0 +1,19 @@
+# RuleRequest
+
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**name** | **str** | |
+**condition** | [**ConditionRequest**](ConditionRequest.md) | |
+**action** | [**ActionRequest**](ActionRequest.md) | |
+**enabled** | **bool** | | [optional] if omitted the server will use the default value of True
+**snooze_time_enabled** | **bool** | | [optional] if omitted the server will use the default value of False
+**snooze_time_value** | **int** | | [optional] if omitted the server will use the default value of 0
+**snooze_time_unit** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [optional]
+**human_review_required** | **bool** | | [optional] if omitted the server will use the default value of False
+**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
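The new `RuleRequest` model replaces `RuleCreationInput` but ships without a usage snippet, so here is a minimal sketch of how the new request models compose when creating a rule through `ActionsApi`. The module paths, class names, and the `create_rule(detector_id, rule_request)` call are taken from this diff; the detector ID and the `ConditionRequest` fields (`verb`, `parameters`) are illustrative assumptions to verify against `docs/ConditionRequest.md`.

```python
import groundlight_openapi_client
from groundlight_openapi_client.api import actions_api
from groundlight_openapi_client.model.action_request import ActionRequest
from groundlight_openapi_client.model.channel_enum import ChannelEnum
from groundlight_openapi_client.model.condition_request import ConditionRequest
from groundlight_openapi_client.model.rule_request import RuleRequest

configuration = groundlight_openapi_client.Configuration()
configuration.api_key["ApiToken"] = "YOUR_API_KEY"

with groundlight_openapi_client.ApiClient(configuration) as api_client:
    api_instance = actions_api.ActionsApi(api_client)
    rule_request = RuleRequest(
        name="email-when-answer-changes",
        # The ConditionRequest fields below are assumed for illustration;
        # confirm the schema in docs/ConditionRequest.md before relying on them.
        condition=ConditionRequest(verb="CHANGED_TO", parameters={"label": "YES"}),
        action=ActionRequest(
            channel=ChannelEnum("EMAIL"),
            recipient="alerts@example.com",
            include_image=True,
        ),
        # Optional snooze settings; snooze_time_unit takes the SnoozeTimeUnitEnum
        # values documented below ("DAYS", "HOURS", "MINUTES", "SECONDS").
        snooze_time_enabled=True,
        snooze_time_value=1,
        snooze_time_unit="HOURS",
    )
    # "det_abc123" is a placeholder detector ID; the created rule is returned.
    rule = api_instance.create_rule("det_abc123", rule_request)
```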
diff --git a/generated/docs/SnoozeTimeUnitEnum.md b/generated/docs/SnoozeTimeUnitEnum.md
new file mode 100644
index 00000000..778f7062
--- /dev/null
+++ b/generated/docs/SnoozeTimeUnitEnum.md
@@ -0,0 +1,12 @@
+# SnoozeTimeUnitEnum
+
+* `DAYS` - DAYS * `HOURS` - HOURS * `MINUTES` - MINUTES * `SECONDS` - SECONDS
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**value** | **str** | * `DAYS` - DAYS * `HOURS` - HOURS * `MINUTES` - MINUTES * `SECONDS` - SECONDS | must be one of ["DAYS", "HOURS", "MINUTES", "SECONDS", ]
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
diff --git a/generated/docs/UserApi.md b/generated/docs/UserApi.md
index 91367f4c..7e7d864d 100644
--- a/generated/docs/UserApi.md
+++ b/generated/docs/UserApi.md
@@ -4,11 +4,11 @@ All URIs are relative to *https://api.groundlight.ai/device-api*
 Method | HTTP request | Description
 ------------- | ------------- | -------------
-[**who_am_i**](UserApi.md#who_am_i) | **GET** /me |
+[**who_am_i**](UserApi.md#who_am_i) | **GET** /v1/me |
 # **who_am_i**
-> User who_am_i()
+> InlineResponse200 who_am_i()
@@ -22,7 +22,7 @@ Retrieve the current user.
 import time
 import groundlight_openapi_client
 from groundlight_openapi_client.api import user_api
-from groundlight_openapi_client.model.user import User
+from groundlight_openapi_client.model.inline_response200 import InlineResponse200
 from pprint import pprint
 # Defining the host is optional and defaults to https://api.groundlight.ai/device-api
 # See configuration.py for a list of all supported configuration parameters.
@@ -60,7 +60,7 @@ This endpoint does not need any parameter.
 ### Return type
-[**User**](User.md)
+[**InlineResponse200**](InlineResponse200.md)
 ### Authorization
diff --git a/generated/docs/VerbEnum.md b/generated/docs/VerbEnum.md
new file mode 100644
index 00000000..a76351de
--- /dev/null
+++ b/generated/docs/VerbEnum.md
@@ -0,0 +1,12 @@
+# VerbEnum
+
+* `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME * `CHANGED_TO` - CHANGED_TO * `NO_CHANGE` - NO_CHANGE * `NO_QUERIES` - NO_QUERIES
+
+## Properties
+Name | Type | Description | Notes
+------------ | ------------- | ------------- | -------------
+**value** | **str** | * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME * `CHANGED_TO` - CHANGED_TO * `NO_CHANGE` - NO_CHANGE * `NO_QUERIES` - NO_QUERIES | must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ]
+
+[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
+
+
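For reference, a minimal sketch of the renamed endpoint above: `who_am_i()` now hits `GET /v1/me` and returns the ad-hoc `InlineResponse200` model instead of the removed `User`. The API key is a placeholder, and `username` is optional in the schema, so it may be unset.

```python
import groundlight_openapi_client
from groundlight_openapi_client.api import user_api

configuration = groundlight_openapi_client.Configuration()
configuration.api_key["ApiToken"] = "YOUR_API_KEY"

with groundlight_openapi_client.ApiClient(configuration) as api_client:
    api_instance = user_api.UserApi(api_client)
    # GET /v1/me -> InlineResponse200 with an optional `username` field.
    me = api_instance.who_am_i()
    print(getattr(me, "username", None))
```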
diff --git a/generated/groundlight_openapi_client/__init__.py b/generated/groundlight_openapi_client/__init__.py index 2e7d7b62..c1149a99 100644 --- a/generated/groundlight_openapi_client/__init__.py +++ b/generated/groundlight_openapi_client/__init__.py @@ -3,9 +3,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """
diff --git a/generated/groundlight_openapi_client/api/__init__.py b/generated/groundlight_openapi_client/api/__init__.py index fdbe2951..895d519d 100644 --- a/generated/groundlight_openapi_client/api/__init__.py +++ b/generated/groundlight_openapi_client/api/__init__.py @@ -1,3 +1,3 @@ # do not import all apis into this module because that uses a lot of memory and stack frames # if you need the ability to import all apis from one package, import them with -# from groundlight_openapi_client.apis import DetectorsApi +# from groundlight_openapi_client.apis import ActionsApi
diff --git a/generated/groundlight_openapi_client/api/rules_api.py b/generated/groundlight_openapi_client/api/actions_api.py similarity index 75% rename from generated/groundlight_openapi_client/api/rules_api.py rename to generated/groundlight_openapi_client/api/actions_api.py index d5324196..9920219d 100644 --- a/generated/groundlight_openapi_client/api/rules_api.py +++ b/generated/groundlight_openapi_client/api/actions_api.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -24,10 +24,10 @@ ) from groundlight_openapi_client.model.paginated_rule_list import PaginatedRuleList from groundlight_openapi_client.model.rule import Rule -from groundlight_openapi_client.model.rule_creation_input import RuleCreationInput +from groundlight_openapi_client.model.rule_request import RuleRequest -class RulesApi(object): +class ActionsApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech @@ -50,11 +50,11 @@ def __init__(self, api_client=None): params_map={ "all": [ "detector_id", - "rule_creation_input", + "rule_request", ], "required": [ "detector_id", - "rule_creation_input", + "rule_request", ], "nullable": [], "enum": [], @@ -65,18 +65,21 @@ "allowed_values": {}, "openapi_types": { "detector_id": (str,), - "rule_creation_input": (RuleCreationInput,), + "rule_request": (RuleRequest,), }, "attribute_map": { "detector_id": "detector_id", }, "location_map": { "detector_id": "path", - "rule_creation_input": "body", + "rule_request": "body", }, "collection_format_map": {}, }, - headers_map={"accept": ["application/json"], "content_type": ["application/json"]}, + headers_map={ + "accept": ["application/json"], + "content_type": ["application/json", "application/x-www-form-urlencoded", "multipart/form-data"], + }, api_client=api_client, ) self.delete_rule_endpoint = _Endpoint( @@ -159,6 +162,46 @@ }, api_client=api_client, ) + self.list_detector_rules_endpoint = _Endpoint( + settings={ + "response_type": (PaginatedRuleList,), + "auth": ["ApiToken"], + "endpoint_path": "/v1/actions/detector/{detector_id}/rules", + "operation_id": "list_detector_rules", + "http_method": "GET", + "servers": None, + }, + params_map={ + "all": [ + "detector_id", + ], + "required": [ + "detector_id", + ], + "nullable": [], + "enum": [], + "validation": [], + },
+ root_map={ + "validations": {}, + "allowed_values": {}, + "openapi_types": { + "detector_id": (str,), + }, + "attribute_map": { + "detector_id": "detector_id", + }, + "location_map": { + "detector_id": "path", + }, + "collection_format_map": {}, + }, + headers_map={ + "accept": ["application/json"], + "content_type": [], + }, + api_client=api_client, + ) self.list_rules_endpoint = _Endpoint( settings={ "response_type": (PaginatedRuleList,), @@ -202,19 +245,19 @@ api_client=api_client, ) - def create_rule(self, detector_id, rule_creation_input, **kwargs): + def create_rule(self, detector_id, rule_request, **kwargs): """create_rule # noqa: E501 - Create a new rule for a detector. # noqa: E501 + Create a new rule for a detector # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.create_rule(detector_id, rule_creation_input, async_req=True) + >>> thread = api.create_rule(detector_id, rule_request, async_req=True) >>> result = thread.get() Args: - detector_id (str): Choose a detector by its ID. - rule_creation_input (RuleCreationInput): + detector_id (str): + rule_request (RuleRequest): Keyword Args: _return_http_data_only (bool): response data without head status @@ -259,7 +302,7 @@ kwargs["_content_type"] = kwargs.get("_content_type") kwargs["_host_index"] = kwargs.get("_host_index") kwargs["detector_id"] = detector_id - kwargs["rule_creation_input"] = rule_creation_input + kwargs["rule_request"] = rule_request return self.create_rule_endpoint.call_with_http_info(**kwargs) def delete_rule(self, id, **kwargs): @@ -273,7 +316,7 @@ >>> result = thread.get() Args: - id (int): Delete a rule by its ID. + id (int): Keyword Args: _return_http_data_only (bool): response data without head status @@ -331,7 +374,7 @@ >>> result = thread.get() Args: - id (int): Get a rule by its ID. + id (int): Keyword Args: _return_http_data_only (bool): response data without head status @@ -378,10 +421,68 @@ kwargs["id"] = id return self.get_rule_endpoint.call_with_http_info(**kwargs) + def list_detector_rules(self, detector_id, **kwargs): + """list_detector_rules # noqa: E501 + + List all rules for a detector # noqa: E501 + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.list_detector_rules(detector_id, async_req=True) + >>> result = thread.get() + + Args: + detector_id (str): + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done on the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done on the data received from the server. + Default is True.
+ _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _content_type (str/None): force body content-type. + Default is None and content-type will be predicted by allowed + content-types and body. + _host_index (int/None): specifies the index of the server + that we want to use. + Default is read from the configuration. + async_req (bool): execute request asynchronously + + Returns: + PaginatedRuleList + If the method is called asynchronously, returns the request + thread. + """ + kwargs["async_req"] = kwargs.get("async_req", False) + kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) + kwargs["_preload_content"] = kwargs.get("_preload_content", True) + kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) + kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) + kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) + kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) + kwargs["_content_type"] = kwargs.get("_content_type") + kwargs["_host_index"] = kwargs.get("_host_index") + kwargs["detector_id"] = detector_id + return self.list_detector_rules_endpoint.call_with_http_info(**kwargs) + def list_rules(self, **kwargs): """list_rules # noqa: E501 - Retrieve a list of rules. # noqa: E501 + Lists all rules over all detectors owned by the requester. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True
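The newly added `list_detector_rules` operation has no example elsewhere in this diff, so here is a minimal sketch. The endpoint path, the `PaginatedRuleList` return type, and its `count`/`results` fields come from the docs above; the detector ID is a placeholder.

```python
import groundlight_openapi_client
from groundlight_openapi_client.api import actions_api

configuration = groundlight_openapi_client.Configuration()
configuration.api_key["ApiToken"] = "YOUR_API_KEY"

with groundlight_openapi_client.ApiClient(configuration) as api_client:
    api_instance = actions_api.ActionsApi(api_client)
    # GET /v1/actions/detector/{detector_id}/rules -> PaginatedRuleList
    paginated = api_instance.list_detector_rules("det_abc123")
    print(f"{paginated.count} rule(s) configured on this detector")
    for rule in paginated.results:
        print(rule.id, rule.name)
```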
diff --git a/generated/groundlight_openapi_client/api/detectors_api.py b/generated/groundlight_openapi_client/api/detectors_api.py index da583d5e..49159a4a 100644 --- a/generated/groundlight_openapi_client/api/detectors_api.py +++ b/generated/groundlight_openapi_client/api/detectors_api.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -23,7 +23,7 @@ validate_and_convert_types, ) from groundlight_openapi_client.model.detector import Detector -from groundlight_openapi_client.model.detector_creation_input import DetectorCreationInput +from groundlight_openapi_client.model.detector_creation_input_request import DetectorCreationInputRequest from groundlight_openapi_client.model.paginated_detector_list import PaginatedDetectorList @@ -49,10 +49,10 @@ def __init__(self, api_client=None): }, params_map={ "all": [ - "detector_creation_input", + "detector_creation_input_request", ], "required": [ - "detector_creation_input", + "detector_creation_input_request", ], "nullable": [], "enum": [], @@ -62,17 +62,57 @@ "validations": {}, "allowed_values": {}, "openapi_types": { - "detector_creation_input": (DetectorCreationInput,), + "detector_creation_input_request": (DetectorCreationInputRequest,), }, "attribute_map": {}, "location_map": { - "detector_creation_input": "body", + "detector_creation_input_request": "body", }, "collection_format_map": {}, }, headers_map={"accept": ["application/json"], "content_type": ["application/json"]}, api_client=api_client, ) + self.delete_detector_endpoint = _Endpoint( + settings={ + "response_type": None, + "auth": ["ApiToken"], + "endpoint_path": "/v1/detectors/{id}", + "operation_id": "delete_detector", + "http_method": "DELETE", + "servers": None, + }, + params_map={ + "all": [ + "id", + ], + "required": [ + "id", + ], + "nullable": [], + "enum": [], + "validation": [], + }, + root_map={ + "validations": {}, + "allowed_values": {}, + "openapi_types": { + "id": (str,), + }, + "attribute_map": { + "id": "id", + }, + "location_map": { + "id": "path", + }, + "collection_format_map": {}, + }, + headers_map={ + "accept": [], + "content_type": [], + }, + api_client=api_client, + ) self.get_detector_endpoint = _Endpoint( settings={ "response_type": (Detector,), @@ -156,18 +196,18 @@ api_client=api_client, ) - def create_detector(self, detector_creation_input, **kwargs): + def create_detector(self, detector_creation_input_request, **kwargs): """create_detector # noqa: E501 Create a new detector. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.create_detector(detector_creation_input, async_req=True) + >>> thread = api.create_detector(detector_creation_input_request, async_req=True) >>> result = thread.get() Args: - detector_creation_input (DetectorCreationInput): + detector_creation_input_request (DetectorCreationInputRequest): Keyword Args: _return_http_data_only (bool): response data without head status @@ -211,9 +251,67 @@ kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) kwargs["_content_type"] = kwargs.get("_content_type") kwargs["_host_index"] = kwargs.get("_host_index") - kwargs["detector_creation_input"] = detector_creation_input + kwargs["detector_creation_input_request"] = detector_creation_input_request return self.create_detector_endpoint.call_with_http_info(**kwargs) + def delete_detector(self, id, **kwargs): + """delete_detector # noqa: E501 + + Delete a detector by its ID.
# noqa: E501 + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.delete_detector(id, async_req=True) + >>> result = thread.get() + + Args: + id (str): Choose a detector by its ID. + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done on the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done on the data received from the server. + Default is True. + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _content_type (str/None): force body content-type. + Default is None and content-type will be predicted by allowed + content-types and body. + _host_index (int/None): specifies the index of the server + that we want to use. + Default is read from the configuration. + async_req (bool): execute request asynchronously + + Returns: + None + If the method is called asynchronously, returns the request + thread. + """ + kwargs["async_req"] = kwargs.get("async_req", False) + kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) + kwargs["_preload_content"] = kwargs.get("_preload_content", True) + kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) + kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) + kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) + kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) + kwargs["_content_type"] = kwargs.get("_content_type") + kwargs["_host_index"] = kwargs.get("_host_index") + kwargs["id"] = id + return self.delete_detector_endpoint.call_with_http_info(**kwargs) + def get_detector(self, id, **kwargs): """get_detector # noqa: E501 @@ -285,7 +383,7 @@ def list_detectors(self, **kwargs): Keyword Args: page (int): A page number within the paginated result set.. [optional] - page_size (int): Number of results to return per page.. [optional] + page_size (int): Number of items to return per page.. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object
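A minimal sketch of the new `delete_detector` operation added above; the method name and the `DELETE /v1/detectors/{id}` path come from this diff, the detector ID is a placeholder, and the call returns an empty response body on success.

```python
import groundlight_openapi_client
from groundlight_openapi_client.api import detectors_api

configuration = groundlight_openapi_client.Configuration()
configuration.api_key["ApiToken"] = "YOUR_API_KEY"

with groundlight_openapi_client.ApiClient(configuration) as api_client:
    api_instance = detectors_api.DetectorsApi(api_client)
    # DELETE /v1/detectors/{id}; returns None (empty response body) on success.
    api_instance.delete_detector("det_abc123")
```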
diff --git a/generated/groundlight_openapi_client/api/image_queries_api.py b/generated/groundlight_openapi_client/api/image_queries_api.py index 690d7035..e306da10 100644 --- a/generated/groundlight_openapi_client/api/image_queries_api.py +++ b/generated/groundlight_openapi_client/api/image_queries_api.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -37,6 +37,46 @@ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client + self.get_image_endpoint = _Endpoint( + settings={ + "response_type": (file_type,), + "auth": ["ApiToken"], + "endpoint_path": "/v1/image-queries/{id}/image", + "operation_id": "get_image", + "http_method": "GET", + "servers": None, + }, + params_map={ + "all": [ + "id", + ], + "required": [ + "id", + ], + "nullable": [], + "enum": [], + "validation": [], + }, + root_map={ + "validations": {}, + "allowed_values": {}, + "openapi_types": { + "id": (str,), + }, + "attribute_map": { + "id": "id", + }, + "location_map": { + "id": "path", + }, + "collection_format_map": {}, + }, + headers_map={ + "accept": ["image/jpeg"], + "content_type": [], + }, + api_client=api_client, + ) self.get_image_query_endpoint = _Endpoint( settings={ "response_type": (ImageQuery,), @@ -131,11 +171,11 @@ params_map={ "all": [ "detector_id", - "inspection_id", "human_review", + "inspection_id", + "metadata", "patience_time", "want_async", - "metadata", "body", ], "required": [ @@ -150,28 +190,28 @@ "allowed_values": {}, "openapi_types": { "detector_id": (str,), - "inspection_id": (str,), "human_review": (str,), + "inspection_id": (str,), + "metadata": (str,), "patience_time": (float,), "want_async": (str,), - "metadata": (str,), "body": (file_type,), }, "attribute_map": { "detector_id": "detector_id", - "inspection_id": "inspection_id", "human_review": "human_review", + "inspection_id": "inspection_id", + "metadata": "metadata", "patience_time": "patience_time", "want_async": "want_async", - "metadata": "metadata", }, "location_map": { "detector_id": "query", - "inspection_id": "query", "human_review": "query", + "inspection_id": "query", + "metadata": "query", "patience_time": "query", "want_async": "query", - "metadata": "query", "body": "body", }, "collection_format_map": {}, @@ -180,6 +220,64 @@ api_client=api_client, ) + def get_image(self, id, **kwargs): + """get_image # noqa: E501 + + Retrieve an image by its ID. # noqa: E501 + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.get_image(id, async_req=True) + >>> result = thread.get() + + Args: + id (str): Retrieve the image associated with the image query ID. + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done on the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done on the data received from the server. + Default is True. + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document.
+ False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _content_type (str/None): force body content-type. + Default is None and content-type will be predicted by allowed + content-types and body. + _host_index (int/None): specifies the index of the server + that we want to use. + Default is read from the configuration. + async_req (bool): execute request asynchronously + + Returns: + file_type + If the method is called asynchronously, returns the request + thread. + """ + kwargs["async_req"] = kwargs.get("async_req", False) + kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) + kwargs["_preload_content"] = kwargs.get("_preload_content", True) + kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) + kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) + kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) + kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) + kwargs["_content_type"] = kwargs.get("_content_type") + kwargs["_host_index"] = kwargs.get("_host_index") + kwargs["id"] = id + return self.get_image_endpoint.call_with_http_info(**kwargs) + def get_image_query(self, id, **kwargs): """get_image_query # noqa: E501 @@ -251,7 +349,7 @@ def list_image_queries(self, **kwargs): Keyword Args: page (int): A page number within the paginated result set.. [optional] - page_size (int): Number of results to return per page.. [optional] + page_size (int): Number of items to return per page.. [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object @@ -309,11 +407,11 @@ def submit_image_query(self, detector_id, **kwargs): detector_id (str): Choose a detector by its ID. Keyword Args: + human_review (str): If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident.. [optional] inspection_id (str): Associate the image query with an inspection.. [optional] - human_review (str): If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. If set to `NEVER`, never send the image query for human review even if the ML model is not confident. . [optional] + metadata (str): A dictionary of custom key/value metadata to associate with the image query (limited to 1KB).. [optional] patience_time (float): How long to wait for a confident response.. [optional] want_async (str): If \"true\" then submitting an image query returns immediately without a result. The result will be computed asynchronously and can be retrieved later.. [optional] - metadata (str): A dictionary of custom key/value metadata to associate with the image query (limited to 1KB).. [optional] body (file_type): [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. 
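`get_image` moved from the removed `ImagesApi` (deleted in the next hunk) into `ImageQueriesApi`. A minimal sketch of fetching the JPEG for an image query follows; the image query ID is a placeholder, and the exact file-object handling of the `file_type` return value depends on the generator version.

```python
import groundlight_openapi_client
from groundlight_openapi_client.api import image_queries_api

configuration = groundlight_openapi_client.Configuration()
configuration.api_key["ApiToken"] = "YOUR_API_KEY"

with groundlight_openapi_client.ApiClient(configuration) as api_client:
    api_instance = image_queries_api.ImageQueriesApi(api_client)
    # GET /v1/image-queries/{id}/image with Accept: image/jpeg;
    # the client deserializes the response body to a file-like object.
    image_file = api_instance.get_image("iq_abc123")
    with open("image_query.jpg", "wb") as out:
        out.write(image_file.read())
```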
diff --git a/generated/groundlight_openapi_client/api/images_api.py b/generated/groundlight_openapi_client/api/images_api.py deleted file mode 100644 index 382d2dea..00000000 --- a/generated/groundlight_openapi_client/api/images_api.py +++ /dev/null @@ -1,135 +0,0 @@ -""" - Groundlight API - - Easy Computer Vision powered by Natural Language # noqa: E501 - - The version of the OpenAPI document: 0.6.0 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech -""" - - -import re # noqa: F401 -import sys # noqa: F401 - -from groundlight_openapi_client.api_client import ApiClient, Endpoint as _Endpoint -from groundlight_openapi_client.model_utils import ( # noqa: F401 - check_allowed_values, - check_validations, - date, - datetime, - file_type, - none_type, - validate_and_convert_types, -) - - -class ImagesApi(object): - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. - """ - - def __init__(self, api_client=None): - if api_client is None: - api_client = ApiClient() - self.api_client = api_client - self.get_image_endpoint = _Endpoint( - settings={ - "response_type": (file_type,), - "auth": ["ApiToken"], - "endpoint_path": "/v1/image-queries/{id}/image", - "operation_id": "get_image", - "http_method": "GET", - "servers": None, - }, - params_map={ - "all": [ - "id", - ], - "required": [ - "id", - ], - "nullable": [], - "enum": [], - "validation": [], - }, - root_map={ - "validations": {}, - "allowed_values": {}, - "openapi_types": { - "id": (str,), - }, - "attribute_map": { - "id": "id", - }, - "location_map": { - "id": "path", - }, - "collection_format_map": {}, - }, - headers_map={ - "accept": ["image/jpeg"], - "content_type": [], - }, - api_client=api_client, - ) - - def get_image(self, id, **kwargs): - """get_image # noqa: E501 - - Retrieve an image by its image query id. # noqa: E501 - This method makes a synchronous HTTP request by default. To make an - asynchronous HTTP request, please pass async_req=True - - >>> thread = api.get_image(id, async_req=True) - >>> result = thread.get() - - Args: - id (str): Choose an image by its image query id. - - Keyword Args: - _return_http_data_only (bool): response data without head status - code and headers. Default is True. - _preload_content (bool): if False, the urllib3.HTTPResponse object - will be returned without reading/decoding response data. - Default is True. - _request_timeout (int/float/tuple): timeout setting for this request. If - one number provided, it will be total request timeout. It can also - be a pair (tuple) of (connection, read) timeouts. - Default is None. - _check_input_type (bool): specifies if type checking - should be done one the data sent to the server. - Default is True. - _check_return_type (bool): specifies if type checking - should be done one the data received from the server. - Default is True. - _spec_property_naming (bool): True if the variable names in the input data - are serialized names, as specified in the OpenAPI document. - False if the variable names in the input data - are pythonic names, e.g. snake case (default) - _content_type (str/None): force body content-type. - Default is None and content-type will be predicted by allowed - content-types and body. - _host_index (int/None): specifies the index of the server - that we want to use. - Default is read from the configuration. 
- async_req (bool): execute request asynchronously - - Returns: - file_type - If the method is called asynchronously, returns the request - thread. - """ - kwargs["async_req"] = kwargs.get("async_req", False) - kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) - kwargs["_preload_content"] = kwargs.get("_preload_content", True) - kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) - kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) - kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) - kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) - kwargs["_content_type"] = kwargs.get("_content_type") - kwargs["_host_index"] = kwargs.get("_host_index") - kwargs["id"] = id - return self.get_image_endpoint.call_with_http_info(**kwargs) diff --git a/generated/groundlight_openapi_client/api/notes_api.py b/generated/groundlight_openapi_client/api/notes_api.py index 40961a2d..8c20fa31 100644 --- a/generated/groundlight_openapi_client/api/notes_api.py +++ b/generated/groundlight_openapi_client/api/notes_api.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -22,9 +22,8 @@ none_type, validate_and_convert_types, ) -from groundlight_openapi_client.model.inline_response200 import InlineResponse200 -from groundlight_openapi_client.model.note import Note -from groundlight_openapi_client.model.note_creation_input import NoteCreationInput +from groundlight_openapi_client.model.all_notes import AllNotes +from groundlight_openapi_client.model.note_request import NoteRequest class NotesApi(object): @@ -40,7 +39,7 @@ def __init__(self, api_client=None): self.api_client = api_client self.create_note_endpoint = _Endpoint( settings={ - "response_type": ([Note],), + "response_type": None, "auth": ["ApiToken"], "endpoint_path": "/v1/notes", "operation_id": "create_note", @@ -50,11 +49,11 @@ def __init__(self, api_client=None): params_map={ "all": [ "detector_id", - "note_creation_input", + "note_request", ], "required": [ "detector_id", - "note_creation_input", + "note_request", ], "nullable": [], "enum": [], @@ -65,23 +64,26 @@ def __init__(self, api_client=None): "allowed_values": {}, "openapi_types": { "detector_id": (str,), - "note_creation_input": (NoteCreationInput,), + "note_request": (NoteRequest,), }, "attribute_map": { "detector_id": "detector_id", }, "location_map": { "detector_id": "query", - "note_creation_input": "body", + "note_request": "body", }, "collection_format_map": {}, }, - headers_map={"accept": ["application/json"], "content_type": ["application/json"]}, + headers_map={ + "accept": [], + "content_type": ["application/json", "application/x-www-form-urlencoded", "multipart/form-data"], + }, api_client=api_client, ) self.get_notes_endpoint = _Endpoint( settings={ - "response_type": (InlineResponse200,), + "response_type": (AllNotes,), "auth": ["ApiToken"], "endpoint_path": "/v1/notes", "operation_id": "get_notes", @@ -120,19 +122,19 @@ def __init__(self, api_client=None): api_client=api_client, ) - def create_note(self, detector_id, note_creation_input, **kwargs): + def create_note(self, detector_id, 
note_request, **kwargs): """create_note # noqa: E501 - Create a new note. # noqa: E501 + Create a new note # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.create_note(detector_id, note_creation_input, async_req=True) + >>> thread = api.create_note(detector_id, note_request, async_req=True) >>> result = thread.get() Args: - detector_id (str): the detector to associate the note with - note_creation_input (NoteCreationInput): + detector_id (str): the detector to associate the new note with + note_request (NoteRequest): Keyword Args: _return_http_data_only (bool): response data without head status @@ -163,7 +165,7 @@ def create_note(self, detector_id, note_creation_input, **kwargs): async_req (bool): execute request asynchronously Returns: - [Note] + None If the method is called asynchronously, returns the request thread. """ @@ -177,13 +179,13 @@ def create_note(self, detector_id, note_creation_input, **kwargs): kwargs["_content_type"] = kwargs.get("_content_type") kwargs["_host_index"] = kwargs.get("_host_index") kwargs["detector_id"] = detector_id - kwargs["note_creation_input"] = note_creation_input + kwargs["note_request"] = note_request return self.create_note_endpoint.call_with_http_info(**kwargs) def get_notes(self, detector_id, **kwargs): """get_notes # noqa: E501 - Retrieve notes for a detector # noqa: E501 + Get all the notes from a given detector and return the answer in lists, one for each note_category # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True @@ -222,7 +224,7 @@ def get_notes(self, detector_id, **kwargs): async_req (bool): execute request asynchronously Returns: - InlineResponse200 + AllNotes If the method is called asynchronously, returns the request thread. """ diff --git a/generated/groundlight_openapi_client/api/user_api.py b/generated/groundlight_openapi_client/api/user_api.py index 0d9aab8c..986219e2 100644 --- a/generated/groundlight_openapi_client/api/user_api.py +++ b/generated/groundlight_openapi_client/api/user_api.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -22,7 +22,7 @@ none_type, validate_and_convert_types, ) -from groundlight_openapi_client.model.user import User +from groundlight_openapi_client.model.inline_response200 import InlineResponse200 class UserApi(object): @@ -38,9 +38,9 @@ def __init__(self, api_client=None): self.api_client = api_client self.who_am_i_endpoint = _Endpoint( settings={ - "response_type": (User,), + "response_type": (InlineResponse200,), "auth": ["ApiToken"], - "endpoint_path": "/me", + "endpoint_path": "/v1/me", "operation_id": "who_am_i", "http_method": "GET", "servers": None, @@ -101,7 +101,7 @@ def who_am_i(self, **kwargs): async_req (bool): execute request asynchronously Returns: - User + InlineResponse200 If the method is called asynchronously, returns the request thread. 
""" diff --git a/generated/groundlight_openapi_client/api_client.py b/generated/groundlight_openapi_client/api_client.py index 8c2cfd6b..57caa709 100644 --- a/generated/groundlight_openapi_client/api_client.py +++ b/generated/groundlight_openapi_client/api_client.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -769,11 +769,11 @@ def __call__(self, *args, **kwargs): """This method is invoked when endpoints are called Example: - api_instance = DetectorsApi() - api_instance.create_detector # this is an instance of the class Endpoint - api_instance.create_detector() # this invokes api_instance.create_detector.__call__() + api_instance = ActionsApi() + api_instance.create_rule # this is an instance of the class Endpoint + api_instance.create_rule() # this invokes api_instance.create_rule.__call__() which then invokes the callable functions stored in that endpoint at - api_instance.create_detector.callable or self.callable in this class + api_instance.create_rule.callable or self.callable in this class """ return self.callable(self, *args, **kwargs) diff --git a/generated/groundlight_openapi_client/apis/__init__.py b/generated/groundlight_openapi_client/apis/__init__.py index 92d4a63f..708c08e8 100644 --- a/generated/groundlight_openapi_client/apis/__init__.py +++ b/generated/groundlight_openapi_client/apis/__init__.py @@ -5,7 +5,7 @@ # raise a `RecursionError`. # In order to avoid this, import only the API that you directly need like: # -# from .api.detectors_api import DetectorsApi +# from .api.actions_api import ActionsApi # # or import this package, but before doing it, use: # @@ -13,9 +13,8 @@ # sys.setrecursionlimit(n) # Import APIs into API package: +from groundlight_openapi_client.api.actions_api import ActionsApi from groundlight_openapi_client.api.detectors_api import DetectorsApi from groundlight_openapi_client.api.image_queries_api import ImageQueriesApi -from groundlight_openapi_client.api.images_api import ImagesApi from groundlight_openapi_client.api.notes_api import NotesApi -from groundlight_openapi_client.api.rules_api import RulesApi from groundlight_openapi_client.api.user_api import UserApi diff --git a/generated/groundlight_openapi_client/configuration.py b/generated/groundlight_openapi_client/configuration.py index cc771021..a00466d2 100644 --- a/generated/groundlight_openapi_client/configuration.py +++ b/generated/groundlight_openapi_client/configuration.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -420,7 +420,7 @@ def to_debug_report(self): "Python SDK Debug Report:\n" "OS: {env}\n" "Python Version: {pyversion}\n" - "Version of the API: 0.6.0\n" + "Version of the API: 0.15.3\n" "SDK Package Version: 1.0.0".format(env=sys.platform, pyversion=sys.version) ) diff --git a/generated/groundlight_openapi_client/exceptions.py b/generated/groundlight_openapi_client/exceptions.py index 895f4069..41b32990 100644 --- a/generated/groundlight_openapi_client/exceptions.py +++ b/generated/groundlight_openapi_client/exceptions.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/action.py b/generated/groundlight_openapi_client/model/action.py index 038da192..b295f4dd 100644 --- a/generated/groundlight_openapi_client/model/action.py +++ b/generated/groundlight_openapi_client/model/action.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -30,6 +30,12 @@ from groundlight_openapi_client.exceptions import ApiAttributeError +def lazy_import(): + from groundlight_openapi_client.model.channel_enum import ChannelEnum + + globals()["ChannelEnum"] = ChannelEnum + + class Action(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -54,12 +60,7 @@ class Action(ModelNormal): as additional properties values. """ - allowed_values = { - ("channel",): { - "EMAIL": "EMAIL", - "TEXT": "TEXT", - }, - } + allowed_values = {} validations = {} @@ -69,6 +70,7 @@ def additional_properties_type(): This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ + lazy_import() return ( bool, date, @@ -93,10 +95,11 @@ def openapi_types(): openapi_types (dict): The key is attribute name and the value is attribute type. 
""" + lazy_import() return { - "channel": (str,), # noqa: E501 - "include_image": (bool,), # noqa: E501 + "channel": (ChannelEnum,), # noqa: E501 "recipient": (str,), # noqa: E501 + "include_image": (bool,), # noqa: E501 } @cached_property @@ -105,8 +108,8 @@ def discriminator(): attribute_map = { "channel": "channel", # noqa: E501 - "include_image": "include_image", # noqa: E501 "recipient": "recipient", # noqa: E501 + "include_image": "include_image", # noqa: E501 } read_only_vars = {} @@ -115,9 +118,14 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 + def _from_openapi_data(cls, channel, recipient, include_image, *args, **kwargs): # noqa: E501 """Action - a model defined in OpenAPI + Args: + channel (ChannelEnum): + recipient (str): + include_image (bool): + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -149,9 +157,6 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - channel (str): The channel to send the action to.. [optional] # noqa: E501 - include_image (bool): Should the image be included in the action?. [optional] # noqa: E501 - recipient (str): The recipient of the action.. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -180,6 +185,9 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.channel = channel + self.recipient = recipient + self.include_image = include_image for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -204,9 +212,14 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 ) @convert_js_args_to_python_args - def __init__(self, *args, **kwargs): # noqa: E501 + def __init__(self, channel, recipient, include_image, *args, **kwargs): # noqa: E501 """Action - a model defined in OpenAPI + Args: + channel (ChannelEnum): + recipient (str): + include_image (bool): + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -238,9 +251,6 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - channel (str): The channel to send the action to.. [optional] # noqa: E501 - include_image (bool): Should the image be included in the action?. [optional] # noqa: E501 - recipient (str): The recipient of the action.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -267,6 +277,9 @@ def __init__(self, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.channel = channel + self.recipient = recipient + self.include_image = include_image for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/action_request.py b/generated/groundlight_openapi_client/model/action_request.py new file mode 100644 index 00000000..f88e504b --- /dev/null +++ b/generated/groundlight_openapi_client/model/action_request.py @@ -0,0 +1,301 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +def lazy_import(): + from groundlight_openapi_client.model.channel_enum import ChannelEnum + + globals()["ChannelEnum"] = ChannelEnum + + +class ActionRequest(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = {} + + validations = { + ("recipient",): { + "min_length": 1, + }, + } + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + lazy_import() + return ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. 
+ """ + lazy_import() + return { + "channel": (ChannelEnum,), # noqa: E501 + "recipient": (str,), # noqa: E501 + "include_image": (bool,), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + attribute_map = { + "channel": "channel", # noqa: E501 + "recipient": "recipient", # noqa: E501 + "include_image": "include_image", # noqa: E501 + } + + read_only_vars = {} + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, channel, recipient, include_image, *args, **kwargs): # noqa: E501 + """ActionRequest - a model defined in OpenAPI + + Args: + channel (ChannelEnum): + recipient (str): + include_image (bool): + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.channel = channel + self.recipient = recipient + self.include_image = include_image + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. 
+ continue + setattr(self, var_name, var_value) + return self + + required_properties = set( + [ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ] + ) + + @convert_js_args_to_python_args + def __init__(self, channel, recipient, include_image, *args, **kwargs): # noqa: E501 + """ActionRequest - a model defined in OpenAPI + + Args: + channel (ChannelEnum): + recipient (str): + include_image (bool): + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.channel = channel + self.recipient = recipient + self.include_image = include_image + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError( + f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + "class with read only attributes." 
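Editor's note: ActionRequest replaces the old optional-field Action input. channel, recipient and include_image are now required constructor arguments, and channel is typed as ChannelEnum rather than a bare str. A minimal construction sketch, assuming the generated package is importable (the recipient address is a made-up example):

    from groundlight_openapi_client.model.action_request import ActionRequest
    from groundlight_openapi_client.model.channel_enum import ChannelEnum

    # channel must be a ChannelEnum ("EMAIL" or "TEXT", per the enum added later
    # in this diff); recipient must be a non-empty string (min_length 1 above).
    action = ActionRequest(
        channel=ChannelEnum("EMAIL"),
        recipient="alerts@example.com",  # hypothetical recipient
        include_image=True,
    )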
+ ) diff --git a/generated/groundlight_openapi_client/model/all_notes.py b/generated/groundlight_openapi_client/model/all_notes.py new file mode 100644 index 00000000..93213e35 --- /dev/null +++ b/generated/groundlight_openapi_client/model/all_notes.py @@ -0,0 +1,291 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +def lazy_import(): + from groundlight_openapi_client.model.note import Note + + globals()["Note"] = Note + + +class AllNotes(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = {} + + validations = {} + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + lazy_import() + return ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. 
+ """ + lazy_import() + return { + "customer": ([Note],), # noqa: E501 + "gl": ([Note],), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + attribute_map = { + "customer": "CUSTOMER", # noqa: E501 + "gl": "GL", # noqa: E501 + } + + read_only_vars = {} + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, customer, gl, *args, **kwargs): # noqa: E501 + """AllNotes - a model defined in OpenAPI + + Args: + customer ([Note]): + gl ([Note]): + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.customer = customer + self.gl = gl + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. 
+ continue + setattr(self, var_name, var_value) + return self + + required_properties = set( + [ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ] + ) + + @convert_js_args_to_python_args + def __init__(self, customer, gl, *args, **kwargs): # noqa: E501 + """AllNotes - a model defined in OpenAPI + + Args: + customer ([Note]): + gl ([Note]): + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.customer = customer + self.gl = gl + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError( + f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + "class with read only attributes." 
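Editor's note: AllNotes is a response-side container whose Python attributes customer and gl map to the upper-case JSON keys CUSTOMER and GL via attribute_map. In practice it is deserialized from a notes response rather than built by hand, but a minimal construction sketch looks like this:

    from groundlight_openapi_client.model.all_notes import AllNotes

    # Both fields are required lists of Note objects; empty lists type-check.
    notes = AllNotes(customer=[], gl=[])
    # When serialized, the JSON uses {"CUSTOMER": [...], "GL": [...]}.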
+ ) diff --git a/generated/groundlight_openapi_client/model/classification_result.py b/generated/groundlight_openapi_client/model/binary_classification_result.py similarity index 91% rename from generated/groundlight_openapi_client/model/classification_result.py rename to generated/groundlight_openapi_client/model/binary_classification_result.py index 4bb25e45..c3e58eb1 100644 --- a/generated/groundlight_openapi_client/model/classification_result.py +++ b/generated/groundlight_openapi_client/model/binary_classification_result.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -30,7 +30,7 @@ from groundlight_openapi_client.exceptions import ApiAttributeError -class ClassificationResult(ModelNormal): +class BinaryClassificationResult(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -56,12 +56,7 @@ class ClassificationResult(ModelNormal): allowed_values = {} - validations = { - ("confidence",): { - "inclusive_maximum": 1, - "inclusive_minimum": 0, - }, - } + validations = {} @cached_property def additional_properties_type(): @@ -94,11 +89,8 @@ def openapi_types(): and the value is attribute type. """ return { + "confidence": (float,), # noqa: E501 "label": (str,), # noqa: E501 - "confidence": ( - float, - none_type, - ), # noqa: E501 } @cached_property @@ -106,8 +98,8 @@ def discriminator(): return None attribute_map = { - "label": "label", # noqa: E501 "confidence": "confidence", # noqa: E501 + "label": "label", # noqa: E501 } read_only_vars = {} @@ -116,11 +108,12 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 - """ClassificationResult - a model defined in OpenAPI + def _from_openapi_data(cls, confidence, label, *args, **kwargs): # noqa: E501 + """BinaryClassificationResult - a model defined in OpenAPI Args: - label (str): What is the predicted label? + confidence (float): + label (str): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -153,7 +146,6 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - confidence (float, none_type): On a scale of 0 to 1, how confident are we in the predicted label?. 
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -182,6 +174,7 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.confidence = confidence self.label = label for var_name, var_value in kwargs.items(): if ( @@ -207,11 +200,12 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 ) @convert_js_args_to_python_args - def __init__(self, label, *args, **kwargs): # noqa: E501 - """ClassificationResult - a model defined in OpenAPI + def __init__(self, confidence, label, *args, **kwargs): # noqa: E501 + """BinaryClassificationResult - a model defined in OpenAPI Args: - label (str): What is the predicted label? + confidence (float): + label (str): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -244,7 +238,6 @@ def __init__(self, label, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - confidence (float, none_type): On a scale of 0 to 1, how confident are we in the predicted label?. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -271,6 +264,7 @@ def __init__(self, label, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.confidence = confidence self.label = label for var_name, var_value in kwargs.items(): if ( diff --git a/generated/groundlight_openapi_client/model/channel_enum.py b/generated/groundlight_openapi_client/model/channel_enum.py new file mode 100644 index 00000000..89b933b9 --- /dev/null +++ b/generated/groundlight_openapi_client/model/channel_enum.py @@ -0,0 +1,286 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +class ChannelEnum(ModelSimple): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. 
+ """ + + allowed_values = { + ("value",): { + "EMAIL": "EMAIL", + "TEXT": "TEXT", + }, + } + + validations = {} + + additional_properties_type = None + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + "value": (str,), + } + + @cached_property + def discriminator(): + return None + + attribute_map = {} + + read_only_vars = set() + + _composed_schemas = None + + required_properties = set( + [ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ] + ) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): + """ChannelEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `EMAIL` - EMAIL * `TEXT` - TEXT., must be one of ["EMAIL", "TEXT", ] # noqa: E501 + + Keyword Args: + value (str): * `EMAIL` - EMAIL * `TEXT` - TEXT., must be one of ["EMAIL", "TEXT", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
+ % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): + """ChannelEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `EMAIL` - EMAIL * `TEXT` - TEXT., must be one of ["EMAIL", "TEXT", ] # noqa: E501 + + Keyword Args: + value (str): * `EMAIL` - EMAIL * `TEXT` - TEXT., must be one of ["EMAIL", "TEXT", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
+ % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + return self diff --git a/generated/groundlight_openapi_client/model/condition.py b/generated/groundlight_openapi_client/model/condition.py index 5010c099..d9d0f1a5 100644 --- a/generated/groundlight_openapi_client/model/condition.py +++ b/generated/groundlight_openapi_client/model/condition.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -30,6 +30,12 @@ from groundlight_openapi_client.exceptions import ApiAttributeError +def lazy_import(): + from groundlight_openapi_client.model.verb_enum import VerbEnum + + globals()["VerbEnum"] = VerbEnum + + class Condition(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -54,15 +60,7 @@ class Condition(ModelNormal): as additional properties values. """ - allowed_values = { - ("verb",): { - "ANSWERED_CONSECUTIVELY": "ANSWERED_CONSECUTIVELY", - "ANSWERED_WITHIN_TIME": "ANSWERED_WITHIN_TIME", - "CHANGED_TO": "CHANGED_TO", - "NO_CHANGE": "NO_CHANGE", - "NO_QUERIES": "NO_QUERIES", - }, - } + allowed_values = {} validations = {} @@ -72,6 +70,7 @@ def additional_properties_type(): This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ + lazy_import() return ( bool, date, @@ -96,8 +95,9 @@ def openapi_types(): openapi_types (dict): The key is attribute name and the value is attribute type. """ + lazy_import() return { - "verb": (str,), # noqa: E501 + "verb": (VerbEnum,), # noqa: E501 "parameters": ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), # noqa: E501 } @@ -116,9 +116,13 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 + def _from_openapi_data(cls, verb, parameters, *args, **kwargs): # noqa: E501 """Condition - a model defined in OpenAPI + Args: + verb (VerbEnum): + parameters ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -150,8 +154,6 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - verb (str): The verb to use in the condition.. [optional] # noqa: E501 - parameters ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The parameters to use in the condition.. 
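Editor's note: ChannelEnum is a ModelSimple wrapper around a validated string, and Condition now types its verb field with the separate VerbEnum model, replacing the inline allowed_values block removed in this diff. A quick sketch of the enum wrapper; the invalid value is hypothetical and the exact exception type is the generated client's, so it is caught broadly here:

    from groundlight_openapi_client.model.channel_enum import ChannelEnum

    channel = ChannelEnum("EMAIL")  # "EMAIL" and "TEXT" are the allowed values
    print(channel.value)            # -> "EMAIL"

    try:
        ChannelEnum("CARRIER_PIGEON")  # hypothetical invalid value
    except Exception as exc:           # the generated model should reject it
        print(type(exc).__name__)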
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -180,6 +182,8 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.verb = verb + self.parameters = parameters for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -204,9 +208,13 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 ) @convert_js_args_to_python_args - def __init__(self, *args, **kwargs): # noqa: E501 + def __init__(self, verb, parameters, *args, **kwargs): # noqa: E501 """Condition - a model defined in OpenAPI + Args: + verb (VerbEnum): + parameters ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -238,8 +246,6 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - verb (str): The verb to use in the condition.. [optional] # noqa: E501 - parameters ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): The parameters to use in the condition.. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -266,6 +272,8 @@ def __init__(self, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.verb = verb + self.parameters = parameters for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/condition_request.py b/generated/groundlight_openapi_client/model/condition_request.py new file mode 100644 index 00000000..ab1ae410 --- /dev/null +++ b/generated/groundlight_openapi_client/model/condition_request.py @@ -0,0 +1,291 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +def lazy_import(): + from groundlight_openapi_client.model.verb_enum import VerbEnum + + globals()["VerbEnum"] = VerbEnum + + +class ConditionRequest(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. 
+ discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = {} + + validations = {} + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + lazy_import() + return ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + lazy_import() + return { + "verb": (VerbEnum,), # noqa: E501 + "parameters": ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + attribute_map = { + "verb": "verb", # noqa: E501 + "parameters": "parameters", # noqa: E501 + } + + read_only_vars = {} + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, verb, parameters, *args, **kwargs): # noqa: E501 + """ConditionRequest - a model defined in OpenAPI + + Args: + verb (VerbEnum): + parameters ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.verb = verb + self.parameters = parameters + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + return self + + required_properties = set( + [ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ] + ) + + @convert_js_args_to_python_args + def __init__(self, verb, parameters, *args, **kwargs): # noqa: E501 + """ConditionRequest - a model defined in OpenAPI + + Args: + verb (VerbEnum): + parameters ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}): + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.verb = verb + self.parameters = parameters + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError( + f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + "class with read only attributes." + ) diff --git a/generated/groundlight_openapi_client/model/user.py b/generated/groundlight_openapi_client/model/counting_result.py similarity index 91% rename from generated/groundlight_openapi_client/model/user.py rename to generated/groundlight_openapi_client/model/counting_result.py index d2633d88..7df7e575 100644 --- a/generated/groundlight_openapi_client/model/user.py +++ b/generated/groundlight_openapi_client/model/counting_result.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -30,7 +30,7 @@ from groundlight_openapi_client.exceptions import ApiAttributeError -class User(ModelNormal): +class CountingResult(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -89,7 +89,8 @@ def openapi_types(): and the value is attribute type. """ return { - "username": (str,), # noqa: E501 + "confidence": (float,), # noqa: E501 + "value": (int,), # noqa: E501 } @cached_property @@ -97,7 +98,8 @@ def discriminator(): return None attribute_map = { - "username": "username", # noqa: E501 + "confidence": "confidence", # noqa: E501 + "value": "value", # noqa: E501 } read_only_vars = {} @@ -106,11 +108,12 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, username, *args, **kwargs): # noqa: E501 - """User - a model defined in OpenAPI + def _from_openapi_data(cls, confidence, value, *args, **kwargs): # noqa: E501 + """CountingResult - a model defined in OpenAPI Args: - username (str): The user's username. 
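Editor's note: ConditionRequest mirrors Condition on the request side, with verb and parameters both required. A construction sketch, assuming VerbEnum still exposes the verbs from the old inline enum (e.g. CHANGED_TO):

    from groundlight_openapi_client.model.condition_request import ConditionRequest
    from groundlight_openapi_client.model.verb_enum import VerbEnum

    condition = ConditionRequest(
        verb=VerbEnum("CHANGED_TO"),  # assumed valid VerbEnum value
        parameters={"label": "YES"},  # free-form dict, per openapi_types
    )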
+ confidence (float): + value (int): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -171,7 +174,8 @@ def _from_openapi_data(cls, username, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.username = username + self.confidence = confidence + self.value = value for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -196,11 +200,12 @@ def _from_openapi_data(cls, username, *args, **kwargs): # noqa: E501 ) @convert_js_args_to_python_args - def __init__(self, username, *args, **kwargs): # noqa: E501 - """User - a model defined in OpenAPI + def __init__(self, confidence, value, *args, **kwargs): # noqa: E501 + """CountingResult - a model defined in OpenAPI Args: - username (str): The user's username. + confidence (float): + value (int): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -259,7 +264,8 @@ def __init__(self, username, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.username = username + self.confidence = confidence + self.value = value for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/detector.py b/generated/groundlight_openapi_client/model/detector.py index d0343e18..76fb9ecd 100644 --- a/generated/groundlight_openapi_client/model/detector.py +++ b/generated/groundlight_openapi_client/model/detector.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
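Editor's note: the old ClassificationResult, whose confidence was optional and validated to [0, 1], is split into BinaryClassificationResult and CountingResult; in both, confidence is now a required plain float with no range validation. A construction sketch:

    from groundlight_openapi_client.model.binary_classification_result import (
        BinaryClassificationResult,
    )
    from groundlight_openapi_client.model.counting_result import CountingResult

    binary = BinaryClassificationResult(confidence=0.87, label="YES")
    count = CountingResult(confidence=0.92, value=3)  # value: integer count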
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -121,11 +121,16 @@ def openapi_types(): "name": (str,), # noqa: E501 "query": (str,), # noqa: E501 "group_name": (str,), # noqa: E501 - "confidence_threshold": (float,), # noqa: E501 "metadata": ( {str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type, ), # noqa: E501 + "mode": (str,), # noqa: E501 + "mode_configuration": ( + {str: (bool, date, datetime, dict, float, int, list, str, none_type)}, + none_type, + ), # noqa: E501 + "confidence_threshold": (float,), # noqa: E501 } @cached_property @@ -139,8 +144,10 @@ def discriminator(): "name": "name", # noqa: E501 "query": "query", # noqa: E501 "group_name": "group_name", # noqa: E501 - "confidence_threshold": "confidence_threshold", # noqa: E501 "metadata": "metadata", # noqa: E501 + "mode": "mode", # noqa: E501 + "mode_configuration": "mode_configuration", # noqa: E501 + "confidence_threshold": "confidence_threshold", # noqa: E501 } read_only_vars = { @@ -149,13 +156,18 @@ def discriminator(): "created_at", # noqa: E501 "query", # noqa: E501 "group_name", # noqa: E501 + "metadata", # noqa: E501 + "mode", # noqa: E501 + "mode_configuration", # noqa: E501 } _composed_schemas = {} @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, id, type, created_at, name, query, group_name, *args, **kwargs): # noqa: E501 + def _from_openapi_data( + cls, id, type, created_at, name, query, group_name, metadata, mode, mode_configuration, *args, **kwargs + ): # noqa: E501 """Detector - a model defined in OpenAPI Args: @@ -165,6 +177,9 @@ def _from_openapi_data(cls, id, type, created_at, name, query, group_name, *args name (str): A short, descriptive name for the detector. query (str): A question about the image. group_name (str): Which group should this detector be part of? + metadata ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Metadata about the detector. + mode (str): + mode_configuration ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -197,8 +212,7 @@ def _from_openapi_data(cls, id, type, created_at, name, query, group_name, *args Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.75 # noqa: E501 - metadata ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): A dictionary of custom key/value metadata to associate with the detector (limited to 1KB). This is encoded as a URL-safe, base64-encoded JSON string.. [optional] # noqa: E501 + confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. 
[optional] if omitted the server will use the default value of 0.9 # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -233,6 +247,9 @@ def _from_openapi_data(cls, id, type, created_at, name, query, group_name, *args self.name = name self.query = query self.group_name = group_name + self.metadata = metadata + self.mode = mode + self.mode_configuration = mode_configuration for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -292,8 +309,7 @@ def __init__(self, name, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.75 # noqa: E501 - metadata ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): A dictionary of custom key/value metadata to associate with the detector (limited to 1KB). This is encoded as a URL-safe, base64-encoded JSON string.. [optional] # noqa: E501 + confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/detector_creation_input.py b/generated/groundlight_openapi_client/model/detector_creation_input_request.py similarity index 79% rename from generated/groundlight_openapi_client/model/detector_creation_input.py rename to generated/groundlight_openapi_client/model/detector_creation_input_request.py index b6eb8b1f..be4f690d 100644 --- a/generated/groundlight_openapi_client/model/detector_creation_input.py +++ b/generated/groundlight_openapi_client/model/detector_creation_input_request.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -30,7 +30,13 @@ from groundlight_openapi_client.exceptions import ApiAttributeError -class DetectorCreationInput(ModelNormal): +def lazy_import(): + from groundlight_openapi_client.model.mode_enum import ModeEnum + + globals()["ModeEnum"] = ModeEnum + + +class DetectorCreationInputRequest(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech @@ -59,19 +65,30 @@ class DetectorCreationInput(ModelNormal): validations = { ("name",): { "max_length": 200, + "min_length": 1, }, ("query",): { "max_length": 300, + "min_length": 1, }, ("group_name",): { "max_length": 100, + "min_length": 1, }, ("confidence_threshold",): { "inclusive_maximum": 1.0, "inclusive_minimum": 0.0, }, + ("patience_time",): { + "inclusive_maximum": 3600, + "inclusive_minimum": 0, + }, ("pipeline_config",): { - "max_length": 8192, + "max_length": 100, + }, + ("metadata",): { + "max_length": 1362, + "min_length": 1, }, } @@ -81,6 +98,7 @@ def additional_properties_type(): This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ + lazy_import() return ( bool, date, @@ -105,16 +123,37 @@ def openapi_types(): openapi_types (dict): The key is attribute name and the value is attribute type. """ + lazy_import() return { "name": (str,), # noqa: E501 "query": (str,), # noqa: E501 "group_name": (str,), # noqa: E501 "confidence_threshold": (float,), # noqa: E501 + "patience_time": (float,), # noqa: E501 "pipeline_config": ( str, none_type, ), # noqa: E501 - "metadata": ( + "metadata": (str,), # noqa: E501 + "mode": ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ), # noqa: E501 + "mode_configuration": ( + bool, + date, + datetime, + dict, + float, + int, + list, str, none_type, ), # noqa: E501 @@ -129,8 +168,11 @@ def discriminator(): "query": "query", # noqa: E501 "group_name": "group_name", # noqa: E501 "confidence_threshold": "confidence_threshold", # noqa: E501 + "patience_time": "patience_time", # noqa: E501 "pipeline_config": "pipeline_config", # noqa: E501 "metadata": "metadata", # noqa: E501 + "mode": "mode", # noqa: E501 + "mode_configuration": "mode_configuration", # noqa: E501 } read_only_vars = {} @@ -140,7 +182,7 @@ def discriminator(): @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, name, query, *args, **kwargs): # noqa: E501 - """DetectorCreationInput - a model defined in OpenAPI + """DetectorCreationInputRequest - a model defined in OpenAPI Args: name (str): A short, descriptive name for the detector. @@ -178,9 +220,12 @@ def _from_openapi_data(cls, name, query, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) group_name (str): Which group should this detector be part of?. [optional] # noqa: E501 - confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.75 # noqa: E501 - pipeline_config (str, none_type): (Advanced usage) Configuration to instantiate a specific prediction pipeline.. [optional] # noqa: E501 - metadata (str, none_type): A dictionary of custom key/value metadata to associate with the detector (limited to 1KB). This is encoded as a URL-safe, base64-encoded JSON string.. [optional] # noqa: E501 + confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 + patience_time (float): How long Groundlight will attempt to generate a confident prediction. 
[optional] if omitted the server will use the default value of 30.0 # noqa: E501 + pipeline_config (str, none_type): (Advanced usage) Configuration needed to instantiate a prediction pipeline.. [optional] # noqa: E501 + metadata (str): Base64-encoded metadata for the detector. This should be a JSON object with string keys. The size after encoding should not exceed 1362 bytes, corresponding to 1KiB before encoding.. [optional] # noqa: E501 + mode (bool, date, datetime, dict, float, int, list, str, none_type): Mode in which this detector will work. * `BINARY` - BINARY * `COUNT` - COUNT. [optional] # noqa: E501 + mode_configuration (bool, date, datetime, dict, float, int, list, str, none_type): Configuration for each detector mode.. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -236,7 +281,7 @@ def _from_openapi_data(cls, name, query, *args, **kwargs): # noqa: E501 @convert_js_args_to_python_args def __init__(self, name, query, *args, **kwargs): # noqa: E501 - """DetectorCreationInput - a model defined in OpenAPI + """DetectorCreationInputRequest - a model defined in OpenAPI Args: name (str): A short, descriptive name for the detector. @@ -274,9 +319,12 @@ def __init__(self, name, query, *args, **kwargs): # noqa: E501 through its discriminator because we passed in _visited_composed_classes = (Animal,) group_name (str): Which group should this detector be part of?. [optional] # noqa: E501 - confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.75 # noqa: E501 - pipeline_config (str, none_type): (Advanced usage) Configuration to instantiate a specific prediction pipeline.. [optional] # noqa: E501 - metadata (str, none_type): A dictionary of custom key/value metadata to associate with the detector (limited to 1KB). This is encoded as a URL-safe, base64-encoded JSON string.. [optional] # noqa: E501 + confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 + patience_time (float): How long Groundlight will attempt to generate a confident prediction. [optional] if omitted the server will use the default value of 30.0 # noqa: E501 + pipeline_config (str, none_type): (Advanced usage) Configuration needed to instantiate a prediction pipeline.. [optional] # noqa: E501 + metadata (str): Base64-encoded metadata for the detector. This should be a JSON object with string keys. The size after encoding should not exceed 1362 bytes, corresponding to 1KiB before encoding.. [optional] # noqa: E501 + mode (bool, date, datetime, dict, float, int, list, str, none_type): Mode in which this detector will work. * `BINARY` - BINARY * `COUNT` - COUNT. [optional] # noqa: E501 + mode_configuration (bool, date, datetime, dict, float, int, list, str, none_type): Configuration for each detector mode.. 
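Editor's note: DetectorCreationInputRequest tightens the old DetectorCreationInput: name/query/group_name gain min_length 1, pipeline_config's max_length drops from 8192 to 100, patience_time (0-3600 seconds, default 30.0) and mode/mode_configuration are added, the default confidence_threshold moves from 0.75 to 0.9, and metadata is now a base64-encoded JSON string capped at 1362 bytes. A creation sketch; the name and query are illustrative, and URL-safe base64 follows the wording of the older metadata docstring:

    import base64
    import json

    from groundlight_openapi_client.model.detector_creation_input_request import (
        DetectorCreationInputRequest,
    )

    # metadata: a JSON object with string keys, base64-encoded; <= 1362 bytes
    # after encoding (roughly 1 KiB before encoding).
    metadata = base64.urlsafe_b64encode(json.dumps({"site": "dock-3"}).encode()).decode()

    detector_input = DetectorCreationInputRequest(
        name="door-open",                        # hypothetical detector name
        query="Is the loading dock door open?",  # <= 300 chars
        confidence_threshold=0.9,                # matches the new default
        patience_time=30.0,                      # seconds, within [0, 3600]
        metadata=metadata,
    )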
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/detector_type_enum.py b/generated/groundlight_openapi_client/model/detector_type_enum.py index 65b6573a..fe089546 100644 --- a/generated/groundlight_openapi_client/model/detector_type_enum.py +++ b/generated/groundlight_openapi_client/model/detector_type_enum.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/image_query.py b/generated/groundlight_openapi_client/model/image_query.py index 3862cdb3..b3f80efe 100644 --- a/generated/groundlight_openapi_client/model/image_query.py +++ b/generated/groundlight_openapi_client/model/image_query.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -31,11 +31,13 @@ def lazy_import(): - from groundlight_openapi_client.model.classification_result import ClassificationResult + from groundlight_openapi_client.model.binary_classification_result import BinaryClassificationResult + from groundlight_openapi_client.model.counting_result import CountingResult from groundlight_openapi_client.model.image_query_type_enum import ImageQueryTypeEnum from groundlight_openapi_client.model.result_type_enum import ResultTypeEnum - globals()["ClassificationResult"] = ClassificationResult + globals()["BinaryClassificationResult"] = BinaryClassificationResult + globals()["CountingResult"] = CountingResult globals()["ImageQueryTypeEnum"] = ImageQueryTypeEnum globals()["ResultTypeEnum"] = ResultTypeEnum @@ -66,7 +68,12 @@ class ImageQuery(ModelNormal): allowed_values = {} - validations = {} + validations = { + ("confidence_threshold",): { + "inclusive_maximum": 1, + "inclusive_minimum": 0.5, + }, + } @cached_property def additional_properties_type(): @@ -101,6 +108,10 @@ def openapi_types(): """ lazy_import() return { + "metadata": ( + {str: (bool, date, datetime, dict, float, int, list, str, none_type)}, + none_type, + ), # noqa: E501 "id": (str,), # noqa: E501 "type": ( bool, @@ -138,10 +149,8 @@ def openapi_types(): str, none_type, ), # noqa: E501 - "metadata": ( - {str: (bool, date, datetime, dict, float, int, list, str, none_type)}, - none_type, - ), # noqa: E501 + "patience_time": (float,), # noqa: E501 + "confidence_threshold": (float,), # noqa: E501 } @cached_property @@ -149,6 +158,7 @@ def discriminator(): return None attribute_map = { + "metadata": "metadata", # noqa: E501 "id": "id", # noqa: E501 "type": "type", # noqa: E501 "created_at": "created_at", # noqa: E501 @@ -156,10 +166,12 @@ def discriminator(): "detector_id": "detector_id", # noqa: E501 "result_type": "result_type", # noqa: E501 "result": "result", # noqa: E501 - "metadata": "metadata", # noqa: E501 + "patience_time": 
"patience_time", # noqa: E501 + "confidence_threshold": "confidence_threshold", # noqa: E501 } read_only_vars = { + "metadata", # noqa: E501 "id", # noqa: E501 "type", # noqa: E501 "created_at", # noqa: E501 @@ -167,23 +179,28 @@ def discriminator(): "detector_id", # noqa: E501 "result_type", # noqa: E501 "result", # noqa: E501 - "metadata", # noqa: E501 + "patience_time", # noqa: E501 } _composed_schemas = {} @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, id, type, created_at, query, detector_id, result_type, *args, **kwargs): # noqa: E501 + def _from_openapi_data( + cls, metadata, id, type, created_at, query, detector_id, result_type, result, patience_time, *args, **kwargs + ): # noqa: E501 """ImageQuery - a model defined in OpenAPI Args: + metadata ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Metadata about the image query. id (str): A unique ID for this object. type (bool, date, datetime, dict, float, int, list, str, none_type): The type of this object. created_at (datetime): When was this detector created? query (str): A question about the image. detector_id (str): Which detector was used on this image query? result_type (bool, date, datetime, dict, float, int, list, str, none_type): What type of result are we returning? + result (bool, date, datetime, dict, float, int, list, str, none_type): The result of the image query. + patience_time (float): How long to wait for a confident response. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -216,8 +233,7 @@ def _from_openapi_data(cls, id, type, created_at, query, detector_id, result_typ Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - result (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 - metadata ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): A dictionary of custom key/value metadata to associate with the image query (limited to 1KB).. [optional] # noqa: E501 + confidence_threshold (float): Min confidence needed to accept the response of the image query.. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -246,12 +262,15 @@ def _from_openapi_data(cls, id, type, created_at, query, detector_id, result_typ self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.metadata = metadata self.id = id self.type = type self.created_at = created_at self.query = query self.detector_id = detector_id self.result_type = result_type + self.result = result + self.patience_time = patience_time for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -310,8 +329,7 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - result (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 - metadata ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): A dictionary of custom key/value metadata to associate with the image query (limited to 1KB).. [optional] # noqa: E501 + confidence_threshold (float): Min confidence needed to accept the response of the image query.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) diff --git a/generated/groundlight_openapi_client/model/image_query_type_enum.py b/generated/groundlight_openapi_client/model/image_query_type_enum.py index 2aeb4cef..7e3344bc 100644 --- a/generated/groundlight_openapi_client/model/image_query_type_enum.py +++ b/generated/groundlight_openapi_client/model/image_query_type_enum.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/model/inline_response200.py b/generated/groundlight_openapi_client/model/inline_response200.py index 531b7fad..64090215 100644 --- a/generated/groundlight_openapi_client/model/inline_response200.py +++ b/generated/groundlight_openapi_client/model/inline_response200.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -30,12 +30,6 @@ from groundlight_openapi_client.exceptions import ApiAttributeError -def lazy_import(): - from groundlight_openapi_client.model.note import Note - - globals()["Note"] = Note - - class InlineResponse200(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -70,7 +64,6 @@ def additional_properties_type(): This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ - lazy_import() return ( bool, date, @@ -95,10 +88,8 @@ def openapi_types(): openapi_types (dict): The key is attribute name and the value is attribute type. """ - lazy_import() return { - "customer": ([Note],), # noqa: E501 - "gl": ([Note],), # noqa: E501 + "username": (str,), # noqa: E501 } @cached_property @@ -106,8 +97,7 @@ def discriminator(): return None attribute_map = { - "customer": "CUSTOMER", # noqa: E501 - "gl": "GL", # noqa: E501 + "username": "username", # noqa: E501 } read_only_vars = {} @@ -116,13 +106,9 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, customer, gl, *args, **kwargs): # noqa: E501 + def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 """InlineResponse200 - a model defined in OpenAPI - Args: - customer ([Note]): - gl ([Note]): - Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -154,6 +140,7 @@ def _from_openapi_data(cls, customer, gl, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) + username (str): The user's username. 
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -182,8 +169,6 @@ def _from_openapi_data(cls, customer, gl, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.customer = customer - self.gl = gl for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -208,13 +193,9 @@ def _from_openapi_data(cls, customer, gl, *args, **kwargs): # noqa: E501 ) @convert_js_args_to_python_args - def __init__(self, customer, gl, *args, **kwargs): # noqa: E501 + def __init__(self, *args, **kwargs): # noqa: E501 """InlineResponse200 - a model defined in OpenAPI - Args: - customer ([Note]): - gl ([Note]): - Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -246,6 +227,7 @@ def __init__(self, customer, gl, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) + username (str): The user's username. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -272,8 +254,6 @@ def __init__(self, customer, gl, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.customer = customer - self.gl = gl for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/mode_enum.py b/generated/groundlight_openapi_client/model/mode_enum.py new file mode 100644 index 00000000..e1920b27 --- /dev/null +++ b/generated/groundlight_openapi_client/model/mode_enum.py @@ -0,0 +1,286 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +class ModeEnum(ModelSimple): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. 
+ """ + + allowed_values = { + ("value",): { + "BINARY": "BINARY", + "COUNT": "COUNT", + }, + } + + validations = {} + + additional_properties_type = None + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + "value": (str,), + } + + @cached_property + def discriminator(): + return None + + attribute_map = {} + + read_only_vars = set() + + _composed_schemas = None + + required_properties = set( + [ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ] + ) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): + """ModeEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `BINARY` - BINARY * `COUNT` - COUNT., must be one of ["BINARY", "COUNT", ] # noqa: E501 + + Keyword Args: + value (str): * `BINARY` - BINARY * `COUNT` - COUNT., must be one of ["BINARY", "COUNT", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
+ % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): + """ModeEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `BINARY` - BINARY * `COUNT` - COUNT., must be one of ["BINARY", "COUNT", ] # noqa: E501 + + Keyword Args: + value (str): * `BINARY` - BINARY * `COUNT` - COUNT., must be one of ["BINARY", "COUNT", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
+ % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + return self diff --git a/generated/groundlight_openapi_client/model/note.py b/generated/groundlight_openapi_client/model/note.py index f977d91c..713e4af8 100644 --- a/generated/groundlight_openapi_client/model/note.py +++ b/generated/groundlight_openapi_client/model/note.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -54,12 +54,7 @@ class Note(ModelNormal): as additional properties values. """ - allowed_values = { - ("note_type",): { - "CUSTOMER": "CUSTOMER", - "GL": "GL", - }, - } + allowed_values = {} validations = {} @@ -94,8 +89,8 @@ def openapi_types(): and the value is attribute type. """ return { + "detector_id": (str,), # noqa: E501 "content": (str,), # noqa: E501 - "note_type": (str,), # noqa: E501 } @cached_property @@ -103,19 +98,25 @@ def discriminator(): return None attribute_map = { + "detector_id": "detector_id", # noqa: E501 "content": "content", # noqa: E501 - "note_type": "note_type", # noqa: E501 } - read_only_vars = {} + read_only_vars = { + "detector_id", # noqa: E501 + } _composed_schemas = {} @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 + def _from_openapi_data(cls, detector_id, content, *args, **kwargs): # noqa: E501 """Note - a model defined in OpenAPI + Args: + detector_id (str): + content (str): Text content of the note. + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -147,8 +148,6 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - content (str): The text inside the note. [optional] # noqa: E501 - note_type (str): The type of note. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -177,6 +176,8 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.detector_id = detector_id + self.content = content for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -201,9 +202,11 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 ) @convert_js_args_to_python_args - def __init__(self, *args, **kwargs): # noqa: E501 + def __init__(self, content, *args, **kwargs): # noqa: E501 """Note - a model defined in OpenAPI + content (str): Text content of the note. 
+ Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -235,8 +238,6 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - content (str): The text inside the note. [optional] # noqa: E501 - note_type (str): The type of note. [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -263,6 +264,7 @@ def __init__(self, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.content = content for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/note_creation_input.py b/generated/groundlight_openapi_client/model/note_request.py similarity index 95% rename from generated/groundlight_openapi_client/model/note_creation_input.py rename to generated/groundlight_openapi_client/model/note_request.py index 2b285cba..b46e242e 100644 --- a/generated/groundlight_openapi_client/model/note_creation_input.py +++ b/generated/groundlight_openapi_client/model/note_request.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -30,7 +30,7 @@ from groundlight_openapi_client.exceptions import ApiAttributeError -class NoteCreationInput(ModelNormal): +class NoteRequest(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -56,7 +56,11 @@ class NoteCreationInput(ModelNormal): allowed_values = {} - validations = {} + validations = { + ("content",): { + "min_length": 1, + }, + } @cached_property def additional_properties_type(): @@ -107,10 +111,10 @@ def discriminator(): @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, content, *args, **kwargs): # noqa: E501 - """NoteCreationInput - a model defined in OpenAPI + """NoteRequest - a model defined in OpenAPI Args: - content (str): The text inside the note + content (str): Text content of the note. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -197,10 +201,10 @@ def _from_openapi_data(cls, content, *args, **kwargs): # noqa: E501 @convert_js_args_to_python_args def __init__(self, content, *args, **kwargs): # noqa: E501 - """NoteCreationInput - a model defined in OpenAPI + """NoteRequest - a model defined in OpenAPI Args: - content (str): The text inside the note + content (str): Text content of the note. 
Keyword Args: _check_type (bool): if True, values for parameters in openapi_types diff --git a/generated/groundlight_openapi_client/model/paginated_detector_list.py b/generated/groundlight_openapi_client/model/paginated_detector_list.py index fceaa7e2..c52532c4 100644 --- a/generated/groundlight_openapi_client/model/paginated_detector_list.py +++ b/generated/groundlight_openapi_client/model/paginated_detector_list.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -98,6 +98,7 @@ def openapi_types(): lazy_import() return { "count": (int,), # noqa: E501 + "results": ([Detector],), # noqa: E501 "next": ( str, none_type, @@ -106,7 +107,6 @@ def openapi_types(): str, none_type, ), # noqa: E501 - "results": ([Detector],), # noqa: E501 } @cached_property @@ -115,9 +115,9 @@ def discriminator(): attribute_map = { "count": "count", # noqa: E501 + "results": "results", # noqa: E501 "next": "next", # noqa: E501 "previous": "previous", # noqa: E501 - "results": "results", # noqa: E501 } read_only_vars = {} @@ -126,9 +126,13 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 + def _from_openapi_data(cls, count, results, *args, **kwargs): # noqa: E501 """PaginatedDetectorList - a model defined in OpenAPI + Args: + count (int): + results ([Detector]): + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -160,10 +164,8 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - count (int): [optional] # noqa: E501 next (str, none_type): [optional] # noqa: E501 previous (str, none_type): [optional] # noqa: E501 - results ([Detector]): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -192,6 +194,8 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.count = count + self.results = results for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -216,9 +220,13 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 ) @convert_js_args_to_python_args - def __init__(self, *args, **kwargs): # noqa: E501 + def __init__(self, count, results, *args, **kwargs): # noqa: E501 """PaginatedDetectorList - a model defined in OpenAPI + Args: + count (int): + results ([Detector]): + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -250,10 +258,8 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - count (int): [optional] # noqa: E501 next (str, none_type): [optional] # noqa: E501 previous (str, none_type): [optional] # noqa: E501 - results ([Detector]): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -280,6 +286,8 @@ 
def __init__(self, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.count = count + self.results = results for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/paginated_image_query_list.py b/generated/groundlight_openapi_client/model/paginated_image_query_list.py index 2a095dd4..06024eea 100644 --- a/generated/groundlight_openapi_client/model/paginated_image_query_list.py +++ b/generated/groundlight_openapi_client/model/paginated_image_query_list.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -98,6 +98,7 @@ def openapi_types(): lazy_import() return { "count": (int,), # noqa: E501 + "results": ([ImageQuery],), # noqa: E501 "next": ( str, none_type, @@ -106,7 +107,6 @@ def openapi_types(): str, none_type, ), # noqa: E501 - "results": ([ImageQuery],), # noqa: E501 } @cached_property @@ -115,9 +115,9 @@ def discriminator(): attribute_map = { "count": "count", # noqa: E501 + "results": "results", # noqa: E501 "next": "next", # noqa: E501 "previous": "previous", # noqa: E501 - "results": "results", # noqa: E501 } read_only_vars = {} @@ -126,9 +126,13 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 + def _from_openapi_data(cls, count, results, *args, **kwargs): # noqa: E501 """PaginatedImageQueryList - a model defined in OpenAPI + Args: + count (int): + results ([ImageQuery]): + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -160,10 +164,8 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - count (int): [optional] # noqa: E501 next (str, none_type): [optional] # noqa: E501 previous (str, none_type): [optional] # noqa: E501 - results ([ImageQuery]): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -192,6 +194,8 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.count = count + self.results = results for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -216,9 +220,13 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 ) @convert_js_args_to_python_args - def __init__(self, *args, **kwargs): # noqa: E501 + def __init__(self, count, results, *args, **kwargs): # noqa: E501 """PaginatedImageQueryList - a model defined in OpenAPI + Args: + count (int): + results ([ImageQuery]): + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -250,10 +258,8 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - count (int): 
[optional] # noqa: E501 next (str, none_type): [optional] # noqa: E501 previous (str, none_type): [optional] # noqa: E501 - results ([ImageQuery]): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -280,6 +286,8 @@ def __init__(self, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.count = count + self.results = results for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/paginated_rule_list.py b/generated/groundlight_openapi_client/model/paginated_rule_list.py index c6153bb8..b8d12979 100644 --- a/generated/groundlight_openapi_client/model/paginated_rule_list.py +++ b/generated/groundlight_openapi_client/model/paginated_rule_list.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -98,6 +98,7 @@ def openapi_types(): lazy_import() return { "count": (int,), # noqa: E501 + "results": ([Rule],), # noqa: E501 "next": ( str, none_type, @@ -106,7 +107,6 @@ def openapi_types(): str, none_type, ), # noqa: E501 - "results": ([Rule],), # noqa: E501 } @cached_property @@ -115,9 +115,9 @@ def discriminator(): attribute_map = { "count": "count", # noqa: E501 + "results": "results", # noqa: E501 "next": "next", # noqa: E501 "previous": "previous", # noqa: E501 - "results": "results", # noqa: E501 } read_only_vars = {} @@ -126,9 +126,13 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 + def _from_openapi_data(cls, count, results, *args, **kwargs): # noqa: E501 """PaginatedRuleList - a model defined in OpenAPI + Args: + count (int): + results ([Rule]): + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -160,10 +164,8 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - count (int): [optional] # noqa: E501 next (str, none_type): [optional] # noqa: E501 previous (str, none_type): [optional] # noqa: E501 - results ([Rule]): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -192,6 +194,8 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.count = count + self.results = results for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -216,9 +220,13 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 ) @convert_js_args_to_python_args - def __init__(self, *args, **kwargs): # noqa: E501 + def __init__(self, count, results, *args, **kwargs): # noqa: E501 """PaginatedRuleList - a model defined in OpenAPI + Args: + count (int): + results ([Rule]): + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -250,10 +258,8 @@ def __init__(self, 
*args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - count (int): [optional] # noqa: E501 next (str, none_type): [optional] # noqa: E501 previous (str, none_type): [optional] # noqa: E501 - results ([Rule]): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -280,6 +286,8 @@ def __init__(self, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.count = count + self.results = results for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/result_type_enum.py b/generated/groundlight_openapi_client/model/result_type_enum.py index 3dc5b04a..11a456e4 100644 --- a/generated/groundlight_openapi_client/model/result_type_enum.py +++ b/generated/groundlight_openapi_client/model/result_type_enum.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -53,6 +53,7 @@ class ResultTypeEnum(ModelSimple): allowed_values = { ("value",): { "BINARY_CLASSIFICATION": "binary_classification", + "COUNTING": "counting", }, } @@ -104,10 +105,10 @@ def __init__(self, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. Args: - args[0] (str): if omitted defaults to "binary_classification", must be one of ["binary_classification", ] # noqa: E501 + args[0] (str):, must be one of ["binary_classification", "counting", ] # noqa: E501 Keyword Args: - value (str): if omitted defaults to "binary_classification", must be one of ["binary_classification", ] # noqa: E501 + value (str):, must be one of ["binary_classification", "counting", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. @@ -148,7 +149,11 @@ def __init__(self, *args, **kwargs): args = list(args) value = args.pop(0) else: - value = "binary_classification" + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) _check_type = kwargs.pop("_check_type", True) _spec_property_naming = kwargs.pop("_spec_property_naming", False) @@ -192,10 +197,10 @@ def _from_openapi_data(cls, *args, **kwargs): Note that value can be passed either in args or in kwargs, but not in both. Args: - args[0] (str): if omitted defaults to "binary_classification", must be one of ["binary_classification", ] # noqa: E501 + args[0] (str):, must be one of ["binary_classification", "counting", ] # noqa: E501 Keyword Args: - value (str): if omitted defaults to "binary_classification", must be one of ["binary_classification", ] # noqa: E501 + value (str):, must be one of ["binary_classification", "counting", ] # noqa: E501 _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. 
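A note on the change above, since it can break callers: ResultTypeEnum previously fell back to "binary_classification" when constructed with no value, and the hunk in __init__ above (with a matching one in _from_openapi_data below) replaces that fallback with an ApiTypeError. A minimal sketch of the new contract, using only names that appear in this diff (an illustration, not part of the generated code):

from groundlight_openapi_client.model.result_type_enum import ResultTypeEnum
from groundlight_openapi_client.model_utils import ApiTypeError

ResultTypeEnum("binary_classification")  # still accepted
ResultTypeEnum(value="counting")         # newly allowed, matching COUNT-mode detectors

try:
    ResultTypeEnum()  # previously defaulted to "binary_classification"
except ApiTypeError:
    pass  # the value argument is now required

Code that relied on the implicit default must now pass the result type explicitly.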
@@ -238,7 +243,11 @@ def _from_openapi_data(cls, *args, **kwargs): args = list(args) value = args.pop(0) else: - value = "binary_classification" + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) _check_type = kwargs.pop("_check_type", True) _spec_property_naming = kwargs.pop("_spec_property_naming", False) diff --git a/generated/groundlight_openapi_client/model/rule.py b/generated/groundlight_openapi_client/model/rule.py index 851ade06..55d2fd3f 100644 --- a/generated/groundlight_openapi_client/model/rule.py +++ b/generated/groundlight_openapi_client/model/rule.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -33,14 +33,14 @@ def lazy_import(): from groundlight_openapi_client.model.action import Action from groundlight_openapi_client.model.condition import Condition - from groundlight_openapi_client.model.rule_base import RuleBase + from groundlight_openapi_client.model.snooze_time_unit_enum import SnoozeTimeUnitEnum globals()["Action"] = Action globals()["Condition"] = Condition - globals()["RuleBase"] = RuleBase + globals()["SnoozeTimeUnitEnum"] = SnoozeTimeUnitEnum -class Rule(ModelComposed): +class Rule(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -64,18 +64,14 @@ class Rule(ModelComposed): as additional properties values. 
""" - allowed_values = { - ("snooze_time_unit",): { - "SECONDS": "SECONDS", - "MINUTES": "MINUTES", - "HOURS": "HOURS", - "DAYS": "DAYS", - }, - } + allowed_values = {} validations = { ("name",): { - "max_length": 200, + "max_length": 44, + }, + ("snooze_time_value",): { + "inclusive_minimum": 0, }, } @@ -113,15 +109,26 @@ def openapi_types(): lazy_import() return { "id": (int,), # noqa: E501 - "detector_name": (str,), # noqa: E501 "detector_id": (str,), # noqa: E501 + "detector_name": (str,), # noqa: E501 "name": (str,), # noqa: E501 + "condition": (Condition,), # noqa: E501 + "action": (Action,), # noqa: E501 "enabled": (bool,), # noqa: E501 "snooze_time_enabled": (bool,), # noqa: E501 "snooze_time_value": (int,), # noqa: E501 - "snooze_time_unit": (str,), # noqa: E501 - "action": (Action,), # noqa: E501 - "condition": (Condition,), # noqa: E501 + "snooze_time_unit": ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ), # noqa: E501 + "human_review_required": (bool,), # noqa: E501 } @cached_property @@ -130,27 +137,39 @@ def discriminator(): attribute_map = { "id": "id", # noqa: E501 - "detector_name": "detector_name", # noqa: E501 "detector_id": "detector_id", # noqa: E501 + "detector_name": "detector_name", # noqa: E501 "name": "name", # noqa: E501 + "condition": "condition", # noqa: E501 + "action": "action", # noqa: E501 "enabled": "enabled", # noqa: E501 "snooze_time_enabled": "snooze_time_enabled", # noqa: E501 "snooze_time_value": "snooze_time_value", # noqa: E501 "snooze_time_unit": "snooze_time_unit", # noqa: E501 - "action": "action", # noqa: E501 - "condition": "condition", # noqa: E501 + "human_review_required": "human_review_required", # noqa: E501 } read_only_vars = { "id", # noqa: E501 + "detector_id", # noqa: E501 "detector_name", # noqa: E501 } + _composed_schemas = {} + @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 + def _from_openapi_data(cls, id, detector_id, detector_name, name, condition, action, *args, **kwargs): # noqa: E501 """Rule - a model defined in OpenAPI + Args: + id (int): + detector_id (str): + detector_name (str): + name (str): + condition (Condition): + action (Action): + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -182,16 +201,11 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - id (int): A unique ID for this object.. [optional] # noqa: E501 - detector_name (str): The name of the detector this rule is associated with.. [optional] # noqa: E501 - detector_id (str): Which detector should this rule be associated with?. [optional] # noqa: E501 - name (str): A short, descriptive name for the rule.. [optional] # noqa: E501 - enabled (bool): Is this rule enabled?. [optional] if omitted the server will use the default value of True # noqa: E501 - snooze_time_enabled (bool): Is this rule snooze time enabled?. [optional] if omitted the server will use the default value of False # noqa: E501 - snooze_time_value (int): How long to snooze the rule for (in seconds).. [optional] if omitted the server will use the default value of 1 # noqa: E501 - snooze_time_unit (str): What unit of time to use for the snooze time.. 
[optional] if omitted the server will use the default value of "DAYS" # noqa: E501 - action (Action): [optional] # noqa: E501 - condition (Condition): [optional] # noqa: E501 + enabled (bool): [optional] if omitted the server will use the default value of True # noqa: E501 + snooze_time_enabled (bool): [optional] if omitted the server will use the default value of False # noqa: E501 + snooze_time_value (int): [optional] if omitted the server will use the default value of 0 # noqa: E501 + snooze_time_unit (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 + human_review_required (bool): [optional] if omitted the server will use the default value of False # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -220,30 +234,22 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - constant_args = { - "_check_type": _check_type, - "_path_to_item": _path_to_item, - "_spec_property_naming": _spec_property_naming, - "_configuration": _configuration, - "_visited_composed_classes": self._visited_composed_classes, - } - composed_info = validate_get_composed_info(constant_args, kwargs, self) - self._composed_instances = composed_info[0] - self._var_name_to_model_instances = composed_info[1] - self._additional_properties_model_instances = composed_info[2] - discarded_args = composed_info[3] - + self.id = id + self.detector_id = detector_id + self.detector_name = detector_name + self.name = name + self.condition = condition + self.action = action for var_name, var_value in kwargs.items(): if ( - var_name in discarded_args + var_name not in self.attribute_map and self._configuration is not None and self._configuration.discard_unknown_keys - and self._additional_properties_model_instances + and self.additional_properties_type is None ): # discard variable. continue setattr(self, var_name, var_value) - return self required_properties = set( @@ -254,16 +260,17 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 "_path_to_item", "_configuration", "_visited_composed_classes", - "_composed_instances", - "_var_name_to_model_instances", - "_additional_properties_model_instances", ] ) @convert_js_args_to_python_args - def __init__(self, *args, **kwargs): # noqa: E501 + def __init__(self, name, condition, action, *args, **kwargs): # noqa: E501 """Rule - a model defined in OpenAPI + name (str): + condition (Condition): + action (Action): + Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be @@ -295,16 +302,11 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - id (int): A unique ID for this object.. [optional] # noqa: E501 - detector_name (str): The name of the detector this rule is associated with.. [optional] # noqa: E501 - detector_id (str): Which detector should this rule be associated with?. [optional] # noqa: E501 - name (str): A short, descriptive name for the rule.. [optional] # noqa: E501 - enabled (bool): Is this rule enabled?. [optional] if omitted the server will use the default value of True # noqa: E501 - snooze_time_enabled (bool): Is this rule snooze time enabled?. [optional] if omitted the server will use the default value of False # noqa: E501 - snooze_time_value (int): How long to snooze the rule for (in seconds).. 
[optional] if omitted the server will use the default value of 1 # noqa: E501 - snooze_time_unit (str): What unit of time to use for the snooze time.. [optional] if omitted the server will use the default value of "DAYS" # noqa: E501 - action (Action): [optional] # noqa: E501 - condition (Condition): [optional] # noqa: E501 + enabled (bool): [optional] if omitted the server will use the default value of True # noqa: E501 + snooze_time_enabled (bool): [optional] if omitted the server will use the default value of False # noqa: E501 + snooze_time_value (int): [optional] if omitted the server will use the default value of 0 # noqa: E501 + snooze_time_unit (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 + human_review_required (bool): [optional] if omitted the server will use the default value of False # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -331,25 +333,15 @@ def __init__(self, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - constant_args = { - "_check_type": _check_type, - "_path_to_item": _path_to_item, - "_spec_property_naming": _spec_property_naming, - "_configuration": _configuration, - "_visited_composed_classes": self._visited_composed_classes, - } - composed_info = validate_get_composed_info(constant_args, kwargs, self) - self._composed_instances = composed_info[0] - self._var_name_to_model_instances = composed_info[1] - self._additional_properties_model_instances = composed_info[2] - discarded_args = composed_info[3] - + self.name = name + self.condition = condition + self.action = action for var_name, var_value in kwargs.items(): if ( - var_name in discarded_args + var_name not in self.attribute_map and self._configuration is not None and self._configuration.discard_unknown_keys - and self._additional_properties_model_instances + and self.additional_properties_type is None ): # discard variable. continue @@ -359,21 +351,3 @@ def __init__(self, *args, **kwargs): # noqa: E501 f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " "class with read only attributes." ) - - @cached_property - def _composed_schemas(): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - lazy_import() - return { - "anyOf": [], - "allOf": [ - RuleBase, - ], - "oneOf": [], - } diff --git a/generated/groundlight_openapi_client/model/rule_creation_input.py b/generated/groundlight_openapi_client/model/rule_creation_input.py deleted file mode 100644 index 656294fe..00000000 --- a/generated/groundlight_openapi_client/model/rule_creation_input.py +++ /dev/null @@ -1,368 +0,0 @@ -""" - Groundlight API - - Easy Computer Vision powered by Natural Language # noqa: E501 - - The version of the OpenAPI document: 0.6.0 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech -""" - - -import re # noqa: F401 -import sys # noqa: F401 - -from groundlight_openapi_client.model_utils import ( # noqa: F401 - ApiTypeError, - ModelComposed, - ModelNormal, - ModelSimple, - cached_property, - change_keys_js_to_python, - convert_js_args_to_python_args, - date, - datetime, - file_type, - none_type, - validate_get_composed_info, - OpenApiModel, -) -from groundlight_openapi_client.exceptions import ApiAttributeError - - -def lazy_import(): - from groundlight_openapi_client.model.action import Action - from groundlight_openapi_client.model.condition import Condition - from groundlight_openapi_client.model.rule_base import RuleBase - - globals()["Action"] = Action - globals()["Condition"] = Condition - globals()["RuleBase"] = RuleBase - - -class RuleCreationInput(ModelComposed): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Attributes: - allowed_values (dict): The key is the tuple path to the attribute - and the for var_name this is (var_name,). The value is a dict - with a capitalized key describing the allowed value and an allowed - value. These dicts store the allowed enum values. - attribute_map (dict): The key is attribute name - and the value is json key in definition. - discriminator_value_class_map (dict): A dict to go from the discriminator - variable value to the discriminator class name. - validations (dict): The key is the tuple path to the attribute - and the for var_name this is (var_name,). The value is a dict - that stores validations for max_length, min_length, max_items, - min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, - inclusive_minimum, and regex. - additional_properties_type (tuple): A tuple of classes accepted - as additional properties values. - """ - - allowed_values = { - ("snooze_time_unit",): { - "SECONDS": "SECONDS", - "MINUTES": "MINUTES", - "HOURS": "HOURS", - "DAYS": "DAYS", - }, - } - - validations = { - ("name",): { - "max_length": 200, - }, - } - - @cached_property - def additional_properties_type(): - """ - This must be a method because a model may have properties that are - of type self, this must run after the class is loaded - """ - lazy_import() - return ( - bool, - date, - datetime, - dict, - float, - int, - list, - str, - none_type, - ) # noqa: E501 - - _nullable = False - - @cached_property - def openapi_types(): - """ - This must be a method because a model may have properties that are - of type self, this must run after the class is loaded - - Returns - openapi_types (dict): The key is attribute name - and the value is attribute type. 
- """ - lazy_import() - return { - "detector_id": (str,), # noqa: E501 - "name": (str,), # noqa: E501 - "enabled": (bool,), # noqa: E501 - "snooze_time_enabled": (bool,), # noqa: E501 - "snooze_time_value": (int,), # noqa: E501 - "snooze_time_unit": (str,), # noqa: E501 - "action": (Action,), # noqa: E501 - "condition": (Condition,), # noqa: E501 - } - - @cached_property - def discriminator(): - return None - - attribute_map = { - "detector_id": "detector_id", # noqa: E501 - "name": "name", # noqa: E501 - "enabled": "enabled", # noqa: E501 - "snooze_time_enabled": "snooze_time_enabled", # noqa: E501 - "snooze_time_value": "snooze_time_value", # noqa: E501 - "snooze_time_unit": "snooze_time_unit", # noqa: E501 - "action": "action", # noqa: E501 - "condition": "condition", # noqa: E501 - } - - read_only_vars = {} - - @classmethod - @convert_js_args_to_python_args - def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 - """RuleCreationInput - a model defined in OpenAPI - - Keyword Args: - _check_type (bool): if True, values for parameters in openapi_types - will be type checked and a TypeError will be - raised if the wrong type is input. - Defaults to True - _path_to_item (tuple/list): This is a list of keys or values to - drill down to the model in received_data - when deserializing a response - _spec_property_naming (bool): True if the variable names in the input data - are serialized names, as specified in the OpenAPI document. - False if the variable names in the input data - are pythonic names, e.g. snake case (default) - _configuration (Configuration): the instance to use when - deserializing a file_type parameter. - If passed, type conversion is attempted - If omitted no type conversion is done. - _visited_composed_classes (tuple): This stores a tuple of - classes that we have traveled through so that - if we see that class again we will not use its - discriminator again. - When traveling through a discriminator, the - composed schema that is - is traveled through is added to this set. - For example if Animal has a discriminator - petType and we pass in "Dog", and the class Dog - allOf includes Animal, we move through Animal - once using the discriminator, and pick Dog. - Then in Dog, we will make an instance of the - Animal class but this time we won't travel - through its discriminator because we passed in - _visited_composed_classes = (Animal,) - detector_id (str): Which detector should this rule be associated with?. [optional] # noqa: E501 - name (str): A short, descriptive name for the rule.. [optional] # noqa: E501 - enabled (bool): Is this rule enabled?. [optional] if omitted the server will use the default value of True # noqa: E501 - snooze_time_enabled (bool): Is this rule snooze time enabled?. [optional] if omitted the server will use the default value of False # noqa: E501 - snooze_time_value (int): How long to snooze the rule for (in seconds).. [optional] if omitted the server will use the default value of 1 # noqa: E501 - snooze_time_unit (str): What unit of time to use for the snooze time.. 
[optional] if omitted the server will use the default value of "DAYS" # noqa: E501 - action (Action): [optional] # noqa: E501 - condition (Condition): [optional] # noqa: E501 - """ - - _check_type = kwargs.pop("_check_type", True) - _spec_property_naming = kwargs.pop("_spec_property_naming", False) - _path_to_item = kwargs.pop("_path_to_item", ()) - _configuration = kwargs.pop("_configuration", None) - _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) - - self = super(OpenApiModel, cls).__new__(cls) - - if args: - raise ApiTypeError( - "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." - % ( - args, - self.__class__.__name__, - ), - path_to_item=_path_to_item, - valid_classes=(self.__class__,), - ) - - self._data_store = {} - self._check_type = _check_type - self._spec_property_naming = _spec_property_naming - self._path_to_item = _path_to_item - self._configuration = _configuration - self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - - constant_args = { - "_check_type": _check_type, - "_path_to_item": _path_to_item, - "_spec_property_naming": _spec_property_naming, - "_configuration": _configuration, - "_visited_composed_classes": self._visited_composed_classes, - } - composed_info = validate_get_composed_info(constant_args, kwargs, self) - self._composed_instances = composed_info[0] - self._var_name_to_model_instances = composed_info[1] - self._additional_properties_model_instances = composed_info[2] - discarded_args = composed_info[3] - - for var_name, var_value in kwargs.items(): - if ( - var_name in discarded_args - and self._configuration is not None - and self._configuration.discard_unknown_keys - and self._additional_properties_model_instances - ): - # discard variable. - continue - setattr(self, var_name, var_value) - - return self - - required_properties = set( - [ - "_data_store", - "_check_type", - "_spec_property_naming", - "_path_to_item", - "_configuration", - "_visited_composed_classes", - "_composed_instances", - "_var_name_to_model_instances", - "_additional_properties_model_instances", - ] - ) - - @convert_js_args_to_python_args - def __init__(self, *args, **kwargs): # noqa: E501 - """RuleCreationInput - a model defined in OpenAPI - - Keyword Args: - _check_type (bool): if True, values for parameters in openapi_types - will be type checked and a TypeError will be - raised if the wrong type is input. - Defaults to True - _path_to_item (tuple/list): This is a list of keys or values to - drill down to the model in received_data - when deserializing a response - _spec_property_naming (bool): True if the variable names in the input data - are serialized names, as specified in the OpenAPI document. - False if the variable names in the input data - are pythonic names, e.g. snake case (default) - _configuration (Configuration): the instance to use when - deserializing a file_type parameter. - If passed, type conversion is attempted - If omitted no type conversion is done. - _visited_composed_classes (tuple): This stores a tuple of - classes that we have traveled through so that - if we see that class again we will not use its - discriminator again. - When traveling through a discriminator, the - composed schema that is - is traveled through is added to this set. - For example if Animal has a discriminator - petType and we pass in "Dog", and the class Dog - allOf includes Animal, we move through Animal - once using the discriminator, and pick Dog. 
- Then in Dog, we will make an instance of the - Animal class but this time we won't travel - through its discriminator because we passed in - _visited_composed_classes = (Animal,) - detector_id (str): Which detector should this rule be associated with?. [optional] # noqa: E501 - name (str): A short, descriptive name for the rule.. [optional] # noqa: E501 - enabled (bool): Is this rule enabled?. [optional] if omitted the server will use the default value of True # noqa: E501 - snooze_time_enabled (bool): Is this rule snooze time enabled?. [optional] if omitted the server will use the default value of False # noqa: E501 - snooze_time_value (int): How long to snooze the rule for (in seconds).. [optional] if omitted the server will use the default value of 1 # noqa: E501 - snooze_time_unit (str): What unit of time to use for the snooze time.. [optional] if omitted the server will use the default value of "DAYS" # noqa: E501 - action (Action): [optional] # noqa: E501 - condition (Condition): [optional] # noqa: E501 - """ - - _check_type = kwargs.pop("_check_type", True) - _spec_property_naming = kwargs.pop("_spec_property_naming", False) - _path_to_item = kwargs.pop("_path_to_item", ()) - _configuration = kwargs.pop("_configuration", None) - _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) - - if args: - raise ApiTypeError( - "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." - % ( - args, - self.__class__.__name__, - ), - path_to_item=_path_to_item, - valid_classes=(self.__class__,), - ) - - self._data_store = {} - self._check_type = _check_type - self._spec_property_naming = _spec_property_naming - self._path_to_item = _path_to_item - self._configuration = _configuration - self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - - constant_args = { - "_check_type": _check_type, - "_path_to_item": _path_to_item, - "_spec_property_naming": _spec_property_naming, - "_configuration": _configuration, - "_visited_composed_classes": self._visited_composed_classes, - } - composed_info = validate_get_composed_info(constant_args, kwargs, self) - self._composed_instances = composed_info[0] - self._var_name_to_model_instances = composed_info[1] - self._additional_properties_model_instances = composed_info[2] - discarded_args = composed_info[3] - - for var_name, var_value in kwargs.items(): - if ( - var_name in discarded_args - and self._configuration is not None - and self._configuration.discard_unknown_keys - and self._additional_properties_model_instances - ): - # discard variable. - continue - setattr(self, var_name, var_value) - if var_name in self.read_only_vars: - raise ApiAttributeError( - f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " - "class with read only attributes." - ) - - @cached_property - def _composed_schemas(): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - lazy_import() - return { - "anyOf": [], - "allOf": [ - RuleBase, - ], - "oneOf": [], - } diff --git a/generated/groundlight_openapi_client/model/rule_base.py b/generated/groundlight_openapi_client/model/rule_request.py similarity index 77% rename from generated/groundlight_openapi_client/model/rule_base.py rename to generated/groundlight_openapi_client/model/rule_request.py index 24ec181d..034182e3 100644 --- a/generated/groundlight_openapi_client/model/rule_base.py +++ b/generated/groundlight_openapi_client/model/rule_request.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -31,14 +31,16 @@ def lazy_import(): - from groundlight_openapi_client.model.action import Action - from groundlight_openapi_client.model.condition import Condition + from groundlight_openapi_client.model.action_request import ActionRequest + from groundlight_openapi_client.model.condition_request import ConditionRequest + from groundlight_openapi_client.model.snooze_time_unit_enum import SnoozeTimeUnitEnum - globals()["Action"] = Action - globals()["Condition"] = Condition + globals()["ActionRequest"] = ActionRequest + globals()["ConditionRequest"] = ConditionRequest + globals()["SnoozeTimeUnitEnum"] = SnoozeTimeUnitEnum -class RuleBase(ModelNormal): +class RuleRequest(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -62,18 +64,15 @@ class RuleBase(ModelNormal): as additional properties values. 
""" - allowed_values = { - ("snooze_time_unit",): { - "SECONDS": "SECONDS", - "MINUTES": "MINUTES", - "HOURS": "HOURS", - "DAYS": "DAYS", - }, - } + allowed_values = {} validations = { ("name",): { - "max_length": 200, + "max_length": 44, + "min_length": 1, + }, + ("snooze_time_value",): { + "inclusive_minimum": 0, }, } @@ -110,14 +109,24 @@ def openapi_types(): """ lazy_import() return { - "detector_id": (str,), # noqa: E501 "name": (str,), # noqa: E501 + "condition": (ConditionRequest,), # noqa: E501 + "action": (ActionRequest,), # noqa: E501 "enabled": (bool,), # noqa: E501 "snooze_time_enabled": (bool,), # noqa: E501 "snooze_time_value": (int,), # noqa: E501 - "snooze_time_unit": (str,), # noqa: E501 - "action": (Action,), # noqa: E501 - "condition": (Condition,), # noqa: E501 + "snooze_time_unit": ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ), # noqa: E501 + "human_review_required": (bool,), # noqa: E501 } @cached_property @@ -125,14 +134,14 @@ def discriminator(): return None attribute_map = { - "detector_id": "detector_id", # noqa: E501 "name": "name", # noqa: E501 + "condition": "condition", # noqa: E501 + "action": "action", # noqa: E501 "enabled": "enabled", # noqa: E501 "snooze_time_enabled": "snooze_time_enabled", # noqa: E501 "snooze_time_value": "snooze_time_value", # noqa: E501 "snooze_time_unit": "snooze_time_unit", # noqa: E501 - "action": "action", # noqa: E501 - "condition": "condition", # noqa: E501 + "human_review_required": "human_review_required", # noqa: E501 } read_only_vars = {} @@ -141,8 +150,13 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 - """RuleBase - a model defined in OpenAPI + def _from_openapi_data(cls, name, condition, action, *args, **kwargs): # noqa: E501 + """RuleRequest - a model defined in OpenAPI + + Args: + name (str): + condition (ConditionRequest): + action (ActionRequest): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -175,14 +189,11 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - detector_id (str): Which detector should this rule be associated with?. [optional] # noqa: E501 - name (str): A short, descriptive name for the rule.. [optional] # noqa: E501 - enabled (bool): Is this rule enabled?. [optional] if omitted the server will use the default value of True # noqa: E501 - snooze_time_enabled (bool): Is this rule snooze time enabled?. [optional] if omitted the server will use the default value of False # noqa: E501 - snooze_time_value (int): How long to snooze the rule for (in seconds).. [optional] if omitted the server will use the default value of 1 # noqa: E501 - snooze_time_unit (str): What unit of time to use for the snooze time.. 
[optional] if omitted the server will use the default value of "DAYS" # noqa: E501 - action (Action): [optional] # noqa: E501 - condition (Condition): [optional] # noqa: E501 + enabled (bool): [optional] if omitted the server will use the default value of True # noqa: E501 + snooze_time_enabled (bool): [optional] if omitted the server will use the default value of False # noqa: E501 + snooze_time_value (int): [optional] if omitted the server will use the default value of 0 # noqa: E501 + snooze_time_unit (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 + human_review_required (bool): [optional] if omitted the server will use the default value of False # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -211,6 +222,9 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.name = name + self.condition = condition + self.action = action for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -235,8 +249,13 @@ def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 ) @convert_js_args_to_python_args - def __init__(self, *args, **kwargs): # noqa: E501 - """RuleBase - a model defined in OpenAPI + def __init__(self, name, condition, action, *args, **kwargs): # noqa: E501 + """RuleRequest - a model defined in OpenAPI + + Args: + name (str): + condition (ConditionRequest): + action (ActionRequest): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -269,14 +288,11 @@ def __init__(self, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - detector_id (str): Which detector should this rule be associated with?. [optional] # noqa: E501 - name (str): A short, descriptive name for the rule.. [optional] # noqa: E501 - enabled (bool): Is this rule enabled?. [optional] if omitted the server will use the default value of True # noqa: E501 - snooze_time_enabled (bool): Is this rule snooze time enabled?. [optional] if omitted the server will use the default value of False # noqa: E501 - snooze_time_value (int): How long to snooze the rule for (in seconds).. [optional] if omitted the server will use the default value of 1 # noqa: E501 - snooze_time_unit (str): What unit of time to use for the snooze time.. 
[optional] if omitted the server will use the default value of "DAYS" # noqa: E501 - action (Action): [optional] # noqa: E501 - condition (Condition): [optional] # noqa: E501 + enabled (bool): [optional] if omitted the server will use the default value of True # noqa: E501 + snooze_time_enabled (bool): [optional] if omitted the server will use the default value of False # noqa: E501 + snooze_time_value (int): [optional] if omitted the server will use the default value of 0 # noqa: E501 + snooze_time_unit (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501 + human_review_required (bool): [optional] if omitted the server will use the default value of False # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -303,6 +319,9 @@ def __init__(self, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.name = name + self.condition = condition + self.action = action for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py b/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py new file mode 100644 index 00000000..0d4156a8 --- /dev/null +++ b/generated/groundlight_openapi_client/model/snooze_time_unit_enum.py @@ -0,0 +1,288 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +class SnoozeTimeUnitEnum(ModelSimple): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + ("value",): { + "DAYS": "DAYS", + "HOURS": "HOURS", + "MINUTES": "MINUTES", + "SECONDS": "SECONDS", + }, + } + + validations = {} + + additional_properties_type = None + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. 
+ """ + return { + "value": (str,), + } + + @cached_property + def discriminator(): + return None + + attribute_map = {} + + read_only_vars = set() + + _composed_schemas = None + + required_properties = set( + [ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ] + ) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): + """SnoozeTimeUnitEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `DAYS` - DAYS * `HOURS` - HOURS * `MINUTES` - MINUTES * `SECONDS` - SECONDS., must be one of ["DAYS", "HOURS", "MINUTES", "SECONDS", ] # noqa: E501 + + Keyword Args: + value (str): * `DAYS` - DAYS * `HOURS` - HOURS * `MINUTES` - MINUTES * `SECONDS` - SECONDS., must be one of ["DAYS", "HOURS", "MINUTES", "SECONDS", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
+ % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): + """SnoozeTimeUnitEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `DAYS` - DAYS * `HOURS` - HOURS * `MINUTES` - MINUTES * `SECONDS` - SECONDS., must be one of ["DAYS", "HOURS", "MINUTES", "SECONDS", ] # noqa: E501 + + Keyword Args: + value (str): * `DAYS` - DAYS * `HOURS` - HOURS * `MINUTES` - MINUTES * `SECONDS` - SECONDS., must be one of ["DAYS", "HOURS", "MINUTES", "SECONDS", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
+ % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + return self diff --git a/generated/groundlight_openapi_client/model/verb_enum.py b/generated/groundlight_openapi_client/model/verb_enum.py new file mode 100644 index 00000000..792781f2 --- /dev/null +++ b/generated/groundlight_openapi_client/model/verb_enum.py @@ -0,0 +1,289 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +class VerbEnum(ModelSimple): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + ("value",): { + "ANSWERED_CONSECUTIVELY": "ANSWERED_CONSECUTIVELY", + "ANSWERED_WITHIN_TIME": "ANSWERED_WITHIN_TIME", + "CHANGED_TO": "CHANGED_TO", + "NO_CHANGE": "NO_CHANGE", + "NO_QUERIES": "NO_QUERIES", + }, + } + + validations = {} + + additional_properties_type = None + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. 
+ """ + return { + "value": (str,), + } + + @cached_property + def discriminator(): + return None + + attribute_map = {} + + read_only_vars = set() + + _composed_schemas = None + + required_properties = set( + [ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ] + ) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): + """VerbEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME * `CHANGED_TO` - CHANGED_TO * `NO_CHANGE` - NO_CHANGE * `NO_QUERIES` - NO_QUERIES., must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] # noqa: E501 + + Keyword Args: + value (str): * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME * `CHANGED_TO` - CHANGED_TO * `NO_CHANGE` - NO_CHANGE * `NO_QUERIES` - NO_QUERIES., must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
+ % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): + """VerbEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME * `CHANGED_TO` - CHANGED_TO * `NO_CHANGE` - NO_CHANGE * `NO_QUERIES` - NO_QUERIES., must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] # noqa: E501 + + Keyword Args: + value (str): * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME * `CHANGED_TO` - CHANGED_TO * `NO_CHANGE` - NO_CHANGE * `NO_QUERIES` - NO_QUERIES., must be one of ["ANSWERED_CONSECUTIVELY", "ANSWERED_WITHIN_TIME", "CHANGED_TO", "NO_CHANGE", "NO_QUERIES", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + return self diff --git a/generated/groundlight_openapi_client/model_utils.py b/generated/groundlight_openapi_client/model_utils.py index f362705b..b9176a65 100644 --- a/generated/groundlight_openapi_client/model_utils.py +++ b/generated/groundlight_openapi_client/model_utils.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/groundlight_openapi_client/models/__init__.py b/generated/groundlight_openapi_client/models/__init__.py index d8c8de05..cb0c2ca1 100644 --- a/generated/groundlight_openapi_client/models/__init__.py +++ b/generated/groundlight_openapi_client/models/__init__.py @@ -10,21 +10,27 @@ # sys.setrecursionlimit(n) from groundlight_openapi_client.model.action import Action -from groundlight_openapi_client.model.classification_result import ClassificationResult +from groundlight_openapi_client.model.action_request import ActionRequest +from groundlight_openapi_client.model.all_notes import AllNotes +from groundlight_openapi_client.model.binary_classification_result import BinaryClassificationResult +from groundlight_openapi_client.model.channel_enum import ChannelEnum from groundlight_openapi_client.model.condition import Condition +from groundlight_openapi_client.model.condition_request import ConditionRequest +from groundlight_openapi_client.model.counting_result import CountingResult from groundlight_openapi_client.model.detector import Detector -from groundlight_openapi_client.model.detector_creation_input import DetectorCreationInput +from groundlight_openapi_client.model.detector_creation_input_request import DetectorCreationInputRequest from groundlight_openapi_client.model.detector_type_enum import DetectorTypeEnum from groundlight_openapi_client.model.image_query import ImageQuery from groundlight_openapi_client.model.image_query_type_enum import ImageQueryTypeEnum from groundlight_openapi_client.model.inline_response200 import InlineResponse200 +from groundlight_openapi_client.model.mode_enum import ModeEnum from groundlight_openapi_client.model.note import Note -from groundlight_openapi_client.model.note_creation_input import NoteCreationInput +from groundlight_openapi_client.model.note_request import NoteRequest from groundlight_openapi_client.model.paginated_detector_list import PaginatedDetectorList from groundlight_openapi_client.model.paginated_image_query_list import PaginatedImageQueryList from groundlight_openapi_client.model.paginated_rule_list import PaginatedRuleList from groundlight_openapi_client.model.result_type_enum import ResultTypeEnum from groundlight_openapi_client.model.rule import Rule -from groundlight_openapi_client.model.rule_base import RuleBase -from groundlight_openapi_client.model.rule_creation_input import RuleCreationInput -from groundlight_openapi_client.model.user import User +from groundlight_openapi_client.model.rule_request import RuleRequest +from groundlight_openapi_client.model.snooze_time_unit_enum import SnoozeTimeUnitEnum +from groundlight_openapi_client.model.verb_enum import VerbEnum diff --git a/generated/groundlight_openapi_client/rest.py b/generated/groundlight_openapi_client/rest.py index 802db132..57fbacaf 100644 --- a/generated/groundlight_openapi_client/rest.py +++ b/generated/groundlight_openapi_client/rest.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/model.py b/generated/model.py index 6899cc8e..b0858ce7 100644 --- a/generated/model.py +++ b/generated/model.py @@ -1,88 +1,56 @@ # generated by datamodel-codegen: # filename: public-api.yaml -# timestamp: 2024-05-28T18:14:08+00:00 +# timestamp: 2024-06-03T17:05:22+00:00 from __future__ import annotations from datetime import datetime from enum import Enum -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union -from pydantic import AnyUrl, BaseModel, Field, confloat, constr +from pydantic import AnyUrl, BaseModel, Field, confloat, conint, constr -class User(BaseModel): - username: str = Field(..., description="The user's username.") - - -class ClassificationResult(BaseModel): - confidence: Optional[confloat(ge=0.0, le=1.0)] = Field( - None, description="On a scale of 0 to 1, how confident are we in the predicted label?" - ) - label: str = Field(..., description="What is the predicted label?") - - -class DetectorCreationInput(BaseModel): - name: constr(max_length=200) = Field(..., description="A short, descriptive name for the detector.") - query: constr(max_length=300) = Field(..., description="A question about the image.") - group_name: Optional[constr(max_length=100)] = Field( - None, description="Which group should this detector be part of?" - ) - confidence_threshold: Optional[confloat(ge=0.0, le=1.0)] = Field( - 0.75, - description=( - "If the detector's prediction is below this confidence threshold, send the image query for human review." - ), - ) - pipeline_config: Optional[constr(max_length=8192)] = Field( - None, description="(Advanced usage) Configuration to instantiate a specific prediction pipeline." - ) - metadata: Optional[str] = Field( - None, - description=( - "A dictionary of custom key/value metadata to associate with the detector (limited to 1KB). This is encoded" - " as a URL-safe, base64-encoded JSON string." 
- ), - ) +class ChannelEnum(Enum): + EMAIL = "EMAIL" + TEXT = "TEXT" class DetectorTypeEnum(Enum): detector = "detector" -class SnoozeTimeUnit(Enum): - SECONDS = "SECONDS" - MINUTES = "MINUTES" - HOURS = "HOURS" - DAYS = "DAYS" +class ImageQueryTypeEnum(Enum): + image_query = "image_query" -class Channel(Enum): - EMAIL = "EMAIL" - TEXT = "TEXT" +class ModeEnum(Enum): + BINARY = "BINARY" + COUNT = "COUNT" -class Action(BaseModel): - channel: Optional[Channel] = Field(None, description="The channel to send the action to.") - include_image: Optional[bool] = Field(None, description="Should the image be included in the action?") - recipient: Optional[str] = Field(None, description="The recipient of the action.") +class Note(BaseModel): + detector_id: str + content: str = Field(..., description="Text content of the note.") -class NoteType(Enum): - CUSTOMER = "CUSTOMER" - GL = "GL" +class NoteRequest(BaseModel): + content: constr(min_length=1) = Field(..., description="Text content of the note.") -class Note(BaseModel): - content: Optional[str] = Field(None, description="The text inside the note") - note_type: Optional[NoteType] = Field(None, description="The type of note") +class ResultTypeEnum(Enum): + binary_classification = "binary_classification" + counting = "counting" -class NoteCreationInput(BaseModel): - content: str = Field(..., description="The text inside the note") +class SnoozeTimeUnitEnum(Enum): + DAYS = "DAYS" + HOURS = "HOURS" + MINUTES = "MINUTES" + SECONDS = "SECONDS" -class Verb(Enum): +class VerbEnum(Enum): ANSWERED_CONSECUTIVELY = "ANSWERED_CONSECUTIVELY" ANSWERED_WITHIN_TIME = "ANSWERED_WITHIN_TIME" CHANGED_TO = "CHANGED_TO" @@ -90,17 +58,41 @@ class Verb(Enum): NO_QUERIES = "NO_QUERIES" -class Condition(BaseModel): - verb: Optional[Verb] = Field(None, description="The verb to use in the condition.") - parameters: Optional[Dict[str, Any]] = Field(None, description="The parameters to use in the condition.") +class BinaryClassificationResult(BaseModel): + confidence: Any + label: str -class ImageQueryTypeEnum(Enum): - image_query = "image_query" +class CountingResult(BaseModel): + confidence: Any + value: int -class ResultTypeEnum(Enum): - binary_classification = "binary_classification" +class Action(BaseModel): + channel: ChannelEnum + recipient: str + include_image: bool + + +class ActionRequest(BaseModel): + channel: ChannelEnum + recipient: constr(min_length=1) + include_image: bool + + +class AllNotes(BaseModel): + CUSTOMER: List[Note] + GL: List[Note] + + +class Condition(BaseModel): + verb: VerbEnum + parameters: Dict[str, Any] + + +class ConditionRequest(BaseModel): + verb: VerbEnum + parameters: Dict[str, Any] class Detector(BaseModel): @@ -110,73 +102,106 @@ class Detector(BaseModel): name: constr(max_length=200) = Field(..., description="A short, descriptive name for the detector.") query: str = Field(..., description="A question about the image.") group_name: str = Field(..., description="Which group should this detector be part of?") - confidence_threshold: Optional[confloat(ge=0.0, le=1.0)] = Field( - 0.75, + confidence_threshold: confloat(ge=0.0, le=1.0) = Field( + 0.9, + description=( + "If the detector's prediction is below this confidence threshold, send the image query for human review." + ), + ) + metadata: Optional[Dict[str, Any]] = Field(..., description="Metadata about the detector.") + mode: str + mode_configuration: Optional[Dict[str, Any]] = Field(...) 
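# A minimal usage sketch of the regenerated Detector model above, assuming the
# datamodel-codegen output (generated/model.py) is importable as `model`; the
# IDs and field values here are illustrative, not taken from the spec. It shows
# that nullable fields such as `metadata` and `mode_configuration` are now typed
# Optional[...] yet declared with Field(...), i.e. required-but-nullable: callers
# must pass them explicitly, even as None.
from model import Detector  # assumed import path for generated/model.py

detector = Detector(
    id="det_123",                       # illustrative ID
    type="detector",                    # coerced to DetectorTypeEnum by pydantic
    created_at="2024-06-03T17:05:22Z",  # ISO string parsed into a datetime
    name="dock-door",
    query="Is the dock door open?",
    group_name="warehouse",
    metadata=None,                      # required key, nullable value
    mode="BINARY",
    mode_configuration=None,            # likewise required-but-nullable
)
assert detector.confidence_threshold == 0.9  # default carried over from the spec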
+ + +class DetectorCreationInputRequest(BaseModel): + name: constr(min_length=1, max_length=200) = Field(..., description="A short, descriptive name for the detector.") + query: constr(min_length=1, max_length=300) = Field(..., description="A question about the image.") + group_name: Optional[constr(min_length=1, max_length=100)] = Field( + None, description="Which group should this detector be part of?" + ) + confidence_threshold: confloat(ge=0.0, le=1.0) = Field( + 0.9, description=( "If the detector's prediction is below this confidence threshold, send the image query for human review." ), ) - metadata: Optional[Dict[str, Any]] = Field( + patience_time: confloat(ge=0.0, le=3600.0) = Field( + 30.0, description="How long Groundlight will attempt to generate a confident prediction" + ) + pipeline_config: Optional[constr(max_length=100)] = Field( + None, description="(Advanced usage) Configuration needed to instantiate a prediction pipeline." + ) + metadata: Optional[constr(min_length=1, max_length=1362)] = Field( None, description=( - "A dictionary of custom key/value metadata to associate with the detector (limited to 1KB). This is encoded" - " as a URL-safe, base64-encoded JSON string." + "Base64-encoded metadata for the detector. This should be a JSON object with string keys. The size after" + " encoding should not exceed 1362 bytes, corresponding to 1KiB before encoding." ), ) - - -class RuleBase(BaseModel): - detector_id: Optional[str] = Field(None, description="Which detector should this rule be associated with?") - name: Optional[constr(max_length=200)] = Field(None, description="A short, descriptive name for the rule.") - enabled: Optional[bool] = Field(True, description="Is this rule enabled?") - snooze_time_enabled: Optional[bool] = Field(False, description="Is this rule snooze time enabled?") - snooze_time_value: Optional[int] = Field(1, description="How long to snooze the rule for (in seconds).") - snooze_time_unit: Optional[SnoozeTimeUnit] = Field( - "DAYS", description="What unit of time to use for the snooze time." 
+ mode: ModeEnum = Field( + "BINARY", description="Mode in which this detector will work.\n\n* `BINARY` - BINARY\n* `COUNT` - COUNT" ) - action: Optional[Action] = Field(None, description="What action should be taken when the rule is triggered?") - condition: Optional[Condition] = Field(None, description="What condition should trigger the rule?") - - -class RuleCreationInput(RuleBase): - pass - - -class Rule(RuleBase): - id: Optional[int] = Field(None, description="A unique ID for this object.") - detector_name: Optional[str] = Field(None, description="The name of the detector this rule is associated with.") + mode_configuration: Optional[Any] = Field(None, description="Configuration for each detector mode.") class ImageQuery(BaseModel): + metadata: Optional[Dict[str, Any]] = Field(..., description="Metadata about the image query.") id: str = Field(..., description="A unique ID for this object.") type: ImageQueryTypeEnum = Field(..., description="The type of this object.") created_at: datetime = Field(..., description="When was this detector created?") query: str = Field(..., description="A question about the image.") detector_id: str = Field(..., description="Which detector was used on this image query?") result_type: ResultTypeEnum = Field(..., description="What type of result are we returning?") - result: Optional[ClassificationResult] = None - metadata: Optional[Dict[str, Any]] = Field( - None, - description="A dictionary of custom key/value metadata to associate with the image query (limited to 1KB).", + result: Optional[Union[BinaryClassificationResult, CountingResult]] = Field( + ..., description="The result of the image query." + ) + patience_time: float = Field(..., description="How long to wait for a confident response.") + confidence_threshold: Optional[confloat(ge=0.5, le=1.0)] = Field( + None, description="Min confidence needed to accept the response of the image query." 
) - - -class PaginatedRuleList(BaseModel): - count: Optional[int] = None - next: Optional[AnyUrl] = None - previous: Optional[AnyUrl] = None - results: Optional[List[Rule]] = None class PaginatedDetectorList(BaseModel): - count: Optional[int] = Field(None, example=123) + count: int = Field(..., example=123) next: Optional[AnyUrl] = Field(None, example="http://api.example.org/accounts/?page=4") previous: Optional[AnyUrl] = Field(None, example="http://api.example.org/accounts/?page=2") - results: Optional[List[Detector]] = None + results: List[Detector] class PaginatedImageQueryList(BaseModel): - count: Optional[int] = Field(None, example=123) + count: int = Field(..., example=123) + next: Optional[AnyUrl] = Field(None, example="http://api.example.org/accounts/?page=4") + previous: Optional[AnyUrl] = Field(None, example="http://api.example.org/accounts/?page=2") + results: List[ImageQuery] + + +class Rule(BaseModel): + id: int + detector_id: str + detector_name: str + name: constr(max_length=44) + enabled: bool = True + snooze_time_enabled: bool = False + snooze_time_value: conint(ge=0) = 0 + snooze_time_unit: SnoozeTimeUnitEnum = "DAYS" + human_review_required: bool = False + condition: Condition + action: Action + + +class RuleRequest(BaseModel): + name: constr(min_length=1, max_length=44) + enabled: bool = True + snooze_time_enabled: bool = False + snooze_time_value: conint(ge=0) = 0 + snooze_time_unit: SnoozeTimeUnitEnum = "DAYS" + human_review_required: bool = False + condition: ConditionRequest + action: ActionRequest + + +class PaginatedRuleList(BaseModel): + count: int = Field(..., example=123) next: Optional[AnyUrl] = Field(None, example="http://api.example.org/accounts/?page=4") previous: Optional[AnyUrl] = Field(None, example="http://api.example.org/accounts/?page=2") - results: Optional[List[ImageQuery]] = None + results: List[Rule] diff --git a/generated/setup.py b/generated/setup.py index 779cfb15..f755be99 100644 --- a/generated/setup.py +++ b/generated/setup.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -29,7 +29,7 @@ name=NAME, version=VERSION, description="Groundlight API", - author="Questions?", + author="support@groundlight.ai", author_email="support@groundlight.ai", url="", keywords=["OpenAPI", "OpenAPI-Generator", "Groundlight API"], @@ -38,6 +38,6 @@ packages=find_packages(exclude=["test", "tests"]), include_package_data=True, long_description="""\ - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 """, ) diff --git a/generated/test/test_action_request.py b/generated/test/test_action_request.py new file mode 100644 index 00000000..423c03d4 --- /dev/null +++ b/generated/test/test_action_request.py @@ -0,0 +1,39 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.channel_enum import ChannelEnum + +globals()["ChannelEnum"] = ChannelEnum +from groundlight_openapi_client.model.action_request import ActionRequest + + +class TestActionRequest(unittest.TestCase): + """ActionRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testActionRequest(self): + """Test ActionRequest""" + # FIXME: construct object with mandatory attributes with example values + # model = ActionRequest() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_actions_api.py b/generated/test/test_actions_api.py new file mode 100644 index 00000000..cca4d4d5 --- /dev/null +++ b/generated/test/test_actions_api.py @@ -0,0 +1,49 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.api.actions_api import ActionsApi # noqa: E501 + + +class TestActionsApi(unittest.TestCase): + """ActionsApi unit test stubs""" + + def setUp(self): + self.api = ActionsApi() # noqa: E501 + + def tearDown(self): + pass + + def test_create_rule(self): + """Test case for create_rule""" + pass + + def test_delete_rule(self): + """Test case for delete_rule""" + pass + + def test_get_rule(self): + """Test case for get_rule""" + pass + + def test_list_detector_rules(self): + """Test case for list_detector_rules""" + pass + + def test_list_rules(self): + """Test case for list_rules""" + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_all_notes.py b/generated/test/test_all_notes.py new file mode 100644 index 00000000..6e833506 --- /dev/null +++ b/generated/test/test_all_notes.py @@ -0,0 +1,39 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.note import Note + +globals()["Note"] = Note +from groundlight_openapi_client.model.all_notes import AllNotes + + +class TestAllNotes(unittest.TestCase): + """AllNotes unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testAllNotes(self): + """Test AllNotes""" + # FIXME: construct object with mandatory attributes with example values + # model = AllNotes() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_binary_classification_result.py b/generated/test/test_binary_classification_result.py new file mode 100644 index 00000000..8317f0e3 --- /dev/null +++ b/generated/test/test_binary_classification_result.py @@ -0,0 +1,36 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. 
You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.binary_classification_result import BinaryClassificationResult + + +class TestBinaryClassificationResult(unittest.TestCase): + """BinaryClassificationResult unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testBinaryClassificationResult(self): + """Test BinaryClassificationResult""" + # FIXME: construct object with mandatory attributes with example values + # model = BinaryClassificationResult() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_channel_enum.py b/generated/test/test_channel_enum.py new file mode 100644 index 00000000..39c15823 --- /dev/null +++ b/generated/test/test_channel_enum.py @@ -0,0 +1,36 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.channel_enum import ChannelEnum + + +class TestChannelEnum(unittest.TestCase): + """ChannelEnum unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testChannelEnum(self): + """Test ChannelEnum""" + # FIXME: construct object with mandatory attributes with example values + # model = ChannelEnum() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_classification_result.py b/generated/test/test_classification_result.py index b5c999be..ccedb7bf 100644 --- a/generated/test/test_classification_result.py +++ b/generated/test/test_classification_result.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/test/test_condition_request.py b/generated/test/test_condition_request.py new file mode 100644 index 00000000..954d9ce9 --- /dev/null +++ b/generated/test/test_condition_request.py @@ -0,0 +1,39 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.verb_enum import VerbEnum + +globals()["VerbEnum"] = VerbEnum +from groundlight_openapi_client.model.condition_request import ConditionRequest + + +class TestConditionRequest(unittest.TestCase): + """ConditionRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testConditionRequest(self): + """Test ConditionRequest""" + # FIXME: construct object with mandatory attributes with example values + # model = ConditionRequest() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_counting_result.py b/generated/test/test_counting_result.py new file mode 100644 index 00000000..67697c8a --- /dev/null +++ b/generated/test/test_counting_result.py @@ -0,0 +1,36 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.counting_result import CountingResult + + +class TestCountingResult(unittest.TestCase): + """CountingResult unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testCountingResult(self): + """Test CountingResult""" + # FIXME: construct object with mandatory attributes with example values + # model = CountingResult() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_detector_creation_input_request.py b/generated/test/test_detector_creation_input_request.py new file mode 100644 index 00000000..8d86a489 --- /dev/null +++ b/generated/test/test_detector_creation_input_request.py @@ -0,0 +1,39 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.mode_enum import ModeEnum + +globals()["ModeEnum"] = ModeEnum +from groundlight_openapi_client.model.detector_creation_input_request import DetectorCreationInputRequest + + +class TestDetectorCreationInputRequest(unittest.TestCase): + """DetectorCreationInputRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testDetectorCreationInputRequest(self): + """Test DetectorCreationInputRequest""" + # FIXME: construct object with mandatory attributes with example values + # model = DetectorCreationInputRequest() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_image_query.py b/generated/test/test_image_query.py index e4a9cdbc..87832913 100644 --- a/generated/test/test_image_query.py +++ b/generated/test/test_image_query.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -13,11 +13,9 @@ import unittest import groundlight_openapi_client -from groundlight_openapi_client.model.classification_result import ClassificationResult from groundlight_openapi_client.model.image_query_type_enum import ImageQueryTypeEnum from groundlight_openapi_client.model.result_type_enum import ResultTypeEnum -globals()["ClassificationResult"] = ClassificationResult globals()["ImageQueryTypeEnum"] = ImageQueryTypeEnum globals()["ResultTypeEnum"] = ResultTypeEnum from groundlight_openapi_client.model.image_query import ImageQuery diff --git a/generated/test/test_mode_enum.py b/generated/test/test_mode_enum.py new file mode 100644 index 00000000..4836bf55 --- /dev/null +++ b/generated/test/test_mode_enum.py @@ -0,0 +1,36 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.mode_enum import ModeEnum + + +class TestModeEnum(unittest.TestCase): + """ModeEnum unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testModeEnum(self): + """Test ModeEnum""" + # FIXME: construct object with mandatory attributes with example values + # model = ModeEnum() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_note_request.py b/generated/test/test_note_request.py new file mode 100644 index 00000000..c2043f12 --- /dev/null +++ b/generated/test/test_note_request.py @@ -0,0 +1,36 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.note_request import NoteRequest + + +class TestNoteRequest(unittest.TestCase): + """NoteRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testNoteRequest(self): + """Test NoteRequest""" + # FIXME: construct object with mandatory attributes with example values + # model = NoteRequest() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_paginated_all_notes_list.py b/generated/test/test_paginated_all_notes_list.py new file mode 100644 index 00000000..93770ca4 --- /dev/null +++ b/generated/test/test_paginated_all_notes_list.py @@ -0,0 +1,39 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.all_notes import AllNotes + +globals()["AllNotes"] = AllNotes +from groundlight_openapi_client.model.paginated_all_notes_list import PaginatedAllNotesList + + +class TestPaginatedAllNotesList(unittest.TestCase): + """PaginatedAllNotesList unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testPaginatedAllNotesList(self): + """Test PaginatedAllNotesList""" + # FIXME: construct object with mandatory attributes with example values + # model = PaginatedAllNotesList() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_paginated_note_list.py b/generated/test/test_paginated_note_list.py new file mode 100644 index 00000000..9cbcd779 --- /dev/null +++ b/generated/test/test_paginated_note_list.py @@ -0,0 +1,39 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.note import Note + +globals()["Note"] = Note +from groundlight_openapi_client.model.paginated_note_list import PaginatedNoteList + + +class TestPaginatedNoteList(unittest.TestCase): + """PaginatedNoteList unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testPaginatedNoteList(self): + """Test PaginatedNoteList""" + # FIXME: construct object with mandatory attributes with example values + # model = PaginatedNoteList() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_rule_request.py b/generated/test/test_rule_request.py new file mode 100644 index 00000000..68db4be6 --- /dev/null +++ b/generated/test/test_rule_request.py @@ -0,0 +1,43 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.action_request import ActionRequest +from groundlight_openapi_client.model.condition_request import ConditionRequest +from groundlight_openapi_client.model.snooze_time_unit_enum import SnoozeTimeUnitEnum + +globals()["ActionRequest"] = ActionRequest +globals()["ConditionRequest"] = ConditionRequest +globals()["SnoozeTimeUnitEnum"] = SnoozeTimeUnitEnum +from groundlight_openapi_client.model.rule_request import RuleRequest + + +class TestRuleRequest(unittest.TestCase): + """RuleRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testRuleRequest(self): + """Test RuleRequest""" + # FIXME: construct object with mandatory attributes with example values + # model = RuleRequest() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_snooze_time_unit_enum.py b/generated/test/test_snooze_time_unit_enum.py new file mode 100644 index 00000000..973196d1 --- /dev/null +++ b/generated/test/test_snooze_time_unit_enum.py @@ -0,0 +1,36 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.snooze_time_unit_enum import SnoozeTimeUnitEnum + + +class TestSnoozeTimeUnitEnum(unittest.TestCase): + """SnoozeTimeUnitEnum unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testSnoozeTimeUnitEnum(self): + """Test SnoozeTimeUnitEnum""" + # FIXME: construct object with mandatory attributes with example values + # model = SnoozeTimeUnitEnum() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_verb_enum.py b/generated/test/test_verb_enum.py new file mode 100644 index 00000000..ba2dc259 --- /dev/null +++ b/generated/test/test_verb_enum.py @@ -0,0 +1,36 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.verb_enum import VerbEnum + + +class TestVerbEnum(unittest.TestCase): + """VerbEnum unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testVerbEnum(self): + """Test VerbEnum""" + # FIXME: construct object with mandatory attributes with example values + # model = VerbEnum() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/spec/public-api.yaml b/spec/public-api.yaml index 07a9aa0c..b1065951 100644 --- a/spec/public-api.yaml +++ b/spec/public-api.yaml @@ -1,283 +1,243 @@ openapi: 3.0.3 info: title: Groundlight API - version: 0.6.0 - description: Easy Computer Vision powered by Natural Language + version: 0.15.3 + description: Groundlight makes it simple to understand images. You can easily create + computer vision detectors just by describing what you want to know using natural + language. contact: - name: Questions? + name: support@groundlight.ai email: support@groundlight.ai paths: - /me: - get: - operationId: Who am I - description: Retrieve the current user. - tags: - - user - security: - - ApiToken: [] - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/User" - description: "" - /v1/actions/rules: - get: - operationId: List rules - description: Retrieve a list of rules. - parameters: - - name: page - required: false - in: query - description: A page number within the paginated result set. - schema: - type: integer - - name: page_size - required: false - in: query - description: Number of results to return per page. - schema: - type: integer - tags: - - rules - security: - - ApiToken: [] - responses: - "200": - content: - application/json: - schema: - $ref: "#/components/schemas/PaginatedRuleList" - description: "" - /v1/actions/rules/{id}: + /v1/actions/detector/{detector_id}/rules: get: - operationId: Get rule - description: Retrieve a rule + operationId: List detector rules + description: List all rules for a detector parameters: - - in: path - name: id - schema: - type: integer - description: Get a rule by its ID. - required: true + - in: path + name: detector_id + schema: + type: string + required: true tags: - - rules + - actions security: - - ApiToken: [] + - ApiToken: [] responses: - "200": + '200': content: application/json: schema: - $ref: "#/components/schemas/Rule" - description: "" - delete: - operationId: Delete rule - description: Delete a rule - parameters: - - in: path - name: id - schema: - type: integer - description: Delete a rule by its ID. - required: true - tags: - - rules - security: - - ApiToken: [] - responses: - "204": - description: "" - /v1/actions/detector/{detector_id}/rules: + $ref: '#/components/schemas/PaginatedRuleList' + description: '' post: operationId: Create rule - description: Create a new rule for a detector. + description: Create a new rule for a detector parameters: - - in: path - name: detector_id - schema: - type: string - description: Choose a detector by its ID. 
- required: true + - in: path + name: detector_id + schema: + type: string + required: true tags: - - rules + - actions requestBody: content: application/json: schema: - $ref: "#/components/schemas/RuleCreationInput" + $ref: '#/components/schemas/RuleRequest' + application/x-www-form-urlencoded: + schema: + $ref: '#/components/schemas/RuleRequest' + multipart/form-data: + schema: + $ref: '#/components/schemas/RuleRequest' required: true security: - - ApiToken: [] + - ApiToken: [] responses: - "201": + '201': content: application/json: schema: - $ref: "#/components/schemas/Rule" - description: "" - /v1/notes: + $ref: '#/components/schemas/Rule' + description: '' + /v1/actions/rules: get: - operationId: get notes - description: Retrieve notes for a detector + operationId: List rules + description: Lists all rules over all detectors owned by the requester. parameters: - - name: detector_id - required: true - in: query - description: the detector whose notes to retrieve - schema: - type: string + - name: page + required: false + in: query + description: A page number within the paginated result set. + schema: + type: integer + - name: page_size + required: false + in: query + description: Number of results to return per page. + schema: + type: integer tags: - - notes + - actions security: - - ApiToken: [] + - ApiToken: [] responses: - "200": + '200': content: application/json: schema: - type: object - properties: - CUSTOMER: - type: array - items: - $ref: "#/components/schemas/Note" - GL: - type: array - items: - $ref: "#/components/schemas/Note" - required: - - CUSTOMER - - GL - description: "" - post: - operationId: Create note - description: Create a new note. + $ref: '#/components/schemas/PaginatedRuleList' + description: '' + /v1/actions/rules/{id}: + get: + operationId: Get rule + description: Retrieve a rule parameters: - - name: detector_id - required: true - in: query - description: the detector to associate the note with - schema: - type: string - tags: - - notes - requestBody: - content: - application/json: - schema: - $ref: "#/components/schemas/NoteCreationInput" + - in: path + name: id + schema: + type: integer required: true + tags: + - actions security: - - ApiToken: [] + - ApiToken: [] responses: - "201": + '200': content: application/json: schema: - type: array - items: - $ref: "#/components/schemas/Note" - description: "" + $ref: '#/components/schemas/Rule' + description: '' + delete: + operationId: Delete rule + description: Delete a rule + parameters: + - in: path + name: id + schema: + type: integer + required: true + tags: + - actions + security: + - ApiToken: [] + responses: + '204': + description: No response body /v1/detectors: get: operationId: List detectors description: Retrieve a list of detectors. parameters: - - name: page - required: false - in: query - description: A page number within the paginated result set. - schema: - type: integer - - name: page_size - required: false - in: query - description: Number of results to return per page. - schema: - type: integer + - in: query + name: page + schema: + type: integer + description: A page number within the paginated result set. + - in: query + name: page_size + schema: + type: integer + description: Number of items to return per page. 
tags: - - detectors + - detectors security: - - ApiToken: [] + - ApiToken: [] responses: - "200": + '200': content: application/json: schema: - $ref: "#/components/schemas/PaginatedDetectorList" - description: "" + $ref: '#/components/schemas/PaginatedDetectorList' + description: '' post: operationId: Create detector description: Create a new detector. tags: - - detectors + - detectors requestBody: content: application/json: schema: - $ref: "#/components/schemas/DetectorCreationInput" + $ref: '#/components/schemas/DetectorCreationInputRequest' required: true security: - - ApiToken: [] + - ApiToken: [] responses: - "201": + '201': content: application/json: schema: - $ref: "#/components/schemas/Detector" - description: "" + $ref: '#/components/schemas/Detector' + description: '' /v1/detectors/{id}: get: operationId: Get detector description: Retrieve a detector by its ID. parameters: - - in: path - name: id - schema: - type: string - description: Choose a detector by its ID. - required: true + - in: path + name: id + schema: + type: string + description: Choose a detector by its ID. + required: true tags: - - detectors + - detectors security: - - ApiToken: [] + - ApiToken: [] responses: - "200": + '200': content: application/json: schema: - $ref: "#/components/schemas/Detector" - description: "" + $ref: '#/components/schemas/Detector' + description: '' + delete: + operationId: Delete detector + description: Delete a detector by its ID. + parameters: + - in: path + name: id + schema: + type: string + description: Choose a detector by its ID. + required: true + tags: + - detectors + security: + - ApiToken: [] + responses: + '204': + description: No response body /v1/image-queries: get: operationId: List image queries description: Retrieve a list of image-queries. parameters: - - name: page - required: false - in: query - description: A page number within the paginated result set. - schema: - type: integer - - name: page_size - required: false - in: query - description: Number of results to return per page. - schema: - type: integer + - in: query + name: page + schema: + type: integer + description: A page number within the paginated result set. + - in: query + name: page_size + schema: + type: integer + description: Number of items to return per page. tags: - - image-queries + - image-queries security: - - ApiToken: [] + - ApiToken: [] responses: - "200": + '200': content: application/json: schema: - $ref: "#/components/schemas/PaginatedImageQueryList" - description: "" + $ref: '#/components/schemas/PaginatedImageQueryList' + description: '' post: operationId: Submit image query description: |2+ @@ -293,143 +253,253 @@ paths: ``` parameters: - - in: query - name: detector_id - schema: - type: string - description: Choose a detector by its ID. - required: true - - in: query - name: inspection_id - schema: - type: string - description: Associate the image query with an inspection. - - in: query - name: human_review - schema: - type: string - description: > - If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). - If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. - If set to `NEVER`, never send the image query for human review even if the ML model is not confident. - required: false - - in: query - name: patience_time - schema: - type: number - format: float - description: How long to wait for a confident response. 
- required: false - - in: query - name: want_async - schema: - type: string - description: If "true" then submitting an image query returns immediately without a result. The result will be computed asynchronously and can be retrieved later. - - in: query - name: metadata - schema: - type: string - required: false - description: - A dictionary of custom key/value metadata to associate with the image - query (limited to 1KB). + - in: query + name: detector_id + schema: + type: string + description: Choose a detector by its ID. + required: true + - in: query + name: human_review + schema: + type: string + description: |- + If set to `DEFAULT`, use the regular escalation logic (i.e., send the image query for human review if the ML model is not confident). + If set to `ALWAYS`, always send the image query for human review even if the ML model is confident. + If set to `NEVER`, never send the image query for human review even if the ML model is not confident. + - in: query + name: inspection_id + schema: + type: string + description: Associate the image query with an inspection. + - in: query + name: metadata + schema: + type: string + description: A dictionary of custom key/value metadata to associate with the + image query (limited to 1KB). + - in: query + name: patience_time + schema: + type: number + format: float + description: How long to wait for a confident response. + - in: query + name: want_async + schema: + type: string + description: If "true" then submitting an image query returns immediately + without a result. The result will be computed asynchronously and can be + retrieved later. tags: - - image-queries + - image-queries requestBody: content: image/jpeg: schema: type: string format: binary - examples: - JPEGBinaryImageData: - value: "@path/to/image.jpeg" - summary: JPEG binary image data security: - - ApiToken: [] + - ApiToken: [] responses: - "201": + '201': content: application/json: schema: - $ref: "#/components/schemas/ImageQuery" - description: "" + $ref: '#/components/schemas/ImageQuery' + description: '' /v1/image-queries/{id}: get: operationId: Get image query description: Retrieve an image-query by its ID. parameters: - - in: path - name: id - schema: - type: string - description: Choose an image query by its ID. - required: true + - in: path + name: id + schema: + type: string + description: Choose an image query by its ID. + required: true tags: - - image-queries + - image-queries security: - - ApiToken: [] + - ApiToken: [] responses: - "200": + '200': content: application/json: schema: - $ref: "#/components/schemas/ImageQuery" - description: "" + $ref: '#/components/schemas/ImageQuery' + description: '' /v1/image-queries/{id}/image: get: operationId: Get image - description: Retrieve an image by its image query id. + description: Retrieve an image by its ID. parameters: - - in: path - name: id - schema: - type: string - description: Choose an image by its image query id. - required: true + - in: path + name: id + schema: + type: string + description: Retrieve the image associated with the image query ID. + required: true tags: - - images + - image-queries security: - - ApiToken: [] + - ApiToken: [] responses: - "200": + '200': content: image/jpeg: schema: type: string format: binary - description: "" + description: '' + /v1/me: + get: + operationId: Who am I + description: Retrieve the current user. 
+ tags: + - user + security: + - ApiToken: [] + responses: + '200': + content: + application/json: + schema: + type: object + properties: + username: + type: string + description: The user's username + description: '' + /v1/notes: + get: + operationId: get notes + description: Get all the notes from a given detector, returned as one list + per note_category + parameters: + - in: query + name: detector_id + schema: + type: string + description: The detector whose notes to retrieve + required: true + tags: + - notes + security: + - ApiToken: [] + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/AllNotes' + description: '' + post: + operationId: Create note + description: Create a new note + parameters: + - in: query + name: detector_id + schema: + type: string + description: The detector to associate the new note with + required: true + tags: + - notes + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/NoteRequest' + application/x-www-form-urlencoded: + schema: + $ref: '#/components/schemas/NoteRequest' + multipart/form-data: + schema: + $ref: '#/components/schemas/NoteRequest' + required: true + security: + - ApiToken: [] + responses: + '204': + description: No response body components: schemas: - User: + Action: type: object - description: Spec for serializing a user object in the public API properties: - username: + channel: + $ref: '#/components/schemas/ChannelEnum' + recipient: type: string - description: The user's username. + include_image: + type: boolean required: - - username - ClassificationResult: + - channel + - include_image + - recipient + ActionRequest: type: object - description: - Our classification result. This result can come from the detector, - or a human reviewer. properties: - confidence: + channel: + $ref: '#/components/schemas/ChannelEnum' + recipient: type: string - maximum: 1 - minimum: 0 - nullable: true - description: - On a scale of 0 to 1, how confident are we in the predicted - label? - label: + minLength: 1 + include_image: + type: boolean required: - - label - x-internal: true + - channel + - include_image + - recipient + AllNotes: + type: object + description: |- + Serializes all notes for a given detector, grouped by type as listed in UserProfile.NoteCategoryChoices. + The fields must match what's in UserProfile.NoteCategoryChoices. + properties: + CUSTOMER: + type: array + items: + $ref: '#/components/schemas/Note' + GL: + type: array + items: + $ref: '#/components/schemas/Note' + required: + - CUSTOMER + - GL + ChannelEnum: + enum: + - EMAIL + - TEXT + type: string + description: |- + * `EMAIL` - EMAIL + * `TEXT` - TEXT + Condition: + type: object + properties: + verb: + $ref: '#/components/schemas/VerbEnum' + parameters: + type: object + additionalProperties: {} + required: + - parameters + - verb + ConditionRequest: + type: object + properties: + verb: + $ref: '#/components/schemas/VerbEnum' + parameters: + type: object + additionalProperties: {} + required: + - parameters + - verb Detector: type: object description: Spec for serializing a detector object in the public API. @@ -440,7 +510,7 @@ components: description: A unique ID for this object. type: allOf: - - $ref: "#/components/schemas/DetectorTypeEnum" + - $ref: '#/components/schemas/DetectorTypeEnum' readOnly: true description: The type of this object.
created_at: @@ -454,8 +524,8 @@ components: maxLength: 200 query: type: string - readOnly: true description: A question about the image. + readOnly: true group_name: type: string readOnly: true @@ -465,38 +535,51 @@ components: format: double maximum: 1.0 minimum: 0.0 - default: 0.75 - description: - If the detector's prediction is below this confidence threshold, + default: 0.9 + description: If the detector's prediction is below this confidence threshold, send the image query for human review. metadata: type: object + additionalProperties: {} + nullable: true + readOnly: true + description: Metadata about the detector. + mode: + type: string + readOnly: true + mode_configuration: + type: object + additionalProperties: {} nullable: true - description: - A dictionary of custom key/value metadata to associate with the detector - (limited to 1KB). This is encoded as a URL-safe, base64-encoded JSON string. + readOnly: true required: - - created_at - - group_name - - id - - name - - query - - type + - created_at + - group_name + - id + - metadata + - mode + - mode_configuration + - name + - query + - type x-internal: true - DetectorCreationInput: + DetectorCreationInputRequest: type: object description: Helper serializer for validating POST /detectors input. properties: name: type: string + minLength: 1 description: A short, descriptive name for the detector. maxLength: 200 query: type: string + minLength: 1 description: A question about the image. maxLength: 300 group_name: type: string + minLength: 1 description: Which group should this detector be part of? maxLength: 100 confidence_threshold: @@ -504,156 +587,66 @@ components: format: double maximum: 1.0 minimum: 0.0 - default: 0.75 - description: - If the detector's prediction is below this confidence threshold, + default: 0.9 + description: If the detector's prediction is below this confidence threshold, send the image query for human review. + patience_time: + type: number + format: double + maximum: 3600 + minimum: 0 + default: 30.0 + description: How long Groundlight will attempt to generate a confident prediction pipeline_config: type: string nullable: true - description: (Advanced usage) Configuration to instantiate a specific prediction pipeline. - maxLength: 8192 + description: (Advanced usage) Configuration needed to instantiate a prediction + pipeline. + maxLength: 100 metadata: type: string + minLength: 1 + description: Base64-encoded metadata for the detector. This should be a + JSON object with string keys. The size after encoding should not exceed + 1362 bytes, corresponding to 1KiB before encoding. + maxLength: 1362 + mode: + allOf: + - $ref: '#/components/schemas/ModeEnum' + default: BINARY + description: |- + Mode in which this detector will work. + + * `BINARY` - BINARY + * `COUNT` - COUNT + mode_configuration: nullable: true - description: - A dictionary of custom key/value metadata to associate with the detector - (limited to 1KB). This is encoded as a URL-safe, base64-encoded JSON string. + description: Configuration for each detector mode. required: - # TODO: make name optional - that's how the web version is going. - - name - - query + - name + - query x-internal: true DetectorTypeEnum: enum: - - detector + - detector type: string - RuleBase: - type: object - description: Base spec for serializing a rule object in the public API. - properties: - detector_id: - type: string - description: Which detector should this rule be associated with? - name: - type: string - description: A short, descriptive name for the rule. 
- maxLength: 200 - enabled: - type: boolean - default: true - description: Is this rule enabled? - snooze_time_enabled: - type: boolean - default: false - description: Is this rule snooze time enabled? - snooze_time_value: - type: integer - default: 1 - description: How long to snooze the rule for (in seconds). - snooze_time_unit: - type: string - default: "DAYS" - enum: - - "SECONDS" - - "MINUTES" - - "HOURS" - - "DAYS" - description: What unit of time to use for the snooze time. - action: - $ref: "#/components/schemas/Action" - description: What action should be taken when the rule is triggered? - condition: - $ref: "#/components/schemas/Condition" - description: What condition should trigger the rule? - x-internal: true - RuleCreationInput: - type: object - description: Helper serializer for structuring POST request to actions - allOf: - - $ref: "#/components/schemas/RuleBase" - x-internal: true - Rule: - type: object - description: Spec for serializing a rule object in the public API. - allOf: - - $ref: "#/components/schemas/RuleBase" - properties: - id: - type: integer - readOnly: true - description: A unique ID for this object. - detector_name: - type: string - readOnly: true - description: The name of the detector this rule is associated with. - x-internal: true - Action: - type: object - description: What action should be taken when the rule is triggered? - properties: - channel: - type: string - enum: - - EMAIL - - TEXT - description: The channel to send the action to. - include_image: - type: boolean - description: Should the image be included in the action? - recipient: - type: string - description: The recipient of the action. - x-internal: true - Note: - type: object - properties: - content: - type: string - description: The text inside the note - note_type: - type: string - enum: - - "CUSTOMER" - - "GL" - description: The type of note - NoteCreationInput: - type: object - properties: - content: - type: string - description: The text inside the note - required: - - content - - detector_id - Condition: - type: object - description: What condition should trigger the rule? - properties: - verb: - type: string - enum: - - "ANSWERED_CONSECUTIVELY" - - "ANSWERED_WITHIN_TIME" - - "CHANGED_TO" - - "NO_CHANGE" - - "NO_QUERIES" - description: The verb to use in the condition. - parameters: - type: object - description: The parameters to use in the condition. - x-internal: true ImageQuery: type: object description: Spec for serializing a image-query object in the public API. properties: + metadata: + type: object + additionalProperties: {} + nullable: true + readOnly: true + description: Metadata about the image query. id: type: string readOnly: true description: A unique ID for this object. type: allOf: - - $ref: "#/components/schemas/ImageQueryTypeEnum" + - $ref: '#/components/schemas/ImageQueryTypeEnum' readOnly: true description: The type of this object. created_at: @@ -671,51 +664,100 @@ components: description: Which detector was used on this image query? result_type: allOf: - - $ref: "#/components/schemas/ResultTypeEnum" + - $ref: '#/components/schemas/ResultTypeEnum' readOnly: true description: What type of result are we returning? result: - allOf: - - $ref: "#/components/schemas/ClassificationResult" + additionalProperties: {} + nullable: true readOnly: true - metadata: - type: object + description: The result of the image query. 
+ oneOf: + - $ref: '#/components/schemas/BinaryClassificationResult' + - $ref: '#/components/schemas/CountingResult' + patience_time: + type: number + format: double readOnly: true - nullable: true - description: - A dictionary of custom key/value metadata to associate with the image - query (limited to 1KB). + description: How long to wait for a confident response. + confidence_threshold: + type: number + format: double + maximum: 1 + minimum: 0.5 + description: Min confidence needed to accept the response of the image query. required: - - created_at - - detector_id - - id - - query - - result_type - - type + - created_at + - detector_id + - id + - metadata + - patience_time + - query + - result + - result_type + - type x-internal: true ImageQueryTypeEnum: enum: - - image_query + - image_query type: string - PaginatedRuleList: + ModeEnum: + enum: + - BINARY + - COUNT + type: string + description: |- + * `BINARY` - BINARY + * `COUNT` - COUNT + Note: + type: object + properties: + detector_id: + type: string + readOnly: true + content: + type: string + description: Text content of the note. + required: + - content + - detector_id + NoteRequest: + type: object + properties: + content: + type: string + minLength: 1 + description: Text content of the note. + required: + - content + PaginatedDetectorList: type: object + required: + - count + - results properties: count: type: integer + example: 123 next: type: string nullable: true format: uri + example: http://api.example.org/accounts/?page=4 previous: type: string nullable: true format: uri + example: http://api.example.org/accounts/?page=2 results: type: array items: - $ref: "#/components/schemas/Rule" - PaginatedDetectorList: + $ref: '#/components/schemas/Detector' + PaginatedImageQueryList: type: object + required: + - count + - results properties: count: type: integer @@ -733,9 +775,12 @@ components: results: type: array items: - $ref: "#/components/schemas/Detector" - PaginatedImageQueryList: + $ref: '#/components/schemas/ImageQuery' + PaginatedRuleList: type: object + required: + - count + - results properties: count: type: integer @@ -753,22 +798,144 @@ components: results: type: array items: - $ref: "#/components/schemas/ImageQuery" + $ref: '#/components/schemas/Rule' ResultTypeEnum: enum: - - binary_classification + - binary_classification + - counting type: string + Rule: + type: object + properties: + id: + type: integer + readOnly: true + detector_id: + type: string + readOnly: true + detector_name: + type: string + readOnly: true + name: + type: string + maxLength: 44 + enabled: + type: boolean + default: true + snooze_time_enabled: + type: boolean + default: false + snooze_time_value: + type: integer + minimum: 0 + default: 0 + snooze_time_unit: + allOf: + - $ref: '#/components/schemas/SnoozeTimeUnitEnum' + default: DAYS + human_review_required: + type: boolean + default: false + condition: + $ref: '#/components/schemas/Condition' + action: + $ref: '#/components/schemas/Action' + required: + - action + - condition + - detector_id + - detector_name + - id + - name + RuleRequest: + type: object + properties: + name: + type: string + minLength: 1 + maxLength: 44 + enabled: + type: boolean + default: true + snooze_time_enabled: + type: boolean + default: false + snooze_time_value: + type: integer + minimum: 0 + default: 0 + snooze_time_unit: + allOf: + - $ref: '#/components/schemas/SnoozeTimeUnitEnum' + default: DAYS + human_review_required: + type: boolean + default: false + condition: + $ref: 
'#/components/schemas/ConditionRequest' + action: + $ref: '#/components/schemas/ActionRequest' + required: + - action + - condition + - name + SnoozeTimeUnitEnum: + enum: + - DAYS + - HOURS + - MINUTES + - SECONDS + type: string + description: |- + * `DAYS` - DAYS + * `HOURS` - HOURS + * `MINUTES` - MINUTES + * `SECONDS` - SECONDS + VerbEnum: + enum: + - ANSWERED_CONSECUTIVELY + - ANSWERED_WITHIN_TIME + - CHANGED_TO + - NO_CHANGE + - NO_QUERIES + type: string + description: |- + * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY + * `ANSWERED_WITHIN_TIME` - ANSWERED_WITHIN_TIME + * `CHANGED_TO` - CHANGED_TO + * `NO_CHANGE` - NO_CHANGE + * `NO_QUERIES` - NO_QUERIES + BinaryClassificationResult: + type: object + properties: + confidence: + type: number + format: float + label: + type: string + required: + - confidence + - label + CountingResult: + type: object + properties: + confidence: + type: number + format: float + value: + type: integer + required: + - confidence + - value securitySchemes: ApiToken: name: x-api-token type: apiKey in: header servers: - - url: https://api.groundlight.ai/device-api - description: Prod - - url: https://api.integ.groundlight.ai/device-api - description: Integ - - url: https://device.positronix.ai/device-api - description: Device Prod - - url: https://device.integ.positronix.ai/device-api - description: Device Integ +- url: https://api.groundlight.ai/device-api + description: Prod +- url: https://api.integ.groundlight.ai/device-api + description: Integ +- url: https://device.positronix.ai/device-api + description: Device Prod +- url: https://device.integ.positronix.ai/device-api + description: Device Integ diff --git a/src/groundlight/client.py b/src/groundlight/client.py index ffc6210e..945fca64 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -11,7 +11,7 @@ from groundlight_openapi_client.api.image_queries_api import ImageQueriesApi from groundlight_openapi_client.api.user_api import UserApi from groundlight_openapi_client.exceptions import NotFoundException, UnauthorizedException -from groundlight_openapi_client.model.detector_creation_input import DetectorCreationInput +from groundlight_openapi_client.model.detector_creation_input_request import DetectorCreationInputRequest from model import ( Detector, ImageQuery, @@ -266,13 +266,15 @@ def create_detector( # noqa: PLR0913 :return: Detector """ - detector_creation_input = DetectorCreationInput(name=name, query=query) - if confidence_threshold is not None: - detector_creation_input.confidence_threshold = confidence_threshold - if pipeline_config is not None: - detector_creation_input.pipeline_config = pipeline_config + detector_creation_input = DetectorCreationInputRequest( + name=name, + query=query, + pipeline_config=pipeline_config, + ) if metadata is not None: detector_creation_input.metadata = str(url_encode_dict(metadata, name="metadata", size_limit_bytes=1024)) + if confidence_threshold is not None: + detector_creation_input.confidence_threshold = confidence_threshold obj = self.detectors_api.create_detector(detector_creation_input, _request_timeout=DEFAULT_REQUEST_TIMEOUT) return Detector.parse_obj(obj.to_dict()) diff --git a/src/groundlight/experimental_api.py b/src/groundlight/experimental_api.py index 8a8132e2..8b5c21ee 100644 --- a/src/groundlight/experimental_api.py +++ b/src/groundlight/experimental_api.py @@ -9,14 +9,17 @@ import json from typing import Any, Dict, Union -from groundlight_openapi_client.api.images_api import ImagesApi +from groundlight_openapi_client.api.actions_api import ActionsApi +from
groundlight_openapi_client.api.image_queries_api import ImageQueriesApi from groundlight_openapi_client.api.notes_api import NotesApi -from groundlight_openapi_client.api.rules_api import RulesApi from groundlight_openapi_client.model.action import Action -from groundlight_openapi_client.model.condition import Condition -from groundlight_openapi_client.model.note_creation_input import NoteCreationInput -from groundlight_openapi_client.model.rule_creation_input import RuleCreationInput -from model import Channel, Detector, PaginatedRuleList, Rule, Verb +from groundlight_openapi_client.model.action_request import ActionRequest +from groundlight_openapi_client.model.channel_enum import ChannelEnum +from groundlight_openapi_client.model.condition_request import ConditionRequest +from groundlight_openapi_client.model.note_request import NoteRequest +from groundlight_openapi_client.model.rule_request import RuleRequest +from groundlight_openapi_client.model.verb_enum import VerbEnum +from model import Detector, PaginatedRuleList, Rule from .client import Groundlight @@ -24,8 +27,8 @@ class ExperimentalApi(Groundlight): def __init__(self, endpoint: Union[str, None] = None, api_token: Union[str, None] = None): super().__init__(endpoint=endpoint, api_token=api_token) - self.rules_api = RulesApi(self.api_client) - self.images_api = ImagesApi(self.api_client) + self.actions_api = ActionsApi(self.api_client) + self.images_api = ImageQueriesApi(self.api_client) self.notes_api = NotesApi(self.api_client) ITEMS_PER_PAGE = 100 @@ -34,10 +37,10 @@ def create_rule( # pylint: disable=too-many-locals # noqa: PLR0913 self, detector: Union[str, Detector], rule_name: str, - channel: Union[str, Channel], + channel: Union[str, ChannelEnum], recipient: str, *, - alert_on: Union[str, Verb] = "CHANGED_TO", + alert_on: Union[str, VerbEnum] = "CHANGED_TO", enabled: bool = True, include_image: bool = False, condition_parameters: Union[str, dict, None] = None, @@ -68,19 +71,19 @@ def create_rule( # pylint: disable=too-many-locals # noqa: PLR0913 if condition_parameters is None: condition_parameters = {} if isinstance(alert_on, str): - alert_on = Verb(alert_on.upper()) + alert_on = VerbEnum(alert_on.upper()) if isinstance(channel, str): - channel = Channel(channel.upper()) + channel = ChannelEnum(channel.upper()) if isinstance(condition_parameters, str): condition_parameters = json.loads(condition_parameters) # type: ignore - action = Action( - channel=channel.value, # type: ignore + action = ActionRequest( + channel=channel, # type: ignore recipient=recipient, include_image=include_image, ) - condition = Condition(verb=alert_on.value, parameters=condition_parameters) # type: ignore + condition = ConditionRequest(verb=alert_on, parameters=condition_parameters) # type: ignore det_id = detector.id if isinstance(detector, Detector) else detector - rule_input = RuleCreationInput( + rule_input = RuleRequest( detector_id=det_id, name=rule_name, enabled=enabled, @@ -90,7 +93,7 @@ def create_rule( # pylint: disable=too-many-locals # noqa: PLR0913 snooze_time_value=snooze_time_value, snooze_time_unit=snooze_time_unit, ) - return Rule.model_validate(self.rules_api.create_rule(det_id, rule_input).to_dict()) + return Rule.model_validate(self.actions_api.create_rule(det_id, rule_input).to_dict()) def get_rule(self, action_id: int) -> Action: """ @@ -99,7 +102,7 @@ def get_rule(self, action_id: int) -> Action: :param action_id: the id of the action to get :return: the action with the given id """ - return 
Rule.model_validate(self.rules_api.get_rule(action_id).to_dict()) + return Rule.model_validate(self.actions_api.get_rule(action_id).to_dict()) def delete_rule(self, action_id: int) -> None: """ @@ -107,7 +110,7 @@ def delete_rule(self, action_id: int) -> None: :param action_id: the id of the action to delete """ - self.rules_api.delete_rule(action_id) + self.actions_api.delete_rule(action_id) def list_rules(self, page=1, page_size=10) -> PaginatedRuleList: """ @@ -115,7 +118,7 @@ def list_rules(self, page=1, page_size=10) -> PaginatedRuleList: :return: a list of all rules """ - obj = self.rules_api.list_rules(page=page, page_size=page_size) + obj = self.actions_api.list_rules(page=page, page_size=page_size) return PaginatedRuleList.parse_obj(obj.to_dict()) def delete_all_rules(self, detector: Union[None, str, Detector] = None) -> int: @@ -162,7 +165,7 @@ def get_notes(self, detector: Union[str, Detector]) -> Dict[str, Any]: det_id = detector.id if isinstance(detector, Detector) else detector return self.notes_api.get_notes(det_id) - def create_note(self, detector: Union[str, Detector], note: Union[str, NoteCreationInput]) -> None: + def create_note(self, detector: Union[str, Detector], note: Union[str, NoteRequest]) -> None: """ Adds a note to a given detector @@ -170,5 +173,5 @@ def create_note(self, detector: Union[str, Detector], note: Union[str, NoteCreat """ det_id = detector.id if isinstance(detector, Detector) else detector if isinstance(note, str): - note = NoteCreationInput(content=note) + note = NoteRequest(content=note) self.notes_api.create_note(det_id, note) diff --git a/test/integration/test_groundlight.py b/test/integration/test_groundlight.py index 15efab4c..b494446e 100644 --- a/test/integration/test_groundlight.py +++ b/test/integration/test_groundlight.py @@ -15,7 +15,14 @@ from groundlight.internalapi import InternalApiError, NotFoundError, iq_is_answered from groundlight.optional_imports import * from groundlight.status_codes import is_user_error -from model import ClassificationResult, Detector, ImageQuery, PaginatedDetectorList, PaginatedImageQueryList +from model import ( + BinaryClassificationResult, + CountingResult, + Detector, + ImageQuery, + PaginatedDetectorList, + PaginatedImageQueryList, +) DEFAULT_CONFIDENCE_THRESHOLD = 0.9 IQ_IMPROVEMENT_THRESHOLD = 0.75 @@ -23,7 +30,7 @@ def is_valid_display_result(result: Any) -> bool: """Is the image query result valid to display to the user?.""" - if not isinstance(result, ClassificationResult): + if not isinstance(result, (BinaryClassificationResult, CountingResult)): return False if not is_valid_display_label(result.label): return False diff --git a/test/unit/test_notes.py b/test/unit/test_notes.py index 0b5fa04b..e0e5dc48 100644 --- a/test/unit/test_notes.py +++ b/test/unit/test_notes.py @@ -8,7 +8,7 @@ def test_notes(gl: ExperimentalApi): det = gl.create_detector(name, "test_query") gl.create_note(det, "test_note") # test runner could be either a customer or GL - notes = (gl.get_notes(det).get("CUSTOMER") or []) + (gl.get_notes(det).get("gl") or []) + notes = (gl.get_notes(det).get("customer") or []) + (gl.get_notes(det).get("gl") or []) found_note = False for i in range(len(notes)): if notes[i].content == "test_note":
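
For orientation, a minimal usage sketch of the renamed experimental surface after this change (not part of the diff). It assumes a valid GROUNDLIGHT_API_TOKEN is set in the environment; the detector name, query, rule name, recipient, and condition parameters are illustrative placeholders, not values taken from this PR.

```python
# Sketch: exercising the rules_api -> actions_api rename end to end.
from groundlight.experimental_api import ExperimentalApi

gl = ExperimentalApi()  # reads GROUNDLIGHT_API_TOKEN from the environment

det = gl.create_detector("dock-door", "Is the dock door open?")

# Plain strings are upper-cased and coerced to ChannelEnum / VerbEnum
# inside create_rule, per the experimental_api.py hunk above.
rule = gl.create_rule(
    det,
    "email-when-open",
    "EMAIL",
    "ops@example.com",
    alert_on="CHANGED_TO",
    condition_parameters={"label": "YES"},  # hypothetical condition payload
)

gl.create_note(det, "Camera 3, north wall")  # a str is wrapped in a NoteRequest
notes = gl.get_notes(det)  # notes grouped per note category

gl.delete_rule(rule.id)  # routed through the renamed ActionsApi
```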