From ab8dbd425a1d4789041ee93484c2b48fcefc5400 Mon Sep 17 00:00:00 2001 From: Brandon <132288221+brandon-groundlight@users.noreply.github.com> Date: Mon, 12 Aug 2024 17:30:29 -0700 Subject: [PATCH] Bbox support (#230) * Added support for sending and getting bounding box labels --------- Co-authored-by: Auto-format Bot --- feature_schedule.txt | 1 + generated/.openapi-generator/FILES | 18 + generated/README.md | 11 + generated/docs/AnnotationsRequestedEnum.md | 12 + generated/docs/BBoxGeometry.md | 18 + generated/docs/BBoxGeometryRequest.md | 16 + generated/docs/BlankEnum.md | 11 + generated/docs/DetectorsApi.md | 155 +++++++ generated/docs/ImageQuery.md | 1 + generated/docs/LabelValue.md | 18 + generated/docs/LabelValueRequest.md | 14 + generated/docs/LabelsApi.md | 101 ++++ generated/docs/NullEnum.md | 11 + generated/docs/ROI.md | 15 + generated/docs/ROIRequest.md | 14 + generated/docs/ReviewReasonEnum.md | 12 + generated/docs/SourceEnum.md | 12 + .../api/detectors_api.py | 177 +++++++ .../api/{images_api.py => labels_api.py} | 50 +- .../api/rules_api.py | 435 ------------------ .../apis/__init__.py | 1 + .../model/annotations_requested_enum.py | 283 ++++++++++++ .../model/b_box_geometry.py | 302 ++++++++++++ ...ion_input.py => b_box_geometry_request.py} | 44 +- .../model/image_query.py | 11 + ...ector_creation_input.py => label_value.py} | 130 +++--- .../model/label_value_request.py | 302 ++++++++++++ .../model/{user.py => roi.py} | 48 +- ...lassification_result.py => roi_request.py} | 44 +- .../model/rule_base.py | 317 ------------- .../model/rule_creation_input.py | 365 --------------- .../model/source_enum.py | 288 ++++++++++++ .../models/__init__.py | 8 + generated/model.py | 94 +++- generated/test/test_action.py | 7 +- .../test/test_annotations_requested_enum.py | 35 ++ ...ation_result.py => test_b_box_geometry.py} | 12 +- generated/test/test_b_box_geometry_request.py | 35 ++ generated/test/test_condition.py | 7 +- generated/test/test_detector.py | 4 +- 
.../test/test_detector_creation_input.py | 35 -- generated/test/test_detector_type_enum.py | 4 +- generated/test/test_detectors_api.py | 16 +- generated/test/test_image_queries_api.py | 8 +- generated/test/test_image_query.py | 6 + generated/test/test_image_query_type_enum.py | 4 +- generated/test/test_images_api.py | 32 -- generated/test/test_inline_response200.py | 7 +- generated/test/test_label_value.py | 42 ++ ...es_list.py => test_label_value_request.py} | 16 +- generated/test/test_labels_api.py | 32 ++ generated/test/test_note.py | 4 +- generated/test/test_note_creation_input.py | 35 -- generated/test/test_notes_api.py | 4 +- .../test/test_paginated_detector_list.py | 4 +- .../test/test_paginated_image_query_list.py | 4 +- generated/test/test_paginated_rule_list.py | 4 +- generated/test/test_result_type_enum.py | 4 +- ...est_paginated_note_list.py => test_roi.py} | 16 +- generated/test/test_roi_request.py | 38 ++ generated/test/test_rule.py | 8 +- generated/test/test_rule_base.py | 40 -- generated/test/test_rule_creation_input.py | 42 -- generated/test/test_rules_api.py | 44 -- generated/test/test_source_enum.py | 35 ++ generated/test/test_user.py | 35 -- generated/test/test_user_api.py | 4 +- spec/public-api.yaml | 262 +++++++++++ src/groundlight/cli.py | 27 +- src/groundlight/client.py | 18 +- src/groundlight/experimental_api.py | 71 ++- src/groundlight/internalapi.py | 2 + test/unit/conftest.py | 34 +- test/unit/test_actions.py | 49 +- test/unit/test_experimental.py | 38 +- test/unit/test_http_retries.py | 4 +- test/unit/test_images.py | 10 +- test/unit/test_notes.py | 10 +- 78 files changed, 2874 insertions(+), 1613 deletions(-) create mode 100644 feature_schedule.txt create mode 100644 generated/docs/AnnotationsRequestedEnum.md create mode 100644 generated/docs/BBoxGeometry.md create mode 100644 generated/docs/BBoxGeometryRequest.md create mode 100644 generated/docs/BlankEnum.md create mode 100644 generated/docs/LabelValue.md create mode 100644 
generated/docs/LabelValueRequest.md create mode 100644 generated/docs/LabelsApi.md create mode 100644 generated/docs/NullEnum.md create mode 100644 generated/docs/ROI.md create mode 100644 generated/docs/ROIRequest.md create mode 100644 generated/docs/ReviewReasonEnum.md create mode 100644 generated/docs/SourceEnum.md rename generated/groundlight_openapi_client/api/{images_api.py => labels_api.py} (72%) delete mode 100644 generated/groundlight_openapi_client/api/rules_api.py create mode 100644 generated/groundlight_openapi_client/model/annotations_requested_enum.py create mode 100644 generated/groundlight_openapi_client/model/b_box_geometry.py rename generated/groundlight_openapi_client/model/{note_creation_input.py => b_box_geometry_request.py} (89%) rename generated/groundlight_openapi_client/model/{detector_creation_input.py => label_value.py} (77%) create mode 100644 generated/groundlight_openapi_client/model/label_value_request.py rename generated/groundlight_openapi_client/model/{user.py => roi.py} (88%) rename generated/groundlight_openapi_client/model/{classification_result.py => roi_request.py} (90%) delete mode 100644 generated/groundlight_openapi_client/model/rule_base.py delete mode 100644 generated/groundlight_openapi_client/model/rule_creation_input.py create mode 100644 generated/groundlight_openapi_client/model/source_enum.py create mode 100644 generated/test/test_annotations_requested_enum.py rename generated/test/{test_classification_result.py => test_b_box_geometry.py} (66%) create mode 100644 generated/test/test_b_box_geometry_request.py delete mode 100644 generated/test/test_detector_creation_input.py delete mode 100644 generated/test/test_images_api.py create mode 100644 generated/test/test_label_value.py rename generated/test/{test_paginated_all_notes_list.py => test_label_value_request.py} (59%) create mode 100644 generated/test/test_labels_api.py delete mode 100644 generated/test/test_note_creation_input.py rename 
generated/test/{test_paginated_note_list.py => test_roi.py} (62%) create mode 100644 generated/test/test_roi_request.py delete mode 100644 generated/test/test_rule_base.py delete mode 100644 generated/test/test_rule_creation_input.py delete mode 100644 generated/test/test_rules_api.py create mode 100644 generated/test/test_source_enum.py delete mode 100644 generated/test/test_user.py diff --git a/feature_schedule.txt b/feature_schedule.txt new file mode 100644 index 00000000..85391a0a --- /dev/null +++ b/feature_schedule.txt @@ -0,0 +1 @@ +0.17.3 : ImageQuery returns image_query_id instead of posicheck_id \ No newline at end of file diff --git a/generated/.openapi-generator/FILES b/generated/.openapi-generator/FILES index b0d55d2c..0db4410e 100644 --- a/generated/.openapi-generator/FILES +++ b/generated/.openapi-generator/FILES @@ -6,6 +6,9 @@ docs/Action.md docs/ActionRequest.md docs/ActionsApi.md docs/AllNotes.md +docs/AnnotationsRequestedEnum.md +docs/BBoxGeometry.md +docs/BBoxGeometryRequest.md docs/BinaryClassificationResult.md docs/ChannelEnum.md docs/Condition.md @@ -22,6 +25,9 @@ docs/ImageQueriesApi.md docs/ImageQuery.md docs/ImageQueryTypeEnum.md docs/InlineResponse200.md +docs/LabelValue.md +docs/LabelValueRequest.md +docs/LabelsApi.md docs/ModeEnum.md docs/Note.md docs/NoteRequest.md @@ -29,10 +35,13 @@ docs/NotesApi.md docs/PaginatedDetectorList.md docs/PaginatedImageQueryList.md docs/PaginatedRuleList.md +docs/ROI.md +docs/ROIRequest.md docs/ResultTypeEnum.md docs/Rule.md docs/RuleRequest.md docs/SnoozeTimeUnitEnum.md +docs/SourceEnum.md docs/UserApi.md docs/VerbEnum.md git_push.sh @@ -42,6 +51,7 @@ groundlight_openapi_client/api/actions_api.py groundlight_openapi_client/api/detector_groups_api.py groundlight_openapi_client/api/detectors_api.py groundlight_openapi_client/api/image_queries_api.py +groundlight_openapi_client/api/labels_api.py groundlight_openapi_client/api/notes_api.py groundlight_openapi_client/api/user_api.py 
groundlight_openapi_client/api_client.py @@ -52,6 +62,9 @@ groundlight_openapi_client/model/__init__.py groundlight_openapi_client/model/action.py groundlight_openapi_client/model/action_request.py groundlight_openapi_client/model/all_notes.py +groundlight_openapi_client/model/annotations_requested_enum.py +groundlight_openapi_client/model/b_box_geometry.py +groundlight_openapi_client/model/b_box_geometry_request.py groundlight_openapi_client/model/binary_classification_result.py groundlight_openapi_client/model/channel_enum.py groundlight_openapi_client/model/condition.py @@ -65,6 +78,8 @@ groundlight_openapi_client/model/detector_type_enum.py groundlight_openapi_client/model/image_query.py groundlight_openapi_client/model/image_query_type_enum.py groundlight_openapi_client/model/inline_response200.py +groundlight_openapi_client/model/label_value.py +groundlight_openapi_client/model/label_value_request.py groundlight_openapi_client/model/mode_enum.py groundlight_openapi_client/model/note.py groundlight_openapi_client/model/note_request.py @@ -72,9 +87,12 @@ groundlight_openapi_client/model/paginated_detector_list.py groundlight_openapi_client/model/paginated_image_query_list.py groundlight_openapi_client/model/paginated_rule_list.py groundlight_openapi_client/model/result_type_enum.py +groundlight_openapi_client/model/roi.py +groundlight_openapi_client/model/roi_request.py groundlight_openapi_client/model/rule.py groundlight_openapi_client/model/rule_request.py groundlight_openapi_client/model/snooze_time_unit_enum.py +groundlight_openapi_client/model/source_enum.py groundlight_openapi_client/model/verb_enum.py groundlight_openapi_client/model_utils.py groundlight_openapi_client/models/__init__.py diff --git a/generated/README.md b/generated/README.md index 7967ef7e..60492381 100644 --- a/generated/README.md +++ b/generated/README.md @@ -117,13 +117,16 @@ Class | Method | HTTP request | Description *DetectorGroupsApi* | 
[**create_detector_group**](docs/DetectorGroupsApi.md#create_detector_group) | **POST** /v1/detector-groups | *DetectorGroupsApi* | [**get_detector_groups**](docs/DetectorGroupsApi.md#get_detector_groups) | **GET** /v1/detector-groups | *DetectorsApi* | [**create_detector**](docs/DetectorsApi.md#create_detector) | **POST** /v1/detectors | +*DetectorsApi* | [**create_detector_group2**](docs/DetectorsApi.md#create_detector_group2) | **POST** /v1/detectors/detector-groups | *DetectorsApi* | [**delete_detector**](docs/DetectorsApi.md#delete_detector) | **DELETE** /v1/detectors/{id} | *DetectorsApi* | [**get_detector**](docs/DetectorsApi.md#get_detector) | **GET** /v1/detectors/{id} | +*DetectorsApi* | [**get_detector_groups2**](docs/DetectorsApi.md#get_detector_groups2) | **GET** /v1/detectors/detector-groups | *DetectorsApi* | [**list_detectors**](docs/DetectorsApi.md#list_detectors) | **GET** /v1/detectors | *ImageQueriesApi* | [**get_image**](docs/ImageQueriesApi.md#get_image) | **GET** /v1/image-queries/{id}/image | *ImageQueriesApi* | [**get_image_query**](docs/ImageQueriesApi.md#get_image_query) | **GET** /v1/image-queries/{id} | *ImageQueriesApi* | [**list_image_queries**](docs/ImageQueriesApi.md#list_image_queries) | **GET** /v1/image-queries | *ImageQueriesApi* | [**submit_image_query**](docs/ImageQueriesApi.md#submit_image_query) | **POST** /v1/image-queries | +*LabelsApi* | [**create_label**](docs/LabelsApi.md#create_label) | **POST** /v1/labels | *NotesApi* | [**create_note**](docs/NotesApi.md#create_note) | **POST** /v1/notes | *NotesApi* | [**get_notes**](docs/NotesApi.md#get_notes) | **GET** /v1/notes | *UserApi* | [**who_am_i**](docs/UserApi.md#who_am_i) | **GET** /v1/me | @@ -134,6 +137,9 @@ Class | Method | HTTP request | Description - [Action](docs/Action.md) - [ActionRequest](docs/ActionRequest.md) - [AllNotes](docs/AllNotes.md) + - [AnnotationsRequestedEnum](docs/AnnotationsRequestedEnum.md) + - [BBoxGeometry](docs/BBoxGeometry.md) + - 
[BBoxGeometryRequest](docs/BBoxGeometryRequest.md) - [BinaryClassificationResult](docs/BinaryClassificationResult.md) - [ChannelEnum](docs/ChannelEnum.md) - [Condition](docs/Condition.md) @@ -147,16 +153,21 @@ Class | Method | HTTP request | Description - [ImageQuery](docs/ImageQuery.md) - [ImageQueryTypeEnum](docs/ImageQueryTypeEnum.md) - [InlineResponse200](docs/InlineResponse200.md) + - [LabelValue](docs/LabelValue.md) + - [LabelValueRequest](docs/LabelValueRequest.md) - [ModeEnum](docs/ModeEnum.md) - [Note](docs/Note.md) - [NoteRequest](docs/NoteRequest.md) - [PaginatedDetectorList](docs/PaginatedDetectorList.md) - [PaginatedImageQueryList](docs/PaginatedImageQueryList.md) - [PaginatedRuleList](docs/PaginatedRuleList.md) + - [ROI](docs/ROI.md) + - [ROIRequest](docs/ROIRequest.md) - [ResultTypeEnum](docs/ResultTypeEnum.md) - [Rule](docs/Rule.md) - [RuleRequest](docs/RuleRequest.md) - [SnoozeTimeUnitEnum](docs/SnoozeTimeUnitEnum.md) + - [SourceEnum](docs/SourceEnum.md) - [VerbEnum](docs/VerbEnum.md) diff --git a/generated/docs/AnnotationsRequestedEnum.md b/generated/docs/AnnotationsRequestedEnum.md new file mode 100644 index 00000000..fd366f01 --- /dev/null +++ b/generated/docs/AnnotationsRequestedEnum.md @@ -0,0 +1,12 @@ +# AnnotationsRequestedEnum + +* `BINARY_CLASSIFICATION` - Binary Classification * `BOUNDING_BOXES` - Bounding Boxes + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | **str** | * `BINARY_CLASSIFICATION` - Binary Classification * `BOUNDING_BOXES` - Bounding Boxes | must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/BBoxGeometry.md b/generated/docs/BBoxGeometry.md new file mode 100644 index 00000000..432bb48c --- /dev/null +++ b/generated/docs/BBoxGeometry.md @@ -0,0 
+1,18 @@ +# BBoxGeometry + +Mixin for serializers to handle data in the StrictBaseModel format + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**left** | **float** | | +**top** | **float** | | +**right** | **float** | | +**bottom** | **float** | | +**x** | **float** | | [readonly] +**y** | **float** | | [readonly] +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/BBoxGeometryRequest.md b/generated/docs/BBoxGeometryRequest.md new file mode 100644 index 00000000..93432d84 --- /dev/null +++ b/generated/docs/BBoxGeometryRequest.md @@ -0,0 +1,16 @@ +# BBoxGeometryRequest + +Mixin for serializers to handle data in the StrictBaseModel format + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**left** | **float** | | +**top** | **float** | | +**right** | **float** | | +**bottom** | **float** | | +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/BlankEnum.md b/generated/docs/BlankEnum.md new file mode 100644 index 00000000..1531f56d --- /dev/null +++ b/generated/docs/BlankEnum.md @@ -0,0 +1,11 @@ +# BlankEnum + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | **str** | | defaults to "", must be one of ["", ] + +[[Back to Model 
list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/DetectorsApi.md b/generated/docs/DetectorsApi.md index 6a748263..ad39beda 100644 --- a/generated/docs/DetectorsApi.md +++ b/generated/docs/DetectorsApi.md @@ -5,8 +5,10 @@ All URIs are relative to *https://api.groundlight.ai/device-api* Method | HTTP request | Description ------------- | ------------- | ------------- [**create_detector**](DetectorsApi.md#create_detector) | **POST** /v1/detectors | +[**create_detector_group2**](DetectorsApi.md#create_detector_group2) | **POST** /v1/detectors/detector-groups | [**delete_detector**](DetectorsApi.md#delete_detector) | **DELETE** /v1/detectors/{id} | [**get_detector**](DetectorsApi.md#get_detector) | **GET** /v1/detectors/{id} | +[**get_detector_groups2**](DetectorsApi.md#get_detector_groups2) | **GET** /v1/detectors/detector-groups | [**list_detectors**](DetectorsApi.md#list_detectors) | **GET** /v1/detectors | @@ -90,6 +92,86 @@ Name | Type | Description | Notes - **Accept**: application/json +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**201** | | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **create_detector_group2** +> DetectorGroup create_detector_group2(detector_group_request) + + + +Create a new detector group POST data: Required: - name (str) - name of the predictor set + +### Example + +* Api Key Authentication (ApiToken): + +```python +import time +import groundlight_openapi_client +from groundlight_openapi_client.api import detectors_api +from groundlight_openapi_client.model.detector_group_request import DetectorGroupRequest +from groundlight_openapi_client.model.detector_group import DetectorGroup +from 
pprint import pprint +# Defining the host is optional and defaults to https://api.groundlight.ai/device-api +# See configuration.py for a list of all supported configuration parameters. +configuration = groundlight_openapi_client.Configuration( + host = "https://api.groundlight.ai/device-api" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: ApiToken +configuration.api_key['ApiToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['ApiToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with groundlight_openapi_client.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = detectors_api.DetectorsApi(api_client) + detector_group_request = DetectorGroupRequest( + name="name_example", + ) # DetectorGroupRequest | + + # example passing only required values which don't have defaults set + try: + api_response = api_instance.create_detector_group2(detector_group_request) + pprint(api_response) + except groundlight_openapi_client.ApiException as e: + print("Exception when calling DetectorsApi->create_detector_group2: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **detector_group_request** | [**DetectorGroupRequest**](DetectorGroupRequest.md)| | + +### Return type + +[**DetectorGroup**](DetectorGroup.md) + +### Authorization + +[ApiToken](../README.md#ApiToken) + +### HTTP request headers + + - **Content-Type**: application/json, application/x-www-form-urlencoded, multipart/form-data + - **Accept**: application/json + + ### HTTP response details | Status code | Description | Response headers | @@ -242,6 +324,79 @@ Name 
| Type | Description | Notes - **Accept**: application/json +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**200** | | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + +# **get_detector_groups2** +> [DetectorGroup] get_detector_groups2() + + + +List all detector groups + +### Example + +* Api Key Authentication (ApiToken): + +```python +import time +import groundlight_openapi_client +from groundlight_openapi_client.api import detectors_api +from groundlight_openapi_client.model.detector_group import DetectorGroup +from pprint import pprint +# Defining the host is optional and defaults to https://api.groundlight.ai/device-api +# See configuration.py for a list of all supported configuration parameters. +configuration = groundlight_openapi_client.Configuration( + host = "https://api.groundlight.ai/device-api" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. + +# Configure API key authorization: ApiToken +configuration.api_key['ApiToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. 
Bearer) for API key, if needed +# configuration.api_key_prefix['ApiToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with groundlight_openapi_client.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = detectors_api.DetectorsApi(api_client) + + # example, this endpoint has no required or optional parameters + try: + api_response = api_instance.get_detector_groups2() + pprint(api_response) + except groundlight_openapi_client.ApiException as e: + print("Exception when calling DetectorsApi->get_detector_groups2: %s\n" % e) +``` + + +### Parameters +This endpoint does not need any parameter. + +### Return type + +[**[DetectorGroup]**](DetectorGroup.md) + +### Authorization + +[ApiToken](../README.md#ApiToken) + +### HTTP request headers + + - **Content-Type**: Not defined + - **Accept**: application/json + + ### HTTP response details | Status code | Description | Response headers | diff --git a/generated/docs/ImageQuery.md b/generated/docs/ImageQuery.md index 92078b73..e75a620b 100644 --- a/generated/docs/ImageQuery.md +++ b/generated/docs/ImageQuery.md @@ -15,6 +15,7 @@ Name | Type | Description | Notes **result** | **bool, date, datetime, dict, float, int, list, str, none_type** | The result of the image query. | [readonly] **patience_time** | **float** | How long to wait for a confident response. | [readonly] **confidence_threshold** | **float** | Min confidence needed to accept the response of the image query. 
| [readonly] +**rois** | [**[ROI], none_type**](ROI.md) | An array of regions of interest (bounding boxes) collected on image | [readonly] **any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/generated/docs/LabelValue.md b/generated/docs/LabelValue.md new file mode 100644 index 00000000..a96d9ab6 --- /dev/null +++ b/generated/docs/LabelValue.md @@ -0,0 +1,18 @@ +# LabelValue + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**confidence** | **float, none_type** | | [readonly] +**class_name** | **str, none_type** | A human-readable class name for this label (e.g. YES/NO) | [readonly] +**annotations_requested** | **[bool, date, datetime, dict, float, int, list, str, none_type]** | | [readonly] +**created_at** | **datetime** | | [readonly] +**detector_id** | **int, none_type** | | [readonly] +**source** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [readonly] +**rois** | [**[ROI], none_type**](ROI.md) | | [optional] +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/LabelValueRequest.md b/generated/docs/LabelValueRequest.md new file mode 100644 index 00000000..a6934ab5 --- /dev/null +++ b/generated/docs/LabelValueRequest.md @@ -0,0 +1,14 @@ +# LabelValueRequest + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- 
+**label** | **str** | | +**image_query_id** | **str** | | +**rois** | [**[ROIRequest], none_type**](ROIRequest.md) | | [optional] +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/LabelsApi.md b/generated/docs/LabelsApi.md new file mode 100644 index 00000000..487e3fe4 --- /dev/null +++ b/generated/docs/LabelsApi.md @@ -0,0 +1,101 @@ +# groundlight_openapi_client.LabelsApi + +All URIs are relative to *https://api.groundlight.ai/device-api* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**create_label**](LabelsApi.md#create_label) | **POST** /v1/labels | + + +# **create_label** +> LabelValue create_label(label_value_request) + + + +Create a new LabelValue and attach it to an image query. This will trigger asynchronous fine-tuner model training. + +### Example + +* Api Key Authentication (ApiToken): + +```python +import time +import groundlight_openapi_client +from groundlight_openapi_client.api import labels_api +from groundlight_openapi_client.model.label_value import LabelValue +from groundlight_openapi_client.model.label_value_request import LabelValueRequest +from pprint import pprint +# Defining the host is optional and defaults to https://api.groundlight.ai/device-api +# See configuration.py for a list of all supported configuration parameters. +configuration = groundlight_openapi_client.Configuration( + host = "https://api.groundlight.ai/device-api" +) + +# The client must configure the authentication and authorization parameters +# in accordance with the API server security policy. +# Examples for each auth method are provided below, use the example that +# satisfies your auth use case. 
+ +# Configure API key authorization: ApiToken +configuration.api_key['ApiToken'] = 'YOUR_API_KEY' + +# Uncomment below to setup prefix (e.g. Bearer) for API key, if needed +# configuration.api_key_prefix['ApiToken'] = 'Bearer' + +# Enter a context with an instance of the API client +with groundlight_openapi_client.ApiClient(configuration) as api_client: + # Create an instance of the API class + api_instance = labels_api.LabelsApi(api_client) + label_value_request = LabelValueRequest( + label="label_example", + image_query_id="image_query_id_example", + rois=[ + ROIRequest( + label="label_example", + geometry=BBoxGeometryRequest( + left=3.14, + top=3.14, + right=3.14, + bottom=3.14, + ), + ), + ], + ) # LabelValueRequest | + + # example passing only required values which don't have defaults set + try: + api_response = api_instance.create_label(label_value_request) + pprint(api_response) + except groundlight_openapi_client.ApiException as e: + print("Exception when calling LabelsApi->create_label: %s\n" % e) +``` + + +### Parameters + +Name | Type | Description | Notes +------------- | ------------- | ------------- | ------------- + **label_value_request** | [**LabelValueRequest**](LabelValueRequest.md)| | + +### Return type + +[**LabelValue**](LabelValue.md) + +### Authorization + +[ApiToken](../README.md#ApiToken) + +### HTTP request headers + + - **Content-Type**: application/json, application/x-www-form-urlencoded, multipart/form-data + - **Accept**: application/json + + +### HTTP response details + +| Status code | Description | Response headers | +|-------------|-------------|------------------| +**201** | | - | + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/generated/docs/NullEnum.md b/generated/docs/NullEnum.md new file mode 100644 index 00000000..7cccbef2 --- /dev/null +++ b/generated/docs/NullEnum.md @@ 
-0,0 +1,11 @@ +# NullEnum + + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | **str** | | must be one of ["null", ] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/ROI.md b/generated/docs/ROI.md new file mode 100644 index 00000000..60da87e6 --- /dev/null +++ b/generated/docs/ROI.md @@ -0,0 +1,15 @@ +# ROI + +Mixin for serializers to handle data in the StrictBaseModel format + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**label** | **str** | The label of the bounding box. | +**score** | **float** | The confidence of the bounding box. | [readonly] +**geometry** | [**BBoxGeometry**](BBoxGeometry.md) | | +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/ROIRequest.md b/generated/docs/ROIRequest.md new file mode 100644 index 00000000..8d675ec3 --- /dev/null +++ b/generated/docs/ROIRequest.md @@ -0,0 +1,14 @@ +# ROIRequest + +Mixin for serializers to handle data in the StrictBaseModel format + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**label** | **str** | The label of the bounding box. 
| +**geometry** | [**BBoxGeometryRequest**](BBoxGeometryRequest.md) | | +**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/ReviewReasonEnum.md b/generated/docs/ReviewReasonEnum.md new file mode 100644 index 00000000..6c042a26 --- /dev/null +++ b/generated/docs/ReviewReasonEnum.md @@ -0,0 +1,12 @@ +# ReviewReasonEnum + +* `ESCALATION` - ESCALATION * `AUDIT` - AUDIT * `ASSESSMENT` - ASSESSMENT * `CUSTOMER_INITIATED` - CUSTOMER_INITIATED * `CONFIDENT_UNCLEAR` - CONFIDENT_UNCLEAR * `CONFIDENT_UNCLEAR_AUDIT` - CONFIDENT_UNCLEAR_AUDIT * `DISAGREEMENT` - DISAGREEMENT * `DISAGREEMENT_ON_ASSESSMENT` - DISAGREEMENT_ON_ASSESSMENT * `CONFIRM_ACTION_TRIGGER` - CONFIRM_ACTION_TRIGGER + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | **str** | * `ESCALATION` - ESCALATION * `AUDIT` - AUDIT * `ASSESSMENT` - ASSESSMENT * `CUSTOMER_INITIATED` - CUSTOMER_INITIATED * `CONFIDENT_UNCLEAR` - CONFIDENT_UNCLEAR * `CONFIDENT_UNCLEAR_AUDIT` - CONFIDENT_UNCLEAR_AUDIT * `DISAGREEMENT` - DISAGREEMENT * `DISAGREEMENT_ON_ASSESSMENT` - DISAGREEMENT_ON_ASSESSMENT * `CONFIRM_ACTION_TRIGGER` - CONFIRM_ACTION_TRIGGER | must be one of ["ESCALATION", "AUDIT", "ASSESSMENT", "CUSTOMER_INITIATED", "CONFIDENT_UNCLEAR", "CONFIDENT_UNCLEAR_AUDIT", "DISAGREEMENT", "DISAGREEMENT_ON_ASSESSMENT", "CONFIRM_ACTION_TRIGGER", ] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/docs/SourceEnum.md b/generated/docs/SourceEnum.md new file mode 100644 index 00000000..d2b0b9af --- /dev/null +++ 
b/generated/docs/SourceEnum.md @@ -0,0 +1,12 @@ +# SourceEnum + +* `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear + +## Properties +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**value** | **str** | * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear | must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", ] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/generated/groundlight_openapi_client/api/detectors_api.py b/generated/groundlight_openapi_client/api/detectors_api.py index 2d945476..c03b0cdc 100644 --- a/generated/groundlight_openapi_client/api/detectors_api.py +++ b/generated/groundlight_openapi_client/api/detectors_api.py @@ -23,6 +23,8 @@ ) from groundlight_openapi_client.model.detector import Detector from groundlight_openapi_client.model.detector_creation_input_request import DetectorCreationInputRequest +from groundlight_openapi_client.model.detector_group import DetectorGroup +from groundlight_openapi_client.model.detector_group_request import DetectorGroupRequest from groundlight_openapi_client.model.paginated_detector_list import PaginatedDetectorList @@ -72,6 +74,44 @@ def __init__(self, api_client=None): headers_map={"accept": ["application/json"], "content_type": ["application/json"]}, api_client=api_client, ) + self.create_detector_group2_endpoint = _Endpoint( + settings={ + "response_type": (DetectorGroup,), + "auth": ["ApiToken"], + "endpoint_path": 
"/v1/detectors/detector-groups", + "operation_id": "create_detector_group2", + "http_method": "POST", + "servers": None, + }, + params_map={ + "all": [ + "detector_group_request", + ], + "required": [ + "detector_group_request", + ], + "nullable": [], + "enum": [], + "validation": [], + }, + root_map={ + "validations": {}, + "allowed_values": {}, + "openapi_types": { + "detector_group_request": (DetectorGroupRequest,), + }, + "attribute_map": {}, + "location_map": { + "detector_group_request": "body", + }, + "collection_format_map": {}, + }, + headers_map={ + "accept": ["application/json"], + "content_type": ["application/json", "application/x-www-form-urlencoded", "multipart/form-data"], + }, + api_client=api_client, + ) self.delete_detector_endpoint = _Endpoint( settings={ "response_type": None, @@ -152,6 +192,30 @@ def __init__(self, api_client=None): }, api_client=api_client, ) + self.get_detector_groups2_endpoint = _Endpoint( + settings={ + "response_type": ([DetectorGroup],), + "auth": ["ApiToken"], + "endpoint_path": "/v1/detectors/detector-groups", + "operation_id": "get_detector_groups2", + "http_method": "GET", + "servers": None, + }, + params_map={"all": [], "required": [], "nullable": [], "enum": [], "validation": []}, + root_map={ + "validations": {}, + "allowed_values": {}, + "openapi_types": {}, + "attribute_map": {}, + "location_map": {}, + "collection_format_map": {}, + }, + headers_map={ + "accept": ["application/json"], + "content_type": [], + }, + api_client=api_client, + ) self.list_detectors_endpoint = _Endpoint( settings={ "response_type": (PaginatedDetectorList,), @@ -253,6 +317,64 @@ def create_detector(self, detector_creation_input_request, **kwargs): kwargs["detector_creation_input_request"] = detector_creation_input_request return self.create_detector_endpoint.call_with_http_info(**kwargs) + def create_detector_group2(self, detector_group_request, **kwargs): + """create_detector_group2 # noqa: E501 + + Create a new detector group POST 
data: Required: - name (str) - name of the predictor set # noqa: E501 + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.create_detector_group2(detector_group_request, async_req=True) + >>> result = thread.get() + + Args: + detector_group_request (DetectorGroupRequest): + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. + _check_return_type (bool): specifies if type checking + should be done one the data received from the server. + Default is True. + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _content_type (str/None): force body content-type. + Default is None and content-type will be predicted by allowed + content-types and body. + _host_index (int/None): specifies the index of the server + that we want to use. + Default is read from the configuration. + async_req (bool): execute request asynchronously + + Returns: + DetectorGroup + If the method is called asynchronously, returns the request + thread. 
+ """ + kwargs["async_req"] = kwargs.get("async_req", False) + kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) + kwargs["_preload_content"] = kwargs.get("_preload_content", True) + kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) + kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) + kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) + kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) + kwargs["_content_type"] = kwargs.get("_content_type") + kwargs["_host_index"] = kwargs.get("_host_index") + kwargs["detector_group_request"] = detector_group_request + return self.create_detector_group2_endpoint.call_with_http_info(**kwargs) + def delete_detector(self, id, **kwargs): """delete_detector # noqa: E501 @@ -369,6 +491,61 @@ def get_detector(self, id, **kwargs): kwargs["id"] = id return self.get_detector_endpoint.call_with_http_info(**kwargs) + def get_detector_groups2(self, **kwargs): + """get_detector_groups2 # noqa: E501 + + List all detector groups # noqa: E501 + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + + >>> thread = api.get_detector_groups2(async_req=True) + >>> result = thread.get() + + + Keyword Args: + _return_http_data_only (bool): response data without head status + code and headers. Default is True. + _preload_content (bool): if False, the urllib3.HTTPResponse object + will be returned without reading/decoding response data. + Default is True. + _request_timeout (int/float/tuple): timeout setting for this request. If + one number provided, it will be total request timeout. It can also + be a pair (tuple) of (connection, read) timeouts. + Default is None. + _check_input_type (bool): specifies if type checking + should be done one the data sent to the server. + Default is True. 
+ _check_return_type (bool): specifies if type checking + should be done one the data received from the server. + Default is True. + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _content_type (str/None): force body content-type. + Default is None and content-type will be predicted by allowed + content-types and body. + _host_index (int/None): specifies the index of the server + that we want to use. + Default is read from the configuration. + async_req (bool): execute request asynchronously + + Returns: + [DetectorGroup] + If the method is called asynchronously, returns the request + thread. + """ + kwargs["async_req"] = kwargs.get("async_req", False) + kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) + kwargs["_preload_content"] = kwargs.get("_preload_content", True) + kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) + kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) + kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) + kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) + kwargs["_content_type"] = kwargs.get("_content_type") + kwargs["_host_index"] = kwargs.get("_host_index") + return self.get_detector_groups2_endpoint.call_with_http_info(**kwargs) + def list_detectors(self, **kwargs): """list_detectors # noqa: E501 diff --git a/generated/groundlight_openapi_client/api/images_api.py b/generated/groundlight_openapi_client/api/labels_api.py similarity index 72% rename from generated/groundlight_openapi_client/api/images_api.py rename to generated/groundlight_openapi_client/api/labels_api.py index 1dd52ab2..80593cf2 100644 --- a/generated/groundlight_openapi_client/api/images_api.py +++ b/generated/groundlight_openapi_client/api/labels_api.py @@ -1,9 +1,9 @@ """ 
Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -21,9 +21,11 @@ none_type, validate_and_convert_types, ) +from groundlight_openapi_client.model.label_value import LabelValue +from groundlight_openapi_client.model.label_value_request import LabelValueRequest -class ImagesApi(object): +class LabelsApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech @@ -34,21 +36,21 @@ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client - self.get_image_endpoint = _Endpoint( + self.create_label_endpoint = _Endpoint( settings={ - "response_type": (file_type,), + "response_type": (LabelValue,), "auth": ["ApiToken"], - "endpoint_path": "/v1/image-queries/{id}/image", - "operation_id": "get_image", - "http_method": "GET", + "endpoint_path": "/v1/labels", + "operation_id": "create_label", + "http_method": "POST", "servers": None, }, params_map={ "all": [ - "id", + "label_value_request", ], "required": [ - "id", + "label_value_request", ], "nullable": [], "enum": [], @@ -58,35 +60,33 @@ def __init__(self, api_client=None): "validations": {}, "allowed_values": {}, "openapi_types": { - "id": (str,), - }, - "attribute_map": { - "id": "id", + "label_value_request": (LabelValueRequest,), }, + "attribute_map": {}, "location_map": { - "id": "path", + "label_value_request": "body", }, "collection_format_map": {}, }, headers_map={ - "accept": ["image/jpeg"], - "content_type": [], + "accept": ["application/json"], + "content_type": ["application/json", "application/x-www-form-urlencoded", 
"multipart/form-data"], }, api_client=api_client, ) - def get_image(self, id, **kwargs): - """get_image # noqa: E501 + def create_label(self, label_value_request, **kwargs): + """create_label # noqa: E501 - Retrieve an image by its image query id. # noqa: E501 + Create a new LabelValue and attach it to an image query. This will trigger asynchronous fine-tuner model training. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True - >>> thread = api.get_image(id, async_req=True) + >>> thread = api.create_label(label_value_request, async_req=True) >>> result = thread.get() Args: - id (str): Choose an image by its image query id. + label_value_request (LabelValueRequest): Keyword Args: _return_http_data_only (bool): response data without head status @@ -117,7 +117,7 @@ def get_image(self, id, **kwargs): async_req (bool): execute request asynchronously Returns: - file_type + LabelValue If the method is called asynchronously, returns the request thread. 
""" @@ -130,5 +130,5 @@ def get_image(self, id, **kwargs): kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) kwargs["_content_type"] = kwargs.get("_content_type") kwargs["_host_index"] = kwargs.get("_host_index") - kwargs["id"] = id - return self.get_image_endpoint.call_with_http_info(**kwargs) + kwargs["label_value_request"] = label_value_request + return self.create_label_endpoint.call_with_http_info(**kwargs) diff --git a/generated/groundlight_openapi_client/api/rules_api.py b/generated/groundlight_openapi_client/api/rules_api.py deleted file mode 100644 index 6b7b4567..00000000 --- a/generated/groundlight_openapi_client/api/rules_api.py +++ /dev/null @@ -1,435 +0,0 @@ -""" - Groundlight API - - Easy Computer Vision powered by Natural Language # noqa: E501 - - The version of the OpenAPI document: 0.6.0 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech -""" - -import re # noqa: F401 -import sys # noqa: F401 - -from groundlight_openapi_client.api_client import ApiClient, Endpoint as _Endpoint -from groundlight_openapi_client.model_utils import ( # noqa: F401 - check_allowed_values, - check_validations, - date, - datetime, - file_type, - none_type, - validate_and_convert_types, -) -from groundlight_openapi_client.model.paginated_rule_list import PaginatedRuleList -from groundlight_openapi_client.model.rule import Rule -from groundlight_openapi_client.model.rule_creation_input import RuleCreationInput - - -class RulesApi(object): - """NOTE: This class is auto generated by OpenAPI Generator - Ref: https://openapi-generator.tech - - Do not edit the class manually. 
- """ - - def __init__(self, api_client=None): - if api_client is None: - api_client = ApiClient() - self.api_client = api_client - self.create_rule_endpoint = _Endpoint( - settings={ - "response_type": (Rule,), - "auth": ["ApiToken"], - "endpoint_path": "/v1/actions/detector/{detector_id}/rules", - "operation_id": "create_rule", - "http_method": "POST", - "servers": None, - }, - params_map={ - "all": [ - "detector_id", - "rule_creation_input", - ], - "required": [ - "detector_id", - "rule_creation_input", - ], - "nullable": [], - "enum": [], - "validation": [], - }, - root_map={ - "validations": {}, - "allowed_values": {}, - "openapi_types": { - "detector_id": (str,), - "rule_creation_input": (RuleCreationInput,), - }, - "attribute_map": { - "detector_id": "detector_id", - }, - "location_map": { - "detector_id": "path", - "rule_creation_input": "body", - }, - "collection_format_map": {}, - }, - headers_map={"accept": ["application/json"], "content_type": ["application/json"]}, - api_client=api_client, - ) - self.delete_rule_endpoint = _Endpoint( - settings={ - "response_type": None, - "auth": ["ApiToken"], - "endpoint_path": "/v1/actions/rules/{id}", - "operation_id": "delete_rule", - "http_method": "DELETE", - "servers": None, - }, - params_map={ - "all": [ - "id", - ], - "required": [ - "id", - ], - "nullable": [], - "enum": [], - "validation": [], - }, - root_map={ - "validations": {}, - "allowed_values": {}, - "openapi_types": { - "id": (int,), - }, - "attribute_map": { - "id": "id", - }, - "location_map": { - "id": "path", - }, - "collection_format_map": {}, - }, - headers_map={ - "accept": [], - "content_type": [], - }, - api_client=api_client, - ) - self.get_rule_endpoint = _Endpoint( - settings={ - "response_type": (Rule,), - "auth": ["ApiToken"], - "endpoint_path": "/v1/actions/rules/{id}", - "operation_id": "get_rule", - "http_method": "GET", - "servers": None, - }, - params_map={ - "all": [ - "id", - ], - "required": [ - "id", - ], - "nullable": [], - 
"enum": [], - "validation": [], - }, - root_map={ - "validations": {}, - "allowed_values": {}, - "openapi_types": { - "id": (int,), - }, - "attribute_map": { - "id": "id", - }, - "location_map": { - "id": "path", - }, - "collection_format_map": {}, - }, - headers_map={ - "accept": ["application/json"], - "content_type": [], - }, - api_client=api_client, - ) - self.list_rules_endpoint = _Endpoint( - settings={ - "response_type": (PaginatedRuleList,), - "auth": ["ApiToken"], - "endpoint_path": "/v1/actions/rules", - "operation_id": "list_rules", - "http_method": "GET", - "servers": None, - }, - params_map={ - "all": [ - "page", - "page_size", - ], - "required": [], - "nullable": [], - "enum": [], - "validation": [], - }, - root_map={ - "validations": {}, - "allowed_values": {}, - "openapi_types": { - "page": (int,), - "page_size": (int,), - }, - "attribute_map": { - "page": "page", - "page_size": "page_size", - }, - "location_map": { - "page": "query", - "page_size": "query", - }, - "collection_format_map": {}, - }, - headers_map={ - "accept": ["application/json"], - "content_type": [], - }, - api_client=api_client, - ) - - def create_rule(self, detector_id, rule_creation_input, **kwargs): - """create_rule # noqa: E501 - - Create a new rule for a detector. # noqa: E501 - This method makes a synchronous HTTP request by default. To make an - asynchronous HTTP request, please pass async_req=True - - >>> thread = api.create_rule(detector_id, rule_creation_input, async_req=True) - >>> result = thread.get() - - Args: - detector_id (str): Choose a detector by its ID. - rule_creation_input (RuleCreationInput): - - Keyword Args: - _return_http_data_only (bool): response data without head status - code and headers. Default is True. - _preload_content (bool): if False, the urllib3.HTTPResponse object - will be returned without reading/decoding response data. - Default is True. - _request_timeout (int/float/tuple): timeout setting for this request. 
If - one number provided, it will be total request timeout. It can also - be a pair (tuple) of (connection, read) timeouts. - Default is None. - _check_input_type (bool): specifies if type checking - should be done one the data sent to the server. - Default is True. - _check_return_type (bool): specifies if type checking - should be done one the data received from the server. - Default is True. - _spec_property_naming (bool): True if the variable names in the input data - are serialized names, as specified in the OpenAPI document. - False if the variable names in the input data - are pythonic names, e.g. snake case (default) - _content_type (str/None): force body content-type. - Default is None and content-type will be predicted by allowed - content-types and body. - _host_index (int/None): specifies the index of the server - that we want to use. - Default is read from the configuration. - async_req (bool): execute request asynchronously - - Returns: - Rule - If the method is called asynchronously, returns the request - thread. - """ - kwargs["async_req"] = kwargs.get("async_req", False) - kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) - kwargs["_preload_content"] = kwargs.get("_preload_content", True) - kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) - kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) - kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) - kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) - kwargs["_content_type"] = kwargs.get("_content_type") - kwargs["_host_index"] = kwargs.get("_host_index") - kwargs["detector_id"] = detector_id - kwargs["rule_creation_input"] = rule_creation_input - return self.create_rule_endpoint.call_with_http_info(**kwargs) - - def delete_rule(self, id, **kwargs): - """delete_rule # noqa: E501 - - Delete a rule # noqa: E501 - This method makes a synchronous HTTP request by default. 
To make an - asynchronous HTTP request, please pass async_req=True - - >>> thread = api.delete_rule(id, async_req=True) - >>> result = thread.get() - - Args: - id (int): Delete a rule by its ID. - - Keyword Args: - _return_http_data_only (bool): response data without head status - code and headers. Default is True. - _preload_content (bool): if False, the urllib3.HTTPResponse object - will be returned without reading/decoding response data. - Default is True. - _request_timeout (int/float/tuple): timeout setting for this request. If - one number provided, it will be total request timeout. It can also - be a pair (tuple) of (connection, read) timeouts. - Default is None. - _check_input_type (bool): specifies if type checking - should be done one the data sent to the server. - Default is True. - _check_return_type (bool): specifies if type checking - should be done one the data received from the server. - Default is True. - _spec_property_naming (bool): True if the variable names in the input data - are serialized names, as specified in the OpenAPI document. - False if the variable names in the input data - are pythonic names, e.g. snake case (default) - _content_type (str/None): force body content-type. - Default is None and content-type will be predicted by allowed - content-types and body. - _host_index (int/None): specifies the index of the server - that we want to use. - Default is read from the configuration. - async_req (bool): execute request asynchronously - - Returns: - None - If the method is called asynchronously, returns the request - thread. 
- """ - kwargs["async_req"] = kwargs.get("async_req", False) - kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) - kwargs["_preload_content"] = kwargs.get("_preload_content", True) - kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) - kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) - kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) - kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) - kwargs["_content_type"] = kwargs.get("_content_type") - kwargs["_host_index"] = kwargs.get("_host_index") - kwargs["id"] = id - return self.delete_rule_endpoint.call_with_http_info(**kwargs) - - def get_rule(self, id, **kwargs): - """get_rule # noqa: E501 - - Retrieve a rule # noqa: E501 - This method makes a synchronous HTTP request by default. To make an - asynchronous HTTP request, please pass async_req=True - - >>> thread = api.get_rule(id, async_req=True) - >>> result = thread.get() - - Args: - id (int): Get a rule by its ID. - - Keyword Args: - _return_http_data_only (bool): response data without head status - code and headers. Default is True. - _preload_content (bool): if False, the urllib3.HTTPResponse object - will be returned without reading/decoding response data. - Default is True. - _request_timeout (int/float/tuple): timeout setting for this request. If - one number provided, it will be total request timeout. It can also - be a pair (tuple) of (connection, read) timeouts. - Default is None. - _check_input_type (bool): specifies if type checking - should be done one the data sent to the server. - Default is True. - _check_return_type (bool): specifies if type checking - should be done one the data received from the server. - Default is True. - _spec_property_naming (bool): True if the variable names in the input data - are serialized names, as specified in the OpenAPI document. - False if the variable names in the input data - are pythonic names, e.g. 
snake case (default) - _content_type (str/None): force body content-type. - Default is None and content-type will be predicted by allowed - content-types and body. - _host_index (int/None): specifies the index of the server - that we want to use. - Default is read from the configuration. - async_req (bool): execute request asynchronously - - Returns: - Rule - If the method is called asynchronously, returns the request - thread. - """ - kwargs["async_req"] = kwargs.get("async_req", False) - kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) - kwargs["_preload_content"] = kwargs.get("_preload_content", True) - kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) - kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) - kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) - kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) - kwargs["_content_type"] = kwargs.get("_content_type") - kwargs["_host_index"] = kwargs.get("_host_index") - kwargs["id"] = id - return self.get_rule_endpoint.call_with_http_info(**kwargs) - - def list_rules(self, **kwargs): - """list_rules # noqa: E501 - - Retrieve a list of rules. # noqa: E501 - This method makes a synchronous HTTP request by default. To make an - asynchronous HTTP request, please pass async_req=True - - >>> thread = api.list_rules(async_req=True) - >>> result = thread.get() - - - Keyword Args: - page (int): A page number within the paginated result set.. [optional] - page_size (int): Number of results to return per page.. [optional] - _return_http_data_only (bool): response data without head status - code and headers. Default is True. - _preload_content (bool): if False, the urllib3.HTTPResponse object - will be returned without reading/decoding response data. - Default is True. - _request_timeout (int/float/tuple): timeout setting for this request. If - one number provided, it will be total request timeout. 
It can also - be a pair (tuple) of (connection, read) timeouts. - Default is None. - _check_input_type (bool): specifies if type checking - should be done one the data sent to the server. - Default is True. - _check_return_type (bool): specifies if type checking - should be done one the data received from the server. - Default is True. - _spec_property_naming (bool): True if the variable names in the input data - are serialized names, as specified in the OpenAPI document. - False if the variable names in the input data - are pythonic names, e.g. snake case (default) - _content_type (str/None): force body content-type. - Default is None and content-type will be predicted by allowed - content-types and body. - _host_index (int/None): specifies the index of the server - that we want to use. - Default is read from the configuration. - async_req (bool): execute request asynchronously - - Returns: - PaginatedRuleList - If the method is called asynchronously, returns the request - thread. - """ - kwargs["async_req"] = kwargs.get("async_req", False) - kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True) - kwargs["_preload_content"] = kwargs.get("_preload_content", True) - kwargs["_request_timeout"] = kwargs.get("_request_timeout", None) - kwargs["_check_input_type"] = kwargs.get("_check_input_type", True) - kwargs["_check_return_type"] = kwargs.get("_check_return_type", True) - kwargs["_spec_property_naming"] = kwargs.get("_spec_property_naming", False) - kwargs["_content_type"] = kwargs.get("_content_type") - kwargs["_host_index"] = kwargs.get("_host_index") - return self.list_rules_endpoint.call_with_http_info(**kwargs) diff --git a/generated/groundlight_openapi_client/apis/__init__.py b/generated/groundlight_openapi_client/apis/__init__.py index 15f8ec1f..2015077b 100644 --- a/generated/groundlight_openapi_client/apis/__init__.py +++ b/generated/groundlight_openapi_client/apis/__init__.py @@ -17,5 +17,6 @@ from 
groundlight_openapi_client.api.detector_groups_api import DetectorGroupsApi from groundlight_openapi_client.api.detectors_api import DetectorsApi from groundlight_openapi_client.api.image_queries_api import ImageQueriesApi +from groundlight_openapi_client.api.labels_api import LabelsApi from groundlight_openapi_client.api.notes_api import NotesApi from groundlight_openapi_client.api.user_api import UserApi diff --git a/generated/groundlight_openapi_client/model/annotations_requested_enum.py b/generated/groundlight_openapi_client/model/annotations_requested_enum.py new file mode 100644 index 00000000..dc037938 --- /dev/null +++ b/generated/groundlight_openapi_client/model/annotations_requested_enum.py @@ -0,0 +1,283 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +class AnnotationsRequestedEnum(ModelSimple): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. 
+ validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + ("value",): { + "BINARY_CLASSIFICATION": "BINARY_CLASSIFICATION", + "BOUNDING_BOXES": "BOUNDING_BOXES", + }, + } + + validations = {} + + additional_properties_type = None + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + "value": (str,), + } + + @cached_property + def discriminator(): + return None + + attribute_map = {} + + read_only_vars = set() + + _composed_schemas = None + + required_properties = set([ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ]) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): + """AnnotationsRequestedEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `BINARY_CLASSIFICATION` - Binary Classification * `BOUNDING_BOXES` - Bounding Boxes., must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] # noqa: E501 + + Keyword Args: + value (str): * `BINARY_CLASSIFICATION` - Binary Classification * `BOUNDING_BOXES` - Bounding Boxes., must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. 
+ Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. 
Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): + """AnnotationsRequestedEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. + + Args: + args[0] (str): * `BINARY_CLASSIFICATION` - Binary Classification * `BOUNDING_BOXES` - Bounding Boxes., must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] # noqa: E501 + + Keyword Args: + value (str): * `BINARY_CLASSIFICATION` - Binary Classification * `BOUNDING_BOXES` - Bounding Boxes., must be one of ["BINARY_CLASSIFICATION", "BOUNDING_BOXES", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. 
+ If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
+ % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + return self diff --git a/generated/groundlight_openapi_client/model/b_box_geometry.py b/generated/groundlight_openapi_client/model/b_box_geometry.py new file mode 100644 index 00000000..b1c352d9 --- /dev/null +++ b/generated/groundlight_openapi_client/model/b_box_geometry.py @@ -0,0 +1,302 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +class BBoxGeometry(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = {} + + validations = {} + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + return ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. 
+ """ + return { + "left": (float,), # noqa: E501 + "top": (float,), # noqa: E501 + "right": (float,), # noqa: E501 + "bottom": (float,), # noqa: E501 + "x": (float,), # noqa: E501 + "y": (float,), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + attribute_map = { + "left": "left", # noqa: E501 + "top": "top", # noqa: E501 + "right": "right", # noqa: E501 + "bottom": "bottom", # noqa: E501 + "x": "x", # noqa: E501 + "y": "y", # noqa: E501 + } + + read_only_vars = { + "x", # noqa: E501 + "y", # noqa: E501 + } + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, left, top, right, bottom, x, y, *args, **kwargs): # noqa: E501 + """BBoxGeometry - a model defined in OpenAPI + + Args: + left (float): + top (float): + right (float): + bottom (float): + x (float): + y (float): + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. 
+ For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. + Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.left = left + self.top = top + self.right = right + self.bottom = bottom + self.x = x + self.y = y + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. 
+ continue + setattr(self, var_name, var_value) + return self + + required_properties = set([ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ]) + + @convert_js_args_to_python_args + def __init__(self, left, top, right, bottom, *args, **kwargs): # noqa: E501 + """BBoxGeometry - a model defined in OpenAPI + + Args: + left (float): + top (float): + right (float): + bottom (float): + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.left = left + self.top = top + self.right = right + self.bottom = bottom + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError( + f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + "class with read only attributes." 
+ ) diff --git a/generated/groundlight_openapi_client/model/note_creation_input.py b/generated/groundlight_openapi_client/model/b_box_geometry_request.py similarity index 89% rename from generated/groundlight_openapi_client/model/note_creation_input.py rename to generated/groundlight_openapi_client/model/b_box_geometry_request.py index 2cdce48d..6e756bd6 100644 --- a/generated/groundlight_openapi_client/model/note_creation_input.py +++ b/generated/groundlight_openapi_client/model/b_box_geometry_request.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -29,7 +29,7 @@ from groundlight_openapi_client.exceptions import ApiAttributeError -class NoteCreationInput(ModelNormal): +class BBoxGeometryRequest(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -88,7 +88,10 @@ def openapi_types(): and the value is attribute type. 
""" return { - "content": (str,), # noqa: E501 + "left": (float,), # noqa: E501 + "top": (float,), # noqa: E501 + "right": (float,), # noqa: E501 + "bottom": (float,), # noqa: E501 } @cached_property @@ -96,7 +99,10 @@ def discriminator(): return None attribute_map = { - "content": "content", # noqa: E501 + "left": "left", # noqa: E501 + "top": "top", # noqa: E501 + "right": "right", # noqa: E501 + "bottom": "bottom", # noqa: E501 } read_only_vars = {} @@ -105,11 +111,14 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, content, *args, **kwargs): # noqa: E501 - """NoteCreationInput - a model defined in OpenAPI + def _from_openapi_data(cls, left, top, right, bottom, *args, **kwargs): # noqa: E501 + """BBoxGeometryRequest - a model defined in OpenAPI Args: - content (str): The text inside the note + left (float): + top (float): + right (float): + bottom (float): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -170,7 +179,10 @@ def _from_openapi_data(cls, content, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.content = content + self.left = left + self.top = top + self.right = right + self.bottom = bottom for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -193,11 +205,14 @@ def _from_openapi_data(cls, content, *args, **kwargs): # noqa: E501 ]) @convert_js_args_to_python_args - def __init__(self, content, *args, **kwargs): # noqa: E501 - """NoteCreationInput - a model defined in OpenAPI + def __init__(self, left, top, right, bottom, *args, **kwargs): # noqa: E501 + """BBoxGeometryRequest - a model defined in OpenAPI Args: - content (str): The text inside the note + left (float): + top (float): + right (float): + bottom (float): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -256,7 +271,10 @@ def 
__init__(self, content, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.content = content + self.left = left + self.top = top + self.right = right + self.bottom = bottom for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/image_query.py b/generated/groundlight_openapi_client/model/image_query.py index cb570040..aa6a5a1b 100644 --- a/generated/groundlight_openapi_client/model/image_query.py +++ b/generated/groundlight_openapi_client/model/image_query.py @@ -34,10 +34,12 @@ def lazy_import(): from groundlight_openapi_client.model.counting_result import CountingResult from groundlight_openapi_client.model.image_query_type_enum import ImageQueryTypeEnum from groundlight_openapi_client.model.result_type_enum import ResultTypeEnum + from groundlight_openapi_client.model.roi import ROI globals()["BinaryClassificationResult"] = BinaryClassificationResult globals()["CountingResult"] = CountingResult globals()["ImageQueryTypeEnum"] = ImageQueryTypeEnum + globals()["ROI"] = ROI globals()["ResultTypeEnum"] = ResultTypeEnum @@ -145,6 +147,10 @@ def openapi_types(): ), # noqa: E501 "patience_time": (float,), # noqa: E501 "confidence_threshold": (float,), # noqa: E501 + "rois": ( + [ROI], + none_type, + ), # noqa: E501 } @cached_property @@ -162,6 +168,7 @@ def discriminator(): "result": "result", # noqa: E501 "patience_time": "patience_time", # noqa: E501 "confidence_threshold": "confidence_threshold", # noqa: E501 + "rois": "rois", # noqa: E501 } read_only_vars = { @@ -175,6 +182,7 @@ def discriminator(): "result", # noqa: E501 "patience_time", # noqa: E501 "confidence_threshold", # noqa: E501 + "rois", # noqa: E501 } _composed_schemas = {} @@ -193,6 +201,7 @@ def _from_openapi_data( result, patience_time, confidence_threshold, + rois, *args, **kwargs, ): # noqa: E501 @@ -209,6 +218,7 @@ 
def _from_openapi_data( result (bool, date, datetime, dict, float, int, list, str, none_type): The result of the image query. patience_time (float): How long to wait for a confident response. confidence_threshold (float): Min confidence needed to accept the response of the image query. + rois ([ROI], none_type): An array of regions of interest (bounding boxes) collected on image Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -279,6 +289,7 @@ def _from_openapi_data( self.result = result self.patience_time = patience_time self.confidence_threshold = confidence_threshold + self.rois = rois for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/detector_creation_input.py b/generated/groundlight_openapi_client/model/label_value.py similarity index 77% rename from generated/groundlight_openapi_client/model/detector_creation_input.py rename to generated/groundlight_openapi_client/model/label_value.py index 7e1e6788..b6ac9148 100644 --- a/generated/groundlight_openapi_client/model/detector_creation_input.py +++ b/generated/groundlight_openapi_client/model/label_value.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -29,7 +29,17 @@ from groundlight_openapi_client.exceptions import ApiAttributeError -class DetectorCreationInput(ModelNormal): +def lazy_import(): + from groundlight_openapi_client.model.annotations_requested_enum import AnnotationsRequestedEnum + from groundlight_openapi_client.model.roi import ROI + from groundlight_openapi_client.model.source_enum import SourceEnum + + globals()["AnnotationsRequestedEnum"] = AnnotationsRequestedEnum + globals()["ROI"] = ROI + globals()["SourceEnum"] = SourceEnum + + +class LabelValue(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -55,24 +65,7 @@ class DetectorCreationInput(ModelNormal): allowed_values = {} - validations = { - ("name",): { - "max_length": 200, - }, - ("query",): { - "max_length": 300, - }, - ("group_name",): { - "max_length": 100, - }, - ("confidence_threshold",): { - "inclusive_maximum": 1.0, - "inclusive_minimum": 0.0, - }, - ("pipeline_config",): { - "max_length": 8192, - }, - } + validations = {} @cached_property def additional_properties_type(): @@ -80,6 +73,7 @@ def additional_properties_type(): This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ + lazy_import() return ( bool, date, @@ -104,19 +98,37 @@ def openapi_types(): openapi_types (dict): The key is attribute name and the value is attribute type. 
""" + lazy_import() return { - "name": (str,), # noqa: E501 - "query": (str,), # noqa: E501 - "group_name": (str,), # noqa: E501 - "confidence_threshold": (float,), # noqa: E501 - "pipeline_config": ( + "confidence": ( + float, + none_type, + ), # noqa: E501 + "class_name": ( str, none_type, ), # noqa: E501 - "metadata": ( + "annotations_requested": ([bool, date, datetime, dict, float, int, list, str, none_type],), # noqa: E501 + "created_at": (datetime,), # noqa: E501 + "detector_id": ( + int, + none_type, + ), # noqa: E501 + "source": ( + bool, + date, + datetime, + dict, + float, + int, + list, str, none_type, ), # noqa: E501 + "rois": ( + [ROI], + none_type, + ), # noqa: E501 } @cached_property @@ -124,26 +136,40 @@ def discriminator(): return None attribute_map = { - "name": "name", # noqa: E501 - "query": "query", # noqa: E501 - "group_name": "group_name", # noqa: E501 - "confidence_threshold": "confidence_threshold", # noqa: E501 - "pipeline_config": "pipeline_config", # noqa: E501 - "metadata": "metadata", # noqa: E501 + "confidence": "confidence", # noqa: E501 + "class_name": "class_name", # noqa: E501 + "annotations_requested": "annotations_requested", # noqa: E501 + "created_at": "created_at", # noqa: E501 + "detector_id": "detector_id", # noqa: E501 + "source": "source", # noqa: E501 + "rois": "rois", # noqa: E501 } - read_only_vars = {} + read_only_vars = { + "confidence", # noqa: E501 + "class_name", # noqa: E501 + "annotations_requested", # noqa: E501 + "created_at", # noqa: E501 + "detector_id", # noqa: E501 + "source", # noqa: E501 + } _composed_schemas = {} @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, name, query, *args, **kwargs): # noqa: E501 - """DetectorCreationInput - a model defined in OpenAPI + def _from_openapi_data( + cls, confidence, class_name, annotations_requested, created_at, detector_id, source, *args, **kwargs + ): # noqa: E501 + """LabelValue - a model defined in OpenAPI Args: - name (str): A short, 
descriptive name for the detector. - query (str): A question about the image. + confidence (float, none_type): + class_name (str, none_type): A human-readable class name for this label (e.g. YES/NO) + annotations_requested ([bool, date, datetime, dict, float, int, list, str, none_type]): + created_at (datetime): + detector_id (int, none_type): + source (bool, date, datetime, dict, float, int, list, str, none_type): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -176,10 +202,7 @@ def _from_openapi_data(cls, name, query, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - group_name (str): Which group should this detector be part of?. [optional] # noqa: E501 - confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 - pipeline_config (str, none_type): (Advanced usage) Configuration to instantiate a specific prediction pipeline.. [optional] # noqa: E501 - metadata (str, none_type): A dictionary of custom key/value metadata to associate with the detector (limited to 1KB). This is encoded as a URL-safe, base64-encoded JSON string.. 
[optional] # noqa: E501 + rois ([ROI], none_type): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -208,8 +231,12 @@ def _from_openapi_data(cls, name, query, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.name = name - self.query = query + self.confidence = confidence + self.class_name = class_name + self.annotations_requested = annotations_requested + self.created_at = created_at + self.detector_id = detector_id + self.source = source for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -232,12 +259,8 @@ def _from_openapi_data(cls, name, query, *args, **kwargs): # noqa: E501 ]) @convert_js_args_to_python_args - def __init__(self, name, query, *args, **kwargs): # noqa: E501 - """DetectorCreationInput - a model defined in OpenAPI - - Args: - name (str): A short, descriptive name for the detector. - query (str): A question about the image. + def __init__(self, *args, **kwargs): # noqa: E501 + """LabelValue - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -270,10 +293,7 @@ def __init__(self, name, query, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - group_name (str): Which group should this detector be part of?. [optional] # noqa: E501 - confidence_threshold (float): If the detector's prediction is below this confidence threshold, send the image query for human review.. [optional] if omitted the server will use the default value of 0.9 # noqa: E501 - pipeline_config (str, none_type): (Advanced usage) Configuration to instantiate a specific prediction pipeline.. [optional] # noqa: E501 - metadata (str, none_type): A dictionary of custom key/value metadata to associate with the detector (limited to 1KB). 
This is encoded as a URL-safe, base64-encoded JSON string.. [optional] # noqa: E501 + rois ([ROI], none_type): [optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -300,8 +320,6 @@ def __init__(self, name, query, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.name = name - self.query = query for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/label_value_request.py b/generated/groundlight_openapi_client/model/label_value_request.py new file mode 100644 index 00000000..36ecde44 --- /dev/null +++ b/generated/groundlight_openapi_client/model/label_value_request.py @@ -0,0 +1,302 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +def lazy_import(): + from groundlight_openapi_client.model.roi_request import ROIRequest + + globals()["ROIRequest"] = ROIRequest + + +class LabelValueRequest(ModelNormal): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. 
+ + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + discriminator_value_class_map (dict): A dict to go from the discriminator + variable value to the discriminator class name. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = {} + + validations = { + ("label",): { + "min_length": 1, + }, + ("image_query_id",): { + "min_length": 1, + }, + } + + @cached_property + def additional_properties_type(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + """ + lazy_import() + return ( + bool, + date, + datetime, + dict, + float, + int, + list, + str, + none_type, + ) # noqa: E501 + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. 
+ """ + lazy_import() + return { + "label": (str,), # noqa: E501 + "image_query_id": (str,), # noqa: E501 + "rois": ( + [ROIRequest], + none_type, + ), # noqa: E501 + } + + @cached_property + def discriminator(): + return None + + attribute_map = { + "label": "label", # noqa: E501 + "image_query_id": "image_query_id", # noqa: E501 + "rois": "rois", # noqa: E501 + } + + read_only_vars = {} + + _composed_schemas = {} + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, label, image_query_id, *args, **kwargs): # noqa: E501 + """LabelValueRequest - a model defined in OpenAPI + + Args: + label (str): + image_query_id (str): + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + rois ([ROIRequest], none_type): [optional] # noqa: E501 + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.label = label + self.image_query_id = image_query_id + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. 
+ continue + setattr(self, var_name, var_value) + return self + + required_properties = set([ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ]) + + @convert_js_args_to_python_args + def __init__(self, label, image_query_id, *args, **kwargs): # noqa: E501 + """LabelValueRequest - a model defined in OpenAPI + + Args: + label (str): + image_query_id (str): + + Keyword Args: + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + rois ([ROIRequest], none_type): [optional] # noqa: E501 + """ + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _path_to_item = kwargs.pop("_path_to_item", ()) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + + self.label = label + self.image_query_id = image_query_id + for var_name, var_value in kwargs.items(): + if ( + var_name not in self.attribute_map + and self._configuration is not None + and self._configuration.discard_unknown_keys + and self.additional_properties_type is None + ): + # discard variable. + continue + setattr(self, var_name, var_value) + if var_name in self.read_only_vars: + raise ApiAttributeError( + f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " + "class with read only attributes." 
+ ) diff --git a/generated/groundlight_openapi_client/model/user.py b/generated/groundlight_openapi_client/model/roi.py similarity index 88% rename from generated/groundlight_openapi_client/model/user.py rename to generated/groundlight_openapi_client/model/roi.py index e78b021e..65db005f 100644 --- a/generated/groundlight_openapi_client/model/user.py +++ b/generated/groundlight_openapi_client/model/roi.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -29,7 +29,13 @@ from groundlight_openapi_client.exceptions import ApiAttributeError -class User(ModelNormal): +def lazy_import(): + from groundlight_openapi_client.model.b_box_geometry import BBoxGeometry + + globals()["BBoxGeometry"] = BBoxGeometry + + +class ROI(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech @@ -63,6 +69,7 @@ def additional_properties_type(): This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ + lazy_import() return ( bool, date, @@ -87,8 +94,11 @@ def openapi_types(): openapi_types (dict): The key is attribute name and the value is attribute type. 
""" + lazy_import() return { - "username": (str,), # noqa: E501 + "label": (str,), # noqa: E501 + "score": (float,), # noqa: E501 + "geometry": (BBoxGeometry,), # noqa: E501 } @cached_property @@ -96,20 +106,26 @@ def discriminator(): return None attribute_map = { - "username": "username", # noqa: E501 + "label": "label", # noqa: E501 + "score": "score", # noqa: E501 + "geometry": "geometry", # noqa: E501 } - read_only_vars = {} + read_only_vars = { + "score", # noqa: E501 + } _composed_schemas = {} @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, username, *args, **kwargs): # noqa: E501 - """User - a model defined in OpenAPI + def _from_openapi_data(cls, label, score, geometry, *args, **kwargs): # noqa: E501 + """ROI - a model defined in OpenAPI Args: - username (str): The user's username. + label (str): The label of the bounding box. + score (float): The confidence of the bounding box. + geometry (BBoxGeometry): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -170,7 +186,9 @@ def _from_openapi_data(cls, username, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.username = username + self.label = label + self.score = score + self.geometry = geometry for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -193,11 +211,12 @@ def _from_openapi_data(cls, username, *args, **kwargs): # noqa: E501 ]) @convert_js_args_to_python_args - def __init__(self, username, *args, **kwargs): # noqa: E501 - """User - a model defined in OpenAPI + def __init__(self, label, geometry, *args, **kwargs): # noqa: E501 + """ROI - a model defined in OpenAPI Args: - username (str): The user's username. + label (str): The label of the bounding box. 
+ geometry (BBoxGeometry): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -256,7 +275,8 @@ def __init__(self, username, *args, **kwargs): # noqa: E501 self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - self.username = username + self.label = label + self.geometry = geometry for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/classification_result.py b/generated/groundlight_openapi_client/model/roi_request.py similarity index 90% rename from generated/groundlight_openapi_client/model/classification_result.py rename to generated/groundlight_openapi_client/model/roi_request.py index ca1aedb2..07dbb5cc 100644 --- a/generated/groundlight_openapi_client/model/classification_result.py +++ b/generated/groundlight_openapi_client/model/roi_request.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -29,7 +29,13 @@ from groundlight_openapi_client.exceptions import ApiAttributeError -class ClassificationResult(ModelNormal): +def lazy_import(): + from groundlight_openapi_client.model.b_box_geometry_request import BBoxGeometryRequest + + globals()["BBoxGeometryRequest"] = BBoxGeometryRequest + + +class ROIRequest(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. 
Ref: https://openapi-generator.tech @@ -56,9 +62,8 @@ class ClassificationResult(ModelNormal): allowed_values = {} validations = { - ("confidence",): { - "inclusive_maximum": 1, - "inclusive_minimum": 0, + ("label",): { + "min_length": 1, }, } @@ -68,6 +73,7 @@ def additional_properties_type(): This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ + lazy_import() return ( bool, date, @@ -92,12 +98,10 @@ def openapi_types(): openapi_types (dict): The key is attribute name and the value is attribute type. """ + lazy_import() return { "label": (str,), # noqa: E501 - "confidence": ( - float, - none_type, - ), # noqa: E501 + "geometry": (BBoxGeometryRequest,), # noqa: E501 } @cached_property @@ -106,7 +110,7 @@ def discriminator(): attribute_map = { "label": "label", # noqa: E501 - "confidence": "confidence", # noqa: E501 + "geometry": "geometry", # noqa: E501 } read_only_vars = {} @@ -115,11 +119,12 @@ def discriminator(): @classmethod @convert_js_args_to_python_args - def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 - """ClassificationResult - a model defined in OpenAPI + def _from_openapi_data(cls, label, geometry, *args, **kwargs): # noqa: E501 + """ROIRequest - a model defined in OpenAPI Args: - label (str): What is the predicted label? + label (str): The label of the bounding box. + geometry (BBoxGeometryRequest): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -152,7 +157,6 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - confidence (float, none_type): On a scale of 0 to 1, how confident are we in the predicted label?. 
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -182,6 +186,7 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.label = label + self.geometry = geometry for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map @@ -204,11 +209,12 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501 ]) @convert_js_args_to_python_args - def __init__(self, label, *args, **kwargs): # noqa: E501 - """ClassificationResult - a model defined in OpenAPI + def __init__(self, label, geometry, *args, **kwargs): # noqa: E501 + """ROIRequest - a model defined in OpenAPI Args: - label (str): What is the predicted label? + label (str): The label of the bounding box. + geometry (BBoxGeometryRequest): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types @@ -241,7 +247,6 @@ def __init__(self, label, *args, **kwargs): # noqa: E501 Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) - confidence (float, none_type): On a scale of 0 to 1, how confident are we in the predicted label?. 
[optional] # noqa: E501 """ _check_type = kwargs.pop("_check_type", True) @@ -269,6 +274,7 @@ def __init__(self, label, *args, **kwargs): # noqa: E501 self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.label = label + self.geometry = geometry for var_name, var_value in kwargs.items(): if ( var_name not in self.attribute_map diff --git a/generated/groundlight_openapi_client/model/rule_base.py b/generated/groundlight_openapi_client/model/rule_base.py deleted file mode 100644 index 20f1dc5e..00000000 --- a/generated/groundlight_openapi_client/model/rule_base.py +++ /dev/null @@ -1,317 +0,0 @@ -""" - Groundlight API - - Easy Computer Vision powered by Natural Language # noqa: E501 - - The version of the OpenAPI document: 0.6.0 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech -""" - -import re # noqa: F401 -import sys # noqa: F401 - -from groundlight_openapi_client.model_utils import ( # noqa: F401 - ApiTypeError, - ModelComposed, - ModelNormal, - ModelSimple, - cached_property, - change_keys_js_to_python, - convert_js_args_to_python_args, - date, - datetime, - file_type, - none_type, - validate_get_composed_info, - OpenApiModel, -) -from groundlight_openapi_client.exceptions import ApiAttributeError - - -def lazy_import(): - from groundlight_openapi_client.model.action import Action - from groundlight_openapi_client.model.condition import Condition - - globals()["Action"] = Action - globals()["Condition"] = Condition - - -class RuleBase(ModelNormal): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Attributes: - allowed_values (dict): The key is the tuple path to the attribute - and the for var_name this is (var_name,). The value is a dict - with a capitalized key describing the allowed value and an allowed - value. These dicts store the allowed enum values. 
- attribute_map (dict): The key is attribute name - and the value is json key in definition. - discriminator_value_class_map (dict): A dict to go from the discriminator - variable value to the discriminator class name. - validations (dict): The key is the tuple path to the attribute - and the for var_name this is (var_name,). The value is a dict - that stores validations for max_length, min_length, max_items, - min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, - inclusive_minimum, and regex. - additional_properties_type (tuple): A tuple of classes accepted - as additional properties values. - """ - - allowed_values = { - ("snooze_time_unit",): { - "SECONDS": "SECONDS", - "MINUTES": "MINUTES", - "HOURS": "HOURS", - "DAYS": "DAYS", - }, - } - - validations = { - ("name",): { - "max_length": 200, - }, - } - - @cached_property - def additional_properties_type(): - """ - This must be a method because a model may have properties that are - of type self, this must run after the class is loaded - """ - lazy_import() - return ( - bool, - date, - datetime, - dict, - float, - int, - list, - str, - none_type, - ) # noqa: E501 - - _nullable = False - - @cached_property - def openapi_types(): - """ - This must be a method because a model may have properties that are - of type self, this must run after the class is loaded - - Returns - openapi_types (dict): The key is attribute name - and the value is attribute type. 
- """ - lazy_import() - return { - "detector_id": (str,), # noqa: E501 - "name": (str,), # noqa: E501 - "enabled": (bool,), # noqa: E501 - "snooze_time_enabled": (bool,), # noqa: E501 - "snooze_time_value": (int,), # noqa: E501 - "snooze_time_unit": (str,), # noqa: E501 - "action": (Action,), # noqa: E501 - "condition": (Condition,), # noqa: E501 - } - - @cached_property - def discriminator(): - return None - - attribute_map = { - "detector_id": "detector_id", # noqa: E501 - "name": "name", # noqa: E501 - "enabled": "enabled", # noqa: E501 - "snooze_time_enabled": "snooze_time_enabled", # noqa: E501 - "snooze_time_value": "snooze_time_value", # noqa: E501 - "snooze_time_unit": "snooze_time_unit", # noqa: E501 - "action": "action", # noqa: E501 - "condition": "condition", # noqa: E501 - } - - read_only_vars = {} - - _composed_schemas = {} - - @classmethod - @convert_js_args_to_python_args - def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 - """RuleBase - a model defined in OpenAPI - - Keyword Args: - _check_type (bool): if True, values for parameters in openapi_types - will be type checked and a TypeError will be - raised if the wrong type is input. - Defaults to True - _path_to_item (tuple/list): This is a list of keys or values to - drill down to the model in received_data - when deserializing a response - _spec_property_naming (bool): True if the variable names in the input data - are serialized names, as specified in the OpenAPI document. - False if the variable names in the input data - are pythonic names, e.g. snake case (default) - _configuration (Configuration): the instance to use when - deserializing a file_type parameter. - If passed, type conversion is attempted - If omitted no type conversion is done. - _visited_composed_classes (tuple): This stores a tuple of - classes that we have traveled through so that - if we see that class again we will not use its - discriminator again. 
- When traveling through a discriminator, the - composed schema that is - is traveled through is added to this set. - For example if Animal has a discriminator - petType and we pass in "Dog", and the class Dog - allOf includes Animal, we move through Animal - once using the discriminator, and pick Dog. - Then in Dog, we will make an instance of the - Animal class but this time we won't travel - through its discriminator because we passed in - _visited_composed_classes = (Animal,) - detector_id (str): Which detector should this rule be associated with?. [optional] # noqa: E501 - name (str): A short, descriptive name for the rule.. [optional] # noqa: E501 - enabled (bool): Is this rule enabled?. [optional] if omitted the server will use the default value of True # noqa: E501 - snooze_time_enabled (bool): Is this rule snooze time enabled?. [optional] if omitted the server will use the default value of False # noqa: E501 - snooze_time_value (int): How long to snooze the rule for (in seconds).. [optional] if omitted the server will use the default value of 1 # noqa: E501 - snooze_time_unit (str): What unit of time to use for the snooze time.. [optional] if omitted the server will use the default value of "DAYS" # noqa: E501 - action (Action): [optional] # noqa: E501 - condition (Condition): [optional] # noqa: E501 - """ - - _check_type = kwargs.pop("_check_type", True) - _spec_property_naming = kwargs.pop("_spec_property_naming", False) - _path_to_item = kwargs.pop("_path_to_item", ()) - _configuration = kwargs.pop("_configuration", None) - _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) - - self = super(OpenApiModel, cls).__new__(cls) - - if args: - raise ApiTypeError( - "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
- % ( - args, - self.__class__.__name__, - ), - path_to_item=_path_to_item, - valid_classes=(self.__class__,), - ) - - self._data_store = {} - self._check_type = _check_type - self._spec_property_naming = _spec_property_naming - self._path_to_item = _path_to_item - self._configuration = _configuration - self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - - for var_name, var_value in kwargs.items(): - if ( - var_name not in self.attribute_map - and self._configuration is not None - and self._configuration.discard_unknown_keys - and self.additional_properties_type is None - ): - # discard variable. - continue - setattr(self, var_name, var_value) - return self - - required_properties = set([ - "_data_store", - "_check_type", - "_spec_property_naming", - "_path_to_item", - "_configuration", - "_visited_composed_classes", - ]) - - @convert_js_args_to_python_args - def __init__(self, *args, **kwargs): # noqa: E501 - """RuleBase - a model defined in OpenAPI - - Keyword Args: - _check_type (bool): if True, values for parameters in openapi_types - will be type checked and a TypeError will be - raised if the wrong type is input. - Defaults to True - _path_to_item (tuple/list): This is a list of keys or values to - drill down to the model in received_data - when deserializing a response - _spec_property_naming (bool): True if the variable names in the input data - are serialized names, as specified in the OpenAPI document. - False if the variable names in the input data - are pythonic names, e.g. snake case (default) - _configuration (Configuration): the instance to use when - deserializing a file_type parameter. - If passed, type conversion is attempted - If omitted no type conversion is done. - _visited_composed_classes (tuple): This stores a tuple of - classes that we have traveled through so that - if we see that class again we will not use its - discriminator again. 
- When traveling through a discriminator, the - composed schema that is - is traveled through is added to this set. - For example if Animal has a discriminator - petType and we pass in "Dog", and the class Dog - allOf includes Animal, we move through Animal - once using the discriminator, and pick Dog. - Then in Dog, we will make an instance of the - Animal class but this time we won't travel - through its discriminator because we passed in - _visited_composed_classes = (Animal,) - detector_id (str): Which detector should this rule be associated with?. [optional] # noqa: E501 - name (str): A short, descriptive name for the rule.. [optional] # noqa: E501 - enabled (bool): Is this rule enabled?. [optional] if omitted the server will use the default value of True # noqa: E501 - snooze_time_enabled (bool): Is this rule snooze time enabled?. [optional] if omitted the server will use the default value of False # noqa: E501 - snooze_time_value (int): How long to snooze the rule for (in seconds).. [optional] if omitted the server will use the default value of 1 # noqa: E501 - snooze_time_unit (str): What unit of time to use for the snooze time.. [optional] if omitted the server will use the default value of "DAYS" # noqa: E501 - action (Action): [optional] # noqa: E501 - condition (Condition): [optional] # noqa: E501 - """ - - _check_type = kwargs.pop("_check_type", True) - _spec_property_naming = kwargs.pop("_spec_property_naming", False) - _path_to_item = kwargs.pop("_path_to_item", ()) - _configuration = kwargs.pop("_configuration", None) - _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) - - if args: - raise ApiTypeError( - "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
- % ( - args, - self.__class__.__name__, - ), - path_to_item=_path_to_item, - valid_classes=(self.__class__,), - ) - - self._data_store = {} - self._check_type = _check_type - self._spec_property_naming = _spec_property_naming - self._path_to_item = _path_to_item - self._configuration = _configuration - self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - - for var_name, var_value in kwargs.items(): - if ( - var_name not in self.attribute_map - and self._configuration is not None - and self._configuration.discard_unknown_keys - and self.additional_properties_type is None - ): - # discard variable. - continue - setattr(self, var_name, var_value) - if var_name in self.read_only_vars: - raise ApiAttributeError( - f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " - "class with read only attributes." - ) diff --git a/generated/groundlight_openapi_client/model/rule_creation_input.py b/generated/groundlight_openapi_client/model/rule_creation_input.py deleted file mode 100644 index 0be588b7..00000000 --- a/generated/groundlight_openapi_client/model/rule_creation_input.py +++ /dev/null @@ -1,365 +0,0 @@ -""" - Groundlight API - - Easy Computer Vision powered by Natural Language # noqa: E501 - - The version of the OpenAPI document: 0.6.0 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech -""" - -import re # noqa: F401 -import sys # noqa: F401 - -from groundlight_openapi_client.model_utils import ( # noqa: F401 - ApiTypeError, - ModelComposed, - ModelNormal, - ModelSimple, - cached_property, - change_keys_js_to_python, - convert_js_args_to_python_args, - date, - datetime, - file_type, - none_type, - validate_get_composed_info, - OpenApiModel, -) -from groundlight_openapi_client.exceptions import ApiAttributeError - - -def lazy_import(): - from groundlight_openapi_client.model.action import Action - from groundlight_openapi_client.model.condition import Condition - from 
groundlight_openapi_client.model.rule_base import RuleBase - - globals()["Action"] = Action - globals()["Condition"] = Condition - globals()["RuleBase"] = RuleBase - - -class RuleCreationInput(ModelComposed): - """NOTE: This class is auto generated by OpenAPI Generator. - Ref: https://openapi-generator.tech - - Do not edit the class manually. - - Attributes: - allowed_values (dict): The key is the tuple path to the attribute - and the for var_name this is (var_name,). The value is a dict - with a capitalized key describing the allowed value and an allowed - value. These dicts store the allowed enum values. - attribute_map (dict): The key is attribute name - and the value is json key in definition. - discriminator_value_class_map (dict): A dict to go from the discriminator - variable value to the discriminator class name. - validations (dict): The key is the tuple path to the attribute - and the for var_name this is (var_name,). The value is a dict - that stores validations for max_length, min_length, max_items, - min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, - inclusive_minimum, and regex. - additional_properties_type (tuple): A tuple of classes accepted - as additional properties values. 
- """ - - allowed_values = { - ("snooze_time_unit",): { - "SECONDS": "SECONDS", - "MINUTES": "MINUTES", - "HOURS": "HOURS", - "DAYS": "DAYS", - }, - } - - validations = { - ("name",): { - "max_length": 200, - }, - } - - @cached_property - def additional_properties_type(): - """ - This must be a method because a model may have properties that are - of type self, this must run after the class is loaded - """ - lazy_import() - return ( - bool, - date, - datetime, - dict, - float, - int, - list, - str, - none_type, - ) # noqa: E501 - - _nullable = False - - @cached_property - def openapi_types(): - """ - This must be a method because a model may have properties that are - of type self, this must run after the class is loaded - - Returns - openapi_types (dict): The key is attribute name - and the value is attribute type. - """ - lazy_import() - return { - "detector_id": (str,), # noqa: E501 - "name": (str,), # noqa: E501 - "enabled": (bool,), # noqa: E501 - "snooze_time_enabled": (bool,), # noqa: E501 - "snooze_time_value": (int,), # noqa: E501 - "snooze_time_unit": (str,), # noqa: E501 - "action": (Action,), # noqa: E501 - "condition": (Condition,), # noqa: E501 - } - - @cached_property - def discriminator(): - return None - - attribute_map = { - "detector_id": "detector_id", # noqa: E501 - "name": "name", # noqa: E501 - "enabled": "enabled", # noqa: E501 - "snooze_time_enabled": "snooze_time_enabled", # noqa: E501 - "snooze_time_value": "snooze_time_value", # noqa: E501 - "snooze_time_unit": "snooze_time_unit", # noqa: E501 - "action": "action", # noqa: E501 - "condition": "condition", # noqa: E501 - } - - read_only_vars = {} - - @classmethod - @convert_js_args_to_python_args - def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 - """RuleCreationInput - a model defined in OpenAPI - - Keyword Args: - _check_type (bool): if True, values for parameters in openapi_types - will be type checked and a TypeError will be - raised if the wrong type is input. 
- Defaults to True - _path_to_item (tuple/list): This is a list of keys or values to - drill down to the model in received_data - when deserializing a response - _spec_property_naming (bool): True if the variable names in the input data - are serialized names, as specified in the OpenAPI document. - False if the variable names in the input data - are pythonic names, e.g. snake case (default) - _configuration (Configuration): the instance to use when - deserializing a file_type parameter. - If passed, type conversion is attempted - If omitted no type conversion is done. - _visited_composed_classes (tuple): This stores a tuple of - classes that we have traveled through so that - if we see that class again we will not use its - discriminator again. - When traveling through a discriminator, the - composed schema that is - is traveled through is added to this set. - For example if Animal has a discriminator - petType and we pass in "Dog", and the class Dog - allOf includes Animal, we move through Animal - once using the discriminator, and pick Dog. - Then in Dog, we will make an instance of the - Animal class but this time we won't travel - through its discriminator because we passed in - _visited_composed_classes = (Animal,) - detector_id (str): Which detector should this rule be associated with?. [optional] # noqa: E501 - name (str): A short, descriptive name for the rule.. [optional] # noqa: E501 - enabled (bool): Is this rule enabled?. [optional] if omitted the server will use the default value of True # noqa: E501 - snooze_time_enabled (bool): Is this rule snooze time enabled?. [optional] if omitted the server will use the default value of False # noqa: E501 - snooze_time_value (int): How long to snooze the rule for (in seconds).. [optional] if omitted the server will use the default value of 1 # noqa: E501 - snooze_time_unit (str): What unit of time to use for the snooze time.. 
[optional] if omitted the server will use the default value of "DAYS" # noqa: E501 - action (Action): [optional] # noqa: E501 - condition (Condition): [optional] # noqa: E501 - """ - - _check_type = kwargs.pop("_check_type", True) - _spec_property_naming = kwargs.pop("_spec_property_naming", False) - _path_to_item = kwargs.pop("_path_to_item", ()) - _configuration = kwargs.pop("_configuration", None) - _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) - - self = super(OpenApiModel, cls).__new__(cls) - - if args: - raise ApiTypeError( - "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." - % ( - args, - self.__class__.__name__, - ), - path_to_item=_path_to_item, - valid_classes=(self.__class__,), - ) - - self._data_store = {} - self._check_type = _check_type - self._spec_property_naming = _spec_property_naming - self._path_to_item = _path_to_item - self._configuration = _configuration - self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - - constant_args = { - "_check_type": _check_type, - "_path_to_item": _path_to_item, - "_spec_property_naming": _spec_property_naming, - "_configuration": _configuration, - "_visited_composed_classes": self._visited_composed_classes, - } - composed_info = validate_get_composed_info(constant_args, kwargs, self) - self._composed_instances = composed_info[0] - self._var_name_to_model_instances = composed_info[1] - self._additional_properties_model_instances = composed_info[2] - discarded_args = composed_info[3] - - for var_name, var_value in kwargs.items(): - if ( - var_name in discarded_args - and self._configuration is not None - and self._configuration.discard_unknown_keys - and self._additional_properties_model_instances - ): - # discard variable. 
- continue - setattr(self, var_name, var_value) - - return self - - required_properties = set([ - "_data_store", - "_check_type", - "_spec_property_naming", - "_path_to_item", - "_configuration", - "_visited_composed_classes", - "_composed_instances", - "_var_name_to_model_instances", - "_additional_properties_model_instances", - ]) - - @convert_js_args_to_python_args - def __init__(self, *args, **kwargs): # noqa: E501 - """RuleCreationInput - a model defined in OpenAPI - - Keyword Args: - _check_type (bool): if True, values for parameters in openapi_types - will be type checked and a TypeError will be - raised if the wrong type is input. - Defaults to True - _path_to_item (tuple/list): This is a list of keys or values to - drill down to the model in received_data - when deserializing a response - _spec_property_naming (bool): True if the variable names in the input data - are serialized names, as specified in the OpenAPI document. - False if the variable names in the input data - are pythonic names, e.g. snake case (default) - _configuration (Configuration): the instance to use when - deserializing a file_type parameter. - If passed, type conversion is attempted - If omitted no type conversion is done. - _visited_composed_classes (tuple): This stores a tuple of - classes that we have traveled through so that - if we see that class again we will not use its - discriminator again. - When traveling through a discriminator, the - composed schema that is - is traveled through is added to this set. - For example if Animal has a discriminator - petType and we pass in "Dog", and the class Dog - allOf includes Animal, we move through Animal - once using the discriminator, and pick Dog. - Then in Dog, we will make an instance of the - Animal class but this time we won't travel - through its discriminator because we passed in - _visited_composed_classes = (Animal,) - detector_id (str): Which detector should this rule be associated with?. 
[optional] # noqa: E501 - name (str): A short, descriptive name for the rule.. [optional] # noqa: E501 - enabled (bool): Is this rule enabled?. [optional] if omitted the server will use the default value of True # noqa: E501 - snooze_time_enabled (bool): Is this rule snooze time enabled?. [optional] if omitted the server will use the default value of False # noqa: E501 - snooze_time_value (int): How long to snooze the rule for (in seconds).. [optional] if omitted the server will use the default value of 1 # noqa: E501 - snooze_time_unit (str): What unit of time to use for the snooze time.. [optional] if omitted the server will use the default value of "DAYS" # noqa: E501 - action (Action): [optional] # noqa: E501 - condition (Condition): [optional] # noqa: E501 - """ - - _check_type = kwargs.pop("_check_type", True) - _spec_property_naming = kwargs.pop("_spec_property_naming", False) - _path_to_item = kwargs.pop("_path_to_item", ()) - _configuration = kwargs.pop("_configuration", None) - _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) - - if args: - raise ApiTypeError( - "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." 
- % ( - args, - self.__class__.__name__, - ), - path_to_item=_path_to_item, - valid_classes=(self.__class__,), - ) - - self._data_store = {} - self._check_type = _check_type - self._spec_property_naming = _spec_property_naming - self._path_to_item = _path_to_item - self._configuration = _configuration - self._visited_composed_classes = _visited_composed_classes + (self.__class__,) - - constant_args = { - "_check_type": _check_type, - "_path_to_item": _path_to_item, - "_spec_property_naming": _spec_property_naming, - "_configuration": _configuration, - "_visited_composed_classes": self._visited_composed_classes, - } - composed_info = validate_get_composed_info(constant_args, kwargs, self) - self._composed_instances = composed_info[0] - self._var_name_to_model_instances = composed_info[1] - self._additional_properties_model_instances = composed_info[2] - discarded_args = composed_info[3] - - for var_name, var_value in kwargs.items(): - if ( - var_name in discarded_args - and self._configuration is not None - and self._configuration.discard_unknown_keys - and self._additional_properties_model_instances - ): - # discard variable. - continue - setattr(self, var_name, var_value) - if var_name in self.read_only_vars: - raise ApiAttributeError( - f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " - "class with read only attributes." - ) - - @cached_property - def _composed_schemas(): - # we need this here to make our import statements work - # we must store _composed_schemas in here so the code is only run - # when we invoke this method. 
If we kept this at the class - # level we would get an error because the class level - # code would be run when this module is imported, and these composed - # classes don't exist yet because their module has not finished - # loading - lazy_import() - return { - "anyOf": [], - "allOf": [ - RuleBase, - ], - "oneOf": [], - } diff --git a/generated/groundlight_openapi_client/model/source_enum.py b/generated/groundlight_openapi_client/model/source_enum.py new file mode 100644 index 00000000..cb24e36a --- /dev/null +++ b/generated/groundlight_openapi_client/model/source_enum.py @@ -0,0 +1,288 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import re # noqa: F401 +import sys # noqa: F401 + +from groundlight_openapi_client.model_utils import ( # noqa: F401 + ApiTypeError, + ModelComposed, + ModelNormal, + ModelSimple, + cached_property, + change_keys_js_to_python, + convert_js_args_to_python_args, + date, + datetime, + file_type, + none_type, + validate_get_composed_info, + OpenApiModel, +) +from groundlight_openapi_client.exceptions import ApiAttributeError + + +class SourceEnum(ModelSimple): + """NOTE: This class is auto generated by OpenAPI Generator. + Ref: https://openapi-generator.tech + + Do not edit the class manually. + + Attributes: + allowed_values (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). The value is a dict + with a capitalized key describing the allowed value and an allowed + value. These dicts store the allowed enum values. + validations (dict): The key is the tuple path to the attribute + and the for var_name this is (var_name,). 
The value is a dict + that stores validations for max_length, min_length, max_items, + min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, + inclusive_minimum, and regex. + additional_properties_type (tuple): A tuple of classes accepted + as additional properties values. + """ + + allowed_values = { + ("value",): { + "INITIAL_PLACEHOLDER": "INITIAL_PLACEHOLDER", + "CLOUD": "CLOUD", + "CUST": "CUST", + "HUMAN_CLOUD_ENSEMBLE": "HUMAN_CLOUD_ENSEMBLE", + "ALG": "ALG", + "ALG_REC": "ALG_REC", + "ALG_UNCLEAR": "ALG_UNCLEAR", + }, + } + + validations = {} + + additional_properties_type = None + + _nullable = False + + @cached_property + def openapi_types(): + """ + This must be a method because a model may have properties that are + of type self, this must run after the class is loaded + + Returns + openapi_types (dict): The key is attribute name + and the value is attribute type. + """ + return { + "value": (str,), + } + + @cached_property + def discriminator(): + return None + + attribute_map = {} + + read_only_vars = set() + + _composed_schemas = None + + required_properties = set([ + "_data_store", + "_check_type", + "_spec_property_naming", + "_path_to_item", + "_configuration", + "_visited_composed_classes", + ]) + + @convert_js_args_to_python_args + def __init__(self, *args, **kwargs): + """SourceEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. 
+ + Args: + args[0] (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", ] # noqa: E501 + + Keyword Args: + value (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." + % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + @classmethod + @convert_js_args_to_python_args + def _from_openapi_data(cls, *args, **kwargs): + """SourceEnum - a model defined in OpenAPI + + Note that value can be passed either in args or in kwargs, but not in both. 
+ + Args: + args[0] (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", ] # noqa: E501 + + Keyword Args: + value (str): * `INITIAL_PLACEHOLDER` - InitialPlaceholder * `CLOUD` - HumanCloud * `CUST` - HumanCustomer * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble * `ALG` - Algorithm * `ALG_REC` - AlgorithmReconciled * `ALG_UNCLEAR` - AlgorithmUnclear., must be one of ["INITIAL_PLACEHOLDER", "CLOUD", "CUST", "HUMAN_CLOUD_ENSEMBLE", "ALG", "ALG_REC", "ALG_UNCLEAR", ] # noqa: E501 + _check_type (bool): if True, values for parameters in openapi_types + will be type checked and a TypeError will be + raised if the wrong type is input. + Defaults to True + _path_to_item (tuple/list): This is a list of keys or values to + drill down to the model in received_data + when deserializing a response + _spec_property_naming (bool): True if the variable names in the input data + are serialized names, as specified in the OpenAPI document. + False if the variable names in the input data + are pythonic names, e.g. snake case (default) + _configuration (Configuration): the instance to use when + deserializing a file_type parameter. + If passed, type conversion is attempted + If omitted no type conversion is done. + _visited_composed_classes (tuple): This stores a tuple of + classes that we have traveled through so that + if we see that class again we will not use its + discriminator again. + When traveling through a discriminator, the + composed schema that is + is traveled through is added to this set. + For example if Animal has a discriminator + petType and we pass in "Dog", and the class Dog + allOf includes Animal, we move through Animal + once using the discriminator, and pick Dog. 
+ Then in Dog, we will make an instance of the + Animal class but this time we won't travel + through its discriminator because we passed in + _visited_composed_classes = (Animal,) + """ + # required up here when default value is not given + _path_to_item = kwargs.pop("_path_to_item", ()) + + self = super(OpenApiModel, cls).__new__(cls) + + if "value" in kwargs: + value = kwargs.pop("value") + elif args: + args = list(args) + value = args.pop(0) + else: + raise ApiTypeError( + "value is required, but not passed in args or kwargs and doesn't have default", + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + _check_type = kwargs.pop("_check_type", True) + _spec_property_naming = kwargs.pop("_spec_property_naming", False) + _configuration = kwargs.pop("_configuration", None) + _visited_composed_classes = kwargs.pop("_visited_composed_classes", ()) + + if args: + raise ApiTypeError( + "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." + % ( + args, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + self._data_store = {} + self._check_type = _check_type + self._spec_property_naming = _spec_property_naming + self._path_to_item = _path_to_item + self._configuration = _configuration + self._visited_composed_classes = _visited_composed_classes + (self.__class__,) + self.value = value + if kwargs: + raise ApiTypeError( + "Invalid named arguments=%s passed to %s. Remove those invalid named arguments." 
+ % ( + kwargs, + self.__class__.__name__, + ), + path_to_item=_path_to_item, + valid_classes=(self.__class__,), + ) + + return self diff --git a/generated/groundlight_openapi_client/models/__init__.py b/generated/groundlight_openapi_client/models/__init__.py index 43ea5e69..0491cc60 100644 --- a/generated/groundlight_openapi_client/models/__init__.py +++ b/generated/groundlight_openapi_client/models/__init__.py @@ -12,6 +12,9 @@ from groundlight_openapi_client.model.action import Action from groundlight_openapi_client.model.action_request import ActionRequest from groundlight_openapi_client.model.all_notes import AllNotes +from groundlight_openapi_client.model.annotations_requested_enum import AnnotationsRequestedEnum +from groundlight_openapi_client.model.b_box_geometry import BBoxGeometry +from groundlight_openapi_client.model.b_box_geometry_request import BBoxGeometryRequest from groundlight_openapi_client.model.binary_classification_result import BinaryClassificationResult from groundlight_openapi_client.model.channel_enum import ChannelEnum from groundlight_openapi_client.model.condition import Condition @@ -25,14 +28,19 @@ from groundlight_openapi_client.model.image_query import ImageQuery from groundlight_openapi_client.model.image_query_type_enum import ImageQueryTypeEnum from groundlight_openapi_client.model.inline_response200 import InlineResponse200 +from groundlight_openapi_client.model.label_value import LabelValue +from groundlight_openapi_client.model.label_value_request import LabelValueRequest from groundlight_openapi_client.model.mode_enum import ModeEnum from groundlight_openapi_client.model.note import Note from groundlight_openapi_client.model.note_request import NoteRequest from groundlight_openapi_client.model.paginated_detector_list import PaginatedDetectorList from groundlight_openapi_client.model.paginated_image_query_list import PaginatedImageQueryList from groundlight_openapi_client.model.paginated_rule_list import PaginatedRuleList 
+from groundlight_openapi_client.model.roi import ROI +from groundlight_openapi_client.model.roi_request import ROIRequest from groundlight_openapi_client.model.result_type_enum import ResultTypeEnum from groundlight_openapi_client.model.rule import Rule from groundlight_openapi_client.model.rule_request import RuleRequest from groundlight_openapi_client.model.snooze_time_unit_enum import SnoozeTimeUnitEnum +from groundlight_openapi_client.model.source_enum import SourceEnum from groundlight_openapi_client.model.verb_enum import VerbEnum diff --git a/generated/model.py b/generated/model.py index ba8fac57..d439bf93 100644 --- a/generated/model.py +++ b/generated/model.py @@ -1,6 +1,6 @@ # generated by datamodel-codegen: # filename: public-api.yaml -# timestamp: 2024-07-29T23:09:00+00:00 +# timestamp: 2024-08-13T00:01:16+00:00 from __future__ import annotations @@ -11,6 +11,40 @@ from pydantic import AnyUrl, BaseModel, Field, confloat, conint, constr +class AnnotationsRequestedEnum(Enum): + """ + * `BINARY_CLASSIFICATION` - Binary Classification + * `BOUNDING_BOXES` - Bounding Boxes + """ + + BINARY_CLASSIFICATION = "BINARY_CLASSIFICATION" + BOUNDING_BOXES = "BOUNDING_BOXES" + + +class BBoxGeometry(BaseModel): + """ + Mixin for serializers to handle data in the StrictBaseModel format + """ + + left: float + top: float + right: float + bottom: float + x: float + y: float + + +class BBoxGeometryRequest(BaseModel): + """ + Mixin for serializers to handle data in the StrictBaseModel format + """ + + left: float + top: float + right: float + bottom: float + + class ChannelEnum(Enum): """ * `EMAIL` - EMAIL @@ -59,6 +93,25 @@ class NoteRequest(BaseModel): content: constr(min_length=1) = Field(..., description="Text content of the note.") +class ROI(BaseModel): + """ + Mixin for serializers to handle data in the StrictBaseModel format + """ + + label: str = Field(..., description="The label of the bounding box.") + score: float = Field(..., description="The confidence of the 
bounding box.") + geometry: BBoxGeometry + + +class ROIRequest(BaseModel): + """ + Mixin for serializers to handle data in the StrictBaseModel format + """ + + label: constr(min_length=1) = Field(..., description="The label of the bounding box.") + geometry: BBoxGeometryRequest + + class ResultTypeEnum(Enum): binary_classification = "binary_classification" counting = "counting" @@ -78,6 +131,26 @@ class SnoozeTimeUnitEnum(Enum): SECONDS = "SECONDS" +class SourceEnum(Enum): + """ + * `INITIAL_PLACEHOLDER` - InitialPlaceholder + * `CLOUD` - HumanCloud + * `CUST` - HumanCustomer + * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble + * `ALG` - Algorithm + * `ALG_REC` - AlgorithmReconciled + * `ALG_UNCLEAR` - AlgorithmUnclear + """ + + INITIAL_PLACEHOLDER = "INITIAL_PLACEHOLDER" + CLOUD = "CLOUD" + CUST = "CUST" + HUMAN_CLOUD_ENSEMBLE = "HUMAN_CLOUD_ENSEMBLE" + ALG = "ALG" + ALG_REC = "ALG_REC" + ALG_UNCLEAR = "ALG_UNCLEAR" + + class VerbEnum(Enum): """ * `ANSWERED_CONSECUTIVELY` - ANSWERED_CONSECUTIVELY @@ -219,6 +292,25 @@ class ImageQuery(BaseModel): confidence_threshold: float = Field( ..., description="Min confidence needed to accept the response of the image query." ) + rois: Optional[List[ROI]] = Field( + ..., description="An array of regions of interest (bounding boxes) collected on image" + ) + + +class LabelValue(BaseModel): + confidence: Optional[float] = Field(...) + class_name: Optional[str] = Field(..., description="A human-readable class name for this label (e.g. YES/NO)") + rois: Optional[List[ROI]] = None + annotations_requested: List[AnnotationsRequestedEnum] + created_at: datetime + detector_id: Optional[int] = Field(...) 
+ source: SourceEnum + + +class LabelValueRequest(BaseModel): + label: constr(min_length=1) + image_query_id: constr(min_length=1) + rois: Optional[List[ROIRequest]] = None class PaginatedDetectorList(BaseModel): diff --git a/generated/test/test_action.py b/generated/test/test_action.py index 05e47837..533ecd23 100644 --- a/generated/test/test_action.py +++ b/generated/test/test_action.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -12,6 +12,9 @@ import unittest import groundlight_openapi_client +from groundlight_openapi_client.model.channel_enum import ChannelEnum + +globals()["ChannelEnum"] = ChannelEnum from groundlight_openapi_client.model.action import Action diff --git a/generated/test/test_annotations_requested_enum.py b/generated/test/test_annotations_requested_enum.py new file mode 100644 index 00000000..e140ffff --- /dev/null +++ b/generated/test/test_annotations_requested_enum.py @@ -0,0 +1,35 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.annotations_requested_enum import AnnotationsRequestedEnum + + +class TestAnnotationsRequestedEnum(unittest.TestCase): + """AnnotationsRequestedEnum unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testAnnotationsRequestedEnum(self): + """Test AnnotationsRequestedEnum""" + # FIXME: construct object with mandatory attributes with example values + # model = AnnotationsRequestedEnum() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_classification_result.py b/generated/test/test_b_box_geometry.py similarity index 66% rename from generated/test/test_classification_result.py rename to generated/test/test_b_box_geometry.py index a95afa37..b3236b8d 100644 --- a/generated/test/test_classification_result.py +++ b/generated/test/test_b_box_geometry.py @@ -12,11 +12,11 @@ import unittest import groundlight_openapi_client -from groundlight_openapi_client.model.classification_result import ClassificationResult +from groundlight_openapi_client.model.b_box_geometry import BBoxGeometry -class TestClassificationResult(unittest.TestCase): - """ClassificationResult unit test stubs""" +class TestBBoxGeometry(unittest.TestCase): + """BBoxGeometry unit test stubs""" def setUp(self): pass @@ -24,10 +24,10 @@ def setUp(self): def tearDown(self): pass - def testClassificationResult(self): - """Test ClassificationResult""" + def testBBoxGeometry(self): + """Test BBoxGeometry""" # FIXME: construct object with mandatory attributes with example values - # model = ClassificationResult() # noqa: E501 + # model = BBoxGeometry() # noqa: E501 pass diff --git a/generated/test/test_b_box_geometry_request.py b/generated/test/test_b_box_geometry_request.py new 
file mode 100644 index 00000000..0a17a4a9 --- /dev/null +++ b/generated/test/test_b_box_geometry_request.py @@ -0,0 +1,35 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.b_box_geometry_request import BBoxGeometryRequest + + +class TestBBoxGeometryRequest(unittest.TestCase): + """BBoxGeometryRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testBBoxGeometryRequest(self): + """Test BBoxGeometryRequest""" + # FIXME: construct object with mandatory attributes with example values + # model = BBoxGeometryRequest() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_condition.py b/generated/test/test_condition.py index f7830b38..55820e20 100644 --- a/generated/test/test_condition.py +++ b/generated/test/test_condition.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -12,6 +12,9 @@ import unittest import groundlight_openapi_client +from groundlight_openapi_client.model.verb_enum import VerbEnum + +globals()["VerbEnum"] = VerbEnum from groundlight_openapi_client.model.condition import Condition diff --git a/generated/test/test_detector.py b/generated/test/test_detector.py index 56ad9079..93c39251 100644 --- a/generated/test/test_detector.py +++ b/generated/test/test_detector.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/test/test_detector_creation_input.py b/generated/test/test_detector_creation_input.py deleted file mode 100644 index 63c2afb2..00000000 --- a/generated/test/test_detector_creation_input.py +++ /dev/null @@ -1,35 +0,0 @@ -""" - Groundlight API - - Easy Computer Vision powered by Natural Language # noqa: E501 - - The version of the OpenAPI document: 0.6.0 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech -""" - -import sys -import unittest - -import groundlight_openapi_client -from groundlight_openapi_client.model.detector_creation_input import DetectorCreationInput - - -class TestDetectorCreationInput(unittest.TestCase): - """DetectorCreationInput unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def testDetectorCreationInput(self): - """Test DetectorCreationInput""" - # FIXME: construct object with mandatory attributes with example values - # 
model = DetectorCreationInput() # noqa: E501 - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/generated/test/test_detector_type_enum.py b/generated/test/test_detector_type_enum.py index b4fe6029..5208bf9d 100644 --- a/generated/test/test_detector_type_enum.py +++ b/generated/test/test_detector_type_enum.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/test/test_detectors_api.py b/generated/test/test_detectors_api.py index 422e6a75..654127eb 100644 --- a/generated/test/test_detectors_api.py +++ b/generated/test/test_detectors_api.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -27,10 +27,22 @@ def test_create_detector(self): """Test case for create_detector""" pass + def test_create_detector_group2(self): + """Test case for create_detector_group2""" + pass + + def test_delete_detector(self): + """Test case for delete_detector""" + pass + def test_get_detector(self): """Test case for get_detector""" pass + def test_get_detector_groups2(self): + """Test case for get_detector_groups2""" + pass + def test_list_detectors(self): """Test case for list_detectors""" pass diff --git a/generated/test/test_image_queries_api.py b/generated/test/test_image_queries_api.py index 01ae7442..337f8a24 100644 --- a/generated/test/test_image_queries_api.py +++ b/generated/test/test_image_queries_api.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -23,6 +23,10 @@ def setUp(self): def tearDown(self): pass + def test_get_image(self): + """Test case for get_image""" + pass + def test_get_image_query(self): """Test case for get_image_query""" pass diff --git a/generated/test/test_image_query.py b/generated/test/test_image_query.py index eed34123..5b9cfeda 100644 --- a/generated/test/test_image_query.py +++ b/generated/test/test_image_query.py @@ -12,10 +12,16 @@ import unittest import groundlight_openapi_client +from groundlight_openapi_client.model.binary_classification_result import BinaryClassificationResult +from groundlight_openapi_client.model.counting_result import CountingResult from groundlight_openapi_client.model.image_query_type_enum import ImageQueryTypeEnum from groundlight_openapi_client.model.result_type_enum import ResultTypeEnum +from groundlight_openapi_client.model.roi import ROI +globals()["BinaryClassificationResult"] = BinaryClassificationResult +globals()["CountingResult"] = CountingResult globals()["ImageQueryTypeEnum"] = ImageQueryTypeEnum +globals()["ROI"] = ROI globals()["ResultTypeEnum"] = ResultTypeEnum from groundlight_openapi_client.model.image_query import ImageQuery diff --git a/generated/test/test_image_query_type_enum.py b/generated/test/test_image_query_type_enum.py index 2484c245..3b4fdf62 100644 --- a/generated/test/test_image_query_type_enum.py +++ b/generated/test/test_image_query_type_enum.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/test/test_images_api.py b/generated/test/test_images_api.py deleted file mode 100644 index ef7f4e18..00000000 --- a/generated/test/test_images_api.py +++ /dev/null @@ -1,32 +0,0 @@ -""" - Groundlight API - - Easy Computer Vision powered by Natural Language # noqa: E501 - - The version of the OpenAPI document: 0.6.0 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech -""" - -import unittest - -import groundlight_openapi_client -from groundlight_openapi_client.api.images_api import ImagesApi # noqa: E501 - - -class TestImagesApi(unittest.TestCase): - """ImagesApi unit test stubs""" - - def setUp(self): - self.api = ImagesApi() # noqa: E501 - - def tearDown(self): - pass - - def test_get_image(self): - """Test case for get_image""" - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/generated/test/test_inline_response200.py b/generated/test/test_inline_response200.py index 1b31c00d..6848d1f5 100644 --- a/generated/test/test_inline_response200.py +++ b/generated/test/test_inline_response200.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -12,9 +12,6 @@ import unittest import groundlight_openapi_client -from groundlight_openapi_client.model.note import Note - -globals()["Note"] = Note from groundlight_openapi_client.model.inline_response200 import InlineResponse200 diff --git a/generated/test/test_label_value.py b/generated/test/test_label_value.py new file mode 100644 index 00000000..e68cf812 --- /dev/null +++ b/generated/test/test_label_value.py @@ -0,0 +1,42 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.annotations_requested_enum import AnnotationsRequestedEnum +from groundlight_openapi_client.model.roi import ROI +from groundlight_openapi_client.model.source_enum import SourceEnum + +globals()["AnnotationsRequestedEnum"] = AnnotationsRequestedEnum +globals()["ROI"] = ROI +globals()["SourceEnum"] = SourceEnum +from groundlight_openapi_client.model.label_value import LabelValue + + +class TestLabelValue(unittest.TestCase): + """LabelValue unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testLabelValue(self): + """Test LabelValue""" + # FIXME: construct object with mandatory attributes with example values + # model = LabelValue() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_paginated_all_notes_list.py b/generated/test/test_label_value_request.py similarity index 59% rename from generated/test/test_paginated_all_notes_list.py 
rename to generated/test/test_label_value_request.py index e35ee324..bd9c93c3 100644 --- a/generated/test/test_paginated_all_notes_list.py +++ b/generated/test/test_label_value_request.py @@ -12,14 +12,14 @@ import unittest import groundlight_openapi_client -from groundlight_openapi_client.model.all_notes import AllNotes +from groundlight_openapi_client.model.roi_request import ROIRequest -globals()["AllNotes"] = AllNotes -from groundlight_openapi_client.model.paginated_all_notes_list import PaginatedAllNotesList +globals()["ROIRequest"] = ROIRequest +from groundlight_openapi_client.model.label_value_request import LabelValueRequest -class TestPaginatedAllNotesList(unittest.TestCase): - """PaginatedAllNotesList unit test stubs""" +class TestLabelValueRequest(unittest.TestCase): + """LabelValueRequest unit test stubs""" def setUp(self): pass @@ -27,10 +27,10 @@ def setUp(self): def tearDown(self): pass - def testPaginatedAllNotesList(self): - """Test PaginatedAllNotesList""" + def testLabelValueRequest(self): + """Test LabelValueRequest""" # FIXME: construct object with mandatory attributes with example values - # model = PaginatedAllNotesList() # noqa: E501 + # model = LabelValueRequest() # noqa: E501 pass diff --git a/generated/test/test_labels_api.py b/generated/test/test_labels_api.py new file mode 100644 index 00000000..4d583f2a --- /dev/null +++ b/generated/test/test_labels_api.py @@ -0,0 +1,32 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.api.labels_api import LabelsApi # noqa: E501 + + +class TestLabelsApi(unittest.TestCase): + """LabelsApi unit test stubs""" + + def setUp(self): + self.api = LabelsApi() # noqa: E501 + + def tearDown(self): + pass + + def test_create_label(self): + """Test case for create_label""" + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_note.py b/generated/test/test_note.py index e96516d0..ce74c309 100644 --- a/generated/test/test_note.py +++ b/generated/test/test_note.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/test/test_note_creation_input.py b/generated/test/test_note_creation_input.py deleted file mode 100644 index 79f5c4c4..00000000 --- a/generated/test/test_note_creation_input.py +++ /dev/null @@ -1,35 +0,0 @@ -""" - Groundlight API - - Easy Computer Vision powered by Natural Language # noqa: E501 - - The version of the OpenAPI document: 0.6.0 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech -""" - -import sys -import unittest - -import groundlight_openapi_client -from groundlight_openapi_client.model.note_creation_input import NoteCreationInput - - -class TestNoteCreationInput(unittest.TestCase): - """NoteCreationInput unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def testNoteCreationInput(self): - """Test NoteCreationInput""" - # FIXME: construct object with mandatory attributes with example values - # model = NoteCreationInput() # noqa: E501 - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/generated/test/test_notes_api.py b/generated/test/test_notes_api.py index 71b527ae..9ae90325 100644 --- a/generated/test/test_notes_api.py +++ b/generated/test/test_notes_api.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/test/test_paginated_detector_list.py b/generated/test/test_paginated_detector_list.py index 496730dd..24a02160 100644 --- a/generated/test/test_paginated_detector_list.py +++ b/generated/test/test_paginated_detector_list.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/test/test_paginated_image_query_list.py b/generated/test/test_paginated_image_query_list.py index 5488a2ec..01afa4e8 100644 --- a/generated/test/test_paginated_image_query_list.py +++ b/generated/test/test_paginated_image_query_list.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/test/test_paginated_rule_list.py b/generated/test/test_paginated_rule_list.py index f03d3751..12f3b387 100644 --- a/generated/test/test_paginated_rule_list.py +++ b/generated/test/test_paginated_rule_list.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. 
You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/test/test_result_type_enum.py b/generated/test/test_result_type_enum.py index aaaefe90..68312d1a 100644 --- a/generated/test/test_result_type_enum.py +++ b/generated/test/test_result_type_enum.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/generated/test/test_paginated_note_list.py b/generated/test/test_roi.py similarity index 62% rename from generated/test/test_paginated_note_list.py rename to generated/test/test_roi.py index 9c66490f..05deb982 100644 --- a/generated/test/test_paginated_note_list.py +++ b/generated/test/test_roi.py @@ -12,14 +12,14 @@ import unittest import groundlight_openapi_client -from groundlight_openapi_client.model.note import Note +from groundlight_openapi_client.model.b_box_geometry import BBoxGeometry -globals()["Note"] = Note -from groundlight_openapi_client.model.paginated_note_list import PaginatedNoteList +globals()["BBoxGeometry"] = BBoxGeometry +from groundlight_openapi_client.model.roi import ROI -class TestPaginatedNoteList(unittest.TestCase): - """PaginatedNoteList unit test stubs""" +class TestROI(unittest.TestCase): + """ROI unit test stubs""" def setUp(self): pass @@ -27,10 +27,10 @@ def setUp(self): def tearDown(self): pass - def testPaginatedNoteList(self): - """Test PaginatedNoteList""" + def 
testROI(self): + """Test ROI""" # FIXME: construct object with mandatory attributes with example values - # model = PaginatedNoteList() # noqa: E501 + # model = ROI() # noqa: E501 pass diff --git a/generated/test/test_roi_request.py b/generated/test/test_roi_request.py new file mode 100644 index 00000000..836b26f1 --- /dev/null +++ b/generated/test/test_roi_request.py @@ -0,0 +1,38 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.b_box_geometry_request import BBoxGeometryRequest + +globals()["BBoxGeometryRequest"] = BBoxGeometryRequest +from groundlight_openapi_client.model.roi_request import ROIRequest + + +class TestROIRequest(unittest.TestCase): + """ROIRequest unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testROIRequest(self): + """Test ROIRequest""" + # FIXME: construct object with mandatory attributes with example values + # model = ROIRequest() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_rule.py b/generated/test/test_rule.py index 3714818f..9720d2a2 100644 --- a/generated/test/test_rule.py +++ b/generated/test/test_rule.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. 
# noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ @@ -14,11 +14,11 @@ import groundlight_openapi_client from groundlight_openapi_client.model.action import Action from groundlight_openapi_client.model.condition import Condition -from groundlight_openapi_client.model.rule_base import RuleBase +from groundlight_openapi_client.model.snooze_time_unit_enum import SnoozeTimeUnitEnum globals()["Action"] = Action globals()["Condition"] = Condition -globals()["RuleBase"] = RuleBase +globals()["SnoozeTimeUnitEnum"] = SnoozeTimeUnitEnum from groundlight_openapi_client.model.rule import Rule diff --git a/generated/test/test_rule_base.py b/generated/test/test_rule_base.py deleted file mode 100644 index 8c53a1ac..00000000 --- a/generated/test/test_rule_base.py +++ /dev/null @@ -1,40 +0,0 @@ -""" - Groundlight API - - Easy Computer Vision powered by Natural Language # noqa: E501 - - The version of the OpenAPI document: 0.6.0 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech -""" - -import sys -import unittest - -import groundlight_openapi_client -from groundlight_openapi_client.model.action import Action -from groundlight_openapi_client.model.condition import Condition - -globals()["Action"] = Action -globals()["Condition"] = Condition -from groundlight_openapi_client.model.rule_base import RuleBase - - -class TestRuleBase(unittest.TestCase): - """RuleBase unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def testRuleBase(self): - """Test RuleBase""" - # FIXME: construct object with mandatory attributes with example values - # model = RuleBase() # noqa: E501 - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/generated/test/test_rule_creation_input.py b/generated/test/test_rule_creation_input.py deleted file mode 100644 index 4684d6c0..00000000 --- 
a/generated/test/test_rule_creation_input.py +++ /dev/null @@ -1,42 +0,0 @@ -""" - Groundlight API - - Easy Computer Vision powered by Natural Language # noqa: E501 - - The version of the OpenAPI document: 0.6.0 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech -""" - -import sys -import unittest - -import groundlight_openapi_client -from groundlight_openapi_client.model.action import Action -from groundlight_openapi_client.model.condition import Condition -from groundlight_openapi_client.model.rule_base import RuleBase - -globals()["Action"] = Action -globals()["Condition"] = Condition -globals()["RuleBase"] = RuleBase -from groundlight_openapi_client.model.rule_creation_input import RuleCreationInput - - -class TestRuleCreationInput(unittest.TestCase): - """RuleCreationInput unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def testRuleCreationInput(self): - """Test RuleCreationInput""" - # FIXME: construct object with mandatory attributes with example values - # model = RuleCreationInput() # noqa: E501 - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/generated/test/test_rules_api.py b/generated/test/test_rules_api.py deleted file mode 100644 index 36539521..00000000 --- a/generated/test/test_rules_api.py +++ /dev/null @@ -1,44 +0,0 @@ -""" - Groundlight API - - Easy Computer Vision powered by Natural Language # noqa: E501 - - The version of the OpenAPI document: 0.6.0 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech -""" - -import unittest - -import groundlight_openapi_client -from groundlight_openapi_client.api.rules_api import RulesApi # noqa: E501 - - -class TestRulesApi(unittest.TestCase): - """RulesApi unit test stubs""" - - def setUp(self): - self.api = RulesApi() # noqa: E501 - - def tearDown(self): - pass - - def test_create_rule(self): - """Test case for create_rule""" - pass - - def test_delete_rule(self): - """Test case for 
delete_rule""" - pass - - def test_get_rule(self): - """Test case for get_rule""" - pass - - def test_list_rules(self): - """Test case for list_rules""" - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/generated/test/test_source_enum.py b/generated/test/test_source_enum.py new file mode 100644 index 00000000..d2643e97 --- /dev/null +++ b/generated/test/test_source_enum.py @@ -0,0 +1,35 @@ +""" + Groundlight API + + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 + + The version of the OpenAPI document: 0.15.3 + Contact: support@groundlight.ai + Generated by: https://openapi-generator.tech +""" + +import sys +import unittest + +import groundlight_openapi_client +from groundlight_openapi_client.model.source_enum import SourceEnum + + +class TestSourceEnum(unittest.TestCase): + """SourceEnum unit test stubs""" + + def setUp(self): + pass + + def tearDown(self): + pass + + def testSourceEnum(self): + """Test SourceEnum""" + # FIXME: construct object with mandatory attributes with example values + # model = SourceEnum() # noqa: E501 + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/generated/test/test_user.py b/generated/test/test_user.py deleted file mode 100644 index c6e7f5f3..00000000 --- a/generated/test/test_user.py +++ /dev/null @@ -1,35 +0,0 @@ -""" - Groundlight API - - Easy Computer Vision powered by Natural Language # noqa: E501 - - The version of the OpenAPI document: 0.6.0 - Contact: support@groundlight.ai - Generated by: https://openapi-generator.tech -""" - -import sys -import unittest - -import groundlight_openapi_client -from groundlight_openapi_client.model.user import User - - -class TestUser(unittest.TestCase): - """User unit test stubs""" - - def setUp(self): - pass - - def tearDown(self): - pass - - def testUser(self): - """Test User""" - # FIXME: construct object with mandatory 
attributes with example values - # model = User() # noqa: E501 - pass - - -if __name__ == "__main__": - unittest.main() diff --git a/generated/test/test_user_api.py b/generated/test/test_user_api.py index eb698f08..497759fa 100644 --- a/generated/test/test_user_api.py +++ b/generated/test/test_user_api.py @@ -1,9 +1,9 @@ """ Groundlight API - Easy Computer Vision powered by Natural Language # noqa: E501 + Groundlight makes it simple to understand images. You can easily create computer vision detectors just by describing what you want to know using natural language. # noqa: E501 - The version of the OpenAPI document: 0.6.0 + The version of the OpenAPI document: 0.15.3 Contact: support@groundlight.ai Generated by: https://openapi-generator.tech """ diff --git a/spec/public-api.yaml b/spec/public-api.yaml index 7be94f8e..c6b3d8bb 100644 --- a/spec/public-api.yaml +++ b/spec/public-api.yaml @@ -260,6 +260,54 @@ paths: responses: '204': description: No response body + /v1/detectors/detector-groups: + get: + operationId: Get Detector Groups_2 + description: List all detector groups + tags: + - detectors + security: + - ApiToken: [] + responses: + '200': + content: + application/json: + schema: + type: array + items: + $ref: '#/components/schemas/DetectorGroup' + description: '' + post: + operationId: Create Detector Group_2 + description: |- + Create a new detector group + + POST data: + Required: + - name (str) - name of the predictor set + tags: + - detectors + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/DetectorGroupRequest' + application/x-www-form-urlencoded: + schema: + $ref: '#/components/schemas/DetectorGroupRequest' + multipart/form-data: + schema: + $ref: '#/components/schemas/DetectorGroupRequest' + required: true + security: + - ApiToken: [] + responses: + '201': + content: + application/json: + schema: + $ref: '#/components/schemas/DetectorGroup' + description: '' /v1/image-queries: get: operationId: List image queries 
@@ -401,6 +449,35 @@ paths: type: string format: binary description: '' + /v1/labels: + post: + operationId: create label + description: |- + Create a new LabelValue and attach it to an image query. This will trigger + asynchronous fine-tuner model training. + tags: + - labels + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/LabelValueRequest' + application/x-www-form-urlencoded: + schema: + $ref: '#/components/schemas/LabelValueRequest' + multipart/form-data: + schema: + $ref: '#/components/schemas/LabelValueRequest' + required: true + security: + - ApiToken: [] + responses: + '201': + content: + application/json: + schema: + $ref: '#/components/schemas/LabelValue' + description: '' /v1/me: get: operationId: Who am I @@ -518,6 +595,66 @@ components: required: - CUSTOMER - GL + AnnotationsRequestedEnum: + enum: + - BINARY_CLASSIFICATION + - BOUNDING_BOXES + type: string + description: |- + * `BINARY_CLASSIFICATION` - Binary Classification + * `BOUNDING_BOXES` - Bounding Boxes + BBoxGeometry: + type: object + description: Mixin for serializers to handle data in the StrictBaseModel format + properties: + left: + type: number + format: double + top: + type: number + format: double + right: + type: number + format: double + bottom: + type: number + format: double + x: + type: number + format: double + readOnly: true + y: + type: number + format: double + readOnly: true + required: + - bottom + - left + - right + - top + - x + - y + BBoxGeometryRequest: + type: object + description: Mixin for serializers to handle data in the StrictBaseModel format + properties: + left: + type: number + format: double + top: + type: number + format: double + right: + type: number + format: double + bottom: + type: number + format: double + required: + - bottom + - left + - right + - top ChannelEnum: enum: - EMAIL @@ -762,6 +899,14 @@ components: format: double readOnly: true description: Min confidence needed to accept the response of the image 
query. + rois: + type: array + items: + $ref: '#/components/schemas/ROI' + readOnly: true + nullable: true + description: An array of regions of interest (bounding boxes) collected + on image required: - confidence_threshold - created_at @@ -772,12 +917,80 @@ components: - query - result - result_type + - rois - type x-internal: true ImageQueryTypeEnum: enum: - image_query type: string + LabelValue: + type: object + properties: + confidence: + type: number + format: double + nullable: true + readOnly: true + class_name: + type: string + nullable: true + description: A human-readable class name for this label (e.g. YES/NO) + readOnly: true + rois: + type: array + items: + $ref: '#/components/schemas/ROI' + nullable: true + annotations_requested: + type: array + items: + allOf: + - $ref: '#/components/schemas/AnnotationsRequestedEnum' + description: |- + The type of annotation requested + + * `BINARY_CLASSIFICATION` - Binary Classification + * `BOUNDING_BOXES` - Bounding Boxes + readOnly: true + created_at: + type: string + format: date-time + readOnly: true + detector_id: + type: integer + nullable: true + readOnly: true + source: + allOf: + - $ref: '#/components/schemas/SourceEnum' + readOnly: true + required: + - annotations_requested + - class_name + - confidence + - created_at + - detector_id + - source + LabelValueRequest: + type: object + properties: + label: + type: string + writeOnly: true + minLength: 1 + image_query_id: + type: string + writeOnly: true + minLength: 1 + rois: + type: array + items: + $ref: '#/components/schemas/ROIRequest' + nullable: true + required: + - image_query_id + - label ModeEnum: enum: - BINARY @@ -878,6 +1091,37 @@ components: type: array items: $ref: '#/components/schemas/Rule' + ROI: + type: object + description: Mixin for serializers to handle data in the StrictBaseModel format + properties: + label: + type: string + description: The label of the bounding box. 
+ score: + type: number + format: double + readOnly: true + description: The confidence of the bounding box. + geometry: + $ref: '#/components/schemas/BBoxGeometry' + required: + - geometry + - label + - score + ROIRequest: + type: object + description: Mixin for serializers to handle data in the StrictBaseModel format + properties: + label: + type: string + minLength: 1 + description: The label of the bounding box. + geometry: + $ref: '#/components/schemas/BBoxGeometryRequest' + required: + - geometry + - label ResultTypeEnum: enum: - binary_classification @@ -970,6 +1214,24 @@ components: * `HOURS` - HOURS * `MINUTES` - MINUTES * `SECONDS` - SECONDS + SourceEnum: + enum: + - INITIAL_PLACEHOLDER + - CLOUD + - CUST + - HUMAN_CLOUD_ENSEMBLE + - ALG + - ALG_REC + - ALG_UNCLEAR + type: string + description: |- + * `INITIAL_PLACEHOLDER` - InitialPlaceholder + * `CLOUD` - HumanCloud + * `CUST` - HumanCustomer + * `HUMAN_CLOUD_ENSEMBLE` - HumanCloudEnsemble + * `ALG` - Algorithm + * `ALG_REC` - AlgorithmReconciled + * `ALG_UNCLEAR` - AlgorithmUnclear VerbEnum: enum: - ANSWERED_CONSECUTIVELY diff --git a/src/groundlight/cli.py b/src/groundlight/cli.py index 36a7c3cc..de33e741 100644 --- a/src/groundlight/cli.py +++ b/src/groundlight/cli.py @@ -13,6 +13,14 @@ ) +def is_cli_supported_type(annotation): + """ + Check if the annotation is a type that can be supported by the CLI + str is a supported type, but is given precedence over other types + """ + return annotation in (int, float, bool) + + def class_func_to_cli(method): """ Given the class method, create a method with the identical signature to provide the help documentation and @@ -36,12 +44,29 @@ def wrapper(*args, **kwargs): print(gl_bound_method(*args, **kwargs)) # this is where we output to the console # not recommended practice to directly change annotations, but gets around Typer not supporting Union types + cli_unsupported_params = [] for name, annotation in method.__annotations__.items(): if 
get_origin(annotation) is Union: + # If we can submit a string, we take the string from the cli if str in annotation.__args__: wrapper.__annotations__[name] = str + # Otherwise, we grab the first type that is supported by the CLI else: - wrapper.__annotations__[name] = annotation + found_supported_type = False + for arg in annotation.__args__: + if is_cli_supported_type(arg): + found_supported_type = True + wrapper.__annotations__[name] = arg + break + if not found_supported_type: + cli_unsupported_params.append(name) + # Ideally we could just not list the unsupported params, but it doesn't seem natively supported by Typer + # and requires more metaprogramming than makes sense at the moment. For now, we require methods to support str + for param in cli_unsupported_params: + raise Exception( + f"Parameter {param} on method {method.__name__} has an unsupported type for the CLI. Consider allowing a" + " string representation or writing a custom exception inside the method" + ) return wrapper diff --git a/src/groundlight/client.py b/src/groundlight/client.py index 91e3318b..6cb787b2 100644 --- a/src/groundlight/client.py +++ b/src/groundlight/client.py @@ -4,15 +4,18 @@ import warnings from functools import partial from io import BufferedReader, BytesIO -from typing import Callable, Optional, Union +from typing import Callable, List, Optional, Union from groundlight_openapi_client import Configuration from groundlight_openapi_client.api.detectors_api import DetectorsApi from groundlight_openapi_client.api.image_queries_api import ImageQueriesApi +from groundlight_openapi_client.api.labels_api import LabelsApi from groundlight_openapi_client.api.user_api import UserApi from groundlight_openapi_client.exceptions import NotFoundException, UnauthorizedException from groundlight_openapi_client.model.detector_creation_input_request import DetectorCreationInputRequest +from groundlight_openapi_client.model.label_value_request import LabelValueRequest from model import ( + ROI,
Detector, ImageQuery, PaginatedDetectorList, @@ -149,6 +152,7 @@ def __init__( self.detectors_api = DetectorsApi(self.api_client) self.image_queries_api = ImageQueriesApi(self.api_client) self.user_api = UserApi(self.api_client) + self.labels_api = LabelsApi(self.api_client) self._verify_connectivity() def __repr__(self) -> str: @@ -723,7 +727,9 @@ def _wait_for_result( image_query = self._fixup_image_query(image_query) return image_query - def add_label(self, image_query: Union[ImageQuery, str], label: Union[Label, str]): + def add_label( + self, image_query: Union[ImageQuery, str], label: Union[Label, str], rois: Union[List[ROI], str, None] = None + ): """ Add a new label to an image query. This answers the detector's question. @@ -731,9 +737,12 @@ def add_label(self, image_query: Union[ImageQuery, str], label: Union[Label, str or an image_query id as a string. :param label: The string "YES" or the string "NO" in answer to the query. + :param rois: An optional list of regions of interest (ROIs) to associate with the label. (This feature is experimental) :return: None """ + if isinstance(rois, str): + raise TypeError("rois must be a list of ROI objects. 
CLI support is not implemented") if isinstance(image_query, ImageQuery): image_query_id = image_query.id else: @@ -743,8 +752,9 @@ def add_label(self, image_query: Union[ImageQuery, str], label: Union[Label, str if not image_query_id.startswith(("chk_", "iq_")): raise ValueError(f"Invalid image query id {image_query_id}") api_label = convert_display_label_to_internal(image_query_id, label) - - return self.api_client._add_label(image_query_id, api_label) # pylint: disable=protected-access + rois_json = [roi.dict() for roi in rois] if rois else None + request_params = LabelValueRequest(label=api_label, image_query_id=image_query_id, rois=rois_json) + self.labels_api.create_label(request_params) def start_inspection(self) -> str: """ diff --git a/src/groundlight/experimental_api.py b/src/groundlight/experimental_api.py index daf57cd4..db97beda 100644 --- a/src/groundlight/experimental_api.py +++ b/src/groundlight/experimental_api.py @@ -7,20 +7,25 @@ """ import json -from typing import Any, Dict, List, Union +from typing import Any, Dict, List, Tuple, Union from groundlight_openapi_client.api.actions_api import ActionsApi from groundlight_openapi_client.api.detector_groups_api import DetectorGroupsApi from groundlight_openapi_client.api.image_queries_api import ImageQueriesApi from groundlight_openapi_client.api.notes_api import NotesApi from groundlight_openapi_client.model.action_request import ActionRequest +from groundlight_openapi_client.model.b_box_geometry_request import BBoxGeometryRequest from groundlight_openapi_client.model.channel_enum import ChannelEnum from groundlight_openapi_client.model.condition_request import ConditionRequest from groundlight_openapi_client.model.detector_group_request import DetectorGroupRequest +from groundlight_openapi_client.model.label_value_request import LabelValueRequest from groundlight_openapi_client.model.note_request import NoteRequest +from groundlight_openapi_client.model.roi_request import ROIRequest from 
groundlight_openapi_client.model.rule_request import RuleRequest from groundlight_openapi_client.model.verb_enum import VerbEnum -from model import Detector, DetectorGroup, PaginatedRuleList, Rule +from model import ROI, BBoxGeometry, Detector, DetectorGroup, ImageQuery, PaginatedRuleList, Rule + +from groundlight.binary_labels import Label, convert_display_label_to_internal from .client import Groundlight @@ -204,3 +209,65 @@ def list_detector_groups(self) -> List[DetectorGroup]: :return: a list of all detector groups """ return [DetectorGroup(**det.to_dict()) for det in self.detector_group_api.get_detector_groups()] + + def create_roi(self, label: str, top_left: Tuple[float, float], bottom_right: Tuple[float, float]) -> ROI: + """ + Creates a region of interest (ROI) object that can be associated with a label + NOTE: This feature is only available to Pro tier and higher + If you would like to learn more, reach out to us at https://groundlight.ai + + :param label: the label of the item in the roi + :param top_left: the top left corner of the roi + :param bottom_right: the bottom right corner of the roi + """ + + return ROI( + label=label, + score=1.0, + geometry=BBoxGeometry( + left=top_left[0], + top=top_left[1], + right=bottom_right[0], + bottom=bottom_right[1], + x=(top_left[0] + bottom_right[0]) / 2, + y=(top_left[1] + bottom_right[1]) / 2, + ), + ) + + def add_label( + self, image_query: Union[ImageQuery, str], label: Union[Label, str], rois: Union[List[ROI], str, None] = None + ): + """ + Experimental version of add_label. + Add a new label to an image query. This answers the detector's question. + + :param image_query: Either an ImageQuery object (returned from `submit_image_query`) + or an image_query id as a string. + + :param label: The string "YES" or the string "NO" in answer to the query. + :param rois: An optional list of regions of interest (ROIs) to associate with the label. 
(This feature is experimental) + + :return: None + """ + if isinstance(rois, str): + raise TypeError("rois must be a list of ROI objects. CLI support is not implemented") + if isinstance(image_query, ImageQuery): + image_query_id = image_query.id + else: + image_query_id = str(image_query) + # Some old imagequery ids started with "chk_" + # TODO: handle iqe_ for image_queries returned from edge endpoints + if not image_query_id.startswith(("chk_", "iq_")): + raise ValueError(f"Invalid image query id {image_query_id}") + api_label = convert_display_label_to_internal(image_query_id, label) + geometry_requests = [BBoxGeometryRequest(**roi.geometry.dict()) for roi in rois] if rois else None + roi_requests = ( + [ + ROIRequest(label=roi.label, score=roi.score, geometry=geometry) + for roi, geometry in zip(rois, geometry_requests) + ] + if rois and geometry_requests + else None + ) + request_params = LabelValueRequest(label=api_label, image_query_id=image_query_id, rois=roi_requests) + self.labels_api.create_label(request_params) diff --git a/src/groundlight/internalapi.py b/src/groundlight/internalapi.py index 739f412a..f4984810 100644 --- a/src/groundlight/internalapi.py +++ b/src/groundlight/internalapi.py @@ -203,10 +203,12 @@ def _headers(self) -> dict: @RequestsRetryDecorator() def _add_label(self, image_query_id: str, label: str) -> dict: """Temporary internal call to add a label to an image query. Not supported.""" + logger.warning("This method is slated for removal, instead use the labels_api in the groundlight client") # TODO: Properly model this with OpenApi spec. 
start_time = time.time() url = f"{self.configuration.host}/labels" + # TODO: remove posicheck_id data = {"label": label, "posicheck_id": image_query_id, "review_reason": ReviewReason.CUSTOMER_INITIATED} headers = self._headers() diff --git a/test/unit/conftest.py b/test/unit/conftest.py index 9c0d1ca0..49550bde 100644 --- a/test/unit/conftest.py +++ b/test/unit/conftest.py @@ -1,7 +1,39 @@ +from datetime import datetime + import pytest -from groundlight import ExperimentalApi +from groundlight import ExperimentalApi, Groundlight +from model import Detector, ImageQuery @pytest.fixture(name="gl") +def fixture_gl() -> Groundlight: + """Creates a Groundlight client object for testing.""" + _gl = Groundlight() + _gl.DEFAULT_WAIT = 10 + return _gl + + +@pytest.fixture(name="detector") +def fixture_detector(gl: Groundlight) -> Detector: + """Creates a new Test detector.""" + name = f"Test {datetime.utcnow()}" # Need a unique name + query = "Is there a dog?" + pipeline_config = "never-review" + return gl.create_detector(name=name, query=query, pipeline_config=pipeline_config) + + +@pytest.fixture(name="image_query_yes") +def fixture_image_query_yes(gl: Groundlight, detector: Detector) -> ImageQuery: + iq = gl.submit_image_query(detector=detector.id, image="test/assets/dog.jpeg", human_review="NEVER") + return iq + + +@pytest.fixture(name="image_query_no") +def fixture_image_query_no(gl: Groundlight, detector: Detector) -> ImageQuery: + iq = gl.submit_image_query(detector=detector.id, image="test/assets/cat.jpeg", human_review="NEVER") + return iq + + +@pytest.fixture(name="gl_experimental") def _gl() -> ExperimentalApi: return ExperimentalApi() diff --git a/test/unit/test_actions.py b/test/unit/test_actions.py index 0600ec1e..08a79606 100644 --- a/test/unit/test_actions.py +++ b/test/unit/test_actions.py @@ -5,43 +5,46 @@ from groundlight_openapi_client.exceptions import NotFoundException -def test_create_action(gl: ExperimentalApi): - # use a unique name for the alert 
+def test_create_action(gl_experimental: ExperimentalApi): + # We first clear out any rules in case the account has any left over from a previous test + gl_experimental.delete_all_rules() name = f"Test {datetime.utcnow()}" - det = gl.get_or_create_detector(name, "test_query") - rule = gl.create_rule(det, f"test_rule_{name}", "EMAIL", "test@example.com") - rule2 = gl.get_rule(rule.id) + det = gl_experimental.get_or_create_detector(name, "test_query") + rule = gl_experimental.create_rule(det, f"test_rule_{name}", "EMAIL", "test@example.com") + rule2 = gl_experimental.get_rule(rule.id) assert rule == rule2 - gl.delete_rule(rule.id) + gl_experimental.delete_rule(rule.id) with pytest.raises(NotFoundException) as _: - gl.get_rule(rule.id) + gl_experimental.get_rule(rule.id) @pytest.mark.skip(reason="actions are global on account, the test matrix collides with itself") # type: ignore -def test_get_all_actions(gl: ExperimentalApi): +def test_get_all_actions(gl_experimental: ExperimentalApi): name = f"Test {datetime.utcnow()}" num_test_rules = 13 # needs to be larger than the default page size - gl.ITEMS_PER_PAGE = 10 - assert gl.ITEMS_PER_PAGE < num_test_rules - det = gl.get_or_create_detector(name, "test_query") - gl.delete_all_rules() + gl_experimental.ITEMS_PER_PAGE = 10 + assert gl_experimental.ITEMS_PER_PAGE < num_test_rules + det = gl_experimental.get_or_create_detector(name, "test_query") + gl_experimental.delete_all_rules() for i in range(num_test_rules): - _ = gl.create_rule(det, f"test_rule_{i}", "EMAIL", "test@example.com") - rules = gl.list_rules(page_size=gl.ITEMS_PER_PAGE) + _ = gl_experimental.create_rule(det, f"test_rule_{i}", "EMAIL", "test@example.com") + rules = gl_experimental.list_rules(page_size=gl_experimental.ITEMS_PER_PAGE) assert rules.count == num_test_rules - assert len(rules.results) == gl.ITEMS_PER_PAGE - num_deleted = gl.delete_all_rules() + assert len(rules.results) == gl_experimental.ITEMS_PER_PAGE + num_deleted = 
gl_experimental.delete_all_rules() assert num_deleted == num_test_rules - rules = gl.list_rules() + rules = gl_experimental.list_rules() assert rules.count == 0 -def test_create_action_with_human_review(gl: ExperimentalApi): +def test_create_action_with_human_review(gl_experimental: ExperimentalApi): name = f"Test {datetime.utcnow()}" - det = gl.get_or_create_detector(name, "test_query") - rule = gl.create_rule(det, f"test_rule_{name}", "EMAIL", "test@example.com", human_review_required=True) - rule2 = gl.get_rule(rule.id) + det = gl_experimental.get_or_create_detector(name, "test_query") + rule = gl_experimental.create_rule( + det, f"test_rule_{name}", "EMAIL", "test@example.com", human_review_required=True + ) + rule2 = gl_experimental.get_rule(rule.id) assert rule == rule2 - gl.delete_rule(rule.id) + gl_experimental.delete_rule(rule.id) with pytest.raises(NotFoundException) as _: - gl.get_rule(rule.id) + gl_experimental.get_rule(rule.id) diff --git a/test/unit/test_experimental.py b/test/unit/test_experimental.py index af6d1baa..5a94a8a9 100644 --- a/test/unit/test_experimental.py +++ b/test/unit/test_experimental.py @@ -1,13 +1,45 @@ from datetime import datetime +import pytest from groundlight import ExperimentalApi +from model import ImageQuery -def test_detector_groups(gl: ExperimentalApi): +def test_detector_groups(gl_experimental: ExperimentalApi): """ verify that we can create a detector group and retrieve it """ name = f"Test {datetime.utcnow()}" - created_group = gl.create_detector_group(name) - all_groups = gl.list_detector_groups() + created_group = gl_experimental.create_detector_group(name) + all_groups = gl_experimental.list_detector_groups() assert created_group in all_groups + + +@pytest.mark.skip( + reason=( + "Users currently don't have permission to turn object detection on their own. If you have questions, reach out" + " to Groundlight support." 
+ ) +) +def test_submit_roi(gl_experimental: ExperimentalApi, image_query_yes: ImageQuery): + """ + verify that we can submit an ROI + """ + label_name = "dog" + roi = gl_experimental.create_roi(label_name, (0, 0), (0.5, 0.5)) + gl_experimental.add_label(image_query_yes.id, "YES", [roi]) + + +@pytest.mark.skip( + reason=( + "Users currently don't have permission to turn object detection on their own. If you have questions, reach out" + " to Groundlight support." + ) +) +def test_submit_multiple_rois(gl_experimental: ExperimentalApi, image_query_no: ImageQuery): + """ + verify that we can submit multiple ROIs + """ + label_name = "dog" + roi = gl_experimental.create_roi(label_name, (0, 0), (0.5, 0.5)) + gl_experimental.add_label(image_query_no, "YES", [roi] * 3) diff --git a/test/unit/test_http_retries.py b/test/unit/test_http_retries.py index ffc8d77c..42ab0caf 100644 --- a/test/unit/test_http_retries.py +++ b/test/unit/test_http_retries.py @@ -108,7 +108,7 @@ def test_list_image_queries_attempts_retries(gl: Groundlight): def test_add_label_attempts_retries(gl: Groundlight, detector: Detector): image_query = gl.submit_image_query(detector=detector.id, image=IMAGE_FILE) run_test( - mocked_call="requests.request", + mocked_call="urllib3.PoolManager.request", api_method=gl.add_label, expected_call_counts=TOTAL_RETRIES + 1, image_query=image_query, @@ -116,7 +116,7 @@ def test_add_label_attempts_retries(gl: Groundlight, detector: Detector): ) run_test( - mocked_call="requests.request", + mocked_call="urllib3.PoolManager.request", api_method=gl.add_label, expected_call_counts=TOTAL_RETRIES + 1, image_query=image_query, diff --git a/test/unit/test_images.py b/test/unit/test_images.py index e35a3cc5..12220ade 100644 --- a/test/unit/test_images.py +++ b/test/unit/test_images.py @@ -4,9 +4,9 @@ from groundlight import ExperimentalApi -def test_get_image(gl: ExperimentalApi): +def test_get_image(gl_experimental: ExperimentalApi): name = f"Test {datetime.utcnow()}" - det = 
gl.get_or_create_detector(name, "test_query") - iq = gl.submit_image_query(det, image="test/assets/dog.jpeg", wait=10) - gl.get_image(iq.id) - assert isinstance(PIL.Image.open(gl.get_image(iq.id)), PIL.Image.Image) + det = gl_experimental.get_or_create_detector(name, "test_query") + iq = gl_experimental.submit_image_query(det, image="test/assets/dog.jpeg", wait=10) + gl_experimental.get_image(iq.id) + assert isinstance(PIL.Image.open(gl_experimental.get_image(iq.id)), PIL.Image.Image) diff --git a/test/unit/test_notes.py b/test/unit/test_notes.py index e0e5dc48..b8189169 100644 --- a/test/unit/test_notes.py +++ b/test/unit/test_notes.py @@ -3,12 +3,12 @@ from groundlight import ExperimentalApi -def test_notes(gl: ExperimentalApi): +def test_notes(gl_experimental: ExperimentalApi): name = f"Test {datetime.utcnow()}" - det = gl.create_detector(name, "test_query") - gl.create_note(det, "test_note") - # test runner could be either a customer or GL - notes = (gl.get_notes(det).get("customer") or []) + (gl.get_notes(det).get("gl") or []) + det = gl_experimental.create_detector(name, "test_query") + gl_experimental.create_note(det, "test_note") + # test runner could be either a customer or gl_experimental + notes = (gl_experimental.get_notes(det).get("customer") or []) + (gl_experimental.get_notes(det).get("gl") or []) found_note = False for i in range(len(notes)): if notes[i].content == "test_note":