No longer allows null confidence values #204

Closed
2 changes: 1 addition & 1 deletion Makefile
@@ -13,7 +13,7 @@ install-dev: ## Only install the dev dependencies
install-pre-commit: install ## Install pre-commit hooks
poetry run pre-commit install

install-generator: install ## Install dependencies for SDK code generator
install-generator: ## Install dependencies for SDK code generator
npm install --save remark-math@6 rehype-katex@7

generate: install-generator ## Generate the SDK from our public openapi spec
3 changes: 2 additions & 1 deletion generated/docs/ClassificationResult.md
@@ -5,8 +5,9 @@ Our classification result. This result can come from the detector, or a human re
## Properties
Name | Type | Description | Notes
------------ | ------------- | ------------- | -------------
**confidence** | **float** | On a scale of 0 to 1, how confident are we in the predicted label? |
**label** | **str** | What is the predicted label? |
**confidence** | **float, none_type** | On a scale of 0 to 1, how confident are we in the predicted label? | [optional]
**source** | **str** | Where did this classification result come from? | [optional]
**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]

[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md)
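
As a quick illustration of the regenerated model documented above: confidence is now a required constructor argument and source is an optional keyword. The sketch below uses hypothetical values and assumes the regenerated package under generated/ is importable; it is not part of the diff.

# Hedged sketch (hypothetical values); assumes the regenerated client under generated/ is installed.
from groundlight_openapi_client.model.classification_result import ClassificationResult

result = ClassificationResult(confidence=0.87, label="YES", source="algorithm")
print(result.confidence, result.label, result.source)

# Passing confidence=None should now fail the generated type checks instead of
# silently producing a null confidence.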
1 change: 1 addition & 0 deletions generated/docs/ImageQuery.md
@@ -12,6 +12,7 @@ Name | Type | Description | Notes
**detector_id** | **str** | Which detector was used on this image query? | [readonly]
**result_type** | **bool, date, datetime, dict, float, int, list, str, none_type** | What type of result are we returning? | [readonly]
**result** | **bool, date, datetime, dict, float, int, list, str, none_type** | | [optional] [readonly]
**confidence_threshold** | **float** | | [optional] [readonly]
**metadata** | **{str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type** | A dictionary of custom key/value metadata to associate with the image query (limited to 1KB). | [optional] [readonly]
**any string name** | **bool, date, datetime, dict, float, int, list, str, none_type** | any string name can be used but the value must be the correct type | [optional]

generated/groundlight_openapi_client/model/classification_result.py
@@ -94,20 +94,19 @@ def openapi_types():
and the value is attribute type.
"""
return {
"confidence": (float,), # noqa: E501
"label": (str,), # noqa: E501
"confidence": (
float,
none_type,
), # noqa: E501
"source": (str,), # noqa: E501
}

@cached_property
def discriminator():
return None

attribute_map = {
"label": "label", # noqa: E501
"confidence": "confidence", # noqa: E501
"label": "label", # noqa: E501
"source": "source", # noqa: E501
}

read_only_vars = {}
@@ -116,10 +115,11 @@ def discriminator():

@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501
def _from_openapi_data(cls, confidence, label, *args, **kwargs): # noqa: E501
"""ClassificationResult - a model defined in OpenAPI

Args:
confidence (float): On a scale of 0 to 1, how confident are we in the predicted label?
label (str): What is the predicted label?

Keyword Args:
@@ -153,7 +153,7 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
confidence (float, none_type): On a scale of 0 to 1, how confident are we in the predicted label?. [optional] # noqa: E501
source (str): Where did this classification result come from?. [optional] # noqa: E501
"""

_check_type = kwargs.pop("_check_type", True)
@@ -182,6 +182,7 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

self.confidence = confidence
self.label = label
for var_name, var_value in kwargs.items():
if (
@@ -207,10 +208,11 @@ def _from_openapi_data(cls, label, *args, **kwargs): # noqa: E501
)

@convert_js_args_to_python_args
def __init__(self, label, *args, **kwargs): # noqa: E501
def __init__(self, confidence, label, *args, **kwargs): # noqa: E501
"""ClassificationResult - a model defined in OpenAPI

Args:
confidence (float): On a scale of 0 to 1, how confident are we in the predicted label?
label (str): What is the predicted label?

Keyword Args:
@@ -244,7 +246,7 @@ def __init__(self, label, *args, **kwargs): # noqa: E501
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
confidence (float, none_type): On a scale of 0 to 1, how confident are we in the predicted label?. [optional] # noqa: E501
source (str): Where did this classification result come from?. [optional] # noqa: E501
"""

_check_type = kwargs.pop("_check_type", True)
@@ -271,6 +273,7 @@ def __init__(self, label, *args, **kwargs): # noqa: E501
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

self.confidence = confidence
self.label = label
for var_name, var_value in kwargs.items():
if (
5 changes: 5 additions & 0 deletions generated/groundlight_openapi_client/model/image_query.py
@@ -138,6 +138,7 @@ def openapi_types():
str,
none_type,
), # noqa: E501
"confidence_threshold": (float,), # noqa: E501
"metadata": (
{str: (bool, date, datetime, dict, float, int, list, str, none_type)},
none_type,
@@ -156,6 +157,7 @@ def discriminator():
"detector_id": "detector_id", # noqa: E501
"result_type": "result_type", # noqa: E501
"result": "result", # noqa: E501
"confidence_threshold": "confidence_threshold", # noqa: E501
"metadata": "metadata", # noqa: E501
}

@@ -167,6 +169,7 @@ def discriminator():
"detector_id", # noqa: E501
"result_type", # noqa: E501
"result", # noqa: E501
"confidence_threshold", # noqa: E501
"metadata", # noqa: E501
}

@@ -217,6 +220,7 @@ def _from_openapi_data(cls, id, type, created_at, query, detector_id, result_typ
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
result (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501
confidence_threshold (float): [optional] # noqa: E501
metadata ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): A dictionary of custom key/value metadata to associate with the image query (limited to 1KB).. [optional] # noqa: E501
"""

@@ -311,6 +315,7 @@ def __init__(self, *args, **kwargs): # noqa: E501
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
result (bool, date, datetime, dict, float, int, list, str, none_type): [optional] # noqa: E501
confidence_threshold (float): [optional] # noqa: E501
metadata ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): A dictionary of custom key/value metadata to associate with the image query (limited to 1KB).. [optional] # noqa: E501
"""

8 changes: 5 additions & 3 deletions generated/model.py
@@ -1,6 +1,6 @@
# generated by datamodel-codegen:
# filename: public-api.yaml
# timestamp: 2024-04-04T00:11:29+00:00
# timestamp: 2024-04-17T23:39:40+00:00

from __future__ import annotations

@@ -16,10 +16,11 @@ class User(BaseModel):


class ClassificationResult(BaseModel):
confidence: Optional[confloat(ge=0.0, le=1.0)] = Field(
None, description="On a scale of 0 to 1, how confident are we in the predicted label?"
confidence: confloat(ge=0.0, le=1.0) = Field(
..., description="On a scale of 0 to 1, how confident are we in the predicted label?"
)
label: str = Field(..., description="What is the predicted label?")
source: Optional[str] = Field(None, description="Where did this classification result come from?")


class DetectorCreationInput(BaseModel):
@@ -155,6 +156,7 @@ class ImageQuery(BaseModel):
detector_id: str = Field(..., description="Which detector was used on this image query?")
result_type: ResultTypeEnum = Field(..., description="What type of result are we returning?")
result: Optional[ClassificationResult] = None
confidence_threshold: Optional[float] = None
metadata: Optional[Dict[str, Any]] = Field(
None,
description="A dictionary of custom key/value metadata to associate with the image query (limited to 1KB).",
9 changes: 8 additions & 1 deletion spec/public-api.yaml
@@ -420,14 +420,17 @@ components:
type: number
maximum: 1
minimum: 0
nullable: true
description:
On a scale of 0 to 1, how confident are we in the predicted
label?
label:
type: string
description: What is the predicted label?
source:
type: string
description: Where did this classification result come from?
required:
- confidence
- label
x-internal: true
Detector:
@@ -678,6 +681,10 @@ components:
allOf:
- $ref: "#/components/schemas/ClassificationResult"
readOnly: true
confidence_threshold:
type: number
format: float
readOnly: true
metadata:
type: object
readOnly: true
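
Taken together, these spec changes mean an image query payload now carries a read-only confidence_threshold, and its result always has a non-null confidence plus an optional source. The snippet below parses a payload shaped like the updated schema; the field values are invented for illustration, not captured from the API.

import json

payload = json.loads("""
{
  "id": "iq_abc123",
  "detector_id": "det_xyz789",
  "confidence_threshold": 0.9,
  "result": {"label": "YES", "confidence": 0.95, "source": "algorithm"}
}
""")

# The per-query threshold can now be compared directly against the result confidence.
is_confident = payload["result"]["confidence"] >= payload["confidence_threshold"]
print(is_confident)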
2 changes: 1 addition & 1 deletion src/groundlight/client.py
@@ -667,7 +667,7 @@ def wait_for_confident_result(
if confidence_threshold is None:
if isinstance(image_query, str):
image_query = self.get_image_query(image_query)
confidence_threshold = self.get_detector(image_query.detector_id).confidence_threshold
confidence_threshold = image_query.confidence_threshold

confidence_above_thresh = partial(iq_is_confident, confidence_threshold=confidence_threshold)
return self._wait_for_result(image_query, condition=confidence_above_thresh, timeout_sec=timeout_sec)
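
In practice this removes the extra get_detector round trip when waiting on a query. A hedged usage sketch follows; the detector ID and image path are hypothetical.

from groundlight import Groundlight

gl = Groundlight()
detector = gl.get_detector("det_xyz789")  # hypothetical detector ID
iq = gl.submit_image_query(detector=detector, image="doorway.jpg", wait=0)

# With this change the threshold comes from iq.confidence_threshold rather than
# a separate get_detector call.
iq = gl.wait_for_confident_result(iq, timeout_sec=30.0)
print(iq.result.label, iq.result.confidence)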
17 changes: 6 additions & 11 deletions src/groundlight/internalapi.py
@@ -61,26 +61,21 @@ def _generate_request_id():
return "req_uu" + uuid.uuid4().hex


def iq_is_confident(iq: ImageQuery, confidence_threshold: float) -> bool:
def iq_is_confident(iq: ImageQuery, confidence_threshold: Optional[float]) -> bool:
"""Returns True if the image query's confidence is above threshold.
The only subtlety here is that currently confidence of None means
human label, which is treated as confident.
"""
if iq.result.confidence is None:
# Human label
return True
return iq.result.confidence >= confidence_threshold
if confidence_threshold is None:
confidence_threshold = iq.confidence_threshold
return iq.result.confidence >= confidence_threshold


def iq_is_answered(iq: ImageQuery) -> bool:
"""Returns True if the image query has a ML or human label.
Placeholder and special labels (out of domain) have confidences exactly 0.5
"""
if iq.result.confidence is None:
# Human label
return True
placeholder_confidence = 0.5
return iq.result.confidence > placeholder_confidence
return not iq.result.source == "Still Processing"
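
A brief sketch of the revised helpers, using stand-in objects rather than real API responses; the confidence numbers are made up, and the "Still Processing" source string is taken from the line above.

from types import SimpleNamespace
from groundlight.internalapi import iq_is_confident, iq_is_answered

# Stand-in for an ImageQuery whose per-query threshold is 0.9 (hypothetical values).
iq = SimpleNamespace(
    confidence_threshold=0.9,
    result=SimpleNamespace(label="YES", confidence=0.95, source="algorithm"),
)

print(iq_is_confident(iq, confidence_threshold=None))  # True: falls back to iq.confidence_threshold
print(iq_is_answered(iq))  # True: the result is not still processing

still_processing = SimpleNamespace(result=SimpleNamespace(source="Still Processing"))
print(iq_is_answered(still_processing))  # False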


class InternalApiError(ApiException, RuntimeError):
@@ -218,7 +213,7 @@ def _add_label(self, image_query_id: str, label: str) -> dict:
logger.info(f"Posting label={label} to image_query {image_query_id} ...")
response = requests.request("POST", url, json=data, headers=headers, verify=self.configuration.verify_ssl)
elapsed = 1000 * (time.time() - start_time)
logger.debug(f"Call to ImageQuery.add_label took {elapsed:.1f}ms response={response.text}")
logger.debug(f"Call to ImageQuery._add_label took {elapsed:.1f}ms response={response.text}")

if not is_ok(response.status_code):
raise InternalApiError(