Modify the Anomaly Task so that the integration Test works (#3007)
* Add Anomaly intg test

* Fix anomaly integration test

* Fix unit-test

* Revert lower torchmetric things

* Fix export tests

* Fix anomalib version to hot-fix commit

* Fix anomalib issues

* Fix tox.ini

* Update otx_efficientnet_v2.yaml
harimkang authored Mar 13, 2024
1 parent 50a4d17 commit a616ce9
Showing 89 changed files with 102 additions and 53 deletions.
1 change: 1 addition & 0 deletions .github/workflows/pre_merge.yaml
@@ -94,6 +94,7 @@ jobs:
- task: "instance_segmentation"
- task: "semantic_segmentation"
- task: "visual_prompting"
- task: "anomaly"
name: Integration-Test-${{ matrix.task }}-py310
# This is what will cancel the job concurrency
concurrency:
13 changes: 12 additions & 1 deletion pyproject.toml
@@ -88,7 +88,18 @@ mmlab = [
"oss2==2.17.0",
]
anomaly = [
"anomalib==1.0.0",
# [FIXME] @ashwinvaidya17: Install using a temporary hot-fix commit due to a torchmetrics version conflict.
"anomalib @ git+https://github.com/openvinotoolkit/anomalib.git@e78091883a620229c277a79674a904d9f785f8d5",
# This is a dependency to avoid conflicts with installing the anomalib[core] option.
"av>=10.0.0",
"einops>=0.3.2",
"freia>=0.2",
"imgaug==0.4.0",
"kornia>=0.6.6,<0.6.10",
"matplotlib>=3.4.3",
"opencv-python>=4.5.3.56",
"pandas>=1.1.0",
"open-clip-torch>=2.23.0",
]

[project.scripts]
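The pin above swaps the anomalib 1.0.0 release for a specific hot-fix commit. As a quick sanity check, a minimal sketch (standard library only; the "anomalib" distribution name comes from the dependency above) of confirming which build actually got installed:

from importlib.metadata import version

# A git-pinned install typically resolves to the dev version recorded in
# the pinned commit's package metadata rather than plain "1.0.0".
print(version("anomalib"))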
8 changes: 4 additions & 4 deletions src/otx/algo/anomaly/padim.py
@@ -15,26 +15,26 @@ class Padim(OTXAnomaly, OTXModel, AnomalibPadim):
"""OTX Padim model.
Args:
input_size (tuple[int, int], optional): Input size. Defaults to (256, 256).
backbone (str, optional): Feature extractor backbone. Defaults to "resnet18".
layers (list[str], optional): Feature extractor layers. Defaults to ["layer1", "layer2", "layer3"].
pre_trained (bool, optional): Pretrained backbone. Defaults to True.
n_features (int | None, optional): Number of features. Defaults to None.
num_classes (int, optional): Anomaly models do not use num_classes,
but OTXModel always receives it, so this argument is required. Defaults to 2.
"""

def __init__(
self,
input_size: tuple[int, int] = (256, 256),
backbone: str = "resnet18",
layers: list[str] = ["layer1", "layer2", "layer3"], # noqa: B006
pre_trained: bool = True,
n_features: int | None = None,
num_classes: int = 2,
) -> None:
OTXAnomaly.__init__(self)
OTXModel.__init__(self, num_classes=2)
OTXModel.__init__(self, num_classes=num_classes)
AnomalibPadim.__init__(
self,
input_size=input_size,
backbone=backbone,
layers=layers,
pre_trained=pre_trained,
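With this change, num_classes flows through to OTXModel instead of being hardcoded to 2. A minimal usage sketch based on the signature above (import path taken from the diff header; values are illustrative):

from otx.algo.anomaly.padim import Padim

# num_classes is accepted only because OTXModel requires it; anomaly tasks
# are effectively binary (normal vs. anomalous), so 2 is the natural value.
model = Padim(
    input_size=(256, 256),
    backbone="resnet18",
    layers=["layer1", "layer2", "layer3"],
    pre_trained=True,
    num_classes=2,
)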
8 changes: 4 additions & 4 deletions src/otx/algo/anomaly/stfpm.py
@@ -21,21 +21,21 @@ class Stfpm(OTXAnomaly, OTXModel, AnomalibStfpm):
Args:
layers (Sequence[str]): Feature extractor layers.
input_size (tuple[int, int]): Input size.
backbone (str, optional): Feature extractor backbone. Defaults to "resnet18".
num_classes (int, optional): Anomaly models do not use num_classes,
but OTXModel always receives it, so this argument is required. Defaults to 2.
"""

def __init__(
self,
layers: Sequence[str] = ["layer1", "layer2", "layer3"],
input_size: tuple[int, int] = (256, 256),
backbone: str = "resnet18",
num_classes: int = 2,
) -> None:
OTXAnomaly.__init__(self)
OTXModel.__init__(self, num_classes=2)
OTXModel.__init__(self, num_classes=num_classes)
AnomalibStfpm.__init__(
self,
input_size=input_size,
backbone=backbone,
layers=layers,
)
2 changes: 1 addition & 1 deletion src/otx/cli/cli.py
@@ -413,7 +413,7 @@ def instantiate_model(self, model_config: Namespace) -> tuple:
# Update num_classes
if not self.get_config_value(self.config_init, "disable_infer_num_classes", False):
num_classes = self.datamodule.label_info.num_classes
if num_classes != model_config.init_args.num_classes:
if hasattr(model_config.init_args, "num_classes") and num_classes != model_config.init_args.num_classes:
warning_msg = (
f"The `num_classes` in dataset is {num_classes} "
f"but, the `num_classes` of model is {model_config.init_args.num_classes}. "
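The guard matters because anomaly model configs may omit num_classes entirely. A standalone sketch of the same check, with SimpleNamespace standing in for the parsed model config:

from types import SimpleNamespace

init_args = SimpleNamespace(backbone="resnet18")  # config without num_classes
num_classes = 2  # value inferred from the dataset

# Without the hasattr check, configs lacking num_classes would raise
# AttributeError before the mismatch warning could be emitted.
if hasattr(init_args, "num_classes") and num_classes != init_args.num_classes:
    print("warning: dataset/model num_classes mismatch")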
2 changes: 1 addition & 1 deletion src/otx/core/data/entity/anomaly/classification.py
@@ -53,7 +53,7 @@ def collate_fn(
) -> AnomalyClassificationDataBatch:
"""Collection function to collect `OTXDataEntity` into `OTXBatchDataEntity` in data loader."""
batch = super().collate_fn(entities)
images = tv_tensors.Image(data=torch.stack(batch.images, dim=0)) if stack_images else batch.images
images = tv_tensors.Image(data=torch.stack(tuple(batch.images), dim=0)) if stack_images else batch.images
return AnomalyClassificationDataBatch(
batch_size=batch.batch_size,
images=images,
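The same one-line fix is applied to the detection and segmentation collate functions below. A self-contained sketch of the stacking call itself (shapes are illustrative; the exact container batch.images uses is not shown in the diff, so the explicit tuple() is best read as a defensive conversion):

import torch
from torchvision import tv_tensors

images = [torch.rand(3, 256, 256) for _ in range(4)]  # stand-in batch
stacked = tv_tensors.Image(data=torch.stack(tuple(images), dim=0))
print(stacked.shape)  # torch.Size([4, 3, 256, 256])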
2 changes: 1 addition & 1 deletion src/otx/core/data/entity/anomaly/detection.py
@@ -56,7 +56,7 @@ def collate_fn(
) -> AnomalyDetectionDataBatch:
"""Collection function to collect `OTXDataEntity` into `OTXBatchDataEntity` in data loader."""
batch = super().collate_fn(entities)
images = tv_tensors.Image(data=torch.stack(batch.images, dim=0)) if stack_images else batch.images
images = tv_tensors.Image(data=torch.stack(tuple(batch.images), dim=0)) if stack_images else batch.images
return AnomalyDetectionDataBatch(
batch_size=batch.batch_size,
images=images,
2 changes: 1 addition & 1 deletion src/otx/core/data/entity/anomaly/segmentation.py
@@ -54,7 +54,7 @@ def collate_fn(
) -> AnomalySegmentationDataBatch:
"""Collection function to collect `OTXDataEntity` into `OTXBatchDataEntity` in data loader."""
batch = super().collate_fn(entities)
images = tv_tensors.Image(data=torch.stack(batch.images, dim=0)) if stack_images else batch.images
images = tv_tensors.Image(data=torch.stack(tuple(batch.images), dim=0)) if stack_images else batch.images
return AnomalySegmentationDataBatch(
batch_size=batch.batch_size,
images=images,
22 changes: 18 additions & 4 deletions src/otx/core/model/module/anomaly/anomaly_lightning.py
@@ -134,7 +134,7 @@ class OTXAnomaly:
def __init__(self) -> None:
self.optimizer: list[OptimizerCallable] | OptimizerCallable = None
self.scheduler: list[LRSchedulerCallable] | LRSchedulerCallable = None
self.input_size: list[int] = [256, 256]
self._input_size: tuple[int, int] = (256, 256)
self.mean_values: tuple[float, float, float] = (0.0, 0.0, 0.0)
self.scale_values: tuple[float, float, float] = (1.0, 1.0, 1.0)
self.trainer: Trainer
@@ -147,6 +147,19 @@ def __init__(self) -> None:
self.image_metrics: AnomalibMetricCollection
self.pixel_metrics: AnomalibMetricCollection

@property
def input_size(self) -> tuple[int, int]:
"""Returns the input size of the model.
Returns:
tuple[int, int]: The input size of the model as a tuple of (height, width).
"""
return self._input_size

@input_size.setter
def input_size(self, value: tuple[int, int]) -> None:
self._input_size = value

@property
def task(self) -> AnomalibTaskType:
"""Return the task type of the model."""
@@ -342,13 +355,13 @@ def state_dict(self) -> dict[str, Any]:
"""
state_dict = super().state_dict() # type: ignore[misc]
# This is defined in OTXModel
state_dict["meta_info"] = self.meta_info # type: ignore[attr-defined]
state_dict["label_info"] = self.label_info # type: ignore[attr-defined]
return state_dict

def load_state_dict(self, ckpt: OrderedDict[str, Any], *args, **kwargs) -> None:
"""Pass the checkpoint to the anomaly model."""
ckpt = ckpt.get("state_dict", ckpt)
ckpt.pop("meta_info", None) # [TODO](ashwinvaidya17): Revisit this method when OTXModel is the lightning model
ckpt.pop("label_info", None) # [TODO](ashwinvaidya17): Revisit this method when OTXModel is the lightning model
return super().load_state_dict(ckpt, *args, **kwargs) # type: ignore[misc]

def forward(
@@ -441,8 +454,9 @@ def export(
"""
min_val = self.normalization_metrics.state_dict()["min"].cpu().numpy().tolist()
max_val = self.normalization_metrics.state_dict()["max"].cpu().numpy().tolist()
image_shape = (256, 256) if self.input_size is None else self.input_size
exporter = _AnomalyModelExporter(
image_shape=(self.input_size[0], self.input_size[1]),
image_shape=image_shape,
image_threshold=self.image_threshold.value.cpu().numpy().tolist(),
pixel_threshold=self.pixel_threshold.value.cpu().numpy().tolist(),
task=self.task,
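Taken together, the changes above turn input_size into a property backed by _input_size and give export a defensive fallback. A standalone sketch of the pattern (the class name here is hypothetical):

class AnomalyModelSketch:
    """Stand-in for the input_size property introduced above."""

    def __init__(self) -> None:
        self._input_size: tuple[int, int] = (256, 256)

    @property
    def input_size(self) -> tuple[int, int]:
        return self._input_size

    @input_size.setter
    def input_size(self, value: tuple[int, int]) -> None:
        self._input_size = value

model = AnomalyModelSketch()
model.input_size = (512, 512)
# Mirrors the export fallback above: fall back to (256, 256) when unset.
image_shape = (256, 256) if model.input_size is None else model.input_size
print(image_shape)  # (512, 512)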
7 changes: 4 additions & 3 deletions src/otx/engine/utils/auto_configurator.py
@@ -42,9 +42,9 @@
OTXTaskType.INSTANCE_SEGMENTATION: RECIPE_PATH / "instance_segmentation" / "maskrcnn_r50.yaml",
OTXTaskType.ACTION_CLASSIFICATION: RECIPE_PATH / "action" / "action_classification" / "x3d.yaml",
OTXTaskType.ACTION_DETECTION: RECIPE_PATH / "action" / "action_detection" / "x3d_fastrcnn.yaml",
OTXTaskType.ANOMALY_CLASSIFICATION: RECIPE_PATH / "anomaly" / "anomaly_classification" / "padim.yaml",
OTXTaskType.ANOMALY_SEGMENTATION: RECIPE_PATH / "anomaly" / "anomaly_segmentation" / "padim.yaml",
OTXTaskType.ANOMALY_DETECTION: RECIPE_PATH / "anomaly" / "anomaly_detection" / "padim.yaml",
OTXTaskType.ANOMALY_CLASSIFICATION: RECIPE_PATH / "anomaly_classification" / "padim.yaml",
OTXTaskType.ANOMALY_SEGMENTATION: RECIPE_PATH / "anomaly_segmentation" / "padim.yaml",
OTXTaskType.ANOMALY_DETECTION: RECIPE_PATH / "anomaly_detection" / "padim.yaml",
OTXTaskType.VISUAL_PROMPTING: RECIPE_PATH / "visual_prompting" / "sam_tiny_vit.yaml",
OTXTaskType.ZERO_SHOT_VISUAL_PROMPTING: RECIPE_PATH / "zero_shot_visual_prompting" / "sam_tiny_vit.yaml",
}
@@ -67,6 +67,7 @@
"common_semantic_segmentation_with_subset_dirs": [OTXTaskType.SEMANTIC_SEGMENTATION],
"kinetics": [OTXTaskType.ACTION_CLASSIFICATION],
"ava": [OTXTaskType.ACTION_DETECTION],
"mvtec": [OTXTaskType.ANOMALY_CLASSIFICATION, OTXTaskType.ANOMALY_DETECTION, OTXTaskType.ANOMALY_SEGMENTATION],
}

OVMODEL_PER_TASK = {
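The new "mvtec" entry lets the auto-configurator propose anomaly tasks when an MVTec-style dataset is detected. A reduced sketch of that lookup (dictionary contents copied from the diff; the helper function is illustrative, not the repo's API):

TASK_PER_DATA_FORMAT = {
    "mvtec": ["ANOMALY_CLASSIFICATION", "ANOMALY_DETECTION", "ANOMALY_SEGMENTATION"],
}

def candidate_tasks(data_format: str) -> list[str]:
    # Return the task types that can consume the detected data format.
    return TASK_PER_DATA_FORMAT.get(data_format, [])

print(candidate_tasks("mvtec"))  # all three anomaly task types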
10 changes: 6 additions & 4 deletions src/otx/recipe/anomaly_classification/padim.yaml
@@ -1,9 +1,6 @@
model:
class_path: otx.algo.anomaly.padim.Padim
init_args:
input_size:
- 256
- 256
layers: ["layer1", "layer2", "layer3"]
backbone: "resnet18"
pre_trained: True
@@ -15,10 +12,15 @@ engine:

callback_monitor: step # this has no effect as Padim does not need to be trained

data: ../../_base_/data/torchvision_base.yaml
data: ../_base_/data/torchvision_base.yaml
overrides:
precision: 32
max_epochs: 1
limit_val_batches: 0 # this is set to 0 as the default dataloader does not have validation set. But this also means that the model will not give correct performance numbers
callbacks:
- class_path: otx.algo.callbacks.adaptive_train_scheduling.AdaptiveTrainScheduling
init_args:
max_interval: 1
data:
task: ANOMALY_CLASSIFICATION
config:
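A hedged sketch of exercising this recipe end to end. Engine.from_config is used in the integration tests below, but the keyword names here are assumptions rather than confirmed API:

from otx.engine import Engine

# Assumed argument names; the recipe and dataset paths come from this commit.
engine = Engine.from_config(
    config_path="src/otx/recipe/anomaly_classification/padim.yaml",
    data_root="tests/assets/anomaly_hazelnut",
)
engine.train(max_epochs=1)  # Padim needs only a single epoch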
8 changes: 4 additions & 4 deletions src/otx/recipe/anomaly_classification/stfpm.yaml
@@ -1,9 +1,6 @@
model:
class_path: otx.algo.anomaly.stfpm.Stfpm
init_args:
input_size:
- 256
- 256
layers: ["layer1", "layer2", "layer3"]
backbone: "resnet18"

@@ -21,14 +18,17 @@ engine:

callback_monitor: train_loss_epoch # val loss is not available as there is no validation set from default dataloader

data: ../../_base_/data/torchvision_base.yaml
data: ../_base_/data/torchvision_base.yaml
overrides:
max_epochs: 100
limit_val_batches: 0 # this is set to 0 as the default dataloader does not have validation set. But this also means that the model will not give correct performance numbers
callbacks:
- class_path: lightning.pytorch.callbacks.EarlyStopping
init_args:
patience: 5
- class_path: otx.algo.callbacks.adaptive_train_scheduling.AdaptiveTrainScheduling
init_args:
max_interval: 1
data:
task: ANOMALY_CLASSIFICATION
config:
10 changes: 6 additions & 4 deletions src/otx/recipe/anomaly_detection/padim.yaml
@@ -1,9 +1,6 @@
model:
class_path: otx.algo.anomaly.padim.Padim
init_args:
input_size:
- 256
- 256
layers: ["layer1", "layer2", "layer3"]
backbone: "resnet18"
pre_trained: True
@@ -15,10 +12,15 @@ engine:

callback_monitor: step # this has no effect as Padim does not need to be trained

data: ../../_base_/data/torchvision_base.yaml
data: ../_base_/data/torchvision_base.yaml
overrides:
precision: 32
max_epochs: 1
limit_val_batches: 0 # this is set to 0 as the default dataloader does not have validation set. But this also means that the model will not give correct performance numbers
callbacks:
- class_path: otx.algo.callbacks.adaptive_train_scheduling.AdaptiveTrainScheduling
init_args:
max_interval: 1
data:
task: ANOMALY_DETECTION
config:
8 changes: 4 additions & 4 deletions src/otx/recipe/anomaly_detection/stfpm.yaml
@@ -1,9 +1,6 @@
model:
class_path: otx.algo.anomaly.stfpm.Stfpm
init_args:
input_size:
- 256
- 256
layers: ["layer1", "layer2", "layer3"]
backbone: "resnet18"

@@ -21,14 +18,17 @@ engine:

callback_monitor: train_loss_epoch # val loss is not available as there is no validation set from default dataloader

data: ../../_base_/data/torchvision_base.yaml
data: ../_base_/data/torchvision_base.yaml
overrides:
max_epochs: 100
limit_val_batches: 0 # this is set to 0 as the default dataloader does not have validation set. But this also means that the model will not give correct performance numbers
callbacks:
- class_path: lightning.pytorch.callbacks.EarlyStopping
init_args:
patience: 5
- class_path: otx.algo.callbacks.adaptive_train_scheduling.AdaptiveTrainScheduling
init_args:
max_interval: 1
data:
task: ANOMALY_DETECTION
config:
10 changes: 6 additions & 4 deletions src/otx/recipe/anomaly_segmentation/padim.yaml
@@ -1,9 +1,6 @@
model:
class_path: otx.algo.anomaly.padim.Padim
init_args:
input_size:
- 256
- 256
layers: ["layer1", "layer2", "layer3"]
backbone: "resnet18"
pre_trained: True
@@ -15,10 +12,15 @@ engine:

callback_monitor: step # this has no effect as Padim does not need to be trained

data: ../../_base_/data/torchvision_base.yaml
data: ../_base_/data/torchvision_base.yaml
overrides:
precision: 32
max_epochs: 1
limit_val_batches: 0 # this is set to 0 as the default dataloader does not have validation set. But this also means that the model will not give correct performance numbers
callbacks:
- class_path: otx.algo.callbacks.adaptive_train_scheduling.AdaptiveTrainScheduling
init_args:
max_interval: 1
data:
task: ANOMALY_SEGMENTATION
config:
8 changes: 4 additions & 4 deletions src/otx/recipe/anomaly_segmentation/stfpm.yaml
@@ -1,9 +1,6 @@
model:
class_path: otx.algo.anomaly.stfpm.Stfpm
init_args:
input_size:
- 256
- 256
layers: ["layer1", "layer2", "layer3"]
backbone: "resnet18"

@@ -21,14 +18,17 @@ engine:

callback_monitor: train_loss_epoch # val loss is not available as there is no validation set from default dataloader

data: ../../_base_/data/torchvision_base.yaml
data: ../_base_/data/torchvision_base.yaml
overrides:
max_epochs: 100
limit_val_batches: 0 # this is set to 0 as the default dataloader does not have validation set. But this also means that the model will not give correct performance numbers
callbacks:
- class_path: lightning.pytorch.callbacks.EarlyStopping
init_args:
patience: 5
- class_path: otx.algo.callbacks.adaptive_train_scheduling.AdaptiveTrainScheduling
init_args:
max_interval: 1
data:
task: ANOMALY_SEGMENTATION
config:
Binary file added tests/assets/anomaly_hazelnut/test/colour/00.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/01.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/02.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/03.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/04.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/05.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/06.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/07.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/08.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/09.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/10.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/11.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/12.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/13.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/14.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/15.jpg
Binary file added tests/assets/anomaly_hazelnut/test/colour/16.jpg
Binary file added tests/assets/anomaly_hazelnut/test/good/04.jpg
Binary file added tests/assets/anomaly_hazelnut/test/good/05.jpg
Binary file added tests/assets/anomaly_hazelnut/test/good/13.jpg
Binary file added tests/assets/anomaly_hazelnut/test/good/23.jpg
Binary file added tests/assets/anomaly_hazelnut/test/good/25.jpg
Binary file added tests/assets/anomaly_hazelnut/test/good/28.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/00.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/01.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/02.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/03.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/06.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/07.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/08.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/09.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/10.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/11.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/12.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/14.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/15.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/16.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/17.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/18.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/19.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/20.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/21.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/22.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/24.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/26.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/27.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/29.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/30.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/31.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/32.jpg
Binary file added tests/assets/anomaly_hazelnut/train/good/33.jpg
2 changes: 2 additions & 0 deletions tests/integration/api/test_auto_configuration.py
@@ -32,6 +32,8 @@ def test_auto_configuration(
pytest.skip(
reason="H-labels require num_multiclass_head, num_multilabel_classes, which skip until we have the ability to automate this.",
)
if task.lower().startswith("anomaly"):
pytest.skip(reason="This will be added in a future pipeline behavior.")

tmp_path_train = tmp_path / f"auto_train_{task}"
data_root = fxt_target_dataset_per_task[task.lower()]
5 changes: 3 additions & 2 deletions tests/integration/api/test_engine_api.py
@@ -37,8 +37,6 @@ def test_engine_from_config(
pytest.skip(
reason="H-labels require num_multiclass_head, num_multilabel_classes, which skip until we have the ability to automate this.",
)
if "anomaly" in task.lower():
pytest.skip(reason="There's no dataset for anomaly tasks.")

tmp_path_train = tmp_path / task
engine = Engine.from_config(
@@ -70,6 +68,9 @@
OTXTaskType.ACTION_DETECTION,
OTXTaskType.H_LABEL_CLS,
OTXTaskType.ROTATED_DETECTION,
OTXTaskType.ANOMALY_CLASSIFICATION,
OTXTaskType.ANOMALY_DETECTION,
OTXTaskType.ANOMALY_SEGMENTATION,
]:
return
