From f0ae65c198a818df52e7abdf9d02fe6011faf55c Mon Sep 17 00:00:00 2001
From: Fanli Lin
Date: Wed, 29 Jan 2025 23:05:33 +0800
Subject: [PATCH] [tests] further fix `Tester object has no attribute
 '_testMethodName'` (#35781)

* bug fix

* update with more cases

* more entries

* Fix

---------

Co-authored-by: ydshieh
---
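Note (illustration only; `git am` ignores text between the `---` marker and the
diffstat): every change below is the same two-line fix -- drop the
`unittest.TestCase` base from a `*Tester` helper class and delete the
now-pointless `super().__init__()` call. In a test module, pytest always
collects `unittest.TestCase` subclasses regardless of their name, and a runner
instantiates each one as `Tester(<test method name>)`. With the helpers' custom
`__init__(self, parent, ...)`, the method name lands in `parent`,
`TestCase.__init__` never runs, and any TestCase API that touches the
uninitialized state fails with the error in the subject line. A minimal sketch
with a hypothetical `DemoTester` (not one of the classes in this patch):

    import unittest

    class DemoTester(unittest.TestCase):  # the old, problematic declaration
        def __init__(self, parent, batch_size=13):
            self.parent = parent          # absorbs the runner's methodName
            self.batch_size = batch_size  # TestCase.__init__ never runs

    tester = DemoTester("prepare_config_and_inputs")  # how a runner builds it
    try:
        str(tester)  # TestCase.__str__ reads self._testMethodName
    except AttributeError as err:
        print(err)   # 'DemoTester' object has no attribute '_testMethodName'

    class PlainTester:  # the fix applied throughout: a plain helper class
        def __init__(self, parent, batch_size=13):
            self.parent = parent
            self.batch_size = batch_size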
 tests/models/albert/test_modeling_flax_albert.py             | 3 +--
 tests/models/aria/test_image_processing_aria.py              | 3 +--
 tests/models/beit/test_modeling_flax_beit.py                 | 3 +--
 tests/models/bert/test_modeling_flax_bert.py                 | 3 +--
 tests/models/big_bird/test_modeling_flax_big_bird.py         | 3 +--
 tests/models/blip/test_image_processing_blip.py              | 3 +--
 tests/models/bridgetower/test_image_processing_bridgetower.py | 3 +--
 tests/models/chameleon/test_image_processing_chameleon.py    | 3 +--
 .../models/chinese_clip/test_image_processing_chinese_clip.py | 3 +--
 tests/models/convnext/test_image_processing_convnext.py      | 3 +--
 .../deformable_detr/test_image_processing_deformable_detr.py | 3 +--
 tests/models/deit/test_image_processing_deit.py              | 3 +--
 tests/models/detr/test_image_processing_detr.py              | 3 +--
 tests/models/distilbert/test_modeling_flax_distilbert.py     | 3 +--
 tests/models/donut/test_image_processing_donut.py            | 3 +--
 tests/models/dpt/test_image_processing_dpt.py                | 3 +--
 .../models/efficientnet/test_image_processing_efficientnet.py | 3 +--
 tests/models/electra/test_modeling_flax_electra.py           | 3 +--
 tests/models/flava/test_image_processing_flava.py            | 3 +--
 tests/models/glpn/test_image_processing_glpn.py              | 3 +--
 tests/models/idefics3/test_image_processing_idefics3.py      | 3 +--
 tests/models/imagegpt/test_image_processing_imagegpt.py      | 3 +--
 .../test_image_processing_instrictblipvideo.py               | 3 +--
 tests/models/layoutlmv2/test_image_processing_layoutlmv2.py  | 3 +--
 tests/models/layoutlmv3/test_image_processing_layoutlmv3.py  | 3 +--
 tests/models/levit/test_image_processing_levit.py            | 3 +--
 tests/models/llava/test_image_processing_llava.py            | 2 +-
 tests/models/mbart/test_modeling_flax_mbart.py               | 3 +--
 tests/models/mllama/test_image_processing_mllama.py          | 3 +--
 .../models/mobilenet_v1/test_image_processing_mobilenet_v1.py | 3 +--
 .../models/mobilenet_v2/test_image_processing_mobilenet_v2.py | 3 +--
 tests/models/mobilevit/test_image_processing_mobilevit.py    | 3 +--
 tests/models/nougat/test_image_processing_nougat.py          | 3 +--
 tests/models/owlv2/test_image_processing_owlv2.py            | 3 +--
 tests/models/owlvit/test_image_processing_owlvit.py          | 3 +--
 tests/models/poolformer/test_image_processing_poolformer.py  | 3 +--
 tests/models/pvt/test_image_processing_pvt.py                | 3 +--
 tests/models/regnet/test_modeling_flax_regnet.py             | 3 +--
 tests/models/resnet/test_modeling_flax_resnet.py             | 3 +--
 tests/models/roberta/test_modeling_flax_roberta.py           | 3 +--
 .../test_modeling_flax_roberta_prelayernorm.py               | 3 +--
 tests/models/roformer/test_modeling_flax_roformer.py         | 3 +--
 tests/models/rt_detr/test_image_processing_rt_detr.py        | 3 +--
 tests/models/siglip/test_image_processing_siglip.py          | 3 +--
 tests/models/superglue/test_image_processing_superglue.py    | 2 +-
 tests/models/swin2sr/test_image_processing_swin2sr.py        | 3 +--
 tests/models/textnet/test_image_processing_textnet.py        | 2 +-
 tests/models/tvp/test_image_processing_tvp.py                | 3 +--
 tests/models/video_llava/test_image_processing_video_llava.py | 3 +--
 tests/models/videomae/test_image_processing_videomae.py      | 3 +--
 tests/models/vilt/test_image_processing_vilt.py              | 3 +--
 tests/models/vit/test_image_processing_vit.py                | 3 +--
 tests/models/vit/test_modeling_flax_vit.py                   | 3 +--
 tests/models/vitmatte/test_image_processing_vitmatte.py      | 3 +--
 tests/models/vitpose/test_image_processing_vitpose.py        | 2 +-
 tests/models/vivit/test_image_processing_vivit.py            | 3 +--
 tests/models/zoedepth/test_image_processing_zoedepth.py      | 3 +--
 57 files changed, 57 insertions(+), 110 deletions(-)

diff --git a/tests/models/albert/test_modeling_flax_albert.py b/tests/models/albert/test_modeling_flax_albert.py
index 90590e737f5f..ca8eeec59131 100644
--- a/tests/models/albert/test_modeling_flax_albert.py
+++ b/tests/models/albert/test_modeling_flax_albert.py
@@ -36,7 +36,7 @@
 )


-class FlaxAlbertModelTester(unittest.TestCase):
+class FlaxAlbertModelTester:
     def __init__(
         self,
         parent,
@@ -80,7 +80,6 @@ def __init__(
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
diff --git a/tests/models/aria/test_image_processing_aria.py b/tests/models/aria/test_image_processing_aria.py
index 8a0f84d34eef..255903e4077e 100644
--- a/tests/models/aria/test_image_processing_aria.py
+++ b/tests/models/aria/test_image_processing_aria.py
@@ -35,7 +35,7 @@
     import torch


-class AriaImageProcessingTester(unittest.TestCase):
+class AriaImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -55,7 +55,6 @@ def __init__(
         do_convert_rgb=True,
         resample=PILImageResampling.BICUBIC,
     ):
-        super().__init__()
         self.size = size if size is not None else {"longest_edge": max_resolution}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/beit/test_modeling_flax_beit.py b/tests/models/beit/test_modeling_flax_beit.py
index 24307532fd77..2ac3668d3b09 100644
--- a/tests/models/beit/test_modeling_flax_beit.py
+++ b/tests/models/beit/test_modeling_flax_beit.py
@@ -36,7 +36,7 @@
     from transformers import BeitImageProcessor


-class FlaxBeitModelTester(unittest.TestCase):
+class FlaxBeitModelTester:
     def __init__(
         self,
         parent,
@@ -79,7 +79,6 @@ def __init__(
         # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
         num_patches = (image_size // patch_size) ** 2
         self.seq_length = num_patches + 1
-        super().__init__()

     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
diff --git a/tests/models/bert/test_modeling_flax_bert.py b/tests/models/bert/test_modeling_flax_bert.py
index 4a9610d723d1..72d5c951e68b 100644
--- a/tests/models/bert/test_modeling_flax_bert.py
+++ b/tests/models/bert/test_modeling_flax_bert.py
@@ -35,7 +35,7 @@
 )


-class FlaxBertModelTester(unittest.TestCase):
+class FlaxBertModelTester:
     def __init__(
         self,
         parent,
@@ -79,7 +79,6 @@ def __init__(
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
diff --git a/tests/models/big_bird/test_modeling_flax_big_bird.py b/tests/models/big_bird/test_modeling_flax_big_bird.py
index f889952d2be9..8beb12b8c6c8 100644
--- a/tests/models/big_bird/test_modeling_flax_big_bird.py
+++ b/tests/models/big_bird/test_modeling_flax_big_bird.py
@@ -35,7 +35,7 @@
 )


-class FlaxBigBirdModelTester(unittest.TestCase):
+class FlaxBigBirdModelTester:
     def __init__(
         self,
         parent,
@@ -90,7 +90,6 @@ def __init__(
         self.use_bias = use_bias
         self.block_size = block_size
         self.num_random_blocks = num_random_blocks
-        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
diff --git a/tests/models/blip/test_image_processing_blip.py b/tests/models/blip/test_image_processing_blip.py
index d745f3420a61..038a051a557c 100644
--- a/tests/models/blip/test_image_processing_blip.py
+++ b/tests/models/blip/test_image_processing_blip.py
@@ -26,7 +26,7 @@
     from transformers import BlipImageProcessor


-class BlipImageProcessingTester(unittest.TestCase):
+class BlipImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -43,7 +43,6 @@ def __init__(
         image_std=[0.26862954, 0.26130258, 0.27577711],
         do_convert_rgb=True,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 20, "width": 20}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/bridgetower/test_image_processing_bridgetower.py b/tests/models/bridgetower/test_image_processing_bridgetower.py
index 61d07f10f367..d70715c78a7f 100644
--- a/tests/models/bridgetower/test_image_processing_bridgetower.py
+++ b/tests/models/bridgetower/test_image_processing_bridgetower.py
@@ -31,7 +31,7 @@
     from transformers import BridgeTowerImageProcessor


-class BridgeTowerImageProcessingTester(unittest.TestCase):
+class BridgeTowerImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -50,7 +50,6 @@ def __init__(
         max_resolution=400,
         num_channels=3,
     ):
-        super().__init__()
         self.parent = parent
         self.do_resize = do_resize
         self.size = size if size is not None else {"shortest_edge": 288}
diff --git a/tests/models/chameleon/test_image_processing_chameleon.py b/tests/models/chameleon/test_image_processing_chameleon.py
index 4a5c8c546790..1948e58f9ff1 100644
--- a/tests/models/chameleon/test_image_processing_chameleon.py
+++ b/tests/models/chameleon/test_image_processing_chameleon.py
@@ -32,7 +32,7 @@
     from transformers import ChameleonImageProcessor


-class ChameleonImageProcessingTester(unittest.TestCase):
+class ChameleonImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -50,7 +50,6 @@ def __init__(
         image_std=[1.0, 1.0, 1.0],
         do_convert_rgb=True,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 18}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent
diff --git a/tests/models/chinese_clip/test_image_processing_chinese_clip.py b/tests/models/chinese_clip/test_image_processing_chinese_clip.py
index d75176895617..01bea098ebd5 100644
--- a/tests/models/chinese_clip/test_image_processing_chinese_clip.py
+++ b/tests/models/chinese_clip/test_image_processing_chinese_clip.py
@@ -26,7 +26,7 @@
     from transformers import ChineseCLIPImageProcessor


-class ChineseCLIPImageProcessingTester(unittest.TestCase):
+class ChineseCLIPImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -44,7 +44,6 @@ def __init__(
         image_std=[0.26862954, 0.26130258, 0.27577711],
         do_convert_rgb=True,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 224, "width": 224}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent
diff --git a/tests/models/convnext/test_image_processing_convnext.py b/tests/models/convnext/test_image_processing_convnext.py
index 14a6b3e8e1aa..9ddfc0d41466 100644
--- a/tests/models/convnext/test_image_processing_convnext.py
+++ b/tests/models/convnext/test_image_processing_convnext.py
@@ -26,7 +26,7 @@
     from transformers import ConvNextImageProcessor


-class ConvNextImageProcessingTester(unittest.TestCase):
+class ConvNextImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -42,7 +42,6 @@ def __init__(
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 20}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/deformable_detr/test_image_processing_deformable_detr.py b/tests/models/deformable_detr/test_image_processing_deformable_detr.py
index 5a8825cc6c15..25aa486b5425 100644
--- a/tests/models/deformable_detr/test_image_processing_deformable_detr.py
+++ b/tests/models/deformable_detr/test_image_processing_deformable_detr.py
@@ -35,7 +35,7 @@
     from transformers import DeformableDetrImageProcessor, DeformableDetrImageProcessorFast


-class DeformableDetrImageProcessingTester(unittest.TestCase):
+class DeformableDetrImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -52,7 +52,6 @@ def __init__(
         rescale_factor=1 / 255,
         do_pad=True,
     ):
-        super().__init__()
         # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
         size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
         self.parent = parent
diff --git a/tests/models/deit/test_image_processing_deit.py b/tests/models/deit/test_image_processing_deit.py
index 7792ac10e057..184875947712 100644
--- a/tests/models/deit/test_image_processing_deit.py
+++ b/tests/models/deit/test_image_processing_deit.py
@@ -26,7 +26,7 @@
     from transformers import DeiTImageProcessor


-class DeiTImageProcessingTester(unittest.TestCase):
+class DeiTImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -43,7 +43,6 @@ def __init__(
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"height": 20, "width": 20}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

diff --git a/tests/models/detr/test_image_processing_detr.py b/tests/models/detr/test_image_processing_detr.py
index 2dc84fe5e019..6f19de09dc47 100644
--- a/tests/models/detr/test_image_processing_detr.py
+++ b/tests/models/detr/test_image_processing_detr.py
@@ -37,7 +37,7 @@
     from transformers import DetrImageProcessorFast


-class DetrImageProcessingTester(unittest.TestCase):
+class DetrImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -54,7 +54,6 @@ def __init__(
         image_std=[0.5, 0.5, 0.5],
         do_pad=True,
     ):
-        super().__init__()
         # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
         size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
         self.parent = parent
diff --git a/tests/models/distilbert/test_modeling_flax_distilbert.py b/tests/models/distilbert/test_modeling_flax_distilbert.py
index 39a25a42fe8a..50655771ed14 100644
--- a/tests/models/distilbert/test_modeling_flax_distilbert.py
+++ b/tests/models/distilbert/test_modeling_flax_distilbert.py
@@ -35,7 +35,7 @@
 )


-class FlaxDistilBertModelTester(unittest.TestCase):
+class FlaxDistilBertModelTester:
     def __init__(
         self,
         parent,
@@ -79,7 +79,6 @@ def __init__(
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
diff --git a/tests/models/donut/test_image_processing_donut.py b/tests/models/donut/test_image_processing_donut.py
index 468108d593f2..052894c0cbd8 100644
--- a/tests/models/donut/test_image_processing_donut.py
+++ b/tests/models/donut/test_image_processing_donut.py
@@ -33,7 +33,7 @@
     from transformers import DonutImageProcessor


-class DonutImageProcessingTester(unittest.TestCase):
+class DonutImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -51,7 +51,6 @@ def __init__(
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels
diff --git a/tests/models/dpt/test_image_processing_dpt.py b/tests/models/dpt/test_image_processing_dpt.py
index 713c722a4c2b..feb551d3bec8 100644
--- a/tests/models/dpt/test_image_processing_dpt.py
+++ b/tests/models/dpt/test_image_processing_dpt.py
@@ -34,7 +34,7 @@
     from transformers import DPTImageProcessor


-class DPTImageProcessingTester(unittest.TestCase):
+class DPTImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -50,7 +50,6 @@ def __init__(
         image_std=[0.5, 0.5, 0.5],
         do_reduce_labels=False,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/efficientnet/test_image_processing_efficientnet.py b/tests/models/efficientnet/test_image_processing_efficientnet.py
index 22e80c7312ef..618885dc97be 100644
--- a/tests/models/efficientnet/test_image_processing_efficientnet.py
+++ b/tests/models/efficientnet/test_image_processing_efficientnet.py
@@ -28,7 +28,7 @@
     from transformers import EfficientNetImageProcessor


-class EfficientNetImageProcessorTester(unittest.TestCase):
+class EfficientNetImageProcessorTester:
     def __init__(
         self,
         parent,
@@ -43,7 +43,6 @@ def __init__(
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/electra/test_modeling_flax_electra.py b/tests/models/electra/test_modeling_flax_electra.py
index f531c7f8d073..698a492fc3c7 100644
--- a/tests/models/electra/test_modeling_flax_electra.py
+++ b/tests/models/electra/test_modeling_flax_electra.py
@@ -21,7 +21,7 @@
 )


-class FlaxElectraModelTester(unittest.TestCase):
+class FlaxElectraModelTester:
     def __init__(
         self,
         parent,
@@ -67,7 +67,6 @@ def __init__(
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
diff --git a/tests/models/flava/test_image_processing_flava.py b/tests/models/flava/test_image_processing_flava.py
index 657a63bd5375..e3c1f5b5b536 100644
--- a/tests/models/flava/test_image_processing_flava.py
+++ b/tests/models/flava/test_image_processing_flava.py
@@ -42,7 +42,7 @@
     FLAVA_IMAGE_MEAN = FLAVA_IMAGE_STD = FLAVA_CODEBOOK_MEAN = FLAVA_CODEBOOK_STD = None


-class FlavaImageProcessingTester(unittest.TestCase):
+class FlavaImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -76,7 +76,6 @@ def __init__(
         codebook_image_mean=FLAVA_CODEBOOK_MEAN,
         codebook_image_std=FLAVA_CODEBOOK_STD,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 224, "width": 224}
         crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
         codebook_size = codebook_size if codebook_size is not None else {"height": 112, "width": 112}
diff --git a/tests/models/glpn/test_image_processing_glpn.py b/tests/models/glpn/test_image_processing_glpn.py
index ba387943d748..43d5da070720 100644
--- a/tests/models/glpn/test_image_processing_glpn.py
+++ b/tests/models/glpn/test_image_processing_glpn.py
@@ -33,7 +33,7 @@
     from transformers import GLPNImageProcessor


-class GLPNImageProcessingTester(unittest.TestCase):
+class GLPNImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -46,7 +46,6 @@ def __init__(
         size_divisor=32,
         do_rescale=True,
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels
diff --git a/tests/models/idefics3/test_image_processing_idefics3.py b/tests/models/idefics3/test_image_processing_idefics3.py
index 102356dc1dd9..d5e3870006ab 100644
--- a/tests/models/idefics3/test_image_processing_idefics3.py
+++ b/tests/models/idefics3/test_image_processing_idefics3.py
@@ -35,7 +35,7 @@
     import torch


-class Idefics3ImageProcessingTester(unittest.TestCase):
+class Idefics3ImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -58,7 +58,6 @@ def __init__(
         do_image_splitting=True,
         resample=PILImageResampling.LANCZOS,
     ):
-        super().__init__()
         self.size = size if size is not None else {"longest_edge": max_resolution}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/imagegpt/test_image_processing_imagegpt.py b/tests/models/imagegpt/test_image_processing_imagegpt.py
index 25e5d6b8d92e..d32e954cca25 100644
--- a/tests/models/imagegpt/test_image_processing_imagegpt.py
+++ b/tests/models/imagegpt/test_image_processing_imagegpt.py
@@ -38,7 +38,7 @@
     from transformers import ImageGPTImageProcessor


-class ImageGPTImageProcessingTester(unittest.TestCase):
+class ImageGPTImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -51,7 +51,6 @@ def __init__(
         size=None,
         do_normalize=True,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/instructblipvideo/test_image_processing_instrictblipvideo.py b/tests/models/instructblipvideo/test_image_processing_instrictblipvideo.py
index 536b20554fd1..dacb922e7216 100644
--- a/tests/models/instructblipvideo/test_image_processing_instrictblipvideo.py
+++ b/tests/models/instructblipvideo/test_image_processing_instrictblipvideo.py
@@ -33,7 +33,7 @@
     from transformers import InstructBlipVideoImageProcessor


-class InstructBlipVideoProcessingTester(unittest.TestCase):
+class InstructBlipVideoProcessingTester:
     def __init__(
         self,
         parent,
@@ -50,7 +50,6 @@ def __init__(
         do_convert_rgb=True,
         frames=4,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py
index f4a5b90d4ba3..41cc9943cf27 100644
--- a/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py
+++ b/tests/models/layoutlmv2/test_image_processing_layoutlmv2.py
@@ -28,7 +28,7 @@
     from transformers import LayoutLMv2ImageProcessor


-class LayoutLMv2ImageProcessingTester(unittest.TestCase):
+class LayoutLMv2ImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -41,7 +41,6 @@ def __init__(
         size=None,
         apply_ocr=True,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py
index 943b5bf4f0c6..a6aebc88b27f 100644
--- a/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py
+++ b/tests/models/layoutlmv3/test_image_processing_layoutlmv3.py
@@ -28,7 +28,7 @@
     from transformers import LayoutLMv3ImageProcessor


-class LayoutLMv3ImageProcessingTester(unittest.TestCase):
+class LayoutLMv3ImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -41,7 +41,6 @@ def __init__(
         size=None,
         apply_ocr=True,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/levit/test_image_processing_levit.py b/tests/models/levit/test_image_processing_levit.py
index 6bd1b4ca9bcb..c04656ea8bda 100644
--- a/tests/models/levit/test_image_processing_levit.py
+++ b/tests/models/levit/test_image_processing_levit.py
@@ -26,7 +26,7 @@
     from transformers import LevitImageProcessor


-class LevitImageProcessingTester(unittest.TestCase):
+class LevitImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -43,7 +43,6 @@ def __init__(
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 18}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent
diff --git a/tests/models/llava/test_image_processing_llava.py b/tests/models/llava/test_image_processing_llava.py
index 75570c50d90a..99124f66b4a4 100644
--- a/tests/models/llava/test_image_processing_llava.py
+++ b/tests/models/llava/test_image_processing_llava.py
@@ -31,7 +31,7 @@
     from transformers import LlavaImageProcessor


-class LlavaImageProcessingTester(unittest.TestCase):
+class LlavaImageProcessingTester:
     def __init__(
         self,
         parent,
diff --git a/tests/models/mbart/test_modeling_flax_mbart.py b/tests/models/mbart/test_modeling_flax_mbart.py
index ef48e7c4f3e9..737728587352 100644
--- a/tests/models/mbart/test_modeling_flax_mbart.py
+++ b/tests/models/mbart/test_modeling_flax_mbart.py
@@ -74,7 +74,7 @@ def prepare_mbart_inputs_dict(
     }


-class FlaxMBartModelTester(unittest.TestCase):
+class FlaxMBartModelTester:
     def __init__(
         self,
         parent,
@@ -116,7 +116,6 @@ def __init__(
         self.bos_token_id = bos_token_id
         self.decoder_start_token_id = decoder_start_token_id
         self.initializer_range = initializer_range
-        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
diff --git a/tests/models/mllama/test_image_processing_mllama.py b/tests/models/mllama/test_image_processing_mllama.py
index b79d2f802459..351f1f16f299 100644
--- a/tests/models/mllama/test_image_processing_mllama.py
+++ b/tests/models/mllama/test_image_processing_mllama.py
@@ -34,7 +34,7 @@
     import torch


-class MllamaImageProcessingTester(unittest.TestCase):
+class MllamaImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -55,7 +55,6 @@ def __init__(
         do_pad=True,
         max_image_tiles=4,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 224, "width": 224}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py
index 0d5f2eb8d001..c6553d445b15 100644
--- a/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py
+++ b/tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py
@@ -26,7 +26,7 @@
     from transformers import MobileNetV1ImageProcessor


-class MobileNetV1ImageProcessingTester(unittest.TestCase):
+class MobileNetV1ImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -40,7 +40,6 @@ def __init__(
         do_center_crop=True,
         crop_size=None,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 20}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent
diff --git a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py
index f4fd2b401681..cf583e017572 100644
--- a/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py
+++ b/tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py
@@ -26,7 +26,7 @@
     from transformers import MobileNetV2ImageProcessor


-class MobileNetV2ImageProcessingTester(unittest.TestCase):
+class MobileNetV2ImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -40,7 +40,6 @@ def __init__(
         do_center_crop=True,
         crop_size=None,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 20}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent
diff --git a/tests/models/mobilevit/test_image_processing_mobilevit.py b/tests/models/mobilevit/test_image_processing_mobilevit.py
index f1bbeac8af34..976c8c68e9b2 100644
--- a/tests/models/mobilevit/test_image_processing_mobilevit.py
+++ b/tests/models/mobilevit/test_image_processing_mobilevit.py
@@ -33,7 +33,7 @@
     from transformers import MobileViTImageProcessor


-class MobileViTImageProcessingTester(unittest.TestCase):
+class MobileViTImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -48,7 +48,6 @@ def __init__(
         crop_size=None,
         do_flip_channel_order=True,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 20}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent
diff --git a/tests/models/nougat/test_image_processing_nougat.py b/tests/models/nougat/test_image_processing_nougat.py
index f923a2f159e0..69382a1c3fad 100644
--- a/tests/models/nougat/test_image_processing_nougat.py
+++ b/tests/models/nougat/test_image_processing_nougat.py
@@ -34,7 +34,7 @@
     from transformers import NougatImageProcessor


-class NougatImageProcessingTester(unittest.TestCase):
+class NougatImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -53,7 +53,6 @@ def __init__(
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"height": 20, "width": 20}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/owlv2/test_image_processing_owlv2.py b/tests/models/owlv2/test_image_processing_owlv2.py
index decf903a1470..cc6cf4b8330a 100644
--- a/tests/models/owlv2/test_image_processing_owlv2.py
+++ b/tests/models/owlv2/test_image_processing_owlv2.py
@@ -31,7 +31,7 @@
     import torch


-class Owlv2ImageProcessingTester(unittest.TestCase):
+class Owlv2ImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -47,7 +47,6 @@ def __init__(
         image_std=[0.26862954, 0.26130258, 0.27577711],
         do_convert_rgb=True,
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels
diff --git a/tests/models/owlvit/test_image_processing_owlvit.py b/tests/models/owlvit/test_image_processing_owlvit.py
index b95e61346205..d6297de2290c 100644
--- a/tests/models/owlvit/test_image_processing_owlvit.py
+++ b/tests/models/owlvit/test_image_processing_owlvit.py
@@ -26,7 +26,7 @@
     from transformers import OwlViTImageProcessor


-class OwlViTImageProcessingTester(unittest.TestCase):
+class OwlViTImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -44,7 +44,6 @@ def __init__(
         image_std=[0.26862954, 0.26130258, 0.27577711],
         do_convert_rgb=True,
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels
diff --git a/tests/models/poolformer/test_image_processing_poolformer.py b/tests/models/poolformer/test_image_processing_poolformer.py
index 21975371f9fe..047cd23d36aa 100644
--- a/tests/models/poolformer/test_image_processing_poolformer.py
+++ b/tests/models/poolformer/test_image_processing_poolformer.py
@@ -25,7 +25,7 @@
    from transformers import PoolFormerImageProcessor


-class PoolFormerImageProcessingTester(unittest.TestCase):
+class PoolFormerImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -41,7 +41,6 @@ def __init__(
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 30}
         crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
         self.parent = parent
diff --git a/tests/models/pvt/test_image_processing_pvt.py b/tests/models/pvt/test_image_processing_pvt.py
index c32169d03ae5..de093307abeb 100644
--- a/tests/models/pvt/test_image_processing_pvt.py
+++ b/tests/models/pvt/test_image_processing_pvt.py
@@ -26,7 +26,7 @@
     from transformers import PvtImageProcessor


-class PvtImageProcessingTester(unittest.TestCase):
+class PvtImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -41,7 +41,6 @@ def __init__(
         image_mean=[0.485, 0.456, 0.406],
         image_std=[0.229, 0.224, 0.225],
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/regnet/test_modeling_flax_regnet.py b/tests/models/regnet/test_modeling_flax_regnet.py
index 314f0b367bb9..30511c84c794 100644
--- a/tests/models/regnet/test_modeling_flax_regnet.py
+++ b/tests/models/regnet/test_modeling_flax_regnet.py
@@ -36,7 +36,7 @@
     from transformers import AutoImageProcessor


-class FlaxRegNetModelTester(unittest.TestCase):
+class FlaxRegNetModelTester:
     def __init__(
         self,
         parent,
@@ -65,7 +65,6 @@ def __init__(
         self.num_labels = num_labels
         self.scope = scope
         self.num_stages = len(hidden_sizes)
-        super().__init__()

     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
diff --git a/tests/models/resnet/test_modeling_flax_resnet.py b/tests/models/resnet/test_modeling_flax_resnet.py
index ce83d415dc0f..7399405f00c1 100644
--- a/tests/models/resnet/test_modeling_flax_resnet.py
+++ b/tests/models/resnet/test_modeling_flax_resnet.py
@@ -35,7 +35,7 @@
     from transformers import AutoImageProcessor


-class FlaxResNetModelTester(unittest.TestCase):
+class FlaxResNetModelTester:
     def __init__(
         self,
         parent,
@@ -64,7 +64,6 @@ def __init__(
         self.num_labels = num_labels
         self.scope = scope
         self.num_stages = len(hidden_sizes)
-        super().__init__()

     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
diff --git a/tests/models/roberta/test_modeling_flax_roberta.py b/tests/models/roberta/test_modeling_flax_roberta.py
index f2f7296df655..b9a877d2bddd 100644
--- a/tests/models/roberta/test_modeling_flax_roberta.py
+++ b/tests/models/roberta/test_modeling_flax_roberta.py
@@ -34,7 +34,7 @@
 )


-class FlaxRobertaModelTester(unittest.TestCase):
+class FlaxRobertaModelTester:
     def __init__(
         self,
         parent,
@@ -78,7 +78,6 @@ def __init__(
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
diff --git a/tests/models/roberta_prelayernorm/test_modeling_flax_roberta_prelayernorm.py b/tests/models/roberta_prelayernorm/test_modeling_flax_roberta_prelayernorm.py
index 409752e162f4..d464e28640ae 100644
--- a/tests/models/roberta_prelayernorm/test_modeling_flax_roberta_prelayernorm.py
+++ b/tests/models/roberta_prelayernorm/test_modeling_flax_roberta_prelayernorm.py
@@ -37,7 +37,7 @@


 # Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTester with Roberta->RobertaPreLayerNorm
-class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
+class FlaxRobertaPreLayerNormModelTester:
     def __init__(
         self,
         parent,
@@ -81,7 +81,6 @@ def __init__(
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
diff --git a/tests/models/roformer/test_modeling_flax_roformer.py b/tests/models/roformer/test_modeling_flax_roformer.py
index 971c1a18cde5..856ed906060b 100644
--- a/tests/models/roformer/test_modeling_flax_roformer.py
+++ b/tests/models/roformer/test_modeling_flax_roformer.py
@@ -35,7 +35,7 @@
 )


-class FlaxRoFormerModelTester(unittest.TestCase):
+class FlaxRoFormerModelTester:
     def __init__(
         self,
         parent,
@@ -79,7 +79,6 @@ def __init__(
         self.type_sequence_label_size = type_sequence_label_size
         self.initializer_range = initializer_range
         self.num_choices = num_choices
-        super().__init__()

     def prepare_config_and_inputs(self):
         input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
diff --git a/tests/models/rt_detr/test_image_processing_rt_detr.py b/tests/models/rt_detr/test_image_processing_rt_detr.py
index 97718d97406f..e27c1838f940 100644
--- a/tests/models/rt_detr/test_image_processing_rt_detr.py
+++ b/tests/models/rt_detr/test_image_processing_rt_detr.py
@@ -31,7 +31,7 @@
     import torch


-class RTDetrImageProcessingTester(unittest.TestCase):
+class RTDetrImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -45,7 +45,6 @@ def __init__(
         do_pad=False,
         return_tensors="pt",
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels
diff --git a/tests/models/siglip/test_image_processing_siglip.py b/tests/models/siglip/test_image_processing_siglip.py
index 02bf6d78c8d4..56653ae7aa58 100644
--- a/tests/models/siglip/test_image_processing_siglip.py
+++ b/tests/models/siglip/test_image_processing_siglip.py
@@ -26,7 +26,7 @@
     from transformers import SiglipImageProcessor


-class SiglipImageProcessingTester(unittest.TestCase):
+class SiglipImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -43,7 +43,6 @@ def __init__(
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/superglue/test_image_processing_superglue.py b/tests/models/superglue/test_image_processing_superglue.py
index b98d34888cfc..1068a9a92bc0 100644
--- a/tests/models/superglue/test_image_processing_superglue.py
+++ b/tests/models/superglue/test_image_processing_superglue.py
@@ -42,7 +42,7 @@ def random_tensor(size):
     return torch.rand(size)


-class SuperGlueImageProcessingTester(unittest.TestCase):
+class SuperGlueImageProcessingTester:
     def __init__(
         self,
         parent,
diff --git a/tests/models/swin2sr/test_image_processing_swin2sr.py b/tests/models/swin2sr/test_image_processing_swin2sr.py
index fa1e25db7134..4467a8344433 100644
--- a/tests/models/swin2sr/test_image_processing_swin2sr.py
+++ b/tests/models/swin2sr/test_image_processing_swin2sr.py
@@ -34,7 +34,7 @@
     from transformers.image_transforms import get_image_size


-class Swin2SRImageProcessingTester(unittest.TestCase):
+class Swin2SRImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -48,7 +48,6 @@ def __init__(
         do_pad=True,
         pad_size=8,
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels
diff --git a/tests/models/textnet/test_image_processing_textnet.py b/tests/models/textnet/test_image_processing_textnet.py
index 4fcd93e872fc..7765397802db 100644
--- a/tests/models/textnet/test_image_processing_textnet.py
+++ b/tests/models/textnet/test_image_processing_textnet.py
@@ -26,7 +26,7 @@
     from transformers import TextNetImageProcessor


-class TextNetImageProcessingTester(unittest.TestCase):
+class TextNetImageProcessingTester:
     def __init__(
         self,
         parent,
diff --git a/tests/models/tvp/test_image_processing_tvp.py b/tests/models/tvp/test_image_processing_tvp.py
index 023cf4f9da9a..99ddcc51b457 100644
--- a/tests/models/tvp/test_image_processing_tvp.py
+++ b/tests/models/tvp/test_image_processing_tvp.py
@@ -35,7 +35,7 @@
     from transformers import TvpImageProcessor


-class TvpImageProcessingTester(unittest.TestCase):
+class TvpImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -58,7 +58,6 @@ def __init__(
         num_channels=3,
         num_frames=2,
     ):
-        super().__init__()
         self.do_resize = do_resize
         self.size = size
         self.do_center_crop = do_center_crop
diff --git a/tests/models/video_llava/test_image_processing_video_llava.py b/tests/models/video_llava/test_image_processing_video_llava.py
index b666c20ab848..4b877ab325cd 100644
--- a/tests/models/video_llava/test_image_processing_video_llava.py
+++ b/tests/models/video_llava/test_image_processing_video_llava.py
@@ -34,7 +34,7 @@
     from transformers import VideoLlavaImageProcessor


-class VideoLlavaImageProcessingTester(unittest.TestCase):
+class VideoLlavaImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -52,7 +52,6 @@ def __init__(
         image_std=OPENAI_CLIP_STD,
         do_convert_rgb=True,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 20}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
         self.parent = parent
diff --git a/tests/models/videomae/test_image_processing_videomae.py b/tests/models/videomae/test_image_processing_videomae.py
index 386b1f968b9c..f4e2b26fb809 100644
--- a/tests/models/videomae/test_image_processing_videomae.py
+++ b/tests/models/videomae/test_image_processing_videomae.py
@@ -33,7 +33,7 @@
     from transformers import VideoMAEImageProcessor


-class VideoMAEImageProcessingTester(unittest.TestCase):
+class VideoMAEImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -50,7 +50,6 @@ def __init__(
         image_std=[0.5, 0.5, 0.5],
         crop_size=None,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 18}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

diff --git a/tests/models/vilt/test_image_processing_vilt.py b/tests/models/vilt/test_image_processing_vilt.py
index 3e38b88a3656..bfd70694aa31 100644
--- a/tests/models/vilt/test_image_processing_vilt.py
+++ b/tests/models/vilt/test_image_processing_vilt.py
@@ -30,7 +30,7 @@
     from transformers import ViltImageProcessor


-class ViltImageProcessingTester(unittest.TestCase):
+class ViltImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -46,7 +46,6 @@ def __init__(
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 30}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/vit/test_image_processing_vit.py b/tests/models/vit/test_image_processing_vit.py
index 5a94b4bb6e12..e628f5d9df36 100644
--- a/tests/models/vit/test_image_processing_vit.py
+++ b/tests/models/vit/test_image_processing_vit.py
@@ -29,7 +29,7 @@
     from transformers import ViTImageProcessorFast


-class ViTImageProcessingTester(unittest.TestCase):
+class ViTImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -44,7 +44,6 @@ def __init__(
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size
diff --git a/tests/models/vit/test_modeling_flax_vit.py b/tests/models/vit/test_modeling_flax_vit.py
index 97fc3082a98d..2e352056635d 100644
--- a/tests/models/vit/test_modeling_flax_vit.py
+++ b/tests/models/vit/test_modeling_flax_vit.py
@@ -30,7 +30,7 @@
     from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


-class FlaxViTModelTester(unittest.TestCase):
+class FlaxViTModelTester:
     def __init__(
         self,
         parent,
@@ -72,7 +72,6 @@ def __init__(
         # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
         num_patches = (image_size // patch_size) ** 2
         self.seq_length = num_patches + 1
-        super().__init__()

     def prepare_config_and_inputs(self):
         pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
diff --git a/tests/models/vitmatte/test_image_processing_vitmatte.py b/tests/models/vitmatte/test_image_processing_vitmatte.py
index 288ed53d190d..340957fd3e55 100644
--- a/tests/models/vitmatte/test_image_processing_vitmatte.py
+++ b/tests/models/vitmatte/test_image_processing_vitmatte.py
@@ -35,7 +35,7 @@
     from transformers import VitMatteImageProcessor


-class VitMatteImageProcessingTester(unittest.TestCase):
+class VitMatteImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -52,7 +52,6 @@ def __init__(
         image_mean=[0.5, 0.5, 0.5],
         image_std=[0.5, 0.5, 0.5],
     ):
-        super().__init__()
         self.parent = parent
         self.batch_size = batch_size
         self.num_channels = num_channels
diff --git a/tests/models/vitpose/test_image_processing_vitpose.py b/tests/models/vitpose/test_image_processing_vitpose.py
index 5edf27e6a69a..238fe20aa616 100644
--- a/tests/models/vitpose/test_image_processing_vitpose.py
+++ b/tests/models/vitpose/test_image_processing_vitpose.py
@@ -34,7 +34,7 @@
     from transformers import VitPoseImageProcessor


-class VitPoseImageProcessingTester(unittest.TestCase):
+class VitPoseImageProcessingTester:
     def __init__(
         self,
         parent,
diff --git a/tests/models/vivit/test_image_processing_vivit.py b/tests/models/vivit/test_image_processing_vivit.py
index 4d3fee544c27..3d05a3668985 100644
--- a/tests/models/vivit/test_image_processing_vivit.py
+++ b/tests/models/vivit/test_image_processing_vivit.py
@@ -33,7 +33,7 @@
     from transformers import VivitImageProcessor


-class VivitImageProcessingTester(unittest.TestCase):
+class VivitImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -50,7 +50,6 @@ def __init__(
         image_std=[0.5, 0.5, 0.5],
         crop_size=None,
     ):
-        super().__init__()
         size = size if size is not None else {"shortest_edge": 18}
         crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

diff --git a/tests/models/zoedepth/test_image_processing_zoedepth.py b/tests/models/zoedepth/test_image_processing_zoedepth.py
index 56c181c97d99..ba4986c46fd4 100644
--- a/tests/models/zoedepth/test_image_processing_zoedepth.py
+++ b/tests/models/zoedepth/test_image_processing_zoedepth.py
@@ -28,7 +28,7 @@
     from transformers import ZoeDepthImageProcessor


-class ZoeDepthImageProcessingTester(unittest.TestCase):
+class ZoeDepthImageProcessingTester:
     def __init__(
         self,
         parent,
@@ -46,7 +46,6 @@ def __init__(
         image_std=[0.5, 0.5, 0.5],
         do_pad=False,
     ):
-        super().__init__()
         size = size if size is not None else {"height": 18, "width": 18}
         self.parent = parent
         self.batch_size = batch_size