Adding models from timm lib (#1696)
* Adding models from timm lib

* Remove an unnecessary lib
akgokce authored Jan 14, 2025
1 parent f8addd7 commit 918ebc1
Showing 5 changed files with 750 additions and 0 deletions.
193 changes: 193 additions & 0 deletions brainscore_vision/models/timm_models/__init__.py
@@ -0,0 +1,193 @@
from brainscore_vision import model_registry
from brainscore_vision.model_helpers.brain_transformation import ModelCommitment
from .model import get_model, MODEL_CONFIGS

model_registry["convnext_base:clip_laiona_augreg_ft_in1k_384"] = lambda: ModelCommitment(
    identifier="convnext_base:clip_laiona_augreg_ft_in1k_384",
    activations_model=get_model("convnext_base:clip_laiona_augreg_ft_in1k_384"),
    layers=MODEL_CONFIGS["convnext_base:clip_laiona_augreg_ft_in1k_384"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["convnext_base:clip_laiona_augreg_ft_in1k_384"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["convnext_base:clip_laiona_augreg_ft_in1k_384"]["model_commitment"]["region_layer_map"]
)


model_registry["convnext_femto_ols:d1_in1k"] = lambda: ModelCommitment(
    identifier="convnext_femto_ols:d1_in1k",
    activations_model=get_model("convnext_femto_ols:d1_in1k"),
    layers=MODEL_CONFIGS["convnext_femto_ols:d1_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["convnext_femto_ols:d1_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["convnext_femto_ols:d1_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["convnext_large:fb_in22k_ft_in1k"] = lambda: ModelCommitment(
    identifier="convnext_large:fb_in22k_ft_in1k",
    activations_model=get_model("convnext_large:fb_in22k_ft_in1k"),
    layers=MODEL_CONFIGS["convnext_large:fb_in22k_ft_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["convnext_large:fb_in22k_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["convnext_large:fb_in22k_ft_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["convnext_large_mlp:clip_laion2b_augreg_ft_in1k_384"] = lambda: ModelCommitment(
    identifier="convnext_large_mlp:clip_laion2b_augreg_ft_in1k_384",
    activations_model=get_model("convnext_large_mlp:clip_laion2b_augreg_ft_in1k_384"),
    layers=MODEL_CONFIGS["convnext_large_mlp:clip_laion2b_augreg_ft_in1k_384"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["convnext_large_mlp:clip_laion2b_augreg_ft_in1k_384"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["convnext_large_mlp:clip_laion2b_augreg_ft_in1k_384"]["model_commitment"]["region_layer_map"]
)


model_registry["convnext_tiny:in12k_ft_in1k"] = lambda: ModelCommitment(
    identifier="convnext_tiny:in12k_ft_in1k",
    activations_model=get_model("convnext_tiny:in12k_ft_in1k"),
    layers=MODEL_CONFIGS["convnext_tiny:in12k_ft_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["convnext_tiny:in12k_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["convnext_tiny:in12k_ft_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["convnext_xlarge:fb_in22k_ft_in1k"] = lambda: ModelCommitment(
    identifier="convnext_xlarge:fb_in22k_ft_in1k",
    activations_model=get_model("convnext_xlarge:fb_in22k_ft_in1k"),
    layers=MODEL_CONFIGS["convnext_xlarge:fb_in22k_ft_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["convnext_xlarge:fb_in22k_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["convnext_xlarge:fb_in22k_ft_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["convnext_xxlarge:clip_laion2b_soup_ft_in1k"] = lambda: ModelCommitment(
    identifier="convnext_xxlarge:clip_laion2b_soup_ft_in1k",
    activations_model=get_model("convnext_xxlarge:clip_laion2b_soup_ft_in1k"),
    layers=MODEL_CONFIGS["convnext_xxlarge:clip_laion2b_soup_ft_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["convnext_xxlarge:clip_laion2b_soup_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["convnext_xxlarge:clip_laion2b_soup_ft_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["swin_small_patch4_window7_224:ms_in22k_ft_in1k"] = lambda: ModelCommitment(
    identifier="swin_small_patch4_window7_224:ms_in22k_ft_in1k",
    activations_model=get_model("swin_small_patch4_window7_224:ms_in22k_ft_in1k"),
    layers=MODEL_CONFIGS["swin_small_patch4_window7_224:ms_in22k_ft_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["swin_small_patch4_window7_224:ms_in22k_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["swin_small_patch4_window7_224:ms_in22k_ft_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_base_patch16_clip_224:openai_ft_in12k_in1k"] = lambda: ModelCommitment(
    identifier="vit_base_patch16_clip_224:openai_ft_in12k_in1k",
    activations_model=get_model("vit_base_patch16_clip_224:openai_ft_in12k_in1k"),
    layers=MODEL_CONFIGS["vit_base_patch16_clip_224:openai_ft_in12k_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_base_patch16_clip_224:openai_ft_in12k_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_base_patch16_clip_224:openai_ft_in12k_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_base_patch16_clip_224:openai_ft_in1k"] = lambda: ModelCommitment(
    identifier="vit_base_patch16_clip_224:openai_ft_in1k",
    activations_model=get_model("vit_base_patch16_clip_224:openai_ft_in1k"),
    layers=MODEL_CONFIGS["vit_base_patch16_clip_224:openai_ft_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_base_patch16_clip_224:openai_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_base_patch16_clip_224:openai_ft_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_huge_patch14_clip_224:laion2b_ft_in12k_in1k"] = lambda: ModelCommitment(
    identifier="vit_huge_patch14_clip_224:laion2b_ft_in12k_in1k",
    activations_model=get_model("vit_huge_patch14_clip_224:laion2b_ft_in12k_in1k"),
    layers=MODEL_CONFIGS["vit_huge_patch14_clip_224:laion2b_ft_in12k_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_huge_patch14_clip_224:laion2b_ft_in12k_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_huge_patch14_clip_224:laion2b_ft_in12k_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_huge_patch14_clip_336:laion2b_ft_in12k_in1k"] = lambda: ModelCommitment(
    identifier="vit_huge_patch14_clip_336:laion2b_ft_in12k_in1k",
    activations_model=get_model("vit_huge_patch14_clip_336:laion2b_ft_in12k_in1k"),
    layers=MODEL_CONFIGS["vit_huge_patch14_clip_336:laion2b_ft_in12k_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_huge_patch14_clip_336:laion2b_ft_in12k_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_huge_patch14_clip_336:laion2b_ft_in12k_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_large_patch14_clip_224:laion2b_ft_in12k_in1k"] = lambda: ModelCommitment(
    identifier="vit_large_patch14_clip_224:laion2b_ft_in12k_in1k",
    activations_model=get_model("vit_large_patch14_clip_224:laion2b_ft_in12k_in1k"),
    layers=MODEL_CONFIGS["vit_large_patch14_clip_224:laion2b_ft_in12k_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_large_patch14_clip_224:laion2b_ft_in12k_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_large_patch14_clip_224:laion2b_ft_in12k_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_large_patch14_clip_224:laion2b_ft_in1k"] = lambda: ModelCommitment(
    identifier="vit_large_patch14_clip_224:laion2b_ft_in1k",
    activations_model=get_model("vit_large_patch14_clip_224:laion2b_ft_in1k"),
    layers=MODEL_CONFIGS["vit_large_patch14_clip_224:laion2b_ft_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_large_patch14_clip_224:laion2b_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_large_patch14_clip_224:laion2b_ft_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_large_patch14_clip_224:openai_ft_in12k_in1k"] = lambda: ModelCommitment(
    identifier="vit_large_patch14_clip_224:openai_ft_in12k_in1k",
    activations_model=get_model("vit_large_patch14_clip_224:openai_ft_in12k_in1k"),
    layers=MODEL_CONFIGS["vit_large_patch14_clip_224:openai_ft_in12k_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_large_patch14_clip_224:openai_ft_in12k_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_large_patch14_clip_224:openai_ft_in12k_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_large_patch14_clip_224:openai_ft_in1k"] = lambda: ModelCommitment(
    identifier="vit_large_patch14_clip_224:openai_ft_in1k",
    activations_model=get_model("vit_large_patch14_clip_224:openai_ft_in1k"),
    layers=MODEL_CONFIGS["vit_large_patch14_clip_224:openai_ft_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_large_patch14_clip_224:openai_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_large_patch14_clip_224:openai_ft_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_large_patch14_clip_336:laion2b_ft_in1k"] = lambda: ModelCommitment(
    identifier="vit_large_patch14_clip_336:laion2b_ft_in1k",
    activations_model=get_model("vit_large_patch14_clip_336:laion2b_ft_in1k"),
    layers=MODEL_CONFIGS["vit_large_patch14_clip_336:laion2b_ft_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_large_patch14_clip_336:laion2b_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_large_patch14_clip_336:laion2b_ft_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_large_patch14_clip_336:openai_ft_in12k_in1k"] = lambda: ModelCommitment(
    identifier="vit_large_patch14_clip_336:openai_ft_in12k_in1k",
    activations_model=get_model("vit_large_patch14_clip_336:openai_ft_in12k_in1k"),
    layers=MODEL_CONFIGS["vit_large_patch14_clip_336:openai_ft_in12k_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_large_patch14_clip_336:openai_ft_in12k_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_large_patch14_clip_336:openai_ft_in12k_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_relpos_base_patch16_clsgap_224:sw_in1k"] = lambda: ModelCommitment(
    identifier="vit_relpos_base_patch16_clsgap_224:sw_in1k",
    activations_model=get_model("vit_relpos_base_patch16_clsgap_224:sw_in1k"),
    layers=MODEL_CONFIGS["vit_relpos_base_patch16_clsgap_224:sw_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_relpos_base_patch16_clsgap_224:sw_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_relpos_base_patch16_clsgap_224:sw_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_relpos_base_patch32_plus_rpn_256:sw_in1k"] = lambda: ModelCommitment(
    identifier="vit_relpos_base_patch32_plus_rpn_256:sw_in1k",
    activations_model=get_model("vit_relpos_base_patch32_plus_rpn_256:sw_in1k"),
    layers=MODEL_CONFIGS["vit_relpos_base_patch32_plus_rpn_256:sw_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_relpos_base_patch32_plus_rpn_256:sw_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_relpos_base_patch32_plus_rpn_256:sw_in1k"]["model_commitment"]["region_layer_map"]
)


model_registry["vit_tiny_r_s16_p8_384:augreg_in21k_ft_in1k"] = lambda: ModelCommitment(
    identifier="vit_tiny_r_s16_p8_384:augreg_in21k_ft_in1k",
    activations_model=get_model("vit_tiny_r_s16_p8_384:augreg_in21k_ft_in1k"),
    layers=MODEL_CONFIGS["vit_tiny_r_s16_p8_384:augreg_in21k_ft_in1k"]["model_commitment"]["layers"],
    behavioral_readout_layer=MODEL_CONFIGS["vit_tiny_r_s16_p8_384:augreg_in21k_ft_in1k"]["model_commitment"]["behavioral_readout_layer"],
    region_layer_map=MODEL_CONFIGS["vit_tiny_r_s16_p8_384:augreg_in21k_ft_in1k"]["model_commitment"]["region_layer_map"]
)
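
For reference, these registrations make the new identifiers resolvable through the plugin registry. A minimal usage sketch, assuming the standard brainscore_vision loading interface:

from brainscore_vision import load_model

model = load_model("convnext_tiny:in12k_ft_in1k")  # builds the ModelCommitment registered above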


90 changes: 90 additions & 0 deletions brainscore_vision/models/timm_models/model.py
@@ -0,0 +1,90 @@
import os
import functools
import json
from pathlib import Path
import ssl

import timm
import numpy as np
import torchvision.transforms as T
from PIL import Image

from brainscore_vision.model_helpers.activations.pytorch import PytorchWrapper

# Disable SSL verification
ssl._create_default_https_context = ssl._create_unverified_context

BIBTEX = """"""


with open(Path(__file__).parent / "model_configs.json", "r") as f:
    MODEL_CONFIGS = json.load(f)
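
# Each MODEL_CONFIGS entry is expected to look roughly like the following
# (illustrative sketch only; the layer names, region mapping, and timm name below
# are hypothetical placeholders -- the real values live in model_configs.json):
# {
#     "model_id": "convnext_tiny:in12k_ft_in1k",
#     "model_name": "convnext_tiny.in12k_ft_in1k",
#     "timm_model_name": "convnext_tiny.in12k_ft_in1k",
#     "is_vit": false,
#     "model_commitment": {
#         "layers": ["stages.0", "stages.1", "stages.2", "stages.3"],
#         "behavioral_readout_layer": "head.flatten",
#         "region_layer_map": {"V1": "stages.0", "V2": "stages.1", "V4": "stages.2", "IT": "stages.3"}
#     }
# }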


def load_image(image_filepath):
    return Image.open(image_filepath).convert("RGB")


def get_interpolation_mode(interpolation: str) -> int:
    """Returns the interpolation mode for albumentations (1 = linear, 2 = cubic)"""
    if "linear" in interpolation or "bilinear" in interpolation:
        return 1
    elif "cubic" in interpolation or "bicubic" in interpolation:
        return 2
    else:
        raise NotImplementedError(f"Interpolation mode {interpolation} not implemented")


def custom_image_preprocess(
    images,
    transforms=T.Compose,
):
    if isinstance(transforms, T.Compose):
        images = [transforms(image) for image in images]
        images = [np.array(image) for image in images]
        images = np.stack(images)
    else:
        raise NotImplementedError(
            f"Transform of type {type(transforms)} is not implemented"
        )

    return images


def load_preprocess_images_custom(
    image_filepaths, preprocess_images=custom_image_preprocess, **kwargs
):
    images = [load_image(image_filepath) for image_filepath in image_filepaths]
    images = preprocess_images(images, **kwargs)
    return images


def get_model(model_id: str):
    # Unpack model config
    config = MODEL_CONFIGS[model_id]
    model_name = config["model_name"]
    model_id = config["model_id"]
    timm_model_name = config["timm_model_name"]
    is_vit = config["is_vit"]

    # Temporary fix for vit models
    # See https://github.com/brain-score/vision/pull/1232
    if is_vit:
        os.environ['RESULTCACHING_DISABLE'] = 'brainscore_vision.model_helpers.activations.core.ActivationsExtractorHelper._from_paths_stored'

    # Initialize model
    model = timm.create_model(timm_model_name, pretrained=True)
    data_config = timm.data.resolve_model_data_config(model)
    transforms = timm.data.create_transform(**data_config, is_training=False)
    print(f"Model {model_name} loaded")

    # Wrap model
    preprocessing = functools.partial(
        load_preprocess_images_custom,
        transforms=transforms
    )
    wrapper = PytorchWrapper(
        identifier=model_id, model=model, preprocessing=preprocessing
    )
    return wrapper
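
For context, a sketch of how one of these committed models would typically be exercised end to end, assuming the public brainscore_vision scoring entry point (the benchmark identifier below is only an example):

from brainscore_vision import score

result = score(
    model_identifier="convnext_tiny:in12k_ft_in1k",
    benchmark_identifier="MajajHong2015public.IT-pm",
)
print(result)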
