From 38261e07e5fe4f6e987b5a56c7f82920229caf9f Mon Sep 17 00:00:00 2001 From: roberto-corno-nttdata <roberto.corno@itv.com> Date: Sat, 7 Dec 2024 23:00:16 +0100 Subject: [PATCH] yolov11n, yolov11s and yolov11m added to model selection --- benchmarks/Evaluate-Results.ipynb | 4 +- deepface/DeepFace.py | 16 ++--- deepface/commons/weight_utils.py | 6 +- deepface/models/YoloClientBase.py | 37 ++++++++++ deepface/models/YoloModel.py | 21 ++++++ deepface/models/face_detection/Yolo.py | 79 +++++++--------------- deepface/models/facial_recognition/Yolo.py | 44 ++++++++++++ deepface/modules/demography.py | 2 +- deepface/modules/detection.py | 2 +- deepface/modules/modeling.py | 16 +++-- deepface/modules/recognition.py | 4 +- deepface/modules/representation.py | 13 ++-- deepface/modules/streaming.py | 8 +-- deepface/modules/verification.py | 2 +- tests/visual-test.py | 5 ++ 15 files changed, 173 insertions(+), 86 deletions(-) create mode 100644 deepface/models/YoloClientBase.py create mode 100644 deepface/models/YoloModel.py create mode 100644 deepface/models/facial_recognition/Yolo.py diff --git a/benchmarks/Evaluate-Results.ipynb b/benchmarks/Evaluate-Results.ipynb index 16d29dce5..72d74cc6a 100644 --- a/benchmarks/Evaluate-Results.ipynb +++ b/benchmarks/Evaluate-Results.ipynb @@ -29,8 +29,8 @@ "outputs": [], "source": [ "alignment = [False, True]\n", - "models = [\"Facenet512\", \"Facenet\", \"VGG-Face\", \"ArcFace\", \"Dlib\", \"GhostFaceNet\", \"SFace\", \"OpenFace\", \"DeepFace\", \"DeepID\"]\n", - "detectors = [\"retinaface\", \"mtcnn\", \"fastmtcnn\", \"dlib\", \"yolov8\", \"yolov11n\", \"yolov11m\", \"yunet\", \"centerface\", \"mediapipe\", \"ssd\", \"opencv\", \"skip\"]\n", + "models = [\"Facenet512\", \"Facenet\", \"VGG-Face\", \"ArcFace\", \"Dlib\", \"GhostFaceNet\", \"SFace\", \"OpenFace\", \"DeepFace\", \"DeepID\", \"yolov8\", \"yolov11n\", \"yolov11s\", \"yolov11m\"]\n", + "detectors = [\"retinaface\", \"mtcnn\", \"fastmtcnn\", \"dlib\", \"yolov8\", \"yolov11n\", \"yolov11s\", \"yolov11m\", \"yunet\", \"centerface\", \"mediapipe\", \"ssd\", \"opencv\", \"skip\"]\n", "distance_metrics = [\"euclidean\", \"euclidean_l2\", \"cosine\"]" ] }, diff --git a/deepface/DeepFace.py b/deepface/DeepFace.py index a95bcc534..e4e5411e8 100644 --- a/deepface/DeepFace.py +++ b/deepface/DeepFace.py @@ -56,7 +56,7 @@ def build_model(model_name: str, task: str = "facial_recognition") -> Any: - VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace, GhostFaceNet for face recognition - Age, Gender, Emotion, Race for facial attributes - - opencv, mtcnn, ssd, dlib, retinaface, mediapipe, yolov8, 'yolov11n', 'yolov11m', yunet, + - opencv, mtcnn, ssd, dlib, retinaface, mediapipe, yolov8, 'yolov11n', 'yolov11s', 'yolov11m', yunet, fastmtcnn or centerface for face detectors - Fasnet for spoofing task (str): facial_recognition, facial_attribute, face_detector, spoofing @@ -96,7 +96,7 @@ def verify( OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face). detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv). distance_metric (string): Metric for measuring similarity. Options: 'cosine', @@ -187,7 +187,7 @@ def analyze( Set to False to avoid the exception for low-resolution images (default is True).
detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv). distance_metric (string): Metric for measuring similarity. Options: 'cosine', @@ -298,7 +298,7 @@ def find( Set to False to avoid the exception for low-resolution images (default is True). detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv). align (boolean): Perform alignment based on the eye positions (default is True). @@ -396,7 +396,7 @@ def represent( (default is True). detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv). align (boolean): Perform alignment based on the eye positions (default is True). @@ -462,7 +462,7 @@ def stream( OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face). detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv). distance_metric (string): Metric for measuring similarity. Options: 'cosine', @@ -517,7 +517,7 @@ def extract_faces( as a string, numpy array (BGR), or base64 encoded images. detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv). enforce_detection (boolean): If no face is detected in an image, raise an exception. @@ -601,7 +601,7 @@ def detectFace( added to resize the image (default is (224, 224)). detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv). enforce_detection (boolean): If no face is detected in an image, raise an exception. 
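For reference, a minimal usage sketch of the detector options documented above, assuming the yolov11* weights are downloaded on first use as wired up in weight_utils.py below; image paths are placeholders:

# Illustrative only: exercising the new detector backends through the public API.
from deepface import DeepFace

# Verification with one of the newly documented backends.
result = DeepFace.verify(
    img1_path="img1.jpg",          # placeholder image path
    img2_path="img2.jpg",          # placeholder image path
    detector_backend="yolov11s",   # new option introduced by this patch
)
print(result["verified"])

# Face extraction with another new backend.
faces = DeepFace.extract_faces(img_path="img1.jpg", detector_backend="yolov11n")
print(f"{len(faces)} face(s) detected")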
diff --git a/deepface/commons/weight_utils.py b/deepface/commons/weight_utils.py index 421d85704..d3207022a 100644 --- a/deepface/commons/weight_utils.py +++ b/deepface/commons/weight_utils.py @@ -127,7 +127,7 @@ def download_all_models_in_one_shot() -> None: MODEL_URL as SSD_MODEL, WEIGHTS_URL as SSD_WEIGHTS, ) - from deepface.models.face_detection.Yolo import ( + from deepface.models.YoloModel import ( WEIGHT_URLS as YOLO_WEIGHTS, WEIGHT_NAMES as YOLO_WEIGHT_NAMES, YoloModel @@ -170,6 +170,10 @@ def download_all_models_in_one_shot() -> None: "filename": YOLO_WEIGHT_NAMES[YoloModel.V11N.value], "url": YOLO_WEIGHTS[YoloModel.V11N.value], }, + { + "filename": YOLO_WEIGHT_NAMES[YoloModel.V11S.value], + "url": YOLO_WEIGHTS[YoloModel.V11S.value], + }, { "filename": YOLO_WEIGHT_NAMES[YoloModel.V11M.value], "url": YOLO_WEIGHTS[YoloModel.V11M.value], diff --git a/deepface/models/YoloClientBase.py b/deepface/models/YoloClientBase.py new file mode 100644 index 000000000..83ad32413 --- /dev/null +++ b/deepface/models/YoloClientBase.py @@ -0,0 +1,37 @@ +# built-in dependencies +from typing import Any + +# project dependencies +from deepface.models.YoloModel import YoloModel, WEIGHT_URLS, WEIGHT_NAMES +from deepface.commons import weight_utils +from deepface.commons.logger import Logger + +logger = Logger() + + +class YoloClientBase: + def __init__(self, model: YoloModel): + self.model = self.build_model(model) + + def build_model(self, model: YoloModel) -> Any: + """ + Build a yolo detector model + Returns: + model (Any) + """ + + # Import the optional Ultralytics YOLO model + try: + from ultralytics import YOLO + except ModuleNotFoundError as e: + raise ImportError( + "Yolo is an optional detector, ensure the library is installed. " + "Please install using 'pip install ultralytics'" + ) from e + + weight_file = weight_utils.download_weights_if_necessary( + file_name=WEIGHT_NAMES[model.value], source_url=WEIGHT_URLS[model.value] + ) + + # Return face_detector + return YOLO(weight_file) diff --git a/deepface/models/YoloModel.py b/deepface/models/YoloModel.py new file mode 100644 index 000000000..93f2a74fb --- /dev/null +++ b/deepface/models/YoloModel.py @@ -0,0 +1,21 @@ +from enum import Enum + + +class YoloModel(Enum): + V8N = 0 + V11N = 1 + V11S = 2 + V11M = 3 + + +# Model's weights paths +WEIGHT_NAMES = ["yolov8n-face.pt", + "yolov11n-face.pt", + "yolov11s-face.pt", + "yolov11m-face.pt"] + +# Google Drive URL from repo (https://github.com/derronqi/yolov8-face) ~6MB +WEIGHT_URLS = ["https://drive.google.com/uc?id=1qcr9DbgsX3ryrz2uU8w4Xm3cOrRywXqb", + "https://github.com/akanametov/yolo-face/releases/download/v0.0.0/yolov11n-face.pt", + "https://github.com/akanametov/yolo-face/releases/download/v0.0.0/yolov11s-face.pt", + "https://github.com/akanametov/yolo-face/releases/download/v0.0.0/yolov11m-face.pt"] diff --git a/deepface/models/face_detection/Yolo.py b/deepface/models/face_detection/Yolo.py index f0826e140..1f29781b9 100644 --- a/deepface/models/face_detection/Yolo.py +++ b/deepface/models/face_detection/Yolo.py @@ -1,61 +1,22 @@ # built-in dependencies import os -from typing import Any, List -from enum import Enum +from typing import List # 3rd party dependencies import numpy as np # project dependencies +from deepface.models.YoloClientBase import YoloClientBase +from deepface.models.YoloModel import YoloModel from deepface.models.Detector import Detector, FacialAreaRegion -from deepface.commons import weight_utils from deepface.commons.logger import Logger logger = Logger() -# Model's weights 
paths -WEIGHT_NAMES = ["yolov8n-face.pt", - "yolov11n-face.pt", - "yolov11m-face.pt"] -# Google Drive URL from repo (https://github.com/derronqi/yolov8-face) ~6MB -WEIGHT_URLS = ["https://drive.google.com/uc?id=1qcr9DbgsX3ryrz2uU8w4Xm3cOrRywXqb", - "https://github.com/akanametov/yolo-face/releases/download/v0.0.0/yolov11n-face.pt", - "https://github.com/akanametov/yolo-face/releases/download/v0.0.0/yolov11m-face.pt"] - - -class YoloModel(Enum): - V8N = 0 - V11N = 1 - V11M = 2 - - -class YoloClient(Detector): +class YoloDetectorClient(YoloClientBase, Detector): def __init__(self, model: YoloModel): - self.model = self.build_model(model) - - def build_model(self, model: YoloModel) -> Any: - """ - Build a yolo detector model - Returns: - model (Any) - """ - - # Import the optional Ultralytics YOLO model - try: - from ultralytics import YOLO - except ModuleNotFoundError as e: - raise ImportError( - "Yolo is an optional detector, ensure the library is installed. " - "Please install using 'pip install ultralytics'" - ) from e - - weight_file = weight_utils.download_weights_if_necessary( - file_name=WEIGHT_NAMES[model.value], source_url=WEIGHT_URLS[model.value] - ) - - # Return face_detector - return YOLO(weight_file) + super().__init__(model) def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]: """ @@ -80,21 +41,24 @@ def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]: # For each face, extract the bounding box, the landmarks and confidence for result in results: - if result.boxes is None or result.keypoints is None: + if result.boxes is None: continue # Extract the bounding box and the confidence x, y, w, h = result.boxes.xywh.tolist()[0] confidence = result.boxes.conf.tolist()[0] - # right_eye_conf = result.keypoints.conf[0][0] - # left_eye_conf = result.keypoints.conf[0][1] - right_eye = result.keypoints.xy[0][0].tolist() - left_eye = result.keypoints.xy[0][1].tolist() + right_eye = None + left_eye = None + if result.keypoints is not None: + # right_eye_conf = result.keypoints.conf[0][0] + # left_eye_conf = result.keypoints.conf[0][1] + right_eye = result.keypoints.xy[0][0].tolist() + left_eye = result.keypoints.xy[0][1].tolist() - # eyes are list of float, need to cast them tuple of int - left_eye = tuple(int(i) for i in left_eye) - right_eye = tuple(int(i) for i in right_eye) + # eyes are list of float, need to cast them tuple of int + left_eye = tuple(int(i) for i in left_eye) + right_eye = tuple(int(i) for i in right_eye) x, y, w, h = int(x - w / 2), int(y - h / 2), int(w), int(h) facial_area = FacialAreaRegion( @@ -111,16 +75,21 @@ def detect_faces(self, img: np.ndarray) -> List[FacialAreaRegion]: return resp -class YoloClientV8n(YoloClient): +class YoloDetectorClientV8n(YoloDetectorClient): def __init__(self): super().__init__(YoloModel.V8N) -class YoloClientV11n(YoloClient): +class YoloDetectorClientV11n(YoloDetectorClient): def __init__(self): super().__init__(YoloModel.V11N) -class YoloClientV11m(YoloClient): +class YoloDetectorClientV11s(YoloDetectorClient): + def __init__(self): + super().__init__(YoloModel.V11S) + + +class YoloDetectorClientV11m(YoloDetectorClient): def __init__(self): super().__init__(YoloModel.V11M) diff --git a/deepface/models/facial_recognition/Yolo.py b/deepface/models/facial_recognition/Yolo.py new file mode 100644 index 000000000..e8f6d5904 --- /dev/null +++ b/deepface/models/facial_recognition/Yolo.py @@ -0,0 +1,44 @@ +# built-in dependencies +from typing import List + +# 3rd party dependencies +import numpy as np + +# project 
dependencies +from deepface.models.YoloClientBase import YoloClientBase +from deepface.models.YoloModel import YoloModel +from deepface.models.FacialRecognition import FacialRecognition +from deepface.commons.logger import Logger + +logger = Logger() + + +class YoloFacialRecognitionClient(YoloClientBase, FacialRecognition): + def __init__(self, model: YoloModel): + super().__init__(model) + self.model_name = "Yolo" + self.input_shape = None + self.output_shape = 512 + + def forward(self, img: np.ndarray) -> List[float]: + return self.model.embed(img)[0].tolist() + + +class YoloFacialRecognitionClientV8n(YoloFacialRecognitionClient): + def __init__(self): + super().__init__(YoloModel.V8N) + + +class YoloFacialRecognitionClientV11n(YoloFacialRecognitionClient): + def __init__(self): + super().__init__(YoloModel.V11N) + + +class YoloFacialRecognitionClientV11s(YoloFacialRecognitionClient): + def __init__(self): + super().__init__(YoloModel.V11S) + + +class YoloFacialRecognitionClientV11m(YoloFacialRecognitionClient): + def __init__(self): + super().__init__(YoloModel.V11M) diff --git a/deepface/modules/demography.py b/deepface/modules/demography.py index 0f29cd9f7..cc5112e2c 100644 --- a/deepface/modules/demography.py +++ b/deepface/modules/demography.py @@ -35,7 +35,7 @@ def analyze( Set to False to avoid the exception for low-resolution images (default is True). detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv). distance_metric (string): Metric for measuring similarity. Options: 'cosine', diff --git a/deepface/modules/detection.py b/deepface/modules/detection.py index bce658605..4ed907636 100644 --- a/deepface/modules/detection.py +++ b/deepface/modules/detection.py @@ -38,7 +38,7 @@ def extract_faces( as a string, numpy array (BGR), or base64 encoded images. detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv) enforce_detection (boolean): If no face is detected in an image, raise an exception. 
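A hedged sketch of how the new YOLO-based recognition client above can be reached through represent(); the model_name routing relies on the mapping added in modeling.py further below, and the image path is a placeholder:

# Illustrative only: YOLO variants used for embeddings via the new
# YoloFacialRecognitionClient classes (output_shape is declared as 512).
from deepface import DeepFace

embedding_objs = DeepFace.represent(
    img_path="img1.jpg",           # placeholder image path
    model_name="yolov11s",         # routed to YoloFacialRecognitionClientV11s
    detector_backend="yolov11s",
)
print(len(embedding_objs[0]["embedding"]))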
diff --git a/deepface/modules/modeling.py b/deepface/modules/modeling.py index ba65383e9..fa884ac6b 100644 --- a/deepface/modules/modeling.py +++ b/deepface/modules/modeling.py @@ -12,6 +12,7 @@ Dlib, Facenet, GhostFaceNet, + Yolo as YoloFacialRecognition, ) from deepface.models.face_detection import ( FastMtCnn, @@ -21,7 +22,7 @@ Dlib as DlibDetector, RetinaFace, Ssd, - Yolo, + Yolo as YoloFaceDetector, YuNet, CenterFace, ) @@ -38,7 +39,7 @@ def build_model(task: str, model_name: str) -> Any: - VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace, GhostFaceNet for face recognition - Age, Gender, Emotion, Race for facial attributes - - opencv, mtcnn, ssd, dlib, retinaface, mediapipe, yolov8, 'yolov11n', 'yolov11m', yunet, + - opencv, mtcnn, ssd, dlib, retinaface, mediapipe, yolov8, 'yolov11n', 'yolov11s', 'yolov11m', yunet, fastmtcnn or centerface for face detectors - Fasnet for spoofing Returns: @@ -60,6 +61,10 @@ def build_model(task: str, model_name: str) -> Any: "ArcFace": ArcFace.ArcFaceClient, "SFace": SFace.SFaceClient, "GhostFaceNet": GhostFaceNet.GhostFaceNetClient, + "yolov8": YoloFacialRecognition.YoloFacialRecognitionClientV8n, + "yolov11n": YoloFacialRecognition.YoloFacialRecognitionClientV11n, + "yolov11s": YoloFacialRecognition.YoloFacialRecognitionClientV11s, + "yolov11m": YoloFacialRecognition.YoloFacialRecognitionClientV11m }, "spoofing": { "Fasnet": FasNet.Fasnet, @@ -77,9 +82,10 @@ def build_model(task: str, model_name: str) -> Any: "dlib": DlibDetector.DlibClient, "retinaface": RetinaFace.RetinaFaceClient, "mediapipe": MediaPipe.MediaPipeClient, - "yolov8": Yolo.YoloClientV8n, - "yolov11n": Yolo.YoloClientV11n, - "yolov11m": Yolo.YoloClientV11m, + "yolov8": YoloFaceDetector.YoloDetectorClientV8n, + "yolov11n": YoloFaceDetector.YoloDetectorClientV11n, + "yolov11s": YoloFaceDetector.YoloDetectorClientV11s, + "yolov11m": YoloFaceDetector.YoloDetectorClientV11m, "yunet": YuNet.YuNetClient, "fastmtcnn": FastMtCnn.FastMtCnnClient, "centerface": CenterFace.CenterFaceClient, diff --git a/deepface/modules/recognition.py b/deepface/modules/recognition.py index 96313f059..d254d66e3 100644 --- a/deepface/modules/recognition.py +++ b/deepface/modules/recognition.py @@ -54,7 +54,7 @@ def find( Default is True. Set to False to avoid the exception for low-resolution images. detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8','yolov11n','yolov11m', 'centerface' or 'skip'. + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8','yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip'. align (boolean): Perform alignment based on the eye positions. @@ -483,7 +483,7 @@ def find_batched( Default is True. Set to False to avoid the exception for low-resolution images. detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip'. + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip'. align (boolean): Perform alignment based on the eye positions. diff --git a/deepface/modules/representation.py b/deepface/modules/representation.py index fbe952e69..f9e751d99 100644 --- a/deepface/modules/representation.py +++ b/deepface/modules/representation.py @@ -36,7 +36,7 @@ def represent( Default is True. Set to False to avoid the exception for low-resolution images. detector_backend (string): face detector backend. 
Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip'. + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip'. align (boolean): Perform alignment based on the eye positions. @@ -122,11 +122,12 @@ def represent( confidence = img_obj["confidence"] # resize to expected shape of ml model - img = preprocessing.resize_image( - img=img, - # thanks to DeepId (!) - target_size=(target_size[1], target_size[0]), - ) + if target_size is not None: + img = preprocessing.resize_image( + img=img, + # thanks to DeepId (!) + target_size=(target_size[1], target_size[0]), + ) # custom normalization img = preprocessing.normalize_input(img=img, normalization=normalization) diff --git a/deepface/modules/streaming.py b/deepface/modules/streaming.py index 64ebe80aa..ca51989cb 100644 --- a/deepface/modules/streaming.py +++ b/deepface/modules/streaming.py @@ -45,7 +45,7 @@ def analysis( OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face). detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv). distance_metric (string): Metric for measuring similarity. Options: 'cosine', @@ -192,7 +192,7 @@ def search_identity( model_name (str): Model for face recognition. Options: VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face). detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv). distance_metric (string): Metric for measuring similarity. Options: 'cosine', 'euclidean', 'euclidean_l2' (default is cosine). @@ -374,7 +374,7 @@ def grab_facial_areas( Args: img (np.ndarray): image itself detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv). threshold (int): threshold for facial area, discard smaller ones Returns @@ -443,7 +443,7 @@ def perform_facial_recognition( db_path (string): Path to the folder containing image files. All detected faces in the database will be considered in the decision-making process. detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv). distance_metric (string): Metric for measuring similarity. Options: 'cosine', 'euclidean', 'euclidean_l2' (default is cosine). diff --git a/deepface/modules/verification.py b/deepface/modules/verification.py index 6e05eb469..1c03e5c6f 100644 --- a/deepface/modules/verification.py +++ b/deepface/modules/verification.py @@ -47,7 +47,7 @@ def verify( OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face). 
detector_backend (string): face detector backend. Options: 'opencv', 'retinaface', - 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11m', 'centerface' or 'skip' + 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m', 'centerface' or 'skip' (default is opencv) distance_metric (string): Metric for measuring similarity. Options: 'cosine', diff --git a/tests/visual-test.py b/tests/visual-test.py index 2b1ff22cd..111e9ed11 100644 --- a/tests/visual-test.py +++ b/tests/visual-test.py @@ -22,6 +22,10 @@ "ArcFace", "SFace", "GhostFaceNet", + "yolov8", + "yolov11n", + "yolov11s", + "yolov11m" ] detector_backends = [ @@ -35,6 +39,7 @@ "yunet", "yolov8", "yolov11n", + "yolov11s", "yolov11m", "centerface", ]
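As a complement to the visual-test.py additions above, a minimal smoke-test sketch that loops over the newly added detector backends; the dataset path is a placeholder:

# Illustrative only: quick smoke test over the backends added by this patch.
from deepface import DeepFace

for backend in ["yolov11n", "yolov11s", "yolov11m"]:
    faces = DeepFace.extract_faces(
        img_path="dataset/img1.jpg",   # placeholder test image
        detector_backend=backend,
    )
    print(backend, "->", len(faces), "face(s) detected")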