diff --git a/face-recognition/Readme.md b/face-recognition/Readme.md new file mode 100644 index 0000000000..409d75a742 --- /dev/null +++ b/face-recognition/Readme.md @@ -0,0 +1,47 @@ +# Face Recognition Project + +## Overview +This repository is part of the ML-CaPsule project, specifically focusing on face recognition using machine learning techniques. + +## Features +- **Face Detection**: Identifies faces in images and video streams. +- **Face Recognition**: Matches detected faces with known faces. +- **Real-time Processing**: Capable of processing video feeds for real-time face recognition. +- **Facial-Attribute Analysis**: Labels each detected face with race, gender, age, and emotion. + +## Requirements +- Python >3.9 +- deepface +- (the following are deepface's explicit dependencies, installed automatically with it) +- requests>=2.27.1 +- numpy>=1.14.0 +- pandas>=0.23.4 +- gdown>=3.10.1 +- tqdm>=4.30.0 +- Pillow>=5.2.0 +- opencv-python>=4.5.5.64 +- tensorflow>=1.9.0 +- keras>=2.2.0 +- Flask>=1.1.2 +- mtcnn>=0.1.0 +- retina-face>=0.0.1 +- fire>=0.4.0 +- gunicorn>=20.1.0 + +## Installation
1. Clone the repository: + ```bash + git clone https://github.com/Raghucharan16/ML-CaPsule.git + cd ML-CaPsule/face-recognition + ``` +2. Install the required packages: + ```bash + pip install -r requirements.txt + ``` + +## Usage
1. Run the `deepface.ipynb` cells for the required operations. + + +## Contributing +Contributions are welcome! Please open an issue or submit a pull request. diff --git a/face-recognition/deepface.ipynb b/face-recognition/deepface.ipynb new file mode 100644 index 0000000000..29bc08f0a1 --- /dev/null +++ b/face-recognition/deepface.ipynb @@ -0,0 +1,111 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "68d0e7bb", + "metadata": {}, + "outputs": [], + "source": [ + "from deepface import DeepFace\n", + "detector_backends = [ 'opencv', 'retinaface',\n", + " 'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface'] # or 'skip' (default is opencv)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "74c9e27e", + "metadata": {}, + "outputs": [], + "source": [ + "#Faces Detection\n", + "faces=DeepFace.extract_faces(img_path=\"img.jpg\", #select the image [it can have multiple faces]\n", + " detector_backend = 'mtcnn', #[select the backend among mtcnn, opencv, ssd, dlib]\n", + " enforce_detection = False, #set False to avoid an exception when no face is detected\n", + " align = True, #pass this argument to align faces\n", + " margin = 10) #add margin for face extraction\n", + "print(faces)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3c46a578", + "metadata": {}, + "outputs": [], + "source": [ + "#Facial Recognition\n", + "models = [\n", + " \"VGG-Face\", \n", + " \"Facenet\", \n", + " \"Facenet512\", \n", + " \"OpenFace\", \n", + " \"DeepFace\", \n", + " \"DeepID\", \n", + " \"ArcFace\", \n", + " \"Dlib\", \n", + " \"SFace\",\n", + "GhostFaceNet\",\n", + "]\n", + "#You can adjust the threshold according to your use case. Print the result and see the distance values. 
Then, you can decide the optimal threshold for your project.\n", + "#you can use any of these models with the verify and find methods for recognition\n", + "fr_result = DeepFace.verify(\n", + " img1_path = \"img1.jpg\",\n", + " img2_path = \"img2.jpg\",\n", + ")\n", + "print(fr_result)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e0cd963f", + "metadata": {}, + "outputs": [], + "source": [ + "#Deepface's find method\n", + "dfs = DeepFace.find(\n", + " img_path = \"img1.jpg\",\n", + " db_path = \"PATH_TO_YOUR_DB\"\n", + ")\n", + "print(dfs) #you can print the result to see the distance values\n", + "\n", + "#Facial Analysis\n", + "objs = DeepFace.analyze(\n", + " img_path = \"img1.jpg\", \n", + " actions = ['age', 'gender', 'race', 'emotion'],\n", + ")\n", + "print(objs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8ab050cb", + "metadata": {}, + "outputs": [], + "source": [ + "#Facial Embeddings\n", + "embedding_objs = DeepFace.represent(\n", + " img_path = \"img1.jpg\",\n", + " model_name = models[2],\n", + ")\n", + "#These can be used for clustering, finding similarity between faces, vector operations, by storing in a vector database for faster retrieval, etc." 
+ ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "env", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/face-recognition/img1.jpg b/face-recognition/img1.jpg new file mode 100644 index 0000000000..ccc5b344cc Binary files /dev/null and b/face-recognition/img1.jpg differ diff --git a/face-recognition/img2.jpg b/face-recognition/img2.jpg new file mode 100644 index 0000000000..5e5d06fbd4 Binary files /dev/null and b/face-recognition/img2.jpg differ diff --git a/face-recognition/requirements.txt b/face-recognition/requirements.txt new file mode 100644 index 0000000000..c583e925e0 --- /dev/null +++ b/face-recognition/requirements.txt @@ -0,0 +1 @@ +deepface \ No newline at end of file