diff --git a/.gitignore b/.gitignore new file mode 100755 index 0000000..e869729 --- /dev/null +++ b/.gitignore @@ -0,0 +1,326 @@ +.idea +*__pycache__* +*.pyc +dist +*egg +*.egg-info +*.ipynb_checkpoints/* +.ipynb_checkpoints +.DS_Store +/data/blood-cells.zip +/data/data +/dataset +/dataset-master +/dataset2-master +/weights +mlruns +*.zip +*.tar + +# Created by https://www.toptal.com/developers/gitignore/api/python,pycharm,jupyternotebooks +# Edit at https://www.toptal.com/developers/gitignore?templates=python,pycharm,jupyternotebooks + +### JupyterNotebooks ### +# gitignore template for Jupyter Notebooks +# website: http://jupyter.org/ + +*/.ipynb_checkpoints/* + +# IPython +profile_default/ +ipython_config.py + +# Remove previous ipynb_checkpoints +# git rm -r .ipynb_checkpoints/ + +### PyCharm ### +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +# AWS User-specific +.idea/**/aws.xml + +# Generated files +.idea/**/contentModel.xml + +# Sensitive or high-churn files +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml +.idea/**/dbnavigator.xml + +# Gradle +.idea/**/gradle.xml +.idea/**/libraries + +# Gradle and Maven with auto-import +# When using Gradle or Maven with auto-import, you should exclude module files, +# since they will be recreated, and may cause churn. Uncomment if using +# auto-import. 
+# .idea/artifacts +# .idea/compiler.xml +# .idea/jarRepositories.xml +# .idea/modules.xml +# .idea/*.iml +# .idea/modules +# *.iml +# *.ipr + +# CMake +cmake-build-*/ + +# Mongo Explorer plugin +.idea/**/mongoSettings.xml + +# File-based project format +*.iws + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties + +# Editor-based Rest Client +.idea/httpRequests + +# Android studio 3.1+ serialized cache file +.idea/caches/build_file_checksums.ser + +### PyCharm Patch ### +# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 + +# *.iml +# modules.xml +# .idea/misc.xml +# *.ipr + +# Sonarlint plugin +# https://plugins.jetbrains.com/plugin/7973-sonarlint +.idea/**/sonarlint/ + +# SonarQube Plugin +# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin +.idea/**/sonarIssues.xml + +# Markdown Navigator plugin +# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced +.idea/**/markdown-navigator.xml +.idea/**/markdown-navigator-enh.xml +.idea/**/markdown-navigator/ + +# Cache file creation bug +# See https://youtrack.jetbrains.com/issue/JBR-2257 +.idea/$CACHE_FILE$ + +# CodeStream plugin +# https://plugins.jetbrains.com/plugin/12206-codestream +.idea/codestream.xml + +### Python ### +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject 
date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook + +# IPython + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# End of https://www.toptal.com/developers/gitignore/api/python,pycharm,jupyternotebooks + + +node_modules +/.bazelrc.user +/.tf_configure.bazelrc +/bazel-* +/bazel_pip +/tools/python_bin_path.sh +/tensorflow/tools/git/gen +/pip_test +/_python_build +__pycache__ +*.swp +.vscode/ +cmake_build/ +tensorflow/contrib/cmake/_build/ +.idea/** +/build/ +[Bb]uild/ +/tensorflow/core/util/version_info.cc +/tensorflow/python/framework/fast_tensor_util.cpp +/tensorflow/lite/gen/** +/tensorflow/lite/tools/make/downloads/** +/tensorflow/lite/tools/make/gen/** +/api_init_files_list.txt +/estimator_api_init_files_list.txt +*.whl + +# Android +.gradle +*.iml +local.properties +gradleBuild + +# iOS +*.pbxproj +*.xcworkspace +/*.podspec +/tensorflow/lite/**/coreml/**/BUILD +/tensorflow/lite/**/ios/BUILD +/tensorflow/lite/**/objc/BUILD +/tensorflow/lite/**/swift/BUILD +/tensorflow/lite/examples/ios/simple/data/*.tflite +/tensorflow/lite/examples/ios/simple/data/*.txt +Podfile.lock +Pods +xcuserdata diff --git a/README.md b/README.md new file mode 100644 index 0000000..420aa70 --- /dev/null +++ b/README.md @@ -0,0 +1,70 @@ +# Face-Detection-flask-gunicorn-nginx + +This is a simple demonstration of dockerized face-detection API which is implemented with flask and nginx and scaled +with gunicorn. + +# Notes + +1. For face-detection, I used pytorch version of mtcnn from deep_utils library. For more information check + out [deep_utils](https://github.com/pooya-mohammadi/deep_utils). +2. 
The service is scaled up using gunicorn. Gunicorn is a simple library with high throughput for scaling Python services. + 1. To increase the number of workers, increase the number of `workers` in the `docker-compose.yml` file. + 2. For more information about gunicorn workers and threads check the following stackoverflow question + 3. [gunicorn-workers-and-threads](https://stackoverflow.com/questions/38425620/gunicorn-workers-and-threads) +3. nginx is used as a reverse proxy + +# Setup + +1. The face-detection name in docker-compose can be changed to any of the models available in the deep-utils library. +2. For simplicity, I placed the weights of the mtcnn-torch model in app/weights. +3. To use different face-detection models in deep_utils, apply the following changes: + 1. Change the value of `FACE_DETECTION_MODEL` in the `docker-compose.yml` file. + 2. Modify configs of a new model in `app/base_app.py` file. + 3. It's recommended to run the new model in your local system and acquire the downloaded weights from `~/.deep_utils` + directory and place it inside `app/weights` directory. This will save you tons of time while working with models with + heavy weights. + 4. If your new model is based on `tensorflow`, comment the `pytorch` installation section in `app/Dockerfile` and + uncomment the `tensorflow` installation lines. 
+ +# ScaleUp + +I scaled the project using gunicorn. + +# RUN + +To run the API, install `docker` and `docker-compose`, then execute the following command: + +## windows + +`docker-compose up --build` + +## Linux + +`sudo docker-compose up --build` + +# Inference + +To send an image and get back the boxes run the following command: +`curl --request POST ip:port/endpoint -F image=@img-add` + +If you run the service on your local system the following request should work perfectly: + +```terminal +curl --request POST http://127.0.0.1:8000/face -F image=@./sample-images/movie-stars.jpg +``` + +# Issues + +If you find something missing, please open an issue or kindly create a pull request. + +# References + +1. https://github.com/pooya-mohammadi/deep_utils + +# License + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0. + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + +See the License for the specific language governing permissions and limitations under the License. 
diff --git a/app/Dockerfile b/app/Dockerfile new file mode 100755 index 0000000..07967d9 --- /dev/null +++ b/app/Dockerfile @@ -0,0 +1,37 @@ +FROM python:3.9.7-buster +LABEL author="Pooya Mohammadi Kazaj " + + +# os and opencv libraries +RUN apt-get update -y \ + && apt install libgl1-mesa-glx -y \ + && apt-get install 'ffmpeg' 'libsm6' 'libxext6' -y \ + && python -m pip install --no-cache-dir --upgrade pip + +# fixed python libraries for pytorch models +RUN pip install --no-cache-dir torch==1.10.0+cpu torchvision==0.11.1+cpu \ + -f https://download.pytorch.org/whl/torch_stable.html \ + && pip install --no-cache-dir numpy==1.21.4 \ + && pip install --no-cache-dir opencv-python==4.5.4.58 \ + && pip install --no-cache-dir deep_utils==0.8.8 \ + && pip install --no-cache-dir scikit-learn==1.0.1 \ + && pip install --no-cache-dir matplotlib==3.4.3 \ + && pip install --no-cache-dir pandas==1.3.4 \ + && rm -rf /root/.cache/pip + +# fixed python libraries for tensorflow models +# RUN pip install --no-cache-dir tensorflow==2.7.0 \ +# && pip install --no-cache-dir numpy==1.21.4 \ +# && pip install --no-cache-dir opencv-python==4.5.4.60 \ +# && pip install --no-cache-dir deep_utils==0.8.8 \ +# && rm -rf /root/.cache/pip + +# Add new python libraries here or to requirements.txt +# RUN pip install --no-cache-dir + +COPY . 
/app +WORKDIR /app + +RUN pip install --no-cache-dir -r requirements.txt + +CMD gunicorn --workers=2 -b 0.0.0.0:660 entry_point:app --worker-class sync diff --git a/app/__init__.py b/app/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/app/base_app.py b/app/base_app.py new file mode 100755 index 0000000..aa5ef3c --- /dev/null +++ b/app/base_app.py @@ -0,0 +1,47 @@ +import os +from werkzeug.datastructures import FileStorage +from service import Inference +from flask import Flask +from flask_restful import Api, reqparse + +# define the app and the api variables +ENDPOINT = os.getenv('ENDPOINT', '/face') +HOST = "127.0.0.1" +app = Flask(ENDPOINT) +api = Api(app) + + +PORT_NUMBER = int(os.getenv('PORT_NUMBER', 8080)) + +# load the model and weights +FACE_DETECTION_MODEL = os.getenv('FACE_DETECTION_MODEL', 'MTCNNTorchFaceDetector') + +# The addresses for weights go here +if FACE_DETECTION_MODEL == "MTCNNTorchFaceDetector": + rnet = '/app/weights/rnet.npy' + onet = '/app/weights/onet.npy' + pnet = '/app/weights/pnet.npy' + model_configs = dict(rnet=rnet, onet=onet, pnet=pnet) + print(f"[INFO] Face detection mode is set to {FACE_DETECTION_MODEL}") +else: + # The configs of models other than mtcnn go here + model_configs = dict() + print( + f"[INFO] the configs for model:{FACE_DETECTION_MODEL} is set to {model_configs}." + f" If it's empty, the deep_utils library will use the defaults configs and most surely will download " + f"the weights each time you run the dockerfile ") + +inference = Inference(FACE_DETECTION_MODEL, **model_configs) +POST_TYPE = os.getenv("POST_TYPE", "FORM") + +# set global variables +app.config['inference'] = inference +app.config['POST_TYPE'] = POST_TYPE + +# file Parser arguments. 
Only Form is implemented +app.config['PARSER'] = reqparse.RequestParser() +app.config['PARSER'].add_argument('image', + type=FileStorage, + location='files', + required=True, + help='provide an image file') diff --git a/app/endpoints.py b/app/endpoints.py new file mode 100755 index 0000000..c29e894 --- /dev/null +++ b/app/endpoints.py @@ -0,0 +1,32 @@ +import sys +from flask import jsonify +from flask_restful import Resource +from base_app import app +from deep_utils import b64_to_img +import cv2 +import numpy as np + + +class FaceDetection(Resource): + @staticmethod + def post(): + args = app.config['PARSER'].parse_args() + contents = args['image'] + if app.config['POST_TYPE'] == 'JSON': + image = b64_to_img(contents) + elif app.config['POST_TYPE'] == 'FORM': + image = np.array(bytearray(contents.read()), dtype=np.uint8) + image = cv2.imdecode(image, cv2.IMREAD_COLOR) + else: + print(f"[ERROR] POST_TYPE:{app.config['POST_TYPE']} is not valid!, exiting ...") + sys.exit(1) + res = app.config['inference'].infer(image) + return res + + @staticmethod + def get(): + """ + Bug test + :return: some text + """ + return jsonify({"Just": "Fine!"}) diff --git a/app/entry_point.py b/app/entry_point.py new file mode 100755 index 0000000..5c18a98 --- /dev/null +++ b/app/entry_point.py @@ -0,0 +1,7 @@ +from endpoints import FaceDetection +from base_app import app, api, ENDPOINT, HOST, PORT_NUMBER + +api.add_resource(FaceDetection, ENDPOINT) + +if __name__ == '__main__': + app.run(HOST, port=PORT_NUMBER) diff --git a/app/requirements.txt b/app/requirements.txt new file mode 100755 index 0000000..c8fb592 --- /dev/null +++ b/app/requirements.txt @@ -0,0 +1,7 @@ +Flask==2.0.2 +gunicorn==20.1.0 +pillow==8.4.0 +pyyaml==6.0 +scipy==1.7.2 +flask_restful==0.3.9 +scikit-learn==1.0.1 \ No newline at end of file diff --git a/app/service.py b/app/service.py new file mode 100755 index 0000000..b60b475 --- /dev/null +++ b/app/service.py @@ -0,0 +1,24 @@ +import numpy as np +from deep_utils 
import face_detector_loader, Box, img_to_b64 +from flask import jsonify + + +class Inference: + def __init__(self, model_name, **model_config): + self.detector = face_detector_loader(model_name, **model_config) + + @staticmethod + def preprocessing(img) -> np.ndarray: + if type(img) is not np.ndarray: + img = np.array(img).astype(np.uint8) + return img + + def infer(self, img): + img = self.preprocessing(img) + objects = self.detector.detect_faces(img, is_rgb=False) + faces = dict() + boxes = objects['boxes'] + if boxes and len(boxes[0]): + images = Box.get_box_img(img, boxes) + faces = {f"face_{i}": box for i, box in enumerate(boxes, 1)} + return jsonify(faces) diff --git a/app/weights/onet.npy b/app/weights/onet.npy new file mode 100644 index 0000000..e8f63e5 Binary files /dev/null and b/app/weights/onet.npy differ diff --git a/app/weights/pnet.npy b/app/weights/pnet.npy new file mode 100644 index 0000000..91f8f9c Binary files /dev/null and b/app/weights/pnet.npy differ diff --git a/app/weights/rnet.npy b/app/weights/rnet.npy new file mode 100644 index 0000000..5e9bbab Binary files /dev/null and b/app/weights/rnet.npy differ diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100755 index 0000000..dfae0da --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,21 @@ +version: "3.7" +services: + face: + build: ./app + container_name: face + restart: always + expose: + - 660 + environment: + - ENDPOINT=/face + - FACE_DETECTION_MODEL=MTCNNTorchFaceDetector + command: gunicorn --workers=2 --threads 1 -b 0.0.0.0:660 entry_point:app --worker-class sync + + nginx: + build: ./nginx + container_name: nginx + restart: always + ports: + - 8000:80 + depends_on: + - face \ No newline at end of file diff --git a/nginx/Dockerfile b/nginx/Dockerfile new file mode 100755 index 0000000..95459ad --- /dev/null +++ b/nginx/Dockerfile @@ -0,0 +1,5 @@ +FROM nginx:1.21.3 + +RUN rm /etc/nginx/conf.d/default.conf + +COPY nginx.conf /etc/nginx/conf.d \ No newline at end of file 
diff --git a/nginx/nginx.conf b/nginx/nginx.conf new file mode 100755 index 0000000..db436f9 --- /dev/null +++ b/nginx/nginx.conf @@ -0,0 +1,14 @@ +server { + listen 80; + charset utf-8; + server_name 0.0.0.0; + + location / { + client_max_body_size 20M; + proxy_pass http://face:660; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + } + +} diff --git a/sample-images/movie-stars.jpg b/sample-images/movie-stars.jpg new file mode 100644 index 0000000..c249b8c Binary files /dev/null and b/sample-images/movie-stars.jpg differ