diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000..f0da052
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,8 @@
+[run]
+omit =
+    */site-packages/*
+    */distutils/*
+    tests/*
+    setup.py
+    mlchain/__main__.py
+concurrency = multiprocessing
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 27ed906..12db4de 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,4 +7,7 @@ mlchain.egg-info
 .pyc
 test/*
 .pytest_cache
-**/.DS_Store
\ No newline at end of file
+**/.DS_Store
+htmlcov
+.coverage
+.coverage.*
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
index cd5ed41..36a4761 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -44,7 +44,7 @@ jobs:
     - stage: build
       name: "Build on Python 3.7 MacOS"
       os: osx
-      language: minimal
+      language: shell
      install:
        - pip3 install -U scikit-build awscli pip
        - pip3 install -U -r requirements.txt
@@ -78,6 +78,7 @@ jobs:
      script:
        - pip install .
        - python -m coverage run --source=. -m unittest discover
+       - python -m coverage combine
        - python -m coverage report
      after_success:
        - codecov
@@ -93,6 +94,7 @@ jobs:
      script:
        - pip install .
        - python -m coverage run --source=. -m unittest discover
+       - python -m coverage combine
        - python -m coverage report
      after_success:
        - codecov
@@ -101,13 +103,14 @@ jobs:
    - stage: test
      name: "Test on Python 3.7 MacOS"
      os: osx
-     language: minimal
+     language: shell
      install:
        - pip3 install -U scikit-build pytest coverage codecov pip
        - pip3 install -U attrs
      script:
        - pip3 install .
        - python3 -m coverage run --source=. -m unittest discover
+       - python3 -m coverage combine
        - python3 -m coverage report
      after_success:
        - codecov
@@ -125,6 +128,7 @@ jobs:
      script:
        - python -m pip install .
        - python -m coverage run --source=. -m unittest discover
+       - python -m coverage combine
        - python -m coverage report
      after_success:
        - codecov
@@ -142,4 +146,4 @@ jobs:
        secure: lEK5DTFYmn2vkeP8OrG8CPgUOH/PvhO76OO5F2/K3HbQxJZLH3Vsmzk/mRsY1pIC52XpGLDSg/8d62V9bu2WNReLRQJZ1zIgKSswvf4USrsd0axGwIbJyuX+vr81/x4j4rQr3ohfb2zOIit7JoDrRugwBICSAEukNfXoOZdN6wVn4zpLsW/bdlrNlIsNmhzUBKfurRMPEqsSE1Bq2dDGmyd4KNiZlaJF4PEgNQHfV9qwW2+j/ky4ulCzFgIfxKUpIfvUPFN3Uw3HdaJAaOH6h+S84hvi30xwD8bT8os990fK0zZ/sW5e0ogRckmkGh3jDJcXzyCPetAABMkUjwTrIkehQ2I2QhT49V0+Qqq3A3iJFAGxufl3HEYNY4ZVxo7PXMBIkmA6TdKomhPLh1nKeQTaFomSvoY/Usnc5pF1Va5vhHUz2mGeUgnbR0kQPN2d3yF6hVvl2TnM4Ml0YK2sQa4og4xYOouBGqJsATTZ7OEdG/Iha1KpQRwkHaWs+FShGs1UL0UZO5+FYVSd6va3i1IPxZLoZzJBPscclKBJbrtfup7cWRO2LEeS1uzS0NEKGTmrZLmOW8m9o0Pt+F61pU8meG3O+n0CxYGrytUSNvCrMA0+ZT9DY0hF9Q08w7AASG+zZVarwhnFDakeNq91Z1GLGxB+mD3vIa5mslZGYMw=
      on:
        tags: true
-       distributions: "sdist bdist_wheel"
\ No newline at end of file
+       distributions: "sdist bdist_wheel"
\ No newline at end of file
diff --git a/LICENSE.txt b/LICENSE.txt
deleted file mode 100644
index ec95e9d..0000000
--- a/LICENSE.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2018 Techainer
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
\ No newline at end of file
diff --git a/MANIFEST.in b/MANIFEST.in
index 2ea1b5d..8005087 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include LICENSE *.rst *.toml *.yml *.yaml
+include LICENSE *.rst *.md *.toml *.yml *.yaml
 include requirements.txt
 
 graft .github
diff --git a/README.rst b/README.rst
deleted file mode 100644
index 4b4a136..0000000
--- a/README.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-Mlchain helps AI developers to easily run, deploy and monitor AI models and Workflows without having Devops skills.
-
-This Mlchain Python library lets you launch models and do many tasks with Mlchain Platform. Please sign up your account on Mlchain before working with it.
\ No newline at end of file
diff --git a/codecov.yml b/codecov.yml
new file mode 100644
index 0000000..24f3088
--- /dev/null
+++ b/codecov.yml
@@ -0,0 +1,9 @@
+coverage:
+  status:
+    project: # settings affecting project coverage
+      default:
+        target: auto # auto % coverage target
+        threshold: 5% # allow for 5% reduction of coverage without failing
+
+    # do not run coverage on patch nor changes
+    patch: false
\ No newline at end of file
diff --git a/mlchain/__init__.py b/mlchain/__init__.py
index 33dd70d..d100a4e 100644
--- a/mlchain/__init__.py
+++ b/mlchain/__init__.py
@@ -1,5 +1,5 @@
 # Parameters of MLchain
-__version__ = "0.1.6"
+__version__ = "0.1.7"
 
 HOST = "https://www.api.mlchain.ml"
 WEB_HOST = HOST
 API_ADDRESS = HOST
@@ -11,11 +11,4 @@
 from .context import mlchain_context
 from .base.exceptions import *
-from .config import mlconfig
-
-try:
-    import torch
-
-    torch.set_num_thread(1)
-except Exception as e:
-    pass
+from .config import mlconfig
\ No newline at end of file
diff --git a/mlchain/cli/init.py b/mlchain/cli/init.py
index 9917024..a29c381 100644
--- a/mlchain/cli/init.py
+++ b/mlchain/cli/init.py
@@ -14,4 +14,5 @@ def init_command(file):
         logger.warning("File {} exists. Please change name file".format(file))
     else:
         with open(file, 'wb') as fp:
-            fp.write(open(os.path.join(root_path, 'config.yaml'), 'rb').read())
+            with open(os.path.join(root_path, 'config.yaml'), 'rb') as fr:
+                fp.write(fr.read())
diff --git a/mlchain/cli/main.py b/mlchain/cli/main.py
index 2d98267..da865fc 100644
--- a/mlchain/cli/main.py
+++ b/mlchain/cli/main.py
@@ -17,22 +17,21 @@ def get_version(ctx, param, value):
     if not value or ctx.resilient_parsing:
         return
-    message = "Python %(python)s\nMlChain %(mlchain)s\nFlask %(flask)s\nQuart %(quart)s\nGrpc %(grpc)s"
+    message = "Python %(python)s\nMlChain %(mlchain)s\nFlask %(flask)s\nQuart %(quart)s\n"
     click.echo(
         message
         % {
             "python": platform.python_version(),
             "mlchain": __version__,
             "flask": flask.__version__,
-            "quart": quart.__version__,
-            "grpc": grpc.__version__
+            "quart": quart.__version__
         },
         color=ctx.color,
     )
     ctx.exit()
 
-def main(as_module=False):
+def main(as_module=False, is_testing=False):
     version_option = click.Option(
         ["--version"],
         help="Show the mlchain version",
@@ -46,8 +45,9 @@ def main(as_module=False, is_testing=False):
     cli.add_command(init_command)
     cli.add_command(artifact_command)
     cli.add_command(serve_command)
+    if is_testing:
+        return cli
     cli.main(args=sys.argv[1:], prog_name="python -m mlchain" if as_module else None)
 
-
 if __name__ == "__main__":
-    main(as_module=True)
+    cli = main(as_module=True)
diff --git a/mlchain/cli/run.py b/mlchain/cli/run.py
index 8993589..a8c5c65 100644
--- a/mlchain/cli/run.py
+++ b/mlchain/cli/run.py
@@ -131,11 +131,13 @@ def run_command(entry_file, host, port, bind, wrapper, server, workers, config,
         bind = None
     bind = mlconfig.get_value(bind, config, 'bind', [])
     wrapper = mlconfig.get_value(wrapper, config, 'wrapper', None)
-    workers = mlconfig.get_value(workers, config, 'workers', None)
-    if workers is None:
-        workers = 1
-    else:
-        workers = int(workers)
+    if wrapper == 'gunicorn' and os.name == 'nt':
+        logger.warning('The gunicorn wrapper is not supported on Windows. Switching to None instead.')
+        wrapper = None
+    workers = mlconfig.get_value(workers, config.get('gunicorn', {}), 'workers', None)
+    if workers is None and 'hypercorn' in config.keys():
+        workers = mlconfig.get_value(workers, config['hypercorn'], 'workers', None)
+    workers = int(workers) if workers is not None else 1
     name = mlconfig.get_value(name, config, 'name', None)
     cors = mlconfig.get_value(None, config, 'cors', False)
@@ -262,7 +264,7 @@ def load(self):
                           static_url_path=static_url_path, static_folder=static_folder,
                           template_folder=template_folder)
-        app.run(host, port, bind=bind, cors=cors, workers=workers,
+        app.run(host, port, bind=bind, cors=cors, gunicorn=False, hypercorn=True, **config.get('hypercorn', {}),
                 model_id=model_id)
 
     app = get_model(entry_file)
diff --git a/mlchain/client/__init__.py b/mlchain/client/__init__.py
index fc18f1b..51ee43e 100644
--- a/mlchain/client/__init__.py
+++ b/mlchain/client/__init__.py
@@ -7,8 +7,6 @@ class Client(HttpClient, GrpcClient):
     def __init__(self, api_key=None, api_address=None, serializer='json', timeout=5 * 60,
                  headers=None, type='http', name: str = "", version: str = "", check_status=False):
-        logger.warn("mlchain.client.Client is deprecated and will be remove in the next version. "
" - "Please use mlchain.client.HttpModel instead") assert isinstance(type, str), "type model must be a string" self._api_key = api_key self._api_address = api_address @@ -28,7 +26,7 @@ def __init__(self, api_key=None, api_address=None, serializer='json', timeout=5 raise Exception("type must be http or grpc") def model(self, name: str = "", version: str = "", check_status=False): - logger.warn( + logger.warning( "function .model is deprecated and will be remove in the next version") if self._type.lower() == 'http': return HttpClient(api_key=self._api_key, api_address=self._api_address, serializer=self._serializer, diff --git a/mlchain/config.py b/mlchain/config.py index 71448a5..954257a 100644 --- a/mlchain/config.py +++ b/mlchain/config.py @@ -43,7 +43,7 @@ def from_json(self, path): def from_yaml(self, path): import yaml - self.update(yaml.load(open(path))) + self.update(yaml.load(open(path), Loader=yaml.FullLoader)) def update(self, data): for k, v in data.items(): @@ -134,12 +134,14 @@ def load_config(data): def load_json(path): import json - return json.load(open(path, encoding='utf-8')) + with open(path, encoding='utf-8') as f: + return json.load(f) def load_yaml(path): import yaml - return yaml.load(open(path)) + with open(path) as f: + return yaml.load(f, Loader=yaml.FullLoader) def load_file(path): diff --git a/mlchain/server/static/mlchain_fig_1.png b/mlchain/server/static/mlchain_fig_1.png new file mode 100644 index 0000000..bdbaeea Binary files /dev/null and b/mlchain/server/static/mlchain_fig_1.png differ diff --git a/mlchain/server/static/mlchain_logo.png b/mlchain/server/static/mlchain_logo.png new file mode 100644 index 0000000..125743f Binary files /dev/null and b/mlchain/server/static/mlchain_logo.png differ diff --git a/mlchain/server/templates/home.html b/mlchain/server/templates/home.html index b71b5f5..673765a 100644 --- a/mlchain/server/templates/home.html +++ b/mlchain/server/templates/home.html @@ -1,392 +1,472 @@ - + - - - - - Mlchain | Serve your AI models in minutes - - - - - - - - - - - - - + } + + ; + - - - - - - - - -
[hunk garbled in extraction: the 392-line landing page ("Mlchain | Serve your AI models in minutes") is replaced by a 472-line redesign. Recoverable text from the new template: hero section "How to build better AI?", "Less effort, better model performance!", "Using MlChain now!" with a "More detail" link; feature sections "MLCHAIN", "Save time & efforts in training, deployment and scale", "Fully support with popular libraries and application integration.", "Better management", "Easy to track and organize your team, work and expense."; closing section "Contribute to MLChain", "Build and serve model yourself!"]
diff --git a/mlchain/server/templates/swaggerui/index.html b/mlchain/server/templates/swaggerui/index.html
index 5fa9952..eb68bff 100644
--- a/mlchain/server/templates/swaggerui/index.html
+++ b/mlchain/server/templates/swaggerui/index.html
@@ -1,60 +1 @@
[hunk garbled in extraction: the 60-line Swagger UI page is collapsed to a single minified line; only the visible text "Swagger UI" survives]
\ No newline at end of file
diff --git a/mlchain/server/templates/swaggerui/oauth2-redirect.html b/mlchain/server/templates/swaggerui/oauth2-redirect.html
deleted file mode 100644
index fb68399..0000000
--- a/mlchain/server/templates/swaggerui/oauth2-redirect.html
+++ /dev/null
@@ -1,67 +0,0 @@
[hunk garbled in extraction: the 67-line OAuth2 redirect helper page is deleted outright]
diff --git a/requirements.txt b/requirements.txt
index e45df0a..afd7095 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,47 +1,26 @@
-aiofiles>=0.4.0
-anyio>=1.2.1
-async-generator>=1.10
 attrs>=19.3.0
-blinker>=1.4
 blosc>=1.8.3; sys_platform != 'win32'
-chardet>=3.0.4
 Click>=7.1.2
 Flask>=1.1.2
 Flask-Cors>=3.0.8
 gunicorn>=20.0.4
-h11==0.9.0
-h2>=3.1.1<4.0.0
-hpack>=3.0.0,<4
-httptools>=0.0.13
-hyperframe>=5.2.0,<6
-idna>=2.8
-itsdangerous>=1.1.0
+h2==3.2.0
 Jinja2>=2.10.3
 MarkupSafe>=1.1.1
 msgpack==1.0.0
-multidict>=4.7.2
 numpy<=1.18.1
 opencv-python>=4.1.2.30
-outcome>=1.0.1
 Pillow==6.0.0
-priority>=1.3.0
-python-dotenv>=0.10.3
 Quart<=0.10.0
 Quart-CORS<=0.2.0
 requests>=2.22.0
-requests-futures>=1.0.0
 six>=1.13.0
-sniffio>=1.1.0
-sortedcontainers>=2.1.0
 toml>=0.10.0
 trio>=0.13.0
-typing-extensions>=3.7.4.1
 urllib3>=1.25.7
 uvicorn<=0.11.5
 uvloop>=0.14.0; sys_platform != 'win32'
-websockets>=8.1
 Werkzeug>=0.15.0
-wsproto>=0.15.0
 httpx==0.13.3
 hypercorn>=0.5.4
 grpcio==1.27.2
@@ -51,4 +30,5 @@ pyyaml
 fuzzywuzzy
 GPUtil
 tqdm
-pyngrok
\ No newline at end of file
+pyngrok
+python-Levenshtein
\ No newline at end of file
diff --git a/setup.py b/setup.py
index b168fbb..119aaa1 100644
--- a/setup.py
+++ b/setup.py
@@ -6,7 +6,7 @@ project = "mlchain"
 
 def readme():
-    with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
+    with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
         return f.read()
 
 def parse_requirements(filename):
@@ -21,7 +21,8 @@
     version=__version__,
     description='MLchain Python Library',
     long_description=readme(),
-    url='https://gitlab.com/techainer/ml_platform/mlchain-python',
+    long_description_content_type='text/markdown',
+    url='http://github.com/Techainer/mlchain-python',
     author='Techainer Inc.',
     author_email='admin@techainer.com',
     package_data={'mlchain.cli': ['config.yaml'],'mlchain.server':['static/*','templates/*','templates/swaggerui/*']},
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/dummy_server/mlconfig.yaml b/tests/dummy_server/mlconfig.yaml
new file mode 100644
index 0000000..eb360ce
--- /dev/null
+++ b/tests/dummy_server/mlconfig.yaml
@@ -0,0 +1,22 @@
+name: mlchain-server # name of the service
+version: '0.0.1'
+entry_file: server.py # python file containing the ServeModel object
+host: localhost # service host
+port: 12345 # service port
+server: flask # flask, quart, or grpc
+wrapper: gunicorn
+cors: true
+gunicorn: # gunicorn settings, used when wrapper is gunicorn
+  timeout: 60
+  keepalive: 60
+  max_requests: 0
+  threads: 1
+  workers: 5
+  worker_class: 'gthread'
+  umask: '0'
+hypercorn:
+  timeout: 200
+  keepalive: 60
+  threads: 1
+  workers: 1
+  worker_class: 'uvloop'
\ No newline at end of file
diff --git a/tests/dummy_server/server.py b/tests/dummy_server/server.py
new file mode 100644
index 0000000..a32a17c
--- /dev/null
+++ b/tests/dummy_server/server.py
@@ -0,0 +1,43 @@
+import cv2
+import numpy as np
+from mlchain.base import ServeModel
+from mlchain.decorators import except_serving
+
+
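+# A minimal dummy model for the CLI and client tests: predict() resizes any
+# input image to 100x100, which the tests assert on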
+class Model():
+    """ Just a dummy model """
+    def __init__(self):
+        pass
+
+    def predict(self, image: np.ndarray):
+        """
+        Resize input to 100 by 100.
+        Args:
+            image (numpy.ndarray): An input image.
+        Returns:
+            The image (np.ndarray) resized to 100 by 100.
+        """
+        image = cv2.resize(image, (100, 100))
+        return image
+
+    @except_serving
+    def dummy(self):
+        pass
+
+    def get_error(self):
+        raise Exception("This exception is expected")
+
+
+# Define model
+model = Model()
+
+# Serve model
+serve_model = ServeModel(model)
+
+# Deploy model
+if __name__ == '__main__':
+    from mlchain.server import FlaskServer
+
+    # Run the Flask model with up to 12 threads
+    FlaskServer(serve_model).run(port=5000, threads=12)
diff --git a/tests/test_cli.py b/tests/test_cli.py
new file mode 100644
index 0000000..4a0be3c
--- /dev/null
+++ b/tests/test_cli.py
@@ -0,0 +1,67 @@
+import logging
+import unittest
+import os
+
+from click.testing import CliRunner
+from mlchain.cli.main import main
+
+logger = logging.getLogger()
+
+class TestCLI(unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        unittest.TestCase.__init__(self, *args, **kwargs)
+        self.is_windows = os.name == 'nt'
+
+    def test_mlchain_version(self):
+        cli = main(is_testing=True)
+        result = CliRunner().invoke(cli, args='--version'.split(), prog_name='python -m mlchain')
+        logger.info('Output of `mlchain --version`:\n' + str(result.output))
+        assert result.exit_code == 0
+
+    def test_mlchain_init(self):
+        cli = main(is_testing=True)
+        runner = CliRunner()
+        with runner.isolated_filesystem():
+            result = runner.invoke(cli, args='init'.split(), prog_name='python -m mlchain')
+            logger.info('Output of `mlchain init`:\n' + str(result.output))
+            assert result.exit_code == 0
+
+    def test_mlchain_run(self):
+        if self.is_windows:
+            return 0
+        cli = main(is_testing=True)
+        runner = CliRunner()
+        test_breaking_process(runner, cli, args='run'.split(), new_pwd='tests/dummy_server', prog_name='python -m mlchain')
+
+
+def test_breaking_process(runner, cli, args, new_pwd, prog_name):
+    from multiprocessing import Queue, Process
+    from threading import Timer
+    from time import sleep
+    from os import kill, getpid
+    from signal import SIGINT
+
+    q = Queue()
+
+    # Run the app in a subprocess; after a short delay send SIGINT,
+    # and pass the results back through a queue
+    def background():
+        Timer(5, lambda: kill(getpid(), SIGINT)).start()
+        os.chdir(new_pwd)
+        result = runner.invoke(cli, args, prog_name=prog_name)
+        q.put(('exit_code', result.exit_code))
+        q.put(('output', result.output))
+
+    p = Process(target=background)
+    p.start()
+
+    results = {}
+
+    while p.is_alive():
+        sleep(0.5)
+    else:
+        while not q.empty():
+            key, value = q.get()
+            results[key] = value
+    logger.info('Output of `mlchain run`:\n' + results['output'])
+    assert results['exit_code'] == 0
\ No newline at end of file
diff --git a/tests/test_client.py b/tests/test_client.py
new file mode 100644
index 0000000..20c29f2
--- /dev/null
+++ b/tests/test_client.py
@@ -0,0 +1,85 @@
+import logging
+import unittest
+import time
+import os
+
+from click.testing import CliRunner
+from mlchain.cli.main import main
+from mlchain.workflows import Background, Task
+
+logger = logging.getLogger()
+cli = main(is_testing=True)
+runner = CliRunner()
+
+def test_breaking_process(runner, cli, args, new_pwd, prog_name, wait_time=10):
+    from multiprocessing import Queue, Process
+    from threading import Timer
+    from time import sleep
+    from os import kill, getpid
+    from signal import SIGINT
+
+    q = Queue()
+
+    # Run the app in a subprocess; after wait_time seconds send SIGINT,
+    # and pass the results back through a queue
+    def background():
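+        # Schedule SIGINT to this process after wait_time seconds so the
+        # blocking runner.invoke() call below returns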
+        Timer(wait_time, lambda: kill(getpid(), SIGINT)).start()
+        os.chdir(new_pwd)
+        result = runner.invoke(cli, args, prog_name=prog_name)
+        q.put(('exit_code', result.exit_code))
+        q.put(('output', result.output))
+
+    p = Process(target=background)
+    p.start()
+
+    results = {}
+
+    while p.is_alive():
+        sleep(0.5)
+    else:
+        while not q.empty():
+            key, value = q.get()
+            results[key] = value
+    logger.info('Output of `mlchain run`:\n' + results['output'])
+    assert results['exit_code'] == 0
+
+
+class TestClient(unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        unittest.TestCase.__init__(self, *args, **kwargs)
+        self.is_windows = os.name == 'nt'
+
+    def test_client(self):
+        if self.is_windows:
+            return 0
+        import numpy as np
+        from mlchain.client import Client
+        task = Task(test_breaking_process, runner, cli, args='run'.split(), new_pwd='tests/dummy_server', prog_name='python -m mlchain')
+        background = Background(task=task).run()
+        time.sleep(3)
+
+        # Test normal client
+        model = Client(api_address='http://localhost:12345', serializer='json').model(check_status=True)
+        input_image = np.ones((200, 200), dtype=np.uint8)
+        result_image = model.predict(input_image)
+        assert result_image.shape == (100, 100)
+
+        model = Client(api_address='http://localhost:12345', serializer='msgpack').model(check_status=True)
+        result_image_2 = model.predict(input_image)
+        assert result_image_2.shape == (100, 100)
+
+        model = Client(api_address='http://localhost:12345', serializer='msgpack_blosc').model(check_status=True)
+        result_image_3 = model.predict(input_image)
+        assert result_image_3.shape == (100, 100)
+
+        # Test client with exception: predict() on a non-image input should raise
+        try:
+            model.predict('abc')
+            logger.info('This was supposed to fail')
+        except Exception:
+            pass
+
+        # Test Swagger UI
+        import requests
+        requests.get('http://localhost:12345', timeout=5)
+        requests.get('http://localhost:12345/swagger/', timeout=5)
\ No newline at end of file
diff --git a/tests/test_converter.py b/tests/test_converter.py
index f9c54e8..ef1afe7 100644
--- a/tests/test_converter.py
+++ b/tests/test_converter.py
@@ -1,8 +1,11 @@
-from mlchain.base.converter import Converter, get_type
+import logging
 import unittest
 from typing import *
+
 import numpy as np
+from mlchain.base.converter import Converter, get_type
 
+logger = logging.getLogger()
 
 def check_type(value, t):
     t_origin, t_args = get_type(t)
@@ -24,6 +27,7 @@ class TestConverter(unittest.TestCase):
     def __init__(self, *args, **kwargs):
         self.converter = Converter()
         unittest.TestCase.__init__(self, *args, **kwargs)
+        logger.info("Running converter test")
 
     def test_list(self):
         print(get_type(Union[dict, list]))
diff --git a/tests/test_serializer.py b/tests/test_serializer.py
index e3e4090..e50e7c6 100644
--- a/tests/test_serializer.py
+++ b/tests/test_serializer.py
@@ -1,7 +1,13 @@
+import logging
+import sys
 import unittest
-from mlchain.base.serializer import JsonSerializer,MsgpackSerializer,MsgpackBloscSerializer
+
 import numpy as np
-import sys
+from mlchain.base.serializer import (JsonSerializer, MsgpackBloscSerializer,
+                                     MsgpackSerializer)
+
+logger = logging.getLogger()
+
 class TestSerializer(unittest.TestCase):
     def __init__(self,*args,**kwargs):
         self.serializers = [
@@ -13,6 +19,7 @@ def __init__(self,*args,**kwargs):
         unittest.TestCase.__init__(self,*args,**kwargs)
 
     def test_numpy(self):
+        logger.info("Running serializer numpy test")
         for data in [np.ones((2,3)),np.int64(1),np.float64(1)]:
             for serializer in self.serializers:
                 encoded = serializer.encode(data)
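+                # round-trip: decoding the encoded payload must reproduce the original value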
                 decoded = serializer.decode(encoded)
                 self.assertTrue(np.all(data==decoded),"{0}.encode: value: {1}".format(serializer.__class__.__name__,data))
 
     def test_data(self):
+        logger.info("Running serializer python data structure test")
         for data in [{'dict':{'str':'str'}},{'list':[1,2,3,'str',np.float(4)]}]:
             for serializer in self.serializers:
                 encoded = serializer.encode(data)
                 decoded = serializer.decode(encoded)
@@ -27,7 +35,5 @@ def test_data(self):
                 self.assertTrue(data==decoded,"{0}.encode: value: {1}".format(serializer.__class__.__name__,data))
 
-
-
 if __name__ == '__main__':
-    unittest.main()
\ No newline at end of file
+    unittest.main()
diff --git a/tests/test_server.py b/tests/test_server.py
new file mode 100644
index 0000000..4bd2fd6
--- /dev/null
+++ b/tests/test_server.py
@@ -0,0 +1,84 @@
+import logging
+import unittest
+from threading import Timer
+import os
+
+try:
+    import cv2
+except Exception:
+    # Travis Windows will fail here
+    pass
+
+
+import numpy as np
+from mlchain.base import ServeModel
+from mlchain.server.flask_server import FlaskServer
+from mlchain.server.grpc_server import GrpcServer
+from mlchain.server.quart_server import QuartServer
+from mlchain.decorators import except_serving
+
+logger = logging.getLogger()
+
+class Model():
+    """ Just a dummy model """
+
+    def predict(self, image: np.ndarray):
+        """
+        Resize input to 100 by 100.
+        Args:
+            image (numpy.ndarray): An input image.
+        Returns:
+            The image (np.ndarray) resized to 100 by 100.
+        """
+        image = cv2.resize(image, (100, 100))
+        return image
+
+    @except_serving
+    def dummy(self):
+        pass
+
+original_model = Model()
+
+def test_breaking_process(runner, port, wait_time=10):
+    from multiprocessing import Process
+    from threading import Timer
+    from os import kill, getpid
+    from signal import SIGINT
+
+    def background():
+        Timer(wait_time, lambda: kill(getpid(), SIGINT)).start()
+        runner.run(port=port, thread=1)
+
+    p = Process(target=background)
+    p.start()
+
+
+class TestServer(unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        unittest.TestCase.__init__(self, *args, **kwargs)
+        self.is_not_windows = os.name != 'nt'
+
+    def test_flask_server_init(self):
+        logger.info("Running flask server init test")
+        model = ServeModel(original_model)
+        flask_model = FlaskServer(model)
+        if self.is_not_windows:
+            test_breaking_process(flask_model, port=10001)
+
+    def test_quart_server_init(self):
+        logger.info("Running quart server init test")
+        model = ServeModel(original_model)
+        quart_model = QuartServer(model)
+        # if self.is_not_windows:
+        #     test_breaking_process(quart_model, port=10002)
+
+    def test_grpc_server_init(self):
+        logger.info("Running grpc server init test")
+        model = ServeModel(original_model)
+        grpc_model = GrpcServer(model)
+        # if self.is_not_windows:
+        #     test_breaking_process(grpc_model, port=10003)
+
+
+if __name__ == "__main__":
+    unittest.main()
\ No newline at end of file
diff --git a/tests/test_workflow.py b/tests/test_workflow.py
new file mode 100644
index 0000000..e0e90d4
--- /dev/null
+++ b/tests/test_workflow.py
@@ -0,0 +1,135 @@
+import logging
+import re
+import unittest
+import os
+import time
+
+from mlchain.workflows import Parallel, Task, Background, Pipeline, Step
+
+logger = logging.getLogger()
+
+class TestWorkflow(unittest.TestCase):
+    def __init__(self, *args, **kwargs):
+        unittest.TestCase.__init__(self, *args, **kwargs)
+
+    def test_mlchain_parallel(self):
+        input_list = [1, 2, 3, 4, 5]
+        def dummy_task(i):
+            time.sleep(0.1)
+            return i + 1
+
+        tasks = [Task(dummy_task, i) for i in input_list]
+        _ = Parallel(tasks, max_threads=0).run()
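+        # more edge cases: a negative max_threads and empty task lists should not raise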
+        _ = Parallel(tasks, max_threads=-1).run()
+        _ = Parallel([]).run()
+        _ = Parallel([], threading=False).run()
+        output = Parallel(tasks, max_threads=2).run(progress_bar=True)
+        logger.info(output)
+        assert output == [2, 3, 4, 5, 6]
+
+        output = Parallel(tasks, max_threads=2, threading=False).run(progress_bar=True)
+        logger.info(output)
+        assert output == [2, 3, 4, 5, 6]
+
+    def test_mlchain_parallel_pass_fail_job(self):
+        input_list = [1, 2, 3, 4, 5]
+        def dummy_task(i):
+            time.sleep(0.1)
+            if i == 3:
+                raise Exception('Job failed')
+            return i + 1
+
+        tasks = [Task(dummy_task, i) for i in input_list]
+        output = Parallel(tasks, max_threads=2, pass_fail_job=True, max_retries=2).run(progress_bar=True)
+        logger.info(output)
+        assert output == [2, 3, None, 5, 6]
+
+        output = Parallel(tasks, max_threads=2, pass_fail_job=True, threading=False, max_retries=2).run(progress_bar=True)
+        logger.info(output)
+        assert output == [2, 3, None, 5, 6]
+
+        try:
+            output = Parallel(tasks, max_threads=2, threading=False).run(progress_bar=True)
+        except Exception:
+            pass
+        else:
+            raise AssertionError("This is supposed to fail")
+
+        try:
+            output = Parallel(tasks, max_threads=2).run(progress_bar=True)
+        except Exception:
+            pass
+        else:
+            raise AssertionError("This is supposed to fail")
+
+    def test_mlchain_parallel_in_parallel(self):
+        input_list = [1, 2, 3, 4, 5]
+        def dummy_task(i):
+            def sub_task(j):
+                return j + 2
+            all_sub_task = [Task(sub_task, j) for j in range(i)]
+            sub_output = Parallel(all_sub_task, max_threads=2).run()
+            return sum(sub_output)
+
+        tasks = [Task(dummy_task, i) for i in input_list]
+        output = Parallel(tasks, max_threads=2, threading=False).run(progress_bar=True)
+        logger.info(output)
+        assert output == [2, 5, 9, 14, 20]
+
+    def test_mlchain_background(self):
+        x = []
+        def dummy_task(n):
+            for i in range(n):
+                x.append(i)
+        task = Task(dummy_task, 10)
+        background = Background(task).run()
+        time.sleep(1)
+        logger.info(x)
+        assert x == list(range(10))
+        background.stop()
+
+        background = Background(task, interval=0.1).run()
+        time.sleep(1)
+        logger.info(x)
+        assert x[:10] == list(range(10))
+        assert len(x) > 10
+        background.stop()
+
+    def test_mlchain_background_pass_fail_job(self):
+        x = []
+        def dummy_task():
+            raise Exception('This exception is expected')
+        task = Task(dummy_task)
+
+        background = Background(task, interval=0.01).run(pass_fail_job=True)
+        time.sleep(0.02)
+        logger.info(x)
+        background.stop()
+
+    def test_mlchain_async_task(self):
+        async def dummy_task(n):
+            return n+1
+        task = Task(dummy_task, 5)
+
+    def test_mlchain_pipeline(self):
+        def step_1(i):
+            time.sleep(0.001)
+            logger.info('step_1')
+            return i+1
+
+        def step_2(i):
+            time.sleep(0.001)
+            logger.info('step_2')
+            return i*2
+
+        def step_3(i):
+            time.sleep(0.001)
+            logger.info('step_3')
+            return i-2
+
+        pipeline = Pipeline(
+            Step(step_1, max_thread=2),
+            Step(step_2, max_thread=1),
+            Step(step_3, max_thread=1)
+        )
+        # inputs = range(4)
+        # results = pipeline.run(inputs)
+        # logger.info(results)
\ No newline at end of file