Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Improvement for MLChain 0.1.9 #30

Merged
merged 31 commits into from
Dec 29, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
04c9ff7
Resolve merge conflict
lamhoangtung Sep 29, 2020
67f5b2b
Compress CSS
lamhoangtung Sep 29, 2020
9578173
Compressed js
lamhoangtung Sep 29, 2020
68330e3
Compress more html
lamhoangtung Oct 1, 2020
f176e47
Detached SVG and compress final HTML file
lamhoangtung Oct 1, 2020
4745975
potential fix for #20
lamhoangtung Oct 1, 2020
8277e0b
Minor pipeline bug fixed
lamhoangtung Oct 1, 2020
add380c
Minor coverage test increase
lamhoangtung Oct 1, 2020
d2d8c04
Merge branch 'master' into linus_dev
lamhoangtung Oct 1, 2020
a190f05
More test for more coverage
lamhoangtung Oct 13, 2020
1e21c57
Raise some human readable exception when using CLI
lamhoangtung Nov 2, 2020
02bfc83
fixed #19
lamhoangtung Nov 4, 2020
5c3c8cc
🔖 Update mlconfig 🔖
meocong Dec 20, 2020
7edbe8d
🚀 🚀 🚀 Update mlchain 0.1.9 🚀 🚀 🚀
meocong Dec 25, 2020
a364bf8
Update requirements + Fixed sentry when no dsn
meocong Dec 25, 2020
00b1db5
🐛 Higher Priority for Sentry config using env vars + Fixed requiremen…
meocong Dec 25, 2020
cff1f4c
🎉 🎉 🎉 Handle exception + Fixed Sentry break Gunicorn + Better default…
meocong Dec 26, 2020
8f3c1e6
Merge branch 'linus_dev' into sentry
lamhoangtung Dec 26, 2020
b5535e9
🐛 Force value of version to sentry is string
meocong Dec 26, 2020
d4f1c4b
Merge branch 'sentry' of https://github.com/Techainer/mlchain-python …
meocong Dec 26, 2020
588c8ac
🐛 Improved capture_exception sentry + Fixed config and mlchain run
meocong Dec 26, 2020
ac524ef
🐛 Fixed handle client exception with new httpx
meocong Dec 26, 2020
1b5a1c5
Better handle exception with sentry
meocong Dec 27, 2020
f9f51ba
👕 Update MlchainError work with sentry
meocong Dec 28, 2020
bcd9073
✨ Change transaction id into UUID4 ✨
meocong Dec 28, 2020
93e7ab4
✨ Better handle exception when mlchain run
meocong Dec 28, 2020
237d74a
Minor improvement for non sentry initialization
lamhoangtung Dec 28, 2020
50f3bf8
Minor bug fixed
lamhoangtung Dec 28, 2020
f011f02
Minor function to set mlchain context id
lamhoangtung Dec 28, 2020
dd28a14
Init get GPU stats once
lamhoangtung Dec 28, 2020
bde3fc5
Default host to 0.0.0.0
lamhoangtung Dec 29, 2020
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 14 additions & 2 deletions mlchain/__init__.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,26 @@
# Parameters of MLchain
__version__ = "0.1.8rc1"
__version__ = "0.1.9"
HOST = "https://www.api.mlchain.ml"
WEB_HOST = HOST
API_ADDRESS = HOST
MODEL_ID = None
import ssl

try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context

from os import environ

environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
from mlchain.base.log import logger
from .context import mlchain_context

from .base.exceptions import *
from .config import mlconfig
from .config import mlconfig

23 changes: 18 additions & 5 deletions mlchain/base/exceptions.py
Original file line number Diff line number Diff line change
@@ -1,17 +1,22 @@
import os
import traceback
from .log import logger

from .log import logger, sentry_ignore_logger
from sentry_sdk import capture_exception
import logging
from sentry_sdk import add_breadcrumb
import re

class MlChainError(Exception):
    """Base class for all MLChain exceptions.

    Stores the message, an error code, and an HTTP status code, and logs
    the error through ``sentry_ignore_logger`` — a logger that Sentry is
    configured to ignore — so handled MLChain errors are not reported to
    Sentry twice (once here and once by the Sentry exception capture).
    """

    def __init__(self, msg, code='exception', status_code=500):
        super(MlChainError, self).__init__(msg)
        self.msg = msg
        self.message = msg  # kept for backward compatibility with .message users
        self.code = code
        self.status_code = status_code
        # NOTE: the PR diff residue showed both the old `logger.*` calls and
        # these `sentry_ignore_logger.*` calls; only the latter belong in the
        # merged code — keeping both would double-log every error.
        sentry_ignore_logger.error("[{0}]: {1}".format(code, msg))
        sentry_ignore_logger.debug(traceback.format_exc())

class MLChainAssertionError(MlChainError):
def __init__(self, msg, code="assertion", status_code=422):
Expand All @@ -26,3 +31,11 @@ def __init__(self, msg, code="serialization", status_code=422):
class MLChainUnauthorized(MlChainError):
    """Raised when a request lacks valid authentication (HTTP 401)."""

    def __init__(self, msg, code="unauthorized", status_code=401):
        super().__init__(msg, code, status_code)

class MLChainConnectionError(MlChainError):
    """Raised when a connection to a remote MLChain server fails (HTTP 500)."""

    def __init__(self, msg, code="connection_error", status_code=500):
        super().__init__(msg, code, status_code)

class MLChainTimeoutError(MlChainError):
    """Raised when a call to a remote MLChain server times out (HTTP 500)."""

    def __init__(self, msg, code="timeout", status_code=500):
        super().__init__(msg, code, status_code)
4 changes: 4 additions & 0 deletions mlchain/base/gunicorn_config.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
from mlchain.config import init_sentry

def post_worker_init(worker):
    """Gunicorn server hook: called in each worker process just after it boots.

    Initializes Sentry per worker rather than in the master process —
    presumably so the Sentry client is set up after Gunicorn forks
    (NOTE(review): matches the PR's "Fixed Sentry break Gunicorn" commit;
    confirm against the Sentry fork-safety guidance).
    """
    init_sentry()
22 changes: 18 additions & 4 deletions mlchain/base/log.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,17 +4,28 @@
"""
from contextlib import contextmanager
import re
import traceback
from traceback import StackSummary, extract_tb
import os
import sys
import logging
import traceback

# Sentry integration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.logging import ignore_logger

sentry_logging = LoggingIntegration(
level=logging.INFO, # Capture info and above as breadcrumbs
event_level=logging.ERROR # Send errors as events
)
ignore_logger("mlchain-server")
sentry_ignore_logger = logging.getLogger("mlchain-server")
# End sentry integration


def get_color(n):
    """Return the ANSI escape sequence selecting foreground color *n* (0-7)."""
    return f"\x1b[3{n}m"


class MultiLine(logging.Formatter):
def __init__(self, fmt=None, datefmt=None, style='%', newline=None):
logging.Formatter.__init__(self, fmt, datefmt, style)
Expand Down Expand Up @@ -60,7 +71,7 @@ def except_handler():
sys.excepthook = sys.__excepthook__


def format_exc(name='mlchain', tb=None, exception=None):
def format_exc(name='mlchain', tb=None, exception=None, return_str=True):
if exception is None:
formatted_lines = traceback.format_exc().splitlines()
else:
Expand All @@ -78,4 +89,7 @@ def format_exc(name='mlchain', tb=None, exception=None):
output = []
for x in formatted_lines:
output.append(x)
return "\n".join(output) + "\n"

if return_str:
return "\n".join(output)
return output
4 changes: 2 additions & 2 deletions mlchain/base/serve_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -311,7 +311,7 @@ def call_function(self, function_name_, id_=None, *args, **kwargs):
function_name, uid = function_name_, id_
if function_name is None:
raise AssertionError("You need to specify the function name (API name)")
mlchain_context['context_id'] = uid

if isinstance(function_name, str):
if len(function_name) == 0:
if hasattr(self.model, '__call__') and callable(getattr(self.model, '__call__')):
Expand Down Expand Up @@ -339,7 +339,7 @@ async def call_async_function(self, function_name_, id_=None, *args, **kwargs):
function_name, uid = function_name_, id_
if function_name is None:
raise MLChainAssertionError("You need to specify the function name (API name)")
mlchain_context['context_id'] = uid

if isinstance(function_name, str):
if len(function_name) == 0:
if hasattr(self.model, '__call__') and callable(getattr(self.model, '__call__')):
Expand Down
3 changes: 3 additions & 0 deletions mlchain/base/wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@ def load_config(self):
for key, value in config.items():
self.cfg.set(key.lower(), value)

from mlchain.base.gunicorn_config import post_worker_init
self.cfg.set("post_worker_init", post_worker_init)

def load(self):
return self.application

Expand Down
22 changes: 0 additions & 22 deletions mlchain/cli/config.yaml

This file was deleted.

28 changes: 15 additions & 13 deletions mlchain/cli/init.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,20 @@
import os
import click
from mlchain import logger
import os

root_path = os.path.dirname(__file__)


@click.command("init", short_help="Init base config to run server.")
@click.argument('file', nargs=1, required=False, default='mlconfig.yaml')
def init_command(file):
if file is None:
file = 'mlconfig.yaml' # pragma: no cover
if os.path.exists(file):
logger.warning("File {} exists. Please change name file".format(file))
else:
with open(file, 'wb') as fp:
with open(os.path.join(root_path, 'config.yaml'), 'rb') as fr:
fp.write(fr.read())
def init_command():
    """Copy the bundled template files into the current working directory.

    For each template (mlconfig.yaml, mlchain_server.py): if a file with the
    same name already exists, ask the user before overwriting; otherwise copy
    it unconditionally.
    """
    def create_file(file):
        # Copy the packaged template over (or into) the local file.
        # Use context managers so both handles are closed even on error
        # (the original read the template via a bare open(...).read(),
        # leaking the file handle).
        with open(os.path.join(root_path, file), 'rb') as src:
            with open(file, 'wb') as dst:
                dst.write(src.read())

    ALL_INIT_FILES = ['mlconfig.yaml', 'mlchain_server.py']
    for file in ALL_INIT_FILES:
        if os.path.exists(file):
            if click.confirm('File {0} is exist, Do you want to force update?'.format(file)):
                create_file(file)
        else:
            create_file(file)

    click.secho('Mlchain initalization is done!', blink=True, bold=True)
20 changes: 20 additions & 0 deletions mlchain/cli/mlchain_server.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
"""
THE BASE MLCHAIN SERVER
"""
# Import mlchain
from mlchain.base import ServeModel
from mlchain import mlconfig


# IMPORT YOUR CLASS HERE - YOU ONLY CARE THIS
from main import Test # Import your class here

model = Test() # Init your class first
# END YOUR WORK HERE


# Wrap your class by mlchain ServeModel
serve_model = ServeModel(model)

# THEN GO TO CONSOLE:
# mlchain run -c mlconfig.yaml
54 changes: 54 additions & 0 deletions mlchain/cli/mlconfig.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
# Service Config
name: mlchain-server # Name of service
version: '0.0.1' # Version of service
entry_file: mlchain_server.py # Python file contains object ServeModel

# Host and Port Config
host: 0.0.0.0 # Host of service
port: 8001 # Port of the service

# Server config
server: flask # Option flask or quart or grpc
wrapper: None # Option None or gunicorn or hypercorn
cors: true # Auto enable CORS
static_folder: # static folder for TemplateResponse
static_url_path: # static url path for TemplateResponse
template_folder: # template folder for TemplateResponse

# Gunicorn config - Use gunicorn for general case
gunicorn:
timeout: 200 # Each request may run for at most 200 seconds by default; the worker is restarted when this limit is exceeded
keepalive: 3 # Keep idle client connections alive for 3 seconds by default
max_requests: 0 # Maximum serving requests until workers restart to handle over memory in Python
workers: 1 # Number of duplicate workers
threads: 1 # Number of simultaneous threads in workers
worker_class: 'gthread' # Worker class gthread is fit with all case. Can use 'uvicorn.workers.UvicornWorker' which be higher performance sometimes

# Hypercorn config - Use hypercorn for async server with Quart
hypercorn:
timeout: 200 # The requests will be maximum 200 seconds in default, then when the requests is done, the worker will be restarted
keepalive: 3 # Keep requests alive when inactive with client in 3 seconds default
threads: 50 # Number of simultaneous threads in workers. Default: 50. Remember that some models can not call simultaneous, so you can use @non_thread() decorator to the function.
worker_class: 'uvloop' # Worker class uvloop is fit with all case.

bind:
- 'unix:/tmp/gunicorn.sock' # Using sock to make gunicorn faster

# Sentry logging, Sentry will be run when the worker is already initialized
sentry:
dsn: None # Sentry DSN (URI) of the project, or set the SENTRY_DSN environment variable instead
traces_sample_rate: 0.1 # Default log 0.1
sample_rate: 1.0 # Default 1.0
drop_modules: True # Drop python requirements to lower the size of log

# Mlconfig - Use these mode and config or env to adaptive your code
# You can import mlconfig and use as variable. Ex: mlconfig.debug
mode:
default: default # The default mode
env:
default: # All variable in default mode will be existed in other mode
test: "Hello"
dev: # Development mode
debug: True
prod: # Production mode
debug: False
Loading