style: Format code and sort imports using black and isort
Paulooh007 committed Sep 30, 2023
1 parent 0aa213d commit 1a9937c
Showing 2 changed files with 13 additions and 10 deletions.
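
The exact formatter invocations are not recorded in the commit; assuming the tools were run with default settings from the repository root, the equivalent commands would be roughly:

    python -m isort docker/
    python -m black docker/

isort regroups the imports (standard library first, then third-party packages), while black normalizes quoting, line wrapping, and blank lines; those two passes account for every hunk shown below.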
docker/app.py (21 changes: 12 additions & 9 deletions)

@@ -2,27 +2,30 @@
 # -*- coding: utf-8 -*-
 import os
 import socket
+
+import numpy as np
 from flask import Flask, jsonify, request
 from laser_encoders import initialize_encoder, initialize_tokenizer
-import numpy as np
 
 app = Flask(__name__)
 
+
 @app.route("/")
 def root():
     print("/")
-    html = "<h3>Hello {name}!</h3>" \
-           "<b>Hostname:</b> {hostname}<br/>"
+    html = "<h3>Hello {name}!</h3>" "<b>Hostname:</b> {hostname}<br/>"
     return html.format(name=os.getenv("LASER", "world"), hostname=socket.gethostname())
 
 
 @app.route("/vectorize", methods=["GET"])
 def vectorize():
-    content = request.args.get('q')
-    lang = request.args.get('lang', 'en')  # Default to English if 'lang' is not provided
+    content = request.args.get("q")
+    lang = request.args.get(
+        "lang", "en"
+    )  # Default to English if 'lang' is not provided
 
     if content is None:
-        return jsonify({'error': 'Missing input content'})
+        return jsonify({"error": "Missing input content"})
 
     encoder = initialize_encoder(lang=lang)
     tokenizer = initialize_tokenizer(lang=lang)

@@ -34,9 +37,9 @@ def vectorize():
     embeddings = encoder.encode_sentences([tokenized_sentence])
     embeddings_list = embeddings.tolist()
 
-    body = {'content': content, 'embedding': embeddings_list}
+    body = {"content": content, "embedding": embeddings_list}
     return jsonify(body)
 
-if __name__ == "__main__":
-    app.run(debug=True, port=80, host='0.0.0.0')
-
+
+if __name__ == "__main__":
+    app.run(debug=True, port=80, host="0.0.0.0")
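
The reformatting does not change the endpoint's behaviour. As a quick sanity check, a minimal client sketch follows; the host and port are assumptions taken from the app.run(debug=True, port=80, host="0.0.0.0") call above, and the query text is an arbitrary example.

    # Minimal sketch of a client for the /vectorize endpoint defined above.
    # Assumes the Flask app is reachable at http://localhost:80 and that the
    # `requests` package is installed.
    import requests

    resp = requests.get(
        "http://localhost:80/vectorize",
        params={"q": "hello world", "lang": "en"},  # "lang" falls back to "en" if omitted
    )
    data = resp.json()

    if "error" in data:
        print(data["error"])  # "Missing input content" is returned when "q" is absent
    else:
        # "embedding" is the nested list produced by embeddings.tolist();
        # the outer list holds one vector per input sentence.
        print(data["content"], len(data["embedding"][0]))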
docker/encode.py (2 changes: 1 addition & 1 deletion)

@@ -6,4 +6,4 @@
 encoder = initialize_encoder(lang="yor")
 embeddings = encoder.encode_sentences([tokenized_sentence])
 
-print("Embeddings Shape", embeddings.shape)
\ No newline at end of file
+print("Embeddings Shape", embeddings.shape)

(The two print lines are identical; the only change is that the reformatted file now ends with a trailing newline.)
