"
+}
diff --git a/github/requirements.txt b/github/requirements.txt
new file mode 100644
index 0000000..555cea3
--- /dev/null
+++ b/github/requirements.txt
@@ -0,0 +1,2 @@
+PyGithub==1.53
+requests==2.24.0
diff --git a/github/utils.py b/github/utils.py
new file mode 100644
index 0000000..2b904db
--- /dev/null
+++ b/github/utils.py
@@ -0,0 +1,180 @@
+import json
+import os
+import sys
+import zipfile
+
+from config import *
+from github import Github
+
+
+def check_for_errors():
+ """
+    Checks whether any errors have been recorded so far during this workflow
+    step. Returns (True, None) if there are none, otherwise (False, error_message).
+ """
+ if os.getenv("CHALLENGE_ERRORS") == "False":
+ return True, None
+ return False, os.getenv("CHALLENGE_ERRORS")
+
+
+def check_if_pull_request():
+ """
+ Returns True if the workflow triggering event is a pull request
+ """
+    return GITHUB_EVENT_NAME == "pull_request"
+
+
+def check_if_merge_or_commit():
+ """
+ Returns True if the workflow triggering event is either a merge or a direct commit
+ """
+    return GITHUB_EVENT_NAME == "push"
+
+
+def add_pull_request_comment(github_auth_token, repo_name, pr_number, comment_body):
+ """
+ Adds a comment to a pull request
+ Arguments:
+ github_auth_token {str}: The auth token of the github user
+ repo_name {str}: The name of the repository
+        pr_number {int}: The number of the pull request to comment on
+ comment_body {str}: The body of the comment
+ """
+ try:
+ client = Github(github_auth_token)
+ repo = client.get_user().get_repo(repo_name)
+ pull = repo.get_pull(pr_number)
+ pull.create_issue_comment(comment_body)
+ except Exception as e:
+ print("There was an error while commenting on the Pull request: {}".format(e))
+
+
+def create_github_repository_issue(
+ github_auth_token, repo_name, issue_title, issue_body
+):
+ """
+ Creates an issue in a given repository
+
+ Arguments:
+ github_auth_token {str}: The auth token of the github user
+ repo_name {str}: The name of the repository
+        issue_title {str}: The title of the issue to be created
+ issue_body {str}: The body of the issue to be created
+ """
+ try:
+ client = Github(github_auth_token)
+ repo = client.get_user().get_repo(repo_name)
+        repo.create_issue(issue_title, issue_body)
+ except Exception as e:
+ print("There was an error while creating an issue: {}".format(e))
+
+
+def create_challenge_zip_file(challenge_zip_file_path, ignore_dirs, ignore_files):
+ """
+ Creates the challenge zip file at a given path
+
+ Arguments:
+ challenge_zip_file_path {str}: The relative path of the created zip file
+ ignore_dirs {list}: The list of directories to exclude from the zip file
+ ignore_files {list}: The list of files to exclude from the zip file
+ """
+ working_dir = (
+ os.getcwd()
+    )  # Special case for GitHub; for local runs, use os.path.dirname(os.getcwd())
+
+ # Creating evaluation_script.zip file
+ eval_script_dir = working_dir + "/evaluation_script"
+ eval_script_zip = zipfile.ZipFile(
+ "evaluation_script.zip", "w", zipfile.ZIP_DEFLATED
+ )
+ for root, dirs, files in os.walk(eval_script_dir):
+ for file in files:
+ file_name = os.path.join(root, file)
+ name_in_zip_file = (
+ file_name[len(eval_script_dir) + 1 :]
+ if file_name.startswith(eval_script_dir)
+ else file_name
+ )
+ eval_script_zip.write(file_name, name_in_zip_file)
+ eval_script_zip.close()
+
+ # Creating the challenge_config.zip file
+ zipf = zipfile.ZipFile(challenge_zip_file_path, "w", zipfile.ZIP_DEFLATED)
+ for root, dirs, files in os.walk(working_dir):
+ parents = root.split("/")
+ if not set(parents) & set(ignore_dirs):
+ for file in files:
+ if file not in ignore_files:
+ file_name = os.path.join(root, file)
+ name_in_zip_file = (
+ file_name[len(working_dir) + 1 :]
+ if file_name.startswith(working_dir)
+ else file_name
+ )
+ zipf.write(file_name, name_in_zip_file)
+ zipf.close()
+
+
+def get_request_header(token):
+ """
+    Returns the user auth token formatted as a request header for EvalAI requests
+
+ Arguments:
+ token {str}: The user token to gain access to EvalAI
+ """
+ header = {"Authorization": "Bearer {}".format(token)}
+ return header
+
+
+def load_host_configs(config_path):
+ """
+ Loads token to be used for sending requests
+
+ Arguments:
+ config_path {str}: The path of host configs having the user token, team id and the EvalAI host url
+ """
+ config_path = "{}/{}".format(os.getcwd(), config_path)
+ if os.path.exists(config_path):
+ with open(config_path, "r") as f:
+ try:
+ data = f.read()
+ except (OSError, IOError) as e:
+                print("\nAn error occurred while loading the host configs: {}".format(e))
+ sys.exit(1)
+ data = json.loads(data)
+ host_auth_token = data["token"]
+ challenge_host_team_pk = data["team_pk"]
+ evalai_host_url = data["evalai_host_url"]
+ return [host_auth_token, challenge_host_team_pk, evalai_host_url]
+ else:
+ error_message = "\nThe host config json file is not present. Please include an auth token, team_pk & evalai_host_url in it: {}".format(
+ config_path
+ )
+ print(error_message)
+ os.environ["CHALLENGE_ERRORS"] = error_message
+ return False
+
+
+def validate_token(response):
+ """
+ Function to check if the authentication token provided by user is valid or not
+
+ Arguments:
+ response {dict}: The response json dict sent back from EvalAI
+ """
+ error = None
+ if "detail" in response:
+ if response["detail"] == "Invalid token":
+ error = "\nThe authentication token you are using isn't valid. Please generate it again.\n"
+ print(error)
+ os.environ["CHALLENGE_ERRORS"] = error
+ return False
+ if response["detail"] == "Token has expired":
+ error = "\nSorry, the token has expired. Please generate it again.\n"
+ print(error)
+ os.environ["CHALLENGE_ERRORS"] = error
+ return False
+ return True
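+
+
+# Illustrative usage, with response shapes inferred from the checks above:
+#   validate_token({"detail": "Invalid token"})  # returns False
+#   validate_token({"id": 123})                  # returns True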
diff --git a/logo.jpg b/logo.jpg
new file mode 100644
index 0000000..1a0273e
Binary files /dev/null and b/logo.jpg differ
diff --git a/remote_challenge_evaluation/README.md b/remote_challenge_evaluation/README.md
new file mode 100644
index 0000000..7388db3
--- /dev/null
+++ b/remote_challenge_evaluation/README.md
@@ -0,0 +1,17 @@
+## How to setup remote challenge evaluation using EvalAI :rocket:
+If you are looking to set up remote challenge evaluation on EvalAI, you are at the right place. Follow the instructions below to get started.
+
+1. Create a challenge on EvalAI using [GitHub](https://github.com/Cloud-CV/EvalAI-Starters#create-challenge-using-github) based challenge creation.
+
+2. Once the challenge is successfully created, please email the EvalAI admin at team@cloudcv.org so that they can send you the `challenge_pk` and `queue_name`.
+
+3. After receiving the details from the admin, add them to the `evaluation_script_starter.py`.
+
+4. Create a new Python 3 virtual environment for installing the worker requirements.
+
+5. Install the requirements using `pip install -r requirements.txt`.
+
+6. Run the worker using `python -m evaluation_script_starter`.
+
+## Facing problems in setting up evaluation?
+
+Please feel free to open an issue on our [GitHub Repository](https://github.com/Cloud-CV/EvalAI-Starter/issues) or contact us at team@cloudcv.org.
diff --git a/remote_challenge_evaluation/eval_ai_interface.py b/remote_challenge_evaluation/eval_ai_interface.py
new file mode 100644
index 0000000..4f48f91
--- /dev/null
+++ b/remote_challenge_evaluation/eval_ai_interface.py
@@ -0,0 +1,148 @@
+import logging
+
+import requests
+
+logger = logging.getLogger(__name__)
+
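+# Relative API endpoints; each "{}" placeholder is filled in with the queue
+# name, submission pk, phase pk, or challenge pk before the request is sent.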
+URLS = {
+ "get_message_from_sqs_queue": "/api/jobs/challenge/queues/{}/",
+ "get_submission_by_pk": "/api/jobs/submission/{}",
+ "get_challenge_phase_by_pk": "/api/challenges/challenge/phase/{}",
+ "delete_message_from_sqs_queue": "/api/jobs/queues/{}/",
+ "update_submission": "/api/jobs/challenge/{}/update_submission/",
+}
+
+
+class EvalAI_Interface:
+ def __init__(self, AUTH_TOKEN, EVALAI_API_SERVER, QUEUE_NAME, CHALLENGE_PK):
+ """Class to initiate call to EvalAI backend
+
+ Arguments:
+ AUTH_TOKEN {[string]} -- The authentication token corresponding to EvalAI
+            EVALAI_API_SERVER {[string]} -- The EvalAI API server URL (set to https://eval.ai for the production server)
+ QUEUE_NAME {[string]} -- Unique queue name corresponding to every challenge
+ CHALLENGE_PK {[integer]} -- Primary key corresponding to a challenge
+ """
+
+ self.AUTH_TOKEN = AUTH_TOKEN
+ self.EVALAI_API_SERVER = EVALAI_API_SERVER
+ self.QUEUE_NAME = QUEUE_NAME
+ self.CHALLENGE_PK = CHALLENGE_PK
+
+ def get_request_headers(self):
+ """Function to get the header of the EvalAI request in proper format
+
+ Returns:
+ [dict]: Authorization header
+ """
+ headers = {"Authorization": "Bearer {}".format(self.AUTH_TOKEN)}
+ return headers
+
+ def make_request(self, url, method, data=None):
+ """Function to make request to EvalAI interface
+
+ Args:
+ url ([str]): URL of the request
+ method ([str]): Method of the request
+ data ([dict], optional): Data of the request. Defaults to None.
+
+ Returns:
+ [JSON]: JSON response data
+ """
+ headers = self.get_request_headers()
+ try:
+ response = requests.request(
+ method=method, url=url, headers=headers, data=data
+ )
+ response.raise_for_status()
+ except requests.exceptions.RequestException:
+ logger.info("The server isn't able establish connection with EvalAI")
+ raise
+ return response.json()
+
+ def return_url_per_environment(self, url):
+ """Function to get the URL for API
+
+ Args:
+ url ([str]): API endpoint url to which the request is to be made
+
+ Returns:
+ [str]: API endpoint url with EvalAI base url attached
+ """
+ base_url = "{0}".format(self.EVALAI_API_SERVER)
+ url = "{0}{1}".format(base_url, url)
+ return url
+
+ def get_message_from_sqs_queue(self):
+ """Function to get the message from SQS Queue
+
+ Docs: https://eval.ai/api/docs/#operation/get_submission_message_from_queue
+
+ Returns:
+ [JSON]: JSON response data
+ """
+ url = URLS.get("get_message_from_sqs_queue").format(self.QUEUE_NAME)
+ url = self.return_url_per_environment(url)
+ response = self.make_request(url, "GET")
+ return response
+
+ def delete_message_from_sqs_queue(self, receipt_handle):
+ """Function to delete the submission message from the queue
+
+ Docs: https://eval.ai/api/docs/#operation/delete_submission_message_from_queue
+
+ Args:
+ receipt_handle ([str]): Receipt handle of the message to be deleted
+
+ Returns:
+ [JSON]: JSON response data
+ """
+ url = URLS.get("delete_message_from_sqs_queue").format(self.QUEUE_NAME)
+ url = self.return_url_per_environment(url)
+ data = {"receipt_handle": receipt_handle}
+ response = self.make_request(url, "POST", data)
+ return response
+
+ def update_submission_data(self, data):
+ """Function to update the submission data on EvalAI
+
+ Docs: https://eval.ai/api/docs/#operation/update_submission
+
+ Args:
+ data ([dict]): Data to be updated
+
+ Returns:
+ [JSON]: JSON response data
+ """
+ url = URLS.get("update_submission").format(self.CHALLENGE_PK)
+ url = self.return_url_per_environment(url)
+ response = self.make_request(url, "PUT", data=data)
+ return response
+
+ def update_submission_status(self, data):
+ """
+
+ Docs: https://eval.ai/api/docs/#operation/update_submission
+
+ Args:
+ data ([dict]): Data to be updated
+
+ Returns:
+ [JSON]: JSON response data
+ """
+ url = URLS.get("update_submission").format(self.CHALLENGE_PK)
+ url = self.return_url_per_environment(url)
+ response = self.make_request(url, "PATCH", data=data)
+ return response
+
+ def get_submission_by_pk(self, submission_pk):
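+        """Function to get the submission details for a given submission pk
+
+        Args:
+            submission_pk ([int]): Primary key of the submission
+
+        Returns:
+            [JSON]: JSON response data
+        """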
+ url = URLS.get("get_submission_by_pk").format(submission_pk)
+ url = self.return_url_per_environment(url)
+ response = self.make_request(url, "GET")
+ return response
+
+ def get_challenge_phase_by_pk(self, phase_pk):
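+        """Function to get the challenge phase details for a given phase pk
+
+        Args:
+            phase_pk ([int]): Primary key of the challenge phase
+
+        Returns:
+            [JSON]: JSON response data
+        """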
+ url = URLS.get("get_challenge_phase_by_pk").format(phase_pk)
+ url = self.return_url_per_environment(url)
+ response = self.make_request(url, "GET")
+ return response
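+
+
+# A minimal usage sketch (the credential values are hypothetical placeholders):
+#
+#   evalai = EvalAI_Interface(
+#       AUTH_TOKEN="<evalai_auth_token>",
+#       EVALAI_API_SERVER="https://eval.ai",
+#       QUEUE_NAME="<challenge_queue_name>",
+#       CHALLENGE_PK=1,
+#   )
+#   message = evalai.get_message_from_sqs_queue()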
diff --git a/remote_challenge_evaluation/evaluate.py b/remote_challenge_evaluation/evaluate.py
new file mode 100644
index 0000000..297f469
--- /dev/null
+++ b/remote_challenge_evaluation/evaluate.py
@@ -0,0 +1,76 @@
+def evaluate(user_submission_file, phase_codename, test_annotation_file=None, **kwargs):
+    """
+ Evaluates the submission for a particular challenge phase and returns score
+ Arguments:
+ `user_submission_file`: Path to file submitted by the user
+ `phase_codename`: Phase to which submission is made
+
+        `test_annotation_file`: Path to test_annotation_file on the server
+        We recommend setting a default `test_annotation_file` or using `phase_codename`
+        to select the appropriate file. For example, you could load the test
+        annotation file for the current phase as:
+ ```
+        test_annotation_file = json.load(open("{phase_codename}_path", "r"))
+ ```
+        `**kwargs`: keyword arguments that contain additional submission
+        metadata that challenge hosts can use to send Slack notifications.
+ You can access the submission metadata
+ with kwargs['submission_metadata']
+ Example: A sample submission metadata can be accessed like this:
+ >>> print(kwargs['submission_metadata'])
+        {
+            'status': 'running',
+            'when_made_public': None,
+            'participant_team': 5,
+            'input_file': 'https://abc.xyz/path/to/submission/file.json',
+            'execution_time': '123',
+            'publication_url': 'ABC',
+            'challenge_phase': 1,
+            'created_by': 'ABC',
+            'stdout_file': 'https://abc.xyz/path/to/stdout/file.json',
+            'method_name': 'Test',
+            'stderr_file': 'https://abc.xyz/path/to/stderr/file.json',
+            'participant_team_name': 'Test Team',
+            'project_url': 'http://foo.bar',
+            'method_description': 'ABC',
+            'is_public': False,
+            'submission_result_file': 'https://abc.xyz/path/result/file.json',
+            'id': 123,
+            'submitted_at': '2017-03-20T19:22:03.880652Z'
+        }
+ """
+    print("Starting Evaluation.....")
+
+ '''
+ # Load test annotation file for current phase
+    test_annotation_file = json.load(open("{phase_codename}_path", "r"))
+ '''
+ output = {}
+ if phase_codename == "dev":
+ print("Evaluating for Dev Phase")
+ output["result"] = [
+ {
+ "split": "train_split",
+ "show_to_participant": True,
+ "accuracies": {"Metric1": 90},
+ },
+ ]
+ print("Completed evaluation for Dev Phase")
+ elif phase_codename == "test":
+ print("Evaluating for Test Phase")
+ output["result"] = [
+ {
+ "split": "train_split",
+ "show_to_participant": True,
+ "accuracies": {"Metric1": 90},
+ },
+ {
+ "split": "test_split",
+ "show_to_participant": False,
+ "accuracies": {"Metric1": 50, "Metric2": 40},
+ },
+ ]
+ print("Completed evaluation for Test Phase")
+ return output
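+
+
+# A hypothetical local check of the output shape produced above:
+#   print(evaluate("submission.json", "dev"))
+#   # {'result': [{'split': 'train_split', 'show_to_participant': True,
+#   #              'accuracies': {'Metric1': 90}}]}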
diff --git a/remote_challenge_evaluation/main.py b/remote_challenge_evaluation/main.py
new file mode 100644
index 0000000..932ef88
--- /dev/null
+++ b/remote_challenge_evaluation/main.py
@@ -0,0 +1,108 @@
+import json
+import os
+import time
+
+import requests
+
+from eval_ai_interface import EvalAI_Interface
+from evaluate import evaluate
+
+# Remote Evaluation Meta Data
+# See https://evalai.readthedocs.io/en/latest/evaluation_scripts.html#writing-remote-evaluation-script
+auth_token = os.environ["AUTH_TOKEN"]
+evalai_api_server = os.environ["API_SERVER"]
+queue_name = os.environ["QUEUE_NAME"]
+challenge_pk = os.environ["CHALLENGE_PK"]
+save_dir = os.environ.get("SAVE_DIR", "./")
+
+
+def download(submission, save_dir):
+ response = requests.get(submission["input_file"])
+ submission_file_path = os.path.join(
+ save_dir, submission["input_file"].split("/")[-1]
+ )
+ with open(submission_file_path, "wb") as f:
+ f.write(response.content)
+ return submission_file_path
+
+
+def update_running(evalai, submission_pk):
+ status_data = {
+ "submission": submission_pk,
+ "submission_status": "RUNNING",
+ }
+    evalai.update_submission_status(status_data)
+
+
+def update_failed(
+ evalai, phase_pk, submission_pk, submission_error, stdout="", metadata=""
+):
+ submission_data = {
+ "challenge_phase": phase_pk,
+ "submission": submission_pk,
+ "stdout": stdout,
+ "stderr": submission_error,
+ "submission_status": "FAILED",
+ "metadata": metadata,
+ }
+    evalai.update_submission_data(submission_data)
+
+
+def update_finished(
+ evalai,
+ phase_pk,
+ submission_pk,
+ result,
+ submission_error="",
+ stdout="",
+ metadata="",
+):
+ submission_data = {
+ "challenge_phase": phase_pk,
+ "submission": submission_pk,
+ "stdout": stdout,
+ "stderr": submission_error,
+ "submission_status": "FINISHED",
+ "result": result,
+ "metadata": metadata,
+ }
+    evalai.update_submission_data(submission_data)
+
+
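+# The polling loop below drives each submission through its lifecycle:
+# "submitted" -> RUNNING -> FINISHED or FAILED. Messages for submissions that
+# are already finished, failed, or cancelled are simply deleted from the queue.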
+if __name__ == "__main__":
+ evalai = EvalAI_Interface(auth_token, evalai_api_server, queue_name, challenge_pk)
+
+ while True:
+ # Get the message from the queue
+ message = evalai.get_message_from_sqs_queue()
+ message_body = message.get("body")
+ if message_body:
+ submission_pk = message_body.get("submission_pk")
+ challenge_pk = message_body.get("challenge_pk")
+ phase_pk = message_body.get("phase_pk")
+ # Get submission details -- This will contain the input file URL
+ submission = evalai.get_submission_by_pk(submission_pk)
+ challenge_phase = evalai.get_challenge_phase_by_pk(phase_pk)
+ if (
+ submission.get("status") == "finished"
+ or submission.get("status") == "failed"
+ or submission.get("status") == "cancelled"
+ ):
+ message_receipt_handle = message.get("receipt_handle")
+ evalai.delete_message_from_sqs_queue(message_receipt_handle)
+
+ else:
+ if submission.get("status") == "submitted":
+ update_running(evalai, submission_pk)
+ submission_file_path = download(submission, save_dir)
+ try:
+ results = evaluate(
+ submission_file_path, challenge_phase["codename"]
+ )
+ update_finished(
+ evalai, phase_pk, submission_pk, json.dumps(results["result"])
+ )
+ except Exception as e:
+ update_failed(evalai, phase_pk, submission_pk, str(e))
+ # Poll challenge queue for new submissions
+ time.sleep(60)
diff --git a/remote_challenge_evaluation/requirements.txt b/remote_challenge_evaluation/requirements.txt
new file mode 100644
index 0000000..fd7d3e0
--- /dev/null
+++ b/remote_challenge_evaluation/requirements.txt
@@ -0,0 +1 @@
+requests==2.25.1
\ No newline at end of file
diff --git a/run.sh b/run.sh
new file mode 100755
index 0000000..0e80046
--- /dev/null
+++ b/run.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Remove already existing zip files
+rm -f evaluation_script.zip
+rm -f challenge_config.zip
+
+# Create new zip configuration according to the updated code
+cd evaluation_script
+zip -r ../evaluation_script.zip * -x "*.DS_Store"
+cd ..
+zip -r challenge_config.zip * -x "*.DS_Store" -x "evaluation_script/*" -x "*.git" -x "run.sh" -x "code_upload_challenge_evaluation/*" -x "remote_challenge_evaluation/*" -x "worker/*" -x "challenge_data/*" -x "github/*" -x ".github/*" -x "README.md"
diff --git a/submission.json b/submission.json
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/submission.json
@@ -0,0 +1 @@
+{}
diff --git a/templates/challenge_phase_1_description.html b/templates/challenge_phase_1_description.html
new file mode 100755
index 0000000..98907f5
--- /dev/null
+++ b/templates/challenge_phase_1_description.html
@@ -0,0 +1 @@
+"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?"
\ No newline at end of file
diff --git a/templates/challenge_phase_2_description.html b/templates/challenge_phase_2_description.html
new file mode 100755
index 0000000..7de79f9
--- /dev/null
+++ b/templates/challenge_phase_2_description.html
@@ -0,0 +1 @@
+"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?"
\ No newline at end of file
diff --git a/templates/description.html b/templates/description.html
new file mode 100755
index 0000000..2ee4109
--- /dev/null
+++ b/templates/description.html
@@ -0,0 +1,3 @@
+"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?"
+
+"Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur?"
diff --git a/templates/evaluation_details.html b/templates/evaluation_details.html
new file mode 100755
index 0000000..14bf424
--- /dev/null
+++ b/templates/evaluation_details.html
@@ -0,0 +1 @@
+"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."
diff --git a/templates/submission_guidelines.html b/templates/submission_guidelines.html
new file mode 100755
index 0000000..491fc70
--- /dev/null
+++ b/templates/submission_guidelines.html
@@ -0,0 +1 @@
+Submit any blank file here to see a random number generated for your submission. If you get lucky, you might reach the top of the leaderboard.
diff --git a/templates/terms_and_conditions.html b/templates/terms_and_conditions.html
new file mode 100755
index 0000000..12e9f60
--- /dev/null
+++ b/templates/terms_and_conditions.html
@@ -0,0 +1 @@
+Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
diff --git a/worker/__init__.py b/worker/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/worker/run.py b/worker/run.py
new file mode 100644
index 0000000..2c8dc10
--- /dev/null
+++ b/worker/run.py
@@ -0,0 +1,61 @@
+import importlib
+import os
+import sys
+
+
+def get_curr_working_dir():
+ curr_working_dir = os.getcwd()
+ return curr_working_dir
+
+
+def run():
+ current_working_directory = get_curr_working_dir()
+ sys.path.append("{}".format(current_working_directory))
+ sys.path.append("{}/challenge_data/challenge_1".format(current_working_directory))
+
+ challenge_id = 1
+ challenge_phase = "test" # Add the challenge phase codename to be tested
+ annotation_file_path = "{}/annotations/test_annotations_testsplit.json".format(
+ current_working_directory
+ ) # Add the test annotation file path
+ user_submission_file_path = "{}/submission.json".format(
+ current_working_directory
+ ) # Add the sample submission file path
+
+ CHALLENGE_IMPORT_STRING = "challenge_data.challenge_1"
+ challenge_module = importlib.import_module(CHALLENGE_IMPORT_STRING)
+
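+    # Map the challenge id to its imported evaluation module, mimicking how the
+    # EvalAI worker looks up the evaluation script for a submission.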
+ EVALUATION_SCRIPTS = {}
+ EVALUATION_SCRIPTS[challenge_id] = challenge_module
+ print("Trying to evaluate")
+    submission_metadata = {
+        "status": "running",
+        "when_made_public": None,
+        "participant_team": 5,
+        "input_file": "https://abc.xyz/path/to/submission/file.json",
+        "execution_time": "123",
+        "publication_url": "ABC",
+        "challenge_phase": 1,
+        "created_by": "ABC",
+        "stdout_file": "https://abc.xyz/path/to/stdout/file.json",
+        "method_name": "Test",
+        "stderr_file": "https://abc.xyz/path/to/stderr/file.json",
+        "participant_team_name": "Test Team",
+        "project_url": "http://foo.bar",
+        "method_description": "ABC",
+        "is_public": False,
+        "submission_result_file": "https://abc.xyz/path/result/file.json",
+        "id": 123,
+        "submitted_at": "2017-03-20T19:22:03.880652Z",
+    }
+ EVALUATION_SCRIPTS[challenge_id].evaluate(
+ annotation_file_path,
+ user_submission_file_path,
+ challenge_phase,
+ submission_metadata=submission_metadata,
+ )
+ print("Evaluated Successfully!")
+
+
+if __name__ == "__main__":
+ run()