
nit to allow local docker
joel99 committed Mar 24, 2024
1 parent 82e5afe commit 209ec1a
Showing 3 changed files with 17 additions and 9 deletions.
12 changes: 7 additions & 5 deletions decoder_demos/sklearn_sample.Dockerfile
@@ -10,13 +10,14 @@ FROM pytorch/pytorch:2.1.2-cuda11.8-cudnn8-devel
 RUN pwd
 RUN /bin/bash -c "python3 -m pip install falcon_challenge --upgrade"
 ENV PREDICTION_PATH "/tmp/submission.pkl"
-ENV GT_PATH = "/tmp/ground_truth.pkl"
+ENV GT_PATH "/tmp/ground_truth.pkl"
 
 # TODO ensure falcon_challenge available on dockerhub...
 
 # Users should install additional decoder-specific dependencies here.
 
-ENV EVALUATION_LOC remote
+# ENV EVALUATION_LOC remote
+ENV EVALUATION_LOC local
 
 # Add files from local context into Docker image
 # Note local context reference is the working dir by default, see https://docs.docker.com/engine/reference/commandline/build/
@@ -34,12 +35,13 @@ ADD ./decoder_demos/sklearn_sample.py decode.py
 ADD ./preproc/filtering.py filtering.py
 
 ENV SPLIT "h1"
-ENV PHASE "test"
+ENV PHASE "minival"
+# ENV PHASE "test"
 
 # Don't touch
 # Make sure this matches the mounted data volume path. Generally leave as is.
 ENV EVAL_DATA_PATH "/evaluation_data"
 
 # CMD specifies a default command to run when the container is launched.
 # It can be overridden with any cmd e.g. sudo docker run -it my_image /bin/bash
 CMD ["/bin/bash", "-c", \
-"python decode.py --evaluation $EVALUATION_LOC --model-path data/decoder.pkl --split $TRACK --phase $PHASE"]
+"python decode.py --evaluation $EVALUATION_LOC --model-path data/decoder.pkl --split $SPLIT --phase $PHASE"]
12 changes: 9 additions & 3 deletions falcon_challenge/evaluator.py
@@ -77,10 +77,14 @@ def __init__(self, eval_remote=False, split='h1'):
 
     @staticmethod
     def get_eval_handles(is_remote: bool, dataset: FalconTask, phase: str = 'minival'):
-        if is_remote:
+        if is_remote: # i.e. definitely docker
             data_dir = os.environ.get("EVAL_DATA_PATH")
-        else:
-            data_dir = "data"
+        else: # possibly docker or local
+            if os.path.exists("data"):
+                logger.info("Using local data directory.")
+                data_dir = "data"
+            else:
+                data_dir = os.environ.get("EVAL_DATA_PATH") # a local docker eval
         data_dir = Path(data_dir) / dataset.name
         if phase == 'test': # TODO wire wherever test is actually stored on remote
             eval_dir = data_dir / f"eval"
@@ -92,6 +96,8 @@ def get_eval_files(self, phase: str = 'minival'):
         logger.info("Searching for evaluation data.")
         handles = self.get_eval_handles(self.eval_remote, self.dataset, phase=phase)
         logger.info(f"Found {len(handles)} files.")
+        if len(handles) == 0:
+            raise FileNotFoundError(f"No files found in {self.dataset.name} for phase {phase}. Note test phase data is only available on EvalAI remote.")
         return handles
 
     def predict_files(self, decoder: BCIDecoder, eval_files: List):
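The new fallback means a run launched from a checkout with a local data/ directory resolves there first, while a container without one falls back to EVAL_DATA_PATH. A hypothetical non-Docker invocation mirroring the Dockerfile's CMD; the script path and decoder filename follow the sample layout above and may differ in practice:

# From the repo root, where ./data holds the evaluation files and the decoder.
python decoder_demos/sklearn_sample.py --evaluation local \
    --model-path data/decoder.pkl --split h1 --phase minival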
2 changes: 1 addition & 1 deletion setup.py
@@ -2,7 +2,7 @@
 
 setup(
     name='falcon_challenge',
-    version='0.1.4',
+    version='0.1.5',
 
     url='https://github.com/snel-repo/stability-benchmark',
     author='Joel Ye',
