diff --git a/.ci/installBackendsDependencies.sh b/.ci/installBackendsDependencies.sh
new file mode 100644
index 0000000..8cf8c01
--- /dev/null
+++ b/.ci/installBackendsDependencies.sh
@@ -0,0 +1,34 @@
+apt-get update
+
+echo "dependencies for GPyopt"
+source ${BASH_SOURCE%/*}/installGPyOptDependencies.sh
+
+echo "dependencies for rbfopt"
+source ${BASH_SOURCE%/*}/installRBFOptDependencies.sh
+
+echo "dependencies for SMAC3"
+source ${BASH_SOURCE%/*}/installSMAC3Dependencies.sh
+
+if pip3 show ecabc; then :;
+else
+ git clone --depth=1 --branch _args_fix https://github.com/KOLANICH/ecabc.git
+ pip3 install --user --upgrade --pre ./ecabc
+fi;
+
+if [ -f $PYTHON_MODULES_DIR/hyperband.py ] ; then :;
+else
+ curl -O https://raw.githubusercontent.com/zygmuntz/hyperband/master/hyperband.py
+ 2to3 -wn hyperband.py
+ mv hyperband.py $PYTHON_MODULES_DIR/
+fi;
+
+if [ -f $PYTHON_MODULES_DIR/diffevo.py ] ; then :;
+else
+ curl -O https://raw.githubusercontent.com/tiagoCuervo/EvoFuzzy/4cbfce4a432fd162d6f30017c8de0477b29e5f42/diffevo.py
+ 2to3 -wn diffevo.py
+ mv diffevo.py $PYTHON_MODULES_DIR/
+fi;
+
+
+# RoBO -> george
+pip3 install --user --upgrade git+https://github.com/yaml/pyyaml.git
diff --git a/.ci/installCython.sh b/.ci/installCython.sh
new file mode 100644
index 0000000..d43a5da
--- /dev/null
+++ b/.ci/installCython.sh
@@ -0,0 +1,6 @@
+if pip3 show cython ; then :;
+else
+ apt-get -y install swig gcc g++;
+ git clone --depth=1 https://github.com/cython/cython.git;
+ pip3 install --upgrade --user --pre ./cython;
+fi;
diff --git a/.ci/installGPyOptDependencies.sh b/.ci/installGPyOptDependencies.sh
new file mode 100644
index 0000000..779c181
--- /dev/null
+++ b/.ci/installGPyOptDependencies.sh
@@ -0,0 +1,7 @@
+if pip3 show gpy; then :;
+else
+ source ${BASH_SOURCE%/*}/installCython.sh
+ git clone --depth=1 https://github.com/SheffieldML/GPy.git
+ find ./GPy -name '*.pyx' -exec cython {} \;
+ pip3 install --upgrade --user --pre ./GPy
+fi;
diff --git a/.ci/installRBFOptDependencies.sh b/.ci/installRBFOptDependencies.sh
new file mode 100644
index 0000000..af86636
--- /dev/null
+++ b/.ci/installRBFOptDependencies.sh
@@ -0,0 +1,15 @@
+echo "dependencies for rbfopt";
+if [ -x $EXECUTABLE_DEPENDENCIES_DIR/bonmin ] && [ -x $EXECUTABLE_DEPENDENCIES_DIR/ipopt ]; then :;
+else
+ apt-get -y install p7zip-full;
+ curl -O https://ampl.com/dl/open/bonmin/bonmin-linux64.zip -O https://ampl.com/dl/open/ipopt/ipopt-linux64.zip;
+ 7za x bonmin-linux64.zip;
+ 7za x -y ipopt-linux64.zip;
+ mv ./bonmin ./ipopt $EXECUTABLE_DEPENDENCIES_DIR/;
+fi;
+if pip3 show pyutilib && pip3 show pyomo; then :;
+else
+ git clone --depth=1 https://github.com/PyUtilib/pyutilib.git;
+ git clone --depth=1 https://github.com/Pyomo/pyomo.git;
+ pip3 install --user --upgrade --pre ./pyutilib ./pyomo;
+fi;
diff --git a/.ci/installSMAC3Dependencies.sh b/.ci/installSMAC3Dependencies.sh
new file mode 100644
index 0000000..b07e3b0
--- /dev/null
+++ b/.ci/installSMAC3Dependencies.sh
@@ -0,0 +1,7 @@
+apt-get -y install python3-pybind11;
+if pip3 show smac; then :;
+else
+ apt-get -y install pybind11-dev;
+ git clone --depth=1 --branch development https://github.com/automl/SMAC3.git;
+ pip3 install --user --upgrade --pre ./SMAC3;
+fi;
diff --git a/.ci/pythonStdlibFixes.sh b/.ci/pythonStdlibFixes.sh
new file mode 100644
index 0000000..eb46dd2
--- /dev/null
+++ b/.ci/pythonStdlibFixes.sh
@@ -0,0 +1,12 @@
+if python -c "import sys; sys.exit(int(not (sys.version_info < (3, 5))))"; then
+ curl -O https://raw.githubusercontent.com/python/cpython/3.6/Lib/typing.py;
+ curl -O https://raw.githubusercontent.com/python/cpython/3.5/Lib/linecache.py;
+ curl -O https://raw.githubusercontent.com/python/cpython/3.5/Lib/traceback.py;
+ #curl -O https://raw.githubusercontent.com/python/cpython/3.5/Lib/importlib/abc.py;
+ #curl -O https://raw.githubusercontent.com/python/cpython/3.5/Lib/importlib/_bootstrap_external.py;
+ mv ./typing.py ./linecache.py ./traceback.py $PYTHON_MODULES_DIR/
+fi;
+if python -c "import sys; sys.exit(int(not (sys.version_info < (3, 6))))"; then
+ curl -O https://raw.githubusercontent.com/python/cpython/3.7/Lib/enum.py;
+ mv ./enum.py $PYTHON_MODULES_DIR/
+fi;
diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..5b56b94
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,7 @@
+root = true
+
+[*]
+charset=utf-8
+indent_style = tab
+indent_size = 4
+insert_final_newline = false
\ No newline at end of file
diff --git a/.github/.templateMarker b/.github/.templateMarker
new file mode 100644
index 0000000..5e3a3e0
--- /dev/null
+++ b/.github/.templateMarker
@@ -0,0 +1 @@
+KOLANICH/python_project_boilerplate.py
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..89ff339
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,8 @@
+version: 2
+updates:
+ - package-ecosystem: "pip"
+ directory: "/"
+ schedule:
+ interval: "daily"
+ allow:
+ - dependency-type: "all"
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
new file mode 100644
index 0000000..7fe33b3
--- /dev/null
+++ b/.github/workflows/CI.yml
@@ -0,0 +1,15 @@
+name: CI
+on:
+ push:
+ branches: [master]
+ pull_request:
+ branches: [master]
+
+jobs:
+ build:
+ runs-on: ubuntu-22.04
+ steps:
+ - name: typical python workflow
+ uses: KOLANICH-GHActions/typical-python-workflow@master
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..3fd7b08
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,8 @@
+__pycache__
+*.py[co]
+/*.egg-info
+/build
+/dist
+/hyperband.py
+/shac
+/.eggs
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000..8f5346e
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,102 @@
+#image: pypy:latest
+image: registry.gitlab.com/kolanich/fixed_python:latest
+
+stages:
+ - dependencies
+ - build
+ - test
+ - tooling
+
+variables:
+ GIT_DEPTH: "1"
+ PYTHONUSERBASE: ${CI_PROJECT_DIR}/python_user_packages
+
+dependencies:
+ tags:
+ - shared
+ stage: dependencies
+ before_script:
+ - export PYTHON_MODULES_DIR=${PYTHONUSERBASE}/lib/python3.7/site-packages
+ - export EXECUTABLE_DEPENDENCIES_DIR=${PYTHONUSERBASE}/bin
+ - export PATH="$PATH:$EXECUTABLE_DEPENDENCIES_DIR" # don't move any of these into `variables`: it is unordered
+ #- python -c "import hyperband"
+ script:
+ - mkdir -p $EXECUTABLE_DEPENDENCIES_DIR $PYTHON_MODULES_DIR
+ - source ./.ci/installBackendsDependencies.sh
+
+ cache:
+ key: deps
+ paths:
+ - $PYTHONUSERBASE
+ - $EXECUTABLE_DEPENDENCIES_DIR
+
+build:
+ tags:
+ - shared
+ stage: build
+
+ before_script:
+ - export PYTHON_MODULES_DIR=${PYTHONUSERBASE}/lib/python3.7/site-packages
+ - export EXECUTABLE_DEPENDENCIES_DIR=${PYTHONUSERBASE}/bin
+ - export PATH="$PATH:$EXECUTABLE_DEPENDENCIES_DIR" # don't move any of these into `variables`: it is unordered
+ - source ./.ci/installBackendsDependencies.sh # everything should already be built at this point; this is needed only to reinstall the stuff installed by apt-get
+
+ script:
+ - python3 setup.py bdist_wheel
+ - mv ./dist/*.whl ./dist/UniOpt-0.CI-py3-none-any.whl
+ - pip3 install --user --upgrade --pre -e ./[hyperopt,hyperengine,SKOpt,SMAC,BeeColony,optunity,Yabox,PySHAC,RBFOpt,Bayessian,GPyOpt,SOpt,pySOT,BayTune,RoBo] #https://github.com/pypa/pip/issues/5903
+ - coverage run --source=UniOpt -m pytest --junitxml=./rspec.xml ./tests/tests.py
+ - coverage report -m
+ - coverage xml
+ cache:
+ key: deps
+ paths:
+ - $PYTHONUSERBASE
+
+ artifacts:
+ paths:
+ - dist
+ reports:
+ junit: ./rspec.xml
+ cobertura: ./coverage.xml
+
+checks:
+ stage: tooling
+ tags:
+ - shared
+ image: docker:latest
+ variables:
+ DOCKER_DRIVER: overlay2
+ allow_failure: true
+ services:
+ - docker:dind
+ script:
+ - docker run --env SAST_CONFIDENCE_LEVEL=5 --volume "$PWD:/code" --volume /var/run/docker.sock:/var/run/docker.sock "registry.gitlab.com/gitlab-org/security-products/sast:latest" /app/bin/run /code
+ #- docker run --env SOURCE_CODE="$PWD" --env CODECLIMATE_VERSION="latest" --volume "$PWD":/code --volume /var/run/docker.sock:/var/run/docker.sock "registry.gitlab.com/gitlab-org/security-products/codequality:latest" /code
+ #- docker run --env DEP_SCAN_DISABLE_REMOTE_CHECKS="${DEP_SCAN_DISABLE_REMOTE_CHECKS:-false}" --volume "$PWD:/code" --volume /var/run/docker.sock:/var/run/docker.sock "registry.gitlab.com/gitlab-org/security-products/dependency-scanning:latest" /code
+
+ artifacts:
+ reports:
+ #codequality: gl-code-quality-report.json
+ sast: gl-sast-report.json
+ #dependency_scanning: gl-dependency-scanning-report.json
+
+
+pages:
+ stage: tooling
+ tags:
+ - shared
+ image: alpine:latest
+ allow_failure: true
+ before_script:
+ - apk update
+ - apk add doxygen
+ - apk add ttf-freefont graphviz
+ script:
+ - doxygen ./Doxyfile
+ - mv ./docs/html ./public
+ artifacts:
+ paths:
+ - public
+ only:
+ - master
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..7d26f2d
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,43 @@
+sudo: false
+dist: trusty
+language: python
+matrix:
+ include: # https://github.com/travis-ci/travis-ci/issues/8783
+ - os: linux
+ python: 3.4
+ - os: linux
+ python: 3.5
+ - os: linux
+ python: 3.6
+ - os: linux
+ python: 3.7-dev
+ - os: linux
+ python: nightly
+ - os: linux
+ python: pypy3
+ # https://github.com/travis-ci/travis-ci/issues/6865#issuecomment-345513305
+ #- os: linux
+ # python: pypy3-nightly
+ - os: osx
+ python: nightly
+ #- os: osx
+ # python: pypy3
+ #- os: osx
+ # python: pypy3-nightly
+before_install:
+ - source ./.ci/pythonStdlibFixes.sh
+ - pip3 install --upgrade setuptools setuptools_scm
+ - pip3 install --upgrade git+https://github.com/pypa/pip.git git+https://github.com/pypa/wheel.git
+ - pip3 install --upgrade coveralls
+ - pip3 install --upgrade git+https://gitlab.com/KOLANICH/alternativez.py.git
+ - pip3 install --upgrade git+https://gitlab.com/KOLANICH/lazyImport.py.git
+ - pip3 install --upgrade git+https://gitlab.com/KOLANICH/Chassis.py.git
+ - pip3 install --upgrade https://gitlab.com/KOLANICH/pyxgboost/-/jobs/artifacts/master/raw/wheels/pyxgboost-0.CI-py3-none-any.whl?job=build
+install:
+ - python setup.py install
+script:
+ - coverage run --source=UniOpt setup.py test
+after_success:
+ - coveralls
+ # - python3 setup.py bdist_wheel
+ # - python3 setup.py sdist
diff --git a/Code_Of_Conduct.md b/Code_Of_Conduct.md
new file mode 100644
index 0000000..faa5e8e
--- /dev/null
+++ b/Code_Of_Conduct.md
@@ -0,0 +1 @@
+No codes of conduct! Just do what you feel is right and say what you feel is right using the language you feel is right. If you feel that it is right to [make your own fork with a CoC and SJWs](https://en.wikipedia.org/wiki/Bender_Rodriguez), just do that. We here are doing the work, not accusing each other of violating codes of conduct.
diff --git a/Contributing.md b/Contributing.md
new file mode 100644
index 0000000..0c2b83c
--- /dev/null
+++ b/Contributing.md
@@ -0,0 +1,79 @@
+Contributing guidelines
+=======================
+
+This project has a somewhat different policy than most other projects. Please read it carefully, otherwise you may be in for a surprise.
+
+Style guide
+-----------
+0. We don't follow PEP-8.
+1. Tabs and spaces are controlled by `.editorconfig` file. See https://editorconfig.org/ for more info about its format. [Install a plugin](https://editorconfig.org/#download) for your IDE/text editor if it doesn't support it out of the box.
+2. No manual line wrapping. Wrapping is done by your text editor; enable this feature.
+
+And some very opinionated things
+--------------------------------
+1. The joke about dead python is not going to be removed. It's the message that everyone should drop python 2 as soon as possible. If you find it inappropriate, it's your own personal problem.
+2. We really DON'T support python 2, so don't send PRs fixing py2 support. I'm sorry, but this coup de grace is a necessity. With python 4 not so far away, we don't want to support a serpentarium of different python versions. Since python 3 works fine even on very ancient hardware from 1999 and an OS from 2001, there shouldn't be a serious reason to stay on py2. If your org cannot or is afraid to migrate to later versions of python, it's your and your org's problem, not ours. I advise you to start migrating as soon as possible, to be ready for the moment py2 is officially dropped by the PSF; you have to migrate sooner or later anyway, unless you are dropping python entirely. You can use the fact that many projects are dropping python 2 support to persuade your boss that you have to migrate. Fix your tests if you don't trust them, use 2to3, fix the code after it, and make the code pass the tests. You may also expect python 3 support to be dropped not long after the python 4 release (it depends on whether that requires replacing the hardware and the OS; for example, pythons >3.5 have no support for Windows XP, and XP is needed for old (but good) hardware costing several millions of dollars and having no drivers for Windows 7 or Linux).
+
+And now that the brief organizational FAQ is over, on to the docs about the architecture.
+
+Architecture guide
+==================
+
+As already mentioned, it is easy to add your own backend. Here is an approximate algorithm.
+
+0. Read this guide entirely and get yourself familiar with the conventions used in this library.
+ * `HyperparamVector` is a class encapsulating the most generic aspects of a hyperparam vector. Shouldn't be instantiated.
+ * `dict2native` transforms a dict into an object of the native output type of a spec.
+ * `native2dict` transforms an object of the native output type back into a dict.
+ * `Spec` is a class storing and transforming a generic search space specification into an optimizer-specific one. It must have some properties:
+ * `hyperparamsVectorType:HyperparamVector` is the type of a vector.
+ * `hyperparamsSpecType:HyperparamVector` is the type of the spec itself.
+ * `Spec`s are built via mixins. Because it is tricky to remember the right order of inheritance, and because we don't want to precreate them all (exponentially many in the count of mixins), `MSpec` is the function that creates the spec class for you. See the detailed description later.
+ * `name` - a desired name; don't set it, the names are generated automatically
+ * `isDummy` - do not transform spec items. Used for testing.
+ * `Optimizer` is a class doing optimization. It may get additional arguments. It must have some properties/methods:
+ * `specType` - the type of the spec.
+ * `prepareScoring` - a method setting up scoring. It is your chance to do something before the progress bar appears. It checks correctness, creates objects and returns a `tuple` of
+ 0. the count of function evaluations. You usually need `self.iters` here.
+ 1. the name of the optimizer to display on the progress bar.
+ 2. a context object. You should put a maximally prepared evaluator there.
+ * `invokeScoring` - receives a black-box function to optimize, a progress bar object and the context created by `prepareScoring`. This function is called in the context of the progress bar. The progress bar object can be used to send messages to it. Usually you wrap the black-box function into your own one, transforming its results into the format convenient for the optimizer.
+ * `ProgressReporter` is a class to report progress. Just install `tqdm` and it will be picked up.
+ * All the dependencies are imported with `lazyImport`. An imported package shouldn't be accessed in the module scope, because that triggers the actual import, which will cause lags or errors if the dependency is not available. If you need to do some preparative work, use `lazy_object_proxy.Proxy` for it.
+1. Find an example for the backend. Play with it a little. Determine the following:
+ * the format of a space spec:
+ * if it allows integers. This influences the `integerMode:IntegerMetaMap` argument of `MSpec`:
+ * `supportsIntegers` - the optimizer supports specifying integers and returns them as `int`. No action needed.
+ * `floatIntegers` - the optimizer supports integers, but returns them as `float`.
+ * `noIntegers` - the optimizer doesn't support integers. We have to postprocess with rounding, which drastically impacts performance.
+ * if it allows variables not drawn from the uniform distribution. If it does, you need to define `HyperparamsSpecsConverters` in your class, or use `transformHyperDefItemUniversal`.
+ * if it is very dumb: allows only the uniform distribution, disallows categories and scalars, and the optimizer-specific hyperparameter definition is just a sequence `(lower_bound, upper_bound)`. This is a very widespread situation, so we already have classes for it. Find them in the `SpecOnlyBoxes` module.
+ * if it allows categorical variables. If it does, you need to define `_categorical` in `HyperparamsSpecsConverters`.
+ * if it allows scalars. This influences the `scalarMode:ScalarMetaMap` argument of `MSpec`:
+ * `supportsScalars` - the optimizer deals with scalars in the spec itself.
+ * `degenerateCategory` - the optimizer doesn't support scalars but supports categorical variables. The lib puts scalars into a degenerate category. This may affect performance; if the impact is low, this way is preferred, because optimizers may have side effects like saving info to the disk.
+ * `noScalars` - the optimizer doesn't support scalars and using categorical variables for them is infeasible: either not available or too big a performance penalty. This causes scalars to be saved into a separate dict and added back later.
+ * `isArray` - `True` if the spec is `Iterable`-based.
+ * calling convention of a black-box function:
+ * whether it is a `Mapping` (`dict`) or an `Iterable` (`list`, `tuple`, `ndarray`) or something else.
+ * if it is a `Mapping`, you need specs with `hyperparamsVectorType` being derived from `HyperparamVector`
+ * if it is an `Iterable`, you need specs with `hyperparamsVectorType` being derived from `HyperparamArray`
+ * the argument controlling the count of iterations. If there is no such control, you can try to create a wrapper using an exception to break the loop.
+ * the way the optimizer prints messages. All the messages in `invokeScoring` should be printed via the `ProgressReporter` object passed. You may need some hacks if a lib directly uses `print` or an io stream. Please don't redefine global builtins like `print`.
+2. Create a draft of a `Spec` using the info from the previous point, if it is needed. Inherit from the class returned by a call to `MSpec` (see the sketch at the end of this guide).
+3. Now you are ready to write the code of the backend. Inherit `GenericOptimizer` and populate the following properties of the class:
+ * `specType` - is the type of your spec.
+ * `__init__` - here you can save additional parameters of an optimizer
+ * `prepareScoring` - here you can prepare your optimizer. You can save arbitrary context. This function returns a tuple `(countOfIterations, optimizerFriendlyName, context)`.
+ * `countOfIterations` is for the case you need additional iterations. Usually you need to return `self.iters`.
+ * `optimizerFriendlyName` is used in UI.
+ * `context` is your context.
+ * `invokeScoring(self, fn:typing.Callable, pb:ProgressReporter, context)` - actual optimization
+ * `fn` is a prepared function. Accepts either array or dict depending on `self.__class__.specType.hyperparamsVectorType`
+ * `pb` is a `ProgressReporter` object. You can use it for redirecting output and printing messages in a way that does not destroy the CLI progress bar.
+ * `context` is your context.
+
+ You usually wanna wrap `fn` into your own function returning only the mean. But try returning the whole tuple first; if it works fine, keep returning the whole tuple. Tuples are compared lexicographically, so this way values with the same mean but lower variance are gonna be preferred by the optimizer.
+
+4. Add it into `__init__.py`. Import the `Optimizer` subclass and add it to the `Optimizers` class as a property, using a friendlier name if possible.
+5. To test it, open `tests/tests.py`, disable all the unneeded tests with `@unittest.skip`, enable `OptimizersTests.testOptimizer` and replace the optimizer name there with the friendlier name of your backend.
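+
+To make these steps concrete, here is a minimal sketch of a hypothetical backend: a naive random search. The `MSpec` flags and the converter mirror those used by the real backends, but the backend itself (`RandomSearchExample`) is illustrative, and the exact shape of the transformed spec is an assumption; consult the real backends under `UniOpt/backends/` for authoritative usage.
+
+```python
+import random
+import typing
+
+from ..core.MetaSpec import *
+from ..core.Optimizer import GenericOptimizer
+from ..core.ProgressReporter import ProgressReporter
+
+
+# A spec whose optimizer-native hyperparam definition is just (lower_bound, upper_bound).
+# Assumption: a dict-shaped spec (isArray left at its default) with uniform floats only.
+class RandomSearchSpec(MSpec(scalarMode=ScalarMetaMap.noScalars, integerMode=IntegerMetaMap.noIntegers)):
+	class HyperparamsSpecsConverters:
+		def uniform(k, dist, tp):
+			return (dist.ppf(0), dist.ppf(1))
+
+
+class RandomSearchExample(GenericOptimizer):
+	specType = RandomSearchSpec
+
+	def prepareScoring(self, spaceSpec):
+		# nothing to precompute: the transformed spec itself is our context
+		return (self.iters, "random search (example)", spaceSpec)
+
+	def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, spaceSpec):
+		best = None
+		bestScore = float("inf")
+		for _ in range(self.iters):
+			# sample each hyperparam uniformly within its (lower_bound, upper_bound) box
+			point = {k: random.uniform(lo, hi) for k, (lo, hi) in spaceSpec.items()}
+			score = fn(point)[0]  # fn returns (mean, variance); compare by the mean only
+			if score < bestScore:
+				best, bestScore = point, score
+		self.details = bestScore
+		return best
+```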
diff --git a/Doxyfile b/Doxyfile
new file mode 100644
index 0000000..8c50df9
--- /dev/null
+++ b/Doxyfile
@@ -0,0 +1,78 @@
+PROJECT_NAME = "UniOpt"
+PROJECT_BRIEF = "A wrapper for popular black box optimization libraries providing unified interface"
+PROJECT_LOGO = "https://assets.gitlab-static.net/uploads/-/system/project/avatar/8079746/logo1536.jpg?width=40"
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = "./docs"
+ALLOW_UNICODE_NAMES = YES
+OUTPUT_LANGUAGE = English
+BRIEF_MEMBER_DESC = YES
+REPEAT_BRIEF = YES
+INLINE_INHERITED_MEMB = NO
+FULL_PATH_NAMES = YES
+STRIP_FROM_PATH =
+SHORT_NAMES = NO
+INHERIT_DOCS = YES
+SEPARATE_MEMBER_PAGES = NO
+TAB_SIZE = 4
+OPTIMIZE_OUTPUT_JAVA = YES
+MARKDOWN_SUPPORT = YES
+AUTOLINK_SUPPORT = YES
+IDL_PROPERTY_SUPPORT = YES
+
+EXTRACT_ALL = YES
+RECURSIVE = YES
+
+EXAMPLE_PATH =
+EXAMPLE_RECURSIVE = YES
+
+USE_MDFILE_AS_MAINPAGE = ReadMe.md
+
+SOURCE_BROWSER = YES
+INLINE_SOURCES = YES
+
+
+REFERENCED_BY_RELATION = YES
+REFERENCES_RELATION = YES
+VERBATIM_HEADERS = NO
+
+ALPHABETICAL_INDEX = YES
+COLS_IN_ALPHA_INDEX = 5
+
+GENERATE_HTML = YES
+GENERATE_LATEX = NO
+#HTML_OUTPUT = html
+HTML_FILE_EXTENSION = .html
+HTML_DYNAMIC_SECTIONS = YES
+HTML_INDEX_NUM_ENTRIES = 9999
+GENERATE_TREEVIEW = YES
+ENUM_VALUES_PER_LINE = 1
+EXT_LINKS_IN_WINDOW = NO
+FORMULA_FONTSIZE = 10
+FORMULA_TRANSPARENT = YES
+SEARCHENGINE = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+CLASS_DIAGRAMS = YES
+HIDE_UNDOC_RELATIONS = NO
+HAVE_DOT = YES
+DOT_FONTNAME = Helvetica
+DOT_FONTSIZE = 10
+UML_LOOK = YES
+TEMPLATE_RELATIONS = YES
+CALL_GRAPH = YES
+CALLER_GRAPH = YES
+GRAPHICAL_HIERARCHY = YES
+DIRECTORY_GRAPH = YES
+DOT_IMAGE_FORMAT = svg
+INTERACTIVE_SVG = YES
+DOT_GRAPH_MAX_NODES = 10000
+MAX_DOT_GRAPH_DEPTH = 0
+DOT_TRANSPARENT = YES
+DOT_MULTI_TARGETS = YES
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..20f0fa8
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,4 @@
+include UNLICENSE
+include *.md
+graft tests
+include .editorconfig
diff --git a/ReadMe.md b/ReadMe.md
new file mode 100644
index 0000000..644a783
--- /dev/null
+++ b/ReadMe.md
@@ -0,0 +1,250 @@
+UniOpt.py [![Unlicensed work](https://raw.githubusercontent.com/unlicense/unlicense.org/master/static/favicon.png)](https://unlicense.org/)
+=========
+[![PyPi Status](https://img.shields.io/pypi/v/UniOpt.svg)](https://pypi.org/project/UniOpt)
+~~![GitLab Build Status](https://gitlab.com/KOLANICH/UniOpt.py/badges/master/pipeline.svg)~~
+~~![GitLab Coverage](https://gitlab.com/KOLANICH/UniOpt.py/badges/master/coverage.svg)~~
+~~[![Coveralls Coverage](https://img.shields.io/coveralls/KOLANICH/UniOpt.py.svg)](https://coveralls.io/r/KOLANICH/UniOpt.py)~~
+[![Libraries.io Status](https://img.shields.io/librariesio/github/KOLANICH/UniOpt.py.svg)](https://libraries.io/github/KOLANICH/UniOpt.py)
+[![Code style: antiflash](https://img.shields.io/badge/code%20style-antiflash-FFF.svg)](https://codeberg.org/KOLANICH-tools/antiflash.py)
+
+![Logo](https://gitlab.com/uploads/-/system/project/avatar/8079746/logo1536.jpg?width=40)
+
+This is a universal black box optimization library. No algos (for now) are implemented here; it just implements a framework to wrap as many black box optimizers as possible, as easily as possible, and to use them in a unified way.
+
+There are different hyperparams optimization libs; they use different formats of search space specs, different formats of input vectors and different ways to retrieve the result. This lib smooths out the differences, providing a unified interface to use them all.
+
+This is a **VERY** early alpha. Don't fork for now.
+
+Doxygen documentation [is available](https://kolanich.gitlab.io/UniOpt.py/).
+
+Requirements
+------------
+* [`numpy`](https://github.com/numpy/numpy) ![Licence](https://img.shields.io/github/license/numpy/numpy.svg) [![PyPi Status](https://img.shields.io/pypi/v/numpy.svg)](https://pypi.org/project/numpy) [![Build status](https://github.com/numpy/numpy/actions/workflows/linux.yml/badge.svg?branch=main)](https://github.com/numpy/numpy/actions/workflows/linux.yml) [![Libraries.io Status](https://img.shields.io/librariesio/github/numpy/numpy.svg)](https://libraries.io/github/numpy/numpy)
+
+* [`scipy`](https://github.com/scipy/scipy) ![Licence](https://img.shields.io/github/license/scipy/scipy.svg) [![PyPi Status](https://img.shields.io/pypi/v/scipy.svg)](https://pypi.org/project/scipy) [![Build status](https://github.com/scipy/scipy/actions/workflows/linux.yml/badge.svg?branch=main)](https://github.com/scipy/scipy/actions/workflows/linux.yml) [![CodeCov Coverage](https://codecov.io/github/scipy/scipy/coverage.svg?branch=master)](https://codecov.io/github/scipy/scipy/) [![Libraries.io Status](https://img.shields.io/librariesio/github/scipy/scipy.svg)](https://libraries.io/github/scipy/scipy)
+
+* [`tqdm`](https://github.com/tqdm/tqdm) ![Licence](https://img.shields.io/github/license/tqdm/tqdm.svg) [![PyPi Status](https://img.shields.io/pypi/v/tqdm.svg)](https://pypi.org/project/tqdm) [![Conda Status](https://anaconda.org/conda-forge/tqdm/badges/version.svg)](https://anaconda.org/conda-forge/tqdm) [![Build Status](https://img.shields.io/github/actions/workflow/status/tqdm/tqdm/test.yml?branch=master&label=tqdm&logo=GitHub)](https://github.com/tqdm/tqdm/actions/workflows/test.yml) [![Coveralls Coverage](https://img.shields.io/coveralls/tqdm/tqdm.svg)](https://coveralls.io/r/tqdm/tqdm) [![CodeCov Coverage](https://codecov.io/github/tqdm/tqdm/coverage.svg?branch=master)](https://codecov.io/github/tqdm/tqdm/) [![Codacy Grade](https://api.codacy.com/project/badge/Grade/3f965571598f44549c7818f29cdcf177)](https://www.codacy.com/app/tqdm/tqdm) [![Libraries.io Status](https://img.shields.io/librariesio/github/tqdm/tqdm.svg)](https://libraries.io/github/tqdm/tqdm)
+
+
+How to use
+----------
+0. Select the optimizer backend. Different optimizers are good for different tasks and perform differently. For example, here are the results of optimizing a test function (a hybrid of the Ackley and Rosenbrock functions) as a part of testing:
+
+
+Some benchmarking results
+
+```python
+#20 iters
+OrderedDict([
+ ('MSRSM', ({'x': 3.0754612427874017e-12, 'y': 0}, (1.5205614545266144e-11, 0))),
+ ('Gutmann', ({'x': 4.495684760769583e-12, 'y': 0}, (2.2224444506946384e-11, 0))),
+ ('Yabox', ({'x': 0.04484077552690313, 'y': 0}, (0.25594782347174183, 0))),
+ ('TPE', ({'x': 1.2634392486190837, 'y': 2}, (4.106711553239084, 0))),
+ ('SKOptForest', ({'x': 2.001450714269141, 'y': 4}, (4.126995218051379, 0))),
+ ('PySHAC', ({'x': 1.1365327253605517, 'y': 2}, (4.142106739265552, 0))),
+ ('SKOptGBTree', ({'x': 1.0640782399499173, 'y': 0}, (4.6970480117446005, 0))),
+ ('Random', ({'x': 2.052104278286049, 'y': 5}, (4.789943923600834, 0))),
+ ('SKOptBayesian', ({'x': 2.0077415609175713, 'y': 3}, (4.9722440013656195, 0))),
+ ('GPyOptOptimizer', ({'x': 2.0268674793934447, 'y': 3}, (5.091945147326221, 0))),
+ ('HyperEnginePortfolio', ({'x': 2.2640333910943444, 'y': 6}, (5.909097060500178, 0))),
+ ('Bayessian', ({'x': 3.840114588120504, 'y': 13}, (7.910311893451979, 0))),
+ ('BeeColony', ({'x': 2.1060132176055504, 'y': 0}, (8.303401897709731, 0))),
+ ('Hyperband', ({'x': 1.0953442448796036, 'y': -7}, (10.21592133952341, 0))),
+ ('HyperEngineBayessian', ({'x': 0.035178716905066576, 'y': -13}, (11.73027303604122, 0))),
+ ('NelderMead', ({'x': 5.5546875, 'y': -4}, (16.629806203554303, 0))),
+ ('ParticleSwarm', ({'x': 9.512831487270224, 'y': -3}, (19.485447083871225, 0))),
+ ('Sobol', ({'x': 9.621289062499997, 'y': -11}, (19.561767255097276, 0))),
+ ('OptunityOptimizer', ({'x': 9.57421875, 'y': -13}, (19.665844964264014, 0)))
+])
+
+#100 iters
+OrderedDict([
+ ('SKOptBayesian', ({'x': 0.0, 'y': 0}, (0.0, 0))),
+ ('MSRSM', ({'x': 1.965467987064758e-12, 'y': 0}, (9.71667191151937e-12, 0))),
+ ('Gutmann', ({'x': 1.994094834174218e-12, 'y': 0}, (9.85878045867139e-12, 0))),
+ ('Yabox', ({'x': 0.02306337159200547, 'y': 0}, (0.1231750175856301, 0))),
+ ('HyperEngineBayessian', ({'x': 0.06472343408959413, 'y': 0}, (0.3903313038744054, 0))),
+ ('Bayessian', ({'x': 0.9829409844977999, 'y': 1}, (2.1634186311845145, 0))),
+ ('PySHAC', ({'x': 0.2991248121219703, 'y': 0}, (2.383562650155154, 0))),
+ ('BeeColony', ({'x': 0.7302499236805515, 'y': 1}, (3.9672566629188446, 0))),
+ ('GPyOptOptimizer', ({'x': 1.9750686145225131, 'y': 4}, (4.101219956972918, 0))),
+ ('TPE', ({'x': 1.9516353294615343, 'y': 4}, (4.120949851125776, 0))),
+ ('SKOptGBTree', ({'x': 2.0123977168910847, 'y': 4}, (4.152492764040694, 0))),
+ ('HyperEnginePortfolio', ({'x': 0.014954151978109342, 'y': 1}, (4.336781434582555, 0))),
+ ('Random', ({'x': 0.055334114406850876, 'y': 1}, (4.381030185221982, 0))),
+ ('SKOptForest', ({'x': 2.937967468371783, 'y': 9}, (5.864340107425029, 0))),
+ ('NelderMead', ({'x': 5.5438690185546875, 'y': -12}, (17.293342096641783, 0))),
+ ('OptunityOptimizer', ({'x': 9.611312133307793, 'y': -4}, (19.438307138257112, 0))),
+ ('ParticleSwarm', ({'x': 9.516992187499998, 'y': -3}, (19.48616547955807, 0))),
+ ('Sobol', ({'x': 9.49560546875, 'y': -9}, (19.607708848977282, 0))),
+ ('Hyperband', ({'x': 9.454121928413706, 'y': -14}, (19.67098161993487, 0)))
+])
+
+#another 100 iters
+OrderedDict([
+ ('SKOptBayesian', ({'x': 0.0, 'y': 0}, (0.0, 0))),
+ ('MSRSM', ({'x': 1.965467987100698e-12, 'y': 0}, (9.71667191151937e-12, 0))),
+ ('Gutmann', ({'x': 2.06572139458986e-12, 'y': 0}, (1.021183138050219e-11, 0))),
+ ('SKOptForest', ({'x': 0.0021370756873140277, 'y': 0}, (0.01064400423985612, 0))),
+ ('Yabox', ({'x': 0.011806504145005392, 'y': 0},(0.06077385485484399, 0))),
+ ('Bayessian', ({'x': 0.08963307811319719, 'y': 0}, (0.574643646185228, 0))),
+ ('SKOptGBTree', ({'x': 1.001876402415787, 'y': 1}, (2.1851226071480934, 0))),
+ ('TPE', ({'x': 0.9393761906325264, 'y': 1}, (2.273003533796679, 0))),
+ ('PySHAC', ({'x': 0.3374516167260999, 'y': 0}, (2.68232205052529, 0))),
+ ('Random', ({'x': 0.5743099848851063, 'y': 0}, (3.91888470632373, 0))),
+ ('HyperEnginePortfolio', ({'x': 0.020698458554854193, 'y': 1}, (4.340036896917615, 0))),
+ ('HyperEngineBayessian', ({'x': 0.6695867494591756, 'y': 1}, (4.402848372305214, 0))),
+ ('GPyOptOptimizer', ({'x': 1.470335759775298, 'y': 2}, (4.5145625430151055, 0))),
+ ('BeeColony', ({'x': 1.1489461183128191, 'y': 0}, (5.289477553045166, 0))),
+ ('NelderMead', ({'x': 5.5438690185546875, 'y': -12}, (17.293342096641783, 0))),
+ ('Hyperband', ({'x': 7.534649421992623, 'y': 3}, (18.055060613166553, 0))),
+ ('Sobol', ({'x': 9.456933593749998, 'y': 0}, (19.374501579830856, 0))),
+ ('OptunityOptimizer', ({'x': 9.480038915947556, 'y': 1}, (19.374823892112662, 0))),
+ ('ParticleSwarm', ({'x': 9.532494333566397, 'y': -2}, (19.463592918993786, 0)))
+])
+#and another 100 iters
+OrderedDict([
+ ('SKOptBayesian', ({'x': 0.0, 'y': 0}, (0.0, 0))),
+ ('Bayessian', ({'x': 0.0, 'y': 0}, (0.0, 0))),
+ ('MSRSM', ({'x': 1.965467987101057e-12, 'y': 0}, (9.71667191151937e-12, 0))),
+ ('Gutmann', ({'x': 2.0657213945897996e-12, 'y': 0}, (1.021183138050219e-11, 0))),
+ ('PySHAC', ({'x': 1.0736838586310893, 'y': 1}, (2.596181028196405, 0))),
+ ('TPE', ({'x': 1.112228671531816, 'y': 1}, (2.9484847125586415, 0))),
+ ('SKOptForest', ({'x': 1.9743490825396586, 'y': 4}, (4.101231423607379, 0))),
+ ('SKOptGBTree', ({'x': 1.9730793645346538, 'y': 4}, (4.101344227347713, 0))),
+ ('BeeColony', ({'x': 1.1480878788645177, 'y': 2}, (4.137194813288049, 0))),
+ ('HyperEngineBayessian', ({'x': 0.017184911830446792, 'y': -1}, (4.339052002416813, 0))),
+ ('HyperEnginePortfolio', ({'x': 0.039186794853671714, 'y': 1}, (4.357466574344844, 0))),
+ ('Yabox', ({'x': 0.10064054071073808, 'y': 1}, (4.483456305012673, 0))),
+ ('GPyOptOptimizer', ({'x': 1.4703357597723614, 'y': 2}, (4.514562543000367, 0))),
+ ('Random', ({'x': 0.8303208100740211, 'y': 2}, (5.321946188711948, 0))),
+ ('NelderMead', ({'x': 5.5438690185546875, 'y': -12}, (17.293342096641783, 0))),
+ ('Hyperband', ({'x': 7.534649421992623, 'y': 3}, (18.055060613166553, 0))),
+ ('ParticleSwarm', ({'x': 9.53042265695655, 'y': 7}, (19.243508799760164, 0))),
+ ('OptunityOptimizer', ({'x': 9.476953125, 'y': -2}, (19.443113499744367, 0))),
+ ('Sobol', ({'x': 9.553613281249998, 'y': -2}, (19.456426287512052, 0)))
+])
+
+OrderedDict([
+ ('SKOptBayesian', ({'x': 0.0, 'y': 0, 'z': 3}, (0.0, 0))),
+ ('MSRSM', ({'x': 0.0, 'y': 0, 'z': 3}, (0.0, 0))),
+ ('GPyOptOptimizer', ({'x': 0.0, 'y': 0, 'z': 3}, (0.0, 0))),
+ ('Bayessian', ({'x': 0.0, 'y': 0, 'z': 3}, (0.0, 0))),
+ ('Gutmann', ({'x': 1.862587834282002e-12, 'y': 0, 'z': 3}, (9.208189766241048e-12, 0))),
+ ('SKOptGBTree', ({'x': 0.0006981287251917047, 'y': 0, 'z': 3}, (0.0034597748341651524, 0))),
+ ('TPE', ({'x': 0.04262838182879991, 'y': 0, 'z': 3}, (0.24175273938686326, 0))),
+ ('PySHAC', ({'x': 0.9095346430312279, 'y': 1, 'z': 3}, (2.4508629369328156, 0))),
+ ('SKOptForest', ({'x': 1.975551753738029, 'y': 4, 'z': 3}, (4.1012335626387895, 0))),
+ ('HyperEnginePortfolio', ({'x': 0.6955663900186637, 'y': 0, 'z': 3}, (4.135877638966221, 0))),
+ ('HyperEngineBayessian', ({'x': 0.029900210748344813, 'y': 1, 'z': 3}, (4.347401328753184, 0))),
+ ('Yabox', ({'x': 0.0842280390688326, 'y': 1, 'z': 3}, (4.4407866406914, 0))),
+ ('Random', ({'x': 0.1937494360579084, 'y': -1, 'z': 3}, (4.936616103474133, 0))),
+ ('BeeColony', ({'x': 2.2022165228712076, 'y': 5, 'z': 3}, (5.078197918216663, 0))),
+ ('Hyperband',({'x': 5.652646139447696, 'y': -7, 'z': 3}, (16.808037852272676, 0))),
+ ('NelderMead', ({'x': 5.482275009155273, 'y': -48, 'z': 3}, (19.01645084709497, 0))),
+ ('OptunityOptimizer', ({'x': 9.4734375, 'y': 0, 'z': 3}, (19.392915479901454, 0))),
+ ('ParticleSwarm', ({'x': 9.572687738918628, 'y': -11, 'z': 3}, (19.629448159563655, 0))),
+ ('Sobol', ({'x': 9.476269531249997, 'y': -20, 'z': 3}, (19.801833074160353, 0)))
+])
+```
+
+
+
+
+The backends are available directly in the UniOpt package, and you can enumerate them:
+
+```python
+import UniOpt
+
+for optimizer in UniOpt:
+	print("optimizer: " + optimizer.__name__)
+```
+
+Here we choose hyperopt's tree of Parzen estimators:
+```python
+import UniOpt
+optimizer = UniOpt.TPE
+```
+
+
+1. Specify a search space. We use the term `spec` for this specification. The spec is a flat `Mapping`-compatible object. It consists of:
+ * `HyperparamDefinition` objects. The `HyperDef` alias is available. For help on it see its docstring; here is the brief guide:
+
+ * the first arg is the data type, `int` or `float`.
+
+ * the second one is a distribution from `scipy.stats`. Yes, we are tied tightly to `scipy`. No, I have no plans to change that for now: we use `scipy` internally for the case the optimizer doesn't have a distribution implemented internally, and even if it has, it is very likely it uses `scipy` anyway. I could create my own registry of distributions to get rid of the **mandatory** `scipy`, but this needlessly complicates things, since I would have to maintain it and write converters from it to optimizer-specific specs, and most likely I would still use `scipy` for them. If you are not OK with this, post your ideas into the corresponding issue. A couple of words about some special distributions:
+ * `uniform` assumes that there is **SOME smooth enough** relation between a number and the value of a function. You (and the surrogate model) can somehow predict the effect of changing this variable on the loss.
+ * `randint` assumes that there is **NO smooth enough** relation between a number and the value of a function. Changing this variable in any way changes the result unpredictably.
+
+ * scalars: `str`ings and numbers
+ * `tuple`s and `list`s of scalars. They represent a categorical variable. If you need nonuniformly distributed categories, you have to use the appropriate `scipy.stats` distribution and convert a number into a category yourself.
+ * other objects. They can be processed by a backend if this is implemented.
+
+```python
+import scipy.stats
+from UniOpt.core.Spec import *
+spaceSpec={
+ "x": HyperDef(float, scipy.stats.uniform(loc=0, scale=10)),
+	"y": HyperDef(int, scipy.stats.uniform(loc=0, scale=10)), # int type makes it effectively discrete
+	"z": HyperDef(float, scipy.stats.norm(loc=0, scale=10)),
+ "w": 3,
+ "v": (0, 1)
+}
+```
+
+2. Create the function you wanna optimize. It takes a dict of hyperparams and returns **a tuple `(mean, variance)`** of the target function; use a variance of `0` if the variance is unknown or not present. Variance is useful for some optimizers and may be available in some cross-validation routines.
+
+```python
+import typing
+
+import numpy as np
+def optimizee(hyperparamsDict:typing.Mapping[str, typing.Any]):
+ return (np.sum(np.array(tuple(hyperparamsDict.values()))**2), 0)
+```
+
+3. If you wanna be able to resume, create a `PointsStorage` object (also see the complete example below):
+ `from UniOpt.core.PointsStorage import *`
+ a) `stor = MemoryStorage()` for storing in an array;
+ b) `stor = SQLiteStorage("Points.sqlite")` for storing in a SQLite DB.
+
+4. Create an optimizer object.
+
+```python
+opt = optimizer(optimizee, spaceSpec, iters=100, pointsStorage=stor)
+```
+
+5. Call it. You will get the hyperparams giving the minimal value found. If you provided it with a points storage, and injection of points is implemented for the backend you use, it will load the points from the storage, so you can transfer progress between optimizers. This will result in a metaoptimizer someday. And it will save all the probed points into the storage too.
+```python
+res = opt()
+```
+
+6. The optimizer-specific result (usually the optimizer object) is available via the `details` attribute.
+```python
+opt.details
+```
+
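+Putting the steps together, here is a minimal end-to-end sketch assembled from the snippets above (it assumes the `hyperopt` dependency of the `TPE` backend is installed; the points storage is omitted since it is optional):
+
+```python
+import typing
+
+import numpy as np
+import scipy.stats
+
+import UniOpt
+from UniOpt.core.Spec import *
+
+spaceSpec = {
+	"x": HyperDef(float, scipy.stats.uniform(loc=0, scale=10)),
+	"y": HyperDef(int, scipy.stats.uniform(loc=0, scale=10)),
+}
+
+def optimizee(hyperparamsDict: typing.Mapping[str, typing.Any]):
+	# minimize x**2 + y**2; the variance is unknown, so report 0
+	return (np.sum(np.array(tuple(hyperparamsDict.values())) ** 2), 0)
+
+opt = UniOpt.TPE(optimizee, spaceSpec, iters=100)
+best = opt()
+print(best)  # the best hyperparams dict found
+print(opt.details)  # the backend-specific details
+```
+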
+
+Implementing your own backend
+---------------------------
+See [Contributing.md](./Contributing.md).
+
+
+Implemented backends
+--------------------
+
+The backends for the following libraries have been implemented:
+
+|Name, link|License|PyPi|Build status|Coverage|Docs|Misc|
+|----------|-------|----|------------|--------|----|----|
+|[Hyperopt](https://github.com/hyperopt/hyperopt)|![Licence](https://img.shields.io/github/license/hyperopt/hyperopt.svg)|[![PyPi Status](https://img.shields.io/pypi/v/hyperopt.svg)](https://pypi.org/project/hyperopt)|[![Build status](https://github.com/hyperopt/hyperopt/actions/workflows/build.yml/badge.svg)](https://github.com/hyperopt/hyperopt/actions/workflows/build.yml)||https://hyperopt.github.io/hyperopt/|[![Conda package](https://anaconda.org/conda-forge/hyperopt/badges/version.svg)](https://anaconda.org/conda-forge/hyperopt)|
+|[hyperband](https://github.com/zygmuntz/hyperband) (no official package, backend imports `hyperband.py`)|[![PROPRIETARY license](https://img.shields.io/badge/license-Proprietary-F00.svg)](https://github.com/zygmuntz/hyperband/blob/master/LICENSE)||||
+|[hyperengine](https://github.com/maxim5/hyper-engine.git)|![Licence](https://img.shields.io/github/license/maxim5/hyper-engine.svg)|[![PyPi Status](https://img.shields.io/pypi/v/hyperengine.svg)](https://pypi.org/project/hyperengine)|[![TravisCI Build Status](https://travis-ci.org/maxim5/hyper-engine.svg?branch=master)](https://travis-ci.org/maxim5/hyper-engine)|
+|[GPyOpt](https://github.com/SheffieldML/GPyOpt)|![Licence](https://img.shields.io/github/license/SheffieldML/GPyOpt.svg)|[![PyPi Status](https://img.shields.io/pypi/v/gpyopt.svg)](https://pypi.org/project/gpyopt)|[![Build Status](https://travis-ci.org/SheffieldML/GPyOpt.svg?branch=master)](https://travis-ci.org/SheffieldML/GPyOpt)|[![CodeCov Coverage](http://codecov.io/github/SheffieldML/GPyOpt/coverage.svg?branch=master)](http://codecov.io/github/SheffieldML/GPyOpt?branch=master)|[![Read The Docs](https://readthedocs.org/projects/gpyopt/badge/)](https://readthedocs.org/projects/gpyopt/)
+|[scikit-optimize](https://github.com/scikit-optimize/scikit-optimize)|![Licence](https://img.shields.io/github/license/scikit-optimize/scikit-optimize.svg)|[![PyPi Status](https://img.shields.io/pypi/v/scikit-optimize.svg)](https://pypi.org/project/scikit-optimize)|[![Travis Build Status](https://travis-ci.org/scikit-optimize/scikit-optimize.svg?branch=master)](https://travis-ci.org/scikit-optimize/scikit-optimize)||https://scikit-optimize.github.io/|[![Conda package](https://anaconda.org/conda-forge/scikit-optimize/badges/version.svg)](https://anaconda.org/conda-forge/scikit-optimize)[![CircleCI Build Status](https://circleci.com/gh/scikit-optimize/scikit-optimize/tree/master.svg?style=shield&circle-token=:circle-token)](https://circleci.com/gh/scikit-optimize/scikit-optimize)[![Zenodo DOI](https://zenodo.org/badge/54340642.svg)](https://zenodo.org/badge/latestdoi/54340642)
+|[SMAC](https://github.com/automl/SMAC3)|![Licence](https://img.shields.io/github/license/automl/SMAC3.svg)||[![Build status](https://github.com/automl/SMAC3/actions/workflows/pytest.yml/badge.svg?branch=main)](https://github.com/automl/SMAC3/actions/workflows/pytest.yml)|[![Codecov Coverage](https://codecov.io/gh/automl/SMAC3/branch/development/graph/badge.svg)](https://codecov.io/gh/automl/SMAC3)|https://automl.github.io/SMAC3/|[![Code Health](https://landscape.io/github/automl/SMAC3/development/landscape.svg?style=flat)](https://landscape.io/github/automl/SMAC3/development)|
+|[ECABC](https://github.com/ECRL/ecabc)|![Licence](https://img.shields.io/github/license/ECRL/ecabc.svg)|[![PyPi Status](https://img.shields.io/pypi/v/ECabc.svg)](https://pypi.org/project/ECabc)|
+|[optunity](https://github.com/claesenm/optunity)|![Licence](https://img.shields.io/github/license/claesenm/optunity.svg)|[![PyPi Status](https://img.shields.io/pypi/v/Optunity.svg)](https://pypi.org/project/Optunity)|[![Build Status](https://travis-ci.org/claesenm/optunity.svg?branch=master)](https://travis-ci.org/claesenm/optunity)||[![Read The Docs](https://readthedocs.org/projects/optunity/badge/)](https://readthedocs.org/projects/optunity/)|
+|[Yabox](https://github.com/pablormier/yabox)|![Licence](https://img.shields.io/github/license/pablormier/yabox.svg)|[![PyPi Status](https://img.shields.io/pypi/v/yabox.svg)](https://pypi.org/project/yabox)||||[![Zenodo DOI](https://zenodo.org/badge/97233963.svg)](https://zenodo.org/badge/latestdoi/97233963)|
+|[PySHAC](https://github.com/titu1994/pyshac)|![Licence](https://img.shields.io/github/license/titu1994/pyshac.svg)|[![PyPi Status](https://img.shields.io/pypi/v/pyshac.svg)](https://pypi.org/project/pyshac)|[![Build Status](https://travis-ci.org/titu1994/pyshac.svg?branch=master)](https://travis-ci.org/titu1994/pyshac)|[![Codecov Coverage](https://codecov.io/gh/titu1994/pyshac/branch/master/graph/badge.svg)](https://codecov.io/gh/titu1994/pyshac)|https://titu1994.github.io/pyshac/|
+|[RBFOpt](https://github.com/coin-or/rbfopt)|![Licence](https://img.shields.io/github/license/coin-or/rbfopt.svg)|[![PyPi Status](https://img.shields.io/pypi/v/rbfopt.svg)](https://pypi.org/project/rbfopt)|||||[![Read The Docs](https://readthedocs.org/projects/rbfopt/badge/)](https://rbfopt.readthedocs.io/en/latest/)
+|[fmfn/BayesianOptimization](https://github.com/fmfn/BayesianOptimization)|![Licence](https://img.shields.io/github/license/fmfn/BayesianOptimization.svg)|[![PyPi Status](https://img.shields.io/pypi/v/bayesian-optimization.svg)](https://pypi.org/project/bayesian-optimization)|[![TravisCI Build Status](https://img.shields.io/travis/fmfn/BayesianOptimization/master.svg)](https://travis-ci.org/fmfn/BayesianOptimization)|[![Codecov Coverage](https://codecov.io/github/fmfn/BayesianOptimization/badge.svg?branch=master&service=github)](https://codecov.io/github/fmfn/BayesianOptimization?branch=master)
+|[pySOT](https://github.com/dme65/pySOT)|![Licence](https://img.shields.io/github/license/dme65/pySOT.svg)|[![PyPi Status](https://img.shields.io/pypi/v/pySOT.svg)](https://pypi.org/project/pySOT)|[![TravisCI Build Status](https://img.shields.io/travis/dme65/pySOT/master.svg)](https://travis-ci.org/dme65/pySOT)|[![Codecov Coverage](https://codecov.io/github/dme65/pySOT/badge.svg?branch=master&service=github)](https://codecov.io/github/dme65/pySOT?branch=master) |[![Read The Docs](https://readthedocs.org/projects/pysot/badge/?version=latest)](http://pysot.readthedocs.io/en/latest/?badge=latest)|[![Zenodo DOI](https://zenodo.org/badge/36836292.svg)](https://zenodo.org/badge/latestdoi/36836292)
+|[RoBO](https://github.com/automl/RoBO)|![Licence](https://img.shields.io/github/license/automl/RoBO.svg)|[![PyPi Status](https://img.shields.io/pypi/v/RoBO.svg)](https://pypi.org/project/RoBO)|[![TravisCI Build Status](https://img.shields.io/travis/automl/RoBO/master.svg)](https://travis-ci.org/automl/RoBO)|[![Coveralls Coverage](https://coveralls.io/repos/github/automl/RoBO/badge.svg?branch=master)](https://coveralls.io/github/automl/RoBO?branch=master)|https://automl.github.io/RoBO/|[![Landscape Health](https://landscape.io/github/automl/RoBO/master/landscape.svg?style=flat)](https://landscape.io/github/automl/RoBO/master)
+|~~[SOpt](https://github.com/Lyrichu/sopt)~~|![Licence](https://img.shields.io/github/license/Lyrichu/sopt.svg)|[![PyPi Status](https://img.shields.io/pypi/v/sopt.svg)](https://pypi.org/project/sopt)||||Calls the same target function even if unneeded
+|[BayTune / BTB](https://github.com/HDI-Project/BTB)|![Licence](https://img.shields.io/github/license/HDI-Project/BTB.svg)|[![PyPi Status](https://img.shields.io/pypi/v/baytune.svg)](https://pypi.org/project/baytune)|[![TravisCI Build Status](https://img.shields.io/travis/HDI-Project/BTB/master.svg)](https://travis-ci.org/HDI-Project/BTB)|[![CodeCov Coverage](https://codecov.io/github/HDI-Project/BTB/coverage.svg?branch=master)](https://codecov.io/github/HDI-Project/BTB/)|https://hdi-project.github.io/BTB/|
diff --git a/UNLICENSE b/UNLICENSE
new file mode 100644
index 0000000..efb9808
--- /dev/null
+++ b/UNLICENSE
@@ -0,0 +1,24 @@
+This is free and unencumbered software released into the public domain.
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to <https://unlicense.org/>
diff --git a/UniOpt/__init__.py b/UniOpt/__init__.py
new file mode 100644
index 0000000..800f493
--- /dev/null
+++ b/UniOpt/__init__.py
@@ -0,0 +1,73 @@
+import sys
+
+from .backends.bayesian import Bayesian
+from .backends.BayTune import BayTuneGCP, BayTuneGCPEi, BayTuneGP, BayTuneGPEi
+from .backends.ecabc import BeeColony
+from .backends.EvoFuzzy import EvoFuzzy
+#from .backends.SMAC import SMAC
+from .backends.GPyOpt import GPyOptOptimizer
+from .backends.hyperband import HyperBand
+from .backends.hyperengine import HyperEngineBayesian, HyperEnginePortfolio
+from .backends.hyperopt import TPE, Random
+from .backends.optunity import CMA_ES, NelderMead
+from .backends.optunity import OptunityOptimizer as Optunity
+from .backends.optunity import ParticleSwarm, Sobol
+from .backends.pyshac import PySHAC
+from .backends.pySOT import PySOT
+from .backends.rbfopt import MSRSM, Gutmann
+from .backends.RoBO import RoBOForest, RoBOGP
+from .backends.simple_spearmint import SimpleSpearmint
+from .backends.skopt import SKOptBayesian, SKOptExtraTrees, SKOptForest, SKOptGBTree
+from .backends.SOpt import SOptGA, SOptSGA
+from .backends.yabox import Yabox
+from .backends.ypde import YPDE
+from .utils import IterableModule
+
+
+class Optimizers(IterableModule):
+ __all__ = ("TPE", "Random", "Optunity", "ParticleSwarm", "Sobol", "NelderMead", "BeeColony", "Yabox", "PySHAC", "HyperEngineBayesian", "HyperEnginePortfolio", "SKOptBayesian", "Forest", "GBTree", "ExtraTrees", "MSRSM", "Gutmann", "HyperBand", "GPyOpt", "Bayesian", "PySOT")
+ TPE = TPE
+ Random = Random
+ Optunity = Optunity
+ Bayesian = Bayesian
+ BeeColony = BeeColony
+ GPyOpt = GPyOptOptimizer
+ Yabox = Yabox
+ HyperEngineBayesian = HyperEngineBayesian
+ HyperEnginePortfolio = HyperEnginePortfolio
+
+ HyperBand = HyperBand
+
+ MSRSM = MSRSM
+ Gutmann = Gutmann
+
+ SKOptBayesian = SKOptBayesian
+ Forest = SKOptForest
+ GBTree = SKOptGBTree
+ ExtraTrees = SKOptExtraTrees
+
+ PySHAC = PySHAC
+
+ ParticleSwarm = ParticleSwarm
+ Sobol = Sobol
+ NelderMead = NelderMead
+ CMA_ES = CMA_ES # seems to be broken in optunity - doesn't catch ConstraintViolation
+
+ #SMAC = SMAC
+ Spearmint = SimpleSpearmint # very slow, broken (predicts the same points) and proprietary
+
+ EvoFuzzy = EvoFuzzy
+ YPDE = YPDE
+
+ SOptSGA = SOptSGA
+ SOptGA = SOptGA
+
+ PySOT = PySOT
+
+ BayTuneGP = BayTuneGP
+
+ RoBOForest = RoBOForest
+ RoBOGP = RoBOGP
+
+
+sys.modules[__name__] = Optimizers(__name__)
diff --git a/UniOpt/__main__.py b/UniOpt/__main__.py
new file mode 100644
index 0000000..4e84f1d
--- /dev/null
+++ b/UniOpt/__main__.py
@@ -0,0 +1,4 @@
+from .core.PointsStorage import *
+
+if __name__ == "__main__":
+ pass
diff --git a/UniOpt/backends/BayTune.py b/UniOpt/backends/BayTune.py
new file mode 100644
index 0000000..9fc1f2a
--- /dev/null
+++ b/UniOpt/backends/BayTune.py
@@ -0,0 +1,85 @@
+import typing
+from functools import partial
+
+from lazily import hyperopt
+from lazily.btb.hyper_parameter import CatHyperParameter, FloatHyperParameter, HyperParameter, IntHyperParameter, ParamTypes
+from lazily.btb.tuning import GCP, GP, GCPEi, GCPEiVelocity, GPEi, GPEiVelocity
+from lazy_object_proxy import Proxy
+
+from ..core.HyperparamVector import HyperparamVector
+from ..core.MetaSpec import *
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..core.Spec import HyperparamDefinition
+from ..imports import *
+
+
+class BayTuneSpec(MSpec(scalarMode=ScalarMetaMap.degenerateCategory)):
+ class HyperparamsSpecsConverters:
+ def randint(k, dist, tp):
+ return HyperParameter(ParamTypes.INT, [dist.a, dist.b])
+
+ def uniform(k, dist, tp):
+ if tp is int:
+ tpE = ParamTypes.INT
+ elif tp is float:
+ tpE = ParamTypes.FLOAT
+ else:
+ raise NotImplementedError(tp)
+ return HyperParameter(tpE, [dist.ppf(0), dist.ppf(1)]) # do not call the specialized classes, there is a bug causing `__new__` to return None in the base class in these cases
+
+ #def norm(k, dist, tp):
+ # ctor=(NormalIntegerHyperparameter if tp is int else NormalFloatHyperparameter)
+ # return ctor(k, dist.mean(), dist.std(), default_value=tp(dist.mean()))
+ def _categorical(k, categories):
+ return CatHyperParameter(ParamTypes.INT_CAT, categories)
+
+
+class BayTune(GenericOptimizer):
+ specType = BayTuneSpec
+ tunerClass = None
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None) -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+
+ def prepareScoring(self, spaceSpec):
+ spaceSpec = list(spaceSpec.items())
+ tuner = self.__class__.tunerClass(spaceSpec)
+ return (self.iters, "BayTune", tuner)
+
+ def injectPoints(self, pointz, bestPointIndex, tuner, initialize=False):
+ for p in pointz:
+ tuner.add(p[0], -p[1][0]) # stored points hold the raw (mean, variance); negate the mean to match the sign convention of invokeScoring
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, tuner) -> typing.Dict[str, typing.Union[float, int]]:
+ for i in range(self.iters):
+ hp = tuner.propose()
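+ # BTB tuners maximize their score, while UniOpt minimizes the loss, hence the negation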
+ loss = -fn(hp)[0]
+ tuner.add(hp, loss)
+ self.details = tuner
+ return tuner._best_hyperparams
+
+
+class BayTuneGP(BayTune):
+ tunerClass = GP
+
+
+class BayTuneGPEi(BayTuneGP):
+ tunerClass = GPEi
+
+
+class BayTuneGPEiVelocity(BayTuneGPEi):
+ tunerClass = GPEiVelocity
+
+
+class BayTuneGCP(BayTune):
+ tunerClass = GCP
+
+
+class BayTuneGCPEi(BayTuneGCP):
+ tunerClass = GCPEi
+
+
+class BayTuneGCPEiVelocity(BayTuneGCPEi):
+ tunerClass = GCPEiVelocity
diff --git a/UniOpt/backends/ConfigSpaceSpec.py b/UniOpt/backends/ConfigSpaceSpec.py
new file mode 100644
index 0000000..5ed488a
--- /dev/null
+++ b/UniOpt/backends/ConfigSpaceSpec.py
@@ -0,0 +1,41 @@
+from lazily.ConfigSpace import ConfigurationSpace
+from lazily.ConfigSpace.hyperparameters import CategoricalHyperparameter, Constant, NormalFloatHyperparameter, NormalIntegerHyperparameter, UniformFloatHyperparameter, UniformIntegerHyperparameter
+
+from ..core.HyperparamVector import HyperparamVector
+from ..core.MetaSpec import *
+from ..imports import *
+
+
+class ConfigSpaceSpecVec(HyperparamVector):
+ @classmethod
+ def dict2native(cls, dic: typing.Dict[str, typing.Any], spec) -> typing.Iterable[typing.Any]:
+ if dic:
+ cs = ConfigurationSpace()
+ cs.add_hyperparameters(dic.values())
+ return cs
+ else:
+ return None
+
+
+class ConfigSpaceSpec(MSpec()):
+ hyperparamsVectorType = HyperparamVector
+ #hyperparamsVectorType = HyperparamArray
+ hyperparamsSpecType = ConfigSpaceSpecVec
+
+ class HyperparamsSpecsConverters:
+ def randint(k, dist, tp):
+ return UniformIntegerHyperparameter(k, dist.a, dist.b)
+
+ def uniform(k, dist, tp):
+ ctor = UniformIntegerHyperparameter if tp is int else UniformFloatHyperparameter
+ return ctor(name=k, lower=dist.ppf(0), upper=dist.ppf(1))
+
+ #def norm(k, dist, tp):
+ # ctor=(NormalIntegerHyperparameter if tp is int else NormalFloatHyperparameter)
+ # return ctor(k, dist.mean(), dist.std(), default_value=tp(dist.mean()))
+ def _categorical(k, categories):
+ return CategoricalHyperparameter(k, categories)
+
+ def scalarProcessor(self, i, k, v):
+ return Constant(k, v)
diff --git a/UniOpt/backends/EvoFuzzy.py b/UniOpt/backends/EvoFuzzy.py
new file mode 100644
index 0000000..da1e531
--- /dev/null
+++ b/UniOpt/backends/EvoFuzzy.py
@@ -0,0 +1,37 @@
+import warnings
+
+from lazily import diffevo
+from numpy import ndarray
+
+from UniOpt.core.ProgressReporter import ProgressReporter
+from UniOpt.core.Spec import HyperparamDefinition
+
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..core.SpecOnlyBoxes import ArraySpecOnlyBoxesNoIntegers
+from ..imports import *
+
+
+class EvoFuzzy(GenericOptimizer):
+ specType = ArraySpecOnlyBoxesNoIntegers
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Dict[str, typing.Union[HyperparamDefinition, int]], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, popSize: int = 30, mutation: float = 0.8, crossover: float = 0.7, mode: str = "best/1") -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+ assert popSize < iters
+ self.popSize = popSize
+ self.mode = mode
+ self.crossover = crossover
+ self.mutation = mutation
+
+ def prepareScoring(self, specSeq: typing.List[typing.Tuple[NumericT, NumericT]]) -> typing.Tuple[int, str, typing.List[typing.Tuple[NumericT, NumericT]]]:
+ return (self.iters, "EvoFuzzy diffevo", specSeq)
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, specSeq: typing.Iterable[typing.Tuple[NumericT, NumericT]]) -> ndarray:
+ def evofuzzyScore(hyperparamsSeq):
+ return fn(hyperparamsSeq)[0]
+
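+ # drain the generator: differential_evolution yields (best, unfitness) once per generation; the loop keeps the values from the final generation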
+ for best, unfitness in diffevo.differential_evolution(evofuzzyScore, specSeq, mut=self.mutation, crossprob=self.crossover, popsize=self.popSize, gens=self.iters // self.popSize, mode=self.mode):
+ pass
+ self.details = unfitness
+ return best
diff --git a/UniOpt/backends/EvoStra.py b/UniOpt/backends/EvoStra.py
new file mode 100644
index 0000000..00b10a0
--- /dev/null
+++ b/UniOpt/backends/EvoStra.py
@@ -0,0 +1,57 @@
+import typing
+from os.path import sep as pathSep
+
+import numpy as np
+from lazily import evostra
+from lazy_object_proxy import Proxy
+
+from ..core.ArraySpec import HyperparamArray
+from ..core.MetaSpec import *
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..imports import *
+from ..utils import resolveAvailablePath
+
+
+class EvoStraSpecArrayDict(HyperparamArray):
+ @classmethod
+ def dict2native(cls, hyperparamsNative: typing.Dict[str, typing.Any], spec) -> typing.Dict[str, typing.Any]:
+ spaceSpec = np.array(super().dict2native(hyperparamsNative, spec))
+ return {"dimension": spaceSpec.shape[0], "var_lower": spaceSpec[:, 1], "var_upper": spaceSpec[:, 2], "var_type": spaceSpec[:, 0]}
+
+
+class EvoStraSpec(MSpec(isArray=True, scalarMode=ScalarMetaMap.noScalars, integerMode=IntegerMetaMap.noIntegers)):
+ hyperparamsSpecType = EvoStraSpecArrayDict
+
+ class HyperparamsSpecsConverters:
+ def randint(k, dist, tp):
+ return ("I", dist.a, dist.b)
+
+ def uniform(k, dist, tp):
+ return (tp.__name__[0].upper(), dist.ppf(0), dist.ppf(1))
+
+
+class EvoStra(GenericOptimizer):
+ specType = EvoStraSpec
+
+ optimizer = None
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, std=0.5, popSize=50, learningRate=0.1) -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+ self.std = std
+ self.popSize = popSize
+ self.learningRate = learningRate
+
+ def prepareScoring(self, spaceSpec: typing.Tuple[typing.Tuple[str, float, float], typing.Tuple[str, int, int]]) -> typing.Tuple[int, str, typing.Tuple[typing.Tuple[str, float, float], typing.Tuple[str, int, int]]]:
+ return (self.iters, "evostra " + self.__class__.optimizer, spaceSpec)
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, spaceSpec) -> np.ndarray:
+ def evostraScore(hyperparams):
+ return -fn(hyperparams)[0]
+
+ spaceSpec = np.array(spaceSpec)
+ initialPoint = spaceSpec.mean(axis=1)
+
+ es = evostra.EvolutionStrategy(initialPoint, evostraScore, population_size=self.popSize, sigma=self.std, learning_rate=self.learningRate, decay=1.0, num_threads=self.jobs)
+ res = es.run(self.iters // self.popSize, print_step=None)
+ return res
diff --git a/UniOpt/backends/GPyOpt.py b/UniOpt/backends/GPyOpt.py
new file mode 100644
index 0000000..46fd0a2
--- /dev/null
+++ b/UniOpt/backends/GPyOpt.py
@@ -0,0 +1,82 @@
+from lazily import GPyOpt
+
+from ..core.MetaSpec import *
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..imports import *
+from ..utils import notInitializedFunction
+
+
+class GPyOptSpec(MSpec(isArray=True, scalarMode=ScalarMetaMap.degenerateCategory, integerMode=IntegerMetaMap.floatIntegers)):
+ class HyperparamsSpecsConverters:
+ def randint(k, dist, tp):
+ return __class__._categorical(k, range(dist.a, dist.b))
+
+ def uniform(k: str, dist, tp: type):
+ if tp is float:
+ return {"name": k, "domain": (dist.ppf(0), dist.ppf(1)), "type": "continuous"}
+ else:
+ return __class__._categorical(k, range( int(dist.ppf(0)), int(dist.ppf(1)) ))
+ # {'name': k, 'domain': range(int(dist.ppf(0)), int(dist.ppf(1))), "type": "discrete"}
+
+ def _categorical(k, categories):
+ return {"name": k, "domain": categories, "type": "discrete"} # in gpyopt categorical returns one-hot
+
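+# A worked example of the ppf-based bounds above (plain scipy, nothing
+# GPyOpt-specific): scipy.stats.uniform(loc=2, scale=3) has ppf(0) == 2.0
+# and ppf(1) == 5.0, so a float hyperparameter over it becomes
+# {"name": k, "domain": (2.0, 5.0), "type": "continuous"}.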
+
+class GPyOptOptimizer(GenericOptimizer):
+ specType = GPyOptSpec
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, acquisitionOptimizerType="lbfgs", acquisitionType="EI", acquisitionTradeoff: float = 0.1, modelType="GP", initialDesignNumData=5, initialDesignType="random", exactFeval=False, modelUpdateInterval=1, evaluatorType="sequential", batchSize=1, dedup=False, eps=0, modular=False):
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+ self.modelType = modelType
+ self.acquisitionType = acquisitionType
+ self.batchSize = batchSize
+ self.eps = eps
+ self.evaluatorType = evaluatorType
+ self.acquisitionTradeoff = acquisitionTradeoff
+ self.exactFeval = exactFeval
+ self.acquisitionOptimizerType = acquisitionOptimizerType
+ self.modelUpdateInterval = modelUpdateInterval
+ self.initialDesignType = initialDesignType
+ self.initialDesignNumData = initialDesignNumData
+ self.modular = modular
+ self.dedup = dedup
+
+ def prepareScoring(self, specSeq):
+ optimizer = GPyOpt.methods.BayesianOptimization(f=None, domain=specSeq, model_type=self.modelType, acquisition_type=self.acquisitionType, evaluator_type=self.evaluatorType, maximize=False, verbosity=True, verbosity_model=True, de_duplication=self.dedup, num_cores=self.jobs, batch_size=self.batchSize, eps=self.eps, initial_design_type=self.initialDesignType, initial_design_numdata=self.initialDesignNumData, exact_feval=self.exactFeval, acquisition_optimizer_type=self.acquisitionOptimizerType, model_update_interval=self.modelUpdateInterval, X=np.empty((0, len(specSeq))), Y=np.empty((0, 1)), acquisition_weight=self.acquisitionTradeoff) # vital to make it do all the iterations
+ optimizer.X = None
+ optimizer.Y = None
+ optimizer.modular_optimization = self.modular
+ return (self.iters, "GPyOpt", optimizer)
+
+ def injectPoints(self, pointz, bestPointIndex, optimizer, initialize=False):
+ hps = []
+ losses = []
+ for p in pointz:
+ hps.append(p[0])
+ losses.append([p[1][0]])
+ hps = np.array(hps)
+ losses = np.array(losses)
+ if optimizer.X is None:
+ optimizer.X = np.empty((0, len(hps[0])))
+ optimizer.Y = np.empty((0, 1))
+
+ optimizer.X = np.concatenate((optimizer.X, hps), axis=0)
+ optimizer.Y = np.concatenate((optimizer.Y, losses), axis=0)
+ #optimizer.cost.update_cost_model(hps, times)?
+
+ def invokeScoring(self, fn, pb, optimizer):
+ def gpyScore(hyperparams):
+ res = fn(hyperparams[0])
+ return res[0]
+
+ optimizer.f = optimizer._sign(gpyScore)
+
+ optimizer.objective = GPyOpt.core.task.objective.SingleObjective(optimizer.f, optimizer.batch_size, optimizer.objective_name)
+ optimizer._init_design_chooser()
+
+ optimizer.run_optimization(max_iter=self.iters - self.initialDesignNumData)
+ self.details = optimizer
+ best = optimizer.X[np.argmin(optimizer.Y)]
+ return best
diff --git a/UniOpt/backends/RoBO.py b/UniOpt/backends/RoBO.py
new file mode 100644
index 0000000..f39073e
--- /dev/null
+++ b/UniOpt/backends/RoBO.py
@@ -0,0 +1,83 @@
+import warnings
+
+from lazily.robo.fmin import bayesian_optimization
+
+from UniOpt.core.ProgressReporter import ProgressReporter
+from UniOpt.core.Spec import HyperDef
+
+from ..core.ArraySpec import HyperparamArray
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..core.SpecOnlyBoxes import ArraySpecOnlyBoxesNoIntegers
+from ..imports import *
+
+
+class RoBOSpecArrayDict(HyperparamArray):
+ @classmethod
+ def dict2native(cls, hyperparamsNative: typing.Dict[str, typing.Any], spec) -> typing.Dict[str, typing.Any]:
+ spaceSpec = np.array(super().dict2native(hyperparamsNative, spec))
+ return {"lower": spaceSpec[:, 0], "upper": spaceSpec[:, 1]}
+
+
+class RoBOSpec(ArraySpecOnlyBoxesNoIntegers):
+ hyperparamsSpecType = RoBOSpecArrayDict
+
+ class HyperparamsSpecsConverters:
+ def uniform(k, dist, tp):
+ return (dist.ppf(0), dist.ppf(1))
+
+
+class RoBO(GenericOptimizer):
+ specType = RoBOSpec
+ modelType = None
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Dict[str, typing.Union[HyperparamDefinition, int]], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, acquisitionOptimizerType="random", acquisitionType="log_ei") -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+ self.acquisitionType = acquisitionType.lower()
+ self.acquisitionOptimizerType = acquisitionOptimizerType
+
+ def prepareScoring(self, specArrayDict):
+ context = type(specArrayDict)(specArrayDict)
+ context["X_init"] = np.array([])
+ context["Y_init"] = np.array([])
+ return (self.iters, "RoBO (" + self.__class__.modelType + ")", specArrayDict)
+
+ def injectPoints(self, pointz, bestPointIndex, context, initialize=False):
+ points = []
+ losses = []
+ for p in pointz:
+ points.append(p[0])
+ losses.append(p[1][0])
+ points = np.array(points)
+ losses = np.array(losses)
+
+ if initialize:
+ context["X_init"] = points
+ context["Y_init"] = losses
+ else:
+ context["X_init"] = np.concatenate(points, context["X_init"])
+ context["Y_init"] = np.concatenate(losses, context["Y_init"])
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, context) -> np.ndarray:
+ def roboScore(hyperparamsSeq):
+ return fn(hyperparamsSeq)[0]
+
+ self.details = bayesian_optimization(roboScore, num_iterations=self.iters, maximizer=self.acquisitionOptimizerType, acquisition_func=self.acquisitionType, model_type=self.__class__.modelType, n_init=3, **context)
+ return self.details["x_opt"]
+
+
+class RoBOGPMCMC(RoBO):
+ modelType = "gp_mcmc"
+
+
+class RoBOGP(RoBO):
+ modelType = "gp"
+
+
+class RoBOForest(RoBO):
+ modelType = "rf"
+
+
+class RoBOBohamiann(RoBO):
+ modelType = "bohamiann"
diff --git a/UniOpt/backends/SMAC.py b/UniOpt/backends/SMAC.py
new file mode 100644
index 0000000..908728c
--- /dev/null
+++ b/UniOpt/backends/SMAC.py
@@ -0,0 +1,56 @@
+import lazily.smac.optimizer.acquisition as acqTypes
+from lazily.smac.facade.smac_facade import SMAC as SMACFacade
+from lazily.smac.scenario.scenario import Scenario
+from lazily.smac.tae.execute_func import ExecuteTAFuncDict
+
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..imports import *
+from .ConfigSpaceSpec import ConfigSpaceSpec
+
+
+class SMAC(GenericOptimizer):
+ specType = ConfigSpaceSpec
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, acquisitionType="EI", acquisitionOptimizerType="InterleavedLocalAndRandomSearch", intensifier=None, SMBOClass: "smac.optimizer.smbo.SMBO" = None, acquisitionTradeoff: float = 0.1):
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+ if isinstance(acquisitionType, str):
+ acquisitionType = getattr(acqTypes, acquisitionType)
+ self.acquisitionType = acquisitionType
+
+ if isinstance(acquisitionOptimizerType, str):
+ import importlib
+
+ optModName = "smac.optimizer." + acquisitionType.__name__.lower() + "_optimization"
+ optMod = importlib.import_module(optModName)
+ acquisitionOptimizerType = getattr(optMod, acquisitionOptimizerType)
+ self.acquisitionOptimizerType = acquisitionOptimizerType
+ self.acquisitionTradeoff = acquisitionTradeoff
+
+ self.intensifier = intensifier
+ self.SMBOClass = SMBOClass
+
+ def prepareScoring(self, spaceSpec):
+ scenario = Scenario({"run_obj": "quality", "cs": spaceSpec, "deterministic": "true", "initial_incumbent": "DEFAULT", "runcount_limit": self.iters})
+ model = self.createModel()
+ acquisition = self.acquisitionType(self.acquisitionTradeoff, model)
+ smac = SMACFacade(scenario=scenario, tae_runner=None, acquisition_function=acquisition, acquisition_function_optimizer=self.acquisitionOptimizerType, model=model, smbo_class=self.SMBOClass)
+ return (self.iters, "SMAC3", smac)
+
+ def createModel(self):
+ raise NotImplementedError()
+
+ def invokeScoring(self, fn, pb, smac):
+ def smacScore(hyperparamsDict):
+ return fn(hyperparamsDict)
+
+ smac.tae_runner = ExecuteTAFuncDict(ta=smacScore)
+ best = smac.optimize()
+ return best
+
+
+class SMACRandomForest(SMAC):
+ def createModel(self):
+ from smac.epm.rf_with_instances import RandomForestWithInstances
+
+ return RandomForestWithInstances()
diff --git a/UniOpt/backends/SOpt.py b/UniOpt/backends/SOpt.py
new file mode 100644
index 0000000..d7e5de8
--- /dev/null
+++ b/UniOpt/backends/SOpt.py
@@ -0,0 +1,65 @@
+import warnings
+
+from lazily.sopt.GA.GA import GA
+from lazily.sopt.SGA.SGA import SGA
+
+from UniOpt.core.ProgressReporter import ProgressReporter
+from UniOpt.core.Spec import HyperDef
+
+from ..core.ArraySpec import HyperparamArray
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..core.SpecOnlyBoxes import ArraySpecOnlyBoxesNoIntegers
+from ..imports import *
+
+
+class SOptSpecArrayDict(HyperparamArray):
+ @classmethod
+ def dict2native(cls, hyperparamsNative: typing.Dict[str, typing.Any], spec) -> typing.Dict[str, typing.Any]:
+ spaceSpec = np.array(super().dict2native(hyperparamsNative, spec))
+ return {"variables_num": spaceSpec.shape[0], "lower_bound": spaceSpec[:, 0], "upper_bound": spaceSpec[:, 1]}
+
+
+class SOptSpec(ArraySpecOnlyBoxesNoIntegers):
+ hyperparamsSpecType = SOptSpecArrayDict
+
+ class HyperparamsSpecsConverters:
+ def uniform(k, dist, tp):
+ return (dist.ppf(0), dist.ppf(1))
+
+
+class SOpt(GenericOptimizer):
+ specType = SOptSpec
+ algoClass = None
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Dict[str, typing.Union[HyperparamDefinition, int]], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, popSize: int = 30, mutation: float = 0.8, crossover: float = 0.7) -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+ self.popSize = popSize
+ self.crossover = crossover
+ self.mutation = mutation
+
+ def prepareScoring(self, specArrayDict: typing.Dict[str, typing.Any]):
+ optimizer = self.__class__.algoClass(func=None, func_type="min", generations=self.iters, cross_rate=self.crossover, mutation_rate=self.mutation, population_size=self.popSize, **specArrayDict)
+ optimizer.generations //= optimizer.population_size
+ if optimizer.generations < 2:
+ optimizer.generations = 2
+
+ return ((optimizer.generations + 1) * optimizer.population_size, "SOpt", optimizer)
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, optimizer) -> np.ndarray:
+ def soptScore(hyperparamsSeq):
+ return fn(hyperparamsSeq)[0]
+
+ optimizer.func = soptScore
+
+ optimizer.run()
+ return optimizer.generations_best_points[optimizer.global_best_index]
+
+
+class SOptSGA(SOpt):
+ algoClass = SGA
+
+
+class SOptGA(SOpt):
+ algoClass = GA
diff --git a/UniOpt/backends/bayesian.py b/UniOpt/backends/bayesian.py
new file mode 100644
index 0000000..1497431
--- /dev/null
+++ b/UniOpt/backends/bayesian.py
@@ -0,0 +1,40 @@
+from lazily import bayes_opt
+
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.SpecOnlyBoxes import *
+from ..imports import *
+
+
+class Bayesian(GenericOptimizer):
+ specType = SpecOnlyBoxesNoIntegers
+
+ def __init__(self, blackBoxFunc, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, initPoints=None):
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+ if initPoints is None:
+ initPoints = iters // 10
+ if not initPoints:
+ initPoints = 1
+ self.initPoints = initPoints
+
+ def injectPoints(self, pointz, bestPointIndex, optimizer, initialize=False):
+ for p in pointz:
+ optimizer.register(params=p[0], target=-p[1][0])
+
+ def prepareScoring(self, spaceSpec):
+ optimizer = bayes_opt.BayesianOptimization(None, spaceSpec, verbose=0)
+ return (self.iters, "bayesian", optimizer)
+
+ def invokeScoring(self, fn, pb, optimizer):
+ def boScore(**hyperparams):
+ res = fn(hyperparams)
+ return -res[0]
+
+ optimizer._space.target_func = boScore
+ optimizer.maximize(init_points=self.initPoints, n_iter=self.iters - self.initPoints)
+
+ res = optimizer.max
+ best = res["params"]
+ self.details = optimizer._space
+ return best
diff --git a/UniOpt/backends/ecabc.py b/UniOpt/backends/ecabc.py
new file mode 100644
index 0000000..de7e9d6
--- /dev/null
+++ b/UniOpt/backends/ecabc.py
@@ -0,0 +1,83 @@
+import typing
+import warnings
+from math import sqrt
+
+from lazily.ecabc import abc
+
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import HyperparamDefinition
+from ..core.SpecOnlyBoxes import SpecOnlyBoxes
+from ..imports import *
+from ..utils import notInitializedFunction
+from ..utils.coresCount import getCoreCount
+
+
+class BeeColonyGridSpec(SpecOnlyBoxes):
+ class HyperparamsSpecsConverters:
+ def uniform(k: str, dist, tp: type):
+ res = (float(dist.ppf(0)), float(dist.ppf(1)))
+
+ if tp is int:
+ res = tuple(int(round(e)) for e in res)
+
+ return res
+
+
+maxDefaultAmountOfEmployers = 50
+
+
+class BeeColony(GenericOptimizer):
+ specType = BeeColonyGridSpec
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 1, pointsStorage: PointsStorage = None, numEmployers=None) -> None:
+ if jobs is None:
+ jobs = getCoreCount()
+
+ if jobs != 1:
+ warnings.warn("Multiprocessing is not supported for this solver: it uses `pickle` and you will get AttributeError: Can't pickle local object 'BeeColony.invokeScoring..abcScore' . Setting count of jobs to 1")
+ jobs = 1
+
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+
+ if numEmployers is None:
+ numEmployers = min(round(sqrt(iters)), maxDefaultAmountOfEmployers)
+
+ self.numEmployers = numEmployers
+
+ self.generations = iters // numEmployers
+
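+ # Worked example of the defaults above: with iters=1000,
+ # numEmployers = min(round(sqrt(1000)), 50) = min(32, 50) = 32,
+ # so generations = 1000 // 32 = 31.
+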
+ def prepareScoring(self, spaceSpec) -> typing.Tuple[int, str, typing.Iterable[typing.List[typing.Union[str, typing.Tuple[float, float], typing.Tuple[int, int]]]]]:
+ colony = abc.ABC(self.numEmployers, objective_fn=notInitializedFunction, num_processes=self.jobs)
+ for name, limits in spaceSpec.items():
+ colony.add_param(min_val=limits[0], max_val=limits[1], name=name)
+
+ return (self.iters, "BeeColony", colony)
+
+ def injectPoints(self, pointz, bestPointIndex, colony, initialize=False):
+ if initialize:
+ for employersInitialized, p in enumerate(pointz):
+ if employersInitialized >= colony._num_employers:
+ break
+ employer = abc.Bee(p[0])
+ employer.score = p[1][0]
+ colony._employers.append(employer)
+ colony._num_employers = colony._num_employers - employersInitialized # the remaining employers will be created by create_employers, which creates colony._num_employers of them
+ else:
+ raise NotImplementedException("Mixing values into partially initialized _employers is not yet implemented")
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, colony) -> typing.List[typing.Union[float, int]]:
+ def abcScore(hyperparamsDict):
+ return fn(hyperparamsDict)[0]
+
+ colony._fitness_fxn = abcScore
+ #colony._minimize = True
+ #colony._args # TODO: keyword arguments
+ colony.create_employers() # create the missing employers; create_employers creates colony._num_employers of them
+ colony._num_employers = self.numEmployers
+ for i in range(self.generations):
+ colony.run_iteration()
+ return colony.best_performer[1]
diff --git a/UniOpt/backends/hyperband.py b/UniOpt/backends/hyperband.py
new file mode 100644
index 0000000..1776337
--- /dev/null
+++ b/UniOpt/backends/hyperband.py
@@ -0,0 +1,45 @@
+import math
+import typing
+from functools import partial
+
+import sklearn
+from lazily import hyperband
+from numpy import float64
+
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..imports import *
+from .hyperopt import HyperOptSpec, hyperopt, hyperoptScore
+from .sklearn import SKLearnRandomizedSpec
+
+msel = lazyImport("sklearn.model_selection")
+
+
+class HyperBand(GenericOptimizer):
+ specType = SKLearnRandomizedSpec
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None) -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+
+ def prepareScoring(self, spaceSpec: typing.Dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_frozen, typing.Tuple[int]]]) -> typing.Tuple[int, str, typing.Dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_frozen, typing.Tuple[int]]]]:
+ return (self.iters, "HyperBand", spaceSpec)
+
+ def utilizeNIterations(self, nIterations: IntT, hyperparams: typing.Dict[str, NumericT]) -> None:
+ """Redefine it to make hyperband use nIterations"""
+ pass
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, spaceSpec: typing.Dict[str, typing.Union[scipy.stats._distn_infrastructure.rv_frozen, typing.Tuple[int]]]) -> typing.Dict[str, NumericT]:
+ s = msel.ParameterSampler(spaceSpec, n_iter=1)
+
+ def sampleParams(s):
+ return next(iter(s))
+
+ def hyperbandScore(n_iterations, hyperparams):
+ # do something with n_iterations, e.g. run another optimization method for n_iterations
+ self.utilizeNIterations(n_iterations, hyperparams)
+ return hyperoptScore(fn, hyperparams)
+
+ hb = hyperband.Hyperband(partial(sampleParams, s), hyperbandScore)
+ self.trials = hb.run()
+ return self.trials[-1]["params"]
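+
+# A minimal sketch of the sampling step used above (sklearn API):
+#
+#   from sklearn.model_selection import ParameterSampler
+#   import scipy.stats
+#   s = ParameterSampler({"lr": scipy.stats.uniform(0, 1)}, n_iter=1)
+#   next(iter(s)) # one fresh configuration, e.g. {"lr": 0.42}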
diff --git a/UniOpt/backends/hyperengine.py b/UniOpt/backends/hyperengine.py
new file mode 100644
index 0000000..bc5714b
--- /dev/null
+++ b/UniOpt/backends/hyperengine.py
@@ -0,0 +1,102 @@
+import typing
+import warnings
+from functools import partial
+
+import numpy as np
+from lazily import hyperengine
+from lazy_object_proxy import Proxy
+from numpy import float64
+
+from ..core.MetaSpec import *
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..imports import *
+from ..utils import dummyFunction
+
+
+class BlackBoxSolver:
+ def __init__(self, func):
+ self.func = func
+ self._val_loss_curve = []
+
+ def train(self):
+ loss = self.func()
+ self._val_loss_curve.append(loss)
+ return self._reducer(self._val_loss_curve)
+
+ def _reducer(self, *args, **kwargs):
+ return min(*args, **kwargs)
+
+ def terminate(self):
+ pass
+
+
+class HyperEngineSpec(MSpec(scalarMode=ScalarMetaMap.degenerateCategory, integerMode=IntegerMetaMap.noIntegers)):
+ class HyperparamsSpecsConverters:
+ def randint(k, dist, tp):
+ return __class__._categorical(k, range(int(dist.a), int(dist.b)))
+
+ def uniform(k, dist, tp):
+ return hyperengine.spec.uniform(dist.ppf(0), dist.ppf(1))
+
+ # normal etc. are implemented through the ppf, so there is no point in intrinsic support
+ def _categorical(k, categories):
+ return hyperengine.spec.choice(categories)
+
+
+class HyperEngine(GenericOptimizer):
+ specType = HyperEngineSpec
+ hyperTunerStrategy = None
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, **strategy_params) -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+ self.strategy_params = strategy_params
+ if hyperengine.base.logging.log is not dummyFunction:
+ hyperengine.base.logging.log = dummyFunction
+
+ def injectPoints(self, pointz, bestPointIndex, tuner, initialize=False):
+ numInjectedPoints = 0
+ for p in pointz:
+ x = p[0]
+ x1 = np.zeros(len(tuner._parsed._spec))
+ for i, k in enumerate(tuner._parsed._spec):
+ x1[i] = x[k]
+ tuner._strategy.add_point(x1, p[1][0])
+ numInjectedPoints += 1
+ tuner._max_points += numInjectedPoints
+
+ def prepareScoring(self, specDict: typing.Dict[str, typing.Union["hyperengine.spec.nodes.UniformNode", int]]) -> typing.Tuple[int, str, "hyperengine.HyperTuner"]:
+ tuner = hyperengine.HyperTuner(specDict, solver_generator=None, max_points=self.iters, strategy=self.__class__.hyperTunerStrategy, **self.strategy_params)
+ return (self.iters, "HyperTuner " + self.__class__.hyperTunerStrategy, tuner)
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, tuner) -> typing.Dict[str, NumericT]:
+ def htScore(hyperparamsDict):
+ return fn(hyperparamsDict)[0]
+
+ def solverGenerator(hyperparams):
+ return BlackBoxSolver(partial(htScore, hyperparams))
+
+ tuner._solver_generator = solverGenerator
+ tuner.tune()
+ return tuner._parsed.instantiate(tuner._strategy.points[np.argmin(tuner._strategy.values)])
+
+
+class HyperEngineBayesian(HyperEngine):
+ hyperTunerStrategy = "bayesian"
+
+
+def createDefaultPortfolioMethods():
+ from hyperengine.bayesian.strategy import utilities
+
+ return tuple(utilities.keys())
+
+
+defaultPortfolioMethods = Proxy(createDefaultPortfolioMethods)
+
+
+class HyperEnginePortfolio(HyperEngine):
+ hyperTunerStrategy = "portfolio"
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, **strategy_params):
+ if "methods" not in strategy_params:
+ strategy_params["methods"] = defaultPortfolioMethods
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, **strategy_params)
diff --git a/UniOpt/backends/hyperopt.py b/UniOpt/backends/hyperopt.py
new file mode 100644
index 0000000..ac2479b
--- /dev/null
+++ b/UniOpt/backends/hyperopt.py
@@ -0,0 +1,155 @@
+import typing
+from functools import partial
+
+from lazily import hyperopt
+from lazy_object_proxy import Proxy
+
+from ..core.HyperparamVector import HyperparamVector
+from ..core.MetaSpec import *
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..core.Spec import HyperparamDefinition
+from ..imports import *
+
+
+def hyperoptTrialData(mean, variance):
+ return {"loss": mean, "loss_variance": variance, "true_loss": mean, "true_loss_variance": variance, "status": hyperopt.STATUS_OK}
+
+
+def hyperoptScore(fn: typing.Callable, hyperparams: typing.Dict[str, typing.Union[float, int]]) -> typing.Dict[str, typing.Union[NumericT, str]]:
+ res = fn(hyperparams)
+ return hyperoptTrialData(res[0], res[1])
+
+
+class distsArgsRemap:
+ def randint(dist):
+ return [dist.a, dist.b]
+
+ def uniform(dist):
+ return [dist.ppf(0), dist.ppf(1)]
+
+ def norm(dist):
+ return [dist.mean(), dist.std()]
+
+
+class distsNamesRemap:
+ norm = "normal"
+
+
+def getNativeHyperoptDist(tp: typing.Union[typing.Type[float], typing.Type[int]], distName: str) -> typing.Callable:
+
+ if hasattr(distsNamesRemap, distName):
+ distName = getattr(distsNamesRemap, distName)
+
+ tDistName = distName
+ postProcessFunc = None
+
+ if issubclass(tp, int):
+ tDistName = "q" + tDistName
+ postProcessFunc = round
+
+ nativeHpDist = None
+ hp = hyperopt.hp
+
+ if hasattr(hp, tDistName):
+ nativeHpDist = getattr(hp, tDistName)
+ if issubclass(tp, int):
+ postProcessFunc = int
+
+ elif hasattr(hp, distName):
+ nativeHpDist = getattr(hp, distName)
+
+ return nativeHpDist, postProcessFunc
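+
+# Examples of the mapping above (standard hyperopt `hp` names):
+#   (float, "uniform") -> hp.uniform, no post-processing
+#   (int, "uniform") -> hp.quniform, results cast back with int()
+#   (float, "norm") -> renamed via distsNamesRemap to hp.normal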
+
+
+class HyperOptSpec(MSpec(scalarMode=ScalarMetaMap.degenerateCategory)):
+ HyperparamsSpecsConverters = None
+
+ def __init__(self, *args, **kwargs):
+ self.choices = {}
+ super().__init__(*args, **kwargs)
+
+ def transformHyperDefItemUniversal(self, k: str, v: typing.Union[typing.Tuple[int], HyperparamDefinition]) -> "hyperopt.pyll.base.Apply":
+ if isinstance(v, categoricalTypes):
+ self.choices[k] = v
+ return hyperopt.hp.choice(k, v)
+ else:
+ dist = v.distribution
+ distName = dist.dist.name
+
+ nativeHpDist, postProcessFunc = getNativeHyperoptDist(v.type, distName)
+ if nativeHpDist:
+ if hasattr(distsArgsRemap, distName):
+ args = getattr(distsArgsRemap, distName)(dist)
+ else:
+ args = [dist.dist.a, dist.dist.b]
+ postProcessFunc = round
+
+ if issubclass(v.type, int):
+ args.append(1) # q argument for integer attributes; without it hyperopt raises an error internally
+
+ if postProcessFunc:
+ self.postProcessors[k].append((postProcessFunc, float)) # hyperopt returns floats instead of ints
+
+ return nativeHpDist(k, *args)
+ else:
+ return self.transformHyperDefItemUniversal(k, self.distributionToUniform(k, v))
+
+
+class HyperOptVectorType_(HyperparamVector):
+ """Do not set to `vectorType`. It is used only when getting categorical results from the fitter and passing them back"""
+
+ @classmethod
+ def dict2native(cls, dic: typing.Dict[str, typing.Any], spec) -> typing.Dict[str, typing.Any]:
+ for choiceAttrName, choices in spec.choices.items():
+ # no need to check if choiceAttrName in dic
+ dic[choiceAttrName] = spec.choices[choiceAttrName].index(dic[choiceAttrName])
+ return super().dict2native(dic, spec)
+
+ @classmethod
+ def native2dict(cls, native: typing.Dict[str, typing.Any], spec) -> typing.Dict[str, typing.Any]:
+ for choiceAttrName, choices in spec.choices.items():
+ # no need to check if choiceAttrName in native
+ native[choiceAttrName] = choices[native[choiceAttrName]]
+ return super().native2dict(native, spec)
+
+
+class Hyperopt(GenericOptimizer):
+ specType = HyperOptSpec
+ hyperoptAlgo = None
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None) -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+
+ def prepareScoring(self, spaceSpec: typing.Dict[str, "hyperopt.pyll.base.Apply"]) -> typing.Tuple[int, str, typing.Dict[str, "hyperopt.pyll.base.Apply"]]:
+ return (self.iters, "hyperopt (" + self.__class__.hyperoptAlgo + ")", {"spaceSpec": spaceSpec, "trials": None})
+
+ def injectPoints(self, pointz, bestPointIndex, context, initialize=False):
+ trialsData = []
+ if context["trials"] is None:
+ context["trials"] = hyperopt.Trials()
+ pointz = list(pointz)
+ from hyperopt.fmin import generate_trial
+
+ for tid, p in zip(context["trials"].new_trial_ids(len(pointz)), pointz):
+ t = generate_trial(tid, HyperOptVectorType_.dict2native(p[0], self.spaceSpec))
+ t.update({"state": hyperopt.JOB_STATE_DONE, "result": hyperoptTrialData(*p[1])})
+ trialsData.append(t)
+
+ context["trials"].insert_trial_docs(trialsData)
+ context["trials"].refresh()
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, context: dict) -> typing.Dict[str, typing.Union[float, int]]:
+ hyperoptScore1 = partial(hyperoptScore, fn)
+ best = hyperopt.fmin(hyperoptScore1, context["spaceSpec"], algo=getattr(hyperopt, self.__class__.hyperoptAlgo).suggest, trials=context["trials"], max_evals=self.iters + (len(context["trials"].trials) if context["trials"] else 0))
+ self.details = context["trials"]
+ return HyperOptVectorType_.native2dict(best, self.spaceSpec)
+
+
+class TPE(Hyperopt):
+ hyperoptAlgo = "tpe"
+
+
+class Random(Hyperopt):
+ hyperoptAlgo = "rand"
diff --git a/UniOpt/backends/optunity.py b/UniOpt/backends/optunity.py
new file mode 100644
index 0000000..5b03894
--- /dev/null
+++ b/UniOpt/backends/optunity.py
@@ -0,0 +1,59 @@
+from lazily import optunity
+
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..core.SpecOnlyBoxes import *
+from ..imports import *
+
+
+class OptunityOptimizer(GenericOptimizer):
+ specType = SpecOnlyBoxesNoIntegers
+
+ solver = None
+
+ def prepareScoring(self, spaceSpec):
+ optunityParams = optunity.suggest_solver(self.iters, self.__class__.solver, **spaceSpec) # not merely a suggestion: it also initializes the params to pass to the solver
+ return (self.iters, "optunity " + optunityParams["solver_name"], {"optunityParams": optunityParams, "spaceSpec": spaceSpec})
+
+ def invokeScoring(self, fn, pb, ctx):
+ def optunityScore(**hyperparams):
+ res = fn(hyperparams)
+ # return (-res[0], -res[1])
+ return -res[0]
+
+ optunityScore_ = optunity.wrap_constraints(optunityScore, sys.float_info.max, range_oc=ctx["spaceSpec"]) # caveat: out-of-range evaluations are simply skipped
+ # optunityScore_ = optunity.wrap_constraints(optunityScore, range_oc=ctx["spaceSpec"])
+
+ solver = optunity.make_solver(**ctx["optunityParams"])
+
+ (best, self.details) = optunity.optimize(solver=solver, func=optunityScore_, maximize=False, max_evals=self.iters)
+ # details.optimum
+ # details.call_log - a dict keyed by argument name; each list entry corresponds to one call's argument value
+ # details.report
+ # details.time
+ return best
+
+
+class NelderMead(OptunityOptimizer):
+ solver = "nelder-mead"
+
+
+class Sobol(OptunityOptimizer):
+ solver = "sobol"
+
+
+class ParticleSwarm(OptunityOptimizer):
+ solver = "particle swarm"
+
+
+class CMA_ES(OptunityOptimizer):
+ solver = "cma-es"
+
+
+class RandomSearch(OptunityOptimizer):
+ solver = "random search"
+
+
+class GridSearch(OptunityOptimizer):
+ solver = "grid search"
diff --git a/UniOpt/backends/pySOT.py b/UniOpt/backends/pySOT.py
new file mode 100644
index 0000000..77f5b3d
--- /dev/null
+++ b/UniOpt/backends/pySOT.py
@@ -0,0 +1,108 @@
+import warnings
+
+from lazily.poap.controller import BasicWorkerThread, EvalRecord, ThreadController
+from lazily.pySOT.experimental_design import SymmetricLatinHypercube
+from lazily.pySOT.optimization_problems import OptimizationProblem
+from lazily.pySOT.strategy import SRBFStrategy
+from lazily.pySOT.surrogate import CubicKernel, LinearTail, RBFInterpolant, SurrogateUnitBox
+
+from UniOpt.core.ProgressReporter import ProgressReporter
+from UniOpt.core.Spec import HyperDef
+
+from ..core.ArraySpec import HyperparamArray
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..core.SpecOnlyBoxes import ArraySpecOnlyBoxes
+from ..imports import *
+
+
+class PySOTSpecObj(HyperparamArray):
+ @classmethod
+ def dict2native(cls, hyperparamsNative: typing.Dict[str, typing.Any], spec) -> typing.Dict[str, typing.Any]:
+ lbs = []
+ ubs = []
+ typeSelector = {
+ int: [],
+ float: [],
+ }
+
+ for i, (tp, lb, ub) in enumerate(super().dict2native(hyperparamsNative, spec)):
+ typeSelector[tp].append(i)
+ lbs.append(lb)
+ ubs.append(ub)
+
+ res = OptimizationProblem()
+ res.dim = len(lbs)
+ res.lb = np.array(lbs)
+ res.ub = np.array(ubs)
+ res.int_var = np.array(typeSelector[int])
+ res.cont_var = np.array(typeSelector[float])
+ return res
+
+
+class PySOTSpec(ArraySpecOnlyBoxes):
+ hyperparamsSpecType = PySOTSpecObj
+
+ class HyperparamsSpecsConverters:
+ def uniform(k, dist, tp):
+ return (tp, dist.ppf(0), dist.ppf(1))
+
+
+class PySOT(GenericOptimizer):
+ specType = PySOTSpec
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Dict[str, typing.Union[HyperparamDefinition, int]], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, surrogateClass=None, interpolantClass=None, kernelClass=None, tailClass=None, designClass=None, strategyClass=None) -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+ if surrogateClass is None:
+ surrogateClass = SurrogateUnitBox
+
+ if interpolantClass is None:
+ interpolantClass = RBFInterpolant
+ if kernelClass is None:
+ kernelClass = CubicKernel
+ if tailClass is None:
+ tailClass = LinearTail
+ if designClass is None:
+ designClass = SymmetricLatinHypercube
+ if strategyClass is None:
+ strategyClass = SRBFStrategy
+
+ self.surrogateClass = surrogateClass
+ self.interpolantClass = interpolantClass
+ self.kernelClass = kernelClass
+ self.tailClass = tailClass
+ self.designClass = designClass
+ self.strategyClass = strategyClass
+
+ def injectPoints(self, pointz, bestPointIndex, controller, initialize=False):
+ for p in pointz:
+ rec = EvalRecord(params=p[0], status="completed")
+ rec.value = p[1][0]
+ rec.feasible = True
+ controller.fevals.append(rec)
+
+ def prepareScoring(self, problem):
+ interpolant = self.interpolantClass(dim=problem.dim, kernel=self.kernelClass(), tail=self.tailClass(problem.dim))
+ surrogate = self.surrogateClass(interpolant, lb=problem.lb, ub=problem.ub)
+ design = self.designClass(dim=problem.dim, num_pts=self.iters)
+ controller = ThreadController()
+ controller.strategy = self.strategyClass(max_evals=self.iters, asynchronous=True, exp_design=design, surrogate=surrogate, batch_size=self.jobs, opt_prob=problem)
+ return (self.iters, "pySOT", controller)
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, controller) -> np.ndarray:
+ def pySOTScore(hyperparamsSeq):
+ return fn(hyperparamsSeq)[0]
+
+ controller.strategy.opt_prob.eval = pySOTScore
+
+ for _ in range(self.jobs):
+ worker = BasicWorkerThread(controller, controller.strategy.opt_prob.eval)
+ controller.launch_worker(worker)
+
+ self.details = controller.run()
+ res = np.array(self.details.params)
+ if len(res.shape) == 2: # sometimes params is an array wrapping the parameter array in element zero, sometimes the parameter array itself, apparently depending on whether other optimizers were run before
+ return self.details.params[0]
+ else:
+ return self.details.params
diff --git a/UniOpt/backends/pyshac.py b/UniOpt/backends/pyshac.py
new file mode 100644
index 0000000..a167e4e
--- /dev/null
+++ b/UniOpt/backends/pyshac.py
@@ -0,0 +1,110 @@
+import typing
+import warnings
+
+from lazily import pyshac
+from lazy_object_proxy import Proxy
+
+from ..core.MetaSpec import *
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.SpecOnlyBoxes import HyperparamVector
+from ..imports import *
+
+
+def loadPyShacGridSpec():
+ from pyshac import DiscreteHyperParameter, NormalContinuousHyperParameter, UniformContinuousHyperParameter
+ from pyshac.config.hyperparameters import AbstractContinuousHyperParameter
+
+ class SciPyContinuousHyperParameter(AbstractContinuousHyperParameter):
+ def __init__(self, k, dist):
+ self.dist = dist
+ super().__init__(k, dist.a, dist.b, False)
+
+ def sample(self):
+ return self.dist.rvs(1)[0]
+
+ def get_config(self):
+ config = super().get_config()
+ config.update({"dist": self.dist})
+ return config
+
+ class PySHACGridSpec(MSpec(isArray=True, scalarMode=ScalarMetaMap.degenerateCategory, integerMode=IntegerMetaMap.noIntegers)):
+ hyperparamsVectorType = HyperparamVector
+
+ class HyperparamsSpecsConverters:
+ def randint(k, dist, tp):
+ return __class__._categorical(k, range(dist.a, dist.b))
+
+ def uniform(k, dist, tp):
+ return UniformContinuousHyperParameter(k, dist.ppf(0), dist.ppf(1))
+
+ def norm(k, dist, tp):
+ return NormalContinuousHyperParameter(k, dist.mean(), dist.std())
+
+ def _categorical(k, categories):
+ return DiscreteHyperParameter(k, categories)
+
+ def transformHyperDefItemUniversal(self, k, v):
+ return SciPyContinuousHyperParameter(k, v.distribution)
+
+ def transformResult(self, hyperparamsNative):
+ return super().transformResult(self.__class__.hyperparamsSpecType.native2dict(hyperparamsNative, self))
+
+ return PySHACGridSpec
+
+
+PySHACGridSpec = Proxy(loadPyShacGridSpec)
+
+
+class PySHAC(GenericOptimizer):
+ specType = PySHACGridSpec
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, batches=None, skipCV: bool = False, earlyStop: bool = False, relaxChecks: bool = False, engine=None, **otherShacHyperparams) -> None:
+ ratio = 10
+ if batches is None:
+ batches = round(iters / ratio)
+ warnings.warn("Count of batches is not set, assumming iters/" + str(ratio))
+ if not batches:
+ batches = 1
+ warnings.warn("Count of batches 0, setting to " + str(batches))
+
+ ratio = round(iters / batches)
+ iters1 = batches * ratio
+ if iters1 != iters:
+ warnings.warn("Count of iters is not divided by count of batches. Rounding to" + str(iters1))
+ iters = iters1
+
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+
+ if engine is None:
+ engine = pyshac.SHAC
+
+ self.engine = engine
+ self.batches = batches
+ self.skipCV = skipCV
+ self.earlyStop = earlyStop
+ self.relaxChecks = relaxChecks
+ self.otherShacHyperparams = otherShacHyperparams
+
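+ # Worked example of the budget rounding above: iters=1003 gives
+ # batches = round(1003 / 10) = 100, then ratio = round(1003 / 100) = 10,
+ # so iters is rounded to 100 * 10 = 1000 (with a warning).
+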
+ def prepareScoring(self, spaceSpec: typing.Iterable["pyshac.config.hyperparameters.AbstractHyperParameter"]) -> typing.Tuple[int, str, "pyshac.SHAC"]:
+ shacHyperparams = {"total_budget": self.iters, "num_batches": self.batches, "objective": "min"}
+ shacHyperparams.update(self.otherShacHyperparams)
+
+ shac = self.engine(spaceSpec, **shacHyperparams)
+ shac.num_parallel_generators = self.jobs
+ shac.num_parallel_evaluators = self.jobs
+ # shac.generator_backend = 'multiprocessing'
+ # shac.generator_backend = 'threading'
+
+ warnings.warn("Problem passing data between processes, https://github.com/titu1994/pyshac/issues/1 , setting evaluator_backend = 'threading'")
+ # shac.num_parallel_evaluators = 1
+ shac.evaluator_backend = "threading"
+ # shac.evaluator_backend = 'multiprocessing'
+
+ return (self.iters, "pyshac", shac)
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, shac: "pyshac.SHAC") -> typing.List[float]:
+ def pyshacScore(id, hyperparamsDict):
+ return fn(hyperparamsDict)[0]
+
+ shac.fit(pyshacScore, skip_cv_checks=self.skipCV, early_stop=self.earlyStop, relax_checks=self.relaxChecks)
+ return shac.dataset.X[np.argmin(shac.dataset.Y)]
diff --git a/UniOpt/backends/rbfopt.py b/UniOpt/backends/rbfopt.py
new file mode 100644
index 0000000..f28c992
--- /dev/null
+++ b/UniOpt/backends/rbfopt.py
@@ -0,0 +1,96 @@
+import typing
+from os.path import sep as pathSep
+
+import numpy as np
+from lazily import rbfopt
+from lazy_object_proxy import Proxy
+
+from ..core.ArraySpec import HyperparamArray
+from ..core.MetaSpec import *
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..imports import *
+from ..utils import resolveAvailablePath
+
+
+class RBFOptSpecArrayDict(HyperparamArray):
+ @classmethod
+ def dict2native(cls, hyperparamsNative: typing.Dict[str, typing.Any], spec) -> typing.Dict[str, typing.Any]:
+ spaceSpec = np.array(super().dict2native(hyperparamsNative, spec))
+ return {"dimension": spaceSpec.shape[0], "var_lower": spaceSpec[:, 1], "var_upper": spaceSpec[:, 2], "var_type": spaceSpec[:, 0]}
+
+
+class RBFOptSpec(MSpec(isArray=True, scalarMode=ScalarMetaMap.noScalars, integerMode=IntegerMetaMap.floatIntegers)):
+ hyperparamsSpecType = RBFOptSpecArrayDict
+
+ class HyperparamsSpecsConverters:
+ def randint(k, dist, tp):
+ return ("I", dist.a, dist.b)
+
+ def uniform(k, dist, tp):
+ return (tp.__name__[0].upper(), dist.ppf(0), dist.ppf(1))
+
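+# What the converters above produce, for example:
+#   randint on [1, 10) -> ("I", 1, 10)
+#   uniform float on [0, 1] -> ("F", 0.0, 1.0) # the letter is the first letter of the type name, uppercased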
+
+class RBFOpt(GenericOptimizer):
+ specType = RBFOptSpec
+
+ rbfoptAlgo = None
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, maxAlgIterations=None, bonminPath="bonmin", ipoptPath="ipopt", **otherOpts) -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+
+ if isinstance(bonminPath, str) and pathSep not in bonminPath:
+ bonminPath = resolveAvailablePath(bonminPath)
+ if isinstance(ipoptPath, str) and pathSep not in ipoptPath:
+ ipoptPath = resolveAvailablePath(ipoptPath)
+
+ if maxAlgIterations is None:
+ maxAlgIterations = round(iters * 1.5)
+
+ self.bonminPath = bonminPath
+ self.ipoptPath = ipoptPath
+ self.maxAlgIterations = maxAlgIterations
+ self.otherOpts = otherOpts
+
+ def prepareScoring(self, spaceSpec: typing.Tuple[typing.Tuple[str, float, float], typing.Tuple[str, int, int]]) -> typing.Tuple[int, str, dict]:
+ return (self.iters, "rbfopt " + self.__class__.rbfoptAlgo, {"spaceSpec": spaceSpec, "points": np.array(()), "losses": np.array(())})
+
+ def injectPoints(self, pointz, bestPointIndex, context, initialize=False):
+ points = []
+ losses = []
+ for p in pointz:
+ points.append(p[0])
+ losses.append(p[1][0])
+ points = np.array(points)
+ losses = np.array(losses)
+
+ if initialize:
+ context["points"] = points
+ context["losses"] = losses
+ else:
+ context["points"] = np.concatenate(points, context["points"])
+ context["losses"] = np.concatenate(losses, context["losses"])
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, context: dict) -> np.ndarray:
+ def rbfoptScore(hyperparams):
+ return fn(hyperparams)[0]
+
+ spaceSpec = context["spaceSpec"]
+ blackBox = rbfopt.RbfoptUserBlackBox(obj_funct=rbfoptScore, **spaceSpec)
+ settings = rbfopt.RbfoptSettings(max_evaluations=self.iters, algorithm=self.__class__.rbfoptAlgo, print_solver_output=False, minlp_solver_path=str(self.bonminPath), nlp_solver_path=str(self.ipoptPath), **self.otherOpts)
+ hasPoints = "points" in context and len(context["points"])
+ algo = rbfopt.RbfoptAlgorithm(settings, blackBox, init_node_pos=(context["points"] if hasPoints else None), init_node_val=(context["losses"] if hasPoints else None), do_init_strategy=(not hasPoints))
+
+ algo.output_stream = pb
+
+ minCost, best, _, _, _ = algo.optimize()
+ self.details = algo
+ return best
+
+
+class Gutmann(RBFOpt):
+ rbfoptAlgo = "Gutmann"
+
+
+class MSRSM(RBFOpt):
+ rbfoptAlgo = "MSRSM"
diff --git a/UniOpt/backends/simple_spearmint.py b/UniOpt/backends/simple_spearmint.py
new file mode 100644
index 0000000..a1aecd3
--- /dev/null
+++ b/UniOpt/backends/simple_spearmint.py
@@ -0,0 +1,51 @@
+from lazily import simple_spearmint
+from numpy import ndarray
+
+from ..core.MetaSpec import *
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..imports import *
+
+
+class SimpleSpearmintSpec(MSpec(scalarMode=ScalarMetaMap.degenerateCategory, integerMode=IntegerMetaMap.noIntegers)):
+ class HyperparamsSpecsConverters:
+ def randint(k, dist, tp):
+ return __class__._categorical(k, range(dist.a, dist.b))
+
+ # uniform=lambda k, dist, tp: {"type": tp.__name__, "min": dist.a, "max": dist.b}
+ def uniform(k, dist, tp):
+ return {"type": "float", "min": dist.ppf(0), "max": dist.ppf(1)} # a bug in simple_spearmint: float is used instead of round
+
+ def _categorical(k, categories):
+ return {"type": "enum", "options": categories}
+
+
+class SimpleSpearmint(GenericOptimizer):
+ specType = SimpleSpearmintSpec
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, seedIters: int = 5) -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+
+ self.seedIters = seedIters
+
+ def prepareScoring(self, spaceSpec):
+ solver = simple_spearmint.SimpleSpearmint(spaceSpec)
+ return (self.iters, "SimpleSpearmint", solver)
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, solver):
+ def spearmintIteration(suggester):
+ suggestion = suggester()
+ solver.update(suggestion, fn(suggestion)[0])
+
+ pb.print("Bootstrapping...")
+ for i in range(self.seedIters):
+ spearmintIteration(solver.suggest_random)
+
+ pb.print("Started predicting")
+ for i in range(self.iters - self.seedIters):
+ spearmintIteration(solver.suggest)
+
+ self.details = solver
+ best, _ = solver.get_best_parameters()
+ return best
diff --git a/UniOpt/backends/sklearn.py b/UniOpt/backends/sklearn.py
new file mode 100644
index 0000000..71a3f6a
--- /dev/null
+++ b/UniOpt/backends/sklearn.py
@@ -0,0 +1,35 @@
+__all__ = ("SKLearnRigidGridSpec", "SKLearnRandomizedSpec")
+import typing
+
+import scipy.stats
+
+from ..core.MetaSpec import *
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..core.Spec import HyperparamDefinition
+from ..imports import *
+
+msel = lazyImport("sklearn.model_selection")
+
+# msel.GridSearchCV
+# msel.RandomizedSearchCV
+
+
+class SKLearnSpec(MSpec(scalarMode=ScalarMetaMap.degenerateCategory, integerMode=IntegerMetaMap.noIntegers)):
+ pass
+
+
+class SKLearnRigidGridSpec(SKLearnSpec):
+ gridSize = 10 # assumption: number of grid points per continuous parameter
+
+ class HyperparamsSpecsConverters:
+ def randint(k, dist, tp):
+ return range(int(dist.a), int(dist.b))
+
+ def uniform(k, dist, tp):
+ return np.linspace(dist.ppf(0), dist.ppf(1), SKLearnRigidGridSpec.gridSize)
+
+
+class SKLearnRandomizedSpec(SKLearnSpec):
+ def transformHyperDefItemUniversal(self, k: str, v: typing.Union[typing.Tuple[int], HyperparamDefinition]) -> typing.Union[typing.Tuple[int], scipy.stats._distn_infrastructure.rv_frozen]:
+ if isinstance(v, categoricalTypes):
+ return v
+ return v.distribution
diff --git a/UniOpt/backends/skopt.py b/UniOpt/backends/skopt.py
new file mode 100644
index 0000000..d4520ab
--- /dev/null
+++ b/UniOpt/backends/skopt.py
@@ -0,0 +1,88 @@
+import typing
+
+from lazily import skopt
+from lazy_object_proxy import Proxy
+
+from ..core.MetaSpec import *
+from ..core.Optimizer import *
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..imports import *
+
+
+def getTypeRemap():
+ return {int: skopt.space.Integer, float: skopt.space.Real}
+
+
+typeRemap = Proxy(getTypeRemap)
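+
+# Once skopt is actually imported, the lazy remap above resolves so that, e.g.,
+# typeRemap[float](0.0, 1.0, name="lr") constructs skopt.space.Real(0.0, 1.0, name="lr").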
+
+
+class SKOptSpec(MSpec(isArray=True, scalarMode=ScalarMetaMap.degenerateCategory)):
+ class HyperparamsSpecsConverters:
+ def randint(k, dist, tp):
+ return __class__._categorical(k, range(dist.a, dist.b))
+
+ def uniform(k, dist, tp):
+ return typeRemap[tp](dist.ppf(0), dist.ppf(1), name=k)
+
+ def _categorical(k, categories):
+ return skopt.space.Categorical(categories, name=k)
+
+
+class SKOpt(GenericOptimizer):
+ specType = SKOptSpec
+ skoptAlgo = None
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, acquisitionOptimizerType="auto", acquisitionType="gp_hedge", chi: float = 0.01, kappa: float = 1.96, nInitialPoints: int = 10, nRestartsOptimizer: int = 5):
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+ self.acquisitionType = acquisitionType
+ self.acquisitionOptimizerType = acquisitionOptimizerType
+
+ self.chi = chi
+ self.kappa = kappa
+ self.nInitialPoints = nInitialPoints
+ self.nRestartsOptimizer = nRestartsOptimizer
+
+ def prepareScoring(self, spaceSpec: typing.Tuple["skopt.space.space.Real", "skopt.space.space.Integer", "skopt.space.space.Categorical"]) -> typing.Tuple[int, str, typing.Tuple["skopt.space.space.Real", "skopt.space.space.Integer", "skopt.space.space.Categorical"]]:
+ from skopt.utils import cook_estimator, normalize_dimensions
+
+ normalized = normalize_dimensions(spaceSpec)
+ base_estimator = cook_estimator(self.__class__.skoptAlgo, space=normalized, random_state=None)
+ optimizer = skopt.Optimizer(normalized, base_estimator, n_initial_points=0, acq_func=self.acquisitionType, acq_optimizer=self.acquisitionOptimizerType, acq_optimizer_kwargs={"n_points": self.iters, "n_restarts_optimizer": self.nRestartsOptimizer, "n_jobs": self.jobs}, acq_func_kwargs={"xi": self.chi, "kappa": self.kappa})
+ return (self.iters, "SKOpt (" + self.__class__.skoptAlgo + ")", optimizer)
+
+ def injectPoints(self, pointz, bestPointIndex, optimizer, initialize=False):
+ for p in pointz:
+ optimizer.tell(p[0], p[1][0])
+ self.nInitialPoints -= 1
+ if self.nInitialPoints < 0:
+ self.nInitialPoints = 0
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, optimizer) -> typing.List[NumericT]:
+ optimizer._n_initial_points = self.nInitialPoints
+ for i in range(self.iters):
+ hp = optimizer.ask()
+ loss = fn(hp)
+ result = optimizer.tell(hp, loss[0])
+ self.details = result
+ return result.x
+
+
+class SKOptBayesian(SKOpt):
+ skoptAlgo = "GP"
+
+
+class SKOptExtraTrees(SKOpt):
+ skoptAlgo = "ET"
+
+
+class SKOptForest(SKOpt):
+ skoptAlgo = "RF"
+
+
+class SKOptGBTree(SKOpt):
+ skoptAlgo = "GBRT"
+
+
+class SKOptRandom(SKOpt):
+ skoptAlgo = "dummy"
diff --git a/UniOpt/backends/yabox.py b/UniOpt/backends/yabox.py
new file mode 100644
index 0000000..56ef346
--- /dev/null
+++ b/UniOpt/backends/yabox.py
@@ -0,0 +1,119 @@
+import warnings
+
+import numpy as np
+from lazy_object_proxy import Proxy
+
+from ..core import LossT
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..core.SpecOnlyBoxes import ArraySpecOnlyBoxesNoIntegers
+from ..imports import *
+
+
+def createResumableDE():
+ from functools import wraps
+
+ from yabox.algorithms.de import DE, DEIterator
+
+ class ResumableDE(DE):
+ @wraps(DE.__init__)
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._evaluationEnabled = False
+ self.initializeIterator()
+ self._initialized = False
+ self._evaluationEnabled = True
+
+ def initializeIterator(self):
+ self._iter = DEIterator(self) # invokes `init` and `evaluate` hooked by us, returning fake values depending on _evaluationEnabled
+ self._initialized = True
+
+ def iterator(self):
+ if not self._initialized:
+ self.initializeIterator()
+ return iter(self._iter)
+
+ def evaluate(self, P):
+ if self._evaluationEnabled:
+ return super().evaluate(P)
+ else:
+ return [np.inf] * self.popsize
+
+ def init(self, data=None):
+ if self._evaluationEnabled:
+ return super().init()
+ else:
+ return np.array([[np.inf] * self.dims] * self.popsize)
+
+ return ResumableDE
+
+
+ResumableDE = Proxy(createResumableDE)
+
+# from yabox.algorithms.de import DE, DEIterator
+
+
+class Yabox(GenericOptimizer):
+ specType = ArraySpecOnlyBoxesNoIntegers
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, mutation: typing.Tuple[float, float] = (0.5, 1.0), crossover: float = 0.7, selfAdaptive: bool = False, popSize=None, seed=None) -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+
+ self.mutation = mutation
+ self.crossover = crossover
+ self.selfAdaptive = selfAdaptive
+ self.popSize = popSize
+ self.seed = seed
+
+ def prepareScoring(self, specSeq: typing.Tuple[typing.Tuple[float, float], typing.Tuple[int, int]]) -> typing.Tuple[int, str, "ResumableDE"]:
+ warnings.warn("Multiprocessing is broken for now, https://github.com/pablormier/yabox/issues/33")
+ # solver=yabox.PDE(fobj=None, bounds=specSeq, mutation=self.mutation, crossover=self.crossover, maxiters=self.iters, self_adaptive=self.selfAdaptive, popsize=self.popSize, seed=self.seed, processes=self.jobs)
+
+ warnings.warn("Bug with non-returning, see https://github.com/pablormier/yabox/pull/32 for fix")
+ solver = ResumableDE(fobj=None, bounds=specSeq, mutation=self.mutation, crossover=self.crossover, maxiters=self.iters, self_adaptive=self.selfAdaptive, popsize=self.popSize, seed=self.seed)
+ solver.maxiters //= solver.popsize
+ if solver.maxiters < 2:
+ solver.maxiters = 2
+
+ return ((solver.maxiters + 1) * solver.popsize, "yabox", solver)
+
+ def injectPoints(self, pointz, bestPointIndex, solver, initialize=False):
+ points = []
+ losses = []
+ for p in pointz:
+ points.append(p[0])
+ losses.append(p[1][0])
+
+ if not solver._initialized:
+ newPointsCount = min(solver.popsize, len(points))
+ solver._initialized = True
+ # assume sorted!
+ solver._iter.population[:newPointsCount] = np.array(points[:newPointsCount])
+ solver._iter.fitness[:newPointsCount] = losses[:newPointsCount]
+ solver._iter.best_idx = bestPointIndex
+ solver._iter.best_fitness = losses[bestPointIndex]
+ else:
+ solver.initializeIterator()
+
+ mergedLosses = np.concatenate((losses, solver._iter.fitness))
+ mergedPointsCount = min(solver.popsize, len(mergedLosses))
+ order = np.argsort(mergedLosses)[:mergedPointsCount]
+
+ for indexInPop, mergedSortedIndex in enumerate(order):
+ if mergedSortedIndex >= len(points): # indices past the injected points refer to the solver's own population
+ hp = solver._iter.population[mergedSortedIndex - len(points)]
+ else:
+ hp = points[mergedSortedIndex]
+ solver._iter.replacement(indexInPop, hp, mergedLosses[mergedSortedIndex])
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, solver: "ResumableDE") -> np.ndarray:
+ def yaboxScore(hyperparamsSeq):
+ return fn(hyperparamsSeq)
+
+ solver.fobj = yaboxScore
+ for iteration in solver.iterator():
+ pass
+
+ return iteration.population[iteration.best_idx]
diff --git a/UniOpt/backends/ypde.py b/UniOpt/backends/ypde.py
new file mode 100644
index 0000000..325912f
--- /dev/null
+++ b/UniOpt/backends/ypde.py
@@ -0,0 +1,63 @@
+import warnings
+
+from lazily import de, ypstruct
+
+from UniOpt.core.ProgressReporter import ProgressReporter
+from UniOpt.core.Spec import HyperDef
+
+from ..core.Optimizer import GenericOptimizer, PointsStorage
+from ..core.ProgressReporter import ProgressReporter
+from ..core.Spec import *
+from ..core.SpecOnlyBoxes import ArraySpecOnlyBoxesNoIntegers
+from ..imports import *
+
+
+class YPDESpec(ArraySpecOnlyBoxesNoIntegers):
+ class HyperparamsSpecsConverters:
+ def uniform(k, dist, tp):
+ return dist
+
+ def distributionToUniform(self, k: str, hpDef: HyperDef) -> HyperDef:
+ return self.distributionToUniform_(k, hpDef)
+
+
+class YPDE(GenericOptimizer):
+ specType = YPDESpec
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Dict[str, typing.Union[HyperparamDefinition, int]], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None, popSize: int = 30, mutation: float = 0.8, crossover: float = 0.7) -> None:
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+
+ restIters = self.iters - popSize
+ self.generations = restIters / popSize
+ assert (self.generations).is_integer()
+ self.generations = int(self.generations)
+ assert self.generations > 0
+
+ self.popSize = popSize
+ self.crossover = crossover
+ self.mutation = mutation
+
+ def prepareScoring(self, specSeq: typing.Iterable[scipy.stats._distn_infrastructure.rv_frozen]) -> typing.Tuple[int, str, typing.Tuple["ypstruct.structure", "ypstruct.structure"]]:
+ problem = ypstruct.structure()
+ problem.varmin = uniformLimits[0]
+ problem.varmax = uniformLimits[1]
+ problem.nvar = len(specSeq)
+
+ params = ypstruct.structure()
+ params.maxit = self.generations
+ params.npop = self.popSize
+ params.F = self.mutation
+ params.CR = self.crossover
+ params.DisplayInfo = False
+ return (self.iters, "ypde", (problem, params))
+
+ def invokeScoring(self, fn: typing.Callable, pb: ProgressReporter, context: typing.Tuple["ypstruct.structure", "ypstruct.structure"]) -> np.ndarray:
+ def ypdeScore(hyperparamsSeq):
+ return fn(hyperparamsSeq)[0]
+
+ (problem, params) = context
+ problem.objfunc = ypdeScore
+
+ res = de.run(problem, params)
+		self.details = res
+ return res.bestsol.position
diff --git a/UniOpt/core/ArraySpec.py b/UniOpt/core/ArraySpec.py
new file mode 100644
index 0000000..87a749f
--- /dev/null
+++ b/UniOpt/core/ArraySpec.py
@@ -0,0 +1,41 @@
+from ..imports import *
+from .HyperparamVector import HyperparamVector
+from .Optimizer import GenericOptimizer
+from .Spec import Spec
+
+
+class HyperparamArray(HyperparamVector):
+ primitiveType = list
+
+ @classmethod
+ def dict2native(cls, dic: typing.Dict[str, typing.Any], spec) -> typing.Iterable[typing.Any]:
+ if dic:
+			res = [None] * len(spec.spec)
+ for k, v in dic.items():
+ res[spec.indexes[k]] = v
+ return res
+ else:
+ return None
+
+ @staticmethod
+ def native2dict(native: typing.Iterable[typing.Any], spec) -> typing.Dict[str, typing.Any]:
+ """Transforms a object of native output type into a dict of a spec"""
+ if native is not None:
+ keys = list(spec.spec.keys())
+ return {spec.rIndexes[i]: v for i, v in enumerate(native)}
+ else:
+ return None
+
+
+class ArraySpec(Spec):
+ """Optimizer receives arrays as a spec"""
+
+ hyperparamsVectorType = HyperparamArray
+ hyperparamsSpecType = HyperparamArray
+
+ def __init__(self, spec):
+ super().__init__(spec)
+ self.indexes = {k: i for i, k in enumerate(self.spec)}
+ self.rIndexes = tuple(self.spec.keys())
+ self.maxIndex = len(self.indexes)
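+
+
+# An illustrative sketch (not part of the API), showing how HyperparamArray maps
+# between the dict form and the positional form; the spec dict values are elided:
+#
+# spec = ArraySpec({"x": ..., "y": ...})
+# assert HyperparamArray.dict2native({"x": 1, "y": 2}, spec) == [1, 2]
+# assert HyperparamArray.native2dict([1, 2], spec) == {"x": 1, "y": 2}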
diff --git a/UniOpt/core/HyperparamVector.py b/UniOpt/core/HyperparamVector.py
new file mode 100644
index 0000000..15898cc
--- /dev/null
+++ b/UniOpt/core/HyperparamVector.py
@@ -0,0 +1,18 @@
+import typing
+
+
+class HyperparamVector:
+ """A container class storing the functions for transforming hyperparams CONTAINER TYPE (dict, list) back and forth."""
+
+ makeEmpty = staticmethod(dict)
+ primitiveType = dict
+
+ @classmethod
+ def dict2native(cls, dic: typing.Dict[str, typing.Any], spec) -> typing.Dict[str, typing.Any]:
+ """Transforms a dict into an object of native output type of a spec"""
+ return dic
+
+ @classmethod
+ def native2dict(cls, native: typing.Dict[str, typing.Any], spec) -> typing.Dict[str, typing.Any]:
+ """Transforms an object of native output type into a dict of a spec"""
+ return native
diff --git a/UniOpt/core/MetaMap.py b/UniOpt/core/MetaMap.py
new file mode 100644
index 0000000..1fc50f3
--- /dev/null
+++ b/UniOpt/core/MetaMap.py
@@ -0,0 +1,30 @@
+import typing
+
+
+class MetaMapMeta(type):
+ def __new__(cls, className: str, parents, attrs: typing.Dict[str, typing.Any], *args, **kwargs) -> typing.Type["MetaMap"]:
+ defaultCase = None
+ newAttrs = type(attrs)()
+ newAttrs["_namePartToClassMapping"] = {}
+ newAttrs["_classToNamePartMapping"] = {}
+ for friendlyName, v in attrs.items():
+ if friendlyName[0] == "_":
+ newAttrs[friendlyName] = v
+ continue
+
+ if v is None or isinstance(v, type):
+ defaultCase = v
+ else:
+ (namePart, mixinCls) = v
+ newAttrs["_namePartToClassMapping"][namePart] = mixinCls
+ newAttrs["_classToNamePartMapping"][mixinCls] = namePart
+ newAttrs[friendlyName] = mixinCls
+
+ newAttrs["_namePartToClassMapping"][None] = defaultCase
+ newAttrs["__slots__"] = tuple()
+ return super().__new__(cls, className, parents, newAttrs, *args, **kwargs)
+
+
+class MetaMap(object, metaclass=MetaMapMeta):
+ def __new__(cls):
+ raise NotImplementedError("These classes are not meant to be instantiated!")
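+
+
+# An illustrative sketch of the convention (the real example is `IntegerMetaMap`
+# in SpecNoIntegers.py): a MetaMap subclass maps friendly names to
+# (namePart, mixinClass) pairs, while a bare class or None becomes the default case.
+#
+# class IntegerMetaMap(MetaMap):
+#	supportsIntegers = None  # the default case
+#	floatIntegers = ("To", SpecToIntegers)
+#	noIntegers = ("No", SpecNoIntegers)
+#
+# Afterwards IntegerMetaMap.noIntegers is SpecNoIntegers and
+# IntegerMetaMap._classToNamePartMapping[SpecNoIntegers] == "No".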
diff --git a/UniOpt/core/MetaSpec.py b/UniOpt/core/MetaSpec.py
new file mode 100644
index 0000000..25ce451
--- /dev/null
+++ b/UniOpt/core/MetaSpec.py
@@ -0,0 +1,80 @@
+import re
+
+__all__ = ("IntegerMetaMap", "ScalarMetaMap", "MSpec")
+import typing
+from functools import lru_cache
+from warnings import warn
+
+from .ArraySpec import ArraySpec
+from .Spec import *
+from .SpecNoIntegers import IntegerMetaMap
+from .SpecNoScalars import ScalarMetaMap
+
+
+def specClassNameGenerator(*, isArray: bool = False, integerMode=None, scalarMode=None, isDummy: bool = False):
+ name = []
+ if isDummy:
+ name.append("Dummy")
+ if isArray:
+ name.append("Array")
+
+ name.append("Spec")
+
+ if scalarMode:
+ name.append("NoScalars" + ScalarMetaMap._classToNamePartMapping[scalarMode])
+
+ if integerMode:
+ name.append(IntegerMetaMap._classToNamePartMapping[integerMode] + "Integers")
+
+ return "".join(name)
+
+
+nameRx = re.compile("^(Dummy)?(Array)?Spec(?:NoScalars(Dumb|Categorical))?(?:(To|No)Integers)?$")
+
+
+def parseName(name: str) -> typing.Mapping[str, typing.Any]:
+ names = ("isDummy", "isArray", "scalarMode", "integerMode")
+ res = dict(zip(names, nameRx.match(name).groups()))
+ res["isDummy"] = bool(res["isDummy"])
+ res["isArray"] = bool(res["isArray"])
+ res["scalarMode"] = ScalarMetaMap._namePartToClassMapping[res["scalarMode"]]
+ res["integerMode"] = IntegerMetaMap._namePartToClassMapping[res["integerMode"]]
+ return res
+
+
+@lru_cache(maxsize=None, typed=True)
+def MSpec(*name: typing.Optional[typing.Tuple[str]], isArray: bool = False, integerMode=None, scalarMode=None, isDummy: bool = False, **kwargs):
+ """A class choosing the right sequence of inheritance of mixins depending on traits the spec class must have"""
+ if name:
+ assert len(name) == 1
+ assert isinstance(name[0], str)
+ return MSpec(**parseName(name[0]), name=name[0])
+ else:
+ superclasses = []
+ if isDummy:
+ superclasses.append(DummySpec)
+
+ if isArray:
+ superclasses.append(ArraySpec)
+
+ if integerMode:
+ superclasses.append(integerMode)
+
+ if scalarMode:
+ superclasses.append(scalarMode)
+
+ if not superclasses:
+ superclasses.append(Spec)
+
+ if len(superclasses) == 1:
+ #warn("Use " + superclasses[0].__name__ + " directly")
+ return superclasses[0]
+
+ if "name" in kwargs:
+ name = kwargs["name"]
+ else:
+ name = specClassNameGenerator(isArray=isArray, integerMode=integerMode, scalarMode=scalarMode, isDummy=isDummy)
+
+ return type(name, tuple(superclasses), {})
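+
+
+# An illustrative sketch (using the metamaps imported above): these two calls
+# produce classes with the same canonical name, since MSpec also parses names:
+#
+# A = MSpec(isArray=True, integerMode=IntegerMetaMap.noIntegers)
+# B = MSpec("ArraySpecNoIntegers")
+# assert A.__name__ == B.__name__ == "ArraySpecNoIntegers"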
diff --git a/UniOpt/core/Optimizer.py b/UniOpt/core/Optimizer.py
new file mode 100644
index 0000000..683c71b
--- /dev/null
+++ b/UniOpt/core/Optimizer.py
@@ -0,0 +1,109 @@
+from functools import wraps
+from warnings import warn
+
+from ..imports import *
+from . import LossT, PointsSequenceT
+from .PointsStorage import *
+from .ProgressReporter import ProgressReporter, defaultProgressReporter
+from .Spec import NumericT, Spec
+
+
+class PointsInjectionIsNotImplemented(NotImplementedError):
+ pass
+
+
+class Optimizer:
+	#blackBoxFunc: typing.Callable
+	#iters: int
+	#jobs: int
+	#spaceSpec: typing.Mapping[str, object]
+	#progressReporterClass: typing.Type[ProgressReporter]
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None) -> None:
+ self.blackBoxFunc = blackBoxFunc
+ self.iters = iters
+ self.jobs = jobs
+ self.spaceSpec = spaceSpec
+ self.progressReporterClass = defaultProgressReporter
+
+ if pointsStorage is None:
+ self.pointsStorage = DummyStorage()
+ else:
+ self.pointsStorage = pointsStorage
+
+ def injectPoints(self, points: PointsSequenceT, bestPointIndex: int, context: object, initialize: bool = False):
+ raise PointsInjectionIsNotImplemented()
+
+ def savePoint(self, params: dict, loss: LossT):
+ try:
+ self.pointsStorage.append(params, loss)
+ except Exception as ex:
+ warn("Unable to backup point: " + str(ex))
+
+ def prepareCachedPoints(self) -> typing.Tuple[PointsSequenceT, int]:
+ """Prepares cached points: loads them from storage. Returns the points and the best point index"""
+ return self.pointsStorage.prepare()
+
+ def resume(self, context):
+ """Injects the state (in the form of saved points) into the used optimizer"""
+ try:
+ pts, bestIndex = self.prepareCachedPoints()
+ if pts and bestIndex is not None:
+ self.injectPoints(pts, bestIndex, context, True)
+ except PointsInjectionIsNotImplemented:
+ warn("Cannot resume: `" + str(self.__class__.__name__) + "` does not have `injectPoint` implemented")
+
+ def __call__(self):
+ raise NotImplementedError()
+
+
+class GenericOptimizer(Optimizer):
+ specType = Spec
+
+ def __init__(self, blackBoxFunc: typing.Callable, spaceSpec: typing.Mapping[str, object], iters: int = 1000, jobs: int = 3, pointsStorage: PointsStorage = None) -> None:
+ self.details = None
+
+ spaceSpec = self.__class__.specType(spaceSpec)
+ super().__init__(blackBoxFunc, spaceSpec, iters, jobs, pointsStorage)
+
+ def __call__(self, textMessage: str = "") -> typing.Dict[str, NumericT]:
+ """Does the optimization"""
+ total, desc, context = self.prepareScoring(self.spaceSpec.getOptimizerConsumableSpec())
+
+ pr = self.progressReporterClass(total=total, title=desc + " " + textMessage + " ...")
+
+ with self.pointsStorage:
+ self.resume(context)
+ with pr as pb:
+ blackBoxIteration = self.__class__.wrapIteration(self, self.spaceSpec.transformHyperparams, pb)(self.blackBoxFunc)
+ best = self.invokeScoring(blackBoxIteration, pb, context)
+
+ best = self.spaceSpec.transformResult(best)
+
+ #hyperparamsTypeCoerce(best)
+ assert best is not None
+ return best
+
+ @classmethod
+ def wrapIteration(cls, optimizer: "GenericOptimizer", prepareHyperparams: typing.Callable, pb: ProgressReporter) -> typing.Callable:
+ def decorator(blackBoxFunc):
+ @wraps(blackBoxFunc)
+ def blackBoxFunc1(hyperparams, *args, **kwargs):
+ hyperparams = prepareHyperparams(hyperparams)
+ pb.reportHyperparams(hyperparams)
+
+ res = blackBoxFunc(hyperparams, *args, **kwargs)
+
+ optimizer.savePoint(hyperparams, res)
+ pb.reportLoss(res)
+ return res
+
+ return blackBoxFunc1
+
+ return decorator
+
+ def prepareCachedPoints(self):
+ pts, bestIndex = super().prepareCachedPoints()
+ return map(lambda p: (self.spaceSpec.getOptimizerConsumableVector(p[0]), p[1]), pts), bestIndex
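+
+
+# An illustrative sketch of the contract a backend implements (`someSolverLib`,
+# `ask`, `tell` and `best` are hypothetical names, not a real API):
+# `prepareScoring` returns (total count for the progress reporter, a short
+# backend name, an opaque context); `invokeScoring` runs the optimizer, calling
+# the wrapped black-box function, and returns the best point in native form.
+#
+# class MyBackend(GenericOptimizer):
+#	def prepareScoring(self, spec):
+#		return (self.iters, "mybackend", someSolverLib.Solver(spec))
+#
+#	def invokeScoring(self, fn, pb, solver):
+#		for i in range(self.iters):
+#			point = solver.ask()
+#			solver.tell(point, fn(point))
+#		return solver.best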
diff --git a/UniOpt/core/PointsStorage.py b/UniOpt/core/PointsStorage.py
new file mode 100644
index 0000000..69d19a3
--- /dev/null
+++ b/UniOpt/core/PointsStorage.py
@@ -0,0 +1,141 @@
+__all__ = ("PointsStorage", "DummyStorage", "MemoryStorage", "SQLiteStorage")
+import heapq
+import pathlib
+from itertools import islice
+
+from lazily.Cache import JSONCache
+
+from ..imports import *
+from . import LossT, Point, PointsSequenceT
+
+
+def keyFunc(p):
+ return p[1]
+
+
+class PointsStorage:
+ """Used to store points - pairs (point in hyperparams space in form of dict, loss in that point (mean, variance))"""
+
+ __slots__ = ()
+
+ def __init__(self) -> None:
+ raise NotImplementedError()
+
+ def append(self, params, loss):
+ raise NotImplementedError()
+
+ def __iter__(self):
+ raise NotImplementedError()
+
+ def __len__(self):
+ raise NotImplementedError()
+
+ def __enter__(self):
+ raise NotImplementedError()
+
+ def prepare(self, n: int = None, requireSorted: bool = True) -> typing.Tuple[PointsSequenceT, int]:
+ """Prepares cached points: loads them and returns their sequence and the best point index."""
+ if n is not None:
+ if requireSorted:
+				res = heapq.nsmallest(n, self, key=keyFunc)  # the lowest loss is the best
+ else:
+ res = list(islice(self, 0, n))
+ else:
+ if requireSorted:
+ res = sorted(self, key=keyFunc)
+ else:
+ res = list(self)
+ return res, (0 if len(res) else None)
+
+
+class DummyStorage(PointsStorage):
+ def __init__(self) -> None:
+ pass
+
+ def append(self, params: dict, loss: LossT) -> None:
+ pass
+
+ def __len__(self):
+ return 0
+
+ def __iter__(self):
+ return iter(())
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_class, exc, traceback) -> None:
+ pass
+
+
+class ListStorage(PointsStorage):
+ """Stores points in a list"""
+
+ __slots__ = ("stor", "isSorted")
+
+ def __init__(self) -> None:
+ self.stor = []
+ self.isSorted = False
+
+ def append(self, params: dict, loss: LossT) -> None:
+ self.isSorted = False
+ self.stor.append(Point((params, loss)))
+
+ def sort(self) -> None:
+ if not (self.isSorted):
+ self.stor.sort()
+ self.isSorted = True
+
+ def __len__(self):
+ return len(self.stor)
+
+ def __iter__(self):
+ self.sort()
+ for el in self.stor:
+ yield Point((dict(el[0]), el[1]))
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_class, exc, traceback) -> None:
+ pass
+
+
+class MemoryStorage(ListStorage):
+ """Stores points in a heap"""
+
+ __slots__ = ()
+
+ def append(self, params: dict, loss: LossT) -> None:
+ self.isSorted = False
+ heapq.heappush(self.stor, Point((params, loss)))
+
+
+defaultSQLitePointsCacheFileName = "./UniOptPointsBackup.sqlite"
+
+
+class SQLiteStorage(PointsStorage):
+ """Stores points in an SQLite DB. Currently uses Cache"""
+
+ __slots__ = ("pointsBackup",)
+
+ def __init__(self, fileName: typing.Union[str, "pathlib.Path"] = defaultSQLitePointsCacheFileName, cacheClass: typing.Type = None) -> None:
+ if cacheClass is None:
+ cacheClass = JSONCache
+ self.pointsBackup = cacheClass(fileName)
+
+ def append(self, params, loss: LossT):
+ self.pointsBackup[len(self.pointsBackup)] = (params, loss)
+
+ def __len__(self):
+ return len(self.pointsBackup)
+
+ def __iter__(self):
+ for hp, loss in self.pointsBackup.values():
+ yield Point((hp, loss))
+
+ def __enter__(self):
+ self.pointsBackup.__enter__()
+ return self
+
+ def __exit__(self, exc_class, exc, traceback) -> None:
+ self.pointsBackup.__exit__(exc_class, exc, traceback)
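+
+
+# An illustrative sketch: losses here are (mean, variance) tuples, as used
+# elsewhere in this library.
+#
+# stor = MemoryStorage()
+# stor.append({"x": 0.5}, (1.25, 0.0))
+# stor.append({"x": 0.1}, (0.25, 0.0))
+# points, bestIndex = stor.prepare()
+# # `points` is sorted by loss, so bestIndex == 0 and points[0].params == {"x": 0.1}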
diff --git a/UniOpt/core/ProgressReporter.py b/UniOpt/core/ProgressReporter.py
new file mode 100644
index 0000000..dba7038
--- /dev/null
+++ b/UniOpt/core/ProgressReporter.py
@@ -0,0 +1,135 @@
+__all__ = ("ProgressReporter", "defaultProgressReporter")
+import sys
+import typing
+
+
+class ProgressReporter:
+ """A class to report progress of optimization to a user"""
+
+ def __init__(self, total, title) -> None:
+ raise NotImplementedError()
+
+ def reportHyperparams(self, hyperparams) -> None:
+ self.print(hyperparams)
+
+ def reportLoss(self, loss) -> None:
+ self.print(loss)
+
+ def print(self, *args, **kwargs) -> None:
+ print(*args, **kwargs)
+
+ def write(self, *args, **kwargs):
+ raise NotImplementedError()
+
+ def flush(self):
+ pass
+
+ def __enter__(self) -> typing.Any:
+ raise NotImplementedError()
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ raise NotImplementedError()
+
+
+class DumbProgressReporterBase(ProgressReporter):
+ """Setups basic facilities for printing"""
+
+ def __init__(self, total: int, title: str) -> None:
+ self.stream = sys.stderr
+
+ def print(self, *args, **kwargs) -> None:
+ kwargs["file"] = self.stream
+ return print(*args, **kwargs)
+
+ def flush(self):
+ return self.stream.flush()
+
+ def write(self, *args, **kwargs) -> None:
+ return self.stream.write(*args, **kwargs)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type: None, exc_value: None, traceback: None) -> None:
+ pass
+
+
+class DumbProgressReporter(DumbProgressReporterBase):
+ """Just prints the messages"""
+
+ def __init__(self, total: int, title: str) -> None:
+ super().__init__(total, title)
+ self.i = 0
+ self.total = total
+ self.title = title
+
+ def reportHyperparams(self, hyperparams: typing.Dict[str, typing.Any]) -> None:
+ super().print(self.title, self.i, "/", self.total, hyperparams)
+
+ def reportLoss(self, loss: typing.Tuple[float, float]) -> None:
+ super().print(self.title, self.i, "/", self.total, loss)
+ self.i += 1
+
+
+defaultProgressReporter = DumbProgressReporter
+
+try:
+ try:
+ from tqdm.autonotebook import tqdm as mtqdm
+ except BaseException:
+ from tqdm import tqdm as mtqdm
+
+ class TQDMProgressReporter(DumbProgressReporterBase):
+ """Uses an awesome tqdm lib to print progress"""
+
+ def __init__(self, total: int, title: str) -> None:
+ super().__init__(total, title)
+ self.underlyingStream = self.stream
+ self.stream = mtqdm(total=total, desc=title, file=self.stream)
+
+ def flush(self):
+ return self.underlyingStream.flush()
+
+ def reportLoss(self, loss: typing.Tuple[float, float]) -> None:
+ #super().reportLoss(loss)
+ self.write(repr(loss)) # problems: print doesn't work well enough, may be a bug in tqdm
+ self.stream.update()
+
+ def reportHyperparams(self, hyperparams: typing.Dict[str, typing.Any]):
+ return self.write(repr(hyperparams)) # problems: print doesn't work well enough, may be a bug in tqdm
+
+ def __enter__(self):
+ self.stream.__enter__()
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ return self.stream.__exit__(exc_type, exc_value, traceback)
+
+ defaultProgressReporter = TQDMProgressReporter
+except ImportError:
+ pass
+
+if defaultProgressReporter is DumbProgressReporter:
+ try:
+ from fish import SwimFishProgressSync, fish_types
+
+ class FishProgressReporter(DumbProgressReporterBase):
+ """Uses `fish` lib to show some shit like swimming fishes"""
+
+ def __init__(self, total, title, type="bass"):
+ super().__init__(total, title)
+ self.i = 0
+
+ class ProgressFish(SwimFishProgressSync, fish_types[type]):
+ pass
+
+ self.fish = ProgressFish(total=total, outfile=self.stream)
+
+ def reportLoss(self, loss):
+ super().reportLoss(loss)
+ self.fish.animate(amount=self.i)
+ self.i += 1
+
+ defaultProgressReporter = FishProgressReporter
+ except ImportError:
+ pass
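+
+
+# An illustrative sketch: a reporter is a context manager constructed from a
+# total count and a title.
+#
+# with defaultProgressReporter(total=2, title="demo") as pb:
+#	pb.reportHyperparams({"x": 0.5})
+#	pb.reportLoss((0.25, 0.0))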
diff --git a/UniOpt/core/Spec.py b/UniOpt/core/Spec.py
new file mode 100644
index 0000000..18957f7
--- /dev/null
+++ b/UniOpt/core/Spec.py
@@ -0,0 +1,236 @@
+from collections import defaultdict
+
+import numpy as np
+import scipy.stats
+
+from ..imports import *
+from ..utils import getEffectiveAttrForAClass
+from .HyperparamVector import HyperparamVector
+
+eps = np.finfo(np.float32).eps
+
+possibleTypesRemap = {
+ int: (int, np.int16, np.int32, np.int64, np.int_),
+ float: (float, np.float16, np.float32, np.float64)
+}
+
+IntT = typing.Union.__getitem__(possibleTypesRemap[int])
+FloatT = typing.Union.__getitem__(possibleTypesRemap[float])
+NumericT = typing.Union[IntT, FloatT]
+
+
+class HyperparamDefinition:
+ """Used to define a spec. The first argument is data type, the second one is a scipy.stat distribution.
+ `randint` means that changing the number has unpredictable effect on the resulting value."""
+
+ __slots__ = ("type", "distribution")
+
+ def __init__(self, type: typing.Union[type, str], distribution):
+ self.type = type
+ self.distribution = distribution
+
+ def __repr__(self):
+ return self.__class__.__name__ + "(" + ", ".join((k + "=" + repr(getattr(self, k)) for k in self.__class__.__slots__)) + ")"
+
+
+HyperDef = HyperparamDefinition
+
+uniformUnityOffset = 1
+#uniformLimits = (uniformUnityOffset * eps, 1.0 - uniformUnityOffset * eps) # 1 and 0 cannot be used since some distributions have infinity ppf there
+uniformLimits = (0.00001, 0.99999)
+uniformUnityDistr = HyperDef(float, scipy.stats.uniform(loc=uniformLimits[0], scale=uniformLimits[1] - uniformLimits[0]))
+
+categoricalTypes = (tuple, list)
+
+
+def float2int(v):
+ return int(round(v))
+
+
+categoricalName = "_categorical"
+
+
+def getDistrName(hpDef: typing.Any) -> str:
+ if isinstance(hpDef, categoricalTypes):
+ return categoricalName
+ else:
+ return hpDef.distribution.dist.name
+
+
+class SpecProto(object):
+ """Basic spec class. Uses dicts as a spec. Allows to add scalars if optimization is not needed."""
+
+ hyperparamsVectorType = HyperparamVector
+ hyperparamsSpecType = HyperparamVector
+
+ class HyperparamsSpecsConverters:
+ """Use it as a mapping distribution name -> function transforming a scipy.stats distribution into optimizer-specific param definition"""
+
+ def transformHyperDefItemUniversal(self, k, v):
+ raise NotImplementedError()
+
+ def _transformHyperDefItem(self, k, v):
+ """the internal method is created by metaclass"""
+ raise NotImplementedError()
+
+ def __init__(self, genericSpec):
+ self.transformHyperDefItemUniversalRecursionLock = False
+ self.postProcessors = defaultdict(list)
+ spec = self.transformGenericSpec(genericSpec)
+ self.spec = spec
+
+ def addAPostProcessorIfNeeded(self, i, k, v):
+ pass
+
+ def scalarProcessor(self, i, k, v):
+ return v
+
+ def isScalar(self, v):
+ return not self.isSpecItem(v)
+
+ def getOptimizerConsumableSpec(self):
+ """Converts the container of a space spec in a semi-processed form (dict but its values are optimizer-specific) into the form optimizer wants."""
+ return self.__class__.hyperparamsSpecType.dict2native(self.spec, self)
+
+ def transformGenericSpec(self, genericSpec):
+ """Converts generic spec values into optimizer-specific form"""
+ res = {}
+ for i, (k, v) in enumerate(genericSpec.items()):
+ if self.isScalar(v):
+ v = self.scalarProcessor(i, k, v)
+
+ if v is not None:
+ if self.isSpecItem(v):
+ res[k] = self._transformHyperDefItem(k, v) # the method is created by metaclass
+ else:
+ res[k] = v
+ self.addAPostProcessorIfNeeded(i, k, v)
+ return res
+
+ def __repr__(self):
+ return self.__class__.__name__ + "(" + repr(self.spec) + ")"
+
+ def getSpec(self):
+ """Gives spec in its internal form"""
+ return self.spec
+
+ def getOptimizerConsumableVector(self, dic: dict):
+ """Converts a dict of points into optimizer-specific format. Useful when wants to inject points into an optimizer."""
+ for attrName, postProcessorStack in self.postProcessors.items():
+ v = dic[attrName]
+ for f in reversed(postProcessorStack):
+ v = f[1](v)
+ if v is None:
+ break
+ if v is not None:
+ dic[attrName] = v
+ else:
+ del dic[attrName]
+ return self.__class__.hyperparamsVectorType.dict2native(dic, self)
+
+ def transformHyperparams(self, hyperparamsNative):
+ """Used to transform hyperparams returned by a optimizer into black box function consumable form (with respect to its calling convention)"""
+ hpVec = self.__class__.hyperparamsVectorType.native2dict(hyperparamsNative, self)
+ for attrName, postProcessorStack in self.postProcessors.items():
+ if attrName in hpVec:
+ v = hpVec[attrName]
+ else:
+ v = None
+ for f in postProcessorStack:
+ v = f[0](v)
+ hpVec[attrName] = v
+
+ dictForm = dict(hpVec)
+ res = self.prepareHyperparamsDict(dictForm)
+ return res
+
+ def transformResult(self, hyperparamsNative):
+ """Transforms result in optimizer-specific form into the generic form (just a dict)"""
+ return self.transformHyperparams(hyperparamsNative)
+
+ def distributionToUniform_(self, k, hpDef):
+ #def printer(v):
+ # print(k, " ", v)
+ # return v
+ #self.postProcessors[k].append((printer, printer))
+ self.postProcessors[k].append((hpDef.distribution.ppf, hpDef.distribution.cdf))
+ if hpDef.type is int:
+ self.postProcessors[k].append((float2int, float))
+ return uniformUnityDistr
+
+ def distributionToUniform(self, k, hpDef):
+ distName = getDistrName(hpDef)
+ if distName != "uniform" and distName[0] != "_":
+ return self.distributionToUniform_(k, hpDef)
+ else:
+ return hpDef
+
+ def prepareHyperparamsDict(self, dic: typing.Mapping[str, typing.Any]) -> dict:
+ """Prepares hyperparams dict"""
+ return dic
+
+ def isSpecItem(self, item) -> bool:
+ """Returns True if `item` is an item of an abstract generic spec"""
+ return isinstance(item, (HyperDef, *categoricalTypes))
+
+
+def transformHyperDefItemSelect(self, k, v):
+ distName = getDistrName(v)
+
+ if hasattr(self.__class__.HyperparamsSpecsConverters, distName):
+ if distName[0] == "_":
+ return getattr(self.__class__.HyperparamsSpecsConverters, distName)(k, v)
+ else:
+ return getattr(self.__class__.HyperparamsSpecsConverters, distName)(k, v.distribution, v.type)
+ else:
+ if self.transformHyperDefItemUniversalRecursionLock:
+ raise Exception("Recursion of transformHyperDefItemSelect in " + self.__class__.__name__ + ". " + distName + " is not defined in HyperparamsSpecsConverters.")
+
+ self.transformHyperDefItemUniversalRecursionLock = True
+ res = transformHyperDefItemUniversalViaUniform(self, k, v)
+ self.transformHyperDefItemUniversalRecursionLock = False
+ return res
+
+
+def transformHyperDefItemUniversalViaUniform(self, k, v):
+ return self._transformHyperDefItem(k, self.distributionToUniform(k, v))
+
+
+class SpecMeta(type):
+ """A metaclass to generate properties for Spec classes. Automatically fills some fields."""
+
+	def __new__(cls, className, parents, attrs, *args, **kwargs):
+		effective = getEffectiveAttrForAClass(("transformHyperDefItemUniversal", "HyperparamsSpecsConverters"), attrs, parents)
+		if not effective["transformHyperDefItemUniversal"]:
+			attrs["transformHyperDefItemUniversal"] = transformHyperDefItemUniversalViaUniform
+
+		if effective["HyperparamsSpecsConverters"] is not None:
+			if hasattr(effective["HyperparamsSpecsConverters"], "uniform"):
+				attrs["_transformHyperDefItem"] = transformHyperDefItemSelect
+			else:
+				raise Exception(repr(effective["HyperparamsSpecsConverters"]) + " contains no `uniform`. Class must either define or inherit a converter for uniform distribution `HyperparamsSpecsConverters.uniform` or an own converter function `transformHyperDefItemUniversal`, or both.")
+		else:
+			if effective["transformHyperDefItemUniversal"]:
+				attrs["_transformHyperDefItem"] = effective["transformHyperDefItemUniversal"]
+			else:
+				raise Exception("`" + className + ".transformHyperDefItemUniversal` is not defined. Class must either define or inherit a converter for uniform distribution `HyperparamsSpecsConverters.uniform` or an own converter function `transformHyperDefItemUniversal`, or both.")
+
+		return super().__new__(cls, className, parents, attrs, *args, **kwargs)
+
+
+class Spec(SpecProto, metaclass=SpecMeta):
+ """A base class representing optimizer initializing and calling convention."""
+
+ HyperparamsSpecsConverters = None
+ transformHyperDefItemUniversal = transformHyperDefItemSelect
+
+
+class DummySpec(Spec):
+ """A trivial spec, just returns input. Used mainly for testing purposes"""
+
+ def transformHyperDefItemUniversal(self, k, v):
+ return v
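+
+
+# An illustrative sketch of a generic space spec (mirroring tests/tests.py):
+# values are either HyperparamDefinition objects or plain scalars.
+#
+# import scipy.stats
+# gridSpec = {
+#	"x": HyperparamDefinition(float, scipy.stats.uniform(loc=0, scale=10)),
+#	"y": HyperparamDefinition(int, scipy.stats.norm(loc=0, scale=10)),
+#	"z": 3,  # a scalar, not optimized
+# }
+# Spec subclasses then rewrite this dict into whatever the backend consumes.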
diff --git a/UniOpt/core/SpecNoCategories.py b/UniOpt/core/SpecNoCategories.py
new file mode 100644
index 0000000..a63e3b9
--- /dev/null
+++ b/UniOpt/core/SpecNoCategories.py
@@ -0,0 +1,33 @@
+from ..imports import *
+from .HyperparamVector import HyperparamVector
+from .MetaMap import MetaMap
+from .Optimizer import GenericOptimizer
+from .Spec import HyperparamDefinition, Spec
+from .SpecNoIntegers import *
+
+
+class SpecNoCategories(Spec):
+ """A mixing implementing optimization of categorical variables with optimizers which don't support them, but support numbers"""
+
+ hyperparamsVectorType = HyperparamVector
+
+ def __init__(self, genericSpec):
+ raise NotImplementedError()
+ super().__init__(genericSpec)
+
+ def scalarProcessor(self, i, k, v):
+ raise NotImplementedError()
+ v = super().scalarProcessor(i, k, v)
+
+ def returnV(arg):
+ raise NotImplementedError()
+
+ self.postProcessors[k].insert(0, (returnV, None))
+
+
+class CategoriesMetaMap(MetaMap):
+ supportsCategories = None
+ noCategories = None
+
+
+raise NotImplementedError()
diff --git a/UniOpt/core/SpecNoIntegers.py b/UniOpt/core/SpecNoIntegers.py
new file mode 100644
index 0000000..82ca130
--- /dev/null
+++ b/UniOpt/core/SpecNoIntegers.py
@@ -0,0 +1,50 @@
+from ..imports import *
+from .HyperparamVector import HyperparamVector
+from .MetaMap import MetaMap
+from .Optimizer import GenericOptimizer
+from .Spec import DummySpec, HyperDef, HyperparamDefinition, Spec, SpecMeta, categoricalTypes, float2int
+
+
+class SpecToIntegersMeta(SpecMeta):
+ """A metaclass to process integerCtor ."""
+
+ integerCtors = set()
+
+ def __new__(cls, className, parents, attrs, *args, **kwargs):
+ if "integerCtor" in attrs:
+ __class__.integerCtors.add(attrs["integerCtor"])
+ return super().__new__(cls, className, parents, attrs, *args, **kwargs)
+
+
+class SpecToIntegersBase(Spec, metaclass=SpecToIntegersMeta):
+ """Transforms floats into integers."""
+
+ def integerCtor(val: float):
+ raise NotImplementedError()
+
+ def scalarProcessor(self, i, k, v):
+ res = super().scalarProcessor(i, k, v)
+ return res
+
+ def addAnIntegerCheckPostprocessorIfNeeded(self, i, k, v):
+ pp = self.postProcessors[k]
+		if not pp or pp[-1][0] not in SpecToIntegersMeta.integerCtors:
+ pp.append((self.__class__.integerCtor, float))
+
+ def addAPostProcessorIfNeeded(self, i, k, v):
+ if isinstance(v, HyperDef) and issubclass(v.type, int) or isinstance(v, categoricalTypes) and isinstance(v[0], int) or isinstance(v, int):
+ self.addAnIntegerCheckPostprocessorIfNeeded(i, k, v)
+
+
+class SpecToIntegers(SpecToIntegersBase):
+ integerCtor = int
+
+
+class SpecNoIntegers(SpecToIntegersBase):
+ integerCtor = float2int
+
+
+class IntegerMetaMap(MetaMap):
+ supportsIntegers = None
+ floatIntegers = ("To", SpecToIntegers)
+ noIntegers = ("No", SpecNoIntegers)
diff --git a/UniOpt/core/SpecNoScalars.py b/UniOpt/core/SpecNoScalars.py
new file mode 100644
index 0000000..f5799b0
--- /dev/null
+++ b/UniOpt/core/SpecNoScalars.py
@@ -0,0 +1,49 @@
+from ..imports import *
+from .HyperparamVector import HyperparamVector
+from .MetaMap import MetaMap
+from .Optimizer import GenericOptimizer
+from .Spec import HyperparamDefinition, Spec
+from .SpecNoIntegers import *
+
+
+class SpecNoScalars(Spec):
+ """A spec not supporting scalars"""
+
+ pass
+
+
+def returnNone(x):
+ pass
+
+
+class SpecNoScalarsDumb(SpecNoScalars):
+ """Saves scalars itself, then augment the result"""
+
+ hyperparamsVectorType = HyperparamVector
+
+ def __init__(self, genericSpec):
+ self.savedScalarHyperparamsNames = []
+ super().__init__(genericSpec)
+
+ def scalarProcessor(self, i, k, v):
+ #v = super().scalarProcessor(i, k, v) # assuming that a user has provided the scalar in right format, no postprocessing needed at all
+
+ def returnV(arg):
+ return v
+
+ self.savedScalarHyperparamsNames.append(k)
+ self.postProcessors[k].insert(0, (returnV, returnNone))
+ # return res
+
+
+class SpecNoScalarsCategorical(SpecNoScalars):
+ """Creates a degenerate category"""
+
+ def scalarProcessor(self, i, k, v):
+ return super().scalarProcessor(i, k, (v,))
+
+
+class ScalarMetaMap(MetaMap):
+ supportsScalars = None
+ degenerateCategory = ("Categorical", SpecNoScalarsCategorical)
+ noScalars = ("Dumb", SpecNoScalarsDumb)
diff --git a/UniOpt/core/SpecOnlyBoxes.py b/UniOpt/core/SpecOnlyBoxes.py
new file mode 100644
index 0000000..cf03f56
--- /dev/null
+++ b/UniOpt/core/SpecOnlyBoxes.py
@@ -0,0 +1,27 @@
+from ..core.MetaSpec import *
+from ..imports import *
+from .HyperparamVector import HyperparamVector
+from .Spec import HyperparamDefinition, Spec
+
+
+class SpecOnlyBoxes(MSpec(scalarMode=ScalarMetaMap.noScalars)):
+ """A very dumb spec, allowing only uniformly distributed variables, no categoricals and scalars and internal space spec is just a sequence `(lower_bound, upper_bound)`. A widespread situation."""
+
+ class HyperparamsSpecsConverters:
+ def randint(k, dist, tp):
+ return (dist.a, dist.b)
+
+ def uniform(k, dist, tp):
+ return (dist.ppf(0), dist.ppf(1))
+
+
+class SpecOnlyBoxesNoIntegers(MSpec(integerMode=IntegerMetaMap.noIntegers), SpecOnlyBoxes):
+ pass
+
+
+class ArraySpecOnlyBoxes(MSpec(isArray=True, scalarMode=ScalarMetaMap.noScalars), SpecOnlyBoxes):
+ pass
+
+
+class ArraySpecOnlyBoxesNoIntegers(MSpec(isArray=True, scalarMode=ScalarMetaMap.noScalars, integerMode=IntegerMetaMap.noIntegers), SpecOnlyBoxes):
+ pass
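+
+
+# An illustrative sketch: under this spec family a hyperparam defined as
+# HyperparamDefinition(float, scipy.stats.uniform(loc=0, scale=10)) becomes the
+# plain box (0.0, 10.0), while non-uniform distributions are first rerouted
+# through the near-(0, 1) uniform box via the ppf/cdf postprocessors.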
diff --git a/UniOpt/core/__init__.py b/UniOpt/core/__init__.py
new file mode 100644
index 0000000..4d75976
--- /dev/null
+++ b/UniOpt/core/__init__.py
@@ -0,0 +1,32 @@
+import typing
+
+LossT = typing.Union[float, typing.Tuple[float, float]]
+PointTupleT = typing.Tuple[dict, LossT]
+PointsSequenceT = typing.Iterable[PointTupleT]
+
+
+class Point(tuple):
+ __slots__ = ()
+ # broken
+ #def __init__(self, pointTuple:PointTupleT):
+ # super(tuple, self).__init__(pointTuple)
+
+ def __lt__(self, other: PointTupleT):
+ return self[1] < other[1]
+
+ def __le__(self, other: PointTupleT):
+ return self[1] <= other[1]
+
+ def __gt__(self, other: PointTupleT):
+ return self[1] > other[1]
+
+ def __ge__(self, other: PointTupleT):
+ return self[1] >= other[1]
+
+ @property
+ def loss(self):
+ return self[1]
+
+ @property
+ def params(self):
+ return self[0]
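+
+
+# An illustrative sketch: Point orders by loss, so sorts and heaps put the best
+# (the lowest-loss) point first.
+#
+# assert Point(({"x": 1}, 0.5)) < Point(({"x": 2}, 0.7))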
diff --git a/UniOpt/imports.py b/UniOpt/imports.py
new file mode 100644
index 0000000..ff79c80
--- /dev/null
+++ b/UniOpt/imports.py
@@ -0,0 +1,8 @@
+import sys
+import types
+import typing
+import warnings
+from pprint import pformat, pprint
+
+import numpy as np
+import scipy
+import scipy.stats
+from lazily import lazyImport
diff --git a/UniOpt/utils/__init__.py b/UniOpt/utils/__init__.py
new file mode 100644
index 0000000..5099383
--- /dev/null
+++ b/UniOpt/utils/__init__.py
@@ -0,0 +1,46 @@
+import shutil
+import types
+import typing
+from pathlib import Path
+
+
+class IterableModule(types.ModuleType):
+ """Just a module available for iteration"""
+
+ __all__ = None
+
+ def __iter__(self):
+ for pn in self.__class__.__all__:
+ yield getattr(self, pn)
+
+
+def resolveAvailablePath(fileName: str):
+ """Searches a file in availability, like in PATH or in the folder."""
+ p = shutil.which(fileName)
+ if p:
+ return Path(p).resolve().absolute()
+
+
+def getEffectiveAttrForAClass(checkedProps: typing.Iterable[str], attrs: typing.Mapping[str, typing.Any], parents: typing.Iterable[type]) -> typing.Mapping[str, typing.Any]:
+ effective = {pN: None for pN in checkedProps}
+
+ for pN in checkedProps:
+ if pN in attrs:
+ effective[pN] = attrs[pN]
+ else:
+ for p in parents:
+ if effective[pN] is None:
+ if hasattr(p, pN):
+ v = getattr(p, pN)
+ if v:
+ effective[pN] = v
+ break
+ return effective
+
+
+def notInitializedFunction(*args, **kwargs):
+ raise NotImplementedError("This function is used instead of None to make sanity checkers happy.")
+
+
+def dummyFunction(*args, **kwargs):
+ pass
diff --git a/UniOpt/utils/coresCount.py b/UniOpt/utils/coresCount.py
new file mode 100644
index 0000000..daea1b1
--- /dev/null
+++ b/UniOpt/utils/coresCount.py
@@ -0,0 +1,13 @@
+import os
+
+try:
+ import psutil
+
+ def getCoreCount():
+ return psutil.cpu_count()
+
+
+except BaseException:
+
+ def getCoreCount():
+ return os.cpu_count()
diff --git a/funcs.py b/funcs.py
new file mode 100644
index 0000000..7c062f5
--- /dev/null
+++ b/funcs.py
@@ -0,0 +1,23 @@
+import numpy as np
+
+
+def modRosenbrockNP(X, a=1, b=100):
+ return np.sqrt(np.power(a - X[0], 4) + b * np.power(X[1] - np.power(X[0], 2), 2))
+
+
+def ackleyRosenbrockNp(X, a=20, b=0.2, c=2 * np.pi):
+ return np.real(a * (1 - np.exp(-b * np.sqrt(modRosenbrockNP(X, a=0, b=a) / X.shape[0]))) - np.exp(np.sum(np.cos(c * X), axis=0) / X.shape[0]) + np.exp(1))
+
+
+def ackleyRosenbrock(params):
+ """Ackley function"""
+ X = np.array(list(params.values()))
+ return ackleyRosenbrockNp(X)
+
+
+def ackleyRosenbrockWithVariance(params):
+ return (ackleyRosenbrock(params), 0)
+
+
+def logoFunc(X):
+ return ackleyRosenbrockNp(np.array((np.sinh(X[0]), X[1])))
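+
+
+# An illustrative sketch: the dict-taking variants are what UniOpt optimizes.
+#
+# loss, variance = ackleyRosenbrockWithVariance({"x": 0.5, "y": 1.0, "z": 3.0})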
diff --git a/logo.py b/logo.py
new file mode 100644
index 0000000..e5e0e31
--- /dev/null
+++ b/logo.py
@@ -0,0 +1,78 @@
+import numpy as np
+import PIL.Image
+import colorcet
+from matplotlib import pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D
+from plumbum import cli
+
+from funcs import logoFunc
+
+defaultRes = 2048
+
+
+def makeLogo(x=0, y=0, xscale=3, xres=defaultRes, yres=defaultRes):
+	xs = np.linspace(-xscale + x, x + xscale, xres)
+	ys = np.linspace(y - 1, y + 30, yres)
+	xys = np.array(np.meshgrid(xs, ys))
+	zs = logoFunc(xys)
+	zs = np.flip(zs, axis=0)
+	return (xs, ys, xys, zs)
+
+
+def background(xscale=3):
+	return np.mean(makeLogo(xscale * 10)[-1])
+
+
+logoBack = background()
+
+defaultCmap = colorcet.m_rainbow_r
+
+
+def getColors(z, cmap=None):
+	if cmap is None:
+		cmap = defaultCmap
+	mz = np.array(z)
+	mz[mz > logoBack] = logoBack
+	mz -= np.min(mz)
+	mz /= np.max(mz)
+
+	cols = cmap(mz)
+	#cols = np.zeros((z.shape[0], z.shape[1], 4))
+	#cols[:, :, 3] = np.exp(-mz)
+	return cols
+
+
+def plotLogo3D(res=defaultRes):
+	(x, y, xy, z) = makeLogo(xres=res, yres=res)
+
+	fig = plt.figure()
+	ax = Axes3D(fig)
+	ax.view_init(elev=90, azim=90)
+
+	ax.plot_surface(*xy, z, facecolors=getColors(z))
+
+	#ax.contour(x, y, z)
+	return fig
+
+
+def plotLogo(res=defaultRes):
+	(x, y, xy, z) = makeLogo(xres=res, yres=res)
+	del xy, x, y
+	imgData = getColors(z)
+	del z
+	# uint8 is needed here: int8 would overflow for channel values above 127
+	im = PIL.Image.fromarray(np.array(imgData * 255, dtype=np.uint8), "RGBA")
+	return im
+
+
+class LogoPlotterCLI(cli.Application):
+	res = cli.SwitchAttr(["r", "resolution"], int, default=defaultRes, help="A resolution of the image. Use the highest possible.")
+	threeD = cli.Flag(["3d"], help="Plot 3d")
+
+	def main(self):
+		if not self.threeD:
+			im = plotLogo(res=self.res)
+			im.save("logo.png", format="png")
+		else:
+			fig = plotLogo3D(res=self.res)
+			plt.show()
+
+
+if __name__ == "__main__":
+	LogoPlotterCLI.run()
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..bb4b5af
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,97 @@
+[build-system]
+requires = ["setuptools>=61.2.0", "wheel", "setuptools_scm[toml]>=3.4.3",]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "UniOpt"
+description = "A wrapper for popular black box optimization libraries providing unified interface"
+readme = "ReadMe.md"
+keywords = ["hyperparameter optimization", "black box optimization"]
+license = {text = "Unlicense"}
+classifiers = [
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Development Status :: 4 - Beta",
+ "Environment :: Other Environment",
+ "Intended Audience :: Developers",
+ "License :: Public Domain",
+ #"License :: Public Domain :: Unlicense",
+ "Operating System :: OS Independent",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ #"Topic :: Science",
+]
+requires-python = ">=3.4"
+dependencies = [
+ "numpy",
+ "scipy",
+ "scikit_learn",
+ "lazy_object_proxy",
+ "lazily", # @ git+https://codeberg.org/KOLANICH-libs/lazily.py.git
+]
+authors = [
+ { name = "KOLANICH" },
+]
+dynamic = ["version"]
+
+[project.optional-dependencies]
+tqdmprogressreporter = [
+ "tqdm", # @ git+https://github.com/tqdm/tqdm.git"
+]
+hyperopt = [
+ "hyperopt", # @ git+https://github.com/hyperopt/hyperopt.git"
+]
+hyperband = ["hyperband"]
+hyperengine = [
+ "hyperengine", # @ git+https://github.com/maxim5/hyper-engine.git"
+]
+gpyopt = [
+ "GPyOpt", # @ git+https://github.com/SheffieldML/GPyOpt.git"
+]
+skopt = [
+ "scikit-optimize", # @ git+https://github.com/scikit-optimize/scikit-optimize.git"
+]
+smac = [
+ "smac", # @ git+https://github.com/automl/SMAC3.git"
+]
+beecolony = [
+ "ecabc", # @ git+https://github.com/ECRL/ecabc.git"
+]
+optunity = [
+ "optunity", # @ git+https://github.com/claesenm/optunity.git"
+]
+yabox = [
+ "yabox", # @ git+https://github.com/pablormier/yabox.git"
+]
+pyshac = [
+ "pyshac", # @ git+https://github.com/titu1994/pyshac.git"
+]
+rbfopt = [
+ "rbfopt", # @ git+https://github.com/coin-or/rbfopt.git"
+]
+bayessian = [
+ "bayesian_optimization", # @ git+https://github.com/fmfn/BayesianOptimization.git"
+]
+sopt = [
+ "sopt", # @ git+https://github.com/Lyrichu/sopt.git@43cfc8fe39d5d05ad01db626dd777e4ce3e8d14e"
+]
+pysot = [
+ "pySOT", # @ git+https://github.com/dme65/pySOT.git
+]
+baytune = [
+ "baytune", # @ git+https://github.com/HDI-Project/BTB.git
+]
+robo = [
+ "RoBO", # @ git+https://github.com/KOLANICH/RoBO.git@optional_pybnn
+]
+evostra = [
+ "evostra", # @ git+https://github.com/alirezamika/evostra.git
+]
+
+[project.urls]
+Homepage = "https://codeberg.org/KOLANICH-ML/UniOpt.py"
+
+[tool.setuptools]
+zip-safe = true
+packages = ["UniOpt", "UniOpt.core", "UniOpt.backends"]
+
+[tool.setuptools_scm]
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/tests.py b/tests/tests.py
new file mode 100644
index 0000000..e925738
--- /dev/null
+++ b/tests/tests.py
@@ -0,0 +1,228 @@
+#!/usr/bin/env python3
+import sys
+import unittest
+from collections import OrderedDict
+from pathlib import Path
+from pprint import pprint
+from random import randint
+
+import numpy as np
+import scipy.stats
+
+thisDir = Path(__file__).parent.absolute()
+sys.path.append(str(thisDir.parent))
+
+import UniOpt
+from funcs import ackleyRosenbrockWithVariance
+from UniOpt.backends.ecabc import BeeColonyGridSpec
+from UniOpt.backends.pyshac import PySHACGridSpec
+from UniOpt.core.ArraySpec import *
+from UniOpt.core.MetaSpec import MSpec
+from UniOpt.core.PointsStorage import *
+from UniOpt.core.Spec import *
+from UniOpt.core.SpecNoIntegers import *
+from UniOpt.core.SpecNoScalars import *
+from UniOpt.core.SpecOnlyBoxes import ArraySpecOnlyBoxes, ArraySpecOnlyBoxesNoIntegers, SpecOnlyBoxes, SpecOnlyBoxesNoIntegers
+
+specsTestGridSpec = {
+ "x": HyperparamDefinition(float, scipy.stats.uniform(loc=0, scale=10)),
+ "w": HyperparamDefinition(int, scipy.stats.uniform(loc=0, scale=10)),
+ "y": HyperparamDefinition(float, scipy.stats.norm(loc=0, scale=10)),
+ "z": 3 # discrete
+}
+
+
+masterInitVec = OrderedDict((
+ ("x", 0.7),
+ ("w", 0.7),
+ ("y", 0.6),
+ ("z", 3)
+))
+resultVec = OrderedDict((
+ ("x", masterInitVec["x"]),
+ ("w", masterInitVec["w"]),
+ ("y", specsTestGridSpec["y"].distribution.ppf(masterInitVec["y"])),
+ ("z", 3)
+))
+
+classezToTest = ("DummySpecNoScalarsCategoricalNoIntegers", "DummyArraySpecNoScalarsCategorical", "DummyArraySpecToIntegers", "DummyArraySpecNoScalarsDumbToIntegers", "DummyArraySpecNoScalarsCategoricalToIntegers", "DummyArraySpecNoScalarsCategoricalNoIntegers", "DummyArraySpecNoScalarsDumbNoIntegers", "DummyArraySpecNoScalarsDumb", "DummyArraySpecNoIntegers", "DummyArraySpec", "DummySpec", "DummySpecNoIntegers", "DummySpecNoScalarsCategoricalToIntegers")
+classezToTest = [MSpec(clsName) for clsName in classezToTest]
+classezToTest.extend((SpecOnlyBoxes, ArraySpecOnlyBoxesNoIntegers, SpecOnlyBoxesNoIntegers, ArraySpecOnlyBoxes))
+
+
+class TestSpecsClasses(unittest.TestCase):
+ def assertIsSubclass(self, cls, superCls, msg=None):
+ if not issubclass(cls, superCls):
+ self.fail(self._formatMessage(msg, repr(cls) + " is not a subclass of " + repr(superCls)))
+
+ def assertForBasicClass(self, cls, basicCls):
+ if issubclass(cls, basicCls):
+ self.assertIsSubclass(cls.hyperparamsVectorType, basicCls.hyperparamsVectorType)
+ self.assertIsSubclass(cls.hyperparamsSpecType, basicCls.hyperparamsSpecType)
+
+ def assertions4ASpecClass(self, cls):
+ basicClasses = (
+ SpecNoIntegers,
+ SpecToIntegers,
+ SpecNoScalarsDumb,
+ SpecNoScalarsCategorical,
+ ArraySpec
+ )
+ for basicCls in basicClasses:
+ self.assertForBasicClass(cls, basicCls)
+
+ def testSpecsClasses(self):
+ for cls in classezToTest:
+ with self.subTest(specClass=cls):
+ #print(cls, [scls.__name__ for scls in cls.mro()])
+ #print(cls.hyperparamsVectorType, [scls.__name__ for scls in cls.hyperparamsVectorType.mro()[:-1]])
+ #print(cls.hyperparamsSpecType, [scls.__name__ for scls in cls.hyperparamsSpecType.mro()[:-1]])
+ self.assertions4ASpecClass(cls)
+
+ def testSpecsInheritedClasses(self):
+ for cls in classezToTest:
+ with self.subTest(specClass=cls):
+
+ class InheritedClass(cls):
+ pass
+
+				self.assertions4ASpecClass(InheritedClass)
+
+
+class TestSpecs(unittest.TestCase):
+ def assertionsOnHyperparamsVector(self, cls, b):
+ self.assertEqual(b["x"], resultVec["x"])
+ self.assertIsInstance(b["x"], float)
+
+ if issubclass(cls, SpecNoIntegers):
+ self.assertEqual(b["w"], float2int(resultVec["w"]))
+ self.assertIsInstance(b["w"], int)
+ elif issubclass(cls, SpecToIntegers):
+ self.assertEqual(b["w"], int(resultVec["w"]))
+ self.assertIsInstance(b["w"], int)
+ else:
+ self.assertEqual(b["w"], resultVec["w"])
+ self.assertIsInstance(b["w"], float)
+
+ self.assertEqual(b["y"], resultVec["y"])
+ self.assertIsInstance(b["y"], float)
+
+ self.assertEqual(b["z"], resultVec["z"])
+ self.assertIsInstance(b["z"], int)
+
+ def generateTestHPVec(self, cls):
+ hpInitVec = type(masterInitVec)(masterInitVec)
+ if issubclass(cls, DummySpec) or hasattr(cls, "HyperparamsSpecsConverters") and hasattr(cls.HyperparamsSpecsConverters, specsTestGridSpec["y"].distribution.dist.name):
+ hpInitVec["y"] = resultVec["y"] # the result is generated by the optimizer itself
+
+ if issubclass(cls, SpecNoScalarsDumb):
+ del hpInitVec["z"]
+ else:
+ # optimizer may transform a categorical int into float !!! DO NOT DELETE!
+ if issubclass(cls, SpecToIntegersBase):
+ hpInitVec["z"] = float(hpInitVec["z"]) # to test if conversion to int works
+
+ if issubclass(cls.hyperparamsVectorType, HyperparamArray):
+ hpInitVec = list(hpInitVec.values())
+ else:
+ hpInitVec = dict(hpInitVec)
+ return hpInitVec
+
+ def genericSpecTest(self, cls):
+ hpInitVec = self.generateTestHPVec(cls)
+
+ a = cls(specsTestGridSpec)
+ b = a.transformHyperparams(hpInitVec)
+ self.assertionsOnHyperparamsVector(cls, b)
+
+ return a, b
+
+ def testGenericSpecs(self):
+ for cls in classezToTest:
+ with self.subTest(specClass=cls):
+ self.genericSpecTest(cls)
+
+ def testBeeColonyGridSpec(self):
+ a = BeeColonyGridSpec(specsTestGridSpec)
+ b = a.transformHyperparams(self.generateTestHPVec(BeeColonyGridSpec))
+
+ self.assertEqual(b["x"], resultVec["x"])
+ self.assertEqual(b["y"], resultVec["y"])
+ self.assertEqual(b["z"], resultVec["z"])
+
+ self.assertEqual(a.spec["x"], (0, 10))
+ self.assertEqual(a.spec["y"], uniformLimits)
+ self.assertEqual(a.spec["w"], (0, 10))
+
+ def testPySHACSpecGridSpec(self):
+ import pyshac
+
+ a, b = self.genericSpecTest(PySHACGridSpec)
+
+ self.assertIsInstance(a.spec["x"], pyshac.config.hyperparameters.UniformContinuousHyperParameter)
+ self.assertIsInstance(a.spec["y"], pyshac.config.hyperparameters.NormalContinuousHyperParameter)
+ self.assertIsInstance(a.spec["w"], pyshac.config.hyperparameters.UniformContinuousHyperParameter)
+ self.assertIsInstance(a.spec["z"], pyshac.config.hyperparameters.DiscreteHyperParameter)
+
+ #self.assertionsOnHyperparamsVector()
+
+
+optimizerTestGridSpec = {
+ "x": HyperparamDefinition(float, scipy.stats.uniform(loc=0, scale=10)),
+ "y": HyperparamDefinition(int, scipy.stats.norm(loc=0, scale=10)), # discrete
+ # "y": HyperparamDefinition(int, scipy.stats.uniform(loc=0, scale=10)), #discrete
+ "z": 3,
+}
+
+
+testStoredPointsToTestInjection = [(p, ackleyRosenbrockWithVariance(p)) for p in ({"x": pp / 5.0, "y": randint(-30, 30) / 3, "z": 3} for pp in range(50))]
+
+
+def prepareTestStor(cls):
+ stor = cls()
+ for p, loss in testStoredPointsToTestInjection:
+ stor.append(p, loss)
+ return stor
+
+
+class OptimizersTests(unittest.TestCase):
+ def assertOnParams(self, params):
+ self.assertIsInstance(params["x"], possibleTypesRemap[optimizerTestGridSpec["x"].type])
+ self.assertGreaterEqual(params["x"], 0.0)
+ self.assertLessEqual(params["x"], 10.0)
+ self.assertIsInstance(params["y"], possibleTypesRemap[optimizerTestGridSpec["y"].type])
+ self.assertIsInstance(params["z"], possibleTypesRemap[type(optimizerTestGridSpec["z"])])
+ self.assertEqual(params["z"], optimizerTestGridSpec["z"])
+
+ def ackleyRosenbrockWithVarianceAndAssert(self, params):
+ self.assertOnParams(params)
+ return ackleyRosenbrockWithVariance(params)
+
+ #@unittest.skip
+ def testOptimizers(self):
+ func = self.ackleyRosenbrockWithVarianceAndAssert
+ results = {}
+ #for optimizer in UniOpt:
+ #for optimizer in (UniOpt.BayTuneGP, UniOpt.PySOT, UniOpt.RoBOGP):
+ for optimizer in (UniOpt.BeeColony,):
+ print("optimizer: " + optimizer.__name__)
+ with self.subTest(optimizer=optimizer):
+ opt = optimizer(func, optimizerTestGridSpec, iters=100, jobs=1, pointsStorage=prepareTestStor(MemoryStorage))
+ res = opt()
+ results[optimizer] = (res, func(res))
+ results = OrderedDict(((k.__name__, v) for k, v in sorted(results.items(), key=lambda x: x[1][1][0])))
+ #if sys.version_info >= (3, 5):
+ # results=dict(results)
+ pprint(results)
+
+ @unittest.skip
+ def testOptimizer(self):
+ func = self.ackleyRosenbrockWithVarianceAndAssert
+ opt = UniOpt.GPyOpt(func, optimizerTestGridSpec, iters=100, jobs=1, pointsStorage=prepareTestStor(MemoryStorage))
+ res = opt()
+ self.assertOnParams(res)
+
+
+if __name__ == "__main__":
+ unittest.main()