diff --git a/.circleci/config.yml b/.circleci/config.yml index 805b2c32..9185d92b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,5 +1,54 @@ version: 2 jobs: + check-codestyle: + docker: + - image: hungpham2511/toppra-dep:0.0.2 + + working_directory: ~/repo + + steps: + - checkout + + # Download and cache dependencies + - restore_cache: + keys: + - v1-dependencies3-{{ checksum "requirements3.txt" }} + # fallback to using the latest cache if no exact match is found + - v1-dependencies3- + + - run: + name: install dependencies + command: | + python3 -m venv venv3 + . venv3/bin/activate + # main deps + pip install -r requirements3.txt + git clone https://github.com/hungpham2511/qpOASES && cd qpOASES/ && mkdir bin && make && cd interfaces/python/ && python setup.py install + # test deps + pip install cvxpy cvxopt pytest pytest-cov pandas tabulate pylint pydocstyle pycodestyle + + - save_cache: + paths: + - ./venv3 + key: v1-dependencies3-{{ checksum "requirements3.txt" }} + + # build install + - run: + name: build and install + command: | + . venv3/bin/activate + python setup.py develop + + - run: + name: check codestyle + command: | + . venv3/bin/activate + make lint + + - store_artifacts: + path: test-reports + destination: test-reports + test-python-3: docker: - image: hungpham2511/toppra-dep:0.0.2 @@ -19,31 +68,32 @@ jobs: - run: name: install dependencies command: | - python3 -m venv venv - . toppra-venv/bin/activate + python3 -m venv venv3 + . venv3/bin/activate # main deps - pip install -q -r requirements3.txt + pip install -r requirements3.txt + git clone https://github.com/hungpham2511/qpOASES && cd qpOASES/ && mkdir bin && make && cd interfaces/python/ && python setup.py install # test deps - pip install cvxpy pytest pytest-cov pandas tabulate + pip install cvxpy cvxopt pytest pytest-cov pandas tabulate pylint pydocstyle pycodestyle - save_cache: paths: - - ./toppra-venv + - ./venv3 key: v1-dependencies3-{{ checksum "requirements3.txt" }} # build install - run: name: build and install command: | - . venv/bin/activate + . venv3/bin/activate python setup.py develop - run: name: test command: | - . venv/bin/activate + . venv3/bin/activate export PYTHONPATH=$PYTHONPATH:`openrave-config --python-dir` - pytest -xv + pytest tests -W ignore::PendingDeprecationWarning - store_artifacts: path: test-reports @@ -73,17 +123,15 @@ jobs: . venv/bin/activate # main deps pip install -r requirements.txt - # test deps - pip install cvxpy==0.4.11 cvxopt pytest pytest-cov pandas tabulate - # qpOAses git clone https://github.com/hungpham2511/qpOASES && cd qpOASES/ && mkdir bin && make && cd interfaces/python/ && python setup.py install + # test deps + pip install cvxpy==0.4.11 cvxopt pytest pytest-cov pandas tabulate pylint pydocstyle pycodestyle pathlib2 - save_cache: paths: - ./venv key: v1-dependencies2-{{ checksum "requirements.txt" }} - # build install - run: name: build and install command: | @@ -95,7 +143,7 @@ jobs: command: | . venv/bin/activate export PYTHONPATH=$PYTHONPATH:`openrave-config --python-dir` - pytest tests + pytest tests -W ignore::PendingDeprecationWarning - store_artifacts: path: test-reports @@ -107,4 +155,5 @@ workflows: test: jobs: - test-python-2 - # - test-python-3 + - test-python-3 + - check-codestyle diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 00000000..69dac2da --- /dev/null +++ b/.pylintrc @@ -0,0 +1,560 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. 
Extensions are loading into the active Python interpreter and may +# run arbitrary code +extension-pkg-whitelist=cv2,openravepy + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. +jobs=1 + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Specify a configuration file. +#rcfile= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". 
If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=print-statement, + parameter-unpacking, + unpacking-in-except, + old-raise-syntax, + backtick, + long-suffix, + old-ne-operator, + old-octal-literal, + import-star-module-level, + non-ascii-bytes-literal, + invalid-unicode-literal, + raw-checker-failed, + bad-inline-option, + locally-disabled, + locally-enabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + apply-builtin, + basestring-builtin, + buffer-builtin, + cmp-builtin, + coerce-builtin, + execfile-builtin, + file-builtin, + long-builtin, + raw_input-builtin, + reduce-builtin, + standarderror-builtin, + unicode-builtin, + xrange-builtin, + coerce-method, + delslice-method, + getslice-method, + setslice-method, + no-absolute-import, + old-division, + dict-iter-method, + dict-view-method, + next-method-called, + metaclass-assignment, + indexing-exception, + raising-string, + reload-builtin, + oct-method, + hex-method, + nonzero-method, + cmp-method, + input-builtin, + round-builtin, + intern-builtin, + unichr-builtin, + map-builtin-not-iterating, + zip-builtin-not-iterating, + range-builtin-not-iterating, + filter-builtin-not-iterating, + using-cmp-argument, + eq-without-hash, + div-method, + idiv-method, + rdiv-method, + exception-message-attribute, + invalid-str-codec, + sys-max-int, + bad-python3-import, + deprecated-string-function, + deprecated-str-translate-call, + deprecated-itertools-function, + deprecated-types-field, + next-method-defined, + dict-items-not-iterating, + dict-keys-not-iterating, + dict-values-not-iterating, + deprecated-operator-function, + deprecated-urllib-function, + xreadlines-attribute, + deprecated-sys-function, + exception-escape, + comprehension-escape, + len-as-condition, + superfluous-parens, + no-member, + too-few-public-methods, + fixme,anomalous-backslash-in-string + + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member + + +[REPORTS] + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio).You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. +output-format=parseable + +# Tells whether to display a full report or only the messages +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. 
When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=optparse.Values,sys.exit + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,io,builtins + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. 
+ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[BASIC] + +# Naming style matching correct argument names +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style +#argument-rgx= + +# Naming style matching correct attribute names +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo, + bar, + baz, + toto, + tutu, + tata + +# Naming style matching correct class attribute names +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style +#class-attribute-rgx= + +# Naming style matching correct class names +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming-style +#class-rgx= + +# Naming style matching correct constant names +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style + + +# Good variable names which should always be accepted, separated by a comma +good-names=i, + j, + k, + ex, + Run, + _, + logger, + dx, dy, dz, dq, q, + x, y, z, s, dt, F, g, a, b, c, + N, ds + + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no + +# Naming style matching correct inline iteration names +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style +#inlinevar-rgx= + +# Naming style matching correct method names +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style +#method-rgx= + +# Naming style matching correct module names +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style +#variable-rgx= + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. 
+indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=120 + +# Maximum number of lines in a module +max-module-lines=1000 + +# List of optional constructs for which whitespace checking is disabled. `dict- +# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. +# `trailing-comma` allows a space between comma and closing bracket: (a, ). +# `empty-line` allows space-only lines. +no-space-check=trailing-comma, + dict-separator + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + + +[IMPORTS] + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub, + TERMIOS, + Bastion, + rexec + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=8 + +# Maximum number of attributes for a class (see R0902). 
+max-attributes=7
+
+# Maximum number of boolean expressions in an if statement
+max-bool-expr=5
+
+# Maximum number of branches for function / method body
+max-branches=12
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield statements for function / method body
+max-returns=6
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception".
+overgeneral-exceptions=Exception
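The rcfile above sets project-wide lint policy. Where a single line legitimately violates a check, pylint's standard inline pragmas can silence it locally instead of widening the global whitelist. A minimal sketch using generic pylint syntax; the function itself is purely illustrative:

```python
import numpy as np

# Module-level pragma: relax one check for this file only.
# pylint: disable=missing-docstring


def solve(A, b):  # pylint: disable=invalid-name
    # Single-letter matrix names are conventional in numerical code,
    # so silence invalid-name here rather than globally.
    return np.linalg.solve(A, b)
```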
diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 00000000..5d4a92a8
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,26 @@
+# .readthedocs.yml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Build documentation in the docs/ directory with Sphinx
+sphinx:
+  configuration: docs/source/conf.py
+
+# Build documentation with MkDocs
+#mkdocs:
+#  configuration: mkdocs.yml
+
+# Optionally build your docs in additional formats such as PDF and ePub
+formats: all
+
+# Optionally set the version of Python and requirements required to build your docs
+python:
+  version: 3.6
+  install:
+    - requirements: requirements3.txt
+    - requirements: docs/requirements.txt
+    - method: setuptools
+      path: .
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..fed9fff7
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,8 @@
+lint:
+	python -m pylint --rcfile=.pylintrc toppra
+	pycodestyle toppra --max-line-length=120 --ignore=E731,W503
+	pydocstyle toppra
+
+doc:
+	echo "Building toppra docs"
+	sphinx-build -b html docs/source docs/build
diff --git a/README.md b/README.md
index b10ac811..52328c5d 100644
--- a/README.md
+++ b/README.md
@@ -1,9 +1,9 @@
 # TOPP-RA
-[![Codacy Badge](https://api.codacy.com/project/badge/Grade/e77c2abbdaa04578b115d2911a146fcb)](https://app.codacy.com/app/hungpham2511/toppra?utm_source=github.com&utm_medium=referral&utm_content=hungpham2511/toppra&utm_campaign=Badge_Grade_Dashboard)
 [![Build Status](https://travis-ci.org/hungpham2511/toppra.svg?branch=master)](https://travis-ci.org/hungpham2511/toppra) [![Coverage Status](https://coveralls.io/repos/github/hungpham2511/toppra/badge.svg?branch=master)](https://coveralls.io/github/hungpham2511/toppra?branch=master)
+[![Documentation Status](https://readthedocs.org/projects/toppra/badge/?version=latest)](https://toppra.readthedocs.io/en/latest/?badge=latest)
+[![Codacy Badge](https://api.codacy.com/project/badge/Grade/e77c2abbdaa04578b115d2911a146fcb)](https://app.codacy.com/app/hungpham2511/toppra?utm_source=github.com&utm_medium=referral&utm_content=hungpham2511/toppra&utm_campaign=Badge_Grade_Dashboard)
-
-**Documentation and tutorials** are available at (https://hungpham2511.github.io/toppra/).
+**Documentation and tutorials** are available at https://toppra.readthedocs.io/en/latest/index.html.
 
 TOPP-RA is a library for computing the time-optimal path parametrization for robots subject to kinematic and dynamic constraints. In general, given the inputs:
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..e3382298
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,3 @@
+sphinx>=1.4
+ipykernel
+nbsphinx
diff --git a/docs/source/FAQs.rst b/docs/source/FAQs.rst
new file mode 100644
index 00000000..62a67578
--- /dev/null
+++ b/docs/source/FAQs.rst
@@ -0,0 +1,32 @@
+Frequently Asked Questions
+======================================
+
+
+1. How many gridpoints should I take?
+---------------------------------------
+
+A very important parameter when solving path parameterization
+instances with `toppra` is the number of gridpoints. Below is how to
+create an instance with 1000 uniform gridpoints.
+
+
+.. code-block:: python
+   :linenos:
+
+   gridpoints = np.linspace(0, path.duration, 1000)  # 1000 points
+   instance = algo.TOPPRA([pc_vel, pc_acc], path, gridpoints=gridpoints)
+
+
+Generally, more gridpoints give better solution quality but a longer
+solution time. The growth is roughly linear: if an instance with 100
+gridpoints takes 5 ms to solve, an instance with 200 gridpoints will
+most likely take about 10 ms.
+
+By default, `toppra` selects 100 gridpoints.
+
+As a general rule of thumb, the number of gridpoints should be at
+least twice the number of waypoints in the given path.
+
+
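The rule of thumb from the FAQ above is easy to apply programmatically. A minimal sketch, where `make_gridpoints` is a hypothetical helper; `path.duration` and the `SplineInterpolator` constructor are used exactly as elsewhere in this patch:

```python
import numpy as np
import toppra as ta


def make_gridpoints(path, n_waypoints, minimum=100):
    """Use at least twice as many gridpoints as waypoints."""
    n_grid = max(minimum, 2 * n_waypoints)
    return np.linspace(0, path.duration, n_grid)


way_pts = np.random.randn(80, 6)  # 80 waypoints, 6 dof
path = ta.SplineInterpolator(np.linspace(0, 1, 80), way_pts)
gridpoints = make_gridpoints(path, len(way_pts))  # here: 160 gridpoints
```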
diff --git a/docs/source/conf.py b/docs/source/conf.py
index d546c114..689a8364 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -18,7 +18,8 @@
 # import os
 import sys
-sys.path.insert(0, os.path.abspath('../../toppra/'))
+import toppra
+# sys.path.insert(0, os.path.abspath('../../toppra/'))
 
 # -- General configuration ------------------------------------------------
@@ -67,14 +68,9 @@
 author = 'Hung Pham'
 
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '0.1'
+version = 'v0.2.2'
 # The full version, including alpha/beta/rc tags.
-release = '0.1'
+release = 'v0.2.2'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index bc915b4a..6988d97b 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -50,6 +50,7 @@ You can find on this page :ref:`installation`, :ref:`tutorials`, some
    installation
    tutorials
    notes
+   FAQs
    modules
 
 Citing TOPP-RA!
diff --git a/docs/source/medias/2_figure_1.png b/docs/source/medias/2_figure_1.png
new file mode 100644
index 00000000..5af750a8
Binary files /dev/null and b/docs/source/medias/2_figure_1.png differ
diff --git a/docs/source/medias/2_figure_2.png b/docs/source/medias/2_figure_2.png
new file mode 100644
index 00000000..82a2d174
Binary files /dev/null and b/docs/source/medias/2_figure_2.png differ
diff --git a/docs/source/medias/2_figure_2b.png b/docs/source/medias/2_figure_2b.png
new file mode 100644
index 00000000..d164fdb1
Binary files /dev/null and b/docs/source/medias/2_figure_2b.png differ
diff --git a/docs/source/modules.rst b/docs/source/modules.rst
index 0528d358..b16995f0 100644
--- a/docs/source/modules.rst
+++ b/docs/source/modules.rst
@@ -6,51 +6,53 @@ Module references
 Interpolators
 -------------
 
+Interpolator base class
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: toppra.Interpolator
+   :members:
 
-SplineInterplator
+Spline Interpolator
 ^^^^^^^^^^^^^^^^^^^
 .. autoclass:: toppra.SplineInterpolator
    :members:
 
-RaveTrajectoryWrapper
-^^^^^^^^^^^^^^^^^^^^^^
+Rave Trajectory Wrapper
+^^^^^^^^^^^^^^^^^^^^^^^^^^
 .. autoclass:: toppra.RaveTrajectoryWrapper
    :members:
 
-Interpolator (base class)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. autoclass:: toppra.Interpolator
-   :members:
-
 Constraints
 ------------
 
-JointAccelerationConstraint
+Joint Acceleration Constraint
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 .. autoclass:: toppra.constraint.JointAccelerationConstraint
    :members:
+   :special-members:
 
-JointVelocityConstraint
+Joint Velocity Constraint
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 .. autoclass:: toppra.constraint.JointVelocityConstraint
    :members:
+   :special-members:
 
-SecondOrderConstraint
+Second Order Constraint
 ^^^^^^^^^^^^^^^^^^^^^^^^^
-.. autoclass:: toppra.constraint.CanonicalLinearSecondOrderConstraint
-   :members:
-
-CanonicalLinearConstraint (base class)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. autoclass:: toppra.constraint.CanonicalLinearConstraint
+.. autoclass:: toppra.constraint.SecondOrderConstraint
    :members:
+   :special-members:
 
-RobustLinearConstraint
+Robust Linear Constraint
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: toppra.constraint.RobustLinearConstraint
+   :members:
 
-.. autoclass:: toppra.constraint.RobustCanonicalLinearConstraint
+Canonical Linear Constraint (base class)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+.. autoclass:: toppra.constraint.LinearConstraint
    :members:
+   :special-members:
+
 Constraints (base class)
 ^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/docs/source/notes.rst b/docs/source/notes.rst
index 7855326a..60299183 100644
--- a/docs/source/notes.rst
+++ b/docs/source/notes.rst
@@ -8,4 +8,16 @@ Notes
 Derivation of kinematical quantities
 ------------------------------------
 
-content
+In `toppra` we deal with geometric paths, which are often represented
+as a function :math:`\mathbf p(s)`. Here :math:`s` is the path
+position and usually belongs to the interval :math:`[0, 1]`. Notice
+that `toppra` can also handle arbitrary intervals.
+
+
+Important expressions relating kinematic quantities:
+
+.. math::
+  \mathbf q(t) &= \mathbf p(s(t)) \\
+  \dot{\mathbf q}(t) &= \mathbf p'(s) \dot s(t) \\
+  \ddot{\mathbf q}(t) &= \mathbf p'(s) \ddot s(t) + \mathbf p''(s) \dot s(t)^2
+
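These identities can be sanity-checked numerically with the interpolators in this patch. A short sketch, assuming only that `eval`, `evald` and `evaldd` return p, p' and p'' (as the test suite below exercises); it compares the chain rule against finite differences of q(t) = p(s(t)) for the parameterization s(t) = t^2:

```python
import numpy as np
import toppra as ta

path = ta.SplineInterpolator(np.linspace(0, 1, 5), np.random.randn(5, 3))

t, eps = 0.6, 1e-4
s, sd, sdd = t ** 2, 2 * t, 2.0  # s(t) = t^2, so s' = 2t and s'' = 2

# Chain rule, as in the equations above.
qd = path.evald(s) * sd
qdd = path.evald(s) * sdd + path.evaldd(s) * sd ** 2


def q(tau):
    """Joint position as a function of time, q(t) = p(s(t))."""
    return path.eval(tau ** 2)


# Finite differences of q(t) agree with the chain-rule values.
qd_num = (q(t + eps) - q(t - eps)) / (2 * eps)
qdd_num = (q(t + eps) - 2 * q(t) + q(t - eps)) / eps ** 2
assert np.allclose(qd, qd_num, atol=1e-4)
assert np.allclose(qdd, qdd_num, atol=1e-3)
```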
diff --git a/examples/cartesian_accel.py b/examples/cartesian_accel.py
index 08c6e650..fc87cb0b 100644
--- a/examples/cartesian_accel.py
+++ b/examples/cartesian_accel.py
@@ -64,7 +64,7 @@
     def F(q): return F_q
     def g(q): return g_q
 
-    pc_cart_acc = constraint.CanonicalLinearSecondOrderConstraint(
+    pc_cart_acc = constraint.SecondOrderConstraint(
         inverse_dynamics, F, g, dof=7)
     # cartesin accel finishes
diff --git a/examples/robust_kinematics.py b/examples/robust_kinematics.py
index 98e5044d..4006e3ca 100644
--- a/examples/robust_kinematics.py
+++ b/examples/robust_kinematics.py
@@ -42,7 +42,7 @@ def main():
     pc_vel = constraint.JointVelocityConstraint(vlim)
     pc_acc = constraint.JointAccelerationConstraint(
         alim, discretization_scheme=constraint.DiscretizationType.Interpolation)
-    robust_pc_acc = constraint.RobustCanonicalLinearConstraint(
+    robust_pc_acc = constraint.RobustLinearConstraint(
         pc_acc, [args.du, args.dx, args.dc], args.interpolation_scheme)
     instance = algo.TOPPRA([pc_vel, robust_pc_acc], path,
                            gridpoints=np.linspace(0, 1, args.N + 1),
diff --git a/examples/torque_limit.py b/examples/torque_limit.py
new file mode 100644
index 00000000..a9842875
--- /dev/null
+++ b/examples/torque_limit.py
@@ -0,0 +1,126 @@
+import toppra as ta
+import toppra.constraint as constraint
+import toppra.algorithm as algo
+import numpy as np
+import matplotlib.pyplot as plt
+import time
+import openravepy as orpy
+
+ta.setup_logging("INFO")
+
+
+def main():
+    # openrave setup
+    env = orpy.Environment()
+    env.Load("robots/barrettwam.robot.xml")
+    env.SetViewer('qtosg')
+    robot = env.GetRobots()[0]
+
+    robot.SetActiveDOFs(range(7))
+
+    # Parameters
+    N_samples = 5
+    SEED = 9
+    dof = 7
+
+    # Random waypoints used to obtain a random geometric path. Here,
+    # we use spline interpolation.
+    np.random.seed(SEED)
+    way_pts = np.random.randn(N_samples, dof) * 0.6
+    path = ta.SplineInterpolator(np.linspace(0, 1, 5), way_pts)
+
+    # Create velocity bounds, then velocity constraint object
+    vlim_ = robot.GetActiveDOFMaxVel()
+    vlim_[robot.GetActiveDOFIndices()] = [80., 80., 80., 80., 80., 80., 80.]
+    vlim = np.vstack((-vlim_, vlim_)).T
+    # Create acceleration bounds, then acceleration constraint object
+    alim_ = robot.GetActiveDOFMaxAccel()
+    alim_[robot.GetActiveDOFIndices()] = [80., 80., 80., 80., 80., 80., 80.]
+    alim = np.vstack((-alim_, alim_)).T
+    pc_vel = constraint.JointVelocityConstraint(vlim)
+    pc_acc = constraint.JointAccelerationConstraint(
+        alim, discretization_scheme=constraint.DiscretizationType.Interpolation)
+
+    # torque limit
+    def inv_dyn(q, qd, qdd):
+        qdd_full = np.zeros(robot.GetDOF())
+        active_dofs = robot.GetActiveDOFIndices()
+        with robot:
+            # Temporarily remove vel/acc constraints
+            vlim = robot.GetDOFVelocityLimits()
+            alim = robot.GetDOFAccelerationLimits()
+            robot.SetDOFVelocityLimits(100 * vlim)
+            robot.SetDOFAccelerationLimits(100 * alim)
+            # Inverse dynamics
+            qdd_full[active_dofs] = qdd
+            robot.SetActiveDOFValues(q)
+            robot.SetActiveDOFVelocities(qd)
+            res = robot.ComputeInverseDynamics(qdd_full)
+            # Restore vel/acc constraints
+            robot.SetDOFVelocityLimits(vlim)
+            robot.SetDOFAccelerationLimits(alim)
+        return res[active_dofs]
+
+    tau_max_ = robot.GetDOFTorqueLimits() * 4
+
+    tau_max = np.vstack((-tau_max_[robot.GetActiveDOFIndices()], tau_max_[robot.GetActiveDOFIndices()])).T
+    fs_coef = np.random.rand(dof) * 10
+    pc_tau = constraint.JointTorqueConstraint(
+        inv_dyn, tau_max, fs_coef, discretization_scheme=constraint.DiscretizationType.Interpolation)
+    all_constraints = [pc_vel, pc_acc, pc_tau]
+    # all_constraints = pc_vel
+
+    instance = algo.TOPPRA(all_constraints, path, solver_wrapper='seidel')
+
+    # Retime the trajectory, only this step is necessary.
+    t0 = time.time()
+    jnt_traj, _ = instance.compute_trajectory(0, 0)
+    print("Parameterization time: {:} secs".format(time.time() - t0))
+    ts_sample = np.linspace(0, jnt_traj.get_duration(), 100)
+    qs_sample = jnt_traj.eval(ts_sample)
+    qds_sample = jnt_traj.evald(ts_sample)
+    qdds_sample = jnt_traj.evaldd(ts_sample)
+
+    torque = []
+    for q_, qd_, qdd_ in zip(qs_sample, qds_sample, qdds_sample):
+        torque.append(inv_dyn(q_, qd_, qdd_) + fs_coef * np.sign(qd_))
+    torque = np.array(torque)
+
+    fig, axs = plt.subplots(dof, 1)
+    for i in range(0, robot.GetActiveDOF()):
+        axs[i].plot(ts_sample, torque[:, i])
+        axs[i].plot([ts_sample[0], ts_sample[-1]], [tau_max[i], tau_max[i]], "--")
+        axs[i].plot([ts_sample[0], ts_sample[-1]], [-tau_max[i], -tau_max[i]], "--")
+    plt.xlabel("Time (s)")
+    plt.ylabel("Torque $(Nm)$")
+    plt.legend(loc='upper right')
+    plt.show()
+
+    # preview path
+    for t in np.arange(0, jnt_traj.get_duration(), 0.01):
+        robot.SetActiveDOFValues(jnt_traj.eval(t))
+        time.sleep(0.01)  # play back at roughly real time
+
+    # Compute the feasible sets and the controllable sets for viewing.
+    # Note that these steps are not necessary.
+    _, sd_vec, _ = instance.compute_parameterization(0, 0)
+    X = instance.compute_feasible_sets()
+    K = instance.compute_controllable_sets(0, 0)
+
+    X = np.sqrt(X)
+    K = np.sqrt(K)
+
+    plt.plot(X[:, 0], c='green', label="Feasible sets")
+    plt.plot(X[:, 1], c='green')
+    plt.plot(K[:, 0], '--', c='red', label="Controllable sets")
+    plt.plot(K[:, 1], '--', c='red')
+    plt.plot(sd_vec, label="Velocity profile")
+    plt.title("Path-position path-velocity plot")
+    plt.xlabel("Path position")
+    plt.ylabel("Path velocity")
+    plt.legend()
+    plt.tight_layout()
+    plt.show()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/requirements3.txt b/requirements3.txt
index 9f7b1625..08596ac2 100644
--- a/requirements3.txt
+++ b/requirements3.txt
@@ -1,9 +1,7 @@
 numpy
 cython>=0.22.0
+PyYAML
 scipy>0.18
 coloredlogs
 enum34
-cvxpy==1.0.21
-pytest
-pytest-cov
 matplotlib
diff --git a/tests/conftest.py b/tests/conftest.py
index 484c2245..0f914bd4 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,13 +1,19 @@
 import toppra
 import pytest
-import openravepy as orpy
+try:
+    import openravepy as orpy
+    IMPORT_OPENRAVE = True
+except ImportError as err:
+    IMPORT_OPENRAVE = False
+except SyntaxError as err:
+    IMPORT_OPENRAVE = False
+
 
 @pytest.fixture(scope="session")
 def rave_env():
     env = orpy.Environment()
     yield env
     env.Destroy()
-    orpy.RaveDestroy()
 
 
 def pytest_addoption(parser):
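The test modules below import `IMPORT_OPENRAVEPY` and `IMPORT_OPENRAVEPY_MSG` from a `tests/testing_utils.py` helper that this diff does not show. A plausible minimal sketch of that module, mirroring the guarded import in `conftest.py` above; the flag names come from the test imports, while the message text is an assumption:

```python
"""tests/testing_utils.py -- flag whether openravepy is importable (sketch)."""
try:
    import openravepy  # noqa: F401
    IMPORT_OPENRAVEPY = True
except (ImportError, SyntaxError):
    # Some openravepy builds are Python-2 only; importing them from
    # Python 3 raises SyntaxError, which is treated like a missing package.
    IMPORT_OPENRAVEPY = False

IMPORT_OPENRAVEPY_MSG = "openravepy is not available"
```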
+ """ + data, constraint = acceleration_pc_data + path, ss, alim = data - 2. constraint satisfaction: the `PathConstraint` returned should - be consistent with the data. + # An user of the class + a, b, c, F, g, ubound, xbound = constraint.compute_constraint_params(path, ss, 1.0) + assert xbound is None - """ - def test_constraint_type(self, acceleration_pc_data): - """ Syntactic correct. - """ - data, pc = acceleration_pc_data - assert pc.get_constraint_type() == constraint.ConstraintType.CanonicalLinear - - def test_constraint_params(self, acceleration_pc_data): - """ Test constraint satisfaction with cvxpy. - """ - data, constraint = acceleration_pc_data - path, ss, alim = data - - # An user of the class - a, b, c, F, g, ubound, xbound = constraint.compute_constraint_params(path, ss, 1.0) + N = ss.shape[0] - 1 + dof = path.dof + + ps = path.evald(ss) + pss = path.evaldd(ss) + + F_actual = np.vstack((np.eye(dof), - np.eye(dof))) + g_actual = np.hstack((alim[:, 1], - alim[:, 0])) + + npt.assert_allclose(F, F_actual) + npt.assert_allclose(g, g_actual) + for i in range(0, N + 1): + npt.assert_allclose(a[i], ps[i]) + npt.assert_allclose(b[i], pss[i]) + npt.assert_allclose(c[i], np.zeros_like(ps[i])) + assert ubound is None assert xbound is None - N = ss.shape[0] - 1 - dof = path.get_dof() - - ps = path.evald(ss) - pss = path.evaldd(ss) - - F_actual = np.vstack((np.eye(dof), - np.eye(dof))) - g_actual = np.hstack((alim[:, 1], - alim[:, 0])) - - npt.assert_allclose(F, F_actual) - npt.assert_allclose(g, g_actual) - for i in range(0, N + 1): - npt.assert_allclose(a[i], ps[i]) - npt.assert_allclose(b[i], pss[i]) - npt.assert_allclose(c[i], np.zeros_like(ps[i])) - assert ubound is None - assert xbound is None - - def test_wrong_dimension(self, acceleration_pc_data): - data, pc = acceleration_pc_data - path_wrongdim = ta.SplineInterpolator(np.linspace(0, 1, 5), np.random.randn(5, 10)) - with pytest.raises(ValueError) as e_info: - pc.compute_constraint_params(path_wrongdim, np.r_[0, 0.5, 1], 1.0) - assert e_info.value.args[0] == "Wrong dimension: constraint dof ({:d}) not equal to path dof ({:d})".format( - pc.get_dof(), 10 - ) + +def test_wrong_dimension(acceleration_pc_data): + data, pc = acceleration_pc_data + path_wrongdim = ta.SplineInterpolator(np.linspace(0, 1, 5), np.random.randn(5, 10)) + with pytest.raises(ValueError) as e_info: + pc.compute_constraint_params(path_wrongdim, np.r_[0, 0.5, 1], 1.0) + assert e_info.value.args[0] == "Wrong dimension: constraint dof ({:d}) not equal to path dof ({:d})".format( + pc.get_dof(), 10 + ) diff --git a/tests/constraint/test_robust_can_linear.py b/tests/constraint/test_robust_can_linear.py index 2f653b76..6f26c93f 100644 --- a/tests/constraint/test_robust_can_linear.py +++ b/tests/constraint/test_robust_can_linear.py @@ -22,7 +22,7 @@ def test_basic(accel_constraint, dist_scheme): "Basic initialization." 
cnst, path = accel_constraint - ro_cnst = toppra.constraint.RobustCanonicalLinearConstraint(cnst, [0.1, 2, .3], dist_scheme) + ro_cnst = toppra.constraint.RobustLinearConstraint(cnst, [0.1, 2, .3], dist_scheme) assert ro_cnst.get_constraint_type() == toppra.constraint.ConstraintType.CanonicalConic assert ro_cnst.get_dof() == 5 @@ -53,7 +53,7 @@ def test_negative_perb(accel_constraint): "If negative pertubations are given, raise ValueError" cnst, path = accel_constraint with pytest.raises(ValueError) as e_info: - ro_cnst = toppra.constraint.RobustCanonicalLinearConstraint(cnst, [-0.1, 2, .3]) + ro_cnst = toppra.constraint.RobustLinearConstraint(cnst, [-0.1, 2, .3]) assert e_info.value.args[0] == "Perturbation must be non-negative. Input {:}".format([-0.1, 2, .3]) diff --git a/tests/constraint/test_second_order.py b/tests/constraint/test_second_order.py index 6fde9ede..ffab6959 100644 --- a/tests/constraint/test_second_order.py +++ b/tests/constraint/test_second_order.py @@ -34,19 +34,20 @@ def g(q): def test_wrong_dimension(coefficients_functions): + """If the given path has wrong dimension, raise error.""" A, B, C, cnst_F, cnst_g, path = coefficients_functions def inv_dyn(q, qd, qdd): return A(q).dot(qdd) + np.dot(qd.T, np.dot(B(q), qd)) + C(q) - constraint = toppra.constraint.CanonicalLinearSecondOrderConstraint(inv_dyn, cnst_F, cnst_g, dof=2) + constraint = toppra.constraint.SecondOrderConstraint(inv_dyn, cnst_F, cnst_g, dof=2) path_wrongdim = toppra.SplineInterpolator(np.linspace(0, 1, 5), np.random.randn(5, 10)) - with pytest.raises(AssertionError) as e_info: + with pytest.raises(ValueError) as e_info: constraint.compute_constraint_params(path_wrongdim, np.r_[0, 0.5, 1], 1.0) assert e_info.value.args[0] == "Wrong dimension: constraint dof ({:d}) not equal to path dof ({:d})".format( - constraint.get_dof(), 10 + constraint.dof, 10 ) -def test_assemble_ABCFg(coefficients_functions): +def test_correctness(coefficients_functions): """ For randomly set A, B, C, F, g functions. The generated parameters must equal those given by equations. """ @@ -54,15 +55,16 @@ def test_assemble_ABCFg(coefficients_functions): def inv_dyn(q, qd, qdd): return A(q).dot(qdd) + np.dot(qd.T, np.dot(B(q), qd)) + C(q) - constraint = toppra.constraint.CanonicalLinearSecondOrderConstraint(inv_dyn, cnst_F, cnst_g, dof=2) - constraint.set_discretization_type(0) + constraint = toppra.constraint.SecondOrderConstraint( + inv_dyn, cnst_F, cnst_g, dof=2, + discretization_scheme=toppra.constraint.DiscretizationType.Collocation) a, b, c, F, g, _, _ = constraint.compute_constraint_params( - path, np.linspace(0, path.get_duration(), 10), 1.0) + path, np.linspace(0, path.duration, 10), 1.0) # Correct params - q_vec = path.eval(np.linspace(0, path.get_duration(), 10)) - qs_vec = path.evald(np.linspace(0, path.get_duration(), 10)) - qss_vec = path.evaldd(np.linspace(0, path.get_duration(), 10)) + q_vec = path.eval(np.linspace(0, path.duration, 10)) + qs_vec = path.evald(np.linspace(0, path.duration, 10)) + qss_vec = path.evaldd(np.linspace(0, path.duration, 10)) for i in range(10): ai_ = A(q_vec[i]).dot(qs_vec[i]) @@ -73,3 +75,74 @@ def inv_dyn(q, qd, qdd): np.testing.assert_allclose(ci_, c[i]) np.testing.assert_allclose(cnst_F(q_vec[i]), F[i]) np.testing.assert_allclose(cnst_g(q_vec[i]), g[i]) + + +def test_correctness_friction(coefficients_functions): + """ Same as the above test, but has frictional effect. 
+ """ + # setup + A, B, C, cnst_F, cnst_g, path = coefficients_functions + def inv_dyn(q, qd, qdd): + return A(q).dot(qdd) + np.dot(qd.T, np.dot(B(q), qd)) + C(q) + def friction(q): + """Randomize with fixed input/output.""" + np.random.seed(int(np.sum(q) * 1000)) + return 2 + np.sin(q) + np.random.rand(len(q)) + + constraint = toppra.constraint.SecondOrderConstraint(inv_dyn, cnst_F, cnst_g, 2, friction=friction) + constraint.set_discretization_type(0) + a, b, c, F, g, _, _ = constraint.compute_constraint_params( + path, np.linspace(0, path.duration, 10), 1.0) + + # Correct params + p_vec = path.eval(np.linspace(0, path.duration, 10)) + ps_vec = path.evald(np.linspace(0, path.duration, 10)) + pss_vec = path.evaldd(np.linspace(0, path.duration, 10)) + + for i in range(10): + ai_ = A(p_vec[i]).dot(ps_vec[i]) + bi_ = A(p_vec[i]).dot(pss_vec[i]) + np.dot(ps_vec[i].T, B(p_vec[i]).dot(ps_vec[i])) + ci_ = C(p_vec[i]) + np.sign(ps_vec[i]) * friction(p_vec[i]) + np.testing.assert_allclose(ai_, a[i]) + np.testing.assert_allclose(bi_, b[i]) + np.testing.assert_allclose(ci_, c[i]) + np.testing.assert_allclose(cnst_F(p_vec[i]), F[i]) + + +def test_joint_force(coefficients_functions): + """ Same as the above test, but has frictional effect. + """ + # setup + A, B, C, cnst_F, cnst_g, path = coefficients_functions + def inv_dyn(q, qd, qdd): + return A(q).dot(qdd) + np.dot(qd.T, np.dot(B(q), qd)) + C(q) + def friction(q): + """Randomize with fixed input/output.""" + np.random.seed(int(np.sum(q) * 1000)) + return 2 + np.sin(q) + np.random.rand(len(q)) + taulim = np.random.randn(2, 2) + + constraint = toppra.constraint.SecondOrderConstraint.joint_torque_constraint( + inv_dyn, taulim, friction=friction) + constraint.set_discretization_type(0) + a, b, c, F, g, _, _ = constraint.compute_constraint_params( + path, np.linspace(0, path.duration, 10), 1.0) + + # Correct params + p_vec = path.eval(np.linspace(0, path.duration, 10)) + ps_vec = path.evald(np.linspace(0, path.duration, 10)) + pss_vec = path.evaldd(np.linspace(0, path.duration, 10)) + + dof = 2 + F_actual = np.vstack((np.eye(dof), - np.eye(dof))) + g_actual = np.hstack((taulim[:, 1], - taulim[:, 0])) + + for i in range(10): + ai_ = A(p_vec[i]).dot(ps_vec[i]) + bi_ = A(p_vec[i]).dot(pss_vec[i]) + np.dot(ps_vec[i].T, B(p_vec[i]).dot(ps_vec[i])) + ci_ = C(p_vec[i]) + np.sign(ps_vec[i]) * friction(p_vec[i]) + np.testing.assert_allclose(ai_, a[i]) + np.testing.assert_allclose(bi_, b[i]) + np.testing.assert_allclose(ci_, c[i]) + np.testing.assert_allclose(F_actual, F[i]) + np.testing.assert_allclose(g_actual, g[i]) diff --git a/tests/interpolators/__init__.py b/tests/interpolators/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/interpolators/test_poly_interpolator.py b/tests/interpolators/test_poly_interpolator.py index 80a44d1c..a62f3459 100644 --- a/tests/interpolators/test_poly_interpolator.py +++ b/tests/interpolators/test_poly_interpolator.py @@ -3,26 +3,25 @@ from toppra import PolynomialPath -class Test_PolynomialInterpolator(object): - """ Test suite for Polynomial Interpolator - """ - def test_scalar(self): - pi = PolynomialPath([1, 2, 3], s_start=0, s_end=2) # 1 + 2s + 3s^2 - assert pi.dof == 1 - npt.assert_allclose(pi.eval([0, 0.5, 1]), [1, 2.75, 6]) - npt.assert_allclose(pi.evald([0, 0.5, 1]), [2, 5, 8]) - npt.assert_allclose(pi.evaldd([0, 0.5, 1]), [6, 6, 6]) - npt.assert_allclose(pi.get_path_interval(), np.r_[0, 2]) +def test_scalar(): + """Scalar case.""" + pi = PolynomialPath([1, 2, 3], s_start=0, s_end=2) # 1 + 2s 
+ 3s^2 + assert pi.dof == 1 + npt.assert_allclose(pi.eval([0, 0.5, 1]), [1, 2.75, 6]) + npt.assert_allclose(pi.evald([0, 0.5, 1]), [2, 5, 8]) + npt.assert_allclose(pi.evaldd([0, 0.5, 1]), [6, 6, 6]) + npt.assert_allclose(pi.get_path_interval(), np.r_[0, 2]) - def test_2_dof(self): - pi = PolynomialPath([[1, 2, 3], [-2, 3, 4, 5]]) - # [1 + 2s + 3s^2] - # [-2 + 3s + 4s^2 + 5s^3] - assert pi.dof == 2 - npt.assert_allclose( - pi.eval([0, 0.5, 1]), [[1, -2], [2.75, 1.125], [6, 10]]) - npt.assert_allclose( - pi.evald([0, 0.5, 1]), [[2, 3], [5, 10.75], [8, 26]]) - npt.assert_allclose(pi.evaldd([0, 0.5, 1]), [[6, 8], [6, 23], [6, 38]]) +def test_2_dof(): + """Polynomial path with 2dof.""" + pi = PolynomialPath([[1, 2, 3], [-2, 3, 4, 5]]) + # [1 + 2s + 3s^2] + # [-2 + 3s + 4s^2 + 5s^3] + assert pi.dof == 2 + npt.assert_allclose( + pi.eval([0, 0.5, 1]), [[1, -2], [2.75, 1.125], [6, 10]]) + npt.assert_allclose( + pi.evald([0, 0.5, 1]), [[2, 3], [5, 10.75], [8, 26]]) + npt.assert_allclose(pi.evaldd([0, 0.5, 1]), [[6, 8], [6, 23], [6, 38]]) diff --git a/tests/interpolators/test_rave_trajectory.py b/tests/interpolators/test_rave_trajectory.py index 1a9647b1..457e9292 100644 --- a/tests/interpolators/test_rave_trajectory.py +++ b/tests/interpolators/test_rave_trajectory.py @@ -1,20 +1,20 @@ import numpy as np import pytest -try: - import openravepy as orpy - FOUND_OPENRAVE = True -except ImportError: - FOUND_OPENRAVE = False import toppra +from ..testing_utils import IMPORT_OPENRAVEPY, IMPORT_OPENRAVEPY_MSG + +if IMPORT_OPENRAVEPY: + import openravepy as orpy @pytest.fixture(scope='module') def env(): - env = orpy.Environment() - env.Load('data/lab1.env.xml') - env.GetRobots()[0].SetActiveDOFs(range(7)) - yield env - env.Destroy() + """Simple openrave environment.""" + rave_env = orpy.Environment() + rave_env.Load('data/lab1.env.xml') + rave_env.GetRobots()[0].SetActiveDOFs(range(7)) + yield rave_env + rave_env.Destroy() # data for testing @@ -28,7 +28,7 @@ def env(): string_quad_dup = '\n\n\n\n\n\n\n\n\n1.709482686468753 -0.2850567549595687 0.05294741159654096 1.74744585982622 -4.152741431707534 0.660301373938459 -2.726042691060638 0 0 0 0 0 0 0 0 1 1.709482686468753 -0.2850567549595687 0.05294741159654096 1.74744585982622 -4.152741431707534 0.660301373938459 -2.726042691060638 0 0 0 0 0 0 0 0 1 1.709763065926199 -0.2960230333595688 0.04697191334676359 1.74658278016568 -4.147831128669768 0.6605886349647567 -2.726034280729737 0.02677420334659321 -1.047200000000001 -0.5706167159833193 -0.08241784382543958 0.4688983038355807 0.02743134322934739 0.0008031255635491938 0.02094399999999993 1 1.713259942901273 -0.3545920561229389 -0.02755416796682923 1.735818498716196 -4.086590102705014 0.6641713384832277 -2.725929387551221 0.09827243036942108 -1.047200000000001 -2.0944 -0.3025076680596648 1.72105124165692 0.1006844062753302 0.002947803899119033 0.0559291661223931 1 1.731857947846823 -0.4700201550683899 -0.2584103658577312 1.678569084150065 -3.760882074689892 0.6832258085803744 -2.725371517238171 0.2391814644258742 -1.047199999999999 -2.094399999999998 -0.7362617040666819 4.188799999999989 0.2450518792223936 0.00717454580886203 0.1102254573581465 1 1.890089312694608 -0.8307414940332825 -0.9798530437875164 1.191492484802069 -2.317996718830322 0.8453407676176505 -2.72062517044987 0.679532608570662 -1.047200000000001 -2.094400000000001 -2.091775119598266 4.188800000000009 0.6962109004678214 0.02038342661922174 0.344462699546307 1 2.048320677542392 -1.191462832998175 -1.701295721717301 0.7044158854540735 
-0.8751113629707516 1.007455726654927 -2.715878823661571 0.2391814644258731 -1.0472 -2.094399999999998 -0.7362617040666843 4.188799999999991 0.2450518792223938 0.00717454580885946 0.3444626995463069 1 2.066918682487942 -1.306890931943626 -1.932151919608203 0.647166470887943 -0.5494033349556307 1.026510196752073 -2.71532095334852 0.09827243036942263 -1.047200000000002 -2.094399999999998 -0.3025076680596619 1.721051241656918 0.1006844062753305 0.002947803899121618 0.1102254573581464 1 2.070415559463016 -1.365459954706996 -2.006678000921796 0.6364021894384583 -0.488162308990876 1.030092900270544 -2.715216060170004 0.02677420334659206 -1.047199999999992 -0.5706167159833306 -0.08241784382544765 0.4688983038355894 0.02743134322933954 0.0008031255635466195 0.05592916612239294 1 2.070695938920462 -1.376426233106996 -2.012653499171573 0.6355391097779184 -0.4832520059531096 1.030380161296842 -2.715207649839103 1.020017403874363e-15 -3.552713678800501e-15 1.620925615952729e-14 1.908195823574488e-14 1.010302952408892e-14 1.830827156545922e-14 2.570209670094137e-15 0.02094400000000004 1 2.070695938920462 -1.376426233106996 -2.012653499171573 0.6355391097779183 -0.4832520059531091 1.030380161296842 -2.715207649839103 0 0 0 0 0 0 0 0 1 \n\n' -@pytest.mark.skipif(not FOUND_OPENRAVE, reason="Not found openrave installation") +@pytest.mark.skipif(not IMPORT_OPENRAVEPY, reason=IMPORT_OPENRAVEPY_MSG) @pytest.mark.parametrize("traj_string", [ pytest.param(string_cubic, id="cubic 3wp", marks=[]), pytest.param(string_cubic_5wp, id="cubic 5wp", marks=[]), diff --git a/tests/interpolators/test_spline_interpolator.py b/tests/interpolators/test_spline_interpolator.py index 4fa803e0..d9bba82d 100644 --- a/tests/interpolators/test_spline_interpolator.py +++ b/tests/interpolators/test_spline_interpolator.py @@ -2,118 +2,121 @@ import numpy.testing as npt import pytest from toppra import SplineInterpolator -try: - import openravepy as orpy - FOUND_OPENRAVE = True -except ImportError: - FOUND_OPENRAVE = False +from ..testing_utils import IMPORT_OPENRAVEPY, IMPORT_OPENRAVEPY_MSG @pytest.fixture(scope='module') -def robot_fixture(): - env = orpy.Environment() - env.Load("data/lab1.env.xml") - robot = env.GetRobots()[0] +def robot_fixture(rave_env): + import openravepy as orpy + rave_env.Reset() + rave_env.Load("data/lab1.env.xml") + robot = rave_env.GetRobots()[0] manipulator = robot.GetManipulators()[0] robot.SetActiveDOFs(manipulator.GetArmIndices()) # Generate IKFast if needed iktype = orpy.IkParameterization.Type.Transform6D - ikmodel = orpy.databases.inversekinematics.InverseKinematicsModel(robot, iktype=iktype) + ikmodel = orpy.databases.inversekinematics.InverseKinematicsModel( + robot, iktype=iktype) if not ikmodel.load(): - print('Generating IKFast {0}. It will take few minutes...'.format(iktype.name)) + print('Generating IKFast {0}. It will take few minutes...'.format( + iktype.name)) ikmodel.autogenerate() print('IKFast {0} has been successfully generated'.format(iktype.name)) - # env.SetViewer('qtosg') yield robot - env.Destroy() - - -class Test_SplineInterpolator(object): - """ Test suite for :class:`SplineInterpolator`. - """ - @pytest.mark.parametrize("sswp, wp, ss, path_interval", [ - [[0, 0.3, 0.5], [1,2,3], [0., 0.1, 0.2, 0.3, 0.5], [0, 0.5]], - [np.r_[0, 0.3, 0.5], [1,2,3], [0.], [0, 0.5]] - ]) - def test_scalar(self, sswp, wp, ss, path_interval): - "A scalar (dof=1) trajectory." 
- pi = SplineInterpolator(sswp, wp) # 1 + 2s + 3s^2 - assert pi.dof == 1 - - assert pi.eval(ss).shape == (len(ss), ) - assert pi.evald(ss).shape == (len(ss), ) - assert pi.evaldd(ss).shape == (len(ss), ) - assert pi.eval(0).shape == () - npt.assert_allclose(pi.get_path_interval(), path_interval) - - def test_5_dof(self): - pi = SplineInterpolator([0, 1], np.random.rand(2, 5)) - # [1 + 2s + 3s^2] - # [-2 + 3s + 4s^2 + 5s^3] - - ss = np.linspace(0, 1, 10) - assert pi.dof == 5 - assert pi.eval(ss).shape == (10, 5) - assert pi.evald(ss).shape == (10, 5) - assert pi.evaldd(ss).shape == (10, 5) - npt.assert_allclose(pi.get_path_interval(), np.r_[0, 1]) - - def test_1waypoints(self): - "The case where there is only one waypoint." - pi = SplineInterpolator([0], [[1, 2, 3]]) - assert pi.dof == 3 - npt.assert_allclose(pi.get_path_interval(), np.r_[0, 0]) - npt.assert_allclose(pi.eval(0), np.r_[1, 2, 3]) - npt.assert_allclose(pi.evald(0), np.r_[0, 0, 0]) - - npt.assert_allclose(pi.eval([0, 0]), [[1, 2, 3], [1, 2, 3]]) - npt.assert_allclose(pi.evald([0, 0]), [[0, 0, 0], [0, 0, 0]]) - - @pytest.mark.parametrize("xs,ys, yd", [ - ([0, 1], [[0, 1], [2, 3]], [2, 2]), - ([0, 2], [[0, 1], [0, 3]], [0, 1]), - ]) - def test_2waypoints(self, xs, ys, yd): - "There is only two waypoints. Linear interpolation is done between them." - pi = SplineInterpolator(xs, ys, bc_type='natural') - npt.assert_allclose(pi.get_path_interval(), xs) - npt.assert_allclose(pi.evald((xs[0] + xs[1]) / 2), yd) - npt.assert_allclose(pi.evaldd(0), np.zeros_like(ys[0])) - - @pytest.mark.skipif(not FOUND_OPENRAVE, reason="Not found openrave installation") - @pytest.mark.parametrize("ss_waypoints, waypoints", [ - [[0, 0.2, 0.5, 0.9], [[0.377, -0.369, 1.042, -0.265, -0.35 , -0.105, -0.74 ], - [ 1.131, 0.025, 0.778, 0.781, 0.543, -0.139, 0.222], - [-1.055, 1.721, -0.452, -0.268, 0.182, -0.935, 2.257], - [-0.274, -0.164, 1.492, 1.161, 1.958, -1.125, 0.567]]], - [[0, 0.2], [[0.377, -0.369, 1.042, -0.265, -0.35 , -0.105, -0.74 ], - [ 1.131, 0.025, 0.778, 0.781, 0.543, -0.139, 0.222]]], - [[0], [[0.377, -0.369, 1.042, -0.265, -0.35 , -0.105, -0.74 ]]] - ]) - def test_compute_rave_trajectory(self, robot_fixture, ss_waypoints, waypoints): - active_indices = robot_fixture.GetActiveDOFIndices() - path = SplineInterpolator(ss_waypoints, waypoints) - traj = path.compute_rave_trajectory(robot_fixture) - spec = traj.GetConfigurationSpecification() - - xs = np.linspace(0, path.get_duration(), 10) - - # Interpolate with spline - qs_spline = path.eval(xs) - qds_spline = path.evald(xs) - qdds_spline = path.evaldd(xs) - - # Interpolate with OpenRAVE - qs_rave = [] - qds_rave = [] - qdds_rave = [] - for t in xs: - data = traj.Sample(t) - qs_rave.append(spec.ExtractJointValues(data, robot_fixture, active_indices, 0)) - qds_rave.append(spec.ExtractJointValues(data, robot_fixture, active_indices, 1)) - qdds_rave.append(spec.ExtractJointValues(data, robot_fixture, active_indices, 2)) - - # Assert all close - npt.assert_allclose(qs_spline, qs_rave) - npt.assert_allclose(qds_spline, qds_rave) - npt.assert_allclose(qdds_spline, qdds_rave) + rave_env.Destroy() + + +@pytest.mark.parametrize("sswp, wp, ss, path_interval", [ + [[0, 0.3, 0.5], [1, 2, 3], [0., 0.1, 0.2, 0.3, 0.5], [0, 0.5]], + [np.r_[0, 0.3, 0.5], [1, 2, 3], [0.], [0, 0.5]] +]) +def test_scalar(sswp, wp, ss, path_interval): + "A scalar (dof=1) trajectory." 
+    pi = SplineInterpolator(sswp, wp)  # 1 + 2s + 3s^2
+    assert pi.dof == 1
+
+    assert pi.eval(ss).shape == (len(ss), )
+    assert pi.evald(ss).shape == (len(ss), )
+    assert pi.evaldd(ss).shape == (len(ss), )
+    assert pi.eval(0).shape == ()
+    npt.assert_allclose(pi.get_path_interval(), path_interval)
+
+
+def test_5_dof():
+    pi = SplineInterpolator([0, 1], np.random.rand(2, 5))
+    # [1 + 2s + 3s^2]
+    # [-2 + 3s + 4s^2 + 5s^3]
+
+    ss = np.linspace(0, 1, 10)
+    assert pi.dof == 5
+    assert pi.eval(ss).shape == (10, 5)
+    assert pi.evald(ss).shape == (10, 5)
+    assert pi.evaldd(ss).shape == (10, 5)
+    npt.assert_allclose(pi.get_path_interval(), np.r_[0, 1])
+
+
+def test_1waypoints():
+    "The case where there is only one waypoint."
+    pi = SplineInterpolator([0], [[1, 2, 3]])
+    assert pi.dof == 3
+    npt.assert_allclose(pi.get_path_interval(), np.r_[0, 0])
+    npt.assert_allclose(pi.eval(0), np.r_[1, 2, 3])
+    npt.assert_allclose(pi.evald(0), np.r_[0, 0, 0])
+
+    npt.assert_allclose(pi.eval([0, 0]), [[1, 2, 3], [1, 2, 3]])
+    npt.assert_allclose(pi.evald([0, 0]), [[0, 0, 0], [0, 0, 0]])
+
+
+@pytest.mark.parametrize("xs,ys, yd", [
+    ([0, 1], [[0, 1], [2, 3]], [2, 2]),
+    ([0, 2], [[0, 1], [0, 3]], [0, 1]),
+])
+def test_2waypoints(xs, ys, yd):
+    "There are only two waypoints; linear interpolation is done between them."
+    pi = SplineInterpolator(xs, ys, bc_type='natural')
+    npt.assert_allclose(pi.get_path_interval(), xs)
+    npt.assert_allclose(pi.evald((xs[0] + xs[1]) / 2), yd)
+    npt.assert_allclose(pi.evaldd(0), np.zeros_like(ys[0]))
+
+
+@pytest.mark.skipif(not IMPORT_OPENRAVEPY, reason=IMPORT_OPENRAVEPY_MSG)
+@pytest.mark.parametrize("ss_waypoints, waypoints", [
+    [[0, 0.2, 0.5, 0.9], [[0.377, -0.369, 1.042, -0.265, -0.35, -0.105, -0.74],
+                          [1.131, 0.025, 0.778, 0.781, 0.543, -0.139, 0.222],
+                          [-1.055, 1.721, -0.452, -0.268, 0.182, -0.935, 2.257],
+                          [-0.274, -0.164, 1.492, 1.161, 1.958, -1.125, 0.567]]],
+    [[0, 0.2], [[0.377, -0.369, 1.042, -0.265, -0.35, -0.105, -0.74],
+                [1.131, 0.025, 0.778, 0.781, 0.543, -0.139, 0.222]]],
+    [[0], [[0.377, -0.369, 1.042, -0.265, -0.35, -0.105, -0.74]]]
+])
+def test_compute_rave_trajectory(robot_fixture, ss_waypoints, waypoints):
+    """From the given spline trajectory, compute an OpenRAVE trajectory."""
+    active_indices = robot_fixture.GetActiveDOFIndices()
+    path = SplineInterpolator(ss_waypoints, waypoints)
+    traj = path.compute_rave_trajectory(robot_fixture)
+    spec = traj.GetConfigurationSpecification()
+
+    xs = np.linspace(0, path.get_duration(), 10)
+
+    # Interpolate with spline
+    qs_spline = path.eval(xs)
+    qds_spline = path.evald(xs)
+    qdds_spline = path.evaldd(xs)
+
+    # Interpolate with OpenRAVE
+    qs_rave = []
+    qds_rave = []
+    qdds_rave = []
+    for t in xs:
+        data = traj.Sample(t)
+        qs_rave.append(spec.ExtractJointValues(
+            data, robot_fixture, active_indices, 0))
+        qds_rave.append(spec.ExtractJointValues(
+            data, robot_fixture, active_indices, 1))
+        qdds_rave.append(spec.ExtractJointValues(
+            data, robot_fixture, active_indices, 2))
+
+    # Assert all close
+    npt.assert_allclose(qs_spline, qs_rave)
+    npt.assert_allclose(qds_spline, qds_rave)
+    npt.assert_allclose(qdds_spline, qdds_rave)
diff --git a/tests/seidel/test_lp1d.py b/tests/lpsolvers/seidel/test_lp1d.py
similarity index 91%
rename from tests/seidel/test_lp1d.py
rename to tests/lpsolvers/seidel/test_lp1d.py
index af17dbaa..15931afd 100644
--- a/tests/seidel/test_lp1d.py
+++ b/tests/lpsolvers/seidel/test_lp1d.py
@@ -1,3 +1,4 @@
+"""This test suite tests the 1d seidel LP solver."""
 import
toppra.solverwrapper.cy_seidel_solverwrapper as seidel import numpy as np import pytest @@ -26,6 +27,7 @@ testdata, ids=testids) def test_correct(v, a, b, low, high, res_expected, optval_expected, optvar_expected, active_c_expected): + """Correct LP instances.""" data = seidel.solve_lp1d(v, a, b, low, high) res, optval, optvar, active_c = data @@ -35,7 +37,9 @@ def test_correct(v, a, b, low, high, res_expected, optval_expected, optvar_expec assert active_c == active_c_expected + def test_infeasible(): + """Correct output for infeasible instances.""" a = np.array([-1.0, 1.0]) b = np.array([0.0, 0.5]) data = seidel.solve_lp1d(np.r_[1.0, 2], a, b, -1, 1.00) diff --git a/tests/seidel/test_lp2d.py b/tests/lpsolvers/seidel/test_lp2d.py similarity index 61% rename from tests/seidel/test_lp2d.py rename to tests/lpsolvers/seidel/test_lp2d.py index eb429bc8..c01bedfd 100644 --- a/tests/seidel/test_lp2d.py +++ b/tests/lpsolvers/seidel/test_lp2d.py @@ -15,21 +15,22 @@ 1, 0.995, [-1, -0.5], [-1, 1]), ([1, 2, 0], (1.36866544, 1.28199038, -0.19515422, 0.97578149, 0.64391477, - -0.0811908 , -0.70696349, -1.01804875, 0.5742392 , 0.02939029), - ( 0.1969094 , 1.13910161, 0.10109674, 1.71246466, -0.45206747, - -0.51302219, -1.16558797, 0.19919171, -0.906885 , 0.94722345), + -0.0811908, -0.70696349, -1.01804875, 0.5742392, 0.02939029), + (0.1969094, 1.13910161, 0.10109674, 1.71246466, -0.45206747, + -0.51302219, -1.16558797, 0.19919171, -0.906885, 0.94722345), (-2.68926068, -1.59762444, -2.03337493, -2.04617298, -1.09241401, - -1.67319798, -1.9483617 , -1.57529407, -1.37795315, -3.47919232), [-100, -100], [100, 100], + -1.67319798, -1.9483617, -1.57529407, -1.37795315, -3.47919232), [-100, -100], [100, 100], [0, 1], 1, 2.5547484757095305, [-1.18181729266432, 1.8682828841869252], [3, 7]), ([1, 2, 0], (1.36866544, 1.28199038, -0.19515422, 0.97578149, 0.64391477, - -0.0811908 , -0.70696349, -1.01804875, 0.5742392 , 0.02939029), - ( 0.1969094 , 1.13910161, 0.10109674, 1.71246466, -0.45206747, - -0.51302219, -1.16558797, 0.19919171, -0.906885 , 0.94722345), + -0.0811908, -0.70696349, -1.01804875, 0.5742392, 0.02939029), + (0.1969094, 1.13910161, 0.10109674, 1.71246466, -0.45206747, + -0.51302219, -1.16558797, 0.19919171, -0.906885, 0.94722345), (-2.68926068, -1.59762444, -2.03337493, -2.04617298, -1.09241401, - -1.67319798, -1.9483617 , -1.57529407, -1.37795315, -3.47919232), [-100, -100], [100, 100], + -1.67319798, -1.9483617, -1.57529407, -1.37795315, -3.47919232), [-100, -100], [100, 100], [5, 9], 1, 2.5547484757095305, [-1.18181729266432, 1.8682828841869252], [3, 7]), - ([1, 2, 0], [-0.01, 0.01], [-1, 1], [0, 0.5], [-1, -1], [1, 1], [0, 1], 0, None, None, None) + ([1, 2, 0], [-0.01, 0.01], [-1, 1], [0, 0.5], + [-1, -1], [1, 1], [0, 1], 0, None, None, None) ] testids_correct = [ @@ -48,6 +49,7 @@ testdata_correct, ids=testids_correct) def test_correct(v, a, b, c, low, high, active_c, res_expected, optval_expected, optvar_expected, active_c_expected): + """Test a few correct instances.""" if a is None: a_np = None b_np = None @@ -57,7 +59,7 @@ def test_correct(v, a, b, c, low, high, active_c, res_expected, optval_expected, b_np = np.array(b, dtype=float) c_np = np.array(c, dtype=float) data = seidel.solve_lp2d(np.array(v, dtype=float), a_np, b_np, c_np, - np.array(low, dtype=float), np.array(high, dtype=float), np.array(active_c, dtype=int)) + np.array(low, dtype=float), np.array(high, dtype=float), np.array(active_c, dtype=int)) res, optval, optvar, active_c = data if res_expected == 1: @@ -68,13 +70,12 @@ def 
test_correct(v, a, b, c, low, high, active_c, res_expected, optval_expected, else: assert res == res_expected + @pytest.mark.parametrize("seed", range(100)) def test_random_constraints(seed): """Generate random problem data, solve with cvxpy and then compare! Generated problems can be feasible or infeasible. Both cases are - tested in this unit test. - - """ + tested in this unit test.""" # generate random problem data d = 50 np.random.seed(seed) @@ -98,33 +99,36 @@ def test_random_constraints(seed): low <= x, x <= high] obj = cvx.Maximize(v[0] * x[0] + v[1] * x[1] + v[2]) prob = cvx.Problem(obj, constraints) - prob.solve() + prob.solve(solver='CVXOPT') + # solve with the method to test and assert correctness data = seidel.solve_lp2d(v, a, b, c, low, high, active_c) res, optval, optvar, active_c = data if prob.status == "optimal": assert res == 1 - np.testing.assert_allclose(optval, prob.value) - np.testing.assert_allclose(optvar, np.asarray(x.value).flatten()) + np.testing.assert_allclose(optval, prob.value, atol=1e-6) + np.testing.assert_allclose(optvar, np.asarray(x.value).flatten(), atol=1e-6) elif prob.status == "infeasible": assert res == 0 else: - assert False, "Solve this LP with cvxpy returns status: {:}".format(prob.status) + assert False, "Solve this LP with cvxpy returns status: {:}".format( + prob.status) def test_err1(): """A case seidel solver fails to solve correctly. I discovered this - while working on toppra. - - """ - v = array([-1.e-09, 1.e+00, 0.e+00]) - a = array([-0.02020202, 0.02020202, 1.53515768, 4.3866269 , -3.9954173 , -1.53515768, -4.3866269 , 3.9954173 ]) - b = array([ -1. , 1. , -185.63664301, 156.27072783, -209.00954213, 185.63664301, -156.27072783, 209.00954213]) - c = array([ 0. , -0.0062788, -1. , -2. , -4. , -1. , -1. , -1. ]) + while working on toppra. """ + v = array([-1.e-09, 1.e+00, 0.e+00]) + a = array([-0.02020202, 0.02020202, 1.53515768, 4.3866269, - + 3.9954173, -1.53515768, -4.3866269, 3.9954173]) + b = array([-1., 1., -185.63664301, 156.27072783, - + 209.00954213, 185.63664301, -156.27072783, 209.00954213]) + c = array([0., -0.0062788, -1., -2., -4., -1., -1., -1.]) low = array([-100., 0.]) high = array([1.00000000e+02, 6.26434609e-02]) - data = seidel.solve_lp2d(v, a, b, c, low, high, np.array([0, 5])) # only break at this active constraints + data = seidel.solve_lp2d(v, a, b, c, low, high, np.array( + [0, 5])) # only break at this active constraints res, optval, optvar, active_c = data # solve with cvxpy @@ -142,38 +146,38 @@ def test_err1(): elif prob.status == "infeasible": assert res == 0 else: - assert False, "Solve this LP with cvxpy returns status: {:}".format(prob.status) + assert False, "Solve this LP with cvxpy returns status: {:}".format( + prob.status) def test_err2(): - """ A case that fails. Discovered on 31/10/2018. - """ - v=array([-1.e-09, 1.e+00, 0.e+00]) - a=array([-0.04281662, 0.04281662, 0. , 0. , 0. , - 0. , 0. , 0. , 0. , 0. , - 0. , 0. , 0. , 0. , 0. , - 0. , -1.27049648, 0.63168407, 0.54493736, -0.17238098, - 0.22457236, 0.6543007 , 1.24159883, 1.27049648, -0.63168407, - -0.54493736, 0.17238098, -0.22457236, -0.6543007 , -1.24159883]) - b=array([ -1. , 1. 
, -70.14534325, 35.42759706, - 31.23305996, -9.04430553, 12.51402852, 36.71562421, - 68.63795557, 70.14534325, -35.42759706, -31.23305996, - 9.04430553, -12.51402852, -36.71562421, -68.63795557, - -9.70931351, 4.71707751, 3.93518034, -1.41196299, - 1.69317949, 4.88204872, 9.47085771, 9.70931351, - -4.71707751, -3.93518034, 1.41196299, -1.69317949, - -4.88204872, -9.47085771]) - c=array([ 0. , -1.56875277, -50. , -50. , - -50. , -50. , -50. , -50. , - -50. , -50. , -50. , -50. , - -50. , -50. , -50. , -50. , - -50. , -50. , -50. , -50. , - -50. , -50. , -50. , -50. , - -50. , -50. , -50. , -50. , - -50. , -50. ]) - low=array([-1.e+08, 0.e+00]) - high=array([1.e+08, 1.e+08]) - active_c=np.array([ 0, -4]) + """A case that fails. Discovered on 31/10/2018.""" + v = array([-1.e-09, 1.e+00, 0.e+00]) + a = array([-0.04281662, 0.04281662, 0., 0., 0., + 0., 0., 0., 0., 0., + 0., 0., 0., 0., 0., + 0., -1.27049648, 0.63168407, 0.54493736, -0.17238098, + 0.22457236, 0.6543007, 1.24159883, 1.27049648, -0.63168407, + -0.54493736, 0.17238098, -0.22457236, -0.6543007, -1.24159883]) + b = array([-1., 1., -70.14534325, 35.42759706, + 31.23305996, -9.04430553, 12.51402852, 36.71562421, + 68.63795557, 70.14534325, -35.42759706, -31.23305996, + 9.04430553, -12.51402852, -36.71562421, -68.63795557, + -9.70931351, 4.71707751, 3.93518034, -1.41196299, + 1.69317949, 4.88204872, 9.47085771, 9.70931351, + -4.71707751, -3.93518034, 1.41196299, -1.69317949, + -4.88204872, -9.47085771]) + c = array([0., -1.56875277, -50., -50., + -50., -50., -50., -50., + -50., -50., -50., -50., + -50., -50., -50., -50., + -50., -50., -50., -50., + -50., -50., -50., -50., + -50., -50., -50., -50., + -50., -50.]) + low = array([-1.e+08, 0.e+00]) + high = array([1.e+08, 1.e+08]) + active_c = np.array([0, -4]) data = seidel.solve_lp2d( v, a, b, c, low, high, np.array([0, -4])) # only break at this active constraints @@ -194,6 +198,5 @@ def test_err2(): elif prob.status == "infeasible": assert res == 0 else: - assert False, "Solve this LP with cvxpy returns status: {:}".format(prob.status) - - + assert False, "Solve this LP with cvxpy returns status: {:}".format( + prob.status) diff --git a/tests/retime/__init__.py b/tests/retime/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/retime/conftest.py b/tests/retime/conftest.py index 7ffe0102..e7a6c18b 100644 --- a/tests/retime/conftest.py +++ b/tests/retime/conftest.py @@ -16,7 +16,7 @@ def basic_constraints(request): vel_cnst = constraint.JointVelocityConstraint(vlims) accl_cnst = constraint.JointAccelerationConstraint(alims, dtype_a) - robust_accl_cnst = constraint.RobustCanonicalLinearConstraint( + robust_accl_cnst = constraint.RobustLinearConstraint( accl_cnst, [1e-4, 1e-4, 5e-4], dtype_ra) yield vel_cnst, accl_cnst, robust_accl_cnst diff --git a/tests/retime_robustness/README.md b/tests/retime/robustness/README.md similarity index 100% rename from tests/retime_robustness/README.md rename to tests/retime/robustness/README.md diff --git a/tests/retime_robustness/problem_suite_1.yaml b/tests/retime/robustness/problem_suite_1.yaml similarity index 100% rename from tests/retime_robustness/problem_suite_1.yaml rename to tests/retime/robustness/problem_suite_1.yaml diff --git a/tests/retime_robustness/test_robustness_main.py b/tests/retime/robustness/test_robustness_main.py similarity index 89% rename from tests/retime_robustness/test_robustness_main.py rename to tests/retime/robustness/test_robustness_main.py index 8a4384d8..1d713db6 100644 --- 
a/tests/retime_robustness/test_robustness_main.py +++ b/tests/retime/robustness/test_robustness_main.py @@ -5,6 +5,10 @@ import pandas import tabulate import time +try: + import pathlib +except ImportError: + import pathlib2 as pathlib import toppra import toppra.constraint as constraint @@ -21,8 +25,9 @@ def test_robustness_main(request): visualize = request.config.getoption("--visualize") # parse problems from a configuration file parsed_problems = [] - with open("tests/retime_robustness/problem_suite_1.yaml", "r") as f: - problem_dict = yaml.load(f.read()) + path = pathlib.Path(__file__) + path = path / '../problem_suite_1.yaml' + problem_dict = yaml.load(path.resolve().read_text()) for key in problem_dict: if len(problem_dict[key]['ss_waypoints']) == 2: ss_waypoints = np.linspace(problem_dict[key]['ss_waypoints'][0], @@ -98,14 +103,15 @@ def test_robustness_main(request): parsed_problems_df.loc[row_index, "status"] = "SUCCESS" parsed_problems_df.loc[row_index, "duration"] = jnt_traj.get_duration() - parsed_problems_df.loc[row_index, "t_init"] = (t1 - t0) * 1e3 - parsed_problems_df.loc[row_index, "t_setup"] = (t2 - t1) * 1e3 - parsed_problems_df.loc[row_index, "t_solve"] = (t3 - t2) * 1e3 + parsed_problems_df.loc[row_index, "t_init(ms)"] = (t1 - t0) * 1e3 + parsed_problems_df.loc[row_index, "t_setup(ms)"] = (t2 - t1) * 1e3 + parsed_problems_df.loc[row_index, "t_solve(ms)"] = (t3 - t2) * 1e3 # get all rows with status different from NaN, then reports other columns. result_df = parsed_problems_df[parsed_problems_df["status"].notna()][ ["status", "duration", "desired_duration", "name", "solver_wrapper", - "nb_gridpoints", "problem_id", "t_init", "t_setup", "t_solve"]] - + "nb_gridpoints", "problem_id", "t_init(ms)", "t_setup(ms)", "t_solve(ms)"]] + result_df.to_csv('%s.result' % __file__) + print("Test summary\n") print(tabulate.tabulate(result_df, result_df.columns)) assert all_success, "Unable to solve some problems in the test suite" diff --git a/tests/retime/test_general_cases.py b/tests/retime/test_retime_basic.py similarity index 95% rename from tests/retime/test_general_cases.py rename to tests/retime/test_retime_basic.py index b5bc21fc..5782e8eb 100644 --- a/tests/retime/test_general_cases.py +++ b/tests/retime/test_retime_basic.py @@ -12,7 +12,7 @@ toppra.setup_logging(level="INFO") -@pytest.mark.parametrize("solver_wrapper", ["cvxpy", "qpoases", "hotqpoases", "seidel"]) +@pytest.mark.parametrize("solver_wrapper", ["qpoases", "hotqpoases", "seidel"]) def test_toppra_linear(basic_constraints, basic_path, solver_wrapper): """Solve some basic problem instances. @@ -35,7 +35,6 @@ def test_toppra_linear(basic_constraints, basic_path, solver_wrapper): @pytest.mark.parametrize("solver_wrapper", [ - "cvxpy,qpoases", "qpoases,hotqpoases", "qpoases,seidel", "hotqpoases,seidel" diff --git a/tests/retime/test_retime_scaling.py b/tests/retime/test_retime_scaling.py index 82f3a7ce..0299bc56 100644 --- a/tests/retime/test_retime_scaling.py +++ b/tests/retime/test_retime_scaling.py @@ -10,7 +10,7 @@ toppra.setup_logging(level="INFO") -@pytest.mark.parametrize("solver_wrapper", ["cvxpy", "hotqpoases", "seidel"]) +@pytest.mark.parametrize("solver_wrapper", ["hotqpoases", "seidel"]) def test_linear_case1(basic_constraints, basic_path, solver_wrapper): """A generic test case. 
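
For orientation, the retime tests above all drive the same pipeline: build a geometric path, attach linear constraints, and run the TOPPRA algorithm with one of the solver wrappers being compared ("qpoases", "hotqpoases", "seidel", "ecos"). A minimal sketch of that flow follows; the waypoint values and limits are made up for illustration, and the `TOPPRA` constructor and `compute_trajectory` signatures are assumed from how these tests use them, so they may differ slightly across versions:

```python
import numpy as np
import toppra
import toppra.constraint as constraint
import toppra.algorithm as algo

# A made-up 3-dof path through 5 random waypoints.
way_pts = np.random.randn(5, 3)
path = toppra.SplineInterpolator(np.linspace(0, 1, 5), way_pts)

# Symmetric joint velocity/acceleration limits; one (min, max) row per joint.
vlims = np.vstack((-np.ones(3), np.ones(3))).T
alims = np.vstack((-2 * np.ones(3), 2 * np.ones(3))).T
pc_vel = constraint.JointVelocityConstraint(vlims)
pc_acc = constraint.JointAccelerationConstraint(alims)

# 'seidel' is one of the wrappers kept in the parametrizations above.
instance = algo.TOPPRA([pc_vel, pc_acc], path,
                       gridpoints=np.linspace(0, 1, 101),
                       solver_wrapper='seidel')
jnt_traj, aux_traj = instance.compute_trajectory(0, 0)
print(jnt_traj.get_duration())
```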
diff --git a/tests/retime/test_retime_wconic_constraints.py b/tests/retime/test_retime_wconic_constraints.py index cfa732e1..4fec303f 100644 --- a/tests/retime/test_retime_wconic_constraints.py +++ b/tests/retime/test_retime_wconic_constraints.py @@ -14,7 +14,7 @@ def vel_accel_robustaccel(request): alims = np.array([[-1, 1], [-1, 2], [-1, 4]], dtype=float) vel_cnst = constraint.JointVelocityConstraint(vlims) accl_cnst = constraint.JointAccelerationConstraint(alims, dtype_a) - robust_accl_cnst = constraint.RobustCanonicalLinearConstraint( + robust_accl_cnst = constraint.RobustLinearConstraint( accl_cnst, [1e-4, 1e-4, 5e-4], dtype_ra) yield vel_cnst, accl_cnst, robust_accl_cnst @@ -27,7 +27,7 @@ def path(request): yield path -@pytest.mark.parametrize("solver_wrapper", ["cvxpy", "ecos"]) +@pytest.mark.parametrize("solver_wrapper", ["ecos"]) def test_toppra_conic(vel_accel_robustaccel, path, solver_wrapper): vel_c, acc_c, ro_acc_c = vel_accel_robustaccel acc_c.set_discretization_type(1) diff --git a/tests/retime/test_retime_with_openrave.py b/tests/retime/test_retime_with_openrave.py index bfc39c39..b0220e12 100644 --- a/tests/retime/test_retime_with_openrave.py +++ b/tests/retime/test_retime_with_openrave.py @@ -1,11 +1,10 @@ import pytest import toppra import numpy as np -try: + +from ..testing_utils import IMPORT_OPENRAVEPY, IMPORT_OPENRAVEPY_MSG +if IMPORT_OPENRAVEPY: import openravepy as orpy - FOUND_OPENRAVE = True -except ImportError: - FOUND_OPENRAVE = False @pytest.fixture(scope='module') @@ -28,7 +27,7 @@ def robot_fixture(): env.Destroy() -@pytest.mark.skipif(not FOUND_OPENRAVE, reason="Not found openrave installation") +@pytest.mark.skipif(not IMPORT_OPENRAVEPY, reason="Not found openrave installation") @pytest.mark.parametrize("seed", range(90, 100), ids=["Seed=" + str(i) for i in range(90, 100)]) @pytest.mark.parametrize("solver_wrapper", ["hotqpoases", "seidel"]) def test_retime_kinematics_ravetraj(robot_fixture, seed, solver_wrapper): @@ -63,7 +62,7 @@ def test_retime_kinematics_ravetraj(robot_fixture, seed, solver_wrapper): assert traj_new.GetDuration() < traj.GetDuration() + 1 # Should not be too far from the optimal value -@pytest.mark.skipif(not FOUND_OPENRAVE, reason="Not found openrave installation") +@pytest.mark.skipif(not IMPORT_OPENRAVEPY, reason="Not found openrave installation") @pytest.mark.parametrize("seed", range(100, 110), ids=["Seed="+str(i) for i in range(100, 110)]) @pytest.mark.parametrize("solver_wrapper", ["hotqpoases", "seidel", "ecos"]) def test_retime_kinematics_waypoints(robot_fixture, seed, solver_wrapper): diff --git a/tests/solverwrapper/conftest.py b/tests/solverwrapper/conftest.py index dd29e4ef..0d969f5f 100644 --- a/tests/solverwrapper/conftest.py +++ b/tests/solverwrapper/conftest.py @@ -4,6 +4,7 @@ import toppra import toppra.constraint as constraint + @pytest.fixture(params=[(0, 0)]) def vel_accel_robustaccel(request): "Velocity + Acceleration + Robust Acceleration constraint" @@ -12,7 +13,7 @@ def vel_accel_robustaccel(request): alims = np.array([[-1, 1], [-1, 2], [-1, 4]], dtype=float) vel_cnst = constraint.JointVelocityConstraint(vlims) accl_cnst = constraint.JointAccelerationConstraint(alims, dtype_a) - robust_accl_cnst = constraint.RobustCanonicalLinearConstraint( + robust_accl_cnst = constraint.RobustLinearConstraint( accl_cnst, [0.5, 0.1, 2.0], dtype_ra) yield vel_cnst, accl_cnst, robust_accl_cnst diff --git a/tests/solverwrapper/test_basic_can_linear.py b/tests/solverwrapper/test_basic_can_linear.py index c2defdce..3f23be11 100644 
--- a/tests/solverwrapper/test_basic_can_linear.py +++ b/tests/solverwrapper/test_basic_can_linear.py @@ -15,7 +15,7 @@ toppra.setup_logging(level="INFO") -class RandomSecondOrderLinearConstraint(constraint.CanonicalLinearConstraint): +class RandomSecondOrderLinearConstraint(constraint.LinearConstraint): """A random Second-Order non-identical constraint. This contraint is defined solely for testing purposes. It accepts diff --git a/tests/solverwrapper/test_basic_conic_can_linear.py b/tests/solverwrapper/test_basic_conic_can_linear.py index baa238db..bb28b17a 100644 --- a/tests/solverwrapper/test_basic_conic_can_linear.py +++ b/tests/solverwrapper/test_basic_conic_can_linear.py @@ -89,7 +89,7 @@ def test_compare_accel_robust_accel(vel_accel_robustaccel, path, solver_name, i, """ vel_c, acc_c, _ = vel_accel_robustaccel - robust_acc_c = toppra.constraint.RobustCanonicalLinearConstraint( + robust_acc_c = toppra.constraint.RobustLinearConstraint( acc_c, [0, 0, 0], discretization_scheme=acc_c.get_discretization_type()) path_dist = np.linspace(0, path.get_duration(), 10) diff --git a/tests/testing_flags.py b/tests/testing_flags.py index 6acc34cf..1a0f2231 100644 --- a/tests/testing_flags.py +++ b/tests/testing_flags.py @@ -13,8 +13,6 @@ # Unable to find openrave FOUND_OPENRAVEPY = False -FOUND_MOSEK = False - # try: # import mosek # FOUND_MOSEK = True diff --git a/tests/testing_utils.py b/tests/testing_utils.py new file mode 100644 index 00000000..1efd2af8 --- /dev/null +++ b/tests/testing_utils.py @@ -0,0 +1,17 @@ +# testing flags +try: + import openravepy as orpy + IMPORT_OPENRAVEPY = True + IMPORT_OPENRAVEPY_MSG = "" +except Exception as err: + # Unable to find openrave + IMPORT_OPENRAVEPY = False + IMPORT_OPENRAVEPY_MSG = err.args[0] + +FOUND_MOSEK = False # permanently disable MOSEK during testing + +try: + import cvxpy + FOUND_CXPY = True +except ImportError: + FOUND_CXPY = False diff --git a/toppra/__init__.py b/toppra/__init__.py index c1d780e3..b0bf70b9 100644 --- a/toppra/__init__.py +++ b/toppra/__init__.py @@ -6,14 +6,12 @@ This package produces routines for creation and handling path constraints using the algorithm `TOPP-RA`. """ -# import constraint -# from .TOPP import * -from .interpolator import * -# from .constraints import * +from .interpolator import RaveTrajectoryWrapper, SplineInterpolator,\ + UnivariateSplineInterpolator, PolynomialPath, Interpolator from .utils import smooth_singularities, setup_logging -# from . import postprocess -# from .postprocess import compute_trajectory_gridpoints, compute_trajectory_uniform -from .planning_utils import retime_active_joints_kinematics, create_rave_torque_path_constraint -from . import constraint as constraint -from . import algorithm as algorithm -from . import solverwrapper as solverwrapper +from .planning_utils import retime_active_joints_kinematics,\ + create_rave_torque_path_constraint + +from . import constraint +from . import algorithm +from . 
import solverwrapper diff --git a/toppra/algorithm/algorithm.py b/toppra/algorithm/algorithm.py index 7a21eefd..6091d640 100644 --- a/toppra/algorithm/algorithm.py +++ b/toppra/algorithm/algorithm.py @@ -2,18 +2,19 @@ """ import numpy as np -try: - import openravepy as orpy - OPENRAVEPY_AVAILABLE = True -except ImportError: - OPENRAVEPY_AVAILABLE = False - from ..constants import TINY from ..interpolator import SplineInterpolator, Interpolator import logging logger = logging.getLogger(__name__) +try: + import openravepy as orpy +except ImportError as err: + logger.warning("Unable to import openravepy. Exception: %s" % err.args[0]) +except SyntaxError as err: + logger.warning("Unable to import openravepy. Exception: %s" % err.args[0]) + class ParameterizationAlgorithm(object): """Base class for all parameterization algorithms. diff --git a/toppra/algorithm/reachabilitybased/reachability_algorithm.py b/toppra/algorithm/reachabilitybased/reachability_algorithm.py index 299aeba0..b61d7164 100644 --- a/toppra/algorithm/reachabilitybased/reachability_algorithm.py +++ b/toppra/algorithm/reachabilitybased/reachability_algorithm.py @@ -188,7 +188,7 @@ def compute_controllable_sets(self, sdmin, sdmax): if K[i, 0] < 0: K[i, 0] = 0 if np.isnan(K[i]).any(): - logger.warn("A numerical error occurs: The controllable set at step " + logger.warning("A numerical error occurs: The controllable set at step " "[{:d} / {:d}] can't be computed.".format(i, self._N + 1)) return K if logger.isEnabledFor(logging.DEBUG): @@ -267,7 +267,7 @@ def compute_parameterization(self, sd_start, sd_end, return_data=False): assert sd_end >= 0 and sd_start >= 0, "Path velocities must be positive" K = self.compute_controllable_sets(sd_end, sd_end) if np.isnan(K).any(): - logger.warn("An error occurred when computing controllable velocities. " + logger.warning("An error occurred when computing controllable velocities. " "The path is not controllable, or is badly conditioned.") if return_data: return None, None, None, K @@ -276,7 +276,7 @@ def compute_parameterization(self, sd_start, sd_end, return_data=False): x_start = sd_start ** 2 if x_start + SMALL < K[0, 0] or K[0, 1] + SMALL < x_start: - logger.warn("The initial velocity is not controllable. {:f} not in ({:f}, {:f})".format( + logger.warning("The initial velocity is not controllable. {:f} not in ({:f}, {:f})".format( x_start, K[0, 0], K[0, 1] )) if return_data: @@ -308,13 +308,13 @@ def compute_parameterization(self, sd_start, sd_end, return_data=False): if tries < MAX_TRIES: xs[i] = max(xs[i] - TINY, 0.999 * xs[i]) # a slightly more aggressive reduction tries += 1 - logger.warn( + logger.warning( "A numerical error occurs: the instance is controllable " "but forward pass fails. Attempt to try again with x[i] " "slightly reduced.\n" "x[{:d}] reduced from {:.6f} to {:.6f}".format(i, xs[i] + SMALL, xs[i])) else: - logger.warn("Number of trials (to reduce xs[i]) reaches limits. " + logger.warning("Number of trials (to reduce xs[i]) reaches limits. " "Compute parametrization fails!") xs[i + 1:] = np.nan break diff --git a/toppra/constants.py b/toppra/constants.py index 9c25f57f..bacb30f2 100644 --- a/toppra/constants.py +++ b/toppra/constants.py @@ -13,13 +13,12 @@ # TODO: What are these constant used for? 
MAXU = 10000 # Max limit for `u` MAXX = 10000 # Max limit for `x` -MAXSD = 100 # square root of maxx +MAXSD = 100 # square root of maxx # constraint creation -JVEL_MAXSD = 1e8 # max sd when creating joint velocity constraints +JVEL_MAXSD = 1e8 # max sd when creating joint velocity constraints JACC_MAXU = 1e16 # max u when creating joint acceleration constraint - # solver wrapper related constants. # NOTE: Read the wrapper's documentation for more details. diff --git a/toppra/constraint/__init__.py b/toppra/constraint/__init__.py index 968e54f8..106bf6f0 100644 --- a/toppra/constraint/__init__.py +++ b/toppra/constraint/__init__.py @@ -1,10 +1,8 @@ -""" Module -""" +"""This module defines different dynamic constraints.""" from .constraint import ConstraintType, DiscretizationType, Constraint -from .joint_acceleration import JointAccelerationConstraint -from .joint_velocity import JointVelocityConstraint, JointVelocityConstraintVarying -from .can_linear_second_order import CanonicalLinearSecondOrderConstraint, canlinear_colloc_to_interpolate -from .canonical_conic import RobustCanonicalLinearConstraint -from .canonical_linear import CanonicalLinearConstraint - - +from .joint_torque import JointTorqueConstraint +from .linear_joint_acceleration import JointAccelerationConstraint +from .linear_joint_velocity import JointVelocityConstraint, JointVelocityConstraintVarying +from .linear_second_order import SecondOrderConstraint, canlinear_colloc_to_interpolate +from .conic_constraint import RobustLinearConstraint +from .linear_constraint import LinearConstraint diff --git a/toppra/constraint/can_linear_second_order.py b/toppra/constraint/can_linear_second_order.py deleted file mode 100644 index 7f0e8522..00000000 --- a/toppra/constraint/can_linear_second_order.py +++ /dev/null @@ -1,86 +0,0 @@ -from .canonical_linear import CanonicalLinearConstraint, canlinear_colloc_to_interpolate -from .constraint import DiscretizationType -import numpy as np - - -class CanonicalLinearSecondOrderConstraint(CanonicalLinearConstraint): - """A class to represent Canonical Linear Generalized Second-order constraints. - - Parameters - ---------- - inv_dyn: (array, array, array) -> array - The "inverse dynamics" function that receives joint position, velocity and - acceleration as inputs and ouputs the "joint torque". See notes for more - details. - cnst_F: array -> array - Coefficient function. See notes for more details. - cnst_g: array -> array - Coefficient function. See notes for more details. - dof: int, optional - Dimension of joint position vectors. Required. - - Notes - ----- - A Second Order Constraint can be given by the following formula: - - .. math:: - A(q) \ddot q + \dot q^\\top B(q) \dot q + C(q) = w, - - where w is a vector that satisfies the polyhedral constraint: - - .. math:: - F(q) w \\leq g(q). - - Notice that `inv_dyn(q, qd, qdd) = w` and that `cnsf_coeffs(q) = - F(q), g(q)`. - - To evaluate the constraint on a geometric path `p(s)`, multiple - calls to `inv_dyn` and `const_coeff` are made. Specifically one - can derive the second-order equation as follows - - .. math:: - A(q) p'(s) \ddot s + [A(q) p''(s) + p'(s)^\\top B(q) p'(s)] \dot s^2 + C(q) = w, - a(s) \ddot s + b(s) \dot s ^2 + c(s) = w - - To evaluate the coefficients a(s), b(s), c(s), inv_dyn is called - repeatedly with appropriate arguments. 
- """ - def __init__(self, inv_dyn, cnst_F, cnst_g, dof, discretization_scheme=DiscretizationType.Interpolation): - super(CanonicalLinearSecondOrderConstraint, self).__init__() - self.set_discretization_type(discretization_scheme) - self.inv_dyn = inv_dyn - self.cnst_F = cnst_F - self.cnst_g = cnst_g - self.dof = dof - self._format_string = " Kind: Generalized Second-order constraint\n" - self._format_string = " Dimension:\n" - F_ = cnst_F(np.zeros(dof)) - self._format_string += " F in R^({:d}, {:d})\n".format(*F_.shape) - - def compute_constraint_params(self, path, gridpoints, scaling): - assert path.get_dof() == self.get_dof(), ("Wrong dimension: constraint dof ({:d}) " - "not equal to path dof ({:d})".format( - self.get_dof(), path.get_dof())) - v_zero = np.zeros(path.get_dof()) - p = path.eval(gridpoints / scaling) - ps = path.evald(gridpoints / scaling) / scaling - pss = path.evaldd(gridpoints / scaling) / scaling ** 2 - - F = np.array(list(map(self.cnst_F, p))) - g = np.array(list(map(self.cnst_g, p))) - c = np.array( - [self.inv_dyn(p_, v_zero, v_zero) for p_ in p] - ) - a = np.array( - [self.inv_dyn(p_, v_zero, ps_) for p_, ps_ in zip(p, ps)] - ) - c - b = np.array( - [self.inv_dyn(p_, ps_, pss_) for p_, ps_, pss_ in zip(p, ps, pss)] - ) - c - - if self.discretization_type == DiscretizationType.Collocation: - return a, b, c, F, g, None, None - elif self.discretization_type == DiscretizationType.Interpolation: - return canlinear_colloc_to_interpolate(a, b, c, F, g, None, None, gridpoints) - else: - raise NotImplementedError("Other form of discretization not supported!") diff --git a/toppra/constraint/canonical_conic.py b/toppra/constraint/conic_constraint.py similarity index 94% rename from toppra/constraint/canonical_conic.py rename to toppra/constraint/conic_constraint.py index 93f25054..9d53ad46 100644 --- a/toppra/constraint/canonical_conic.py +++ b/toppra/constraint/conic_constraint.py @@ -3,7 +3,7 @@ import numpy as np -class CanonicalConicConstraint(Constraint): +class ConicConstraint(Constraint): """Base class for all canonical conic constraints. A canonical conic constraint is one with the following form @@ -43,7 +43,7 @@ def compute_constraint_params(self, path, gridpoints): raise NotImplementedError -class RobustCanonicalLinearConstraint(CanonicalConicConstraint): +class RobustLinearConstraint(ConicConstraint): """The simple canonical conic constraint. This constraint can be seen as a robustified version of a @@ -59,7 +59,7 @@ class RobustCanonicalLinearConstraint(CanonicalConicConstraint): Parameters ---------- - cnst: :class:`~toppra.constraint.CanonicalLinearConstraint` + cnst: :class:`~toppra.constraint.LinearConstraint` The base constraint to robustify. ellipsoid_axes_lengths: (3,)array Lengths of the axes of the perturbation ellipsoid. Must all be @@ -69,7 +69,7 @@ class RobustCanonicalLinearConstraint(CanonicalConicConstraint): """ def __init__(self, cnst, ellipsoid_axes_lengths, discretization_scheme=DiscretizationType.Collocation): - super(RobustCanonicalLinearConstraint, self).__init__() + super(RobustLinearConstraint, self).__init__() self.dof = cnst.get_dof() assert cnst.get_constraint_type() == ConstraintType.CanonicalLinear self.set_discretization_type(discretization_scheme) diff --git a/toppra/constraint/constraint.py b/toppra/constraint/constraint.py index 6a9384c4..7e93eb18 100644 --- a/toppra/constraint/constraint.py +++ b/toppra/constraint/constraint.py @@ -1,16 +1,18 @@ +"""Base class for all path parametrization contraints. 
""" from enum import Enum import logging logger = logging.getLogger(__name__) class ConstraintType(Enum): + """Type of path parametrization constraint.""" Unknown = -1 CanonicalLinear = 0 CanonicalConic = 1 class DiscretizationType(Enum): - """Enum to mark different Discretization Scheme for constraint. + """Enum to mark different Discretization Scheme for constraint. 1. `Collocation`: smaller problem size, but lower accuracy. 2. `Interplation`: larger problem size, but higher accuracy. @@ -23,22 +25,7 @@ class DiscretizationType(Enum): class Constraint(object): - """ Base class for all parameterization constraints. - - This class has two main functions: first, to tell its type and second, to produce - the parameters given the geometric path and the gridpoints. - - A derived class should implement the following method - - compute_constraint_params(): tuple - - Attributes - ---------- - constraint_type: ConstraintType - discretization_type: DiscretizationType - n_extra_vars: int - dof: int - Degree-of-freedom of the input path. - """ + """The base constraint class.""" def __repr__(self): string = self.__class__.__name__ + '(\n' string += ' Type: {:}'.format(self.constraint_type) + '\n' @@ -48,34 +35,52 @@ def __repr__(self): return string def get_dof(self): + """Return the degree of freedom of the constraint. + + TODO: It is unclear what is a dof of a constraint. Perharps remove this. + """ return self.dof def get_no_extra_vars(self): + """Return the number of extra variable required. + + TODO: This is not a property of a constraint. Rather it is specific to kinds of constraints. To be removed. + """ return self.n_extra_vars def get_constraint_type(self): + """Return the constraint type. + + TODO: Use property instead. + + """ return self.constraint_type def get_discretization_type(self): + """Return the discretization type. + + TODO: Use property instead. + + """ return self.discretization_type - def set_discretization_type(self, t): - """ Discretization type: Collocation or Interpolation. + def set_discretization_type(self, discretization_type): + """Discretization type: Collocation or Interpolation. Parameters ---------- - t: int, or DiscretizationType - If is 1, set to Interpolation. - If is 0, set to Collocation. + discretization_type: int or :class:`DiscretizationType` + Method to discretize this constraint. 
""" - if t == 0: + if discretization_type == 0: self.discretization_type = DiscretizationType.Collocation - elif t == 1: + elif discretization_type == 1: self.discretization_type = DiscretizationType.Interpolation - elif t == DiscretizationType.Collocation or t == DiscretizationType.Interpolation: - self.discretization_type = t + elif discretization_type == DiscretizationType.Collocation or discretization_type == DiscretizationType.Interpolation: + self.discretization_type = discretization_type else: - raise "Discretization type: {:} not implemented!".format(t) + raise "Discretization type: {:} not implemented!".format(discretization_type) - def compute_constraint_params(self, path, gridpoints, scaling): + def compute_constraint_params(self, path, gridpoints, scaling=1): + """Evaluate parameters of the constraint.""" raise NotImplementedError diff --git a/toppra/constraint/joint_torque.py b/toppra/constraint/joint_torque.py new file mode 100644 index 00000000..f372838d --- /dev/null +++ b/toppra/constraint/joint_torque.py @@ -0,0 +1,108 @@ +from .linear_constraint import LinearConstraint, canlinear_colloc_to_interpolate +from ..constraint import DiscretizationType +import numpy as np + + +class JointTorqueConstraint(LinearConstraint): + """Joint Torque Constraint. + + A joint torque constraint is given by + + .. math:: + A(q) \ddot q + \dot q^\\top B(q) \dot q + C(q) + D( \dot q )= w, + + where w is a vector that satisfies the polyhedral constraint: + + .. math:: + F(q) w \\leq g(q). + + Notice that `inv_dyn(q, qd, qdd) = w` and that `cnsf_coeffs(q) = + F(q), g(q)`. + + To evaluate the constraint on a geometric path `p(s)`, multiple + calls to `inv_dyn` and `const_coeff` are made. Specifically one + can derive the second-order equation as follows + + .. math:: + A(q) p'(s) \ddot s + [A(q) p''(s) + p'(s)^\\top B(q) p'(s)] \dot s^2 + C(q) + D( \dot q ) = w, + a(s) \ddot s + b(s) \dot s ^2 + c(s) = w + + To evaluate the coefficients a(s), b(s), c(s), inv_dyn is called + repeatedly with appropriate arguments. + + Parameters + ---------- + + inv_dyn: (array, array, array) -> array + The "inverse dynamics" function that receives joint position, velocity and + acceleration as inputs and ouputs the "joint torque". See notes for more + details. + + tau_lim: array + Shape (dof, 2). The lower and upper torque bounds of the + j-th joint are tau_lim[j, 0] and tau_lim[j, 1] respectively. + + fs_coef: array + Shape (dof). The coefficients of dry friction of the + joints. + + discretization_scheme: :class:`.DiscretizationType` + Can be either Collocation (0) or Interpolation + (1). Interpolation gives more accurate results with slightly + higher computational cost. + + """ + def __init__(self, inv_dyn, tau_lim, fs_coef, discretization_scheme=DiscretizationType.Collocation): + super(JointTorqueConstraint, self).__init__() + self.inv_dyn = inv_dyn + self.tau_lim = np.array(tau_lim, dtype=float) + self.fs_coef = np.array(fs_coef) + self.dof = self.tau_lim.shape[0] + self.set_discretization_type(discretization_scheme) + assert self.tau_lim.shape[1] == 2, "Wrong input shape." 
+ self._format_string = " Torque limit: \n" + for i in range(self.tau_lim.shape[0]): + self._format_string += " J{:d}: {:}".format(i + 1, self.tau_lim[i]) + "\n" + self.identical = True + + def compute_constraint_params(self, path, gridpoints, scaling): + if path.get_dof() != self.get_dof(): + raise ValueError("Wrong dimension: constraint dof ({:d}) not equal to path dof ({:d})".format( + self.get_dof(), path.get_dof() + )) + v_zero = np.zeros(path.get_dof()) + p = path.eval(gridpoints / scaling) + ps = path.evald(gridpoints / scaling) / scaling + pss = path.evaldd(gridpoints / scaling) / scaling ** 2 + N = gridpoints.shape[0] - 1 + dof = path.get_dof() + I_dof = np.eye(dof) + F = np.zeros((dof * 2, dof)) + g = np.zeros(dof * 2) + g[0:dof] = self.tau_lim[:, 1] + g[dof:] = - self.tau_lim[:, 0] + F[0:dof, :] = I_dof + F[dof:, :] = -I_dof + + c = np.array( + [self.inv_dyn(p_, v_zero, v_zero) for p_ in p] + ) + a = np.array( + [self.inv_dyn(p_, v_zero, ps_) for p_, ps_ in zip(p, ps)] + ) - c + b = np.array( + [self.inv_dyn(p_, ps_, pss_) for p_, ps_, pss_ in zip(p, ps, pss)] + ) - c + + # dry friction + for i in range(0, dof): + c[:,i] += self.fs_coef[i] * np.sign(ps[:,i]) + + if self.discretization_type == DiscretizationType.Collocation: + return a, b, c, F, g, None, None + elif self.discretization_type == DiscretizationType.Interpolation: + return canlinear_colloc_to_interpolate(a, b, c, F, g, None, None, + gridpoints, identical=True) + else: + raise NotImplementedError("Other form of discretization not supported!") + diff --git a/toppra/constraint/canonical_linear.py b/toppra/constraint/linear_constraint.py similarity index 76% rename from toppra/constraint/canonical_linear.py rename to toppra/constraint/linear_constraint.py index 0ef1b286..ba6bf434 100644 --- a/toppra/constraint/canonical_linear.py +++ b/toppra/constraint/linear_constraint.py @@ -4,7 +4,7 @@ import numpy as np -class CanonicalLinearConstraint(Constraint): +class LinearConstraint(Constraint): """A core type of constraints. Also known as Second-order Constraint. @@ -14,7 +14,7 @@ class CanonicalLinearConstraint(Constraint): .. math:: \mathbf a_i u + \mathbf b_i x + \mathbf c_i &= v, \\\\ - \mathbf F_i v &\\leq \mathbf h_i, \\\\ + \mathbf F_i v &\\leq \mathbf g_i, \\\\ x^{bound}_{i, 0} \\leq x &\\leq x^{bound}_{i, 1}, \\\\ u^{bound}_{i, 0} \\leq u &\\leq u^{bound}_{i, 1}. @@ -22,16 +22,16 @@ class CanonicalLinearConstraint(Constraint): of :math:`i`, then we can consider the simpler constraint: .. math:: - \mathbf F v &\\leq \mathbf h, \\\\ + \mathbf F v &\\leq \mathbf w, \\\\ In this case, the returned value of :math:`F` by `compute_constraint_params` has shape (k, m) instead of (N, k, m), - :math:`h` shape (k) instead of (N, k) and the class attribute + :math:`w` shape (k) instead of (N, k) and the class attribute `identical` will be True. .. note:: - Derived classes of :class:`CanonicalLinearConstraint` should at + Derived classes of :class:`LinearConstraint` should at least implement the method :func:`compute_constraint_params`. @@ -48,33 +48,38 @@ def __init__(self): self.n_extra_vars = 0 self.identical = False - def compute_constraint_params(self, path, gridpoints, scaling): + def compute_constraint_params(self, path, gridpoints, scaling=1): """Compute numerical coefficients of the given constraint. Parameters ---------- - path: `Interpolator` + path: :class:`Interpolator` The geometric path. gridpoints: np.ndarray Shape (N+1,). Gridpoint use for discretizing path. scaling: float Numerical scaling. 
If is 1, no scaling is performed. + NOTE: This parameter is deprecated. For numerical + stability it is better to scale at the solver level, as + then one can perform solver-specific optimization, as well + as strictly more general scaling strategy. Another + advantage of scaling at the solver level is a cleaner API. Returns ------- - a: array, or None + a: np.ndarray or None Shape (N + 1, m). See notes. - b: array, or None + b: np.ndarray, or None Shape (N + 1, m). See notes. - c: array, or None + c: np.ndarray or None Shape (N + 1, m). See notes. - F: array, or None + F: np.ndarray or None Shape (N + 1, k, m). See notes. - g: array, or None + g: np.ndarray or None Shape (N + 1, k,). See notes - ubound: array, or None + ubound: np.ndarray, or None Shape (N + 1, 2). See notes. - xbound: array, or None + xbound: np.ndarray or None Shape (N + 1, 2). See notes. """ @@ -88,38 +93,40 @@ def canlinear_colloc_to_interpolate(a, b, c, F, g, xbound, ubound, gridpoints, i Parameters ---------- - a: array, or None + a: np.ndarray or None Shape (N + 1, m). See notes. - b: array, or None + b: np.ndarray or None Shape (N + 1, m). See notes. - c: array, or None + c: np.ndarray or None Shape (N + 1, m). See notes. - F: array, or None + F: np.ndarray or None Shape (N + 1, k, m). See notes. - g: array, or None + g: np.ndarray or None Shape (N + 1, k,). See notes - ubound: array, or None + ubound: np.ndarray, or None Shape (N + 1, 2). See notes. - xbound: array, or None + xbound: np.ndarray or None Shape (N + 1, 2). See notes. - gridpoints: array - (N+1,) array. The path discretization. + gridpoints: np.ndarray + Shape (N+1,). The path discretization. + identical: bool, optional + If True, matrices F and g are identical at all gridpoint. Returns ------- - a_intp: array, or None + a_intp: np.ndarray, or None Shape (N + 1, m). See notes. - b_intp: array, or None + b_intp: np.ndarray, or None Shape (N + 1, m). See notes. - c_intp: array, or None + c_intp: np.ndarray, or None Shape (N + 1, m). See notes. - F_intp: array, or None + F_intp: np.ndarray, or None Shape (N + 1, k, m). See notes. - g_intp: array, or None + g_intp: np.ndarray, or None Shape (N + 1, k,). See notes - ubound: array, or None + ubound: np.ndarray, or None Shape (N + 1, 2). See notes. - xbound: array, or None + xbound: np.ndarray, or None Shape (N + 1, 2). See notes. """ diff --git a/toppra/constraint/joint_acceleration.py b/toppra/constraint/linear_joint_acceleration.py similarity index 50% rename from toppra/constraint/joint_acceleration.py rename to toppra/constraint/linear_joint_acceleration.py index c9352528..b3a2361b 100644 --- a/toppra/constraint/joint_acceleration.py +++ b/toppra/constraint/linear_joint_acceleration.py @@ -1,42 +1,47 @@ -from .canonical_linear import CanonicalLinearConstraint, canlinear_colloc_to_interpolate -from ..constraint import DiscretizationType +"""Joint acceleration constraint.""" import numpy as np +from .linear_constraint import LinearConstraint, canlinear_colloc_to_interpolate +from ..constraint import DiscretizationType -class JointAccelerationConstraint(CanonicalLinearConstraint): - """Joint Acceleration Constraint. +class JointAccelerationConstraint(LinearConstraint): + """The Joint Acceleration Constraint class. A joint acceleration constraint is given by .. 
math :: - \ddot{\mathbf{q}}_{min} & \leq \ddot{\mathbf q} &\leq \ddot{\mathbf{q}}_{max} \\\\ - \ddot{\mathbf{q}}_{min} & \leq \mathbf{q}'(s_i) u_i + \mathbf{q}''(s_i) x_i &\leq \ddot{\mathbf{q}}_{max} + \ddot{\mathbf{q}}_{min} & \leq \ddot{\mathbf q} + &\leq \ddot{\mathbf{q}}_{max} \\\\ + \ddot{\mathbf{q}}_{min} & \leq \mathbf{q}'(s_i) u_i + \mathbf{q}''(s_i) x_i + &\leq \ddot{\mathbf{q}}_{max} where :math:`u_i, x_i` are respectively the path acceleration and path velocity square at :math:`s_i`. For more detail see :ref:`derivationKinematics`. Rearranging the above pair of vector inequalities into the form - required by :class:`CanonicalLinearConstraint`, we have: + required by :class:`LinearConstraint`, we have: - :code:`a[i]` := :math:`\mathbf q'(s_i)` - :code:`b[i]` := :math:`\mathbf q''(s_i)` - :code:`F` := :math:`[\mathbf{I}, -\mathbf I]^T` - :code:`h` := :math:`[\ddot{\mathbf{q}}_{max}^T, -\ddot{\mathbf{q}}_{min}^T]^T` + """ - Parameters - ---------- - alim: array - Shape (dof, 2). The lower and upper acceleration bounds of the - j-th joint are alim[j, 0] and alim[j, 1] respectively. + def __init__(self, alim, discretization_scheme=DiscretizationType.Collocation): + """Initialize the joint acceleration class. - discretization_scheme: :class:`.DiscretizationType` - Can be either Collocation (0) or Interpolation - (1). Interpolation gives more accurate results with slightly - higher computational cost. + Parameters + ---------- + alim: array + Shape (dof, 2). The lower and upper acceleration bounds of the + j-th joint are alim[j, 0] and alim[j, 1] respectively. - """ - def __init__(self, alim, discretization_scheme=DiscretizationType.Collocation): + discretization_scheme: :class:`.DiscretizationType` + Can be either Collocation (0) or Interpolation + (1). Interpolation gives more accurate results with slightly + higher computational cost. 
+ """ super(JointAccelerationConstraint, self).__init__() self.alim = np.array(alim, dtype=float) self.dof = self.alim.shape[0] @@ -48,27 +53,23 @@ def __init__(self, alim, discretization_scheme=DiscretizationType.Collocation): self.identical = True def compute_constraint_params(self, path, gridpoints, scaling): - if path.get_dof() != self.get_dof(): + if path.dof != self.dof: raise ValueError("Wrong dimension: constraint dof ({:d}) not equal to path dof ({:d})".format( - self.get_dof(), path.get_dof() + self.dof, path.dof )) - ps = path.evald(gridpoints / scaling) / scaling - pss = path.evaldd(gridpoints / scaling) / scaling ** 2 - N = gridpoints.shape[0] - 1 - dof = path.get_dof() - I_dof = np.eye(dof) - F = np.zeros((dof * 2, dof)) - g = np.zeros(dof * 2) - ubound = np.zeros((N + 1, 2)) - g[0:dof] = self.alim[:, 1] - g[dof:] = - self.alim[:, 0] - F[0:dof, :] = I_dof - F[dof:, :] = -I_dof + ps_vec = path.evald(gridpoints / scaling) / scaling + pss_vec = path.evaldd(gridpoints / scaling) / scaling ** 2 + dof = path.dof + F_single = np.zeros((dof * 2, dof)) + g_single = np.zeros(dof * 2) + g_single[0:dof] = self.alim[:, 1] + g_single[dof:] = - self.alim[:, 0] + F_single[0:dof, :] = np.eye(dof) + F_single[dof:, :] = -np.eye(dof) if self.discretization_type == DiscretizationType.Collocation: - return ps, pss, np.zeros_like(ps), F, g, None, None + return ps_vec, pss_vec, np.zeros_like(ps_vec), F_single, g_single, None, None elif self.discretization_type == DiscretizationType.Interpolation: - return canlinear_colloc_to_interpolate(ps, pss, np.zeros_like(ps), F, g, None, None, + return canlinear_colloc_to_interpolate(ps_vec, pss_vec, np.zeros_like(ps_vec), F_single, g_single, None, None, gridpoints, identical=True) else: raise NotImplementedError("Other form of discretization not supported!") - diff --git a/toppra/constraint/joint_velocity.py b/toppra/constraint/linear_joint_velocity.py similarity index 94% rename from toppra/constraint/joint_velocity.py rename to toppra/constraint/linear_joint_velocity.py index 80805c54..e349b358 100644 --- a/toppra/constraint/joint_velocity.py +++ b/toppra/constraint/linear_joint_velocity.py @@ -1,10 +1,10 @@ -from .canonical_linear import CanonicalLinearConstraint +from .linear_constraint import LinearConstraint from toppra._CythonUtils import (_create_velocity_constraint, _create_velocity_constraint_varying) import numpy as np -class JointVelocityConstraint(CanonicalLinearConstraint): +class JointVelocityConstraint(LinearConstraint): """ A Joint Velocity Constraint class. Parameters @@ -39,7 +39,7 @@ def compute_constraint_params(self, path, gridpoints, scaling): return None, None, None, None, None, None, xbound -class JointVelocityConstraintVarying(CanonicalLinearConstraint): +class JointVelocityConstraintVarying(LinearConstraint): """A Joint Velocity Constraint class. This class handle velocity constraints that vary along the path. diff --git a/toppra/constraint/linear_second_order.py b/toppra/constraint/linear_second_order.py new file mode 100644 index 00000000..09327a39 --- /dev/null +++ b/toppra/constraint/linear_second_order.py @@ -0,0 +1,126 @@ +"""This module implements the general Second-Order constraints.""" +import numpy as np +from .linear_constraint import LinearConstraint, canlinear_colloc_to_interpolate +from .constraint import DiscretizationType + + +class SecondOrderConstraint(LinearConstraint): + """This class represents the linear generalized Second-order constraints. + + A `SecondOrderConstraint` is given by the following formula: + + .. 
math::
+        A(\mathbf{q}) \ddot {\mathbf{q}} + \dot
+        {\mathbf{q}}^\\top B(\mathbf{q}) \dot {\mathbf{q}} +
+        C(\mathbf{q}) + sign(\dot {\mathbf{q}}) * D(\mathbf{q}) = w,
+
+    where w is a vector that satisfies the polyhedral constraint:
+
+    .. math::
+        F(\mathbf{q}) w \\leq g(\mathbf{q}).
+
+    The functions :math:`A, B, C, D` represent respectively the inertial,
+    Coriolis, gravitational and dry friction terms in a robot torque
+    bound constraint.
+
+    To evaluate the constraint on a geometric path :math:`\mathbf{p}(s)`:
+
+    .. math::
+
+        A(\mathbf{q}) \mathbf{p}'(s) \ddot s + [A(\mathbf{q}) \mathbf{p}''(s) + \mathbf{p}'(s)^\\top B(\mathbf{q})
+        \mathbf{p}'(s)] \dot s^2 + C(\mathbf{q}) + sign(\mathbf{p}'(s)) * D(\mathbf{p}(s)) = w, \\\\
+        a(s) \ddot s + b(s) \dot s ^2 + c(s) = w,
+
+    where :math:`\mathbf{p}', \mathbf{p}''` denote respectively the
+    first and second derivatives of the path.
+
+    It is important to note that to evaluate the coefficients
+    :math:`a(s), b(s), c(s)`, it is not necessary to have the
+    functions :math:`A, B, C`. Rather, only the sum of these three
+    functions--the inverse dynamics function--is necessary.
+
+    """
+
+    def __init__(self, inv_dyn, cnst_F, cnst_g, dof, friction=None, discretization_scheme=1):
+        """Initialize the constraint.
+
+        Parameters
+        ----------
+        inv_dyn: [np.ndarray, np.ndarray, np.ndarray] -> np.ndarray
+            The "inverse dynamics" function that receives joint
+            position, velocity and acceleration as inputs and outputs
+            the "joint torque". It is not necessary to supply each
+            individual component function (gravitational, Coriolis,
+            etc.) separately.
+        cnst_F: [np.ndarray] -> np.ndarray
+            Coefficient function. See notes for more details.
+        cnst_g: [np.ndarray] -> np.ndarray
+            Coefficient function. See notes for more details.
+        dof: int
+            The dimension of the joint position.
+        discretization_scheme: DiscretizationType
+            Type of discretization.
+        friction: [np.ndarray] -> np.ndarray
+            Dry friction function.
+        """
+        super(SecondOrderConstraint, self).__init__()
+        self.set_discretization_type(discretization_scheme)
+        self.inv_dyn = inv_dyn
+        self.cnst_F = cnst_F
+        self.cnst_g = cnst_g
+        self.dof = dof
+        if friction is None:
+            self.friction = lambda s: np.zeros(self.dof)
+        else:
+            self.friction = friction
+        self._format_string = "    Kind: Generalized Second-order constraint\n"
+        self._format_string += "    Dimension:\n"
+        self._format_string += "        F in R^({:d}, {:d})\n".format(*cnst_F(np.zeros(dof)).shape)
+
+    @staticmethod
+    def joint_torque_constraint(inv_dyn, taulim, **kwargs):
+        """Initialize a Joint Torque constraint.
+
+        Parameters
+        ----------
+        inv_dyn: [np.ndarray, np.ndarray, np.ndarray] -> np.ndarray
+            Inverse dynamic function of the robot.
+        taulim: np.ndarray
+            Shape (N, 2). The i-th element contains the minimum and maximum joint torque limits
+            respectively.
+
+        """
+        dof = np.shape(taulim)[0]
+        F_aug = np.vstack((np.eye(dof), - np.eye(dof)))
+        g_aug = np.zeros(2 * dof)
+        g_aug[:dof] = taulim[:, 1]
+        g_aug[dof:] = - taulim[:, 0]
+        cnst_F = lambda _: F_aug
+        cnst_g = lambda _: g_aug
+        return SecondOrderConstraint(inv_dyn, cnst_F, cnst_g, dof, **kwargs)
+
+    def compute_constraint_params(self, path, gridpoints, scaling):
+        if path.dof != self.dof:
+            raise ValueError("Wrong dimension: constraint dof ({:d}) not equal to path dof ({:d})".format(
+                self.dof, path.dof))
+        v_zero = np.zeros(path.dof)
+        p_vec = path.eval(gridpoints / scaling)
+        ps_vec = path.evald(gridpoints / scaling) / scaling
+        pss_vec = path.evaldd(gridpoints / scaling) / scaling ** 2
+
+        F_vec = np.array(list(map(self.cnst_F, p_vec)))
+        g_vec = np.array(list(map(self.cnst_g, p_vec)))
+        c_vec = np.array([self.inv_dyn(_p, v_zero, v_zero) for _p in p_vec])
+        a_vec = np.array([self.inv_dyn(_p, v_zero, _ps) for _p, _ps in zip(p_vec, ps_vec)]) - c_vec
+        b_vec = np.array([self.inv_dyn(_p, _ps, pss_) for _p, _ps, pss_ in zip(p_vec, ps_vec, pss_vec)]) - c_vec
+
+        for i, (_p, _ps) in enumerate(zip(p_vec, ps_vec)):
+            c_vec[i] = c_vec[i] + np.sign(_ps) * self.friction(_p)
+
+        if self.discretization_type == DiscretizationType.Collocation:
+            return a_vec, b_vec, c_vec, F_vec, g_vec, None, None
+        elif self.discretization_type == DiscretizationType.Interpolation:
+            return canlinear_colloc_to_interpolate(a_vec, b_vec, c_vec, F_vec, g_vec, None, None, gridpoints)
+        else:
+            raise NotImplementedError("Other form of discretization not supported!")
diff --git a/toppra/interpolator.py b/toppra/interpolator.py
index 3ce62f36..867fc70b 100644
--- a/toppra/interpolator.py
+++ b/toppra/interpolator.py
@@ -1,172 +1,193 @@
+"""Implementations of interpolators, which represent geometric paths.
+
 """
-This module contains several interfaces for interpolated path.
-Most are simple wrappers over scipy.interpolators.
-"""
+import logging
+import warnings
 import numpy as np
 from scipy.interpolate import UnivariateSpline, CubicSpline, PPoly
-import logging
+
 logger = logging.getLogger(__name__)
+
 try:
     import openravepy as orpy
-except ImportError:
-    logger.warn("Openravepy not found!")
+except ImportError as err:
+    logger.warning("Unable to import openravepy. Exception: %s", err.args[0])
+except SyntaxError as err:
+    logger.warning("Unable to import openravepy. Exception: %s", err.args[0])
 
 
-def normalize(ss):
-    """ Normalize the path discretization.
+def normalize(gridpoints):
+    # type: (np.ndarray) -> np.ndarray
+    """Normalize the path discretization.
 
     Parameters
     ----------
-    ss: ndarray
-        Path position array.
+    gridpoints: Path position array.
 
     Returns
     -------
-    out: ndarray
-        Normalized path position array.
+    out: Normalized path position array.
     """
-    return np.array(ss) / ss[-1]
+    return np.array(gridpoints) / gridpoints[-1]
 
 
-def _find_left_index(ss_waypoints, s):
-    """Return the index of the largest entry in `ss_waypoints` that is
-    larger or equal `s`.
+def _find_left_index(gridpoints, s):
+    # type: (np.ndarray, float) -> int
+    """Find the index of the largest grid point smaller than or equal to s.
 
     Parameters
    ----------
-    ss_waypoints: ndarray
+    gridpoints:
         Array of path positions.
-    s: float
-        A single path position.
+    s:
+        A path position.
 
     Returns
     -------
-    out: int
+    out:
diff --git a/toppra/interpolator.py b/toppra/interpolator.py
index 3ce62f36..867fc70b 100644
--- a/toppra/interpolator.py
+++ b/toppra/interpolator.py
@@ -1,172 +1,193 @@
+"""Implementations of interpolators, which represent geometric paths.
+
 """
-This module contains several interfaces for interpolated path.
-Most are simple wrappers over scipy.interpolators.
-"""
+import logging
+import warnings
 import numpy as np
 from scipy.interpolate import UnivariateSpline, CubicSpline, PPoly
-import logging
+
 logger = logging.getLogger(__name__)
+
 try:
     import openravepy as orpy
-except ImportError:
-    logger.warn("Openravepy not found!")
+except ImportError as err:
+    logger.warning("Unable to import openravepy. Exception: %s", err.args[0])
+except SyntaxError as err:
+    logger.warning("Unable to import openravepy. Exception: %s", err.args[0])


-def normalize(ss):
-    """ Normalize the path discretization.
+def normalize(gridpoints):
+    # type: (np.ndarray) -> np.ndarray
+    """Normalize the path discretization.

     Parameters
     ----------
-    ss: ndarray
-        Path position array.
+    gridpoints: Path position array.

     Returns
     -------
-    out: ndarray
-        Normalized path position array.
+    out: Normalized path position array.

     """
-    return np.array(ss) / ss[-1]
+    return np.array(gridpoints) / gridpoints[-1]


-def _find_left_index(ss_waypoints, s):
-    """Return the index of the largest entry in `ss_waypoints` that is
-    larger or equal `s`.
+def _find_left_index(gridpoints, s):
+    # type: (np.ndarray, float) -> int
+    """Find the index of the largest grid point that is smaller than
+    or equal to the given path position.

     Parameters
     ----------
-    ss_waypoints: ndarray
+    gridpoints:
         Array of path positions.
-    s: float
-        A single path position.
+    s:
+        A path position.

     Returns
     -------
-    out: int
+    out:
         The desired index.
+
     """
-    for i in range(1, len(ss_waypoints)):
-        if ss_waypoints[i - 1] <= s and s < ss_waypoints[i]:
+    for i in range(1, len(gridpoints)):
+        if gridpoints[i - 1] <= s < gridpoints[i]:
             return i - 1
-    return len(ss_waypoints) - 2
+    return len(gridpoints) - 2


 class Interpolator(object):
-    """ Abstract class for interpolators.
-    """
+    """Abstract class for interpolators."""
+
     def __init__(self):
-        self.dof = None
-        # Note: do not use this attribute directly, use get_duration
-        # method instead.
-        self.duration = None
+        pass

     def get_dof(self):
-        """ Return the degree-of-freedom of the path.
+        # type: () -> int
+        """Return the degree-of-freedom of the path.

         Returns
         -------
-        out: int
+        out:
             Degree-of-freedom of the path.

         """
-        return self.dof
-
-    def get_duration(self):
-        """ Return the duration of the path.
+        raise NotImplementedError

-        Returns
-        -------
-        out: float
-            Path duration.
+    @property
+    def duration(self):
+        """Return the duration of the path."""
+        raise NotImplementedError

-        """
+    @property
+    def dof(self):
+        """Return the degrees-of-freedom of the path."""
         raise NotImplementedError

     def get_path_interval(self):
-        """ Return the starting and ending path positions.
+        # type: () -> np.ndarray
+        """Return the starting and ending path positions.

         Returns
         -------
-        out: ndarray
-            Shaped (2,).
+        out:
+            The starting and ending path positions.

         """
         return np.array([self.s_start, self.s_end])

     def eval(self, ss_sam):
-        """ Evaluate joint positions at specified path positions.
+        # type: (Union[np.ndarray, float]) -> np.ndarray
+        """Evaluate joint positions at specified path positions.

         Parameters
         ----------
-        ss_sam : array, or float
-            Shape (m, ). Path positions to sample at.
+        ss_sam :
+            Shape (m,) or float. The path positions to sample at.

         Returns
         -------
-        out : array
-            Shape (m, dof). Evaluated values at position.
-            Shape (dof,) if `ss_sam` is a float.
+        out :
+            Shape (m, dof) if the input is an array: evaluated values
+            at the given positions. Shape (dof,) if the input is a float.

         """
         raise NotImplementedError

     def evald(self, ss_sam):
-        """ Evaluate the first derivative of the geometric path.
+        # type: (Union[np.ndarray, float]) -> np.ndarray
+        """Evaluate first derivative at specified path positions.

         Parameters
         ----------
-        ss_sam : array
-            Shape (m, ). Positions to sample at.
+        ss_sam :
+            Shape (m,) or float. The path positions to sample at.

         Returns
         -------
-        out : array
-            Shape (m, dof). Evaluated values at position.
+        out :
+            Shape (m, dof) if the input is an array: evaluated values
+            at the given positions. Shape (dof,) if the input is a float.

         """
         raise NotImplementedError

     def evaldd(self, ss_sam):
-        """ Evaluate the 2nd derivative of the geometric path.
+        # type: (Union[np.ndarray, float]) -> np.ndarray
+        """Evaluate second derivative at specified path positions.

         Parameters
         ----------
-        ss_sam : array
-            Shape (m, ). Positions to sample at.
+        ss_sam :
+            Shape (m,) or float. The path positions to sample at.

         Returns
         -------
-        out : array
-            Shape (m, dof). Evaluated values at position.
+        out :
+            Shape (m, dof) if the input is an array: evaluated values
+            at the given positions. Shape (dof,) if the input is a float.

         """
         raise NotImplementedError

-    def compute_rave_trajectory(self):
+    def compute_rave_trajectory(self, robot):
+        """Return the corresponding OpenRAVE trajectory."""
         raise NotImplementedError

     def compute_ros_trajectory(self):
+        """Return the corresponding ROS trajectory."""
         raise NotImplementedError


 class RaveTrajectoryWrapper(Interpolator):
     """An interpolator that wraps OpenRAVE's :class:`GenericTrajectory`.
-    Only trajectories using quadratic interpolation or cubic interpolation are supported.
-    The trajectory is represented as a piecewise polynomial. The polynomial could be
-    quadratic or cubic depending the interpolation method used by the input trajectory object.
+    Only trajectories using quadratic or cubic interpolation are
+    supported. The trajectory is represented as a piecewise
+    polynomial; the polynomial is quadratic or cubic depending on the
+    interpolation method used by the input trajectory object.

-    Parameters
-    ----------
-    traj: :class:`openravepy.GenericTrajectory`
-        An OpenRAVE joint trajectory.
-    robot: :class:`openravepy.GenericRobot`
-        An OpenRAVE robot.
     """
+
     def __init__(self, traj, robot):
+        # type: (orpy.RaveTrajectory, orpy.Robot) -> None
+        """Initialize the trajectory wrapper.
+
+        Parameters
+        ----------
+        traj:
+            An OpenRAVE joint trajectory.
+        robot:
+            An OpenRAVE robot.
+        """
+        super(RaveTrajectoryWrapper, self).__init__()
         self.traj = traj  #: init
         self.spec = traj.GetConfigurationSpecification()
-        self.dof = robot.GetActiveDOF()
+        self._dof = robot.GetActiveDOF()

         self._interpolation = self.spec.GetGroupFromName('joint').interpolation
-        assert self._interpolation == 'quadratic' or self._interpolation == "cubic", "This class only handles trajectories with quadratic or cubic interpolation"
+        if self._interpolation not in ['quadratic', 'cubic']:
+            raise ValueError(
+                "This class only handles trajectories with quadratic or cubic interpolation"
+            )
         self._duration = traj.GetDuration()
-        all_waypoints = traj.GetWaypoints(0, traj.GetNumWaypoints()).reshape(traj.GetNumWaypoints(), -1)
+        all_waypoints = traj.GetWaypoints(0, traj.GetNumWaypoints()).reshape(
+            traj.GetNumWaypoints(), -1)
         valid_wp_indices = [0]
         self.ss_waypoints = [0.0]
         for i in range(1, traj.GetNumWaypoints()):
@@ -180,8 +201,16 @@ def __init__(self, traj, robot):
         self.s_start = self.ss_waypoints[0]
         self.s_end = self.ss_waypoints[-1]

-        self.waypoints = np.array([self.spec.ExtractJointValues(all_waypoints[i], robot, robot.GetActiveDOFIndices()) for i in valid_wp_indices])
-        self.waypoints_d = np.array([self.spec.ExtractJointValues(all_waypoints[i], robot, robot.GetActiveDOFIndices(), 1) for i in valid_wp_indices])
+        self.waypoints = np.array([
+            self.spec.ExtractJointValues(all_waypoints[i], robot,
+                                         robot.GetActiveDOFIndices())
+            for i in valid_wp_indices
+        ])
+        self.waypoints_d = np.array([
+            self.spec.ExtractJointValues(all_waypoints[i], robot,
+                                         robot.GetActiveDOFIndices(), 1)
+            for i in valid_wp_indices
+        ])

         # Degenerate case: there is only one waypoint.
         if self.n_waypoints == 1:
@@ -194,7 +223,8 @@ def __init__(self, traj, robot):
         elif self._interpolation == "quadratic":
             self.waypoints_dd = []
             for i in range(self.n_waypoints - 1):
-                qdd = ((self.waypoints_d[i + 1] - self.waypoints_d[i]) / (self.ss_waypoints[i + 1] - self.ss_waypoints[i]))
+                qdd = ((self.waypoints_d[i + 1] - self.waypoints_d[i]) /
+                       (self.ss_waypoints[i + 1] - self.ss_waypoints[i]))
                 self.waypoints_dd.append(qdd)
             self.waypoints_dd = np.array(self.waypoints_dd)

@@ -202,16 +232,23 @@ def __init__(self, traj, robot):
             pp_coeffs = np.zeros((3, self.n_waypoints - 1, self.dof))
             for idof in range(self.dof):
                 for iseg in range(self.n_waypoints - 1):
-                    pp_coeffs[:, iseg, idof] = [self.waypoints_dd[iseg, idof] / 2,
-                                                self.waypoints_d[iseg, idof],
-                                                self.waypoints[iseg, idof]]
+                    pp_coeffs[:, iseg, idof] = [
+                        self.waypoints_dd[iseg, idof] / 2,
+                        self.waypoints_d[iseg, idof],
+                        self.waypoints[iseg, idof]
+                    ]
             self.ppoly = PPoly(pp_coeffs, self.ss_waypoints)

         elif self._interpolation == "cubic":
-            self.waypoints_dd = np.array([self.spec.ExtractJointValues(all_waypoints[i], robot, robot.GetActiveDOFIndices(), 2) for i in valid_wp_indices])
+            self.waypoints_dd = np.array([
+                self.spec.ExtractJointValues(all_waypoints[i], robot,
+                                             robot.GetActiveDOFIndices(), 2)
+                for i in valid_wp_indices
+            ])
             self.waypoints_ddd = []
             for i in range(self.n_waypoints - 1):
-                qddd = ((self.waypoints_dd[i + 1] - self.waypoints_dd[i]) / (self.ss_waypoints[i + 1] - self.ss_waypoints[i]))
+                qddd = ((self.waypoints_dd[i + 1] - self.waypoints_dd[i]) /
+                        (self.ss_waypoints[i + 1] - self.ss_waypoints[i]))
                 self.waypoints_ddd.append(qddd)
             self.waypoints_ddd = np.array(self.waypoints_ddd)

@@ -219,18 +256,36 @@ def __init__(self, traj, robot):
             pp_coeffs = np.zeros((4, self.n_waypoints - 1, self.dof))
             for idof in range(self.dof):
                 for iseg in range(self.n_waypoints - 1):
-                    pp_coeffs[:, iseg, idof] = [self.waypoints_ddd[iseg, idof] / 6,
-                                                self.waypoints_dd[iseg, idof] / 2,
-                                                self.waypoints_d[iseg, idof],
-                                                self.waypoints[iseg, idof]]
+                    pp_coeffs[:, iseg, idof] = [
+                        self.waypoints_ddd[iseg, idof] / 6,
+                        self.waypoints_dd[iseg, idof] / 2,
+                        self.waypoints_d[iseg, idof],
+                        self.waypoints[iseg, idof]
+                    ]
             self.ppoly = PPoly(pp_coeffs, self.ss_waypoints)

         self.ppoly_d = self.ppoly.derivative()
         self.ppoly_dd = self.ppoly.derivative(2)

     def get_duration(self):
+        warnings.warn(
+            "`get_duration` method is deprecated, use `duration` property instead",
+            PendingDeprecationWarning)
+        return self.duration
+
+    def get_dof(self):  # type: () -> int
+        warnings.warn(
+            "`get_dof` method is deprecated, use `dof` property instead",
+            PendingDeprecationWarning)
+        return self.dof
+
+    @property
+    def duration(self):
         return self._duration

+    @property
+    def dof(self):
+        return self._dof
+
     def eval(self, ss_sam):
         return self.ppoly(ss_sam)
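For readers less familiar with scipy's `PPoly` convention used above (per-segment coefficients in decreasing degree, expressed in local coordinates t - t_i), here is a minimal single-segment, single-joint check; the numbers are arbitrary:

    import numpy as np
    from scipy.interpolate import PPoly

    # q(t) = q0 + qd0 * t + 0.5 * qdd0 * t**2 on the segment [0, 1],
    # matching the [qdd / 2, qd, q] coefficient layout built above.
    q0, qd0, qdd0 = 0.2, 1.0, -0.6
    coeffs = np.array([[qdd0 / 2], [qd0], [q0]])  # shape (3, n_segments)
    ppoly = PPoly(coeffs, np.array([0.0, 1.0]))

    assert np.isclose(ppoly(0.0), q0)
    assert np.isclose(ppoly.derivative()(0.0), qd0)
    print(ppoly(0.5))  # 0.2 + 1.0 * 0.5 - 0.3 * 0.25 = 0.625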
@@ -269,52 +324,68 @@ class SplineInterpolator(Interpolator):
         The path 2nd derivative.
     """
+
     def __init__(self, ss_waypoints, waypoints, bc_type='clamped'):
         super(SplineInterpolator, self).__init__()
         assert ss_waypoints[0] == 0, "First index must equals zero."
         self.ss_waypoints = np.array(ss_waypoints)
         self.waypoints = np.array(waypoints)
         self.bc_type = bc_type

-        if np.isscalar(waypoints[0]):
-            self.dof = 1
-        else:
-            self.dof = self.waypoints[0].shape[0]
-        self.duration = ss_waypoints[-1]
+        assert self.ss_waypoints.shape[0] == self.waypoints.shape[0]
         self.s_start = self.ss_waypoints[0]
         self.s_end = self.ss_waypoints[-1]

         if len(ss_waypoints) == 1:
-            def f1(s):
+
+            def _1dof_cspl(s):
                 try:
                     ret = np.zeros((len(s), self.dof))
                     ret[:, :] = self.waypoints[0]
                 except TypeError:
                     ret = self.waypoints[0]
                 return ret

-            def f2(s):
+
+            def _1dof_cspld(s):
                 try:
                     ret = np.zeros((len(s), self.dof))
                 except TypeError:
                     ret = np.zeros(self.dof)
                 return ret

-            self.cspl = f1
-            self.cspld = f2
-            self.cspldd = f2
+            self.cspl = _1dof_cspl
+            self.cspld = _1dof_cspld
+            self.cspldd = _1dof_cspld
         else:
             self.cspl = CubicSpline(ss_waypoints, waypoints, bc_type=bc_type)
             self.cspld = self.cspl.derivative()
             self.cspldd = self.cspld.derivative()

     def get_waypoints(self):
-        """ Return the appropriate scaled waypoints.
-        """
+        """Return the appropriately scaled waypoints."""
         return self.ss_waypoints, self.waypoints

     def get_duration(self):
+        warnings.warn(
+            "get_duration is deprecated, use duration (property) instead",
+            PendingDeprecationWarning)
         return self.duration

+    @property
+    def duration(self):
+        return self.ss_waypoints[-1] - self.ss_waypoints[0]
+
+    @property
+    def dof(self):
+        if np.isscalar(self.waypoints[0]):
+            return 1
+        return self.waypoints[0].shape[0]
+
+    def get_dof(self):  # type: () -> int
+        warnings.warn("get_dof is deprecated, use dof (property) instead",
+                      PendingDeprecationWarning)
+        return self.dof
+
     def eval(self, ss_sam):
         return self.cspl(ss_sam)

@@ -325,15 +396,17 @@ def evaldd(self, ss_sam):
         return self.cspldd(ss_sam)

     def compute_rave_trajectory(self, robot):
-        """ Compute an OpenRAVE trajectory equivalent to this trajectory.
+        """Compute an OpenRAVE trajectory equivalent to this trajectory.

         Parameters
         ----------
-        robot: OpenRAVE.Robot
+        robot:
+            An OpenRAVE robot.

         Returns
         -------
-        trajectory: OpenRAVE.Trajectory
+        trajectory:
+            The equivalent OpenRAVE trajectory.

         """
         traj = orpy.RaveCreateTrajectory(robot.GetEnv(), "")
@@ -349,7 +422,8 @@ def compute_rave_trajectory(self, robot):
             q = self.eval(0)
             qd = self.evald(0)
             qdd = self.evaldd(0)
-            traj.Insert(traj.GetNumWaypoints(), list(q) + list(qd) + list(qdd) + [0])
+            traj.Insert(traj.GetNumWaypoints(),
+                        list(q) + list(qd) + list(qdd) + [0])
         else:
             qs = self.eval(self.ss_waypoints)
             qds = self.evald(self.ss_waypoints)
@@ -373,6 +447,7 @@ class UnivariateSplineInterpolator(Interpolator):
     waypoints: ndarray
         The waypoints.
     """
+
     def __init__(self, ss_waypoints, waypoints):
         super(UnivariateSplineInterpolator, self).__init__()
         assert ss_waypoints[0] == 0, "First index must equals zero."
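To illustrate the property-based API that replaces `get_dof`/`get_duration`, a small `SplineInterpolator` usage sketch (waypoint values are arbitrary):

    import numpy as np
    import toppra as ta

    # A 2-DOF path through three waypoints, parametrized by s in [0, 1].
    ss = np.array([0.0, 0.5, 1.0])
    waypoints = np.array([[0.0, 0.0], [0.5, 0.2], [1.0, 0.0]])
    path = ta.SplineInterpolator(ss, waypoints, bc_type='clamped')

    print(path.dof)         # 2, now a property
    print(path.duration)    # 1.0, now a property
    print(path.eval(0.25))  # joint positions at s = 0.25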
@@ -389,29 +464,30 @@ def __init__(self, ss_waypoints, waypoints):

         self.uspl = []
         for i in range(self.dof):
-            self.uspl.append(UnivariateSpline(self.ss_waypoints, self.waypoints[:, i]))
+            self.uspl.append(
+                UnivariateSpline(self.ss_waypoints, self.waypoints[:, i]))
         self.uspld = [spl.derivative() for spl in self.uspl]
         self.uspldd = [spl.derivative() for spl in self.uspld]

     def get_duration(self):
         return self.duration

-    def eval(self, ss):
+    def eval(self, ss_sam):
         data = []
         for spl in self.uspl:
-            data.append(spl(ss))
+            data.append(spl(ss_sam))
         return np.array(data).T

-    def evald(self, ss):
+    def evald(self, ss_sam):
         data = []
         for spl in self.uspld:
-            data.append(spl(ss))
+            data.append(spl(ss_sam))
         return np.array(data).T

-    def evaldd(self, ss):
+    def evaldd(self, ss_sam):
         data = []
         for spl in self.uspldd:
-            data.append(spl(ss))
+            data.append(spl(ss_sam))
         return np.array(data).T

@@ -429,54 +505,69 @@ class PolynomialPath(Interpolator):
     .. math::
         coeff[i, 0] + coeff[i, 1] s + coeff[i, 2] s^2 + ...
-
-    Parameters
-    ----------
-    coeff : ndarray
-        Coefficients of the polynomials.
-    s_start: float, optional
-        Starting path position.
-    s_end: float, optional
-        Goal path position.
     """
-    def __init__(self, coeff, s_start=0, s_end=1):
+
+    def __init__(self, coeff, s_start=0.0, s_end=1.0):
+        # type: (np.ndarray, float, float) -> None
+        """Initialize the polynomial path.
+
+        Parameters
+        ----------
+        coeff
+            Coefficients of the polynomials.
+        s_start
+            Starting path position.
+        s_end
+            Ending path position.
+        """
+        super(PolynomialPath, self).__init__()
         self.coeff = np.array(coeff)
-        self.s_start = s_start
         self.s_end = s_end
-        self.duration = s_end - s_start
+        self.s_start = s_start
         if np.isscalar(self.coeff[0]):
-            self.dof = 1
             self.poly = [np.polynomial.Polynomial(self.coeff)]
             self.coeff = self.coeff.reshape(1, -1)
         else:
-            self.dof = self.coeff.shape[0]
             self.poly = [
                 np.polynomial.Polynomial(self.coeff[i])
-                for i in range(self.dof)]
+                for i in range(self.dof)
+            ]

         self.polyd = [poly.deriv() for poly in self.poly]
         self.polydd = [poly.deriv() for poly in self.polyd]

+    @property
+    def dof(self):
+        return self.coeff.shape[0]
+
+    @property
+    def duration(self):
+        return self.s_end - self.s_start
+
     def get_duration(self):
+        warnings.warn("get_duration is deprecated, use duration",
+                      PendingDeprecationWarning)
         return self.duration

+    def get_dof(self):
+        warnings.warn("get_dof is deprecated, use dof",
+                      PendingDeprecationWarning)
+        return self.dof
+
     def eval(self, ss_sam):
         res = [poly(np.array(ss_sam)) for poly in self.poly]
         if self.dof == 1:
             return np.array(res).flatten()
-        else:
-            return np.array(res).T
+        return np.array(res).T

     def evald(self, ss_sam):
         res = [poly(np.array(ss_sam)) for poly in self.polyd]
         if self.dof == 1:
             return np.array(res).flatten()
-        else:
-            return np.array(res).T
+        return np.array(res).T

     def evaldd(self, ss_sam):
         res = [poly(np.array(ss_sam)) for poly in self.polydd]
         if self.dof == 1:
             return np.array(res).flatten()
-        else:
-            return np.array(res).T
+        return np.array(res).T
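A quick sketch of `PolynomialPath` with the coefficient convention above (increasing degree); the numbers are arbitrary:

    import toppra as ta

    # One-dof path q(s) = 1 + 2 s + 3 s^2 on s in [0, 1].
    path = ta.PolynomialPath([1.0, 2.0, 3.0], s_start=0.0, s_end=1.0)

    print(path.eval(0.5))   # [2.75] = 1 + 2 * 0.5 + 3 * 0.25
    print(path.evald(0.5))  # [5.0]  = 2 + 6 * 0.5
    print(path.duration)    # 1.0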
diff --git a/toppra/planning_utils.py b/toppra/planning_utils.py
index 6de51354..5c11b431 100644
--- a/toppra/planning_utils.py
+++ b/toppra/planning_utils.py
@@ -1,6 +1,6 @@
 from .interpolator import RaveTrajectoryWrapper, SplineInterpolator
 from .constraint import JointAccelerationConstraint, JointVelocityConstraint, \
-    DiscretizationType, CanonicalLinearSecondOrderConstraint
+    DiscretizationType, SecondOrderConstraint
 from .algorithm import TOPPRA
 import numpy as np
 import logging
@@ -9,8 +9,15 @@ logger = logging.getLogger(__name__)

-def retime_active_joints_kinematics(traj, robot, output_interpolator=False, vmult=1.0, amult=1.0,
-                                    N=100, use_ravewrapper=False, additional_constraints=[], solver_wrapper='hotqpoases'):
+def retime_active_joints_kinematics(traj,
+                                    robot,
+                                    output_interpolator=False,
+                                    vmult=1.0,
+                                    amult=1.0,
+                                    N=100,
+                                    use_ravewrapper=False,
+                                    additional_constraints=[],
+                                    solver_wrapper='hotqpoases'):
     """ Retime a trajectory wrt velocity and acceleration constraints.

     Parameters
@@ -50,7 +57,8 @@ def retime_active_joints_kinematics(traj, robot, output_interpolator=False, vmul
         ss_waypoints = np.linspace(0, 1, len(traj))
         path = SplineInterpolator(ss_waypoints, traj, bc_type='natural')
     elif use_ravewrapper:
-        logger.warn("Use RaveTrajectoryWrapper. This might not work properly!")
+        logger.warning(
+            "Using RaveTrajectoryWrapper. This might not work properly!")
        path = RaveTrajectoryWrapper(traj, robot)
     elif isinstance(traj, SplineInterpolator):
         path = traj
@@ -69,8 +77,8 @@ def retime_active_joints_kinematics(traj, robot, output_interpolator=False, vmul
         else:
             ss_waypoints.append(ss_waypoints[-1] + dt)
             waypoints.append(
-                spec.ExtractJointValues(
-                    data, robot, robot.GetActiveDOFIndices()))
+                spec.ExtractJointValues(data, robot,
+                                        robot.GetActiveDOFIndices()))
         path = SplineInterpolator(ss_waypoints, waypoints)

     vmax = robot.GetActiveDOFMaxVel() * vmult
@@ -82,8 +90,7 @@ def retime_active_joints_kinematics(traj, robot, output_interpolator=False, vmul
     pc_acc = JointAccelerationConstraint(
         alim, discretization_scheme=DiscretizationType.Interpolation)
     logger.debug(
-        "Number of constraints {:d}".format(
-            2 + len(additional_constraints)))
+        "Number of constraints %d", 2 + len(additional_constraints))
     logger.debug(str(pc_vel))
     logger.debug(str(pc_acc))
     for _c in additional_constraints:
@@ -95,20 +102,21 @@ def retime_active_joints_kinematics(traj, robot, output_interpolator=False, vmul
     for i in range(len(path.ss_waypoints) - 1):
         Ni = int(
             np.ceil((path.ss_waypoints[i + 1] - path.ss_waypoints[i]) / ds))
-        gridpoints.extend(
-            path.ss_waypoints[i]
-            + np.linspace(0, 1, Ni + 1)[1:] * (path.ss_waypoints[i + 1] - path.ss_waypoints[i]))
-    instance = TOPPRA([pc_vel, pc_acc] + additional_constraints, path,
-                      gridpoints=gridpoints, solver_wrapper=solver_wrapper)
+        gridpoints.extend(path.ss_waypoints[i] +
+                          np.linspace(0, 1, Ni + 1)[1:] *
+                          (path.ss_waypoints[i + 1] - path.ss_waypoints[i]))
+    instance = TOPPRA([pc_vel, pc_acc] + additional_constraints,
+                      path,
+                      gridpoints=gridpoints,
+                      solver_wrapper=solver_wrapper)
     _t1 = time.time()
     traj_ra, aux_traj = instance.compute_trajectory(0, 0)
     _t2 = time.time()
     logger.debug("t_setup={:.5f}ms, t_solve={:.5f}ms, t_total={:.5f}ms".format(
-        (_t1 - _t0) * 1e3, (_t2 - _t1) * 1e3, (_t2 - _t0) * 1e3
-    ))
+        (_t1 - _t0) * 1e3, (_t2 - _t1) * 1e3, (_t2 - _t0) * 1e3))
     if traj_ra is None:
-        logger.warn("Retime fails.")
+        logger.warning("Retiming failed.")
         traj_rave = None
     else:
         logger.debug("Retime successes!")
@@ -155,15 +163,21 @@ def inv_dyn(q, qd, qdd):
         robot.SetDOFVelocityLimits(vlim)
         robot.SetDOFAccelerationLimits(alim)
         return res[active_dofs]
+
     tau_max = robot.GetDOFTorqueLimits()[robot.GetActiveDOFIndices()]
-    F = np.vstack((np.eye(robot.GetActiveDOF()),
-                   - np.eye(robot.GetActiveDOF())))
+    F = np.vstack(
+        (np.eye(robot.GetActiveDOF()), -np.eye(robot.GetActiveDOF())))
     g = np.hstack((tau_max, tau_max))

-    def cnst_F(q): return F
+    def cnst_F(q):
+        return F

-    def cnst_g(q): return g
+    def cnst_g(q):
+        return g

-    cnst = CanonicalLinearSecondOrderConstraint(inv_dyn, cnst_F, cnst_g, dof=robot.GetActiveDOF(),
-                                                discretization_scheme=discretization_scheme)
+    cnst = SecondOrderConstraint(inv_dyn,
+                                 cnst_F,
+                                 cnst_g,
+                                 dof=robot.GetActiveDOF(),
+                                 discretization_scheme=discretization_scheme)
     return cnst
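For context, a sketch of how this helper is typically invoked; `robot` and `traj` are assumed to come from an existing OpenRAVE session, and only the call pattern visible in the signature above is shown:

    from toppra.planning_utils import retime_active_joints_kinematics

    # Retime with slightly reduced velocity/acceleration limits.
    traj_retimed = retime_active_joints_kinematics(
        traj, robot, vmult=0.8, amult=0.8, solver_wrapper='hotqpoases')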
diff --git a/toppra/solverwrapper/cvxpy_solverwrapper.py b/toppra/solverwrapper/cvxpy_solverwrapper.py
index 0d1173f5..da32ac3c 100644
--- a/toppra/solverwrapper/cvxpy_solverwrapper.py
+++ b/toppra/solverwrapper/cvxpy_solverwrapper.py
@@ -27,6 +27,9 @@ class cvxpyWrapper(SolverWrapper):
     guarantee that the solution is not too large, in which case cvxpy
     can't handle very well.

+    `cvxpyWrapper` should not be used in production due to robustness
+    issues.
+
     Parameters
     ----------
     constraint_list: list of :class:`.Constraint`
@@ -116,7 +119,7 @@ def solve_stagewise_optim(self, i, H, g, x_min, x_max, x_next_min, x_next_max):
         objective = cvxpy.Minimize(0.5 * cvxpy.quad_form(ux, H) + g * ux)
         problem = cvxpy.Problem(objective, constraints=cvxpy_constraints)
         try:
-            problem.solve(verbose=False, solver='ECOS')
+            problem.solve(verbose=False)
         except cvxpy.SolverError:
             # solve fail
             pass
diff --git a/toppra/solverwrapper/ecos_solverwrapper.py b/toppra/solverwrapper/ecos_solverwrapper.py
index cee54bc6..aa9355d1 100644
--- a/toppra/solverwrapper/ecos_solverwrapper.py
+++ b/toppra/solverwrapper/ecos_solverwrapper.py
@@ -4,10 +4,16 @@
 import logging
 import numpy as np
 import scipy.sparse
-import ecos

 logger = logging.getLogger(__name__)

+try:
+    import ecos
+    IMPORT_ECOS = True
+except ImportError as err:
+    logger.warning("Unable to import ecos. Exception: %s", err.args[0])
+    IMPORT_ECOS = False
+

 class ecosWrapper(SolverWrapper):
     """A solver wrapper that handles linear and conic-quadratic constraints using ECOS.
@@ -38,8 +44,9 @@ class ecosWrapper(SolverWrapper):

     """
     def __init__(self, constraint_list, path, path_discretization):
-        logger.debug("Initialize a ECOS SolverWrapper.")
         super(ecosWrapper, self).__init__(constraint_list, path, path_discretization)
+        if not IMPORT_ECOS:
+            raise RuntimeError("Unable to start the ecos wrapper because the ECOS solver is not installed.")
         # NOTE: Currently receive params in dense form.
         self._linear_idx = []
         self._conic_idx = []
@@ -173,7 +180,7 @@ def solve_stagewise_optim(self, i, H, g, x_min, x_max, x_next_min, x_next_max):
             success = True
         else:
             success = False
-            logger.warn("Optimization fails. Result dictionary: \n {:}".format(result))
+            logger.warning("Optimization failed. Result dictionary:\n{:}".format(result))
         ux_opt = np.zeros(2)

         if success:
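The guarded import plus constructor-time check introduced for ecos above is a reusable pattern for optional solver backends. A distilled, self-contained sketch (module and class names here are illustrative, not part of the patch):

    import logging

    logger = logging.getLogger(__name__)

    try:
        import some_optional_solver  # hypothetical optional dependency
        HAS_SOLVER = True
    except ImportError:
        logger.warning("Unable to import some_optional_solver")
        HAS_SOLVER = False


    class SolverBackend(object):
        """Fail at construction time, not import time, when the
        optional dependency is missing."""

        def __init__(self):
            if not HAS_SOLVER:
                raise RuntimeError(
                    "some_optional_solver is not installed; install it or "
                    "select a different solver wrapper.")

This keeps `import toppra` working on systems without the optional solver, while still raising a clear error as soon as the wrapper is actually requested.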