diff --git a/gonio-analysis/.gitignore b/gonio-analysis/.gitignore
new file mode 100644
index 0000000..b6e4761
--- /dev/null
+++ b/gonio-analysis/.gitignore
@@ -0,0 +1,129 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
diff --git a/gonio-analysis/LICENSE b/gonio-analysis/LICENSE
new file mode 100644
index 0000000..53d1f3d
--- /dev/null
+++ b/gonio-analysis/LICENSE
@@ -0,0 +1,675 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
+
diff --git a/gonio-analysis/README.md b/gonio-analysis/README.md
new file mode 100644
index 0000000..94370c2
--- /dev/null
+++ b/gonio-analysis/README.md
@@ -0,0 +1,77 @@
+# Goniometric Analysis suite
+
+Specialised spatial motion analysis software for Gonio Imsoft data.
+
+In general, it can be used for any data that follows the hierarchy
+```
+data_directory
+├── specimen_01
+│ ├── experiment_01
+│ │ ├── image_001.tif
+│ │ └── image_002.tif
+│ └── ...
+└── ...
+```
+
+
+## Installing
+
+There are two supported ways to install at the moment.
+On Windows, the stand-alone installer is likely the best option unless you are familiar with Python.
+On other platforms, use pip.
+
+### Installer on Windows (easiest)
+
+A Windows installer that bundles together the Gonio Analysis suite and all its dependencies,
+including a complete Python runtime, is provided at
+[Releases](https://github.com/jkemppainen/gonio-analysis/releases).
+
+The installer creates a start menu shortcut called Gonio Analysis,
+which can be used to launch the program.
+
+To uninstall, use the Add or Remove Programs feature in Windows.
+
+
+### Using pip (the Python standard way)
+
+The latest version from [PyPI](https://pypi.org/) can be installed with the command
+
+```
+pip install gonio-analysis
+```
+
+This should install all the required dependencies, except that on Windows, OpenCV may require the
+[Visual C++ Runtime 2015](https://www.microsoft.com/download/details.aspx?id=48145) to be installed.
+
+
+Afterwards, to upgrade an existing installation to the latest version:
+
+```
+pip install --upgrade gonio-analysis
+```
+
+In case of regressions, a specific version of the suite (for example 0.1.2) can be installed:
+
+```
+pip install gonio-analysis==0.1.2
+```
+
+Finally, to open the program:
+
+```
+python -m gonioanalysis.tkgui
+```
+
+## How to use
+
+First, open a data directory (the directory containing the specimen folders, which in turn contain the images).
+Next, select the regions of interest (ROIs) and then run the motion analysis.
+The ROIs and movements are saved on disk (C:\Users\USER\GonioAnalysis or /home/USER/.gonioanalysis), so these steps are needed only once per specimen.
+
+After the initial steps, you can perform further analyses in the program or
+export the data by
+1) copy-pasting to your favourite spreadsheet or plotting program,
+or 2) exporting CSV files.
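+
+An exported CSV file can then be read back with standard tools. A minimal
+sketch (the filename here is hypothetical, and the exact columns depend on
+what you export):
+
+```
+import csv
+
+with open('exported_movements.csv') as fp:
+    for row in csv.reader(fp):
+        print(row)
+```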
+
+## Notes
+
+This is still an early development version (expect rough edges).
+
diff --git a/gonio-analysis/bin/pupilanalysis b/gonio-analysis/bin/pupilanalysis
new file mode 100755
index 0000000..6d6d352
--- /dev/null
+++ b/gonio-analysis/bin/pupilanalysis
@@ -0,0 +1,3 @@
+#!/usr/bin/env python3
+from gonioanalysis.drosom import terminal
+terminal.main()
diff --git a/gonio-analysis/bin/pupilanalysis-tkgui b/gonio-analysis/bin/pupilanalysis-tkgui
new file mode 100755
index 0000000..5e3cbd7
--- /dev/null
+++ b/gonio-analysis/bin/pupilanalysis-tkgui
@@ -0,0 +1,3 @@
+#!/usr/bin/env python3
+import gonioanalysis.tkgui as tkgui
+tkgui.run()
diff --git a/gonio-analysis/bin/pupilanalysis-tkgui.cmd b/gonio-analysis/bin/pupilanalysis-tkgui.cmd
new file mode 100644
index 0000000..2b3769f
--- /dev/null
+++ b/gonio-analysis/bin/pupilanalysis-tkgui.cmd
@@ -0,0 +1,3 @@
+REM This startup script requires Python to be in PATH.
+REM On recent versions of Windows 10, the Microsoft Store page may open instead.
+python -m gonioanalysis.tkgui.examine
diff --git a/gonio-analysis/bin/pupilanalysis.cmd b/gonio-analysis/bin/pupilanalysis.cmd
new file mode 100644
index 0000000..fa178a5
--- /dev/null
+++ b/gonio-analysis/bin/pupilanalysis.cmd
@@ -0,0 +1,3 @@
+REM This startup script requires Python to be in PATH.
+REM On recent versions of Windows 10, the Microsoft Store page may open instead.
+python -m gonioanalysis.drosom.terminal
diff --git a/gonio-analysis/gonioanalysis/__init__.py b/gonio-analysis/gonioanalysis/__init__.py
new file mode 100644
index 0000000..b281ce6
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/__init__.py
@@ -0,0 +1,117 @@
+'''
+Goniometric Analysis
+---------------------
+
+Python package `gonioanalysis` provides an integrated set of graphical
+and command line tools to analyse goniometric imaging data.
+Here goniometry means that the rotation of the sample with respect
+to the imaging device is well documented while rotating the sample
+between the imaging runs.
+
+More precisely, `gonioanalysis` takes advantage of the following data structure:
+
+ data_directory
+ ├── specimen_01
+    │   ├── pos(horizontal, vertical)_some-suffix-stuff
+ │ │ ├── image_rep0_0.tif
+ │ │ └── image_rep0_1.tif
+ │ └── ...
+ └── ...
+
+Here, the rotation of the sample is encoded in the image folder name
+with the `pos` prefix.
+Alternatively, images with a generalized directory structure can also be used:
+
+ data_directory
+ ├── specimen_01
+ │ ├── experiment_01
+ │ │ ├── image_001.tif
+ │ │ └── image_002.tif
+ │ └── ...
+ └── ...
+
+
+Please mind that `gonioanalysis` was created to analyse Drosophila's
+deep pseudopupil movement and orientation data across its left and right eyes,
+and because of this the language used in many places can be droso-centric.
+
+
+User interfaces
+---------------
+
+Currently `gonioanalysis` comes with two user interfaces, one command line
+based and the other graphical.
+
+The documentation on this page gives only general instructions and ideas.
+For exact usage instructions, please refer to the submodule documentation of
+`gonioanalysis.drosom.terminal` and `gonioanalysis.tkgui`.
+
+
+Initial analysis
+----------------
+The following two steps have to be performed for every imaged specimen, but luckily only once.
+The first one is manual, needing user interaction, but can be performed quite quickly depending on the number of image folders.
+The second one is automated but can take a long time depending on the amount of data.
+
+### 1) Selecting regions of interest (ROIs)
+
+In `gonioanalysis`, rectangular ROIs are manually drawn by the user,
+once per image folder.
+The idea is to confine the moving feature inside a rectangle, but only in the first frame.
+
+
+### 2) Measuring movements
+
+After the ROIs have been selected, the movement analysis can be run.
+Using 2D cross-correlation based methods (template matching), `gonioanalysis`
+automatically quantifies the movement of the ROI-bound feature across all the frames and repeats.
+
+Because this part requires no user interaction and takes a while,
+it is usually best to select the ROIs for all specimens beforehand and
+then batch-run the movement analysis.
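+
+As an illustration of the idea only (a minimal sketch using OpenCV, not
+the package's internal implementation; the function and variable names
+here are made up for this example):
+
+    import cv2
+
+    def track_roi(frames, roi):
+        # frames: a list of grayscale numpy arrays
+        # roi: (x, y, w, h), as drawn on the first frame
+        x, y, w, h = roi
+        template = frames[0][y:y+h, x:x+w]
+        displacements = []
+        for frame in frames:
+            # Slide the template over the frame (2D cross-correlation)
+            # and take the best-matching location
+            scores = cv2.matchTemplate(frame, template, cv2.TM_CCOEFF_NORMED)
+            _, _, _, max_loc = cv2.minMaxLoc(scores)
+            displacements.append((max_loc[0] - x, max_loc[1] - y))
+        return displacements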
+
+
+After initial analysis
+----------------------
+
+### Vectormap - 3D directions
+
+The 2D data of the moving features in the camera coordinates can be transformed into a 3D coordinate
+system in the specimen's frame of reference.
+To do so, we need to have the rotation of the specimen with respect to the camera specified
+in some fixed coordinate system, most naturally in the coordinate system set by the rotation stages
+that are used to rotate the fly.
+At the moment, `gonioanalysis` supports only one rotation stage configuration:
+a specimen fixed on a vertical rotation stage that is fixed on a horizontal rotation stage that is fixed on a table.
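+
+For intuition, the mapping from the two stage angles to the camera's position
+on a unit sphere around the specimen can be sketched as below. This is a
+simplified version of the `camera2Fly` function found in this codebase (the
+function name below is made up, and the sign handling of x is omitted):
+
+    from math import cos, sin, tan, radians, sqrt
+
+    def stage_to_camera_xyz(horizontal, vertical, radius=1):
+        h, v = radians(horizontal), radians(vertical)
+        y = cos(h) * cos(v) * radius
+        z = y * tan(v)
+        # abs() guards against small negative values from floating point
+        x = sqrt(abs(radius**2 - y**2 - z**2))
+        return x, y, z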
+
+
+### Orientation analysis
+To analyse the directionality of any arbitrary features (such as the hair
+pointing directions across the head),
+you can override `gonioanalysis.drosom.analysing.MAnalyser` with `gonioanalysis.drosom.orientation_analysis.OAnalyser`,
+which replaces the movement measurement with the user manually drawing arrows along the feature directions.
+Afterwards, you can use any `gonioanalysis` code paths, just remembering that, for example in the vectormap,
+the arrows point in the direction of the features, not of their movement.
+
+
+Exporting data
+--------------
+
+All quietly saved files are stored in a location set in `gonioanalysis.directories`.
+By default, on Windows this is C:/Users/USER/GonioAnalysis
+and on other platforms /home/USER/.gonioanalysis.
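+
+For example, to check where analyses are saved on your system
+(`ANALYSES_SAVEDIR` is one of the directories defined in that module):
+
+    from gonioanalysis import directories
+    print(directories.ANALYSES_SAVEDIR)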
+
+
+Command line interface
+----------------------
+
+`gonioanalysis` also includes a command line interface, which can be invoked with
+
+ python -m gonioanalysis.drosom.terminal
+
+
+For all the different options and help, use the `--help` option.
+Note that if no ROIs have been selected yet, the selection of the ROIs
+requires a graphical display and thus cannot be done in headless environments.
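+
+For example, to list all the options:
+
+    python -m gonioanalysis.drosom.terminal --help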
+'''
+
+from .version import __version__
diff --git a/gonio-analysis/gonioanalysis/antenna_level.py b/gonio-analysis/gonioanalysis/antenna_level.py
new file mode 100644
index 0000000..bbc3790
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/antenna_level.py
@@ -0,0 +1,243 @@
+'''
+Code paths related to manually aligning the vertical angles of many
+specimens (a.k.a. antenna level or zero correction).
+'''
+
+import os
+import ast
+import json
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from gonioanalysis.directories import ANALYSES_SAVEDIR, CODE_ROOTDIR, PROCESSING_TEMPDIR
+from gonioanalysis.droso import DrosoSelect
+from gonioanalysis.drosom.loading import load_data
+from gonioanalysis.image_tools import ImageShower
+from gonioanalysis.binary_search import binary_search_middle
+from gonioanalysis.rotary_encoders import to_degrees
+
+ZERO_CORRECTIONS_SAVEDIR = os.path.join(PROCESSING_TEMPDIR, 'vectical_corrections')
+
+
+def load_reference_fly(reference_name):
+ '''
+    Returns the reference fly data, a dictionary with pitch angles as keys and
+    image filenames as values.
+ '''
+ pitches = []
+ with open(os.path.join(ZERO_CORRECTIONS_SAVEDIR, reference_name, 'pitch_angles.txt'), 'r') as fp:
+        for line in fp:
+            # Strip newlines and skip empty lines so the values are
+            # clean numeric strings
+            if line.strip():
+                pitches.append(line.strip())
+
+ images = [os.path.join(ZERO_CORRECTIONS_SAVEDIR, reference_name, fn) for fn in os.listdir(
+ os.path.join(ZERO_CORRECTIONS_SAVEDIR, reference_name)) if fn.endswith('.tif') or fn.endswith('.tiff')]
+
+ images.sort()
+ return {pitch: fn for pitch,fn in zip(pitches, images)}
+
+
+
+#OLD def _drosom_load(self, folder):
+def load_drosom(folder):
+ '''
+ Loads frontal images of the specified drosom
+
+ folder Path to drosom folder
+ '''
+
+ data = load_data(folder)
+
+    pitches = []
+ images = []
+
+ for str_angle_pair in data.keys():
+ #angle_pair = strToDegrees(str_angle_pair)
+ i_start = str_angle_pair.index('(')
+ i_end = str_angle_pair.index(')')+1
+
+ #print(str_angle_pair[i_start:i_end])
+ angle_pair = [list(ast.literal_eval(str_angle_pair[i_start:i_end]))]
+ to_degrees(angle_pair)
+ angle_pair = angle_pair[0]
+
+ if -10 < angle_pair[0] < 10:
+ pitches.append(angle_pair[1])
+ images.append(data[str_angle_pair][0][0])
+
+ pitches, images = zip(*sorted(zip(pitches, images)))
+
+ return pitches, images
+
+
+def save_antenna_level_correction(fly_name, result):
+ '''
+ Saves the antenna level correction that should be a float
+ '''
+ directory = ZERO_CORRECTIONS_SAVEDIR
+ os.makedirs(directory, exist_ok=True)
+ with open(os.path.join(directory, fly_name+'.txt'), 'w') as fp:
+ fp.write(str(float(result)))
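+
+# Illustrative usage (the specimen name is made up):
+#   save_antenna_level_correction('specimen_01', 2.5)
+# writes "2.5" into ZERO_CORRECTIONS_SAVEDIR/specimen_01.txt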
+
+
+class AntennaLevelFinder:
+ '''
+ Ask user to find the antenna levels
+ '''
+
+
+ def find_level(self, folder):
+ '''
+        Call this to have the user select the antenna level.
+ folder Full path to the folder
+ '''
+
+ fly = os.path.split(folder)[1]
+
+ #if os.path.exists(os.path.join(ANALYSES_SAVEDIR, 'antenna_levels', fly+'.txt')):
+ # print('Fly {} is already analysed. Redo (y/n)?'.format(fly))
+ # if not input('>> ').lower() in ['yes', 'y']:
+ # return False
+
+ if 'DrosoX' in fly or 'DrosoALR' in fly:
+ # If DrosoX, use drosox loader and find antenna levels by user
+ # driven binary search.
+
+ fig, ax = plt.subplots()
+ shower = ImageShower(fig, ax)
+
+ if 'DrosoALR' in fly:
+ arl = True
+ else:
+ arl = False
+
+ pitches, images = self._drosox_load(folder, arl)
+
+ shower.setImages(images)
+ center = binary_search_middle(len(images), shower)
+
+ shower.close()
+
+ result = str(pitches[center])
+
+
+ else:
+
+ # DrosoM is harder, there's images every 10 degrees in pitch.
+ # Solution: Find closest matches using analysed DrosoX data
+
+ # Load DrosoM data
+            pitches, images = load_drosom(folder)
+
+
+ # Load reference fly data
+ reference_pitches = {fn: pitch for pitch, fn in load_reference_fly('alr_data').items()}
+ #print(reference_pitches)
+ reference_images = list(reference_pitches.keys())
+ reference_images.sort()
+
+ fig1, ax1 = plt.subplots()
+ fig1.canvas.set_window_title('Reference Drosophila')
+ ref_shower = ImageShower(fig1, ax1)
+ ref_shower.setImages(reference_images)
+
+ fig2, ax2 = plt.subplots()
+ fig2.canvas.set_window_title('{}'.format(fly))
+ m_shower = ImageShower(fig2, ax2)
+
+ offsets = []
+
+ for pitch, image in zip(pitches, images):
+
+ m_shower.setImages([image])
+ m_shower.setImage(0)
+
+ #best_drosox_image = matcher.findBest(image, drosox_images)
+ best_drosoref_image = reference_images[ binary_search_middle(len(reference_images), ref_shower) ]
+
+ reference_pitch = float(reference_pitches[best_drosoref_image])
+
+ print('Pitch {}, reference {}'.format(pitch, reference_pitch))
+
+ offsets.append( pitch - reference_pitch)
+
+ ref_shower.close()
+ m_shower.close()
+
+ result = np.mean(offsets)
+
+ print('Reporting mean offset of {} (from {})'.format(result, offsets))
+
+
+        savedir = os.path.join(ANALYSES_SAVEDIR, 'antenna_levels')
+        os.makedirs(savedir, exist_ok=True)
+        with open(os.path.join(savedir, fly+'.txt'), 'w') as fp:
+            fp.write(str(float(result)))
+
+
+
+ def _load_drosox_reference(self, folder, fly):
+ '''
+ Load DrosoX reference for DrosoM antenna level finding.
+
+        Returns a dictionary {image_fn1: corrected_pitch_1, ...}
+ '''
+ pitches, images = self._drosox_load(folder, True)
+
+ with open(os.path.join(ZERO_CORRECTIONS_SAVEDIR, fly+'.txt'), 'r') as fp:
+ offset = float(fp.read())
+
+ return {image: pitch-offset for image, pitch in zip(images, pitches)}
+
+
+
+
+ def _drosox_load(self, folder, arl):
+ '''
+        Private method, not intended to be called from outside.
+ Loads pitches and images.
+ '''
+
+        # NOTE: XLoader is assumed to be importable here; its import is
+        # missing from this file
+        xloader = XLoader()
+ data = xloader.getData(folder, arl_fly=arl)
+
+ pitches = []
+ images = []
+
+
+
+        # Try to open any previously analysed data
+        fly = os.path.split(folder)[1]
+        analysed_data = []
+        try:
+            with open(os.path.join(ANALYSES_SAVEDIR, 'binary_search', 'results_{}.json'.format(fly)), 'r') as fp:
+                analysed_data = json.load(fp)
+        except (FileNotFoundError, ValueError):
+            pass
+        analysed_pitches = [item['pitch'] for item in analysed_data]
+
+
+        for pitch, hor_im in data:
+
+            if pitch in analysed_pitches:
+                # Reuse the previously determined middle index
+                j = analysed_pitches.index(pitch)
+                center_index = analysed_data[j]['index_middle']
+            else:
+                center_index = int(len(hor_im)/2)
+
+            pitches.append(pitch)
+            images.append(hor_im[center_index][1])
+
+ return pitches, images
+
+
+
+
+def main():
+ finder = AntennaLevelFinder()
+
+ selector = DrosoSelect()
+ folders = selector.askUser()
+
+ for folder in folders:
+ finder.find_level(folder)
+
+if __name__ == "__main__":
+ # FIXME TODO
+ main()
diff --git a/gonio-analysis/gonioanalysis/binary_search.py b/gonio-analysis/gonioanalysis/binary_search.py
new file mode 100644
index 0000000..a8c14d5
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/binary_search.py
@@ -0,0 +1,263 @@
+'''
+binary_search.py
+Determining binocular overlap by half-interval search.
+
+DESCRIPTION
+
+
+TODO
+- examination mode, fine tune changes
+- combine the binary searches into a single function for easier maintenance
+- remove reverse option from binarySearch
+
+'''
+import os
+import json
+import time
+from math import floor
+
+import matplotlib.pyplot as plt
+
+from gonioanalysis.directories import ANALYSES_SAVEDIR
+from gonioanalysis.droso import DrosoSelect
+from gonioanalysis.image_tools import ImageShower
+
+
+def inputRead():
+ while True:
+ try:
+ inpt = input('>> ')
+ if inpt == 's':
+ return 'skip'
+ direction = int(inpt)
+ break
+ except ValueError:
+ pass
+ return direction
+
+def calcM(L, R):
+ '''
+ Calculate m used in the binary search.
+ '''
+ return int(floor(abs(L+R)/2))
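+
+# For example, calcM(0, 10) == 5: the probe index halfway between the
+# current left (L) and right (R) bounds.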
+
+
+def binary_search_middle(N_images, shower, reverse=False):
+ '''
+ Search for the border where pseudopupils are visible simultaneously.
+ Midpoint search.
+
+ shower UI class
+
+ '''
+ DIR = [1,2]
+ if reverse:
+ DIR = [2,1]
+
+
+ right_end = N_images-1
+
+ R = right_end
+ L = 0
+
+ #print('Find midpoint. "1" to go left or "2" to go right')
+ print('Binary search')
+ print(' Type in 1 or 2 to rotate\n -1 to return to the beginning\n 0 to instaselect')
+
+ shower.setTitle('Midpoint')
+
+    while L < R:
+        ...
diff --git a/gonio-analysis/gonioanalysis/coordinates.py b/gonio-analysis/gonioanalysis/coordinates.py
new file mode 100644
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/coordinates.py
+from math import sin, cos, tan, acos, atan, radians, degrees, pi, sqrt
+
+import numpy as np
+
+
+    ...
+
+    if ... > max_distance:
+        return False
+
+    return i_shortest
+
+
+def mean_vector(point, vectors):
+ '''
+    Average the vectors and return the mean vector positioned at the given point.
+
+ '''
+
+    av = np.mean(vectors, axis=0)
+
+    if np.linalg.norm(av) != 0:
+
+        av += np.array(point)
+        av = force_to_tplane(point, av)
+
+        # Rescale to the length of the first nonzero input vector
+        for i in range(len(vectors)):
+            wanted_len = np.linalg.norm(vectors[i])
+            if wanted_len != 0:
+                break
+
+        av -= np.array(point)
+        av = av / np.linalg.norm(av) * wanted_len
+    else:
+        av = np.array([0, 0, 0])
+ #x,y,z = point
+
+ #return (angle_tag, (x, av[0]), (y, av[1]), (z, av[2]) )
+ return av
+
+
+
+
+def rotate_about_x(point, degs):
+ '''
+    Rotate a point in 3D space about the first axis (x-axis).
+ '''
+
+ c = cos(radians(degs))
+ s = sin(radians(degs))
+
+ Rx = np.array([[1,0,0], [0, c, -s], [0, s, c]])
+
+ return np.dot(Rx, np.array(point))
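+
+# For example, rotating the unit y-vector 90 degrees about the x-axis
+# gives the unit z-vector (up to floating point precision):
+#   rotate_about_x([0, 1, 0], 90)  ->  approx. [0, 0, 1]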
+
+
+
+def force_to_tplane(P0, P1, radius=1):
+ '''
+    Forces a vector (from P0 to P1) onto the tangent plane of a sphere
+    while retaining the vector's length.
+
+    P0 is the point on the sphere (and on the tangent plane)
+    P1 is the point off the tangent plane
+
+    Returns P2, the point on both the tangent plane and the line
+    connecting the sphere's centre point to P1.
+
+    Notice: DOES NOT RETURN THE VECTOR, BUT P2 (vec = P2 - P0)
+ '''
+
+
+ a = radius / (P0[0]*P1[0]+P0[1]*P1[1]+P0[2]*P1[2])
+
+ return P1 * a
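+
+# For example, with P0 = [0, 0, 1] (the north pole of the unit sphere),
+# a point P1 given as a numpy array is projected onto the plane z = 1:
+#   force_to_tplane(np.array([0, 0, 1]), np.array([0.4, 0.2, 2.0]))
+#   returns array([0.2, 0.1, 1.0])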
+
+
+#def projection_to_tplane(P)
+
+
+def camera2Fly(horizontal, vertical, radius=1):
+ '''
+    With the given goniometer positions, calculates the camera's position
+    in the fly's cartesian coordinate system.
+
+    Input in degrees.
+ '''
+ #print('Horizontal {}'.format(horizontal))
+ #print('Vertical {}'.format(vertical))
+
+ h = radians(horizontal)
+ v = radians(vertical)
+
+ #x = sqrt( radius**2 * (1 - (cos(h) * sin(v))**2 * (tan(v)**2+1)) )
+
+ #y = cos(h) * cos(v) * radius
+
+ #z = cos(h) * sin(v) * radius
+
+ y = cos(h)*cos(v)*radius
+ z = y * tan(v)
+
+ # abs added because finite floating point precision
+ x = sqrt(abs(radius**2 - y**2 - z**2))
+
+ # Make sure zero becomes zero
+ if x < 10**-5:
+ x = 0
+
+ # Obtain right sign for x
+ looped = int(h / (2*pi))
+ if not 0 < (h - 2*pi*looped ) < pi:
+ x = -x
+
+
+ return x, y, z
+
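+# Illustrative sanity checks (not part of the original module):
+#   camera2Fly(0, 0)  returns (0, 1.0, 0.0)     -- camera on the +y axis
+#   camera2Fly(90, 0) returns approx. (1, 0, 0) -- camera on the +x axis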
+
+def camera_rotation(horizontal, vertical, return_degrees=False):
+ '''
+    Returns the camera's rotation (in radians, or in degrees if
+    return_degrees=True) for the given goniometer angles.
+ '''
+
+ if vertical > 90:
+ vvertical = 180-vertical
+ else:
+ vvertical = vertical
+
+ rot = -(sin(radians(horizontal))) * radians(vvertical)
+ #rot= -((radians(horizontal))/(pi/2)) * radians(vvertical)
+
+ if vertical > 90:
+ rot += radians(180)
+ rot = -rot
+
+ if return_degrees:
+ rot = degrees(rot)
+
+ return -rot
+
+
+
+def camvec2Fly(imx, imy, horizontal, vertical, radius=1, normalize=False):
+ '''
+    Maps a 2D vector (imx, imy) given in camera image coordinates to the
+    fly's 3D coordinate system and returns the vector's endpoint (fx, fy, fz).
+
+    normalize     If True, return unit length vectors
+ '''
+
+ #imx = 0
+ #imy = 1
+
+ #rot = (1- cos(radians(horizontal))) * radians(vertical)
+
+ #rot = -(radians(horizontal)/(np.pi/2)) * radians(vertical)
+ '''
+ rot = camera_rotation(horizontal, vertical)
+
+ cimx = imx * cos(rot) - imy * sin(rot)
+ cimy = imx * sin(rot) + imy * cos(rot)
+
+
+ # find the plane
+
+ # coordinates from the plane to 3d
+
+ #dz = cos()
+
+ x,y,z = camera2Fly(horizontal, vertical, radius=radius)
+
+ #print('x={} y={} z={}'.format(x,y,z))
+ '''
+ # Create unit vectors in camera coordinates
+ '''
+ if x == 0 and y > 0:
+ b = pi/2
+ elif x== 0 and y < 0:
+ b = pi + pi/2
+ else:
+ b = atan(y/x) # angle in xy-plane, between radial line and x-axis
+ #b = atan2(y,x)
+
+ if x < 0:
+ b += pi
+ e = acos(z/sqrt(x**2+y**2+z**2))# anti-elevation
+
+ if y < 0:
+ e += pi
+
+ uimx = np.array([-sin(b) , cos(b), 0])
+ #uimy = np.asarray([-cos(e) * sin(b) , - cos(e) * cos(b), sin(e) ])
+ # Fixed this on 6.9.2019
+ uimy = np.array([-cos(b) * cos(e) , -sin(b) * cos(e), sin(e) ])
+ '''
+
+ x,y,z = camera2Fly(horizontal, vertical, radius=radius)
+
+ uimx = np.array(camera2Fly(horizontal, vertical, radius=radius)) - np.array(camera2Fly(horizontal+1, vertical, radius=radius))
+ uimx = uimx / np.linalg.norm(uimx)
+
+ uimy = np.array([0, -sin(radians(vertical)), cos(radians(vertical))])
+
+ #print('vertical {}'.format(vertical))
+ #print('imx is {}'.format(imx))
+ #fx, fy, fz = np.array([x,y,z]) + uimx*cimx + uimy*cimy
+ vector = uimx*imx + uimy*imy
+
+ if normalize:
+ length = np.linalg.norm(vector)
+ if length != 0:
+
+ if type(normalize) == type(42) or type(normalize) == type(4.2):
+ length /= normalize
+
+ vector = vector / length
+
+
+ fx, fy, fz = np.array([x,y,z]) + vector
+ '''
+ if normalize:
+ uim = uimx*cimx + uimy*cimy
+ length = np.sqrt(uim[0]**2 + uim[1]**2 + uim[2]**2)
+
+ if length != 0:
+
+ if type(normalize) == type(42) or type(normalize) == type(4.2):
+ length /= normalize
+
+ fx, fy, fz = np.array([x,y,z]) + (uimx*cimx + uimy*cimy)/length
+ '''
+
+ #print("Elevation {}".format(degrees(e)))
+ #print('B {}'.format(b))
+ #print('uimx {}'.format(uimx))
+ #print('uimy {}'.format(uimy))
+ #print()
+
+ return fx, fy, fz
+#
+#def findDistance(point1, point2):
+# '''
+#    Returns a PSEUDO-distance between two points in the rotation stages' angle coordinates (horizontal_angle, vertical_angle).
+#
+#    It's called a pseudo-distance because it's not the real 3D cartesian distance; the real distance
+#    would also depend on the radius, which is omitted in our angle coordinate system.
+#
+#    In the horizontal/vertical angle system, two points may seem to be far apart while in reality (3D cartesian coordinates)
+#    the points are close to each other. For example, consider the points
+#        (90, 10) and (90, 70)
+#    These points are separated by 60 degrees in the vertical (pitch) angle, but because the horizontal angle is 90 degrees
+#    in both cases, they are actually the same point in reality (with different camera rotation)
+#
+# INPUT ARGUMENTS DESCRIPTION
+# point1 (horizontal, vertical)
+#
+#
+# TODO: - Implement precise distance calculation in 3D coordinates
+# - ASSURE THAT THIS ACTUALLY WORKS???
+# '''
+# # Scaler: When the horizontal angle of both points is close to 90 or -90 degrees, distance
+# # should be very small
+# scaler = abs(math.sin((point1[0] + point2[0])/ 2))
+# # All this is probably wrong, right way to do this is calculate distances on a sphere
+# return scaler * math.sqrt( (point1[0]-point2[0])**2 + (point1[1]-point2[1])**2 )
+#
+#
+#def findClosest(point1, points, distance_function=None):
+# '''
+# Using findDistance, find closest point to point1.
+# '''
+#
+# distances = []
+#
+# if not callable(distance_function):
+# distance_function = findDistance
+#
+# for point2 in points:
+# distances.append( distance_function(point1, point2) )
+#
+#    argmin_i = distances.index(min(distances))
+#
+#    return points[argmin_i]
+#
+
+
+def get_rotation_matrix(axis, rot):
+ '''
+    Returns an elementary rotation matrix.
+
+ axis 'x', 'y', or 'z'
+ rot rotation in radians
+ '''
+ # Calculate sin and cos terms beforehand
+ c = cos(rot)
+ s = sin(rot)
+
+ if axis == 'x':
+ return np.array([[1,0,0], [0,c,-s], [0,s,c]])
+ elif axis == 'y':
+ return np.array([[c,0,s], [0,1,0], [-s,0,c]])
+ elif axis == 'z':
+ return np.array([[c,-s,0], [s,c,0], [0,0,1]])
+ else:
+ raise ValueError('Axis has to be x, y, or z, not {}'.format(axis))
+
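+# Example (a sketch, not part of the original module): rotating the x unit
+# vector by 90 degrees about the z-axis gives the y unit vector (up to
+# floating point error):
+#
+#   >>> get_rotation_matrix('z', radians(90)) @ np.array([1, 0, 0])
+#   array([0., 1., 0.])   # approximately
+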
+
+def rotate_along_arbitrary(P1, points, rot):
+    '''
+    Rotate points about an arbitrary axis that goes through
+    the origin (P0) and the point P1.
+
+    Implemented from here:
+    http://paulbourke.net/geometry/rotate/
+
+    Arguments
+    ---------
+    P1 : np.ndarray
+        Defines the rotation axis together with the origin
+    points : np.ndarray
+        The points to rotate
+    rot : float or int
+        Rotation in radians (not degrees!)
+    '''
+
+ a,b,c = P1 / np.linalg.norm(P1)
+ d = math.sqrt(b**2 + c**2)
+
+ if d == 0:
+ Rx = np.eye(3)
+ Rxr = np.eye(3)
+ else:
+ Rx = np.array([[1,0,0],[0,c/d, -b/d],[0,b/d, c/d]])
+ Rxr = np.array([[1,0,0],[0,c/d, b/d],[0,-b/d, c/d]])
+
+ Ry = np.array([[d,0,-a],[0,1,0], [a,0,d]])
+ Ryr = np.array([[d,0,a],[0,1,0], [-a,0,d]])
+
+ Rz = get_rotation_matrix('z', rot)
+
+ return (Rxr @ Ryr @ Rz @ Ry @ Rx @ points.T).T
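+
+# Example (a sketch, not part of the original module): rotating points by
+# 180 degrees about the z-axis through the origin maps each (x, y, z) to
+# (-x, -y, z):
+#
+#   points = np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 1.0]])
+#   rotated = rotate_along_arbitrary(np.array([0, 0, 1]), points, radians(180))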
+
+
+def rotate_points(points, yaw, pitch, roll):
+ '''
+ Just as rotate_vectors but only for points.
+
+ Arguments
+ ---------
+ yaw, pitch, roll : float or int
+ Rotations in radians (not degrees!)
+ '''
+ yaw_ax = (0,0,1)
+ pitch_ax = (1,0,0)
+ roll_ax = (0,1,0)
+
+ axes = np.array([yaw_ax, pitch_ax, roll_ax])
+
+ rotations = [yaw, pitch, roll]
+
+ for i in range(3):
+ points = rotate_along_arbitrary(axes[i], points, rotations[i])
+
+ return points
+
+
+
+def rotate_vectors(points, vectors, yaw, pitch, roll):
+    '''
+    Rotate points and their associated vectors.
+
+    It is assumed that initially
+        yaw is rotation about Z
+        pitch is rotation about X
+        roll is rotation about Y
+
+    ie. that the fly head is at zero rotation, antenna roots pointing towards
+    the positive y-axis. Rotations are given in radians.
+    '''
+
+ yaw_ax = (0,0,1)
+ pitch_ax = (1,0,0)
+ roll_ax = (0,1,0)
+
+ axes = np.array([yaw_ax, pitch_ax, roll_ax])
+
+ rotations = [yaw, pitch, roll]
+
+ for i in range(3):
+ new_points = rotate_along_arbitrary(axes[i], points, rotations[i])
+ new_vectors = rotate_along_arbitrary(axes[i], points+vectors, rotations[i]) - new_points
+
+ points = new_points
+ vectors = new_vectors
+
+
+ # Update axes
+ #axes = rotate_along_arbitrary(axes[i], axes, rotations[i])
+
+ return points, vectors
+
+
+def distance(a, b):
+ '''
+ Calculates distance between two points in 3D cartesian space.
+ a,b (x,y,z)
+ '''
+
+ return sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2 + (a[2]-b[2])**2)
+
+
+def optimal_sampling(horizontals, verticals):
+    '''
+    Determine an optimal way to sample using two orthogonal goniometers:
+    greedily accept only those (horizontal, vertical) rotations whose 3D
+    points are not too close to any previously accepted point.
+
+    Returns the accepted points as an Nx3 array.
+    '''
+
+ steps = ((horizontals[1]-horizontals[0]), (verticals[1]-verticals[0]))
+
+ min_distance = 0.75 * distance(camera2Fly(steps[0], steps[1]), camera2Fly(0,0))
+
+
+ goniometer_vals = {}
+
+ points = []
+
+ for vertical in verticals:
+ goniometer_vals[vertical] = []
+ for horizontal in horizontals:
+ point = camera2Fly(horizontal, vertical)
+
+ append = True
+
+ for previous_point in points:
+ if distance(previous_point, point) < min_distance:
+ append = False
+ break
+
+ if append:
+ points.append(point)
+ goniometer_vals[vertical].append(horizontal)
+
+ #for hor, vers in sorted(goniometer_vals.items(), key=lambda x: int(x[0])):
+ # print('{}: {}'.format(hor, vers))
+
+ return np.array(points)
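+
+# A usage sketch (not part of the original module; the ranges here are
+# arbitrary examples): sample with 10-degree steps in both goniometer
+# rotations, thinning out the crowded poles:
+#
+#   points = optimal_sampling(np.arange(-90, 91, 10), np.arange(-30, 211, 10))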
+
+
+
+
+def test_rotate_vectors():
+    '''
+    Visual test: rotate a vector field on a sphere and save a 3D plot.
+    '''
+
+ points = []
+ vectors = []
+
+ horizontals = np.linspace(-60, 60, 10)
+ verticals = np.linspace(0, 180, 10)
+
+ for horizontal in horizontals:
+ for vertical in verticals:
+ point = camera2Fly(horizontal, vertical)
+ vector = np.array([0,0,.2])
+
+ P2 = force_to_tplane(point, point+vector)
+
+ points.append(point)
+ vectors.append(P2-point)
+
+ points = np.array(points)
+ vectors = np.array(vectors)
+
+ points, vectors = rotate_vectors(points, vectors, radians(0), radians(89), 0)
+
+ from gonioanalysis.drosom.plotting import vector_plot
+
+
+
+ fig = plt.figure()
+ ax = fig.add_subplot(111, projection='3d')
+ ax.view_init(elev=90, azim=0)
+ vector_plot(ax, points, vectors)
+
+ plt.savefig('test_rotate_vectors.png')
+
+
+def test_imx():
+
+ P0 = (2, -6)
+
+ b = atan(P0[1] / P0[0])
+ P1 = (P0[0]-sin(b), P0[1]+cos(b))
+
+
+ plt.scatter(*P0, color='blue')
+ plt.scatter(*P1, color='red')
+ plt.scatter(0,0)
+
+ plt.show()
+
+
+def test_camvec2Fly():
+ fig = plt.figure()
+ ax = fig.add_subplot(111, projection='3d')
+ ax.scatter(0,0,0, s=10)
+
+    verticals = np.linspace(0, 360)
+
+    for vertical in verticals:
+
+ imx, imy, horizontal = (1,0,0)
+
+ point0 = camera2Fly(horizontal, vertical)
+ point1 = camvec2Fly(imx,imy,horizontal,vertical)
+
+ print(point0)
+ ax.scatter(*point0, s=10, color='blue')
+ ax.scatter(*point1, s=10, color='red')
+
+
+
+
+ plt.show()
+
+def test1_camera_rotation():
+
+ from drosom import get_data
+ import tifffile
+ import matplotlib.pyplot as plt
+ from scipy import ndimage
+ from gonioimsoft.anglepairs import strToDegrees
+
+ data = get_data('/home/joni/smallbrains-nas1/array1/pseudopupil_imaging/DrosoM23')
+
+ for angle, image_fns in data.items():
+
+ horizontal, vertical = strToDegrees(angle)
+ rot = degrees(camera_rotation(horizontal, vertical))
+
+ im = tifffile.imread(image_fns[0][0])
+ im = ndimage.rotate(im, rot)
+
+ print('{} {}'.format(horizontal, vertical))
+ print(rot)
+
+ plt.imshow(im)
+ plt.show()
+
+
+def test2_camera_rotation():
+
+ horizontals = np.linspace(0,360)
+ vertical = 0
+ for horizontal in horizontals:
+ rot = camera_rotation(horizontal, vertical)
+ if rot != 0:
+ raise ValueError('rot should be 0 for ALL horizontals when vertical = 0')
+
+ horizontals = np.linspace(0,360)
+ vertical = 180
+ for horizontal in horizontals:
+ rot = camera_rotation(horizontal, vertical)
+ if round(degrees(rot)) != 180:
+            raise ValueError('rot should be 180deg for ALL horizontals when vertical = 180. rot {}'.format(degrees(rot)))
+
+ rot = camera_rotation(0, 95)
+ if round(degrees(rot)) != 180:
+        raise ValueError('rot should be 180deg for 0 horizontal when vertical = 95. rot {}'.format(degrees(rot)))
+
+ rot = camera_rotation(-90, 45)
+ if round(degrees(rot)) != 45:
+        raise ValueError('rot should be 45deg for -90 horizontal when vertical = 45. rot {}'.format(degrees(rot)))
+
+
+
+if __name__ == "__main__":
+ import matplotlib.pyplot as plt
+ from mpl_toolkits.mplot3d import Axes3D
+
+
+
+ #test_camvec2Fly()
+ #test_force_to_plane()
+ #test1_camera_rotation()
+ test_rotate_vectors()
diff --git a/gonio-analysis/gonioanalysis/directories.py b/gonio-analysis/gonioanalysis/directories.py
new file mode 100644
index 0000000..758a7d5
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/directories.py
@@ -0,0 +1,102 @@
+'''
+Central settings for save/load directories.
+'''
+
+import os
+import platform
+
+CODE_ROOTDIR = os.path.dirname(os.path.realpath(__file__))
+USER_HOMEDIR = os.path.expanduser('~')
+
+if platform.system() == "Windows":
+ GONIODIR = os.path.join(USER_HOMEDIR, 'GonioAnalysis')
+else:
+ GONIODIR = os.path.join(USER_HOMEDIR, '.Gonioanalysis')
+
+
+ANALYSES_SAVEDIR = os.path.join(GONIODIR, 'final_results')
+PROCESSING_TEMPDIR = os.path.join(GONIODIR, 'intermediate_data')
+PROCESSING_TEMPDIR_BIGFILES = os.path.join(GONIODIR, 'intermediate_bigfiles')
+
+
+# DIRECTORIES THAT HAVE TO BE CREATED
+ALLDIRS = {'ANALYSES_SAVEDIR': ANALYSES_SAVEDIR,
+ 'PROCESSING_TEMPDIR': PROCESSING_TEMPDIR,
+ 'PROCESSING_TEMPDIR_BIGFILES': PROCESSING_TEMPDIR_BIGFILES}
+
+
+def print_directories():
+
+ print('These are the directories')
+
+ for key, item in ALLDIRS.items():
+ print('{} {}'.format(key, item))
+
+
+
+def cli_ask_creation(needed_directories):
+ '''
+ Short command line yes/no.
+ '''
+
+ # Temporary fix; Launching GUI for the first time fails
+ # if directories not present so always make them
+ return True
+
+ print("The following directories have to be created")
+
+ for directory in needed_directories:
+ print(" {}".format(directory))
+
+ print("\nIs this okay? (yes/no)")
+
+ while True:
+ selection = input(">> ").lower()
+
+ if selection == "yes":
+ return True
+ elif selection == "no":
+ return False
+ else:
+ print("Choice not understood, please type yes or no")
+
+
+
+def directories_check(ui=cli_ask_creation):
+ '''
+ Perform a check that the saving directories exist.
+
+    ui      Callable that returns True if the user wants to create
+            the directories or False if not.
+            As the first argument it gets the list of directories to be created.
+
+    If ui is not callable, raise an error.
+    '''
+    non_existent = []
+
+    for key, item in ALLDIRS.items():
+
+        if not os.path.exists(item):
+            non_existent.append(item)
+
+    # If some directories are not created, launch an input requiring
+    # user interaction.
+    if non_existent:
+        if callable(ui):
+            if ui(non_existent):
+                for directory in non_existent:
+                    os.makedirs(directory, exist_ok=True)
+            else:
+                raise NotImplementedError("Reselecting directories in the UI is not yet implemented")
+        else:
+            raise OSError("Some directories are missing and ui is not callable")
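+
+# A usage sketch (not part of the original module): a GUI could pass its
+# own confirmation dialog in place of the command line prompt:
+#
+#   directories_check(ui=lambda needed_dirs: True)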
+
+
+
+if __name__ == "__main__":
+ print_directories()
+else:
+ directories_check()
+
+
+
diff --git a/gonio-analysis/gonioanalysis/droso.py b/gonio-analysis/gonioanalysis/droso.py
new file mode 100644
index 0000000..0ab6523
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/droso.py
@@ -0,0 +1,210 @@
+'''
+General methods common for both DrosoM and DrosoX.
+'''
+
+import os
+import json
+from os import listdir
+from os.path import isdir, join
+
+
+from gonioanalysis.directories import GONIODIR
+
+
+def simple_select(list_of_strings):
+ '''
+ Simple command line user interface for selecting a string
+ from a list of many strings.
+
+ Returns the string selected.
+ '''
+
+ for i_option, option in enumerate(list_of_strings):
+ print('{}) {}'.format(i_option+1, option))
+
+ while True:
+ sel = input('Type in selection by number: ')
+
+ try:
+ sel = int(sel)
+        except ValueError:
+            print('Please input a number')
+ continue
+
+ if 1 <= sel <= len(list_of_strings):
+ return list_of_strings[sel-1]
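+
+# A usage sketch (hypothetical specimen names): let the user pick one
+# string from a list:
+#
+#   specimen = simple_select(['DrosoM1', 'DrosoM2', 'DrosoM3'])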
+
+
+
+
+class SpecimenGroups:
+ '''
+    Manages grouping specimens into named groups.
+ '''
+
+ def __init__(self):
+ self.groups = {}
+
+ self.load_groups()
+
+ def load_groups(self):
+ try:
+ with open(os.path.join(GONIODIR, 'specimen_groups.txt'), 'r') as fp:
+ self.groups = json.load(fp)
+ except FileNotFoundError:
+ print('No specimen groups')
+
+ def save_groups(self):
+        '''
+        Save groups, but only if there are some members (does not save an empty dict)
+        '''
+        if len(self.groups) > 0:
+ with open(os.path.join(GONIODIR, 'specimen_groups.txt'), 'w') as fp:
+ json.dump(self.groups, fp)
+
+
+ def new_group(self, group_name, *specimens):
+ self.groups[group_name] = [*specimens]
+
+
+ def get_groups(self):
+ '''
+ Returns the groups dictionary
+ '''
+ return self.groups
+
+ def get_specimens(self, group_name):
+ return self.groups[group_name]
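+
+    # A usage sketch (hypothetical group and specimen names):
+    #
+    #   groups = SpecimenGroups()
+    #   groups.new_group('controls', 'DrosoM1', 'DrosoM2')
+    #   groups.save_groups()
+    #   groups.get_specimens('controls')   # -> ['DrosoM1', 'DrosoM2']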
+
+
+class DrosoSelect:
+ '''
+ Selecting a Droso folder based on user input or programmatically.
+
+ Folder has to start with "Droso".
+
+ TODO:
+ - add programmatic selection methods of folders
+ '''
+
+ def __init__(self, datadir=None):
+ '''
+        datadir     Directory where the different Droso folders are located
+ '''
+ if datadir is None:
+ self.path = input("Input data directory >> ")
+ else:
+ self.path = datadir
+
+ folders = [fn for fn in os.listdir(self.path) if isdir(join(self.path, fn))]
+ self.folders = [os.path.join(self.path, fn) for fn in folders]
+
+ self.groups = SpecimenGroups()
+
+
+ def parse_specimens(self, user_input):
+ '''
+        Parse user input to get the manalyser names.
+
+ Arguments
+ ---------
+        user_input : string
+ Comma separated list of specimen names or indices of self.folders,
+ or a group name.
+
+ Raises ValueError if unable to parse.
+ '''
+
+ # 1) If user supplies a specimen group name
+ if user_input in self.groups.get_groups().keys():
+ user_input = ','.join(self.groups.get_specimens(user_input))
+
+ sel_keys = [os.path.basename(x) for x in user_input.split(',')]
+ selections = [folder for folder in self.folders if os.path.basename(folder) in sel_keys]
+
+ if len(selections) == len(sel_keys):
+ print('Selecting by group.')
+ else:
+ print('Group is invalid; Specified specimens do not exist')
+
+ else:
+            # 2) Otherwise first try if indices to the (filtered) folders
+            try:
+                folders = getattr(self, 'filt_folders', self.folders)
+                sel_indices = [int(i) for i in user_input.split(',')]
+                selections = [folders[i] for i in sel_indices]
+            except IndexError:
+                print('One of the given numbers goes over limits, try again.')
+
+ # 3) Next try if specifying by specimen names
+ except ValueError:
+                print('Non-numeric values given, trying with base names')
+
+ sel_keys = [os.path.basename(x) for x in user_input.split(',')]
+ selections = [folder for folder in self.folders if os.path.basename(folder) in sel_keys]
+
+ if len(selections) == len(sel_keys):
+ print('Worked.')
+ else:
+ print('Did not work, try again.')
+
+        try:
+            selections
+        except NameError:
+            raise ValueError("parse_specimens unable to process {}".format(user_input))
+
+ return selections
+
+
+ def ask_user(self, startswith='', endswith='', contains=''):
+ '''
+ In terminal, ask user to select a Droso folder and can perform simple
+ filtering of folders based on folder name.
+
+ INPUT ARGUMENTS DESCRIPTION
+ startswith Folder's name has to start with this string
+ endswith Folder's name has to have this string in the end
+ contains Folder's name has to have this string somewhere
+
+        RETURNS         A list of directories (strings) that the user selected
+ '''
+ available_commands = ['new_group', 'list_groups', ]
+
+ # Filtering of folders based on their name: startswith, endswith, and contains
+ self.filt_folders = [f for f in self.folders if
+ os.path.split(f)[1].startswith(startswith) and os.path.split(f)[1].endswith(endswith) and contains in os.path.split(f)[1]]
+
+ folders = self.filt_folders
+
+        print('\nSelect a Droso folder (give either the number or the droso name; to select many, separate by commas)')
+ for i, folder in enumerate(folders):
+ print(" {}) {}".format(i, folder))
+
+ print('Type help for additional commands')
+
+ while True:
+ user_input = input('>> ')
+
+ #
+ splitted = user_input.split(' ')
+ if splitted[0] == 'help':
+                print('The following commands are available')
+ for cmd in available_commands:
+ print(' '+cmd)
+ elif splitted[0] == 'new_group':
+ self.groups.new_group(*splitted[1:])
+ elif splitted[0] == 'list_groups':
+ print(self.groups.get_groups())
+ else:
+ try:
+ selections = self.parse_specimens(user_input)
+ break
+ except ValueError:
+ pass
+
+ print('\nSelected {}\n'.format(selections))
+
+ self.groups.save_groups()
+
+ return selections
+
+
diff --git a/gonio-analysis/gonioanalysis/drosom/README.txt b/gonio-analysis/gonioanalysis/drosom/README.txt
new file mode 100644
index 0000000..5fb7a04
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/README.txt
@@ -0,0 +1,20 @@
+Goniometric movement analysis
+
+On the first run, the script asks you to select regions of interest (ROIs),
+on which it applies cross-correlation movement analysis. The ROIs and
+cross-correlation results are cached on disk.
+
+You can pass any of the following options (arguments) to the script:
+
+ vectormap Interactive 3D plot of the pseudopupil movement directions
+ averaged Vectormap but averaging over the selected specimen
+ trajectories 2D movement trajectories
+
+With averaged, one can also use
+ animation Create a video rotating the 3dplot
+ complete_flow_analysis Create a video with the simulated optic flow and
+ flow / vector map difference over many specimen
+ rotations
+
+Use space to separate the arguments.
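+For example, passing "averaged animation" creates a video of the rotating,
+specimen-averaged vectormap.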
+Any undocumented options can be found in drosom/terminal.py
diff --git a/gonio-analysis/gonioanalysis/drosom/__init__.py b/gonio-analysis/gonioanalysis/drosom/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/gonio-analysis/gonioanalysis/drosom/analyser_commands.py b/gonio-analysis/gonioanalysis/drosom/analyser_commands.py
new file mode 100644
index 0000000..1dc2981
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/analyser_commands.py
@@ -0,0 +1,84 @@
+'''
+The ANALYSER_CMDS dict defined here maps command names to functions
+that accept the Analyser object as their only argument.
+'''
+
+import numpy as np
+
+from gonioanalysis.drosom import plotting
+from gonioanalysis.drosom.plotting.common import save_3d_animation
+from gonioanalysis.drosom.plotting import basics, illustrate_experiments
+from gonioanalysis.drosom.plotting.plotter import MPlotter
+from gonioanalysis.drosom.plotting import complete_flow_analysis, error_at_flight
+from gonioanalysis.drosom.special.norpa_rescues import norpa_rescue_manyrepeats
+from gonioanalysis.drosom.special.paired import cli_group_and_compare
+import gonioanalysis.drosom.reports as reports
+
+I_WORKER = None
+N_WORKERS = None
+
+plotter = MPlotter()
+
+
+# Functions that take only one input argument that is the MAnalyser
+ANALYSER_CMDS = {}
+ANALYSER_CMDS['pass'] = print
+ANALYSER_CMDS['vectormap'] = basics.plot_3d_vectormap
+ANALYSER_CMDS['vectormap_mayavi'] = plotter.plot_3d_vectormap_mayavi
+ANALYSER_CMDS['vectormap_video'] = lambda analyser: save_3d_animation(analyser, plot_function=basics.plot_3d_vectormap, guidance=True, i_worker=I_WORKER, N_workers=N_WORKERS)
+ANALYSER_CMDS['vectormap_oldvideo'] = lambda analyser: plotter.plot_3d_vectormap(analyser, animation=True)
+ANALYSER_CMDS['magtrace'] = basics.plot_1d_magnitude
+ANALYSER_CMDS['2d_vectormap'] = basics.plot_2d_vectormap
+ANALYSER_CMDS['trajectories'] = plotter.plot_2d_trajectories
+ANALYSER_CMDS['2dmagnitude'] = plotter.plotMagnitude2D
+
+
+# Analyser + image_folder
+#ANALYSER_CMDS['1dmagnitude'] = plotter.plot_1d_magnitude_from_folder
+
+ANALYSER_CMDS['moving_rois_video'] = illustrate_experiments.moving_rois
+ANALYSER_CMDS['illustrate_experiments_video'] = illustrate_experiments.illustrate_experiments
+ANALYSER_CMDS['rotation_mosaic'] = illustrate_experiments.rotation_mosaic
+ANALYSER_CMDS['norpa_rescue_manyrepeats'] = norpa_rescue_manyrepeats
+ANALYSER_CMDS['compare_paired'] = cli_group_and_compare
+ANALYSER_CMDS['lr_displacements'] = lambda analyser: reports.left_right_displacements(analyser, 'test')
+ANALYSER_CMDS['left_right_summary'] = reports.left_right_summary
+ANALYSER_CMDS['pdf_summary'] = reports.pdf_summary
+
+rotations = np.linspace(-180,180, 360)
+ANALYSER_CMDS['flow_analysis_yaw'] = lambda analyser: complete_flow_analysis(analyser, rotations, 'yaw')
+ANALYSER_CMDS['flow_analysis_roll'] = lambda analyser: complete_flow_analysis(analyser, rotations, 'roll')
+ANALYSER_CMDS['flow_analysis_pitch'] = lambda analyser: complete_flow_analysis(analyser, rotations, 'pitch')
+
+ANALYSER_CMDS['error_at_flight'] = error_at_flight
+
+ANALYSER_CMDS['export_vectormap'] = lambda analyser: analyser.export_3d_vectors()
+
+
+# Functions that take two input arguments;
+# MAnalyser object and the name of the imagefolder, in this order
+IMAGEFOLDER_CMDS = {}
+IMAGEFOLDER_CMDS['magtrace'] = basics.plot_1d_magnitude
+
+
+# Functions that take two manalyser as input arguments
+DUALANALYSER_CMDS = {}
+DUALANALYSER_CMDS['difference'] = basics.plot_3d_differencemap
+DUALANALYSER_CMDS['compare'] = basics.compare_3d_vectormaps
+DUALANALYSER_CMDS['compare_compact'] = basics.compare_3d_vectormaps_compact
+DUALANALYSER_CMDS['compare_manyviews'] = basics.compare_3d_vectormaps_manyviews
+
+DUALANALYSER_CMDS['difference_video'] = lambda analyser1, analyser2: save_3d_animation([analyser1, analyser2],
+ plot_function=basics.plot_3d_differencemap, guidance=False, hide_axes=True, colorbar=False, hide_text=True,
+ i_worker=I_WORKER, N_workers=N_WORKERS)
+
+# Manyviews videos
+for animation_type in ['rotate_plot', 'rotate_arrows', 'pitch_rot', 'yaw_rot', 'roll_rot']:
+ DUALANALYSER_CMDS['compare_manyviews_{}_video'.format(animation_type.replace('_',''))] = lambda an1, an2, at=animation_type: save_3d_animation([an1, an2], plot_function=basics.compare_3d_vectormaps_manyviews, animation_type=at)
+
+
+# Functions that take in a list of manalysers (first positional argument)
+MULTIANALYSER_CMDS = {}
+MULTIANALYSER_CMDS['magnitude_probability'] = basics.plot_magnitude_probability
+MULTIANALYSER_CMDS['moving_rois_mosaic'] = illustrate_experiments.moving_rois_mosaic
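+
+# A dispatch sketch (not part of the original module): commands are looked
+# up by name and called with the analyser(s) as arguments. Here "analyser"
+# is assumed to be an MAnalyser instance:
+#
+#   ANALYSER_CMDS['vectormap'](analyser)
+#   DUALANALYSER_CMDS['difference'](analyser1, analyser2)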
+
diff --git a/gonio-analysis/gonioanalysis/drosom/analysing.py b/gonio-analysis/gonioanalysis/drosom/analysing.py
new file mode 100644
index 0000000..8010bf8
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/analysing.py
@@ -0,0 +1,1754 @@
+'''
+Analysing motion from goniometrically measured image series.
+
+-------
+Classes
+-------
+ MAnalyser
+        Main programmatic interface to process and interact with imaging data
+ produced by gonio-imsoft
+
+ MAverager
+ Takes in many MAnalysers to generate a mean specimen.
+ Only implements some of MAnalyser methods
+
+ VectorGettable
+ Internal, caches results for better performance
+
+
+'''
+
+import os
+import json
+import ast
+import math
+import datetime
+import warnings
+
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy.spatial import cKDTree as KDTree
+
+from gonioanalysis.drosom.loading import load_data, angles_from_fn, arange_fns
+from gonioanalysis.coordinates import camera2Fly, camvec2Fly, rotate_about_x, nearest_neighbour, mean_vector, optimal_sampling
+from gonioanalysis.directories import ANALYSES_SAVEDIR, PROCESSING_TEMPDIR
+from gonioanalysis.rotary_encoders import to_degrees, step2degree, DEFAULT_STEPS_PER_REVOLUTION
+
+from roimarker import Marker
+from movemeter import Movemeter
+
+
+
+def vertical_filter_points(points_3d, vertical_lower=None, vertical_upper=None, reverse=False):
+    '''
+    Takes in 3D points and returns a 1D True/False array of length len(points_3d).
+    '''
+
+ verticals = np.degrees(np.arcsin(points_3d[:,2]/ np.cos(points_3d[:,0]) ))
+
+ for i_point in range(len(points_3d)):
+ if points_3d[i_point][1] < 0:
+ if verticals[i_point] > 0:
+ verticals[i_point] = 180-verticals[i_point]
+ else:
+ verticals[i_point] = -180-verticals[i_point]
+
+
+    booleans = np.ones(len(points_3d), dtype=bool)
+ if vertical_lower is not None:
+ booleans = booleans * (verticals > vertical_lower)
+ if vertical_upper is not None:
+ booleans = booleans * (verticals < vertical_upper)
+
+ if reverse:
+ booleans = np.invert(booleans)
+
+ return booleans
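+
+# A usage sketch (not part of the original module): keep only points in
+# the upper visual field (vertical angle above 0 degrees):
+#
+#   points_3d = np.array([camera2Fly(h, v) for h, v in [(0, -20), (0, 40)]])
+#   mask = vertical_filter_points(points_3d, vertical_lower=0)
+#   upper = points_3d[mask]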
+
+
+
+class ShortNameable:
+ '''
+ Inheriting this class adds getting and setting
+ short_name attribute, and style for matplotlib text.
+ '''
+
+ def get_short_name(self):
+ '''
+        Returns the short_name of the object or an empty string if the short_name
+        has not been set.
+ '''
+ try:
+ return self.short_name
+ except AttributeError:
+ return ''
+
+ def set_short_name(self, short_name):
+ self.short_name = short_name
+
+
+
+class SettingAngleLimits:
+
+ def __init__(self):
+ self.va_limits = [None, None]
+ self.ha_limits = [None, None]
+ self.alimits_reverse = False
+
+ def set_angle_limits(self, va_limits=(None, None), reverse=False):
+ '''
+ Limit get_3d_vectors
+
+ All units in degrees.
+ '''
+ self.va_limits = va_limits
+ self.alimits_reverse = reverse
+
+
+
+class VectorGettable:
+ '''
+ Inheriting this class grants abilities to get vectors and caches results
+ for future use, minimizing computational time penalty when called many times.
+ '''
+
+ def __init__(self):
+ self.cached = {}
+
+
+
+    def _get(self, key, *args, **kwargs):
+        '''
+        Return the cached result for the given key and arguments,
+        computing and caching it first if needed.
+        '''
+        dkey = key
+        for arg in args:
+            dkey += str(arg)
+        for kw, val in kwargs.items():
+            dkey += '{}{}'.format(kw, val)
+
+        try:
+            self.cached[dkey]
+        except KeyError:
+            self.cached[dkey] = self._get_3d_vectors(*args, **kwargs)
+        return self.cached[dkey]
+
+
+ def get_3d_vectors(self, *args, **kwargs):
+ '''
+ Returns the sampled points and cartesian 3D-vectors at these points.
+ '''
+ #return self._get('3d_vectors', *args, **kwargs)
+ return self._get_3d_vectors(*args, **kwargs)
+
+
+
+class Discardeable():
+    '''
+    Inheriting this class and initializing it grants the ability to
+    discard (exclude) individual recordings.
+    '''
+ def __init__(self):
+ self.discard_savefn = os.path.join(PROCESSING_TEMPDIR, 'Manalyser', 'discarded_recordings',
+ 'discarded_{}.json'.format(self.folder))
+
+        os.makedirs(os.path.dirname(self.discard_savefn), exist_ok=True)
+
+ self.load_discarded()
+
+
+ def discard_recording(self, image_folder, i_repeat):
+ '''
+ Discard a recording
+
+ image_folder Is pos-folder
+ i_repeat From 0 to n, or 'all' to discard all repeats
+ '''
+ if image_folder not in self.discarded_recordings.keys():
+ self.discarded_recordings[image_folder] = []
+
+        self.discarded_recordings[image_folder].append(i_repeat)
+
+
+ def is_discarded(self, image_folder, i_repeat):
+        '''
+        Checks if the image_folder and i_repeat pair is discarded.
+        '''
+ if image_folder in self.discarded_recordings.keys():
+ if i_repeat in self.discarded_recordings[image_folder] or 'all' in self.discarded_recordings[image_folder]:
+ return True
+ return False
+
+
+ def save_discarder(self):
+ '''
+ Save discarded recordings.
+
+ This has to be called manually ( not called at self.discard_recording() )
+ '''
+ with open(self.discard_savefn, 'w') as fp:
+ json.dump(self.discarded_recordings, fp)
+
+
+ def load_discarded(self):
+ '''
+        Load discard data from disk, or if it does not exist, initialize
+        self.discarded_recordings
+
+ Is called at __init__
+ '''
+ if os.path.exists(self.discard_savefn):
+ with open(self.discard_savefn, 'r') as fp:
+ self.discarded_recordings = json.load(fp)
+ else:
+ self.discarded_recordings = {}
+
+
+
+class MAnalyser(VectorGettable, SettingAngleLimits, ShortNameable):
+ '''
+ Cross-correlation analysis of DrosoM data, saving and loading, and getting
+ the analysed data out.
+
+ ------------------
+    Input argument naming conventions
+ ------------------
+ - specimen_name
+ - recording_name
+
+ -----------
+ Attributes
+ -----------
+ - self.movements self.movements[eye][angle][i_repeat][x/y/time]
+ where eye = "left" or "right"
+ angle = recording_name.lstrip('pos'), so for example angle="(0, 0)_uv"
+
+
+ eyes : tuple of strings
+ By default, ("left", "right")
+
+ active_analysis : string
+ Name of the active analysis. Sets MOVEMENTS_SAVEFN
+
+ vector_rotation : float or None
+ Rotation of 2D vectors (affects 3D)
+
+ '''
+
+ def __init__(self, data_path, folder, clean_tmp=False, no_data_load=False):
+ '''
+ INPUT ARGUMENTS DESCRIPTION
+ data_path directory where DrosoM folder lies
+ folder Name of the DrosoM folder, for example "DrosoM1"
+ no_data_load Skip loading data in the constructor
+ '''
+ super().__init__()
+ #Discardeable().__init__()
+
+ self.ROIs = None
+
+
+ self.data_path = data_path
+ self.folder = folder
+
+
+ # Skip image_folders. i_repeat
+ self.imagefolder_skiplist = {}
+
+
+ self.manalysers = [self]
+ self.eyes = ("left", "right")
+ self.vector_rotation = None
+
+ self._rois_skelefn = 'rois_{}{}.json' # specimen_name active_analysis
+ self._movements_skelefn = 'movements_{}_{}{}.json' # specimen_name, eye, active_analysis
+
+ self.skiplist_savefn = os.path.join(PROCESSING_TEMPDIR, 'MAnalyser_data', folder, 'imagefolder_skiplist.json')
+ self.CROPS_SAVEFN = os.path.join(PROCESSING_TEMPDIR, 'MAnalyser_data', folder, self._rois_skelefn.format(folder, ''))
+ self.MOVEMENTS_SAVEFN = os.path.join(PROCESSING_TEMPDIR, 'MAnalyser_data', folder, self._movements_skelefn.format(folder, '{}', ''))
+
+ self.LINK_SAVEDIR = os.path.join(PROCESSING_TEMPDIR, 'MAnalyser_data', folder, 'linked_data')
+
+
+ self.active_analysis = ''
+
+
+
+ if no_data_load:
+            # no_data_load was specified, skip all data loading
+ pass
+
+ # Python dictionary for linked data
+ self.linked_data = {}
+
+
+ else:
+ self.stacks = load_data(os.path.join(self.data_path, self.folder))
+
+ if os.path.isfile(self.skiplist_savefn):
+ with open(self.skiplist_savefn, 'r') as fp:
+ self.imagefolder_skiplist = json.load(fp)
+
+
+ # Load movements and ROIs if they exists
+ if self.are_rois_selected():
+ self.load_ROIs()
+
+ if self.is_measured():
+ self.load_analysed_movements()
+
+ self.antenna_level_correction = self._getAntennaLevelCorrection(folder)
+
+ self.load_linked_data()
+
+ # Ensure the directories where the crops and movements are saved exist
+ os.makedirs(os.path.dirname(self.CROPS_SAVEFN), exist_ok=True)
+ os.makedirs(os.path.dirname(self.MOVEMENTS_SAVEFN), exist_ok=True)
+
+
+        # For caching frequently used data
+        self.cached = {}
+
+ self.stop_now = False
+ self.va_limits = [None, None]
+ self.ha_limits = [None, None]
+ self.alimits_reverse = False
+
+ # If receptive fields == True then give out receptive field
+ # movement directions instead of DPP movement directions
+ self.receptive_fields = False
+
+
+ @property
+ def name(self):
+ return self.folder
+
+ @name.setter
+ def name(self, name):
+ self.folder=name
+
+
+ @property
+ def active_analysis(self):
+ if self.__active_analysis == '':
+ return 'default'
+ else:
+ return self.__active_analysis
+
+
+ @active_analysis.setter
+ def active_analysis(self, name):
+ '''
+ Setting active analysis sets self.MOVEMENTS_SAVEFN.
+
+ name : string
+ The default is "default"
+ '''
+
+ if name == 'default':
+ name = ''
+
+ if name == '':
+ self.CROPS_SAVEFN = os.path.join(PROCESSING_TEMPDIR, 'MAnalyser_data', self.folder, self._rois_skelefn.format(self.folder, ''))
+ self.MOVEMENTS_SAVEFN = os.path.join(PROCESSING_TEMPDIR, 'MAnalyser_data', self.folder, self._movements_skelefn.format(self.folder, '{}', ''))
+ else:
+ self.CROPS_SAVEFN = os.path.join(PROCESSING_TEMPDIR, 'MAnalyser_data', self.folder, self._rois_skelefn.format(self.folder, '_'+name))
+ self.MOVEMENTS_SAVEFN = os.path.join(PROCESSING_TEMPDIR, 'MAnalyser_data', self.folder, self._movements_skelefn.format(self.folder, '{}', '_'+name))
+
+ if self.are_rois_selected():
+ self.load_ROIs()
+ else:
+ try:
+ del self.ROIs
+ except AttributeError:
+ pass
+ if self.is_measured():
+ self.load_analysed_movements()
+ else:
+ try:
+ del self.movements
+ except AttributeError:
+ pass
+
+ self.__active_analysis = name
+
+
+
+
+ def list_analyses(self):
+ '''
+ Returns a list of analysis names that exist.
+ '''
+
+ manalyser_dir = os.path.dirname(self.MOVEMENTS_SAVEFN)
+
+ if os.path.isdir(manalyser_dir):
+ fns = [fn for fn in os.listdir(manalyser_dir) if
+ self._movements_skelefn.split('{')[0] in fn and
+ self.eyes[0] in ''.join(fn.split('_')[-2:])]
+ else:
+ fns = []
+
+
+ names = []
+
+ for fn in fns:
+ secondlast, last = fn.split('.')[0].split('_')[-2:]
+
+ if secondlast in self.eyes and last not in self.eyes:
+ names.append(last)
+ elif secondlast not in self.eyes and last in self.eyes:
+ names.append('default')
+ else:
+                warnings.warn('FIXME: list_analyses in MAnalyser')
+
+ return names
+
+
+ def __fileOpen(self, fn):
+ with open(fn, 'r') as fp:
+ data = json.load(fp)
+ return data
+
+
+ def __fileSave(self, fn, data):
+ with open(fn, 'w') as fp:
+ json.dump(data, fp)
+
+
+ def mark_bad(self, image_folder, i_repeat):
+ '''
+ Marks image folder and repeat to be bad and excluded
+ when loading movements.
+
+ i_repeat : int or 'all'
+ '''
+
+ if self.imagefolder_skiplist.get(image_folder, None) is None:
+ self.imagefolder_skiplist[image_folder] = []
+
+ self.imagefolder_skiplist[image_folder].append(i_repeat)
+
+ with open(self.skiplist_savefn, 'w') as fp:
+ json.dump(self.imagefolder_skiplist, fp)
+
+
+
+ def list_rotations(self, list_special=True, special_separated=False,
+ horizontal_condition=None, vertical_condition=None,
+ _return_imagefolders=False):
+ '''
+ List all the imaged vertical-horizontal pair rotations.
+
+ Arguments
+ ---------
+ list_special : bool
+ If false, include only rotations whose folders have no suffix
+ special_separated : bool
+            If true, return standard and special image folders separately.
+ horizontal_condition : callable or None
+ A callable, that when supplied with horizontal (in steps),
+ returns either True (includes) or False (excludes).
+ vertical_condition : callable or None
+ Same as horizontal condition but for vertical rotation
+
+ Returns
+ -------
+ rotations : list of tuples
+ List of rotations.
+ '''
+
+ def check_conditions(vertical, horizontal):
+ if callable(horizontal_condition):
+ if not horizontal_condition(horizontal):
+ return False
+ if callable(vertical_condition):
+ if not vertical_condition(vertical):
+ return False
+ return True
+
+ standard = []
+ special = []
+
+ for key in self.stacks.keys():
+ try:
+ horizontal, vertical = ast.literal_eval(key)
+ if check_conditions(vertical, horizontal) == False:
+ continue
+
+            except (SyntaxError, ValueError):
+                # This is a special folder, ie. with a suffix or something else.
+                # Try to get the angles anyhow
+                horizontal, vertical = (None, None)
+
+                splitted = key.replace('(', ')').split(')')
+                if len(splitted) == 3:
+                    try:
+                        horizontal, vertical = splitted[1].replace(' ', '').split(',')
+                        horizontal = int(horizontal)
+                        vertical = int(vertical)
+
+                        if check_conditions(vertical, horizontal) == False:
+                            continue
+                    except ValueError:
+                        # The angles could not be parsed from the suffix
+                        pass
+
+                if _return_imagefolders:
+                    special.append('pos'+key)
+                else:
+                    special.append((horizontal, vertical))
+                continue
+
+ if _return_imagefolders:
+ standard.append('pos'+key)
+ else:
+ standard.append((horizontal, vertical))
+
+ if not list_special:
+ special = []
+
+ if special_separated:
+ return standard, special
+ else:
+ return standard + special
+
+
+ def list_imagefolders(self, endswith='', only_measured=False, **kwargs):
+ '''
+ Returns a list of the image folders (specimen subfolders that contain
+ the images).
+
+ Arguments
+ ---------
+ only_measured : bool
+ Return only image_folders with completed movement analysis
+
+ See list_rotations for other allowed keyword arguments.
+
+ Returns
+ -------
+ image_folders : list of strings
+ '''
+
+ image_folders, special_image_folders = self.list_rotations(
+ special_separated=True,
+ _return_imagefolders=True,
+ **kwargs)
+
+ all_folders = [fn for fn in sorted(image_folders) + sorted(special_image_folders) if fn.endswith(endswith)]
+
+ if only_measured:
+ all_folders = [fn for fn in all_folders if self.folder_has_movements(fn)]
+
+ return all_folders
+
+
+    def get_horizontal_vertical(self, image_folder, degrees=True):
+        '''
+        Tries to return the horizontal and vertical rotations for an image folder.
+
+        image_folder    Name of the image folder
+        degrees         If true, return in degrees (otherwise in steps)
+        '''
+        # Trusting that ( and ) are only reserved for the angle
+        splitted = image_folder.replace('(', ')').split(')')
+        if len(splitted) == 3:
+            horizontal, vertical = splitted[1].replace(' ', '').split(',')
+            horizontal = int(horizontal)
+            vertical = int(vertical)
+
+            if degrees:
+                return step2degree(horizontal), step2degree(vertical)
+            else:
+                return horizontal, vertical
+
+
+ def get_specimen_directory(self):
+ return os.path.join(self.data_path, self.folder)
+
+
+ def list_images(self, image_folder, absolute_path=False):
+ '''
+ List all image filenames in an image folder
+
+ FIXME: Alphabetical order not right because no zero padding
+
+ image_folder Name of the image folder
+ absolute_path If true, return filenames with absolute path instead of relative
+
+ '''
+
+ fns = [fn for fn in os.listdir(os.path.join(self.data_path, self.folder, image_folder)) if fn.endswith('.tiff') or fn.endswith('.tif')]
+
+ fns = arange_fns(fns)
+
+ if absolute_path:
+ fns = [os.path.join(self.data_path, self.folder, image_folder, fn) for fn in fns]
+
+ return fns
+
+
+ def get_specimen_name(self):
+ '''
+ Return the name of the data (droso) folder, such as DrosoM42
+ '''
+ return self.folder
+
+
+ @staticmethod
+ def get_imagefolder(image_fn):
+ '''
+ Gets the name of the folder where an image lies, for example
+ /a/b/c/image -> c
+
+ based on the image filename.
+ '''
+ return os.path.split(os.path.dirname(image_fn))[1]
+
+
+ @staticmethod
+ def _getAntennaLevelCorrection(fly_name):
+ fn = os.path.join(ANALYSES_SAVEDIR, 'antenna_levels', fly_name+'.txt')
+
+ if os.path.exists(fn):
+ with open(fn, 'r') as fp:
+ antenna_level_offset = float(fp.read())
+ else:
+ antenna_level_offset = False
+
+ return antenna_level_offset
+
+
+ def _correctAntennaLevel(self, angles):
+ '''
+ angles In degrees, tuples, (horizontal, pitch)
+ '''
+ if self.antenna_level_correction != False:
+ for i in range(len(angles)):
+ angles[i][1] -= self.antenna_level_correction
+
+ return angles
+
+
+ def get_antenna_level_correction(self):
+ '''
+ Return the antenna level correction or if no correction exists, False.
+ '''
+ return self._getAntennaLevelCorrection(self.folder)
+
+
+ def get_imaging_parameters(self, image_folder):
+ '''
+ Returns a dictionary of the Gonio Imsoft imaging parameters.
+ The dictionary is empty if the descriptions file is missing.
+
+ image_folder : string
+ '''
+
+ parameters = {}
+
+
+ fn = os.path.join(self.data_path, self.folder, image_folder, 'description.txt')
+
+ if not os.path.isfile(fn):
+ # Fallback for older Imsoft data where only
+ # one descriptions file for each imaging
+ old_fn = os.path.join(self.data_path, self.folder, self.folder+'.txt')
+ if os.path.isfile(old_fn):
+ fn = old_fn
+ else:
+ return {}
+
+
+ with open(fn, 'r') as fp:
+ for line in fp:
+ if line.startswith('#') or line in ['\n', '\r\n']:
+ continue
+                split = line.strip('\n\r').split(' ')
+
+                if len(split) >= 2:
+                    parameters[split[0]] = split[1]
+                else:
+                    parameters[split[0]] = ''
+
+ return parameters
+
+ def get_specimen_age(self):
+ '''
+        Returns the age of the specimen, or None if unknown.
+
+ If many age entries uses the latest for that specimen.
+ '''
+
+ try:
+ self.descriptions_file
+ except AttributeError:
+ self.descriptions_file = self._load_descriptions_file()
+
+ for line in self.descriptions_file[::-1]:
+ if line.startswith('age '):
+                return line[len('age '):].strip()
+
+ return None
+
+
+ def get_specimen_sex(self):
+ '''
+        Returns the sex of the specimen, or None if unknown.
+
+ If many sex entries uses the latest for that specimen.
+ '''
+
+ try:
+ self.descriptions_file
+ except AttributeError:
+ self.descriptions_file = self._load_descriptions_file()
+
+ for line in self.descriptions_file[::-1]:
+ if line.startswith('sex '):
+                return line[len('sex '):].strip()
+
+ return None
+
+
+    def get_imaging_frequency(self, image_folder):
+        '''
+        Return the imaging frequency (images per second) for an image folder
+        by searching for the frame_length field in the descriptions file.
+
+        Returns a fallback value of 100.0 if the imaging frequency could not
+        be determined.
+        '''
+        fs = self.get_imaging_parameters(image_folder).get('frame_length', None)
+
+        if fs is None:
+            # FIXME: A guessed fallback value; the real frequency is unknown
+            return 100.
+        else:
+            return 1/float(fs)
+
+    def get_pixel_size(self, image_folder):
+        '''
+        Return the pixel size of the imaging in micrometers per pixel.
+        Currently always returns the same static value (1/1.22376).
+        '''
+        # Based on the stage micrometer;
+        # 0.8 mm in the images is 979 pixels
+        return 1/1.22376
+
+
+ def get_rotstep_size(self):
+ '''
+ Returns how many degrees one rotation encoder step was
+ (the return value * steps == rotation in degrees)
+ '''
+ return 360/DEFAULT_STEPS_PER_REVOLUTION
+
+
+ def get_snap_fn(self, i_snap=0, absolute_path=True):
+ '''
+ Returns the first snap image filename taken (or i_snap'th if specified).
+
+        Many times I took a snap image of the fly at (0,0) horizontal/vertical, so this
+ can be used as the "face photo" of the fly.
+ '''
+
+ snapdir = os.path.join(self.data_path, self.folder, 'snaps')
+ fns = [fn for fn in os.listdir(snapdir) if fn.endswith('.tiff')]
+ fns.sort()
+
+ if absolute_path:
+ fns = [os.path.join(snapdir, fn) for fn in fns]
+
+ return fns[i_snap]
+
+
+ def load_ROIs(self):
+ '''
+ Load ROIs (selected before) for the left/right eye.
+
+ INPUT ARGUMENTS DESCRIPTION
+ eye 'left' or 'right'
+
+ DETAILS
+        While selecting ROIs, both eyes are selected simultaneously. There's
+        no explicit information about which eye each selected ROI belongs to.
+        Here we reconstruct the distinction to left/right in the following way:
+ 1 ROI: horizontal angle determines
+ 2 ROIs: being left/right in the image determines
+
+ Notice that this means that when the horizontal angle is zero (fly is facing towards the camera),
+ image rotation has to be so that the eyes are on image's left and right halves.
+ '''
+
+ self.ROIs = {'left': {}, 'right': {}}
+
+ with open(self.CROPS_SAVEFN, 'r') as fp:
+ marker_markings = json.load(fp)
+
+ for image_fn, ROIs in marker_markings.items():
+
+            # Since we use relative filenames in the ROIs savefile
+ image_fn = os.path.join(self.data_path, self.folder, image_fn)
+
+ # ROIs smaller than 7 pixels a side are not loaded
+ good_rois = []
+ for i_roi in range(len(ROIs)):
+ if not (ROIs[i_roi][2] < 7 and ROIs[i_roi][3] < 7):
+ good_rois.append(ROIs[i_roi])
+ ROIs = good_rois
+
+ pos = self.get_imagefolder(image_fn)
+ try:
+ horizontal, pitch = angles_from_fn(pos)
+ except:
+ horizontal, pitch = (0, 0)
+ pos = pos[3:]
+
+            # The ROI belonging to the left/right eye is determined solely by
+            # the horizontal angle when only 1 ROI exists for the position
+ if len(ROIs) == 1:
+
+ if horizontal > 0:
+ self.ROIs['left'][pos] = ROIs[0]
+ else:
+ self.ROIs['right'][pos] = ROIs[0]
+
+ # If there's two ROIs
+ elif len(ROIs) == 2:
+
+ if ROIs[0][0] > ROIs[1][0]:
+ self.ROIs['left'][pos] = ROIs[0]
+ self.ROIs['right'][pos] = ROIs[1]
+ else:
+ self.ROIs['left'][pos]= ROIs[1]
+ self.ROIs['right'][pos] = ROIs[0]
+
+ elif len(ROIs) > 2:
+ print('Warning. len(ROIs) == {} for {}'.format(len(ROIs), image_fn))
+
+ self.N_folders_having_rois = len(marker_markings)
+
+
+
+ def select_ROIs(self, **kwargs):
+ '''
+ Selecting the ROIs from the loaded images.
+ Currently, only the first frame of each recording is shown.
+
+ kwargs Passed to the marker constructor
+ '''
+
+ to_cropping = [stacks[0][0] for str_angles, stacks in self.stacks.items()]
+
+ fig, ax = plt.subplots()
+ marker = Marker(fig, ax, to_cropping, self.CROPS_SAVEFN,
+ relative_fns_from=os.path.join(self.data_path, self.folder), **kwargs)
+ marker.run()
+
+
+ def are_rois_selected(self):
+ '''
+ Returns True if a file for crops/ROIs is found.
+ '''
+ return os.path.exists(self.CROPS_SAVEFN)
+
+
+ def count_roi_selected_folders(self):
+ '''
+ Returns the number of imagefolders that have ROIs selected
+ '''
+ if self.are_rois_selected():
+ return self.N_folders_having_rois
+ else:
+ return 0
+
+
+ def folder_has_rois(self, image_folder):
+ '''
+        Returns True if at least one ROI exists for the specified
+        image_folder. Otherwise False.
+ '''
+ try:
+ self.ROIs
+ except AttributeError:
+ return False
+
+ if self.get_rois(image_folder) != []:
+ return True
+
+ return False
+
+
+ def get_rois(self, image_folder):
+ rois = []
+ for eye in ['left', 'right']:
+ try:
+ roi = self.ROIs[eye][image_folder[3:]]
+ rois.append(roi)
+ except:
+ continue
+ return rois
+
+
+ def is_measured(self):
+ '''
+ Returns (True, True) if analyseMovement results can be found for the fly and bot eyes.
+ '''
+ return all((os.path.exists(self.MOVEMENTS_SAVEFN.format('left')), os.path.exists(self.MOVEMENTS_SAVEFN.format('right'))))
+
+
+
+ def folder_has_movements(self, image_folder):
+        '''
+        Returns True if the specified image_folder has movements
+        measured. Otherwise False.
+        '''
+        try:
+            self.movements
+        except AttributeError:
+            return False
+
+        if any(image_folder[3:] in self.movements[eye].keys() for eye in ['left', 'right']):
+ if len(self.get_displacements_from_folder(image_folder)) > 0:
+ return True
+ return False
+
+
+ def load_analysed_movements(self):
+ self.movements = {}
+ with open(self.MOVEMENTS_SAVEFN.format('right'), 'r') as fp:
+ self.movements['right'] = json.load(fp)
+ with open(self.MOVEMENTS_SAVEFN.format('left'), 'r') as fp:
+ self.movements['left'] = json.load(fp)
+
+ if self.imagefolder_skiplist:
+
+ for image_folder, skip_repeats in self.imagefolder_skiplist.items():
+ for eye in self.eyes:
+
+ if self.movements[eye].get(image_folder[3:], None) is None:
+ continue
+
+ # Iterate repeats reversed so we can just pop things
+ for i_repeat in sorted(skip_repeats)[::-1]:
+ self.movements[eye][image_folder[3:]].pop(i_repeat)
+
+
+
+
+ def measure_both_eyes(self, **kwargs):
+ '''
+ Wrapper to self.measure_movement() for both left and right eyes.
+ '''
+ for eye in ['left', 'right']:
+ self.measure_movement(eye, **kwargs)
+
+
+ def measure_movement(self, eye, only_folders=None,
+ max_movement=30, absolute_coordinates=False, join_repeats=False,
+ stop_event=None):
+ '''
+ Performs cross-correlation analysis for the selected ROIs (regions of interest)
+ using Movemeter package.
+
+        If ROIs haven't been selected, calls the method self.select_ROIs.
+ Movements are saved into a tmp directory.
+
+ INPUT ARGUMENTS DESCRIPTION
+ eye 'left' or 'right'
+ only_folders Analyse only image folders in the given list (that is only_folders).
+ max_movement Maximum total displacement in x or y expected. Lower values faster.
+ absolute_coordinates Return movement values in absolute image coordinates
+ join_repeats Join repeats together as if they were one long recording.
+ stop_event None or threading.Event for stopping the movement measurement
+
+
+ Cross-correlation analysis is the slowest part of the DrosoM pipeline.
+ '''
+
+ self.movements = {}
+
+        if not os.path.exists(self.CROPS_SAVEFN):
+            self.select_ROIs()
+ self.load_ROIs()
+
+
+ angles = []
+ stacks = []
+ ROIs = []
+
+ if not self.ROIs[eye] == {}:
+
+ for angle in self.stacks:
+ #if angle in str(self.ROIs[eye].keys()):
+
+ # Continue if no ROI for this eye exists
+                try:
+ self.ROIs[eye][angle]
+ except KeyError:
+ continue
+
+ # Continue if only_folders set and the angle is not in
+ # the only folders
+                if only_folders and 'pos'+angle not in only_folders:
+ continue
+
+ if join_repeats:
+ fused = []
+ for i_repetition in range(len(self.stacks[angle])):
+ fused += self.stacks[angle][i_repetition]
+
+ self.stacks[angle] = [fused]
+
+ for i_repetition in range(len(self.stacks[angle])):
+ angles.append(angle)
+ stacks.append( self.stacks[angle][i_repetition] )
+ ROIs.append( [self.ROIs[eye][angle]] )
+
+
+ if ROIs == []:
+ return None
+
+
+ # Old upscale was 4
+ meter = Movemeter(upscale=10, absolute_results=absolute_coordinates)
+ meter.set_data(stacks, ROIs)
+
+ for stack_i, angle in enumerate(angles):
+
+ if stop_event and stop_event.is_set():
+ self.stop_now = True
+
+ if self.stop_now:
+ self.stop_now = False
+ self.movements = {}
+ print('{} EYE CANCELLED'.format(eye.upper()))
+ return None
+
+
+ print('Analysing {} eye, motion from position {}, done {}/{} for this eye'.format(eye.upper(), angle, stack_i+1, len(ROIs)))
+
+ print("Calculating ROI's movement...")
+ x, y = meter.measure_movement(stack_i, max_movement=max_movement)[0]
+
+ print('Done.')
+
+ try:
+ self.movements[angle]
+ except KeyError:
+ self.movements[angle] = []
+
+ tags = meter.get_metadata(stack_i)['Image ImageDescription'].values.split('"')
+
+ # GonioImsoft start time tag in the images
+ if 'start_time' in tags:
+ time = tags[tags.index('start_time') + 2]
+ else:
+ time = None
+
+ self.movements[angle].append({'x': x, 'y':y, 'time': time})
+
+ else:
+ self.movements = {}
+
+ # If only_folders set ie. only some angles were (re)measured,
+ # load previous movements also for saving
+ if only_folders:
+ with open(self.MOVEMENTS_SAVEFN.format(eye), 'r') as fp:
+ previous_movements = json.load(fp)
+
+ # Update previous movements with the new movements and set
+ # the updated previous movements to be the current movements
+ previous_movements.update(self.movements)
+ self.movements = previous_movements
+
+
+ # Save movements
+ with open(self.MOVEMENTS_SAVEFN.format(eye), 'w') as fp:
+ json.dump(self.movements, fp)
+
+
+
+
+
+ def get_time_ordered(self, angles_in_degrees=True, first_frame_only=False,
+ exclude_imagefolders=[]):
+ '''
+ Get images, ROIs and angles, ordered in recording time for movie making.
+
+ exclude_imagefolders : list
+ Imagefolders to exclude
+
+ Returns 3 lists: image_fns, ROIs, angles
+        '''
+ self.load_ROIs()
+
+ times_and_data = []
+ seen_angles = []
+
+ for eye in self.movements:
+ for angle in self.movements[eye]:
+
+ if 'pos'+angle in exclude_imagefolders:
+ continue
+
+                if angle not in seen_angles:
+ time = self.movements[eye][angle][0]['time']
+
+ fn = self.stacks[angle][0]
+ ROI = self.get_moving_ROIs(eye, angle)
+ deg_angle = [list(ast.literal_eval(angle.split(')')[0]+')' ))]
+
+ if angles_in_degrees:
+ to_degrees(deg_angle)
+
+ deg_angle = [deg_angle[0] for i in range(len(fn))]
+
+ times_and_data.append([time, fn, ROI, deg_angle])
+ seen_angles.append(angle)
+
+ # Everything gets sorted according to the time
+ times_and_data.sort(key=lambda x: x[0])
+
+ image_fns = []
+ ROIs = []
+ angles = []
+
+ if not first_frame_only:
+ for time, fns, ROI, angle in times_and_data:
+ image_fns.extend(fns)
+ ROIs.extend(ROI)
+ angles.extend(angle)
+ else:
+ for time, fns, ROI, angle in times_and_data:
+ image_fns.append(fns[0])
+ ROIs.append(ROI[0])
+ angles.append(angle[0])
+
+ return image_fns, ROIs, angles
+
+
+ def get_movements_from_folder(self, image_folder):
+        '''
+        Returns the movement data for the given image folder,
+        keyed by eye ("left"/"right").
+        '''
+ data = {}
+ for eye in ['left', 'right']:
+ try:
+ data[eye] = self.movements[eye][image_folder[3:]]
+ except KeyError:
+ pass
+
+ return data
+
+
+ def get_displacements_from_folder(self, image_folder):
+ '''
+ Returns a list of 1D numpy arrays, which give displacement
+ over time for each repeat.
+
+ If no displacement data, returns an empty list
+
+        Calculated from the separate (x,y) data
+ '''
+ displacements = []
+
+ for eye, data in self.get_movements_from_folder(image_folder).items():
+ for repetition_data in data:
+ x = repetition_data['x']
+ y = repetition_data['y']
+ mag = np.sqrt(np.asarray(x)**2 + np.asarray(y)**2)
+ displacements.append(mag)
+
+ return displacements
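+
+    # A usage sketch (hypothetical folder name): peak displacement of each
+    # repeat, in pixels:
+    #
+    #   mags = analyser.get_displacements_from_folder('pos(0, 0)')
+    #   peaks = [np.max(mag) for mag in mags]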
+
+
+ def get_raw_xy_traces(self, eye):
+ '''
+ Return angles, values
+ angles Each recorded fly orientation in steps
+ values X and Y
+ '''
+ angles = [list(ast.literal_eval(angle)) for angle in self.movements[eye]]
+ movement_dict = [self.movements[eye][str(angle)] for angle in angles]
+
+ return angles, movement_dict
+
+
+ def get_2d_vectors(self, eye, mirror_horizontal=True, mirror_pitch=True, correct_level=True, repeats_separately=False):
+ '''
+ Creates 2D vectors from the movements analysis data.
+ Vector start point: ROI's position at the first frame
+ Vector end point: ROI's position at the last frame
+
+        mirror_pitch    Mirror so that negative values are towards dorsal and positive towards frontal
+                        (this is how things were on DrosoX)
+ '''
+
+ # Make the order of angles deterministic
+ sorted_angle_keys = sorted(self.movements[eye])
+
+ angles = [list(ast.literal_eval(angle.split(')')[0]+')' )) for angle in sorted_angle_keys]
+
+
+ values = [self.movements[eye][angle] for angle in sorted_angle_keys]
+
+
+ to_degrees(angles)
+
+ if correct_level:
+ angles = self._correctAntennaLevel(angles)
+
+
+ if mirror_horizontal:
+ for i in range(len(angles)):
+ angles[i][0] *= -1
+ xdirchange = -1
+ else:
+ xdirchange = 1
+
+ if mirror_pitch:
+ for i in range(len(angles)):
+ angles[i][1] *= -1
+
+
+
+ # Vector X and Y components
+ # Fix here if repetitions are needed to be averaged
+ # (don't take only x[0] but average)
+ if repeats_separately:
+ tmp_angles = []
+ X = []
+ Y = []
+
+ for angle, val in zip(angles, values):
+ for repeat in val:
+ tmp_angles.append(angle)
+ X.append( xdirchange*(repeat['x'][-1]-repeat['x'][0]) )
+ Y.append( repeat['y'][-1]-repeat['y'][0] )
+
+ angles = tmp_angles
+
+ else:
+
+ X = [xdirchange*(x[0]['x'][-1]-x[0]['x'][0]) for x in values]
+ Y = [x[0]['y'][-1]-x[0]['y'][0] for x in values]
+
+
+
+
+ if self.receptive_fields:
+ X = [-x for x in X]
+ Y = [-y for y in Y]
+
+
+ if self.vector_rotation:
+ r = math.radians(self.vector_rotation)
+ for i in range(len(X)):
+ x = X[i]
+ y = Y[i]
+
+ if angles[i][1] > 0:
+ sr = -1 * r
+ else:
+ sr = 1 * r
+
+ if eye == 'left':
+ sr = sr
+ elif eye == 'right':
+ sr = -sr
+
+ X[i] = x * math.cos(sr) - y * math.sin(sr)
+ Y[i] = x * math.sin(sr) + y * math.cos(sr)
+
+
+ return angles, X, Y
+
+
+
+ def get_magnitude_traces(self, eye, image_folder=None,
+ mean_repeats=False, mean_imagefolders=False,
+ microns=False, _phase=False, _derivative=False):
+ '''
+ Get all movement magnitudes (sqrt(x**2+y**2)) from the specified eye.
+ The results are returned as a dictionary where the keys are the
+ angle pairs (self.movements keys)
+
+ eye : string or None
+ "left" or "right".
+ None leads to taking mean where eyes overlap
+ image_folder : string
+ If specified, return movements from this image folder.
+ Otherwise by default None, movements from all image folders.
+ mean_repeats : bool
+            Whether to take the mean over the repeats.
+        mean_imagefolders : bool
+            Only makes sense when image_folder is None
+ microns : bool
+ Call self.get_pixel_size(image_folder) to convert from
+ pixel units to micrometers.
+        _phase : bool
+            If true, return the phase in degrees instead.
+        _derivative : bool
+            If true, return the time derivative of the magnitude trace.
+
+ Returns
+ if mean_repeats == True
+ magnitude_traces = {angle_01: [mag_mean], ...}
+ if mean_repeats == False
+ magnitude_traces = {angle_01: [mag_rep1, mag_rep2,...], ...}
+
+ if mean_imagefolders, there's only one key 'mean'
+
+ '''
+ alleye_magnitude_traces = {}
+
+ if eye is None:
+ eyes = self.eyes
+ else:
+ eyes = [eye]
+
+ if image_folder is None:
+ movement_keys = set().union(*[list(self.movements[eye].keys()) for eye in eyes])
+ else:
+ movement_keys = [image_folder[3:]]
+
+ for eye in eyes:
+ magnitude_traces = {}
+ for angle in movement_keys:
+
+ if self.movements[eye].get(angle, None) is None:
+                    # Skip if no data for this eye at this angle
+ continue
+
+ magnitude_traces[angle] = []
+
+ for i_repeat in range(len(self.movements[eye][angle])):
+ x = self.movements[eye][angle][i_repeat]['x']
+ y = self.movements[eye][angle][i_repeat]['y']
+
+ if _phase:
+ mag = np.degrees(np.arctan2(y, -np.asarray(x)))
+ else:
+ mag = np.sqrt(np.asarray(x)**2 + np.asarray(y)**2)
+
+ if _derivative:
+ mag = np.diff(mag)
+
+ magnitude_traces[angle].append( mag )
+
+ if mean_repeats:
+ magnitude_traces[angle] = [np.mean(magnitude_traces[angle], axis=0)]
+
+ if magnitude_traces == {}:
+ # If nothing for this eye
+ continue
+
+ if mean_imagefolders:
+ tmp = np.mean([val for val in magnitude_traces.values()], axis=0)
+
+ magnitude_traces = {}
+ magnitude_traces['imagefoldersmean'] = tmp
+
+ alleye_magnitude_traces[eye] = magnitude_traces
+
+
+
+
+ if len(eyes) > 1:
+ merge = {}
+            # Merge (mean) eyes where one imagefolder holds data from both eyes
+
+ angles = [list(val.keys()) for val in alleye_magnitude_traces.values()]
+ angles = set().union(*angles)
+
+ for angle in angles:
+
+ data = [alleye_magnitude_traces.get(eye, {}).get(angle, None) for eye in eyes]
+ data = [d for d in data if d is not None]
+
+ merge[angle] = np.mean(data, axis=0)
+
+
+ magnitude_traces = merge
+
+ if microns and not _phase:
+ for image_folder in magnitude_traces:
+ pixel_size = self.get_pixel_size(image_folder)
+ magnitude_traces[image_folder] = [t*pixel_size for t in magnitude_traces[image_folder]]
+
+
+ return magnitude_traces
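+
+    # Usage sketch for get_magnitude_traces (the folder name is hypothetical):
+    #
+    #   traces = analyser.get_magnitude_traces('left',
+    #           image_folder='pos(0, 0)', mean_repeats=True)
+    #   for angle, (mag,) in traces.items():
+    #       print(angle, np.max(mag))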
+
+
+ def get_moving_ROIs(self, eye, angle, i_repeat=0):
+ '''
+        Returns a list of ROI positions over time (how the ROI moves).
+        Useful for visualization.
+ '''
+
+ moving_ROI = []
+
+ if not self.ROIs:
+ self.load_ROIs()
+
+ movements = self.movements[eye][angle][i_repeat]
+ rx,ry,rw,rh = self.ROIs[eye][angle]
+
+ for i in range(len(movements['x'])):
+ x = -movements['x'][i]
+ y = -movements['y'][i]
+
+ moving_ROI.append([rx+x,ry+y,rw,rh])
+ return moving_ROI
+
+
+ def _get_3d_vectors(self, eye, return_angles=False, correct_level=True, repeats_separately=False, normalize_length=0.1, strict=None, vertical_hardborder=None):
+        '''
+        Returns 3D vectors and their starting points.
+
+        correct_level Use the estimated antenna levels
+
+        Vertical angle limits are taken from self.va_limits, in degrees;
+        (None, None) means no limits.
+        '''
+ angles, X, Y = self.get_2d_vectors(eye, mirror_pitch=False, mirror_horizontal=True,
+ correct_level=False, repeats_separately=repeats_separately)
+
+
+ N = len(angles)
+
+ points = np.zeros((N,3))
+ vectors = np.zeros((N,3))
+
+
+ for i, (angle, x, y) in enumerate(zip(angles, X, Y)):
+ horizontal, pitch = angle
+
+ point0 = camera2Fly(horizontal, pitch)
+ point1 = camvec2Fly(x, y, horizontal, pitch, normalize=normalize_length)
+
+ if correct_level:
+ rotation = -self.antenna_level_correction
+ point0 = rotate_about_x(point0, rotation)
+ point1 = rotate_about_x(point1, rotation)
+
+ x0,y0,z0 = point0
+ x1,y1,z1 = point1
+
+ #vectors.append( (tuple(angle), (x0,x1), (y0,y1), (z0, z1)) )
+ points[i] = np.array(point0)
+ vectors[i] = np.array(point1) - points[i]
+
+ # Vertical/horizontal angle limiting
+ booleans = vertical_filter_points(points, vertical_lower=self.va_limits[0],
+ vertical_upper=self.va_limits[1], reverse=self.alimits_reverse)
+ points = points[booleans]
+ vectors = vectors[booleans]
+
+ if return_angles:
+ return points, vectors, angles
+ else:
+ return points, vectors
+
+
+ def get_recording_time(self, image_folder, i_rep=0):
+        '''
+        Returns the timestamp of a recording, if the measure_movement() method
+        has been run for the recording. If no time is found, returns None.
+
+        image_folder Name of the image folder (recording)
+        i_rep Return the time for recording repeat i_rep
+                By default, i_rep=0
+        '''
+
+        # Slice off the 'pos' prefix; lstrip('pos') would strip any leading
+        # 'p', 'o' and 's' characters, not just the prefix
+        angle = image_folder[3:]
+
+ for eye in ['left', 'right']:
+ try:
+ return self.movements[eye][angle][i_rep]['time']
+ except (KeyError, IndexError):
+ pass
+ except AttributeError:
+ print('No time for {} because movements not analysed'.format(image_folder))
+ return None
+
+ return None
+
+
+
+ def stop(self):
+ '''
+        Stop long running activities (currently only the measurement).
+ '''
+ self.stop_now = True
+
+
+ # ------------
+ # LINKED DATA
+ # linking external data such as ERGs to the DPP data (MAnalyser)
+ # ------------
+
+ def link_data(self, key, data):
+ '''
+        Links external data to this MAnalyser under the given key.
+ '''
+ self.linked_data[key] = data
+
+
+ def save_linked_data(self):
+ '''
+ Attempt saving the linked data on disk in JSON format.
+ '''
+ os.makedirs(self.LINK_SAVEDIR, exist_ok=True)
+
+ for key, data in self.linked_data.items():
+ with open(os.path.join(self.LINK_SAVEDIR, "{}.json".format(key)), 'w') as fp:
+ json.dump(data, fp)
+
+
+ def load_linked_data(self):
+ '''
+ Load linked data from specimen datadir.
+ '''
+ # Initialize linked data to an empty dict
+ self.linked_data = {}
+
+        # Check if the linked data directory exists; if not, there is
+        # no linked data for this specimen
+ if os.path.exists(self.LINK_SAVEDIR):
+
+ dfiles = [fn for fn in os.listdir(self.LINK_SAVEDIR) if fn.endswith('.json')]
+
+ for dfile in dfiles:
+ with open(os.path.join(self.LINK_SAVEDIR, dfile), 'r') as fp:
+ data = json.load(fp)
+ self.linked_data[dfile.replace('.json', '')] = data
+
+
+
+class MAverager(VectorGettable, ShortNameable, SettingAngleLimits):
+ '''
+ Combining and averaging results from many MAnalyser objects.
+
+ MAverager acts like MAnalyser object for getting data (like get_2d_vectors)
+ but lacks the movement analysis (cross-correlation) related parts.
+ '''
+ def __init__(self, manalysers, short_name=''):
+
+ self.manalysers = manalysers
+
+ self.interpolation = {}
+ self.va_limits = [None, None]
+ self.ha_limits = [None, None]
+ self.alimits_reverse = False
+
+ self.intp_step = (5, 5)
+
+ self.eyes = manalysers[0].eyes
+ self.vector_rotation = None
+
+        self.interpolated_raw = {}
+
+
+ def get_N_specimens(self):
+ return len(self.manalysers)
+
+
+ def get_specimen_name(self):
+ return 'averaged_'+'_'.join([manalyser.folder for manalyser in self.manalysers])
+
+
+ def setInterpolationSteps(self, horizontal_step, vertical_step):
+ '''
+        Set the resolution of the N-nearest neighbour interpolation
+        in MAverager's get_2d_vectors and get_3d_vectors.
+
+ INPUT ARGUMENTS
+ horizontal_step
+ vertical_step
+
+ Arguments horizontal_step and vertical_step refer to the rotation stages.
+
+ '''
+
+ self.intp_step = (horizontal_step, vertical_step)
+
+
+ def get_2d_vectors(self, eye, **kwargs):
+ '''
+        Gets the 2D movement vectors (in the camera coordinate system)
+        using N-nearest neighbour interpolation and averaging.
+ '''
+ #Modified from get_3d_vectors
+
+ interpolated = [[],[],[]]
+
+ points_2d = []
+ vectors_2d = []
+
+ for analyser in self.manalysers:
+ angles, X, Y = analyser.get_2d_vectors(eye, mirror_horizontal=False, mirror_pitch=False)
+ vecs = [[x,y] for x,y in zip(X, Y)]
+ points_2d.append(np.array(angles))
+ vectors_2d.append( np.array(vecs) )
+
+ vectors_2d = np.array(vectors_2d)
+
+ kdtrees = [KDTree(points) for points in points_2d]
+
+        # Interpolation grid limits: horizontal -90..90, vertical -180..180
+        hmin, hmax = (-90, 90)
+        vmin, vmax = (-180, 180)
+
+        intp_points = []
+        for h in np.arange(hmin, hmax+0.01, 10):
+            for v in np.arange(vmin, vmax+0.01, 10):
+ intp_points.append((h,v))
+
+ for intp_point in intp_points:
+
+ nearest_vectors = []
+
+ for kdtree, vectors in zip(kdtrees, vectors_2d):
+ distance, index = kdtree.query(intp_point)
+
+ if distance < math.sqrt(self.intp_step[0]**2+self.intp_step[1]**2):
+ nearest_vectors.append(vectors[index])
+
+ if len(nearest_vectors) > len(vectors_2d)/2:
+ avec = np.mean(nearest_vectors, axis=0)
+ avec /= np.linalg.norm(avec)
+ interpolated[0].append(np.array(intp_point))
+ interpolated[1].append(avec[0])
+ interpolated[2].append(avec[1])
+
+ angles, x, y = interpolated
+ return angles, x, y
+
+
+ def get_3d_vectors(self, eye, correct_level=True, normalize_length=0.1,
+ recalculate=False, strict=False, vertical_hardborder=False,
+ repeats_separately=False, **kwargs):
+ '''
+        Equivalent to MAnalyser's get_3d_vectors but interpolates with N-nearest
+        neighbours.
+
+        repeats_separately : bool
+            If True, return the underlying MAnalyser vectors separately
+            (the same points get repeated many times)
+ '''
+
+ cachename = ';'.join([str(item) for item in [self.vector_rotation, correct_level, normalize_length, strict, vertical_hardborder]])
+
+
+ if self.interpolation.get(eye, {}).get(cachename) is None or recalculate:
+
+ interpolated = [[],[]]
+            self.interpolated_raw[eye] = [] # list of the nearest vectors per accepted point
+
+ R = 1
+ intp_dist = (2 * R * np.sin(math.radians(self.intp_step[0])))
+
+ vectors_3d = []
+
+ for analyser in self.manalysers:
+ analyser.vector_rotation = self.vector_rotation
+ vec = analyser.get_3d_vectors(eye, correct_level=True,
+ normalize_length=normalize_length, **kwargs)
+
+
+ vectors_3d.append(vec)
+
+ if not strict:
+ intp_points = optimal_sampling(np.arange(-90, 90.01, self.intp_step[0]), np.arange(0, 360.01, self.intp_step[1]))
+ else:
+ if eye == 'left':
+ intp_points = optimal_sampling(np.arange(-90, 0.01, self.intp_step[0]), np.arange(0, 360.01, self.intp_step[1]))
+ else:
+ intp_points = optimal_sampling(np.arange(0, 90.01, self.intp_step[0]), np.arange(0, 360.01, self.intp_step[1]))
+
+ for intp_point in intp_points:
+
+ nearest_vectors = []
+ for vectors in vectors_3d:
+ i_nearest = nearest_neighbour(intp_point, vectors[0], max_distance=intp_dist)
+
+                    if i_nearest is not False:
+
+ if vertical_hardborder:
+ if np.sign(intp_point[2]) != np.sign(vectors[0][i_nearest][2]):
+ continue
+
+ nearest_vectors.append(vectors[1][i_nearest])
+
+ if len(nearest_vectors) > len(vectors_3d)/2:
+ avec = mean_vector(intp_point, nearest_vectors)
+ interpolated[0].append(np.array(intp_point))
+ interpolated[1].append(avec)
+
+ self.interpolated_raw[eye].append(nearest_vectors)
+
+ self.interpolation[eye] = {}
+ self.interpolation[eye][cachename] = np.array(interpolated[0]), np.array(interpolated[1])
+
+
+
+ points, vectors = self.interpolation[eye][cachename]
+
+ if repeats_separately:
+ newpoints = []
+ newvecs = []
+ for i_point, point in enumerate(points):
+ for vec in self.interpolated_raw[eye][i_point]:
+ newpoints.append(point)
+ newvecs.append(vec)
+ points = np.array(newpoints)
+ vectors = np.array(newvecs)
+
+
+ # Vertical/horizontal angle limiting
+ booleans = vertical_filter_points(points, vertical_lower=self.va_limits[0],
+ vertical_upper=self.va_limits[1], reverse=self.alimits_reverse)
+ points = points[booleans]
+ vectors = vectors[booleans]
+
+
+ return points, vectors
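+
+    # Usage sketch (man1, man2 are hypothetical, measured MAnalyser objects):
+    #
+    #   averager = MAverager([man1, man2])
+    #   averager.setInterpolationSteps(5, 5)
+    #   points, vectors = averager.get_3d_vectors('left')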
+
+
+    def export_3d_vectors(self, *args, optic_flow=False, **kwargs):
+        '''
+        Exports the 3D vectors in JSON format.
+
+        optic_flow If True, export optic flow instead of the fly vectors
+        '''
+
+ folder = os.path.join(ANALYSES_SAVEDIR, 'exported_3d_vectors')
+ os.makedirs(folder, exist_ok=True)
+
+ if optic_flow:
+ fn = '3d_optic_flow_vectors_{}_{}.json'.format(self.get_specimen_name(), datetime.datetime.now())
+ else:
+ fn = '3d_vectors_{}_{}.json'.format(self.get_specimen_name(), datetime.datetime.now())
+
+ data = {}
+ for eye in ['left', 'right']:
+            points, vectors = self.get_3d_vectors(eye, *args, **kwargs)
+
+ data[eye] = {'points': points.tolist(), 'vectors': vectors.tolist()}
+
+ with open(os.path.join(folder, fn), 'w') as fp:
+ json.dump(data, fp)
diff --git a/gonio-analysis/gonioanalysis/drosom/fitting.py b/gonio-analysis/gonioanalysis/drosom/fitting.py
new file mode 100644
index 0000000..ed72024
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/fitting.py
@@ -0,0 +1,42 @@
+import numpy as np
+
+from gonioanalysis.coordinates import force_to_tplane, normalize
+
+def get_reference_vector(P0):
+ '''
+    Generate a reference vector for the coefficient of determination
+    (R**2 value) calculation.
+
+    The reference vectors lie in the sphere's tangential plane,
+    parallel to the XY-plane, pointing east.
+
+    Returns vec so that P0+vec=P1
+ '''
+ px,py,pz = P0
+ aP0 = np.array(P0)
+
+ if px==0:
+ if py > 0:
+ phi = np.pi/2
+ else:
+ phi = -np.pi/2
+
+ else:
+ phi = np.arctan(py/px)
+
+ if px < 0:
+ phi += np.pi
+
+ # x,y,z components
+ vx = -np.sin(phi)
+ vy = np.cos(phi)
+
+ vec = force_to_tplane(aP0, aP0+np.array((vx,vy,0)))
+ vec = normalize(aP0, vec, scale=.15)
+ vec = vec - aP0
+
+ return vec
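+
+# A quick numeric check (a sketch, not part of the pipeline): at P0 = (1, 0, 0),
+# phi = arctan(0/1) = 0, so the pre-projection direction is
+# (-sin 0, cos 0, 0) = (0, 1, 0), which already lies in the tangent plane at P0;
+# the returned vec therefore points along +y (east), scaled by
+# normalize(..., scale=.15):
+#
+#   vec = get_reference_vector((1, 0, 0))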
+
+
+
+
diff --git a/gonio-analysis/gonioanalysis/drosom/kinematics.py b/gonio-analysis/gonioanalysis/drosom/kinematics.py
new file mode 100644
index 0000000..292186e
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/kinematics.py
@@ -0,0 +1,347 @@
+import os
+import csv
+
+import numpy as np
+import scipy.optimize
+
+import matplotlib.pyplot as plt
+
+
+
+def _logistic_function(t, k, L, t0):
+ '''
+ The sigmoidal function.
+
+ t : float or np.ndarray
+ Independent variable, time
+ k : float
+ Steepness of the curve
+ L : float
+ Curve's maximum value
+    t0 : float
+ Timepoint of the mid value
+
+ See https://en.wikipedia.org/wiki/Logistic_function
+ '''
+ return L / (1+np.exp(-k*(t-t0)))
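+
+# A small numeric sketch: with L=5 px, k=50 1/s and t0=0.1 s the curve starts
+# near zero, crosses L/2 = 2.5 at t = t0 and saturates towards L:
+#
+#   t = np.array([0, 0.1, 0.2])
+#   _logistic_function(t, k=50, L=5, t0=0.1)
+#   # -> approx. [0.03, 2.5, 4.97]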
+
+
+
+def mean_max_response(manalyser, image_folder, maxmethod='max'):
+ '''
+ Averages over repetitions, and returns the maximum of the
+ mean trace.
+
+ manalyser
+ image_folder
+    maxmethod : string
+        Method to determine the maximum displacement
+        'max': Take the furthest point
+        'mean_latterhalf': mean displacement over the final 50%
+        'final' : Use the final value
+ '''
+
+ displacements = manalyser.get_displacements_from_folder(image_folder)
+
+ mean = np.mean(displacements, axis=0)
+ if maxmethod == 'max':
+ return np.max(mean)
+ elif maxmethod == 'mean_latterhalf':
+ return np.mean(mean[int(len(mean)/2):])
+ elif maxmethod == 'final':
+ return mean[-1]
+    else:
+        raise ValueError('Unknown maxmethod {}'.format(maxmethod))
+
+
+
+def magstd_over_repeats(manalyser, image_folder, maxmethod='max'):
+ '''
+ Standard deviation in responses
+ (std of max displacement of each repeat)
+
+ maxmethod : string
+ See mean_max_response
+ '''
+ displacements = manalyser.get_displacements_from_folder(image_folder)
+
+ if maxmethod == 'max':
+ displacements = np.max(displacements, axis=1)
+ elif maxmethod == 'mean_latterhalf':
+ displacements = np.mean([d[int(len(d)/2):] for d in displacements], axis=1)
+
+ return np.std(displacements)
+
+
+def mean_topspeed(manalyser, image_folder):
+ '''
+ Returns the top speed of the mean response.
+ '''
+ mean = np.mean(manalyser.get_displacements_from_folder(image_folder), axis=0)
+
+ # FIXME: Replace the loop with numpy routines
+ top = 0
+ for i in range(len(mean)-1):
+ top = max(top, mean[i+1] - mean[i])
+
+ return top
+
+
+def _simple_latencies(displacements, fs, threshold=0.1):
+ '''
+    Interpolate each trace and return the time when it first exceeds
+    the given fraction (by default 1/10th) of its maximum value.
+ '''
+ latencies = []
+
+ timepoints = np.linspace(0, len(displacements[0])/fs, len(displacements[0]))
+ newx = np.linspace(0, len(displacements[0])/fs, 200)
+
+ for displacement in displacements:
+ y = np.interp(newx, timepoints, displacement)
+
+ index = np.where(y>(np.max(y)*threshold))[0][0]
+
+ latencies.append(newx[index])
+
+ return latencies
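+
+# Sketch: for a linear ramp the latency is roughly threshold * duration.
+# E.g. a 10-frame ramp from 0 to 1 recorded at fs=10 Hz spans 1 s, so with
+# threshold=0.1 the returned latency is close to 0.1 s:
+#
+#   _simple_latencies([np.linspace(0, 1, 10)], fs=10, threshold=0.1)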
+
+
+
+def latency(manalyser, image_folder, threshold=0.05, method='sigmoidal'):
+ '''
+    Response latency, ie. the time when the response exceeds (by default)
+    5% of its maximum value.
+
+ Arguments
+ ---------
+ threshold : float
+ Between 0 and 1
+ method : string
+ Either "sigmoidal" (uses the sigmoidal fit) or
+ "simple" (uses the data directly).
+
+ Returns
+ -------
+    latency : sequence
+        The time durations in seconds that it takes for the responses
+        to reach (by default) 5% of their maximum value (length of repeats).
+ '''
+ fs = manalyser.get_imaging_frequency(image_folder)
+
+ if method == 'simple':
+ # Take the mean response of the image_folder's data
+ displacements = manalyser.get_displacements_from_folder(image_folder)
+ trace = np.mean(displacements, axis=0)
+ elif method == 'sigmoidal':
+ # Make a sigmoidal fit and use the sigmoidal curve
+ params = sigmoidal_fit(manalyser, image_folder)
+ N = len(manalyser.get_displacements_from_folder(image_folder)[0])
+ time = np.linspace(0, N/fs, N)
+ trace = _logistic_function(time,
+ np.mean(params[1]),
+ np.mean(params[0]),
+ np.mean(params[2]))
+ else:
+ raise ValueError("method has to be 'sigmoidal' or 'simple', not {}".format(
+ method))
+
+ # Check when climbs over the threshold
+ latency = _simple_latencies([trace], fs, threshold)
+ return latency
+
+
+
+def _sigmoidal_fit(displacements, fs, debug=False):
+ amplitudes = []
+ speeds = []
+ latencies = []
+
+ if debug:
+ fig, ax = plt.subplots()
+
+ timepoints = np.linspace(0, len(displacements[0])/fs, len(displacements[0]))
+
+ for i_repeat, displacement in enumerate(displacements):
+
+ # Initial guesses for k,L,t0
+ est_L = displacement[-1]
+ if est_L > 0:
+ est_t0 = (np.argmax(displacement)/fs)/2
+ else:
+ est_t0 = (np.argmin(displacement)/fs)/2
+ est_k = abs(est_L/est_t0)
+
+        if debug:
+            print('est L={} t0={} k={}'.format(est_L, est_t0, est_k))
+
+ try:
+ popt, pcov = scipy.optimize.curve_fit(_logistic_function, timepoints, displacement,
+ p0=[est_k, est_L, est_t0])
+ except RuntimeError:
+ # Runtime Error occurs when curve fitting takes over maxfev iterations
+ # Usually then we have nonsigmoidal data (no response)
+ continue
+
+
+ if debug:
+ ax.plot(timepoints, displacement, '-')
+ ax.plot(timepoints, _logistic_function(timepoints, *popt),
+ '--', label='fit rep {}'.format(i_repeat))
+
+ plt.show(block=False)
+ plt.pause(.1)
+            if not input('good?').lower().startswith('y'):
+ plt.close(fig)
+ return None
+ plt.close(fig)
+
+ speeds.append(popt[0])
+ amplitudes.append(popt[1])
+ latencies.append(popt[2])
+
+
+ return amplitudes, speeds, latencies
+
+
+
+def sigmoidal_fit(manalyser, image_folder, figure_savefn=None):
+ '''
+
+ Assuming sigmoidal (logistic function) response.
+
+ Arguments
+ ---------
+ manalyser : object
+ image_folder : string
+ figure_savefn : string
+ If given, saves a figure of the fit
+
+ Returns the following lists
+ amplitudes, speeds, halfrise_times
+ '''
+
+    if figure_savefn:
+ fig, ax = plt.subplots()
+
+
+ amplitudes = []
+ speeds = []
+ halfrise_times = []
+
+ pcovs = []
+
+ displacements = manalyser.get_displacements_from_folder(image_folder)
+ fs = manalyser.get_imaging_frequency(image_folder)
+
+ timepoints = np.linspace(0, len(displacements[0])/fs, len(displacements[0]))
+
+
+ for i_repeat, displacement in enumerate(displacements):
+
+        # Fit each repeat separately using the module-level helper
+        amplitude, speed, halfrisetime = _sigmoidal_fit([displacement], fs)
+
+        if not amplitude:
+            # The fit failed for this repeat (nonsigmoidal data, no response)
+            continue
+
+        speeds.append(speed[0])
+        amplitudes.append(amplitude[0])
+        halfrise_times.append(halfrisetime[0])
+
+        if figure_savefn:
+            ax.plot(timepoints, displacement, '-')
+            # _logistic_function takes (t, k, L, t0)
+            ax.plot(timepoints,
+                    _logistic_function(timepoints, speed[0], amplitude[0], halfrisetime[0]),
+                    '--', label='fit rep {}'.format(i_repeat))
+
+
+ if figure_savefn:
+ fig.savefig(figure_savefn)
+
+ return amplitudes, speeds, halfrise_times
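+
+# Usage sketch (assuming a measured analyser; the folder name is hypothetical):
+#
+#   amplitudes, speeds, halfrise_times = sigmoidal_fit(analyser, 'pos(0, 0)')
+#   print('Mean amplitude {} pixels'.format(np.mean(amplitudes)))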
+
+
+
+def save_sigmoidal_fit_CSV(analysers, savefn, save_fits=False, with_extra=True,
+ microns=True):
+ '''
+ Takes in analysers, performs sigmoidal_fit for each and all image_folders.
+ Then saves the results as a CSV file, and by default fit images as well.
+
+ analysers : list of objects
+ List of analyser objects
+ savefn : string
+ Filename.
+ save_fits : bool
+ Save png images of the fits.
+ '''
+
+ with open(savefn, 'w') as fp:
+ writer = csv.writer(fp)
+
+ writer.writerow(['#'])
+ writer.writerow(['# Kinematics by sigmoidal fit', 'L / (1+np.exp(-k*(t-t0)))'])
+ writer.writerow(['# t0 latency (s)', 'k rise speed (pixels/s)', 'L response amplitude (pixels)'])
+ writer.writerow(['#'])
+
+        if microns:
+            y_units = 'µm'
+        else:
+            y_units = 'pixels'
+
+ header = ['Name', 'Image folder',
+ 'Mean L ({})', 'STD L ({})',
+ 'Mean k ({}/s)', 'STD k ({}/s)',
+ 'Mean t0 (s)', 'STD t0 (s)', 'Fit image']
+
+ if with_extra:
+ header.extend( ['Mean max amplitude ({})',
+ 'Mean final amplitude ({})' ,
+ 'Mean latency (s)',
+ 'Top speed ({}/s)'] )
+
+ writer.writerow([text.format(y_units) for text in header])
+
+ i_fit = 0
+
+        for analyser in analysers:
+
+            for image_folder in analyser.list_imagefolders():
+
+                if save_fits:
+                    dirname = os.path.dirname(savefn)
+                    folder = os.path.basename(dirname)
+                    figure_savefn = os.path.join(dirname, folder+'_fits',
+                            'fit_{0:07d}.png'.format(i_fit))
+
+                    if i_fit == 0:
+                        os.makedirs(os.path.dirname(figure_savefn), exist_ok=True)
+                else:
+                    figure_savefn = False
+
+                amplitudes, speeds, halfrise_times = sigmoidal_fit(analyser, image_folder,
+                        figure_savefn=figure_savefn)
+
+ if microns:
+ scaler = analyser.get_pixel_size(image_folder)
+ else:
+ scaler = 1
+
+                # Put only the image basename into the CSV
+                if figure_savefn:
+                    fit_image = os.path.basename(figure_savefn)
+                else:
+                    fit_image = figure_savefn
+
+                row = [analyser.folder, image_folder,
+                        np.mean(amplitudes)*scaler, np.std(amplitudes)*scaler,
+                        np.mean(speeds)*scaler, np.std(speeds)*scaler,
+                        np.mean(halfrise_times), np.std(halfrise_times),
+                        fit_image]
+
+ if with_extra:
+ max_amplitude = scaler * mean_max_response(analyser, image_folder, maxmethod='max')
+ end_amplitude = scaler * mean_max_response(analyser, image_folder, maxmethod='final')
+ latency_value = np.mean(latency(analyser, image_folder))
+
+ top_speed = scaler * mean_topspeed(analyser, image_folder)
+
+ row.extend([max_amplitude, end_amplitude, latency_value, top_speed])
+
+ writer.writerow(row)
+
+                i_fit += 1
+
+ return None
+
diff --git a/gonio-analysis/gonioanalysis/drosom/linked_data/__init__.py b/gonio-analysis/gonioanalysis/drosom/linked_data/__init__.py
new file mode 100644
index 0000000..3e7fdb2
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/linked_data/__init__.py
@@ -0,0 +1 @@
+from .electrophysiology import link_erg_labbook
diff --git a/gonio-analysis/gonioanalysis/drosom/linked_data/electrophysiology.py b/gonio-analysis/gonioanalysis/drosom/linked_data/electrophysiology.py
new file mode 100644
index 0000000..3b0696b
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/linked_data/electrophysiology.py
@@ -0,0 +1,89 @@
+'''
+Linked Biosyst electrophysiology data, intended for electroretinograms (ERGs).
+'''
+
+import os
+import csv
+import glob
+
+import numpy as np
+
+from gonioanalysis.directories import ANALYSES_SAVEDIR
+
+from biosystfiles import extract as bsextract
+
+
+def _load_ergs(ergs_labbook, ergs_rootdir):
+ '''
+ Fetches ERGs for the specimen matching the name.
+
+ Requirements
+ - ERGs are Biosyst recorded .mat files.
+    - Requires also a lab book that links each specimen name to an ERG file,
+        and possibly other parameter values such as intensity, repeats,
+        UV/green etc.
+
+    Returns ergs {specimen_name: data}
+    where data is a list of dictionaries, one per recording; each
+        dictionary holds the lab book parameters plus 'data' (the mean
+        trace as a list) and 'fs' (the sampling frequency)
+    '''
+ ergs = {}
+
+ csvfile = []
+ with open(ergs_labbook, 'r') as fp:
+ reader = csv.reader(fp)
+ for row in reader:
+ csvfile.append(row)
+
+ previous_specimen = ''
+
+ # Header (column names)
+ column_names = csvfile[0]
+
+ for line in csvfile[1:]:
+ efn = line[1]
+        match = glob.glob(ergs_rootdir+'/**/'+efn, recursive=True)
+ if len(match) != 1:
+ print('{} not found'.format(efn))
+ else:
+ specimen = line[0]
+ if not specimen:
+ specimen = previous_specimen
+ previous_specimen = specimen
+
+            if specimen not in ergs:
+                ergs[specimen] = []
+
+ ddict = {key: value for key, value in zip(column_names[2:], line[2:])}
+ trace, fs = bsextract(match[0], 0)
+
+ # Use the mean if many repeats present
+ ddict['data'] = np.mean(trace, axis=1).flatten().tolist()
+ ddict['fs'] = int(fs)
+ ergs[specimen].append(ddict)
+
+ return ergs
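+
+# A sketch of the lab book layout the code above expects: column 0 is the
+# specimen name, column 1 the ERG filename, and the remaining columns are
+# free-form parameters (all names below are hypothetical):
+#
+#   specimen,erg_file,intensity,color
+#   fly1,fly1_rec001.mat,100,uv
+#   ,fly1_rec002.mat,100,green      (empty name repeats the previous specimen)
+#   fly2,fly2_rec001.mat,100,uv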
+
+
+
+def link_erg_labbook(manalysers, ergs_labbook, ergs_rootdir):
+ '''
+ Links MAnalyser objects with erg data fetched from an electronic
+ labbook. For specification, see function _load_ergs
+ '''
+
+ erg_data = _load_ergs(ergs_labbook, ergs_rootdir)
+
+ for manalyser in manalysers:
+ mname = manalyser.get_specimen_name()
+
+ if mname in erg_data:
+ manalyser.link_data('ERGs', erg_data[mname])
+ manalyser.save_linked_data()
+
+ else:
+ print('No ERGs for {}'.format(mname))
+
+
diff --git a/gonio-analysis/gonioanalysis/drosom/loading.py b/gonio-analysis/gonioanalysis/drosom/loading.py
new file mode 100644
index 0000000..3790a3e
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/loading.py
@@ -0,0 +1,192 @@
+'''
+Functions related to DrosoM data loading.
+
+MODULE LEVEL VARIABLES
+----------------------
+REPETITION_INDICATOR : str
+ In filenames, the text preceding the repetition value
+POSITION_INDICATOR : str
+ In filenames, the text preceding the imaging location value
+IMAGE_NAME_EXTENSIONS : tuple of str
+ File name extensions that are treated as image files.
+'''
+
+import os
+import ast
+
+from gonioanalysis.rotary_encoders import to_degrees
+
+
+REPETITION_INDICATOR = 'rep'
+POSITION_INDICATOR = 'pos'
+
+IMAGE_NAME_EXTENSIONS = ('.tiff', '.tif')
+
+
+def arange_fns(fns):
+ '''
+    Arrange filenames based on REPETITION_INDICATOR and POSITION_INDICATOR
+    into their time order (repeat 1, image1,2,3,4, repeat 2, image1,2,3,4, ...).
+
+    If no indicators are found in the filenames, the ordering is
+    at least alphabetical.
+ '''
+
+ # Sort by i_frame
+ try:
+ fns.sort(key=lambda x: int(x.split('_')[-1].split('.')[0]))
+ except ValueError:
+        # Here if the image fn does not end with _somenumber.tif(f)
+ pass
+
+ # Sort by i_repeat
+ try:
+ fns.sort(key=lambda x: int(x.split('_')[-2][3:]))
+ except ValueError:
+ fns.sort()
+
+ return fns
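+
+# Example ordering (filenames following the ..._rep{i}_{frame}.tiff pattern):
+#
+#   arange_fns(['im_pos(0, 0)_rep1_0.tiff',
+#               'im_pos(0, 0)_rep0_1.tiff',
+#               'im_pos(0, 0)_rep0_0.tiff'])
+#   # -> ['im_pos(0, 0)_rep0_0.tiff', 'im_pos(0, 0)_rep0_1.tiff',
+#   #     'im_pos(0, 0)_rep1_0.tiff']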
+
+
+def split_to_repeats(fns):
+ '''
+ Split a list of filenames into repeats (sublists) based on the
+ REPETITION_INDICATOR
+
+ Arguments
+ ---------
+ fns : list
+ 1D sequence of filenames
+
+ Returns
+ --------
+ splitted_fns : list
+ A list where each item is a sublist of filenames (or empty list
+ if there were no filenames for that repeat)
+ '''
+
+ repeats = {}
+
+ for fn in fns:
+        try:
+            i_repeat = str(int(fn[fn.index(REPETITION_INDICATOR)+len(REPETITION_INDICATOR):].split('_')[0]))
+        except ValueError:
+            print('Warning: Cannot determine i_repeat for {}'.format(fn))
+            # Fall back to repeat 0 so that i_repeat is always defined
+            i_repeat = '0'
+
+ if i_repeat not in repeats:
+ repeats[i_repeat] = []
+
+ repeats[i_repeat].append(fn)
+
+ return [fns for i_repeat, fns in repeats.items()]
+
+
+
+def angleFromFn(fn):
+ '''
+ Returns the horizontal and vertical angles from a given filename
+ The filename must be IMSOFT formatted as
+ im_pos(-30, 170)_rep0_0.tiff
+
+ fn Filename, from which to read the angles
+ '''
+ hor, ver = fn.split('(')[1].split(')')[0].split(',')
+ hor = int(hor)
+ ver = int(ver)
+
+ angles = [[hor,ver]]
+ to_degrees(angles)
+ return angles[0]
+
+
+def angles_from_fn(fn, prefix='pos'):
+ '''
+ Takes in a filename that somewhere contains string "pos(hor, ver)",
+ for example "pos(-30, 170)" and returns tuple (-30, 170)
+
+ Returns
+ -------
+ angle : tuple of ints
+ Rotation stage values or (0, 0) if the rotation was not found.
+ '''
+ try:
+ i_start = fn.index(prefix) + len(prefix)
+ except ValueError:
+ #raise ValueError("Cannot find prefix {} from filename {}".format(fn))
+ return (0,0)
+ try:
+ i_end = fn[i_start:].index(')') + i_start + 1
+ except ValueError:
+ #raise ValueError("Cannot find ')' after 'pos' in filename {}".format(fn))
+ return (0,0)
+ return ast.literal_eval(fn[i_start:i_end])
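+
+# Example (the filename format is described in angleFromFn above):
+#
+#   angles_from_fn('im_pos(-30, 170)_rep0_0.tiff')
+#   # -> (-30, 170)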
+
+
+
+def load_data(drosom_folder):
+ '''
+ Loads DrosoM imaging data from the following save structure
+
+ DrosoM2
+ pos(0, 0)
+ .tif files
+ pos(20, 20)
+ .tif files
+ pos(0, 10)
+ .tif files
+ ...
+
+ in a dictionary where the keys are str((horizontal, pitch)) and the items are
+ a list of image stacks:
+
+ stacks_dictionary = {"(hor1, pitch1): [[stack_rep1], [stack_rep2], ...]"},
+
+ where stack_rep1 = [image1_fn, image2_fn, ...].
+
+ Horizontal and pitch are given in rotatry encoder steps, not degrees.
+
+ '''
+
+ stacks_dictionary = {}
+
+ pos_folders = [fn for fn in os.listdir(drosom_folder) if os.path.isdir(os.path.join(drosom_folder, fn))]
+
+ # Import all tif images
+ for folder in pos_folders:
+
+ if folder.startswith(POSITION_INDICATOR):
+ str_angles = folder[len(POSITION_INDICATOR):] # Should go from "pos(0, 0)" to "(0, 0)"
+ else:
+ str_angles = folder
+
+ files = os.listdir(os.path.join(drosom_folder, folder))
+ tiff_files = [f for f in files if f.endswith(IMAGE_NAME_EXTENSIONS)]
+
+ if len(tiff_files) == 0:
+ # Skip if no images in the folder
+ continue
+
+ tiff_files = arange_fns(tiff_files)
+
+ stacks_dictionary[str_angles] = []
+
+ # Subdivide into repetitions
+ for tiff in tiff_files:
+ try:
+ i_repetition = int(tiff[tiff.index(REPETITION_INDICATOR)+len(REPETITION_INDICATOR):].split('_')[0])
+ except ValueError:
+ print('Warning: Cannot determine i_repetition for {}'.format(tiff))
+ i_repetition = 0
+
+ while i_repetition >= len(stacks_dictionary[str_angles]):
+ stacks_dictionary[str_angles].append([])
+
+ stacks_dictionary[str_angles][i_repetition].append(os.path.join(drosom_folder, folder, tiff))
+
+ # Remove empty lists, if one repetition index or more is missing from the data
+ stacks_dictionary[str_angles] = [alist for alist in stacks_dictionary[str_angles] if not alist == []]
+
+ return stacks_dictionary
+
diff --git a/gonio-analysis/gonioanalysis/drosom/optic_flow.py b/gonio-analysis/gonioanalysis/drosom/optic_flow.py
new file mode 100644
index 0000000..a3273d3
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/optic_flow.py
@@ -0,0 +1,251 @@
+'''
+Estimating optic flow field
+'''
+
+from math import cos, sin, radians
+
+import numpy as np
+from scipy.spatial import cKDTree as KDTree
+from scipy.stats import mannwhitneyu
+
+import gonioanalysis.coordinates as coordinates
+from gonioanalysis.drosom.analysing import MAnalyser
+
+
+def flow_direction(point, xrot=0):
+ '''
+ Estimates optic flow at the given point by placing the optic flow vector to
+ the point, and forcing it to the tangent plane of the sphere.
+
+ The optic flow vector is a unit vector -j (ie. pointing to negative y-axis),
+ unless rotated. This should be okay since we are interested only in the
+ direction of the flow field, not magnitude.
+
+ INPUT ARGUMENTS DESCRIPTION
+ point (x0,y0,z0)
+ xrot Rotation about x-axis
+
+ RETURNS
+    xi,yj,zk Flow direction vector at the origin
+ '''
+
+ rxrot = radians(xrot)
+ ov = 2*np.array(point) + np.array([0,-1*cos(rxrot),sin(rxrot)])
+
+ P1 = coordinates.force_to_tplane(point, ov)
+
+ P1 = coordinates.normalize(point, P1, scale=0.10)
+
+ return P1-np.array(point)
+
+
+
+def flow_vectors(points, xrot=0):
+ '''
+ Returns optic flow vectors (from flow_direction) as a numpy array.
+ '''
+ return np.array([flow_direction(P0, xrot=xrot) for P0 in points])
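+
+# Usage sketch (optimal_sampling lives in gonioanalysis.coordinates and is
+# also used by FAnalyser below):
+#
+#   points = coordinates.optimal_sampling(np.arange(-90, 90, 5),
+#                                         np.arange(-180, 180, 5))
+#   vectors = flow_vectors(points, xrot=0)  # one flow vector per point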
+
+
+def field_error(points_A, vectors_A, points_B, vectors_B, direction=False, colinear=False):
+ '''
+    Relaxed version of the field error where the points don't have to overlap.
+
+    Pass the eye vectors as A.
+
+    vectors_X list of vectors
+        vector (x,y,z)
+
+    direction Try to get the direction also (neg/pos)
+
+    colinear : bool
+
+    Returns the errors at points_A
+ '''
+
+ N_vectors = len(vectors_A)
+
+ errors = np.empty(N_vectors)
+
+ kdtree = KDTree(points_B)
+
+
+ distances, indices = kdtree.query(points_A, k=10, n_jobs=-1)
+ weights = 1/(np.array(distances)**2)
+
+ # Check for any inf
+ for i_weights in range(weights.shape[0]):
+ if any(np.isinf(weights[i_weights])):
+ weights[i_weights] = np.isinf(weights[i_weights]).astype('int')
+
+ compare_vectors = [[vectors_B[i] for i in indx] for indx in indices]
+
+ for i, (vecA, vecBs, vecB_weights) in enumerate(zip(vectors_A, compare_vectors, weights)):
+
+ vec_errors = []
+
+ for vecB in vecBs:
+
+ angle = np.arccos(np.inner(vecA, vecB)/(np.linalg.norm(vecA) * np.linalg.norm(vecB)))
+ error = angle / np.pi
+ if not 0<=error<=1:
+ # Error is nan if either of the vectors is zero because this leads to division
+ # by zero because np.linalg.norm(vec0) = 0
+ # -> set error to 1 if vecA != vecB or 0 otherwise
+ if np.array_equal(vecA, vecB):
+ error = 0
+ else:
+ error = 1
+
+ if direction:
+ counter = coordinates.rotate_along_arbitrary(points_A[i], vecB, angle)
+ clock = coordinates.rotate_along_arbitrary(points_A[i], vecB, -angle)
+
+ if np.sum(counter - vecB) > np.sum(clock - vecB):
+ error = -error
+
+ vec_errors.append(error)
+
+ errors[i] = np.average(vec_errors, weights=vecB_weights)
+
+ if direction:
+ if colinear:
+ errors *= 2
+ errors = (errors + 1)/2
+ else:
+ if colinear:
+ errors = 2 * np.abs(errors - 0.5)
+ else:
+ errors = 1 - errors
+
+ return errors
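+
+# Interpretation of the returned values (summarizing the scalings above):
+# the raw error is the angle between the compared vectors divided by pi.
+# With direction=False and colinear=False the final value is 1 for parallel,
+# 0.5 for perpendicular and 0 for antiparallel vectors; with colinear=True
+# both parallel and antiparallel give 1 and perpendicular gives 0.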
+
+
+def _find_unique_points(points):
+    '''
+    Returns
+    -------
+    points : list of tuples
+        The unique points
+    indices : list of lists of int
+        For each unique point, the indices of its occurrences in the input
+    '''
+
+    # Convert to tuples so that the points are hashable (the inputs may be
+    # numpy arrays or lists)
+    tupled = [tuple(p) for p in points]
+    unique_points = list(set(tupled))
+    indices = [[] for i in range(len(unique_points))]
+    for i_point, point in enumerate(tupled):
+        indices[unique_points.index(point)].append(i_point)
+
+    return unique_points, indices
+
+
+def _angle(vecA, vecB):
+    return np.arccos(np.inner(vecA, vecB)/(np.linalg.norm(vecA) * np.linalg.norm(vecB)))
+
+def field_pvals(points_A, vectors_A, points_B, vectors_B, direction=False, colinear=False):
+    '''
+    Statistical comparison of two vector fields.
+
+    Assumes that the same points are repeated many times (otherwise it would
+    not make sense to do statistical testing); finds these "unique" points
+    and tests the vector directions at each of them with the Mann-Whitney
+    U test.
+    '''
+    un_points_A, un_indices_A = _find_unique_points(points_A)
+    un_points_B, un_indices_B = _find_unique_points(points_B)
+
+    pvals = []
+
+    # Query against the unique B points so that the returned index
+    # matches un_indices_B
+    kdtree = KDTree(un_points_B)
+
+ for point, indices_A in zip(un_points_A, un_indices_A):
+ # Closest point
+ distance_B, index_B = kdtree.query(point, k=1, n_jobs=-1)
+
+ Avecs = [vectors_A[i] for i in indices_A]
+ Bvecs = [vectors_B[i] for i in un_indices_B[index_B]]
+
+ # Mean of Avecs
+ mean_Avec = np.mean(Avecs, axis=0)
+
+ # Relative rotations of vectors with respect to the mean Avec
+ Adirs = [_angle(vec, mean_Avec) for vec in Avecs]
+        Bdirs = [_angle(vec, mean_Avec) for vec in Bvecs]
+
+ u_stats, pval = mannwhitneyu(Adirs, Bdirs)
+
+ pvals.append(pval)
+
+ return un_points_A, pvals
+
+
+
+class FAnalyser(MAnalyser):
+ '''
+    Sham analyser that just outputs optic flow vectors with the same
+    API as MAnalyser does.
+ '''
+
+ def __init__(self, *args, **kwargs):
+
+ self.manalysers = [self]
+ self.folder = 'optic_flow'
+ self.eyes = ['left', 'right']
+ self.vector_rotation = 0
+
+
+ # FAnalyser specific
+ self.pitch_rot = 0
+ self.roll_rot = 0
+ self.yaw_rot = 0
+ self.points = {'right': coordinates.optimal_sampling(np.arange(0, 60, 5), np.arange(-100, 100, 5)),
+ 'left': coordinates.optimal_sampling(np.arange(-60, 0, 5), np.arange(-100, 100, 5))}
+
+ self.constant_points = False
+
+ def get_3d_vectors(self, eye, constant_points=None, *args, **kwargs):
+ '''
+
+ constant_points : bool
+ If true, points stay the same and only vectors get rotated.
+ If false, smooth rotation of the whole optic flow sphere.
+ '''
+
+ if constant_points is None:
+ constant_points = self.constant_points
+
+ if constant_points:
+ # Rotate points, calculate vectors, rotate back
+ points = coordinates.rotate_points(self.points[eye],
+ radians(self.yaw_rot),
+ radians(self.pitch_rot),
+ radians(self.roll_rot))
+
+ points, vectors = coordinates.rotate_vectors(points, flow_vectors(points, xrot=0),
+ -radians(self.yaw_rot),
+ -radians(self.pitch_rot),
+ -radians(self.roll_rot))
+ else:
+ points = coordinates.optimal_sampling(np.arange(-90,90,5), np.arange(-180,180,5))
+ points, vectors = coordinates.rotate_vectors(points, flow_vectors(points, xrot=0),
+ -radians(self.yaw_rot),
+ -radians(self.pitch_rot),
+ -radians(self.roll_rot))
+
+ # Fixme. Make me with numpy, not list comprehension
+ if eye == 'left':
+ indices = [i for i, point in enumerate(points) if point[0] <= 0]
+ elif eye == 'right':
+ indices = [i for i, point in enumerate(points) if point[0] >= 0]
+
+ points = [points[i] for i in indices]
+ vectors = [vectors[i] for i in indices]
+
+ return points, vectors
+
+
+ def is_measured(self, *args, **kwargs):
+ return True
+
+ def are_rois_selected(self, *args, **kwargs):
+ return True
+
+ def load_analysed_movements(self, *args, **kwargs):
+ return None
+
diff --git a/gonio-analysis/gonioanalysis/drosom/orientation_analysis.py b/gonio-analysis/gonioanalysis/drosom/orientation_analysis.py
new file mode 100644
index 0000000..c008e13
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/orientation_analysis.py
@@ -0,0 +1,107 @@
+'''
+Rhabdomere orientation
+'''
+import os
+import json
+
+import matplotlib.pyplot as plt
+
+from roimarker import Marker
+
+from gonioanalysis.drosom.analysing import MAnalyser
+from gonioanalysis.directories import PROCESSING_TEMPDIR
+
+
+class OAnalyser(MAnalyser):
+ '''
+ Rhabdomere orientation analyser.
+
+    Inherits from MAnalyser though most of its methods
+    have no meaning.
+
+ Measure movement opens Marker to draw lines/arrows
+
+ '''
+
+ def __init__(self, *args, **kwargs):
+
+
+ super().__init__(*args, **kwargs)
+
+ self._movements_skelefn = self._movements_skelefn.replace('movements_', 'orientation_')
+ self.active_analysis = ''
+
+
+ def measure_movement(self, eye, *args, **kwargs):
+ '''
+        The measure movement method overridden to measure the (rhabdomere)
+        orientation.
+
+ In the end calls self.load_analysed_movements in order to
+ match the MAnalyser behaviour.
+ '''
+
+ self.movements = {}
+
+ images = []
+ rois = []
+
+ for angle in self.stacks:
+
+ roi = self.ROIs[eye].get(angle, None)
+
+ if roi is not None:
+ images.append(self.stacks[angle][0][0])
+
+
+ extended_roi = [roi[0]-roi[2]/2, roi[1]-roi[3]/2, 2*roi[2], 2*roi[3]]
+
+ rois.append(extended_roi)
+
+ fig, ax = plt.subplots(num='Draw arrows for the {} eye'.format(eye))
+ marker = Marker(fig, ax, images, self.MOVEMENTS_SAVEFN.format(eye),
+ relative_fns_from=os.path.join(self.data_path, self.folder),
+ drop_imagefn=True,
+ selection_type='arrow',
+ crops=rois,
+ callback_on_exit=lambda eye=eye: self._hotedit_marker_output(eye))
+
+ marker.run()
+
+
+ def _hotedit_marker_output(self, eye):
+ '''
+        Edits the Marker output to be Movemeter-like output.
+ '''
+
+ with open(self.MOVEMENTS_SAVEFN.format(eye), 'r') as fp:
+ marker_data = json.load(fp)
+
+ edited_data = {}
+
+ for image_folder, arrows in marker_data.items():
+
+ repeats = []
+
+ for arrow in arrows:
+ x1, y1, x2, y2 = arrow
+
+ repeats.append( {'x': [0, x1-x2], 'y': [0, y1-y2]} )
+
+ # drop pos prefix [3:]
+ if repeats != []:
+ edited_data[image_folder[3:]] = repeats
+
+
+ with open(self.MOVEMENTS_SAVEFN.format(eye), 'w') as fp:
+ json.dump(edited_data, fp)
+
+
+
+ def is_measured(self):
+ fns = [self.MOVEMENTS_SAVEFN.format(eye) for eye in self.eyes]
+ return all([os.path.exists(fn) for fn in fns])
+
+
diff --git a/gonio-analysis/gonioanalysis/drosom/plotting/__init__.py b/gonio-analysis/gonioanalysis/drosom/plotting/__init__.py
new file mode 100644
index 0000000..267bbd8
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/plotting/__init__.py
@@ -0,0 +1,6 @@
+'''
+Beware!
+Everything imported here may become a menu item or a cli argument.
+'''
+
+from .compare_opticflow import error_at_flight, complete_flow_analysis
diff --git a/gonio-analysis/gonioanalysis/drosom/plotting/basics.py b/gonio-analysis/gonioanalysis/drosom/plotting/basics.py
new file mode 100644
index 0000000..9e1778a
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/plotting/basics.py
@@ -0,0 +1,1269 @@
+'''
+Most commonly needed functions to plot the data.
+'''
+
+import os
+import math
+import copy
+
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import proj3d
+import mpl_toolkits.axes_grid1
+import matplotlib.image
+import matplotlib.colors
+import matplotlib.cm
+from mpl_toolkits.axes_grid1 import make_axes_locatable
+from scipy.ndimage import rotate
+import PIL
+
+from .common import (
+ vector_plot,
+ surface_plot,
+ add_rhabdomeres,
+ add_line,
+ plot_2d_opticflow,
+ plot_guidance
+ )
+from gonioanalysis.directories import CODE_ROOTDIR
+from gonioanalysis.drosom.optic_flow import field_error, field_pvals
+from gonioanalysis.coordinates import rotate_vectors
+from tk_steroids.routines import extend_keywords
+
+plt.rcParams.update({'font.size': 12})
+
+EYE_COLORS = {'right': 'blue', 'left': 'red'}
+REPEAT_COLORS = ['green', 'orange', 'pink']
+
+
+DEFAULT_ELEV = 10
+DEFAULT_AZIM = 70
+DEFAULT_FIGSIZE = (16,9)
+
+def plot_1d_magnitude(manalyser, image_folder=None, i_repeat=None,
+ mean_repeats=False, mean_imagefolders=False, mean_eyes=False,
+ color_eyes=False, gray_repeats=False, progressive_colors=False,
+ show_mean=False, show_std=False,
+ show_label=True, milliseconds=False, microns=False,
+ phase=False, derivative=False,
+ label="EYE-ANGLE-IREPEAT", ax=None):
+ '''
+ Plots 1D displacement magnitude over time, separately for each eye.
+
+ Arguments
+ ---------
+ manalyser : object
+ MAnalyser object instance
+ image_folder : string or None
+ Image folder to plot the data from. If None (default), plot all image folders.
+    mean_repeats : bool
+        Whether to take the mean of the repeats or plot each repeat separately
+    mean_imagefolders : bool
+        If True and image_folder is None (plotting all image folders), takes the mean
+        of all image folders.
+    mean_eyes : bool
+        Whether to take a mean over the left and right eyes
+    label : string
+        Label to show. If None, no label. Otherwise
+            EYE gets replaced with eye
+            ANGLE gets replaced by image folder name
+            IREPEAT gets replaced by the number of the repeat
+
+ Returns
+ ax
+ Matplotlib axes
+ traces
+ What has been plotted
+ N_repeats
+ The total number of repeats (independent of i_repeat)
+ '''
+
+ def get_x_yscaler(mag_rep_i):
+ # FIXME Pixel size and fs should be read from the data
+ pixel_size = manalyser.get_pixel_size(image_folder)
+ fs = manalyser.get_imaging_frequency(image_folder)
+ N = len(mag_rep_i)
+
+ if milliseconds:
+ # In milliseconds
+ X = 1000* np.linspace(0, N/fs, N)
+ else:
+ X = np.arange(N)
+
+ if microns:
+ yscaler = pixel_size
+ else:
+ yscaler = 1
+
+ return X, yscaler
+
+ X = None
+ yscaler = None
+
+ if ax is None:
+ fig, ax = plt.subplots()
+
+ if mean_eyes:
+ eyes = [None]
+ else:
+ eyes = manalyser.eyes
+
+ N_repeats = 0
+ traces = []
+
+
+
+ for eye in eyes:
+ magtraces = manalyser.get_magnitude_traces(eye, image_folder=image_folder,
+ mean_repeats=mean_repeats, mean_imagefolders=mean_imagefolders,
+ _phase=phase, _derivative=derivative)
+
+ for angle, repeat_mags in magtraces.items():
+
+ if X is None or yscaler is None:
+ X, yscaler = get_x_yscaler(repeat_mags[0])
+
+ if progressive_colors:
+ if gray_repeats:
+ cmap = matplotlib.cm.get_cmap('binary', len(repeat_mags))
+ else:
+ cmap = matplotlib.cm.get_cmap('viridis', len(repeat_mags))
+
+ for _i_repeat, mag_rep_i in enumerate(repeat_mags):
+
+ N_repeats += 1
+
+ if i_repeat is not None and _i_repeat != i_repeat:
+ continue
+
+
+ if label:
+ if eye is None:
+ eyename = '+'.join(manalyser.eyes)
+ else:
+ eyename = eye
+ _label = label.replace('EYE', eyename).replace('ANGLE', str(angle)).replace('IREPEAT', str(_i_repeat))
+ else:
+ _label = ''
+
+ Y = yscaler * mag_rep_i
+
+ if color_eyes:
+ ax.plot(X, Y, label=_label, color=EYE_COLORS.get(eye, 'green'))
+ elif progressive_colors:
+ color = cmap(_i_repeat)
+ ax.plot(X, Y, label=_label, color=color)
+ else:
+ if gray_repeats:
+ ax.plot(X, Y, label=_label, color='gray')
+ else:
+ ax.plot(X, Y, label=_label)
+
+ traces.append(Y)
+
+ meantrace = np.mean(traces, axis=0)
+ if show_mean:
+ ax.plot(X, meantrace, label='mean-of-all', color='black', lw=3)
+
+ if show_std:
+ ax.plot(X, meantrace+np.std(traces, axis=0), '--', label='std-of-mean-of-all', color='black', lw=2)
+ ax.plot(X, meantrace-np.std(traces, axis=0), '--', color='black', lw=2)
+
+ if label and show_label:
+ ax.legend(fontsize='xx-small', labelspacing=0.1, ncol=int(len(traces)/10)+1, loc='upper left')
+
+
+ if milliseconds:
+ ax.set_xlabel('Time (ms)')
+ else:
+ ax.set_xlabel('Frame')
+
+ if microns:
+ ax.set_ylabel('Displacement magnitude (µm)')
+ else:
+ ax.set_ylabel('Displacement magnitude (pixels)')
+
+
+ ax.spines['right'].set_visible(False)
+ ax.spines['top'].set_visible(False)
+
+ return ax, traces, N_repeats
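+
+# Usage sketch for plot_1d_magnitude (hypothetical analyser and folder):
+#
+#   ax, traces, N_repeats = plot_1d_magnitude(analyser,
+#           image_folder='pos(0, 0)', mean_repeats=True, microns=True)
+#   plt.show()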
+
+
+def plot_xy_trajectory(manalysers, wanted_imagefolders=None, i_repeat=None,
+ mean_repeats=True,
+ ax=None):
+ '''
+ A (x,y) 2D plot of the movement trajectory where time is encoded
+ in color.
+
+ manalysers : list
+ wanted_imagefolders : dict
+ {specimen_name: [image_folder1, ...]}
+ i_repeat : int
+ mean_repeats : bool
+ '''
+ if ax is None:
+ fig, ax = plt.subplots()
+
+ xys = []
+
+
+ for manalyser in manalysers:
+
+ if wanted_imagefolders:
+ image_folders = wanted_imagefolders[manalyser.name]
+ else:
+ image_folders = manalyser.list_imagefolders()
+
+ for image_folder in image_folders:
+
+ movement_data = manalyser.get_movements_from_folder(image_folder)
+
+ for eye, movements in movement_data.items():
+
+ N_repeats = len(movements)
+ X = [[-x for x in movements[i]['x']] for i in range(N_repeats) if not (
+ (i_repeat is not None) and i_repeat != i)]
+
+ Y = [movements[i]['y'] for i in range(N_repeats) if not (
+ (i_repeat is not None) and i_repeat != i)]
+
+ if mean_repeats:
+ X = [np.mean(X, axis=0)]
+ Y = [np.mean(Y, axis=0)]
+
+ for x, y in zip(X, Y):
+ N = len(x)
+ cmap = matplotlib.cm.get_cmap('tab20', N)
+ for i_point in range(1, N):
+ ax.plot([x[i_point-1], x[i_point]], [y[i_point-1], y[i_point]], color=cmap((i_point-1)/(N-1)))
+
+ ax.scatter(x[0], y[0], color='black')
+ ax.scatter(x[-1], y[-1], color='gray')
+
+ xys.append([x, y])
+
+
+ # Colormap
+ if xys:
+ if getattr(ax, 'xy_colorbar', None) is None:
+ time = [i for i in range(N)]
+ sm = matplotlib.cm.ScalarMappable(cmap=cmap)
+ sm.set_array(time)
+
+ fig = ax.get_figure()
+ ax.xy_colorbar = fig.colorbar(sm, ticks=time, boundaries=time, ax=ax, orientation='horizontal')
+ ax.xy_colorbar.set_label('Frame')
+ else:
+ pass
+
+ ax.set_xlabel('Displacement in X (pixels)')
+ ax.set_ylabel('Displacement in Y (pixels)')
+ ax.spines['right'].set_visible(False)
+ ax.spines['top'].set_visible(False)
+
+ ax.yaxis.set_ticks_position('left')
+ ax.xaxis.set_ticks_position('bottom')
+ ax.set_aspect('equal', adjustable='box')
+
+ return ax, xys
+
+
+def plot_magnitude_probability(manalysers, wanted_imagefolders=None, ax=None,
+ mean_repeats=False, mean_imagefolders=False,
+ microns=True, milliseconds=True, xpoints=100):
+ '''
+    With many analysers and image folders, calculate the probability of the
+    magnitude response (2D histogram).
+
+ Arguments
+ ---------
+ manalysers : list of objs
+ Analyser objects
+ wanted_imagefolders : None or dict
+ Keys are analyser names, items lists of image folder names
+        to include. If None, use all image folders.
+ '''
+
+    for logarithmic in [False, True]:
+
+ N_real = None
+
+ all_magtraces = []
+
+ for manalyser in manalysers:
+ for eye in manalyser.eyes:
+
+ if wanted_imagefolders:
+ image_folders = wanted_imagefolders[manalyser.name]
+ else:
+ image_folders = manalyser.list_imagefolders()
+
+ for image_folder in image_folders:
+ magtraces = manalyser.get_magnitude_traces(eye, image_folder=image_folder,
+ mean_repeats=mean_repeats, mean_imagefolders=mean_imagefolders)
+
+ magtraces = list(magtraces.values())
+
+ if len(magtraces) == 0:
+ continue
+
+ magtraces = magtraces[0]
+
+ if N_real is None:
+ N_real = len(magtraces[0])
+ elif N_real != len(magtraces[0]):
+                        raise ValueError('All magtraces are not the same length')
+
+ # Interpolate
+ magtraces = [np.interp(np.linspace(0,1,xpoints), np.linspace(0,1,len(magtrace)), magtrace) for magtrace in magtraces]
+
+ all_magtraces.extend(magtraces)
+
+        all_magtraces = np.array(all_magtraces, dtype=float)
+
+ limits = (min(0, np.min(all_magtraces)), np.max(all_magtraces)*1.5)
+ tmax = len(all_magtraces[0]) * (N_real/xpoints)
+
+
+ #if ax is None:
+ fig, ax = plt.subplots()
+
+ if microns:
+ pixel_size = manalyser.get_pixel_size(image_folder)
+ limits = [z*pixel_size for z in limits]
+ ax.set_ylabel('Displacement (µm)')
+
+ if milliseconds:
+ tmax *= 1000 / manalyser.get_imaging_frequency(image_folder)
+ ax.set_xlabel('Time (ms)')
+ else:
+ ax.set_xlabel('i_frame')
+
+ prob = []
+
+ for points_t in all_magtraces.T:
+ hist = np.histogram(points_t, range=limits, bins=25)[0]
+ prob.append(hist/np.sum(hist))
+
+        if logarithmic:
+ norm = matplotlib.colors.LogNorm()
+ else:
+ norm = None
+
+ im = ax.imshow(np.array(prob).T, origin='lower', cmap='binary',
+ norm=norm,
+ extent=[0, tmax,limits[0], limits[1]],
+ aspect='auto')# interpolation='lanczos')
+
+ divider = make_axes_locatable(ax)
+ cax = divider.append_axes('right', size='5%', pad=0.05)
+ ax.figure.colorbar(im, cax)
+
+ return ax, prob
+
+
+def _set_analyser_attributes(analyser, skip_none=True, raise_errors=False, **kwargs):
+    '''
+    Sets attributes to the analyser.
+
+    If raise_errors is True, raises AttributeError when the analyser
+    does not have the attribute beforehand.
+    '''
+    for key, value in kwargs.items():
+        if value is not None or skip_none == False:
+            if hasattr(analyser, key):
+                setattr(analyser, key, value)
+            elif raise_errors:
+                raise AttributeError('{} has no attribute {} prior setting'.format(analyser, key))
+
+
+def plot_2d_vectormap(manalyser,
+ ax=None):
+ '''
+ Plots a 2-dimensional vector map.
+
+ Arguments
+ ----------
+ manalyser : object
+ Instance of MAnalyser class or MAverager class (having get2DVectors method)
+ ax : object
+ Matplotlib axes object
+ '''
+
+ if not ax:
+ fig, ax = plt.subplots(figsize=(8,16))
+
+ hmin = 99999
+ hmax = -99999
+ vmin = 99999
+ vmax = -99999
+
+ for color, eye in zip(['red', 'blue'], ['left', 'right']):
+ angles, X, Y = manalyser.get_2d_vectors(eye,
+ mirror_horizontal=False, mirror_pitch=False)
+ for angle, x, y in zip(angles, X, Y):
+
+ horizontal, pitch = angle
+
+ hmin = min(hmin, horizontal)
+ hmax = max(hmax, horizontal)
+ vmin = min(vmin, pitch)
+ vmax = max(vmax, pitch)
+
+            # If the magnitude is too small, it's too unreliable to judge the orientation, so skip over
+ #movement_magnitude = math.sqrt(x**2 + y**2)
+ #if movement_magnitude < 2:
+ # continue
+ # Scale all vectors to the same length
+ #scaler = math.sqrt(x**2 + y**2) / 5 #/ 3
+ #scaler = 0
+ #if scaler != 0:
+ # x /= scaler
+ # y /= scaler /2.4 # FIXME
+ scaler = np.linalg.norm([x, y]) / 8
+ x /= scaler
+ y /= scaler
+
+ #ar = matplotlib.patches.Arrow(horizontal, pitch, xc, yc)
+ ar = matplotlib.patches.FancyArrowPatch((horizontal, pitch),
+ (horizontal-x, pitch+y), mutation_scale=10,
+ color=color, picker=True)
+ #fig.canvas.mpl_connect('pick_event', self.on_pick)
+ ax.add_patch(ar)
+
+ ax.scatter(horizontal, pitch, marker='x', color='gray')
+
+ ax.set_xlabel('Horizontal rotation (degrees)')
+ ax.set_ylabel('Vertical rotation (degrees)')
+
+    # Fixed plot limits (override the data-derived extents computed above)
+    hmin = -60
+    hmax = 40
+    vmin = -90
+    vmax = 120
+
+ ax.set_xlim(hmin-10, hmax+10)
+ ax.set_ylim(vmin-10, vmax+10)
+ ax.set_aspect('equal', adjustable='box')
+
+ for key in ['top', 'right']:
+ ax.spines[key].set_visible(False)
+
+
+@extend_keywords(vector_plot)
+def plot_3d_vectormap(manalyser, arrow_rotations = [0],
+ rhabdomeres=False, repeats_separately=False, vertical_hardborder=False,
+ elev=None, azim=None,
+ pitch_rot=None, roll_rot=None, yaw_rot=None,
+ animation=None, animation_type=None, animation_variable=None, i_frame=0,
+ ax=None, **kwargs):
+ '''
+    Plot a 3D vectormap where the arrows point in the movement (MAnalyser,
+    FAnalyser) or feature (OAnalyser) directions.
+
+ Arguments
+ ---------
+ manalyser : object
+ Analyser object
+ arrow_rotations : list of int
+ Rotation of arrows in the plane of the arrows (ie. radially).
+ rhabdomeres : bool
+ If True, draw rhabdomere pattern where the arrows are.
+    repeats_separately : bool
+        If True, and repeat data exists, draw each repeat
+        with its own arrow.
+    vertical_hardborder : bool
+        For MAverager, interpolate dorsal and ventral separately
+ elev, azim : float or None
+ Plot elevation and azim
+ animation : None
+ animation_type : None
+ animation_variable : None
+ i_frame : int
+ ax : object
+        Matplotlib Axes object. If not specified, creates a new figure.
+ kwargs : dict
+ Variable keyword arguments are passed to vector_plot function.
+
+ Returns
+ -------
+ ax : object
+ Matplotlib Axes object
+ vectors : list of objects
+ List of arrow artist drawn on the figure.
+ '''
+ colors = EYE_COLORS
+
+ # Work out MAnalyser type
+ manalyser_type = 'MAnalyser'
+
+ if manalyser.__class__.__name__ == 'OAnalyser' and len(arrow_rotations) == 1 and arrow_rotations[0] == 0:
+ manalyser_type = 'OAnalyser'
+ elif manalyser.__class__.__name__ == 'MAverager':
+ if all([partanalyser.__class__.__name__ == 'OAnalyser' for partanalyser in manalyser.manalysers]):
+ manalyser_type = 'OAnalyser'
+ elif manalyser.__class__.__name__ == 'FAnalyser':
+ colors = ['darkviolet']*5
+ if animation_type != 'rotate_arrows':
+ i_frame = 0
+
+        _set_analyser_attributes(manalyser, pitch_rot=pitch_rot,
+                roll_rot=roll_rot, yaw_rot=yaw_rot)
+
+
+ if manalyser_type == 'OAnalyser':
+ colors = REPEAT_COLORS
+ i_frame = 0
+
+ # OAnalyser specific for Drosophila; Assuming that R3-R6 line is
+ # analysed, let's also draw the line from R3 to R1.
+ if arrow_rotations[0] == 0 and len(arrow_rotations) == 1:
+ arrow_rotations.append(29)
+
+ if ax is None:
+ fig = plt.figure(figsize=DEFAULT_FIGSIZE)
+ ax = fig.add_subplot(111, projection='3d')
+
+ vectors = {}
+
+ original_rotation = manalyser.vector_rotation
+
+ if animation_type == 'rotate_plot':
+ elev, azim = animation_variable
+
+ if azim is not None and elev is not None:
+ camerapos = (elev, azim)
+ else:
+ camerapos = None
+
+ # For OAnalyser, when rhabdomeres is set True,
+ # plot the rhabdomeres also
+ if manalyser_type == 'OAnalyser' and rhabdomeres:
+ manalyser.vector_rotation = 0
+ for eye in manalyser.eyes:
+
+ vectors_3d = manalyser.get_3d_vectors(eye, correct_level=True,
+ repeats_separately=repeats_separately,
+ strict=True, vertical_hardborder=vertical_hardborder)
+
+ if eye == 'left':
+ mirror_lr = True
+ else:
+ mirror_lr = False
+
+ for point, vector in zip(*vectors_3d):
+ add_rhabdomeres(ax, *point, *vector,
+ mirror_lr=mirror_lr, mirror_bf='auto',
+ camerapos=camerapos,
+ resolution=9, edgecolor=None, facecolor='gray')
+
+ for i_rotation, rotation in enumerate(arrow_rotations):
+
+ for eye in manalyser.eyes:
+ if isinstance(colors, dict):
+ colr = colors[eye]
+ elif isinstance(colors, list):
+ colr = colors[i_rotation]
+
+            # Set arrow/vector rotation
+            if rotation is not None:
+                manalyser.vector_rotation = rotation
+
+ vectors_3d = manalyser.get_3d_vectors(eye, correct_level=True,
+ repeats_separately=repeats_separately,
+ strict=True, vertical_hardborder=vertical_hardborder)
+
+ if manalyser_type == 'OAnalyser' and rhabdomeres:
+
+ for point, vector in zip(*vectors_3d):
+ add_line(ax, *point, *vector, color=REPEAT_COLORS[i_rotation])
+ else:
+ vector_plot(ax, *vectors_3d, color=colr,
+ **kwargs
+ )
+
+ vectors[eye] = vectors_3d
+
+ manalyser.vector_rotation = original_rotation
+
+
+ ax.set_xlim3d((-1,1))
+ ax.set_ylim3d((-1,1))
+ ax.set_zlim3d((-1,1))
+ ax.set_box_aspect((1, 1, 1))
+
+ if azim is None and elev is None:
+ ax.view_init(elev=DEFAULT_ELEV, azim=DEFAULT_AZIM)
+ else:
+ ax.view_init(elev=elev, azim=azim)
+
+
+ return ax, vectors
+
+
+def plot_3d_differencemap(manalyser1, manalyser2, ax=None, stats_map=False,
+ elev=DEFAULT_ELEV, azim=DEFAULT_AZIM, colinear=True, direction=False,
+ colorbar=True, colorbar_text=True, colorbar_ax=None, reverse_errors=False,
+ colorbar_text_positions=[[1.1,0.95,'left', 'top'],[1.1,0.5,'left', 'center'],[1.1,0.05,'left', 'bottom']],
+ i_frame=0, arrow_rotations=[0], pitch_rot=None, yaw_rot=None, roll_rot=None,
+ hide_axes=False, hide_text=False, guidance=False, **kwargs):
+ '''
+    Plots a 3D heatmap presenting the difference in the vector orientations
+    for two analyser objects, by putting the get_3d_vectors of both analysers
+    to field_error and making a surface plot.
+
+ Notes:
+ - Errors (differences) are calculated at manalyser1's points.
+ - arrow_rotations, pitch_rot only affects manalyser2
+
+ manalyser1, manalyser2 : object
+ Analyser objects to plot the difference with 3d vectors
+ ax : object or None
+ Matplotlib Axes object
+ stats_map : bool
+ If true, plot p-vals.
+ colorbar : bool
+ Whether to add the colors explaining colorbar
+    colorbar_text : bool
+        Whether to add the text annotations on the colorbar
+ colorbar_ax : object
+ Optional Axes where to put the colorbar
+ arrow_rotations : list
+ Arrow rotations, for the second manalyser
+ i_frame : int
+ Neglected here
+ hide_axes : bool
+ hide_text : bool
+ guidance : bool
+ '''
+
+ if ax is None:
+ fig = plt.figure(figsize=DEFAULT_FIGSIZE)
+ ax = fig.add_subplot(111, projection='3d')
+ ax.set_xlabel('x')
+ ax.set_ylabel('y')
+ ax.set_zlabel('z')
+
+ if arrow_rotations:
+ original_rotation = manalyser2.vector_rotation
+ manalyser2.vector_rotation = arrow_rotations[0]
+
+ if pitch_rot is not None:
+ manalyser2.pitch_rot = pitch_rot
+ if yaw_rot is not None:
+ manalyser2.yaw_rot = yaw_rot
+ if roll_rot is not None:
+ manalyser2.roll_rot = roll_rot
+
+ if guidance:
+ plot_guidance(ax, camerapos=(ax.elev, ax.azim), hide_text=hide_text)
+
+ if hide_axes:
+ ax.set_axis_off()
+
+ if hide_text:
+ ax.axes.xaxis.set_ticklabels([])
+ ax.axes.yaxis.set_ticklabels([])
+ ax.axes.zaxis.set_ticklabels([])
+ ax.set_xlabel('')
+ ax.set_ylabel('')
+ ax.set_zlabel('')
+ else:
+ ax.set_xlabel('x')
+ ax.set_ylabel('y')
+ ax.set_zlabel('z')
+
+
+ all_errors = []
+ for eye in manalyser1.eyes:
+ vectors = []
+ points = []
+
+ for manalyser in [manalyser1, manalyser2]:
+
+ vectors_3d = manalyser.get_3d_vectors(eye, correct_level=True,
+ repeats_separately=stats_map,
+ strict=True, vertical_hardborder=True)
+
+ points.append(vectors_3d[0])
+ vectors.append(vectors_3d[1])
+
+ # Errors at the points[0]
+ if stats_map:
+ points, errors = field_pvals(points[0], vectors[0], points[1], vectors[1], colinear=colinear)
+ else:
+ # regular difference map
+ errors = field_error(points[0], vectors[0], points[1], vectors[1],
+ colinear=colinear, direction=direction)
+
+ if reverse_errors:
+ errors = 1-errors
+
+ all_errors.append(errors)
+
+ if eye=='left':
+ all_phi_points = [np.linspace(math.pi/2, 3*math.pi/2, 50)]
+ else:
+ all_phi_points = [np.linspace(-math.pi/2, math.pi/2, 50)]
+ #all_phi_points = [np.linspace(0, math.pi/2, 25), np.linspace(3*math.pi/2, 2*math.pi,25)]
+
+ if direction:
+ colormap = 'own-diverge'
+ else:
+ colormap = 'own'
+
+ print('{} eye, mean error {}'.format(eye, np.mean(errors)))
+
+ for phi_points in all_phi_points:
+ m = surface_plot(ax, points[0], errors, phi_points=phi_points,
+ colormap=colormap)
+
+ errors = np.concatenate(all_errors)
+
+ ax.view_init(elev=elev, azim=azim)
+
+ # COLORBAR
+ cax = getattr(ax, 'differencemap_colorbar_ax', None)
+ colorbar_obj = getattr(ax, 'differencemap_colorbar', None)
+ if colorbar and (cax is None or colorbar_obj is None):
+
+ if colorbar_ax is None:
+ cbox = ax.get_position()
+ cbox.x1 -= abs(cbox.x1 - cbox.x0)/10
+ cbox.x0 += abs((cbox.x1 - cbox.x0))/1.1
+ cbox.y0 -= 0.18*abs(cbox.y1-cbox.y0)
+ cax = ax.figure.add_axes(cbox)
+ else:
+ cax = colorbar_ax
+
+ ax.differencemap_colorbar_ax = cax
+ ax.differencemap_colorbar = plt.colorbar(m, cax)
+
+ # COLORBAR INFO TEXT
+ if colorbar_text:
+ #text_x = 1+0.1
+ #ha = 'left'
+ if direction:
+ cax.text(colorbar_text_positions[0][0], colorbar_text_positions[0][1],
+ 'Counterclockwise +90',
+ ha=colorbar_text_positions[0][2], va=colorbar_text_positions[0][3],
+ transform=cax.transAxes)
+ cax.text(colorbar_text_positions[1][0], colorbar_text_positions[1][1],
+ 'Perpendicular',
+ ha=colorbar_text_positions[1][2], va=colorbar_text_positions[1][3],
+ transform=cax.transAxes)
+ cax.text(colorbar_text_positions[2][0], colorbar_text_positions[2][1],
+ 'Clockwise -90',
+ ha=colorbar_text_positions[2][2], va=colorbar_text_positions[2][3],
+ transform=cax.transAxes)
+ elif colinear:
+ cax.text(colorbar_text_positions[0][0], colorbar_text_positions[0][1],
+ 'Collinear',
+ ha=colorbar_text_positions[0][2], va=colorbar_text_positions[0][3],
+ transform=cax.transAxes)
+ cax.text(colorbar_text_positions[2][0], colorbar_text_positions[2][1],
+ 'Perpendicular',
+ ha=colorbar_text_positions[2][2], va=colorbar_text_positions[2][3],
+ transform=cax.transAxes)
+ else:
+ cax.text(colorbar_text_positions[0][0], colorbar_text_positions[0][1],
+ 'Matching',
+ ha=colorbar_text_positions[0][2], va=colorbar_text_positions[0][3],
+ transform=cax.transAxes)
+ cax.text(colorbar_text_positions[1][0], colorbar_text_positions[1][1],
+ 'Perpendicular',
+ ha=colorbar_text_positions[1][2], va=colorbar_text_positions[1][3],
+ transform=cax.transAxes)
+ cax.text(colorbar_text_positions[2][0], colorbar_text_positions[2][1],
+ 'Opposing',
+ ha=colorbar_text_positions[2][2], va=colorbar_text_positions[2][3],
+ transform=cax.transAxes)
+
+ cax.set_axis_off()
+ elif colorbar is False and colorbar_obj is not None:
+ ax.differencemap_colorbar.remove()
+ ax.differencemap_colorbar = None
+
+ if arrow_rotations:
+ manalyser2.vector_rotation = original_rotation
+
+ ax.set_xlim3d((-1,1))
+ ax.set_ylim3d((-1,1))
+ ax.set_zlim3d((-1,1))
+ ax.set_box_aspect((1, 1, 1))
+
+ return ax, errors
+
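+# Minimal usage sketch for plot_3d_differencemap (added for illustration; the
+# two analyser objects are assumed to come from elsewhere in gonioanalysis):
+#
+#     fig = plt.figure()
+#     ax = fig.add_subplot(111, projection='3d')
+#     ax, errors = plot_3d_differencemap(manalyser1, manalyser2, ax=ax,
+#             colinear=True, colorbar=True)
+#     print('Mean difference', np.mean(errors))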
+
+def compare_3d_vectormaps(manalyser1, manalyser2, axes=None,
+ illustrate=True, total_error=True, compact=False,
+ animation=None, animation_type=None, animation_variable=None,
+ optimal_ranges=None, pulsation_length=1, biphasic=False,
+ kwargs1={}, kwargs2={}, kwargsD={}, **kwargs):
+ '''
+ Calls get 3d vectors for both analysers.
+ Arrow rotation option only affects manalyser2
+
+ Note! If total_error is true, sets attributes pupil_compare_errors
+ and pupil_compare_rotations to the last ax in axes
+
+ manalyser1,manalyser2 : objects
+ Analyser objects
+ axes : list of objects
+        List of Matplotlib Axes objects. Expected base length == 3, which
+        is modified by the options illustrate, total_error and compact.
+
+ illustrate : bool
+        Whether to plot the illustrative plot. Increases the axes requirement by 1
+    total_error: bool
+        Whether to plot the total error plot. Increases the axes requirement by 1
+    compact : bool
+        Join the vector plots in one. Decreases the axes requirement by 1
+
+    animation : list
+        For total error x-axis limits
+    kwargs1,kwargs2 : dict
+        Keyword arguments to pass to `plot_3d_vectormap`
+    kwargsD : dict
+        Keyword arguments to pass to `plot_3d_differencemap`
+
+ Returns [axes]
+ '''
+
+
+ optimal=False
+
+ if axes is None:
+ fig = plt.figure(figsize=(DEFAULT_FIGSIZE[0]*2,DEFAULT_FIGSIZE[1]*2))
+ axes = []
+
+ N_plots = 3
+
+ if compact:
+ N_plots -= 1
+ if total_error:
+ N_plots += 1
+ if illustrate:
+ N_plots += 1
+
+ n_rows = 1
+ n_cols = N_plots
+
+ for i_plot in range(N_plots):
+ axes.append(fig.add_subplot(n_rows, n_cols, i_plot+1, projection='3d'))
+
+
+
+    # Merge the shared kwargs into the per-analyser kwargs so that any
+    # explicitly passed kwargs1/kwargs2 are not silently discarded
+    kwargs1 = {**kwargs, **kwargs1}
+    kwargs2 = {**kwargs, **kwargs2}
+
+ if animation_type == 'rotate_arrows':
+ kwargs2['arrow_rotations'] = [animation_variable]
+ kwargsD['colorbar_text_positions'] = [[0.5,1.03,'center', 'bottom'],
+ [1.1,0.5,'left', 'center'],
+ [0.5,-0.03,'center', 'top']]
+
+ if manalyser1.__class__.__name__ == 'FAnalyser':
+ manalyser1.constant_points = True
+
+ elif animation_type in ['pitch_rot', 'roll_rot', 'yaw_rot']:
+ kwargs2[animation_type] = animation_variable
+ kwargsD['colinear'] = False
+ elif animation_type == 'rotate_plot':
+ kwargs1['elev'] = animation_variable[0]
+ kwargs1['azim'] = animation_variable[1]
+ kwargs2['elev'] = animation_variable[0]
+ kwargs2['azim'] = animation_variable[1]
+
+ # set 10 deg pitch for flow
+ for manalyser in [manalyser1, manalyser2]:
+ if manalyser.manalysers[0].__class__.__name__ == 'FAnalyser' and animation_type != 'pitch_rot':
+ if manalyser == manalyser1:
+ kwargs1['pitch_rot'] = 10
+ if manalyser == manalyser2:
+ kwargs2['pitch_rot'] = 10
+
+ manalyser.pitch_rot = 10
+
+ if manalyser2.__class__.__name__ == 'MAverage' and manalyser2.manalysers[0].__class__.__name__ == 'OAnalyser':
+ kwargs2['arrows'] = False
+
+ iax = 0
+ plot_3d_vectormap(manalyser1, animation_type=animation_type, ax=axes[iax], **kwargs1)
+
+ if not compact:
+ iax += 1
+ plot_3d_vectormap(manalyser2, animation_type=animation_type, ax=axes[iax], **kwargs2)
+
+
+ if biphasic:
+ # Difference with the slow phase
+ iax += 1
+ kwargsDr = kwargsD.copy()
+ kwargsDr['colorbar'] = False
+ daxr, reverse_errors = plot_3d_differencemap(manalyser1, manalyser2,
+ ax=axes[iax], reverse_errors=True, **kwargsDr, **kwargs2)
+
+
+ iax += 1
+ dax, errors = plot_3d_differencemap(manalyser1, manalyser2,
+ ax=axes[iax], **kwargsD, **kwargs2)
+
+ if illustrate:
+
+ # Check if we are at the optimal
+ if optimal_ranges:
+ for optimal_range in optimal_ranges:
+                if optimal_range[0] < animation_variable < optimal_range[1]:
+                    optimal = True
+                    break
+
+ iax += 1
+ ax = axes[iax]
+
+ if animation_type == 'rotate_arrows':
+
+ upscale = 4
+ image = matplotlib.image.imread(os.path.join(CODE_ROOTDIR, 'images', 'dpp.tif'))
+ image = PIL.Image.fromarray(image)
+ ow,oh = image.size
+ image = image.resize((int(ow*upscale), int(oh*upscale)), PIL.Image.NEAREST)
+ image = np.array(image)
+
+ # Vector p0,rp1 to point the current axis rotation
+ R6R3line = 40
+ rot = -math.radians( R6R3line + animation_variable )
+ p0 = [int(image.shape[0]/2), int(image.shape[1]/2)]
+ p1 = np.array([p0[0],0])/2
+ rp1 = np.array([p1[0]*math.cos(rot)-p1[1]*math.sin(rot), p1[0]*math.sin(rot)+p1[1]*math.cos(rot)])
+
+ # Optimal
+ orot = -math.radians( R6R3line + np.mean(optimal_ranges[0][0:2]))
+ orp1 = np.array([p1[0]*math.cos(orot)-p1[1]*math.sin(orot), p1[0]*math.sin(orot)+p1[1]*math.cos(orot)])
+
+ # Make image pulsate
+            sx, sy = (0, 0)
+            r = (0, 0)
+ if manalyser1.manalysers[0].__class__.__name__ == 'MAnalyser':
+ r = image.shape
+ r = [int(pr) for pr in [r[0]/20, r[1]/20, r[0]-r[0]/10, r[1]-r[1]/10]]
+
+                # FIXME the hard-coded 0.8 should match the pulsation's low value
+ sx = int(upscale * (orp1[0]) * (pulsation_length-0.8) / 10)
+ sy = int(upscale * (orp1[1]) * (pulsation_length-0.8) / 10)
+
+ #if not optimal:
+ # sx = 0
+ # sy = 0
+
+            print('Animation pulsation sx {} sy {}, imshape {}'.format(sx, sy, image.shape))
+
+ #image[r[1]+sy:r[1]+r[3]+sy, r[0]+sx:r[0]+r[2]+sx] = image[r[1]:r[1]+r[3], r[0]:r[0]+r[2]]
+ image = image[r[1]-sy:r[1]+r[3]-sy, r[0]-sx:r[0]+r[2]-sx]
+ ax.imshow(image, cmap='gray')
+
+ # R3-R6 dotted white line
+ if manalyser1.manalysers[0].__class__.__name__ == 'FAnalyser':
+ rot36 = -math.radians( R6R3line )
+ rp1_36 = np.array([p1[0]*math.cos(rot36)-p1[1]*math.sin(rot36), p1[0]*math.sin(rot36)+p1[1]*math.cos(rot36)])
+ ax.axline((p0[0]-r[0], p0[1]-r[1]), (p0[0]+rp1_36[0]-r[0], p0[1]+rp1_36[1]-r[1]), ls='--', color='white', lw=0.5)
+
+ ax.axline((p0[0]-r[0], p0[1]-r[1]), (p0[0]+rp1[0]-r[0], p0[1]+rp1[1]-r[1]), color=REPEAT_COLORS[0])
+
+ # Rhabdomere locations, dpp.tiff specific
+ rhabdomere_locs = [(x*upscale+sx-r[0],y*upscale+sy-r[1]) for x,y in [(74,60),(68,79),(58,101),(80,94),(96,87),(100,66),(85,74)]]
+ for i_rhabdomere, loc in enumerate(rhabdomere_locs):
+ ax.text(*loc, 'R'+str(i_rhabdomere+1), color=(0.2,0.2,0.2), ha='center', va='center', fontsize=10)
+
+
+ elif animation_type == 'pitch_rot':
+
+ image = matplotlib.image.imread(os.path.join(CODE_ROOTDIR, 'images', 'from_mikko_annotated.png'))
+ ax.imshow(rotate(image, animation_variable, mode='nearest', reshape=False))
+ plot_2d_opticflow(ax, 'side')
+
+ elif animation_type == 'yaw_rot':
+ image = matplotlib.image.imread(os.path.join(CODE_ROOTDIR, 'images', 'rotation_yaw.png'))
+ ax.imshow(rotate(image, animation_variable, mode='nearest', reshape=False))
+ plot_2d_opticflow(ax, 'side')
+
+ elif animation_type == 'roll_rot':
+ image = matplotlib.image.imread(os.path.join(CODE_ROOTDIR, 'images', 'rotation_roll.png'))
+ ax.imshow(rotate(image, animation_variable, mode='nearest', reshape=False))
+ plot_2d_opticflow(ax, 'outofplane')
+
+
+ if optimal_ranges:
+ for optimal_range in optimal_ranges:
+ if optimal_range[0] < animation_variable < optimal_range[1]:
+ rect = matplotlib.patches.Rectangle((0,0), 1, 1, transform=ax.transAxes, fill=False,
+ color='yellow', linewidth=8)
+
+ ax.add_patch(rect)
+ ax.text(0.5, 0.9, optimal_range[2], ha='center', va='top', color='gold', transform=ax.transAxes, fontsize=12)
+                    optimal = True
+                    break
+
+ if manalyser1.manalysers[0].__class__.__name__ == 'FAnalyser' and manalyser2.manalysers[0].__class__.__name__ == 'OAnalyser':
+
+
+ if getattr(axes[0], 'extra_illustrate_ax', None) is None:
+
+ tmp_ax = axes[0].figure.add_subplot(3,4,8)
+ tmp_ax.set_axis_off()
+ cbox = tmp_ax.get_position()
+ w = abs(cbox.x1 - cbox.x0)
+ cbox.x0 += 0.25*w
+ cbox.x1 += 0.25*w
+ axes[0].extra_illustrate_ax = axes[0].figure.add_axes(cbox)
+ axes[0].extra_illustrate_ax.set_frame_on(False)
+ axes[0].extra_illustrate_ax.set_axis_off()
+ else:
+ axes[0].extra_illustrate_ax.clear()
+ axes[0].extra_illustrate_ax.set_axis_off()
+ axes[0].extra_illustrate_ax.set_frame_on(False)
+
+ image = matplotlib.image.imread(os.path.join(CODE_ROOTDIR, 'images', 'from_mikko_annotated.png'))
+ axes[0].extra_illustrate_ax.imshow(rotate(image, manalyser1.pitch_rot, mode='nearest', reshape=False))
+
+ rect = matplotlib.patches.Rectangle((0.9+0.05,0), 0.02, 1, transform=axes[0].extra_illustrate_ax.transAxes, fill=True,
+ color='yellow', linewidth=1)
+
+ axes[0].extra_illustrate_ax.add_patch(rect)
+ # Optic flow arrows
+ arrows = [np.array((1.2,ik))*len(image) for ik in np.arange(0.1,0.91,0.1)]
+ for x, y in arrows:
+ axes[0].extra_illustrate_ax.arrow(x, y, -0.1*len(image)*pulsation_length/2., 0, width=0.01*len(image), color='darkviolet')
+
+
+ if total_error:
+ iax += 1
+
+
+
+ ax = axes[iax]
+ if getattr(ax, 'pupil_compare_errors', None) is None:
+ ax.pupil_compare_errors = []
+ ax.pupil_compare_rotations = []
+
+ ax.pupil_compare_errors.append(np.mean(errors))
+ ax.pupil_compare_rotations.append( animation_variable )
+
+ ax.plot( ax.pupil_compare_rotations, 1-np.array(ax.pupil_compare_errors), color='black',
+ label='Fast phase')
+ ax.scatter( ax.pupil_compare_rotations[-1], 1-ax.pupil_compare_errors[-1], color='black' )
+
+
+ print('Minimum and maximum errors so far: {} (min, at angle {}), {} (max, at angle {})'.format(
+ np.min(ax.pupil_compare_errors), ax.pupil_compare_rotations[np.argmin(ax.pupil_compare_errors)],
+ np.max(ax.pupil_compare_errors), ax.pupil_compare_rotations[np.argmax(ax.pupil_compare_errors)]))
+
+ ax.spines['right'].set_visible(False)
+ ax.spines['top'].set_visible(False)
+
+ ax.set_xlabel('Degrees')
+ ax.set_ylabel('Mean error')
+
+ if animation is not None:
+ ax.set_xlim(np.min(animation), np.max(animation))
+ ax.set_ylim(0,1)
+ ax.set_yticks([0, 0.5, 1])
+
+ if optimal:
+ formatting = '{:.1f}'
+ else:
+ formatting = '{:.0f}'
+
+ if animation_type == 'rotate_arrows':
+ text = 'Rotation from R3-R6 line\n{} degrees'
+ elif animation_type in ['pitch_rot', 'yaw_rot', 'roll_rot']:
+ text = 'Head tilt {} degrees'
+ else:
+ text = 'Animation variable {}'
+
+ if animation_variable is not None:
+ text = text.format(formatting).format(float(animation_variable))
+ ax.text(0.1, 1, text, transform=ax.transAxes, va='bottom', ha='left',fontsize=12)
+
+ if animation:
+ if np.min(animation) < -100 and np.max(animation) > 100:
+ ax.set_xticks([-90,0,90])
+                ax.set_xticklabels([r'-90$^\circ$', r'0$^\circ$', r'90$^\circ$'])
+ else:
+ ax.set_xticks([-45, 0, 45])
+                ax.set_xticklabels([r'-45$^\circ$', r'0$^\circ$', r'45$^\circ$'])
+
+ if biphasic:
+ if getattr(ax, 'pupil_compare_reverse_errors', None) is None:
+ ax.pupil_compare_reverse_errors = []
+
+ ax.pupil_compare_reverse_errors.append(np.mean(reverse_errors))
+
+ ax.plot( ax.pupil_compare_rotations, 1-np.array(ax.pupil_compare_reverse_errors), color='gray',
+ label='Slower phase')
+ ax.scatter( ax.pupil_compare_rotations[-1], 1-ax.pupil_compare_reverse_errors[-1], color='gray' )
+
+ ax.legend(loc=(0.39,1.2))
+
+
+ return [axes]
+
+
+
+def compare_3d_vectormaps_compact(*args, **kwargs):
+ '''
+ Wrapper for compare_3d_vectormaps but
+ compact=True, total_error=False, illustrate=False
+ '''
+ return compare_3d_vectormaps(*args, compact=True, total_error=False, illustrate=False, **kwargs)
+
+
+
+def compare_3d_vectormaps_manyviews(*args, axes=None,
+ column_titles=['Microsaccades', 'Rhabdomere orientation', 'Difference', 'Mean microsaccade'],
+ row_titles=['Dorsal\nview', 'Anterior\nview', 'Ventral\nview'],
+ **kwargs):
+ '''
+ Just with different views rendered
+
+ First axes gets attributes .orientation_ax and .error_ax
+ '''
+
+
+ views = [[50,90], [0,90], [-50,90]]
+ rows = len(views)
+ cols = 4
+
+ biphasic = False
+    if kwargs.get('animation_type', None) in ['pitch_rot', 'yaw_rot', 'roll_rot']:
+        biphasic = True
+        cols += 1
+        column_titles.insert(3, 'Difference2')
+
+        column_titles[2] = 'Difference\n with slower phase'
+        column_titles[3] = 'Difference\n with fast phase'
+        column_titles[-1] = ''
+
+        plt.subplots_adjust(left=0.05, bottom=0.04, right=0.9, top=0.94, wspace=0.05, hspace=0.05)
+    else:
+        plt.subplots_adjust(left=0.1, bottom=0.04, right=0.9, top=0.95, wspace=0.05, hspace=0.05)
+
+ if axes is None:
+ fig = plt.figure(figsize=DEFAULT_FIGSIZE,dpi=300)
+ axes = []
+
+
+ for i_view in range(rows):
+ for column in range(cols-1):
+ axes.append(fig.add_subplot(rows,cols,column+1+i_view*cols, projection='3d'))
+
+        if kwargs.get('illustrate_ax', None) in [True, None]:
+ # FIXME Very hacky way to move the subplot right. For some reason
+ # when moved this way the plot also gets smaller?
+ axes[0].illustrate_ax = fig.add_subplot(rows,cols,cols)
+ cbox = axes[0].illustrate_ax.get_position()
+ w = abs(cbox.x1 - cbox.x0)
+ h = abs(cbox.y1 - cbox.y0)
+ cbox.x0 -= w/8
+ cbox.x1 += w / 3.3 +w/8
+ cbox.y1 += h / 3.3
+
+ if kwargs.get('animation_type', None) == 'pitch_rot':
+ w = abs(cbox.x1 - cbox.x0)
+ h = abs(cbox.y1 - cbox.y0)
+ cbox.x0 -= w/5
+ cbox.x1 += w/5
+ cbox.y0 -= w/5
+ cbox.y1 += w/5
+
+ axes[0].illustrate_ax.set_position(cbox)
+ axes[0].illustrate_ax.cbox = cbox
+
+        if kwargs.get('total_error', None) in [True, None]:
+ ax = fig.add_subplot(rows,cols,3*cols)
+ ax_pos = ax.get_position()
+ ax_pos = [ax_pos.x0+0.02, ax_pos.y0-0.04, ax_pos.width+0.022, ax_pos.height+0.02]
+ ax.remove()
+ ax = fig.add_axes(ax_pos)
+ axes[0].error_ax = ax
+
+ tmp_ax = fig.add_subplot(rows, cols, 2*cols)
+ tmp_ax.set_axis_off()
+ cbox = tmp_ax.get_position()
+
+ cbox.x1 -= abs(cbox.x1 - cbox.x0)/1.1
+ w = abs(cbox.x1 - cbox.x0)
+ cbox.x0 -= 2*w
+ cbox.x1 -= 2*w
+ axes[0].colorbar_ax = ax.figure.add_axes(cbox)
+
+ # Clear axes that are attributes of the axes[0]; These won't
+ # otherwise get cleared for the animation/video
+
+ if getattr(axes[0], 'error_ax', None) is not None:
+ axes[0].error_ax.clear()
+
+ # Set custom column titles
+ for i_manalyser in range(2):
+ manalyser = args[i_manalyser]
+ if manalyser.manalysers[0].__class__.__name__ == 'FAnalyser' or manalyser.__class__.__name__ == 'FAnalyser':
+ if '\n' in ''.join(column_titles):
+ column_titles[i_manalyser] = 'Optic flow\n'
+ else:
+ column_titles[i_manalyser] = 'Optic flow'
+
+        if manalyser.manalysers[0].__class__.__name__ == 'MAnalyser' and manalyser.manalysers[0].receptive_fields:
+ column_titles[i_manalyser] = 'Biphasic receptive field\nmovement directions'
+
+
+ if args[0].manalysers[0].__class__.__name__ == 'FAnalyser':
+ column_titles[cols-1] = 'Mean optic flow axis'
+
+ if getattr(axes[0], 'illustrate_ax', None) is not None:
+ axes[0].illustrate_ax.clear()
+ axes[0].illustrate_ax.set_title(column_titles[-1], color=REPEAT_COLORS[0])
+ #axes[0].illustrate_ax.text(0.5,1, column_titles[-1], transform=axes[0].illustrate_ax.transAxes, ha='center', va='bottom')
+ axes[0].illustrate_ax.set_frame_on(False)
+ axes[0].illustrate_ax.set_axis_off()
+
+ # Add column titles
+ for title, ax in zip(column_titles[:-1], axes[0:cols-1]):
+ ax.set_title(title)
+
+ # Add row titles for the views
+ for title, ax in zip(row_titles, axes[::cols-1]):
+ if biphasic:
+ ax.text2D(-0.1, 0.5, title.replace('\n', ' '), transform=ax.transAxes, va='center', ha='center', rotation=90)
+ else:
+ ax.text2D(-0.375, 0.5, title, transform=ax.transAxes, va='center')
+
+ for ax in axes:
+ ax.set_axis_off()
+
+ naxes = cols -1
+
+ for i in range(3):
+ viewargs = copy.deepcopy(kwargs)
+ viewargs['elev'] = views[i][0]
+ viewargs['azim'] = views[i][1]
+
+ if i == 0:
+ compare_3d_vectormaps(axes=axes[i*naxes:(i+1)*naxes]+[axes[0].illustrate_ax, axes[0].error_ax],
+ biphasic=biphasic,
+ kwargsD={'colorbar': True, 'colorbar_ax': axes[0].colorbar_ax},
+ illustrate=True, total_error=True,
+ *args, **viewargs)
+ else:
+ compare_3d_vectormaps(axes=axes[i*naxes:(i+1)*naxes]+[axes[0].illustrate_ax, axes[0].error_ax],
+ biphasic=biphasic, illustrate=False, total_error=False,
+ kwargsD={'colorbar': False},
+ *args, **viewargs)
+
+ for ax in axes:
+ ax.dist = 6
+
+ axes[0].illustrate_ax.set_position(axes[0].illustrate_ax.cbox)
+ return [axes]
diff --git a/gonio-analysis/gonioanalysis/drosom/plotting/common.py b/gonio-analysis/gonioanalysis/drosom/plotting/common.py
new file mode 100644
index 0000000..b8ba7b3
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/plotting/common.py
@@ -0,0 +1,783 @@
+'''
+Common helper functions likely needed in many different plots.
+'''
+
+import os
+import math
+import copy
+import multiprocessing
+import datetime
+
+import numpy as np
+from scipy.spatial import cKDTree as KDTree
+import matplotlib.pyplot as plt
+import matplotlib.animation
+import matplotlib.colors
+from matplotlib.patches import FancyArrowPatch, CirclePolygon
+from mpl_toolkits.mplot3d import proj3d, art3d
+from matplotlib import cm
+
+from gonioanalysis.coordinates import (
+ nearest_neighbour,
+ get_rotation_matrix,
+ rotate_points
+ )
+from gonioanalysis.directories import ANALYSES_SAVEDIR
+
+CURRENT_ARROW_LENGTH = 1
+
+VECTORMAP_PULSATION_PARAMETERS = {'step_size': 0.02, 'low_val': 0.33, 'high_val': 1}
+
+
+
+# Taken from drosoeyes.blend
+RHABDOMERE_LOCATIONS = [(-1.6881, 1.0273), (-1.8046, -0.9934),
+ (-1.7111, -2.9717), (-0.0025, -1.9261), (1.6690, -0.9493),
+ (1.6567, 0.9762), (0.0045, -0.0113)]
+RHABDOMERE_DIAMETERS = [1.8627,1.8627,1.8627,1.8627,1.8627,1.8627, 1.5743]
+RHABDOMERE_R3R6_ROTATION = math.radians(-49.7)
+
+
+
+class Arrow3D(FancyArrowPatch):
+ def __init__(self, x0, y0, z0, x1, y1, z1, *args, **kwargs):
+ FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
+ self._verts3d = (x0, x1), (y0, y1), (z0, z1)
+
+ def draw(self, renderer):
+ xs3d, ys3d, zs3d = self._verts3d
+ xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
+ self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
+ FancyArrowPatch.draw(self, renderer)
+
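+# Usage sketch for Arrow3D (added for illustration; mirrors how vector_plot
+# below uses it). The six positional arguments are the tail and head
+# coordinates of the arrow:
+#
+#     fig = plt.figure()
+#     ax = fig.add_subplot(111, projection='3d')
+#     ar = Arrow3D(0, 0, 0, 0.5, 0.5, 0.5, arrowstyle='-|>',
+#                  mutation_scale=6, color='red')
+#     ax.add_artist(ar)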
+
+def add_line(ax, x0, y0, z0, x1, y1, z1, camerapos=None, **kwargs):
+ '''
+ Add a centered 3D line plot
+ '''
+ if camerapos and is_behind_sphere(*camerapos, (x0,y0,z0)):
+ return None
+
+ ax.plot([x0-x1/2, x0, x0+x1/2], [y0-y1/2, y0, y0+y1/2], [z0-z1/2, z0, z0+z1/2], **kwargs)
+
+
+def add_rhabdomeres(ax, x0, y0, z0, x1, y1, z1, mirror_lr=False, mirror_bf=False,
+ scale=0.015, camerapos=None, **kwargs):
+ '''
+ Add rhabdomeres R1-R7/8 patches to a 3D plot.
+
+    ax : object
+        Matplotlib Axes object to attach the patches onto
+    x0,y0,z0 : float
+        Coordinates of the rhabdomeres center point (R6/R7)
+    x1,y1,z1 : float
+        Vector pointing in the direction of the R6-R3 line
+    mirror_lr, mirror_bf : True, False (or 'auto' for bf)
+        Whether to mirror this rhabdomere or not (left/right, back/front)
+    scale : float
+        Scale of the rhabdomeres
+ **kwargs : dict
+ To matplotlib.patches.CirclePolygon
+
+ Returns a list of matplotlib 3d patches
+ '''
+
+ if camerapos and is_behind_sphere(*camerapos, (x0,y0,z0)):
+ return None
+
+
+ #v = np.asarray([x0,y0,z0])
+ #uv = v / np.linalg.norm(v)
+
+    try:
+        phi = math.asin(z0)
+    except ValueError:
+        # z0 marginally outside [-1, 1] due to floating point error
+        phi = math.pi / 2
+
+    try:
+        theta = math.atan(x0/y0)
+    except ZeroDivisionError:
+        theta = math.pi / 2
+        if y0 < 0:
+            theta = theta + math.pi
+
+
+ # Calculate rhabdomere rotation to match x1,y1,z1 by transforming
+ # point [1,0,0] to the x0,y0,z0 point and calculating angle of
+ # transformed ux and x1,y1,z1
+ ux = np.array([1,0,0])
+ ux = get_rotation_matrix('x', phi) @ ux
+ ux = get_rotation_matrix('z', -theta) @ ux
+ rot = np.arccos(np.inner(ux, [x1,y1,z1])/(np.linalg.norm(ux) * np.linalg.norm([x1,y1,z1])))
+
+ if z1 < 0:
+ rot = -rot
+
+ patches = []
+
+ if mirror_bf == 'auto' and z0 > 0:
+ mirror_bf = True
+ elif mirror_bf is not True:
+ mirror_bf = False
+
+ for diameter, location in zip(RHABDOMERE_DIAMETERS, RHABDOMERE_LOCATIONS):
+
+ patch = CirclePolygon((location[0]*scale, location[1]*scale), diameter/2*scale,
+ **kwargs)
+ patches.append(patch)
+ ax.add_patch(patch)
+
+ art3d.patch_2d_to_3d(patch)
+
+ #if mirror_lr:
+ # patch._segment3d = [get_rotation_matrix('y', math.pi) @ p for p in patch._segment3d]
+
+ # Rotate according to the vector (x1,y1,z1)
+ # First z rotation to set initial rotation
+ patch._segment3d = [get_rotation_matrix('z', RHABDOMERE_R3R6_ROTATION) @ p for p in patch._segment3d]
+
+ #if not mirror_lr and not mirror_bf:
+ # pass
+
+ if mirror_lr and not mirror_bf:
+ patch._segment3d = [get_rotation_matrix('x', math.pi) @ p for p in patch._segment3d]
+
+ if not mirror_lr and mirror_bf:
+ patch._segment3d = [get_rotation_matrix('x', math.pi) @ p for p in patch._segment3d]
+ #patch._segment3d = [get_rotation_matrix('z', math.pi) @ p for p in patch._segment3d]
+
+ #if not mirror_lr and mirror_bf:
+ # patch._segment3d = [get_rotation_matrix('x', math.pi) @ p for p in patch._segment3d]
+ # patch._segment3d = [get_rotation_matrix('z', math.pi) @ p for p in patch._segment3d]
+
+ #if mirror_lr and mirror_bf:
+ # patch._segment3d = [get_rotation_matrix('z', math.pi) @ p for p in patch._segment3d]
+
+
+
+ patch._segment3d = [get_rotation_matrix('z', rot) @ p for p in patch._segment3d]
+
+ patch._segment3d = [get_rotation_matrix('x', math.pi/2) @ p for p in patch._segment3d]
+
+
+ patch._segment3d = [get_rotation_matrix('x', phi) @ p for p in patch._segment3d]
+ patch._segment3d = [get_rotation_matrix('z', -theta) @ p for p in patch._segment3d]
+
+ # Translate
+ patch._segment3d = [(x+x0,y+y0,z+z0) for x,y,z in patch._segment3d]
+
+
+ return patches
+
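+# Usage sketch for add_rhabdomeres (added for illustration): draw one
+# rhabdomere patch group at a point on the unit sphere, oriented along the
+# given R6-R3 vector; extra kwargs go to matplotlib CirclePolygon.
+#
+#     add_rhabdomeres(ax, 0.0, 0.9, 0.0, 0.1, 0.0, 0.0,
+#                     mirror_lr=False, mirror_bf='auto',
+#                     resolution=9, edgecolor=None, facecolor='gray')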
+
+def make_animation_angles(step=0.5):
+ '''
+ Returns the matplotlib angles to rotate a 3D plot
+
+    This really shouldn't be here...
+ '''
+
+ animation = []
+ sidego = 30
+ # go up, to dorsal
+ for i in np.arange(-30,60,step):
+ animation.append((i,90))
+ #rotate azim
+ for i in np.arange(90,90+sidego,step*2):
+ animation.append((60,i))
+ # go back super down, to ventral
+ for i in np.arange(0,120,step):
+ animation.append((60-i,90+sidego))
+ # rotate -azim
+ for i in np.arange(0,2*sidego,step*2):
+ animation.append((-60,90+sidego-i))
+ # go up back to dorsal
+ for i in np.arange(0,120, step):
+ animation.append((-60+i,90-sidego))
+ return animation
+
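+# Usage sketch (added for illustration): the returned (elev, azim) pairs can
+# drive a camera orbit, one pair per video frame.
+#
+#     for elev, azim in make_animation_angles(step=0.5):
+#         ax.view_init(elev=elev, azim=azim)
+#         fig.canvas.draw_idle()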
+
+
+CURRENT_ARROW_DIRECTION = 1
+def make_animation_timestep(step_size=0.075, low_val=0.6, high_val=1, rel_return_speed=3, twoway=False):
+ '''
+    step_size between 0 and 1.
+    Once the total displacement has exceeded high_val, go back to low_val
+ '''
+ # Update arrow length
+ global CURRENT_ARROW_LENGTH
+ global CURRENT_ARROW_DIRECTION
+
+ step = step_size * 1.5
+ if CURRENT_ARROW_DIRECTION < 0:
+ step *= rel_return_speed
+
+ CURRENT_ARROW_LENGTH += step
+
+ if twoway:
+ s = -1
+ else:
+ s = 1
+
+    # Both directions reset the same way after exceeding the high value
+    if CURRENT_ARROW_LENGTH > high_val * 1.5:
+        CURRENT_ARROW_LENGTH = low_val * 1.5
+        CURRENT_ARROW_DIRECTION = s * CURRENT_ARROW_DIRECTION
+
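+# Usage sketch (added for illustration): calling this once per frame advances
+# the module-level pulsation state that vector_plot reads through
+# CURRENT_ARROW_LENGTH and CURRENT_ARROW_DIRECTION.
+#
+#     for i_frame in range(120):
+#         make_animation_timestep(**VECTORMAP_PULSATION_PARAMETERS)
+#         vector_plot(ax, points, vectors, i_pulsframe=i_frame+1)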
+
+def plot_2d_opticflow(ax, direction):
+
+ x0, x1 = ax.get_xlim()
+ y0, y1 = ax.get_ylim()
+
+ width = abs(x1-x0)
+ height = abs(y1-y0)
+
+ if direction == 'side':
+ arrows = [np.array((1.05,ik))*width for ik in np.arange(0.1,0.91,0.1)]
+ for x, y in arrows:
+ ax.arrow(x, y, -0.1*width, 0, width=0.01*width, color='darkviolet')
+ else:
+ x = []
+ y = []
+ for i in range(10):
+ for j in range(10):
+ x.append((0.5+i)*height/10)
+ y.append((0.5+j)*width/10)
+ ax.scatter(x,y, marker='x', color='darkviolet')
+
+
+def is_behind_sphere(elev, azim, point):
+ '''
+    Calculates whether a point seen by an observer at (elev, azim) in spherical
+    coordinates is behind a sphere (radius == point) or not.
+
+ NOTICE: Elev from horizontal plane (a non-ISO convention) and azim as in ISO
+ '''
+
+ cx = np.sin(np.radians(90-elev)) * np.cos(np.radians(azim))
+ cy = np.sin(np.radians(90-elev)) * np.sin(np.radians(azim))
+ cz = np.cos(np.radians(90-elev))
+
+ vec_cam = (cx,cy,cz)
+ vec_arr = point
+
+ angle = np.arccos(np.inner(vec_cam, vec_arr)/(np.linalg.norm(vec_cam)*np.linalg.norm(vec_arr)))
+    return angle > np.pi/2
+
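+# Concrete example: with the camera at elev=0, azim=0 the view direction is
+# (1, 0, 0), so (-1, 0, 0) is hidden behind the sphere and (1, 0, 0) is not.
+#
+#     is_behind_sphere(0, 0, (-1, 0, 0))   # -> True
+#     is_behind_sphere(0, 0, (1, 0, 0))    # -> False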
+
+def plot_guidance(ax, camerapos=None, r=1,
+ mutation_scale=6, hide_text=False):
+ '''
+ Plot help elements to point left,right,front,back
+ '''
+ arrows = []
+ guidances = {'Right': ((r,0,0), (0.2,0,0)),
+ 'Left': ((-r,0,0),(-0.2,0,0)),
+ ' Ventral': ((0,0,-r),(0,0,-0.3)),
+ ' Dorsal': ((0,0,r),(0,0,0.3))}
+
+ for name, (point, vector) in guidances.items():
+ point = np.array(point)
+ vector = np.array(vector)
+
+ if is_behind_sphere(*camerapos, point):
+ zorder = 1
+ else:
+ zorder = 8
+
+ ar = Arrow3D(*point, *(point+vector), mutation_scale=mutation_scale,
+ lw=0.2, color='black', zorder=zorder)
+ ax.add_artist(ar)
+ arrows.append(ar)
+
+ if not hide_text:
+ if name in ('Left', 'Right'):
+ ha = 'center'
+ else:
+ ha = 'left'
+ ax.text(*(point+vector/1.05), name, color='black',
+ fontsize='xx-large', va='bottom', ha=ha,
+ linespacing=1.5, zorder=zorder+1)
+
+ return arrows
+
+
+def plot_vrot_lines(ax, vrots, n_verts=16, camerapos=None):
+ '''
+    Plot vertical rotation lines
+
+    Arguments
+    ---------
+    vrots: list of floats
+        Vertical rotations in degrees
+    n_verts : int
+        How many vertices per half circle. Higher values give
+        smoother and rounder results.
+ '''
+
+ horizontals = np.radians(np.linspace(-70, 70, n_verts))
+ points = np.vstack( (np.sin(horizontals), np.cos(horizontals)) )
+ points = np.vstack( (points, np.zeros(n_verts)) ).T
+
+ points = points * 0.95
+
+ print(points.shape)
+
+ for vrot in vrots:
+ pnts = rotate_points(points, 0, math.radians(vrot), 0)
+        color = (0.2, 0.2, 0.2)
+        if -1 < vrot < 1:
+            style = '-'
+        else:
+            style = '--'
+
+ if camerapos:
+ visible = [p for p in pnts if not is_behind_sphere(*camerapos, p)]
+
+ if not visible:
+ continue
+ pnts = np.array(visible)
+
+ ax.plot(pnts[:,0], pnts[:,1], pnts[:,2], style, lw=1, color=color)
+
+
+def vector_plot(ax, points, vectors, color='black', mutation_scale=6, scale_length=1,
+ i_pulsframe=None, guidance=False, camerapos=None, draw_sphere=True,
+ vrot_lines=False,
+ hide_axes=False, hide_text=False,
+ **kwargs):
+ '''
+ Plot vectors on a 3D matplotlib Axes object as arrows.
+
+ ax : object
+ Matplotlib ax (axes) instance
+ points : array_like
+ Sequence of arrow starting/tail (x,y,z) points
+ vectors : array_like
+ Arrow lengts and directions, sequence of (x,y,z)
+ color : string
+ Matplotlib valid color for the drawn arrows
+ mutation_scale : float
+ Size of the arrow head basically
+ i_pulsframe : int
+ Index of the pulsation frame, setting the arrow length. For animation.
+ guidance : bool
+ Add help elements to point left,right,front,back
+ camerapos : tuple or None
+        Values of (elev, azim) to hide vectors behind the sphere. If None,
+ use values from ax.elev and ax.azim.
+ draw_sphere : bool
+ If true draw a gray sphere
+ hide_axes : bool
+ Call set_axis_off
+ hide_text : bool
+ Omit from drawing any text
+ kwargs : dict
+ Passed to matplotlib FancyArrowPatch
+
+ Returns
+ -------
+ arrow_artists : list
+ All ArrowArtists added to the ax
+
+ '''
+ global CURRENT_ARROW_DIRECTION
+ r = 0.9
+
+ arrow_artists = []
+
+
+ if hide_axes:
+ ax.set_axis_off()
+
+ if hide_text:
+ ax.axes.xaxis.set_ticklabels([])
+ ax.axes.yaxis.set_ticklabels([])
+ ax.axes.zaxis.set_ticklabels([])
+ ax.set_xlabel('')
+ ax.set_ylabel('')
+ ax.set_zlabel('')
+ else:
+ ax.set_xlabel('x')
+ ax.set_ylabel('y')
+ ax.set_zlabel('z')
+
+
+
+ if not camerapos:
+ camerapos = (ax.elev, ax.azim)
+
+ if guidance:
+ plot_guidance(ax, camerapos=camerapos, hide_text=hide_text)
+
+ if draw_sphere:
+ N = 75
+ phi, theta = np.meshgrid(np.linspace(0, 2*np.pi, N), np.linspace(0, np.pi, N))
+ X = r * np.sin(theta) * np.cos(phi)
+ Y = r * np.sin(theta) * np.sin(phi)
+ Z = r * np.cos(theta)
+ ax.plot_surface(X, Y, Z, color='lightgray')
+
+
+ if vrot_lines:
+ plot_vrot_lines(ax, np.arange(-120, 120.1, 20), n_verts=16,
+ camerapos=camerapos)
+
+
+ if i_pulsframe:
+ global CURRENT_ARROW_LENGTH
+ scaler = CURRENT_ARROW_LENGTH
+ else:
+ scaler = 1.1
+
+ scaler *= scale_length
+
+ for point, vector in zip(points, vectors):
+
+ if camerapos:
+ vec_arr = point
+
+ if is_behind_sphere(*camerapos, vec_arr):
+ alpha = 0
+ else:
+ alpha = 1
+ zorder = 10
+ else:
+ alpha = 1
+ zorder = 10
+
+ if CURRENT_ARROW_DIRECTION > 0 or i_pulsframe is None:
+ A = point
+ B = point+scaler*vector
+ else:
+ A = point
+ B = point-scaler*vector
+
+ ar = Arrow3D(*A, *B, arrowstyle="-|>", lw=1,
+ mutation_scale=mutation_scale, color=color, alpha=alpha, zorder=10)
+ ax.add_artist(ar)
+ arrow_artists.append(ar)
+
+
+ ax.set_xlim(-1.1, 1.1)
+ ax.set_ylim(-1.1,1.1)
+ ax.set_zlim(-1.1, 1.1)
+
+ return arrow_artists
+
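+# Minimal usage sketch for vector_plot (added for illustration): one arrow on
+# the unit sphere, with the guidance arrows drawn as well.
+#
+#     fig = plt.figure()
+#     ax = fig.add_subplot(111, projection='3d')
+#     points = [np.array([0.9, 0.0, 0.0])]
+#     vectors = [np.array([0.0, 0.2, 0.0])]
+#     vector_plot(ax, points, vectors, color='red', guidance=True)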
+
+def surface_plot(ax, points, values, cb=False, phi_points=None, theta_points=None,
+ colormap='own'):
+ '''
+ 3D surface plot of the error between the optic flow vectors and the actual
+ eye-movements vector map.
+
+    Points and values have to be in the same order.
+
+    Arguments
+    ---------
+    ax : object
+        Matplotlib Axes object
+ points : list
+ List of x,y,z coordinates
+ values : list of scalars
+ List of scalar values at the given points, in the same order.
+ colormap : string
+ "own", "own-diverge" or any matplotlib colormap name
+ '''
+
+ if len(points) != len(values):
+        raise ValueError('For surface_plot, points and values have to be the same length (and in the same order)')
+
+ # Points where the error is "evaluated" (actually interpolated)
+
+
+ N = 100
+ if phi_points is None:
+ phi_points = np.linspace(0, 2*np.pi, N)
+
+ phi, theta = np.meshgrid(phi_points, np.linspace(0, np.pi, N))
+ X = np.sin(theta) * np.cos(phi)
+ Y = np.sin(theta) * np.sin(phi)
+ Z = np.cos(theta)
+
+
+ def color_function(theta, phi):
+
+ intp_dist = (2 * np.sin(np.radians(5)))
+
+ x = np.sin(theta) * np.cos(phi)
+ y = np.sin(theta) * np.sin(phi)
+ z = np.cos(theta)
+
+ errs = np.empty_like(x)
+
+ for i in range(x.size):
+
+ i_point = nearest_neighbour((x.flat[i], y.flat[i], z.flat[i]), points,
+ max_distance=intp_dist)
+
+ if i_point is False:
+ errs.flat[i] = 0
+ else:
+ errs.flat[i] = values[i_point]
+ return errs
+
+ kdtree = KDTree(points)
+
+ def color_function_optimized(theta, phi):
+
+ intp_dist = (2 * np.sin(np.radians(5)))
+
+ x = (np.sin(theta) * np.cos(phi))
+ y = (np.sin(theta) * np.sin(phi))
+ z = np.cos(theta)
+
+ errs = np.empty_like(x)
+ positions = [[x.flat[i], y.flat[i], z.flat[i]] for i in range(x.size)]
+
+ distances, i_points = kdtree.query( positions, n_jobs=-1 )
+
+ for i in range(errs.size):
+ if distances[i] < intp_dist:
+ errs.flat[i] = values[i_points[i]]
+ else:
+ errs.flat[i] = 0
+ return errs
+
+
+ colors = color_function_optimized(theta, phi)
+
+ if colormap == 'own':
+ culurs = [(0.2, 0.1, 0),(1,0.55,0),(1,1,0.4)]
+ elif colormap == 'own-diverge':
+ culurs = [(0,(0,0,0)),(0.001,(1,0,0)), (0.5,(1,1,1)), (1,(0,0,1))]
+ else:
+        # Must be a Matplotlib colormap otherwise
+ culurs = matplotlib.cm.get_cmap(colormap).colors
+
+ ownmap = matplotlib.colors.LinearSegmentedColormap.from_list('ownmap', culurs, 100)
+ ax.plot_surface(X, Y, Z, facecolors=ownmap(colors), linewidth=0, vmin=0, vmax=1)
+
+
+ m = cm.ScalarMappable(cmap=ownmap)
+ m.set_array(colors)
+ return m
+
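+# Usage sketch for surface_plot (added for illustration): values in [0, 1]
+# given at a few sphere-surface points; the returned mappable feeds a
+# colorbar. An Axes3D `ax` is assumed to exist.
+#
+#     points = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
+#     values = [0.1, 0.5, 0.9]
+#     m = surface_plot(ax, points, values)
+#     plt.colorbar(m, ax=ax)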
+
+def histogram_heatmap(all_errors, nbins=20, horizontal=True, drange=None):
+ '''
+
+    all_errors [errors_rot1, errors_rot2, ....] where
+        errors_rot_i = [error1, error2, ...], error_i is float
+    '''
+    N_bins = nbins
+    if drange == 'auto':
+        data_range = (np.min(all_errors), np.max(all_errors))
+        print('histogram_heatmap data_range {}'.format(data_range))
+    elif drange is not None:
+        data_range = drange
+    else:
+        data_range = (0, 1)
+
+ image = []
+
+ for rotation_errors in all_errors:
+ hist, bin_edges = np.histogram(rotation_errors, bins=N_bins, range=data_range)
+ image.append(hist)
+
+ image = np.array(image)
+
+ if horizontal:
+ image = image.T
+
+ return image
+
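+# Usage sketch (added for illustration, with random stand-in data): one
+# histogram column per rotation, stacked into an image for imshow.
+#
+#     all_errors = [np.random.rand(100) for _ in range(36)]
+#     image = histogram_heatmap(all_errors, drange=(0, 1))
+#     plt.imshow(image, origin='lower', aspect='auto')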
+
+
+def save_3d_animation(manalyser, ax=None, plot_function=None,interframe_callback=print,
+ i_worker=None, N_workers=None, animation_type='rotate_plot', video_writer=False,
+ *args, **kwargs):
+ '''
+ interframe_callback
+
+ manalyser : object or list of objects
+ Either one analyser or a list of analysers
+
+ animation_type : string
+ "rotate_plot" or "rotate_arrows"
+ '''
+    try:
+        # If manalyser is a list of manalysers
+        manalyser[0]
+        manalysers = manalyser
+    except TypeError:
+        # If it is actually only one manalyser
+        manalysers = [manalyser]
+
+
+ fps = 30
+ frameskip = False
+
+ biphasic=False
+ optimal_ranges = []
+
+ # FIXME No predetermined optimal ranges
+    if len(manalysers) > 1 and animation_type not in ['rotate_plot']:
+
+ if manalysers[0].manalysers[0].__class__.__name__ == 'MAnalyser':
+ optimal_ranges = [[24.3-3, 24.3+3, 'Typical photoreceptor\nmovement axis']]
+ elif manalysers[0].__class__.__name__ == 'FAnalyser':
+            optimal_ranges = [[-80-3, -80+3, 'Typical ommatidial\nrhabdomere alignment']]
+
+    if plot_function.__name__ == 'plot_3d_vectormap':
+ kwargs['arrow_rotations'] = [0, 29]
+
+ if animation_type == 'rotate_plot':
+ animation = make_animation_angles(step=0.5 * (20/fps))
+ elif animation_type == 'rotate_arrows':
+
+ animation = np.linspace(-90, 90, 12*fps)
+
+
+ elif animation_type in ['pitch_rot', 'yaw_rot', 'roll_rot']:
+ biphasic = True
+ animation = np.linspace(-180, 180, 16*fps)
+
+ if animation_type == 'pitch_rot':
+ optimal_ranges = [[0, 20, 'Typical head tilt\nrange']]
+ else:
+ optimal_ranges = [[-10, 10, 'Typical head tilt\nrange']]
+
+ if len(optimal_ranges) > 0:
+ A = optimal_ranges[0][0]
+ B = optimal_ranges[-1][1]
+
+ for optimal_range in optimal_ranges:
+ a,b,n = optimal_range
+            start, trash, end = np.split(animation, [np.where(a<=animation)[0][0], np.where(b<=animation)[0][0]])
+
+        # NOTE The diff hunk is truncated here: the figure setup and the
+        # per-frame loop that define axes, i_frame and animation_variable
+        # are missing from the source.
+        if len(axes) > 1:
+ plot_function(*manalysers, i_frame=i_frame, axes=axes, *args, **kwargs)
+ else:
+ plot_function(*manalysers, i_frame=i_frame, ax=axes[0], *args, **kwargs)
+
+ print('Animation variable: {}'.format(animation_variable))
+
+ if animation_type == 'rotate_plot':
+ for ax in axes:
+ ax.view_init(elev=animation_variable[0], azim=animation_variable[1])
+
+ axes[0].figure.canvas.draw_idle()
+ interframe_callback()
+
+ if video_writer:
+ video_writer.grab_frame()
+ if doublegrab_next:
+ video_writer.grab_frame()
+ doublegrab_next = False
+
+            ax.dist = 7
+ axes[0].figure.savefig(os.path.join(savedir, 'frame_{0:07d}.png'.format(i_frame)), transparent=True, dpi=300)
+ #except Exception as e:
+ # print('Could not make a frame, error message on the next line')
+ # print(e)
+ # doublegrab_next = True
+
+ if video_writer:
+ video_writer.finish()
+
diff --git a/gonio-analysis/gonioanalysis/drosom/plotting/compare_opticflow.py b/gonio-analysis/gonioanalysis/drosom/plotting/compare_opticflow.py
new file mode 100644
index 0000000..683a00d
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/plotting/compare_opticflow.py
@@ -0,0 +1,487 @@
+import os
+from math import radians
+
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.patches
+import matplotlib.animation
+
+from gonioanalysis.directories import ANALYSES_SAVEDIR, CODE_ROOTDIR
+
+from .common import vector_plot, surface_plot, make_animation_timestep
+
+CURRENT_ARROW_LENGTH = 1
+
+
+def error_at_flight(manalyser):
+ '''
+    Plot a 2D heatmap with the vertical limits (lower, upper) as the x,y axes
+    and the pixel color values showing the error at the flight orientation.
+
+    To see which (lower, upper) vertical exclusion would give the smallest error.
+ '''
+
+    # optic_flow_error is assumed to be provided by the optic_flow module
+    # (this file does not import it at module level)
+    from gonioanalysis.drosom.optic_flow import optic_flow_error
+
+    optimal_rot = 50
+
+ rotations = np.linspace(-180, 180, 180)
+
+ widths = np.linspace(10, 200, 20)
+ centers = np.linspace(-50, 50, 10)
+
+ # Absolute minimum errors, no matter where happens
+ min_errors = []
+ # Distance of the minimum error from the flight position
+ distances = []
+
+ for i, width in enumerate(widths):
+ print('upper {}/{}'.format(i+1, len(widths)))
+ min_errors.append([])
+ distances.append([])
+ for center in centers:
+ manalyser.set_angle_limits(va_limits=(-width/2+center, width/2+center))
+ p, e = optic_flow_error(manalyser, rotations)
+ e = np.mean(e, axis=1)
+
+
+ error_argmin = np.argmin(e)
+ distance = rotations[error_argmin] - optimal_rot
+
+ distances[-1].append(distance)
+ min_errors[-1].append(np.min(e))
+
+ manalyser.set_angle_limits()
+
+ for im, figname, text in zip([min_errors, distances], ['min_errors.jpg', 'distances.jpg'], ['Minimum error (between 0 and 1)','The distance of flight rotation from minimum error (degrees)']):
+
+ fig = plt.figure()
+ zlimit = max(abs(np.min(im)), abs(np.max(im)))
+ plt.imshow(im, extent=[np.min(centers), np.max(centers), np.min(widths), np.max(widths)], cmap='seismic',vmin=-zlimit, vmax=zlimit, origin='lower')
+ plt.xlabel('Center point (degrees)')
+ plt.ylabel('Width (degrees)')
+ cbar = plt.colorbar()
+ cbar.set_label(text)
+
+ savedir = os.path.join(ANALYSES_SAVEDIR, 'error_at_flight', manalyser.get_specimen_name())
+ os.makedirs(savedir, exist_ok=True)
+ fig.savefig(os.path.join(savedir,figname))
+ plt.close()
+
+
+
+def complete_flow_analysis(manalyser, rotations, rotation_axis,
+ text=True, animate=True, subdivide_regions=False, mutation_scale=3,
+ dual_difference=True, elevations=[50, 0, -50]):
+ '''
+ Creates the combined plot of the rhabdomere movement vectormap, simulated
+ optic flow and the difference between these two, rendered from 3 views
+
+ PLOT
+ ROW
+ 1 measured optic flow vectors
+ 2 simulated optic flow
+ 3 error heatplot
+
+ INPUT ARGUMENTS
+ manalyser
+ rotations
+ rotation_axis Either 'yaw', 'roll', or 'pitch'
+        text            whether to render text
+        dual_difference Create a difference plot for the opposite direction
+                        as well, adding a column in the plot
+        subdivide_regions True/False, whether to divide the vertical angles
+                        into separate regions
+
+
+ '''
+ from scipy.ndimage import rotate
+ import matplotlib.image as mpli
+ from gonioanalysis.coordinates import rotate_vectors, optimal_sampling
+ from gonioanalysis.drosom.optic_flow import flow_vectors, field_error
+
+    # Parse keyword arguments
+ # -----------------------
+ N_fig_columns = 4
+ if dual_difference:
+ N_fig_columns += 1
+ N_fig_rows = 3
+
+ if not text:
+ global CURRENT_ARROW_LENGTH
+ CURRENT_ARROW_LENGTH = 1.5
+
+ if text:
+ dpi = 150
+ else:
+ dpi = 600
+
+ elevation_texts = ['Dorsal\nview', 'Anterior\nview', 'Ventral\nview']
+
+ column_texts = ['Biphasic receptive field\nmovement directions', #'Pseudopupil movement\ndirections',
+ 'Experienced optic flow',
+ 'Difference with slower phase',
+ 'Head orientation']
+
+ if dual_difference:
+ column_texts.insert(3, 'Difference with fast phase')
+
+ if rotation_axis == 'pitch':
+ zero_rot = 10
+ optimal_rot = 10
+ fly_image = os.path.join(CODE_ROOTDIR, 'droso6_rotated.png')
+ sideflow=True
+ elif rotation_axis == 'yaw':
+ zero_rot = 0
+ optimal_rot = 0
+ fly_image = os.path.join(CODE_ROOTDIR, 'rotation_yaw.png')
+ sideflow=True
+ elif rotation_axis == 'roll':
+ zero_rot = 0
+ optimal_rot = 0
+ fly_image = os.path.join(CODE_ROOTDIR, 'rotation_roll.png')
+ sideflow=False
+
+
+ # End parsing keyword arguments
+ # ------------------------------
+
+ lp, lvecs = manalyser.get_3d_vectors('left')
+ rp, rvecs = manalyser.get_3d_vectors('right')
+
+ points = np.concatenate((lp, rp))
+ lrvecs = np.concatenate((lvecs, rvecs))
+
+ # Flow field errors for each rotation
+ #points, all_errors = optic_flow_error(manalyser, rotations)
+
+ # Flow field vectors for each rotation
+ vector_points = optimal_sampling(np.arange(-90, 90, 5), np.arange(-180, 180, 5))
+
+ if rotation_axis == 'yaw':
+ all_flow_vectors = [rotate_vectors(vector_points, flow_vectors(vector_points), -radians(rot), 0, 0) for rot in rotations]
+ elif rotation_axis == 'pitch':
+ all_flow_vectors = [rotate_vectors(vector_points, flow_vectors(vector_points), 0, -radians(rot), 0) for rot in rotations]
+ elif rotation_axis == 'roll':
+ all_flow_vectors = [rotate_vectors(vector_points, flow_vectors(vector_points), 0, 0, -radians(rot)) for rot in rotations]
+
+ all_errors = [field_error(points, lrvecs, *flow_vectors) for flow_vectors in all_flow_vectors]
+
+ # 1D errorplot for the mean error over rotations
+ #average_errors_1D = np.mean(all_errors, axis=1)
+ #average_errors_1D_stds = np.std(all_errors, axis=1)
+
+
+ savedir = os.path.join(ANALYSES_SAVEDIR, 'comparision_to_optic_flow', manalyser.get_specimen_name()+'_'+rotation_axis)
+ os.makedirs(savedir, exist_ok=True)
+
+ # SUBDIVIDE REGIONS
+ # ------------------
+
+ subdivide_flow_points = optimal_sampling(np.arange(-90, 90, 3), np.arange(-180, 180, 3))
+
+ if subdivide_regions:
+ subdivide_regions = [(-70, 70), (70, None), (None, -70)]
+ else:
+ subdivide_regions = [(None, None)]
+
+ subdivide_regions_colors = ['black', 'gray', 'gray']#['pink', 'lime', 'turquoise']
+ subdivide_styles = ['-', '-', '--']
+ subdivide_lws = [3, 1, 1]
+ subdivide_errors = []
+
+ subdivide_points = []
+ subdivide_vectors = []
+ subdivide_flow_vectors = []
+
+ for reg in subdivide_regions:
+ manalyser.set_angle_limits(va_limits=reg)
+
+ reglp, reglvecs = manalyser.get_3d_vectors('left')
+ regrp, regrvecs = manalyser.get_3d_vectors('right')
+
+ regpoints = np.concatenate((reglp, regrp))
+ reglrvecs = np.concatenate((reglvecs, regrvecs))
+
+ if rotation_axis == 'yaw':
+ reg_all_flow_vectors = [rotate_vectors(subdivide_flow_points, flow_vectors(subdivide_flow_points), -radians(rot), 0, 0) for rot in rotations]
+ elif rotation_axis == 'pitch':
+ reg_all_flow_vectors = [rotate_vectors(subdivide_flow_points, flow_vectors(subdivide_flow_points), 0, -radians(rot), 0) for rot in rotations]
+ elif rotation_axis == 'roll':
+ reg_all_flow_vectors = [rotate_vectors(subdivide_flow_points, flow_vectors(subdivide_flow_points), 0, 0, -radians(rot)) for rot in rotations]
+
+
+ reg_all_errors = [field_error(regpoints, reglrvecs, *flow_vectors) for flow_vectors in reg_all_flow_vectors]
+
+ #p, e = optic_flow_error(manalyser, all_flow_errors)
+ e = np.mean(reg_all_errors, axis=1)
+ subdivide_errors.append(e)
+
+ #subdivide_flow_vectors.append( [flow_vectors(p, xrot=rot) for rot in rotations] )
+ subdivide_flow_vectors.append( reg_all_flow_vectors )
+
+ subdivide_points.append([reglp, regrp])
+ subdivide_vectors.append([reglvecs, regrvecs])
+
+ manalyser.set_angle_limits(va_limits=(None, None))
+
+ # END OF SUBDIVIDE REGIONS
+ # -------------------------
+
+
+
+ fim = mpli.imread(fly_image)
+ im_scaler = len(fim)
+
+
+ N_steady_frames = 20*3
+ steadied = False
+
+ i_image = -1 # Saved image number
+ i_steady = 0 # Steady looping
+ i_rot = -1 # Determines current rotation
+ while True:
+
+ i_rot += 1
+ rot = rotations[i_rot]
+
+ print('Rotation {} degrees'.format(rot))
+
+ if i_steady > N_steady_frames:
+ steadied = True
+
+ if rot > optimal_rot and not steadied:
+ i_rot -= 1
+ i_steady += 1
+
+ if not text:
+ if rot < optimal_rot:
+ continue
+
+
+ # Data collection part
+ flow_vectors = all_flow_vectors[i_rot]
+ flow_errors = all_errors[i_rot]
+
+ i_image += 1
+ fn = 'image_{:0>8}.png'.format(i_image)
+ savefn = os.path.join(savedir, fn)
+
+
+ # Plotting part
+ fig = plt.figure(figsize=(11.69,8.27), dpi=dpi)
+
+
+
+ for i, elev in enumerate(elevations):
+ ax = fig.add_subplot(N_fig_rows, N_fig_columns, N_fig_columns*i+1, projection='3d')
+
+ for ppp, vecs, color in zip(subdivide_points, subdivide_vectors, subdivide_regions_colors):
+
+ lcolor = 'red'
+ rcolor = 'blue'
+
+ if color != 'black':
+ lcolor = color
+ rcolor = color
+
+            vector_plot(ax, ppp[0], vecs[0], color=lcolor, mutation_scale=mutation_scale, camerapos=(elev,90))
+            vector_plot(ax, ppp[1], vecs[1], color=rcolor, mutation_scale=mutation_scale, camerapos=(elev,90))
+
+ ax.view_init(elev=elev, azim=90)
+
+ ax.dist = 6
+
+ if text:
+ if dual_difference:
+ ax.text2D(0, 0.5, elevation_texts[i].replace('\n', ' '), transform=ax.transAxes, va='center', ha='center', rotation=90)
+ else:
+ ax.text2D(-0.15, 0.5, elevation_texts[i], transform=ax.transAxes, va='center')
+
+
+
+
+
+
+
+ for i, elev in enumerate(elevations):
+ ax = fig.add_subplot(N_fig_rows, N_fig_columns, N_fig_columns*i+2, projection='3d')
+ vector_plot(ax, *flow_vectors, color='darkviolet', mutation_scale=mutation_scale, camerapos=(elev,90))
+ ax.view_init(elev=elev, azim=90)
+
+ ax.dist = 6
+
+ for i, elev in enumerate(elevations):
+ ax = fig.add_subplot(N_fig_rows, N_fig_columns, N_fig_columns*i+3, projection='3d')
+ m = surface_plot(ax, points, 1-np.array(flow_errors), cb=False)
+ ax.view_init(elev=elev, azim=90)
+
+ ax.dist = 6
+
+ if dual_difference:
+ for i, elev in enumerate(elevations):
+ ax = fig.add_subplot(N_fig_rows, N_fig_columns, N_fig_columns*i+4, projection='3d')
+ m = surface_plot(ax, points, np.array(flow_errors), cb=False)
+ ax.view_init(elev=elev, azim=90)
+
+ ax.dist = 6
+
+
+ axes = fig.get_axes()
+ j=0
+ for ikk, ax in enumerate(axes):
+ if ikk in [0,3,6,9]:
+ if text:
+ ax.text2D(0.5, 1.05, column_texts[j], transform=ax.transAxes, ha='center', va='top')
+ j +=1
+
+ # Plot image of the fly
+ ax = fig.add_subplot(N_fig_rows, N_fig_columns, N_fig_columns)
+ ax.imshow(rotate(fim, rot, mode='nearest', reshape=False), cmap='gray')
+ ax.set_frame_on(False)
+
+
+        # Stop video at the optimal
+        # NOTE The diff hunk is truncated here: the stop-at-optimal logic and
+        # the loop header that define i_ax, theta, color and style are missing.
+        if i_ax in [2, 5]:
+            style = '--'
+
+
+ phi = np.linspace(0,180)
+ phi = np.radians(phi)
+ theta = np.radians(90-theta)
+ x = np.cos(phi)
+ y = np.sin(phi) * np.sin(theta)
+
+ z = np.sin(phi) * np.cos(theta)
+
+
+ ax.plot(x,y,z, style, color=color, lw=1)
+
+
+ if dual_difference:
+ plt.subplots_adjust(left=0.02, bottom=0.05, right=0.95, top=0.90, wspace=0.0, hspace=0.1)
+ else:
+ plt.subplots_adjust(left=0.08, bottom=0, right=0.95, top=0.95, wspace=0.1, hspace=0.1)
+
+ make_animation_timestep(step_size=0.025, low_val=0.7)
+
+ if savefn:
+ fig.savefig(savefn)
+ plt.close()
+
diff --git a/gonio-analysis/gonioanalysis/drosom/plotting/illustrate_experiments.py b/gonio-analysis/gonioanalysis/drosom/plotting/illustrate_experiments.py
new file mode 100644
index 0000000..2aa5f0b
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/plotting/illustrate_experiments.py
@@ -0,0 +1,570 @@
+import os
+import math
+import multiprocessing as mp
+
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib.patches import Arrow, Circle
+import tifffile
+from scipy.spatial import cKDTree as KDTree
+import cv2
+
+from gonioanalysis.directories import ANALYSES_SAVEDIR
+import gonioanalysis.coordinates as coordinates
+from gonioanalysis.drosom.plotting.common import vector_plot
+from gonioanalysis.drosom.loading import angles_from_fn
+
+
+def _load_image(fn, roi, e):
+
+ image = tifffile.imread(fn)
+
+ x,y,w,h = [int(round(z)) for z in roi]
+
+ upper = np.percentile(image[y-e:y+h+e,x-e:x+w+e], 99.5)
+ lower = np.percentile(image[y-e:y+h+e,x-e:x+w+e], 0.5)
+
+ image = np.clip(image, lower, upper) - lower
+ image = (image-np.min(image)) / np.max(image)
+ image *= 255
+
+ return np.array([image,image,image])
+
+
+def _box(image, roi, lw, color, crop_factor=1):
+ x,y,w,h = [int(round(z)) for z in roi]
+
+ for i, c in enumerate(color):
+ # Top
+ image[i, y:y+lw,x:x+w] = c
+ # Left
+ image[i, y:y+h,x:x+lw] = c
+ # Bottom
+ image[i, y+h-lw:y+h,x:x+w] = c
+ # Right
+ image[i, y:y+h,x+w-lw:x+w] = c
+
+ return image
+
+def _crop(image, roi, factor):
+
+ x,y,w,h = [int(round(z)) for z in roi]
+ cp = [x+int(w/2), y+int(h/2)]
+
+ new_image = []
+
+ if factor < 1:
+ h2 = int(round(factor*image.shape[1]/2))
+ a = cp[1]-h2
+ b = cp[1]+h2
+
+ if a < 0:
+ a = 0
+ b = h2*2
+ if b > image.shape[1]:
+ a = image.shape[1] - h2*2
+ b = image.shape[1]
+
+ for i in range(len(image)):
+ new_image.append( image[i, a:b, :] )
+ elif factor > 1:
+ w2 = int(round(image.shape[2]/2/factor))
+ for i in range(len(image)):
+ new_image.append( image[i, cp[0]-w2:cp[0]+w2, :] )
+ else:
+ return image
+
+ return np.array(new_image)
+
+# Example (added for illustration): with factor=0.5 a 400-row frame is cropped
+# to the 200 rows centred on the ROI, keeping the full width.
+#
+#     image = np.zeros((3, 400, 600))
+#     roi = (250, 150, 40, 40)        # x, y, w, h
+#     _crop(image, roi, 0.5).shape    # -> (3, 200, 600)
+
+
+
+
+def moving_rois(manalyser, roi_color='red,blue', lw=3, e=50,
+ rel_rotation_time=1, crop_factor=0.5,
+ _exclude_imagefolders=[], _order=None,
+ _draw_arrow=False):
+ '''
+ Visualization video how the ROI boxes track the analyzed features,
+ drawn on top of the original video frames.
+
+ Arguments
+ ---------
+    roi_color : string
+ A valid matplotlib color. If two comma separated colors
+ given use the first for the left eye and the second for the right.
+ lw : int
+ ROI box line width, in pixels
+ e : int
+ Extended region for brightness normalization, in pixels
+ rel_rotation_time : int or float
+ Blend the last and the first next frame for "smooth"
+ transition
+    crop_factor : int or float
+        If smaller than 1, the frames are cropped in Y.
+
+ RETURNS
+ -------
+ None
+ '''
+ savedir = os.path.join(ANALYSES_SAVEDIR, 'illustrate_experiments', 'moving_rois', manalyser.get_specimen_name())
+ os.makedirs(savedir, exist_ok=True)
+ if _draw_arrow:
+ os.makedirs(os.path.join(savedir, 'inset'), exist_ok=True)
+
+ colors = roi_color.split(',')
+
+ image_fns, ROIs, angles = manalyser.get_time_ordered()
+
+ # ------------------
+ # For mosaic
+ if _exclude_imagefolders:
+
+ newdata = []
+ for fn, ROI, angle in zip(image_fns, ROIs, angles):
+            if angles_from_fn(os.path.basename(os.path.dirname(fn))) not in _exclude_imagefolders:
+                newdata.append([fn, ROI, angle])
+ image_fns, ROIs, angles = list(zip(*newdata))
+
+ if _order:
+ image_fns = list(image_fns)
+ ROIs = list(ROIs)
+ angles = list(angles)
+ newdata = []
+
+ for o in _order:
+
+ indices = [i for i in range(len(image_fns)) if angles_from_fn(os.path.basename(os.path.dirname((image_fns[i])))) == o]
+
+ for i in indices:
+ newdata.append([image_fns[i], ROIs[i], angles[i]])
+
+ image_fns, ROIs, angles = list(zip(*newdata))
+ # End for mosaic
+ # ------------------
+
+ N = len(image_fns)
+ i_frame = 0
+
+ crop_roi = ROIs[0]
+
+
+ if _draw_arrow:
+ # Create and setup figure
+ fig, ax = plt.subplots(figsize=(10,10))
+ ax._myarrow = None
+ ax.set_xlim(-1.5, 1.5)
+ ax.set_ylim(-1.5, 1.5)
+ ax.set_axis_off()
+
+ # vector normalisation values
+ normalisations = {fol: manalyser.get_displacements_from_folder(fol)[0][-1] for fol in manalyser.list_imagefolders(only_measured=True)}
+
+ ax.add_patch( Circle((0,0), 1, fill=False, lw=3, color="gray") )
+
+
+ def draw_arrow_inset(savefn, vx, vy, **kwargs):
+ if ax._myarrow:
+ ax._myarrow.remove()
+
+ ax._myarrow = Arrow(0,0,vx,vy, width=0.5, **kwargs)
+ ax.add_patch(ax._myarrow)
+ fig.savefig(savefn, transparent=True)
+
+
+ for i_fn, (fn, roi, angle) in enumerate(zip(image_fns, ROIs, angles)):
+
+ if i_fn+1 < len(image_fns) and angle != angles[i_fn-1]:
+ crop_roi = roi
+
+ print("{}/{}".format(i_fn+1, N))
+
+ image = _load_image(fn, roi, e)
+
+ if angle[0] > 0:
+ color = (255, 0, 0)
+ else:
+ color = (0,0,255)
+
+ image = _box(image, roi, lw, color=color)
+
+ image = _crop(image, crop_roi, crop_factor)
+
+ savefn = os.path.join(savedir, 'image_{:08d}.png'.format(i_frame))
+ tifffile.imsave(savefn, image.astype(np.uint8))
+ i_frame += 1
+
+ if _draw_arrow:
+ vx, vy = (roi[0] - crop_roi[0], roi[1] - crop_roi[1])
+ vx, vy = np.array([vx, -vy]) / normalisations[os.path.basename(os.path.dirname(fn))]
+ draw_arrow_inset(os.path.join(savedir, 'inset', 'image_{:08d}.png'.format(i_frame)), vx, vy,
+ color='white')
+
+
+        if rel_rotation_time and i_fn+1 < len(angles) and angle != angles[i_fn+1]:
+ next_image = _load_image(image_fns[i_fn+1], roi, e)
+ if angles[i_fn+1][0] > 0:
+ color = (255, 0, 0)
+ else:
+ color = (0,0,255)
+ next_image = _box(next_image, ROIs[i_fn+1], lw, color=color)
+
+ for blend in np.zeros(5).tolist() + np.linspace(0, 1, 25).tolist():
+ im = image*(1-blend) + _crop(next_image, ROIs[i_fn+1], crop_factor)*(blend)
+ savefn = os.path.join(savedir, 'image_{:08d}.png'.format(i_frame))
+ tifffile.imsave(savefn, im.astype(np.uint8))
+
+ if _draw_arrow:
+ draw_arrow_inset(os.path.join(savedir, 'inset', 'image_{:08d}.png'.format(i_frame)), vx, vy,
+ color='white')
+
+ i_frame += 1
+
+
+def _get_closest(folder_a, folders_b, index=False):
+ '''
+    Finds the folder in folders_b closest (Euclidean distance) to folder_a.
+
+    Returns closest_folder, distance (or the argmin index if index=True)
+ '''
+
+ dists = []
+
+ A = np.array( folder_a )
+ for b in folders_b:
+ B = np.array( b )
+ dists.append( np.linalg.norm(A-B) )
+
+ if index:
+ return np.argmin(dists), np.min(dists)
+ else:
+ return folders_b[np.argmin(dists)], np.min(dists)
+
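+# Example (added for illustration): plain Euclidean nearest neighbour over
+# angle pairs.
+#
+#     _get_closest((0, 0), [(10, 0), (3, 4), (0, 1)])
+#     # -> ((0, 1), 1.0)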
+
+
+def moving_rois_mosaic(manalysers, common_threshold=7.5, **kwargs):
+ '''
+ Uses moving_rois() to make a mosaic video of the experiments, in which
+ the specimens move in sync.
+
+ The first specimen (manalyser[0]) determines the rotation order (in the
+ order as it was recorded).
+
+
+ ARGUMENTS
+ ---------
+ common_threshold : int
+        In rotation stage steps, how close the recordings of different
+        analysers have to be to be classified as the same.
+ kwargs : dict
+ Passed to moving_rois
+
+ RETURNS
+ -------
+ None
+ '''
+
+ orders = {manalyser.name: [] for manalyser in manalysers}
+ excludes = {}
+
+ folders = [angles_from_fn(os.path.basename(os.path.dirname(fn))) for fn in manalysers[0].get_time_ordered(first_frame_only=True)[0]]
+
+ has_matches = {fol: 0 for fol in folders}
+ conversion = {manalyser.name: {} for manalyser in manalysers}
+
+ for folder in folders:
+ for manalyser in manalysers[1:]:
+ fols = [angles_from_fn(fol) for fol in manalyser.list_imagefolders(only_measured=True)]
+ closest, distance = _get_closest(folder, fols)
+
+ if distance < common_threshold:
+ orders[manalyser.name].append(closest)
+ has_matches[folder] += 1
+ conversion[manalyser.name][closest] = folder
+
+ orders[manalysers[0].name] = folders.copy()
+ orders[manalysers[0].name] = [fol for fol in orders[manalysers[0].name] if has_matches[fol] == len(manalysers)-1]
+ fols = [angles_from_fn(fol) for fol in manalysers[0].list_imagefolders(only_measured=True)]
+ excludes[manalysers[0].name] = [fol for fol in fols if not fol in orders[manalysers[0].name]]
+
+ for manalyser in manalysers[1:]:
+ orders[manalyser.name] = [fol for fol in orders[manalyser.name] if has_matches[conversion[manalyser.name][fol]] == len(manalysers)-1]
+
+ fols = [angles_from_fn(fol) for fol in manalyser.list_imagefolders(only_measured=True)]
+ excludes[manalyser.name] = [fol for fol in fols if not fol in orders[manalyser.name]]
+
+
+ # FIXME Starts too many processes with many specimens, possibly leading to
+ # out of RAM
+
+ processes = []
+
+ for manalyser in manalysers:
+ p = mp.Process(target=moving_rois, args=[manalyser],
+ kwargs={ **{"_exclude_imagefolders": excludes[manalyser.name],
+ "_order": orders[manalyser.name],
+ "_draw_arrow": True}, **kwargs})
+ p.start()
+
+ processes.append(p)
+
+ for p in processes:
+ p.join()
+
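+# Hedged usage sketch of moving_rois_mosaic. The batching below is one
+# possible mitigation of the FIXME above (one process per specimen can
+# run out of RAM); `manalysers` and `batch_size` are illustrative names:
+#
+#   batch_size = 4
+#   for i in range(0, len(manalysers), batch_size):
+#       moving_rois_mosaic(manalysers[i:i+batch_size])
+#
+# Note that the common-location matching is computed per call, so
+# batching changes which recordings get matched; it is a sketch only.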
+
+
+def illustrate_experiments(manalyser, rel_rotation_time=1):
+ '''
+    Create a video visualizing how the vectormap is built.
+
+ Arguments
+ ---------
+ rel_rotation_time : int or float
+        Relative time spent on incrementally rotating the vectormap
+ between the stimuli.
+ '''
+ print('illustrate_experiments')
+
+ savedir = os.path.join(ANALYSES_SAVEDIR, 'illustrate_experiments', manalyser.get_specimen_name())
+ os.makedirs(savedir, exist_ok=True)
+
+ fig = plt.figure(figsize=(6,6))
+ ax = fig.add_subplot(1,1,1, projection='3d')
+ plt.axis('off')
+ plt.subplots_adjust(left=-0.2, bottom=-0.2, right=1.2, top=1.2, wspace=0.0, hspace=0)
+
+
+ # Points, vectors, angles
+ lp, lv, la = manalyser.get_3d_vectors('left', return_angles=True)
+ rp, rv, ra = manalyser.get_3d_vectors('right', return_angles=True)
+
+ image_fns, ROIs, angles = manalyser.get_time_ordered()
+ arrow_artists = []
+
+ lpoints, lvectors = [[],[]]
+ rpoints, rvectors = [[],[]]
+
+ print(len(image_fns))
+
+ print(len(ROIs))
+ print(len(angles))
+
+ i_frame = 0
+ for i_angle, (image_fn, ROI, angle) in enumerate(zip(image_fns, ROIs, angles)):
+
+ length_scale = (i_angle%20)/20 + 0.5
+ final_image = (i_angle+1)%20 == 0
+
+ #if not final_image:
+ # continue
+        # Fix to account for mirror_horizontal in 2D vectors
+ angle = [-1*angle[0], angle[1]]
+
+
+ # Calculate cameras place
+ #x,y,z = coordinates.camera2Fly(*angle)
+ #r, phi, theta = coordinates.to_spherical(x, y, z, return_degrees=True)
+
+ #elev = float(90-theta)
+ azim = -angle[0]+90
+
+ if angle in la:
+ indx = la.index(angle)
+
+ nlp = coordinates.rotate_about_x(lp[indx], -angle[1])
+ nlv = coordinates.rotate_about_x(lp[indx]+lv[indx], -angle[1]) - nlp
+
+ if final_image:
+ lpoints.append(lp[indx])
+ lvectors.append(lv[indx])
+ else:
+ arrow_artists.extend(vector_plot(ax, [nlp], [length_scale*nlv], color='red'))
+
+
+ if angle in ra:
+ indx = ra.index(angle)
+
+ nrp = coordinates.rotate_about_x(rp[indx], -angle[1])
+ nrv = coordinates.rotate_about_x(rp[indx] + rv[indx], -angle[1]) - nrp
+
+ if final_image:
+ rpoints.append(rp[indx])
+ rvectors.append(rv[indx])
+ else:
+ arrow_artists.extend(vector_plot(ax, [nrp], [length_scale*nrv], color='blue'))
+
+ if lpoints:
+ tmp_lpoints, tmp_lvectors = coordinates.rotate_vectors(np.array(lpoints), np.array(lvectors), 0, -math.radians(angle[1]), 0)
+
+ arrow_artists.extend(vector_plot(ax, tmp_lpoints, tmp_lvectors, color='red', mutation_scale=3,
+ camerapos=[0,azim]))
+
+
+ if rpoints:
+ tmp_rpoints, tmp_rvectors = coordinates.rotate_vectors(np.array(rpoints), np.array(rvectors), 0, -math.radians(angle[1]), 0)
+
+
+ arrow_artists.extend(vector_plot(ax, tmp_rpoints, tmp_rvectors, color='blue', mutation_scale=3,
+ camerapos=[0,azim]))
+
+
+ #ax.dist = 2
+ ax.view_init(elev=0, azim=azim)
+ print('Horizontal {}, vertical {}'.format(*angle))
+
+ savefn = os.path.join(savedir, 'image_{:08d}.png'.format(i_frame))
+ fig.savefig(savefn, dpi=300, transparent=True)
+ i_frame += 1
+
+ # Rotating the saved image
+ #camera_rotation = coordinates.correct_camera_rotation(*angle, return_degrees=True)
+ #saved_image = Image.open(savefn)
+ #saved_image.rotate(-camera_rotation).save(savefn)
+
+ # Final image of this location, rotate the plot to the
+ # new location
+        # Guard i_angle+1 to avoid an IndexError on the final angle
+        if final_image and rel_rotation_time and i_angle+1 < len(angles):
+ next_angle = angles[i_angle+1]
+ duration = 25
+ hold_duration = 5
+
+ for i, (h, v) in enumerate(zip(np.linspace(-angle[0], next_angle[0], duration), np.linspace(angle[1], next_angle[1], duration))):
+
+ angle = [-h,v]
+ azim = -angle[0]+90
+
+ # FIXME The following part is copy paste from the part above
+ # -> put it behind one function etc.
+
+ # Clear arrows
+ for arrow_artist in arrow_artists:
+ arrow_artist.remove()
+ arrow_artists = []
+
+                # Redraw arrows
+ if lpoints:
+ tmp_lpoints, tmp_lvectors = coordinates.rotate_vectors(np.array(lpoints), np.array(lvectors), 0, -math.radians(angle[1]), 0)
+
+ arrow_artists.extend(vector_plot(ax, tmp_lpoints, tmp_lvectors, color='red', mutation_scale=3,
+ camerapos=[0,azim]))
+
+
+ if rpoints:
+ tmp_rpoints, tmp_rvectors = coordinates.rotate_vectors(np.array(rpoints), np.array(rvectors), 0, -math.radians(angle[1]), 0)
+
+
+ arrow_artists.extend(vector_plot(ax, tmp_rpoints, tmp_rvectors, color='blue', mutation_scale=3,
+ camerapos=[0,azim]))
+
+
+ ax.view_init(elev=0, azim=azim)
+
+ savefn = os.path.join(savedir, 'image_{:08d}.png'.format(i_frame))
+ fig.savefig(savefn, dpi=300, transparent=True)
+ i_frame += 1
+
+ if i == 0:
+ for repeat in range(hold_duration):
+ savefn = os.path.join(savedir, 'image_{:08d}.png'.format(i_frame))
+ fig.savefig(savefn, dpi=300, transparent=True)
+ i_frame += 1
+
+
+
+ for arrow_artist in arrow_artists:
+ arrow_artist.remove()
+ arrow_artists = []
+
+ plt.close()
+
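+# The frames above are written as image_00000000.png, image_00000001.png,
+# ... so the video can be assembled afterwards with the ffmpeg command
+# line tool (assuming ffmpeg is installed), for example:
+#
+#   ffmpeg -framerate 25 -i image_%08d.png illustration.mp4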
+
+
+def rotation_mosaic(manalyser, imsize=(512,512),
+ e=50, crop_factor=0.5):
+ '''
+    Plot a mosaic (matrix) of the taken images, arranged by rotation.
+
+    Arguments
+    ---------
+    manalyser : obj
+        Analyser object
+    imsize : tuple of ints
+        Size (width, height) of each image tile in the mosaic
+    e : int
+        Passed to _load_image
+    crop_factor : float
+        Currently unused
+ '''
+
+ # Part 1) Find rotations matching the interpolation
+
+ rotations = manalyser.list_rotations()
+
+ kdtree = KDTree(rotations)
+
+ hrots, vrots = zip(*rotations)
+
+ hmin, hmax = (np.min(hrots), np.max(hrots))
+ vmin, vmax = (np.min(vrots), np.max(vrots))
+
+    # Grid step of roughly 10 degrees expressed in rotation encoder
+    # units (assuming 1024 encoder steps per full 360-degree turn)
+    hstep = int(10 * (1024/360))
+    vstep = int(10 * (1024/360))
+
+ plot_data = []
+
+ intp_v = np.arange(vmin, vmax, vstep)[::-1]
+ intp_h = np.arange(hmin, hmax, hstep)
+
+ for i_v, vrot in enumerate(intp_v):
+ for i_h, hrot in enumerate(intp_h):
+
+ distance, i_point = kdtree.query( (hrot, vrot), n_jobs=-1)
+
+ if distance > math.sqrt((hstep/1.5)**2 + (vstep/1.5)**2):
+ continue
+
+ plot_data.append((i_v, i_h, i_point))
+
+
+ # Part 2: Plot the images
+
+ image_fns, ROIs, angles = manalyser.get_time_ordered(angles_in_degrees=False,
+ first_frame_only=True)
+
+ w_mosaic = int(imsize[0]*len(intp_h))
+ h_mosaic = int(imsize[1]*len(intp_v))
+ mosaic = 255 * np.ones( (h_mosaic, w_mosaic) )
+
+ print('Mosaic shape {}'.format(mosaic.shape))
+
+ for i_plot_data, (i_v, i_h, i_point) in enumerate(plot_data):
+ print("{}/{}".format(i_plot_data+1, len(plot_data)))
+
+ x = i_v * imsize[0]
+ y = i_h * imsize[1]
+
+ try:
+ index = angles.index(list(rotations[i_point]))
+ except ValueError:
+ continue
+
+ try:
+            image = _load_image(image_fns[index], ROIs[index], e)
+        except Exception:
+ continue
+
+ image = image[0, :, :]
+ image = cv2.resize(image, dsize=(*imsize,))
+
+ mosaic[x:x+imsize[0], y:y+imsize[1]] = image
+
+ drot = manalyser.get_rotstep_size()
+
+ fig, ax = plt.subplots(figsize=(10,10))
+ ax.imshow(mosaic, cmap='gray', extent=[hmin*drot, hmax*drot, vmin*drot, vmax*drot])
+
+ fig.savefig(os.path.join(ANALYSES_SAVEDIR, 'illustrate_experiments', "mosaic_"+manalyser.name+'.jpg'),
+ dpi=600)
+
+ plt.close()
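+
+# Hedged usage sketch of rotation_mosaic (manalyser is any analyser
+# object providing the methods used above; the mosaic JPEG is written
+# under ANALYSES_SAVEDIR/illustrate_experiments):
+#
+#   rotation_mosaic(manalyser, imsize=(256, 256))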
diff --git a/gonio-analysis/gonioanalysis/drosom/plotting/plotter.py b/gonio-analysis/gonioanalysis/drosom/plotting/plotter.py
new file mode 100644
index 0000000..a78d977
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/plotting/plotter.py
@@ -0,0 +1,345 @@
+import os
+import math
+
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.patches
+import matplotlib.animation
+from matplotlib.patches import FancyArrowPatch
+from mpl_toolkits.mplot3d import proj3d
+import mpl_toolkits.axes_grid1
+
+from gonioanalysis.directories import ANALYSES_SAVEDIR
+from gonioanalysis.drosom.optic_flow import flow_direction
+
+
+# make_animation_timestep and VECTORMAP_PULSATION_PARAMETERS are used by
+# plot_3d_vectormap below; assumed to live in .common with the others
+from .common import (make_animation_angles, make_animation_timestep,
+                     vector_plot, VECTORMAP_PULSATION_PARAMETERS)
+
+
+class MPlotter:
+
+
+ def __init__(self):
+
+ self.savedir = os.path.join(ANALYSES_SAVEDIR, 'mplots')
+ os.makedirs(self.savedir, exist_ok=True)
+
+ # Variables for figures that get plotted to the same figure between specimens
+ self.magnitude_1d_figax = None
+
+
+
+ def plot_2d_trajectories(self, manalyser):
+ '''
+ Plot 2D movement trajectories of the ROIs, separetly for each imaged position.
+ '''
+
+ plt_limits = [[],[],[],[]]
+
+ figure_content = []
+
+ for eye in ['left', 'right']:
+ angles, movements = manalyser.get_raw_xy_traces(eye)
+
+ for movement in movements:
+ subfig_dict = {'eye': eye}
+
+ x, y = [[],[]]
+ for repeat in movement:
+ x.extend(repeat['x'])
+ y.extend(repeat['y'])
+
+ plt_limits[0].append(np.min(x))
+ plt_limits[1].append(np.max(x))
+ plt_limits[2].append(np.min(y))
+ plt_limits[3].append(np.max(y))
+
+ subfig_dict = {'x': x, 'y': y, **subfig_dict}
+ figure_content.append(subfig_dict)
+
+ ROWS, COLS = (8, 6)
+ i_page = 0
+
+
+ for i, data in enumerate(figure_content):
+
+ if i == i_page * ROWS * COLS:
+ fig = plt.figure()
+ i_page += 1
+
+
+ ax = plt.subplot(ROWS, COLS, i - ((i_page-1) * ROWS * COLS)+1)
+ ax.axis('off')
+ cmap = matplotlib.cm.get_cmap('inferno', len(data['x']))
+
+ for i_point in range(1, len(data['x'])):
+ ax.plot([data['x'][i_point], data['x'][i_point-1]], [data['y'][i_point], data['y'][i_point-1]], color=cmap(i_point/len(data['x'])))
+
+
+ #ax.set_xlim([np.percentile(plt_limits[0], 99), np.percentile(plt_limits[1], 99)])
+ #ax.set_ylim([np.percentile(plt_limits[2], 99), np.percentile(plt_limits[3], 99)])
+
+
+
+ #ax.suptitle('{} eye, {}'.format(data['eye'], data['time']))
+
+ plt.show()
+
+
+ def plotMagnitude2D(self, manalyser):
+ '''
+
+ TODO
+ - combine eyes to yield better picture
+ - axes from pixel values to actual
+
+ '''
+
+ distancef = lambda p1,p2: math.sqrt((p2[0]-p1[0])**2+(p2[1]-p1[1])**2)
+
+ fig, ax = plt.subplots(ncols=2)
+ for eye_i, (color, eye) in enumerate(zip(['red', 'blue'], ['left', 'right'])):
+ angles, X, Y = manalyser.get2DVectors(eye)
+
+
+ HOR = []
+ PIT = []
+ for angle, x, y in zip(angles, X, Y):
+ horizontal, pitch = angle
+ HOR.append(horizontal)
+ PIT.append(pitch)
+
+ # TRY NEAREST NEIGHBOUR INTERPOLATION
+ res = (50, 50)
+ xi = np.linspace(np.min(HOR), np.max(HOR), res[0])
+ yi = np.linspace(np.min(PIT), np.max(PIT), res[1])
+ zi = np.zeros(res)
+ for j in range(len(yi)):
+ for i in range(len(xi)):
+ point = findClosest((xi[i], yi[j]), angles, distance_function=distancef)
+
+ index = angles.index(point)
+
+ zi[j][i] = (math.sqrt(X[index]**2 + Y[index]**2))
+
+
+ print('{} to {}'.format(xi[0], xi[-1]))
+ print('{} to {}'.format(yi[0], yi[-1]))
+
+ im = ax[eye_i].imshow(zi, interpolation='none', extent=[xi[0], xi[-1], yi[0], yi[-1]])
+ #cax = fig.add_axes([0.27, 0.8, 0.5, 0.05])
+ divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax[eye_i])
+ cax = divider.append_axes('right', size='5%', pad=0.05)
+
+ fig.colorbar(im, cax=cax)
+
+ ax[eye_i].title.set_text('{} eye'.format(eye.capitalize()))
+
+ #XYZ.append([xi,yi,zi])
+
+ #fig = plotter.contourplot(XYZ, 1, 2, colorbar=True)
+ #X,Y = np.meshgrid(X, Y)
+ #plt.pcolor(X,Y,Z)
+
+ #ax.set_xlim(-np.max(HOR)-10, -np.min(HOR)+10)
+ #ax.set_ylim(-np.max(PIT)-10, -np.min(PIT)+10)
+ #ax.set_xlabel('Horizontal angle (degrees)')
+ #ax.set_ylabel('Pitch angle (degrees)')
+
+
+ class Arrow3D(FancyArrowPatch):
+ def __init__(self, x0, y0, z0, x1, y1, z1, *args, **kwargs):
+ FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
+ #self._verts3d = xs, ys, zs
+ self._verts3d = (x0, x1), (y0, y1), (z0, z1)
+
+ def draw(self, renderer):
+ xs3d, ys3d, zs3d = self._verts3d
+ xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
+ self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
+ FancyArrowPatch.draw(self, renderer)
+
+
+ def plot_3d_vectormap_mayavi(self, manalyser):
+        '''
+        Use mayavi to make the 3D image that can then be saved in the obj file format.
+        '''
+        # Deferred import so that mayavi stays an optional dependency
+        from mayavi import mlab
+
+        for color, eye in zip([(1.,0,0), (0,0,1.)], ['left', 'right']):
+ vectors_3d = manalyser.get_3d_vectors(eye, correct_level=True)
+
+
+ N = len(vectors_3d)
+ arrays = [np.zeros(N) for i in range(6)]
+
+ for i in range(N):
+ arrays[0][i] = vectors_3d[i][1][0]
+ arrays[1][i] = vectors_3d[i][2][0]
+ arrays[2][i] = vectors_3d[i][3][0]
+
+ arrays[3][i] = vectors_3d[i][1][1] - arrays[0][i]
+ arrays[4][i] = vectors_3d[i][2][1] - arrays[1][i]
+ arrays[5][i] = vectors_3d[i][3][1] - arrays[2][i]
+
+ mlab.quiver3d(*arrays, color=color)
+
+ mlab.show()
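+
+        # For the obj export mentioned above, mayavi can write Wavefront
+        # obj files directly, e.g. (a sketch; call after drawing):
+        #
+        #   mlab.savefig('vectormap.obj')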
+
+
+ def when_moved(self, event):
+ '''
+ Callback to make two axes to have synced rotation when rotating
+ self.axes[0].
+ '''
+ if event.inaxes == self.axes[0]:
+ self.axes[1].view_init(elev = self.axes[0].elev, azim = self.axes[0].azim)
+ self.fig.canvas.draw_idle()
+
+
+ def plot_3d_vectormap(self, manalyser, with_optic_flow=False, animation=False, arrow_animation=True):
+        '''
+        Plot a 3D vectormap of the specimen.
+
+        Arguments
+        ---------
+        with_optic_flow : False or float
+            Angle in degrees. If non-false, also plot the estimated
+            optic flow with this parameter.
+        animation : bool
+            If True, rotate the view through a sequence of
+            (elevation, azimuth) points and save an animation.
+        arrow_animation : bool
+            If True, animate (pulsate) the arrows between frames.
+        '''
+
+ if animation:
+ animation = make_animation_angles()
+
+ fig = plt.figure(figsize=(15,15))
+ fig.canvas.set_window_title(manalyser.get_specimen_name())
+
+
+ if with_optic_flow:
+ axes = []
+ axes.append( fig.add_subplot(121, projection='3d') )
+ axes.append( fig.add_subplot(122, projection='3d') )
+
+ else:
+ axes = [fig.add_subplot(111, projection='3d')]
+
+
+
+ points = []
+ pitches = []
+
+
+ for color, eye in zip(['red', 'blue'], ['left', 'right']):
+ vectors_3d = manalyser.get_3d_vectors(eye, correct_level=True)
+ vector_plot(axes[0], *vectors_3d, color=color, mutation_scale=15)
+
+ if with_optic_flow:
+ flow_vectors = [flow_direction(P0, xrot=with_optic_flow) for P0 in vectors_3d[0]]
+ vector_plot(axes[1], vectors_3d[0], flow_vectors)
+
+
+
+ for ax in axes:
+ ax.set_xlim(-1, 1)
+ ax.set_ylim(-1,1)
+ ax.set_zlim(-1, 1)
+
+ ax.set_xlabel('x')
+ ax.set_ylabel('y')
+ ax.set_zlabel('z')
+
+ ax.view_init(elev=90, azim=90)
+
+ plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
+
+
+
+ if with_optic_flow and not animation:
+ connection = fig.canvas.mpl_connect('motion_notify_event', self.when_moved)
+ self.axes = axes
+ self.fig = fig
+
+ if animation:
+ savedir = os.path.join(self.savedir, 'vectormap_3d_anim_{}'.format(manalyser.get_specimen_name()))
+ os.makedirs(savedir, exist_ok=True)
+
+ #plt.show(block=False)
+
+ try:
+ video_writer = matplotlib.animation.writers['ffmpeg'](fps=20, metadata={'title':manalyser.get_specimen_name()})
+ video_writer.setup(fig, os.path.join(savedir,'{}.mp4'.format(manalyser.get_specimen_name())))
+ except RuntimeError:
+                print('Install the ffmpeg program (the ffmpeg binary, '
+                      'not the pip package) to get the video')
+ video_writer = False
+
+ doublegrab_next = False
+
+ plt.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0)
+
+ for i, (elevation, azimuth) in enumerate(animation):
+
+ try:
+
+
+ if arrow_animation:
+ axes[0].clear()
+ for color, eye in zip(['red', 'blue'], ['left', 'right']):
+ vectors_3d = manalyser.get_3d_vectors(eye, correct_level=True)
+ vector_plot(axes[0], *vectors_3d, color=color, mutation_scale=15,
+ animate=arrow_animation, guidance=True, camerapos=(elevation, azimuth))
+
+ make_animation_timestep(**VECTORMAP_PULSATION_PARAMETERS)
+
+
+ style = 'normal'
+ title_string = manalyser.get_short_name()
+
+ if ';' in title_string:
+ title_string, style = title_string.split(';')
+
+                    if title_string == '':
+ # Use full name if short name is not set
+ title_string = manalyser.get_specimen_name()
+
+ #axes[0].text2D(0.5, 0.85, title_string, transform=ax.transAxes,
+ # ha='center', va='center', fontsize=38, fontstyle=style)
+ #axes[0].text2D(0.75, 0.225, "n={}".format(manalyser.get_N_specimens()),
+ # transform=ax.transAxes, ha='center', va='center', fontsize=30)
+
+ print('{} {}'.format(elevation, azimuth))
+ for ax in axes:
+ ax.view_init(elev=elevation, azim=azimuth)
+
+ #ax.dist = 6
+
+ fig.canvas.draw_idle()
+ # for debugging here plt.show()
+ fn = 'image_{:0>8}.png'.format(i)
+ #fig.savefig(os.path.join(savedir, fn), bbox_inches=Bbox.from_bounds(2.75,3,10,10))
+ if video_writer:
+ video_writer.grab_frame()
+ if doublegrab_next:
+ video_writer.grab_frame()
+ doublegrab_next = False
+ #plt.pause(0.1)
+
+ except Exception as e:
+ print('Could not make a frame, error message on the next line')
+ print(e)
+ doublegrab_next = True
+ if video_writer:
+ video_writer.finish()
+
+
+
+ else:
+ #plt.show()
+ pass
+ # make the panes transparent
+ #ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
+ #ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
+ #ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
+ # make the grid lines transparent
+ #ax.xaxis._axinfo["grid"]['color'] = (1,1,1,0)
+ #ax.yaxis._axinfo["grid"]['color'] = (1,1,1,0)
+ #ax.zaxis._axinfo["grid"]['color'] = (1,1,1,0)
+ #plt.savefig('vectormap.svg', transparent=True)
+
diff --git a/gonio-analysis/gonioanalysis/drosom/reports/__init__.py b/gonio-analysis/gonioanalysis/drosom/reports/__init__.py
new file mode 100644
index 0000000..0c996eb
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/reports/__init__.py
@@ -0,0 +1,5 @@
+
+
+from .left_right import left_right_summary, left_right_displacements
+from .pdf_summary import pdf_summary
+
diff --git a/gonio-analysis/gonioanalysis/drosom/reports/left_right.py b/gonio-analysis/gonioanalysis/drosom/reports/left_right.py
new file mode 100644
index 0000000..a797bcc
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/reports/left_right.py
@@ -0,0 +1,532 @@
+'''
+Exporting results for the set of experiments where
+one location in each left/right eye was measured.
+'''
+import os
+import csv
+import numpy as np
+
+from gonioanalysis.drosom.kinematics import (
+ mean_max_response,
+ _sigmoidal_fit,
+ _simple_latencies,
+ )
+from gonioanalysis.directories import ANALYSES_SAVEDIR
+
+LR_SAVEDIR = os.path.join(ANALYSES_SAVEDIR, 'LR_exports')
+
+
+def write_CSV_cols(fn, columns):
+    '''
+    Write columns into a CSV file, row by row.
+
+    Note
+    -----
+    Writes as many rows as there are items in the first column;
+    shorter columns are padded with empty values.
+
+    Arguments
+    ----------
+    columns : list of lists
+        Each item is a column, and each item of a column is the
+        value of one row.
+    '''
+
+
+ with open(fn, 'w') as fp:
+ writer = csv.writer(fp, delimiter=',')
+ for i in range(len(columns[0])):
+ row = []
+ for sublist in columns:
+ try:
+ row.append(sublist[i])
+                except IndexError:
+ row.append('')
+ writer.writerow(row)
+ #writer.writerow([sublist[i] for sublist in columns])
+
+
+def read_CSV_cols(fn):
+    '''
+    Read a CSV file and return its contents as a list of columns
+    (the transpose of the rows on disk).
+    '''
+    rows = []
+ with open(fn, 'r') as fp:
+ reader = csv.reader(fp, delimiter=',')
+ for row in reader:
+ rows.append(row)
+
+ return list(map(list, zip(*rows)))
+
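+# A small round-trip sketch of the two CSV helpers above ('demo.csv' is
+# an illustrative file name). write_CSV_cols pads the shorter column and
+# read_CSV_cols returns everything back as strings:
+#
+#   write_CSV_cols('demo.csv', [['a', 1, 2], ['b', 3]])
+#   read_CSV_cols('demo.csv')
+#   # -> [['a', '1', '2'], ['b', '3', '']]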
+
+def left_right_displacements(manalysers, group_name,
+ fn_prefix='LR-displacements',
+ savedir=LR_SAVEDIR,
+ stimuli={'uv': ['uv', ')'], 'green': ['green'], 'NA': []},
+ strong_weak_division=False, divide_threshold=3,
+ wanted_imagefolders=None,
+ microns=True, phase=False, mean_lr=False,
+ reference_frame=False):
+ '''
+ Saves CSV files of left and right eye movements and ERGs.
+
+ If many recordings for an eye/stimulus/specimen combination exist,
+ then takes the mean of these (so that each eye appears only once).
+
+
+ Arguments
+ ----------
+    manalysers : list of objects
+        MAnalyser objects of the specimens to export.
+ group_name : string
+ Name that describes the manalysers. For example, "blind_norpa" or
+ "controls".
+    fn_prefix : string
+        Text to prepend to the beginning of the CSV filename.
+ stimuli : dict of lists of strings
+ Each key is the name of the stimulus, and matching value is a list of
+ the suffixes that match the stimulus (the suffix in the end of imagefolder name)
+ strong_weak_division : bool
+ If True, group data based on strong and weak eye instead of
+ combined left and right.
+    divide_threshold : int
+        Related to the strong_weak_division argument. Some specimens may
+        have recordings from only one eye; more than divide_threshold
+        left/right pairs are required in total for the division to be made.
+    wanted_imagefolders : None or dict
+        Keys are specimen names, values are sequences of wanted imagefolders.
+        Relaxes the horizontal conditions.
+ microns : bool
+ Convert pixel movement values to microns
+ phase : bool
+ If True, return phase (vector direction) instead of the magnitude.
+ mean_lr : bool
+ If True, average the left and right eye data together.
+ reference_frame : False or int
+        If an integer (between 0 and N_frames-1), use the corresponding
+ frame as a reference zero point.
+
+ '''
+
+ # each "file" is a list of columns
+
+ fs = None
+ efs = None
+
+ if wanted_imagefolders:
+ conditions = [None, None]
+ csv_files = {'NA': []}
+ else:
+ conditions = [lambda h: h>10, lambda h: h<-10]
+ csv_files = {stim: [] for stim in stimuli.keys()}
+
+ for manalyser in manalysers:
+
+ # Left eye
+ for eye, condition in zip(['left', 'right'], conditions):
+
+ if eye=="left" or mean_lr == False:
+ eyedata = {stim: [] for stim in stimuli.keys()}
+
+ for image_folder in manalyser.list_imagefolders(horizontal_condition=condition):
+
+ if wanted_imagefolders and image_folder not in wanted_imagefolders.get(manalyser.name, []):
+ # Skip image_folder if not in wanted_imagefolders (if it's not None)
+ continue
+
+ if wanted_imagefolders is None:
+ # look for match
+ stims = []
+ for _stim in stimuli.keys():
+ if any([image_folder.endswith(match) for match in stimuli[_stim]]):
+ stims.append( _stim )
+ else:
+ stims = ['NA']
+
+ for stim in stims:
+
+ trace = manalyser.get_magnitude_traces(eye,
+ image_folder=image_folder,
+ mean_repeats=True, microns=microns,
+ _phase=phase)
+
+ trace = list(trace.values())
+ if len(trace) >= 2:
+ raise NotImplementedError('mistake in implementation')
+
+
+ if trace:
+ # Check that fs matches
+ nfs = manalyser.get_imaging_frequency(image_folder=image_folder)
+ if fs is None:
+ fs = nfs
+ elif fs != nfs:
+ raise ValueError('Analysers with multiple fs!')
+
+ trace = trace[0][0]
+
+ if reference_frame is not False:
+ trace = [val - trace[reference_frame] for val in trace]
+
+ eyedata[stim].append(trace)
+
+ if eye == "right" or mean_lr == False:
+ for stim in stimuli.keys():
+ if eyedata[stim]:
+ column_name = '{}_mean_{}'.format(manalyser.name, eye)
+
+ csv_files[stim].append( np.mean(eyedata[stim], axis=0).tolist() )
+ csv_files[stim][-1].insert(0, column_name)
+
+
+ if "ERGs" in manalyser.linked_data:
+ data = manalyser.linked_data['ERGs']
+
+ repeatdata = {}
+
+ erg_columns = {}
+
+ for recording in data:
+ name = 'ERGs_' + recording['Stimulus'] +'!;!'+ recording['eye']
+ try:
+ N_repeats = int(recording['N_repeats'])
+ except ValueError:
+ N_repeats = 1
+
+ if repeatdata.get(name, 0) < N_repeats:
+ erg_columns[name] = (np.array(recording['data'])-recording['data'][0]).tolist()
+ erg_columns[name].insert(0, '{}_{} (mV)'.format(manalyser.name, recording['eye']))
+
+ if efs is None:
+ efs = recording['fs']
+ elif efs != recording['fs']:
+ raise ValueError('ERGs with multiple sampling frequencies!')
+
+ for name in erg_columns:
+ uname = name.split('!;!')[0]
+            csv_files.setdefault(uname, []).append(erg_columns[name])
+
+
+ if strong_weak_division:
+ new_csv_files = {}
+
+ # Process first DPP data then ERGs
+ strong_eyes = {}
+ keys = [k for k in csv_files if not 'ERGs' in k] + [k for k in csv_files if 'ERGs' in k]
+
+ for csv_file in keys:
+ pairs = []
+
+ column_titles = [column[0] for column in csv_files[csv_file]]
+
+ for column in csv_files[csv_file]:
+
+ if not 'right' in column[0]:
+ try:
+ indx = column_titles.index( column[0].replace('left', 'right'))
+ except ValueError:
+ continue
+
+ pairs.append((column, csv_files[csv_file][indx]))
+
+
+ if len(pairs) > divide_threshold:
+ new_csv_files[csv_file+'_strong'] = []
+ new_csv_files[csv_file+'_weak'] = []
+
+ for left, right in pairs:
+ # Fixme
+ rdata = [float(num) for num in right[1:]]
+ ldata = [float(num) for num in left[1:]]
+
+ if not 'ERGs' in csv_file:
+ specimen_name = '_'.join(left[0].split('_')[:-2])
+ else:
+ specimen_name = '_'.join(left[0].split('_')[:-1])
+
+ print(specimen_name)
+
+ if 'ERGs' in csv_file:
+ #ab = [400, 800]
+
+ if strong_eyes[specimen_name] == 'right':
+ new_csv_files[csv_file+'_strong'].append(right)
+ new_csv_files[csv_file+'_weak'].append(left)
+ else:
+ new_csv_files[csv_file+'_strong'].append(left)
+ new_csv_files[csv_file+'_weak'].append(right)
+ else:
+ ab = [None, None]
+ if abs(quantify_metric(rdata, ab=ab)) > abs(quantify_metric(ldata, ab=ab)):
+ new_csv_files[csv_file+'_strong'].append(right)
+ new_csv_files[csv_file+'_weak'].append(left)
+ strong_eyes[specimen_name] = 'right'
+ else:
+ new_csv_files[csv_file+'_strong'].append(left)
+ new_csv_files[csv_file+'_weak'].append(right)
+ strong_eyes[specimen_name] = 'left'
+
+ else:
+ new_csv_files[csv_file+'_all'] = csv_files[csv_file]
+
+ csv_files = new_csv_files
+
+
+
+ os.makedirs(savedir, exist_ok=True)
+
+ for csv_file in csv_files:
+ # Mean in the end
+ csv_files[csv_file].append(np.mean([csv_files[csv_file][i][1:] for i in range(len(csv_files[csv_file]))], axis=0).tolist())
+ try:
+ csv_files[csv_file][-1].insert(0, 'mean')
+ except AttributeError as e:
+ if csv_file.startswith('ERGs'):
+ print(csv_files[csv_file])
+ raise ValueError("No ERGs, check linking the ERG data")
+ else:
+ #raise e
+ continue
+
+ if csv_file.startswith('ERGs_'):
+ ufs = efs
+ else:
+ ufs = fs
+
+ # Add xaxis (time) in all files
+ data = csv_files[csv_file][0][1:]
+ xaxis = np.linspace(0, (len(data)-1)/ufs, len(data)).tolist()
+ xaxis.insert(0, 'time (s)')
+ csv_files[csv_file].insert(0, xaxis)
+
+ fn = '{}_{}_{}.csv'.format(fn_prefix, group_name, csv_file)
+ fn = os.path.join(savedir, fn)
+ write_CSV_cols(fn, csv_files[csv_file])
+
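+# Hedged usage sketch of left_right_displacements (the group name and
+# analyser list are illustrative; the CSVs land under LR_SAVEDIR):
+#
+#   left_right_displacements(manalysers, 'controls',
+#                            stimuli={'uv': ['uv'], 'green': ['green']},
+#                            microns=True)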
+
+def quantify_metric(data1d, metric_type='mean', ab=(None, None)):
+ '''
+    Quantify a single-value metric from a 1D array (time series).
+
+    Arguments
+    ---------
+    metric_type : string
+        "mean" to take the mean of the range
+    ab : tuple of integers
+        The range as datapoint indices (start, end).
+ '''
+ part = data1d
+ if ab[1] is not None:
+ part = part[:ab[1]]
+ if ab[0] is not None:
+ part = part[ab[0]:]
+
+
+ if metric_type == 'mean':
+ value = np.mean(part)
+
+ return value
+
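+# Worked example of quantify_metric: with ab=(2, 6) the slice keeps the
+# datapoints at indices 2..5, so
+#
+#   quantify_metric([0, 1, 2, 3, 4, 5, 6], ab=(2, 6))
+#   # -> mean of [2, 3, 4, 5] = 3.5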
+
+def lrfiles_summarise(lrfiles, point_type='mean', ab=(None, None)):
+ '''
+ Datapoints for making box/bar plots and/or for statistical testing.
+
+ Arguments
+ ---------
+ lrfiles : list of filenames
+ LR-displacements files of the left_right_displacements.
+    point_type : string
+        Either "mean" to take the mean of the range (used for DPP movement
+        data), "min-start" to take the minimum of the range and subtract the
+        start value (used for ERGs), or "kinematics" for a sigmoidal fit.
+        If not specified, use 'mean'.
+    ab : tuple
+        Specify the range as indices (rows of the lrfiles excluding the header).
+        If not specified, try to autodetect based on whether "ERGs" is
+        contained in the filenames ((half, end) for DPP, (400, 800) for ERGs).
+ '''
+
+ csv_files = {}
+
+
+ for fn in lrfiles:
+
+
+ sfn = os.path.basename(fn)
+ specimen_name = '_'.join(sfn.split('_')[1:-1])
+ stim = sfn.split('_')[-1].split('.')[0]
+
+ csv_rows = csv_files.get(stim, {})
+
+
+ coldata = read_CSV_cols(fn)
+
+ if specimen_name not in csv_rows:
+ csv_rows[specimen_name] = []
+
+ if ab[0] is None or ab[1] is None:
+
+ # FIXME Quite specific
+ if 'ERGs' in sfn:
+ a, b = [400, 800]
+ else:
+ a, b = [int((len(coldata[0])-1)/2), len(coldata[0])-1]
+ else:
+ a, b = ab
+
+ # First column is time, the last is the mean, skip these
+ for col in coldata[1:-1]:
+
+ if point_type != 'kinematics':
+ if a is not None and b is not None:
+ numerical_col = [float(num) for num in col[a+1:b]]
+ else:
+ numerical_col = [float(num) for num in col[1:]]
+ else:
+ numerical_col = [float(num) for num in col[1:]]
+
+ if point_type == 'mean':
+ value = np.mean(numerical_col)
+ elif point_type.startswith('min-start'):
+ value = np.min(numerical_col) - float(col[1])
+
+ elif point_type == 'kinematics':
+ fs = 1/(float(coldata[0][2]) - float(coldata[0][1]))
+ value = _sigmoidal_fit([numerical_col], fs)
+
+ if value is None:
+ continue
+
+ value = [value[0][0], value[1][0], value[2][0]]
+ #value = _simple_latencies([numerical_col], fs)[0]
+
+ if len(lrfiles)==1 and point_type == "kinematics":
+ # expand CSV rows
+ for name, val in zip(['displacement', 'logistic growth rate', '1/2-risetime'], value):
+                        csv_rows.setdefault(specimen_name+'_'+name, []).append(val)
+
+ else:
+ csv_rows[specimen_name].append(value)
+
+
+ csv_files[stim] = csv_rows
+
+ path = os.path.join(os.path.dirname(lrfiles[0]), 'summary')
+ os.makedirs(path, exist_ok=True)
+
+ for fn in csv_files:
+ ofn = os.path.join( path, 'LR_summary_{}.csv'.format(fn) )
+ with open(ofn, 'w') as fp:
+ writer = csv.writer(fp, delimiter=',')
+ for row in csv_files[fn]:
+ writer.writerow([row]+csv_files[fn][row])
+
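+# Hedged usage sketch of lrfiles_summarise (file names are illustrative
+# outputs of left_right_displacements, not real paths):
+#
+#   lrfiles_summarise(['LR-displacements_controls_uv.csv',
+#                      'LR-displacements_controls_green.csv'],
+#                     point_type='mean')
+#
+# One summary CSV per stimulus is written into a 'summary' subdirectory
+# next to the input files.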
+
+
+def left_right_summary(manalysers):
+ '''
+    Write a condensed, one-row-per-specimen CSV summary of the left/right
+    eye responses and linked ERG data.
+ '''
+ csvfile = []
+
+ header = ['Specimen name', 'Left mean response (pixels)', 'Right mean response (pixels)', 'Right eye ERG response (mV)', 'Eyes with microsaccades', '"Normal" ERGs']
+
+ csvfile.append(header)
+
+ for manalyser in manalysers:
+
+ print(manalyser.get_specimen_name())
+
+ line = []
+
+ line.append(manalyser.get_specimen_name())
+
+ # Left eye
+
+ responses = []
+
+ for image_folder in manalyser.list_imagefolders(horizontal_condition=lambda h: h>=0):
+ if image_folder.endswith("green"):
+ continue
+
+ print(image_folder)
+ mean = mean_max_response(manalyser, image_folder)
+ if not np.isnan(mean):
+ responses.append(mean)
+
+ line.append(np.mean(responses))
+
+ # Right eye
+ responses = []
+
+ for image_folder in manalyser.list_imagefolders(horizontal_condition=lambda h: h<0):
+ if image_folder.endswith('green'):
+ continue
+ mean = mean_max_response(manalyser, image_folder)
+ if not np.isnan(mean):
+ responses.append(mean)
+
+ line.append(np.mean(responses))
+
+
+ uvresp = None
+ greenresp = None
+
+ print(manalyser.linked_data.keys())
+
+ if "ERGs" in manalyser.linked_data:
+ data = manalyser.linked_data['ERGs']
+
+ print(data[0].keys())
+
+ for recording in data:
+ print(recording.keys())
+ if int(recording['N_repeats']) == 25:
+
+ if recording['Stimulus'] == 'uv':
+ uvresp = np.array(recording['data'])
+ if recording['Stimulus'] == 'green':
+ greenresp = np.array(recording['data'])
+
+
+
+        # Assumes every specimen has linked ERG data with a 25-repeat
+        # UV and green recording; fail with a clear message otherwise
+        if uvresp is None or greenresp is None:
+            raise ValueError(
+                'Missing linked 25-repeat ERG data for {}'.format(
+                    manalyser.get_specimen_name()))
+
+        uvresp = uvresp - uvresp[0]
+        uvresp = np.mean(uvresp[500:1000])
+
+        greenresp = greenresp - greenresp[0]
+        greenresp = np.mean(greenresp[500:1000])
+
+ line.append(uvresp)
+
+        # Decide groups
+ dpp_threshold = 0.9999
+ if line[1] < dpp_threshold and line[2] < dpp_threshold:
+ line.append('None')
+ elif line[1] < dpp_threshold or line[2] < dpp_threshold:
+ line.append('One')
+ else:
+ line.append('Two')
+
+ erg_threshold = 0.5
+ if abs(line[3]) < erg_threshold:
+ line.append('No')
+ else:
+ line.append('Yes')
+
+
+ csvfile.append(line)
+
+
+ for line in csvfile:
+ print(line)
+
+ with open("left_right_summary.csv", 'w') as fp:
+ writer = csv.writer(fp)
+ for line in csvfile:
+ writer.writerow(line)
+
+
+
+
+
diff --git a/gonio-analysis/gonioanalysis/drosom/reports/pdf_summary.py b/gonio-analysis/gonioanalysis/drosom/reports/pdf_summary.py
new file mode 100644
index 0000000..054c69e
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/reports/pdf_summary.py
@@ -0,0 +1,111 @@
+
+import os
+import datetime
+
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib.backends.backend_pdf import PdfPages
+
+from gonioanalysis.image_tools import open_adjusted
+from gonioanalysis.directories import ANALYSES_SAVEDIR
+
+
+def _make_figure(manalyser, n_rows, i_pg):
+ i_pg += 1
+
+ height_ratios = [1 for i in range(n_rows)]
+ if i_pg == 0:
+ height_ratios[0] *= 1.4
+
+    fig, axes = plt.subplots(n_rows, 3, figsize=(8.27, 11.69),
+ gridspec_kw={'width_ratios': [0.618, 1, 1], 'height_ratios': height_ratios})
+
+ fig.suptitle( "{}, page {}".format(manalyser.get_specimen_name(), i_pg))
+
+ return fig, axes, i_pg
+
+
+def _imshow(image, ax):
+ ax.imshow(image, cmap='gray')
+ ax.set_axis_off()
+
+
+def _image_folder_info(manalyser, image_folder, ax):
+
+ pass
+
+def _specimen_info(manalyser, ax):
+
+ string = ""
+
+ string += manalyser.get_specimen_sex() + '\n'
+ string += manalyser.get_specimen_age() + '\n'
+
+
+    ax.text(0, 1, string, va='top', transform=ax.transAxes)
+
+
+def pdf_summary(manalysers):
+ '''
+ Make a structured PDF plotting all the DPP data of
+ the fly and any linked data as well.
+ '''
+
+
+ pdf_savedir = os.path.join(ANALYSES_SAVEDIR, 'reports')
+ os.makedirs(pdf_savedir, exist_ok=True)
+
+ # Subplot rows per page
+ n_rows = 4
+
+ with PdfPages(os.path.join(pdf_savedir, 'pdf_summary_{}.pdf'.format(datetime.datetime.now()))) as pdf:
+
+ for manalyser in manalysers:
+
+ # The page index for this fly
+ i_pg = -1
+ fig, axes, i_pg = _make_figure(manalyser, n_rows, i_pg)
+
+ # SNAP / FACE IMAGE
+ snap_fn = manalyser.get_snap_fn()
+
+ if snap_fn:
+ face = open_adjusted( manalyser.get_snap_fn() )
+ _imshow(face, axes[0][2])
+
+ # Information about the fly
+
+ # DPP data
+ for i, image_folder in enumerate(manalyser.list_imagefolders()):
+
+ if i+1 >= n_rows:
+ plt.tight_layout()
+ pdf.savefig()
+ plt.close()
+
+ fig, axes, i_pg = _make_figure(manalyser, n_rows, i_pg)
+
+ i_row = i+1 - i_pg*n_rows
+
+ axes[i_row][1].set_title(image_folder)
+
+ # Photo of the location
+ location_im = open_adjusted( manalyser.list_images(image_folder, absolute_path=True)[0] )
+                # _imshow draws with the gray colormap and hides the axis
+                _imshow(location_im, axes[i_row][0])
+
+ displacements = manalyser.get_displacements_from_folder(image_folder)
+ for dis in displacements:
+ axes[i_row][1].plot(dis, lw=1)
+
+ axes[i_row][1].plot(np.mean(displacements, axis=0), lw=3, color='black')
+
+
+ plt.tight_layout()
+ pdf.savefig()
+ plt.close()
+
+
+
+
+
diff --git a/gonio-analysis/gonioanalysis/drosom/reports/repeats.py b/gonio-analysis/gonioanalysis/drosom/reports/repeats.py
new file mode 100644
index 0000000..3dc5929
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/reports/repeats.py
@@ -0,0 +1,84 @@
+
+import os
+
+import numpy as np
+
+from gonioanalysis.directories import ANALYSES_SAVEDIR
+from gonioanalysis.drosom.kinematics import (
+ magstd_over_repeats,
+ sigmoidal_fit,
+ )
+from .left_right import write_CSV_cols
+
+
+SAVEDIR = os.path.join(ANALYSES_SAVEDIR, 'repeats_exports')
+
+
+def mean_repeats(manalysers, group_name, wanted_imagefolders=None,
+ savedir=SAVEDIR):
+ '''
+    Average repeat 1 of all flies together, then repeat 2 of all flies
+    together, and so on.
+
+    This is useful for example for the intensity series, where the stimulus
+    intensity increases on every repeat and we want to know the mean response
+    of the flies to flash (repeat) 1, flash (repeat) 2, and so on.
+ '''
+
+ all_traces = []
+
+ for manalyser in manalysers:
+
+ if wanted_imagefolders:
+ _image_folders = wanted_imagefolders.get(manalyser.name, [])
+ else:
+ _image_folders = manalyser.list_imagefolders()
+
+ for image_folder in _image_folders:
+
+ for eye in manalyser.eyes:
+ traces = manalyser.get_magnitude_traces(eye,
+ image_folder=image_folder)
+
+ traces = list(traces.values())
+ if traces:
+ all_traces.append(traces[0])
+
+ # Average repeat 1 together, repeat 2 together etc.
+ mean_traces = []
+
+ for i_repeat in range(len(all_traces[0])):
+ mean = np.mean([data[i_repeat] for data in all_traces], axis=0)
+ mean_traces.append(mean.tolist())
+
+ os.makedirs(savedir, exist_ok=True)
+ write_CSV_cols(os.path.join(savedir, group_name+'.csv'), mean_traces)
+
+
+
+def repeat_stds(manalysers, group_name, wanted_imagefolders=None,
+ savedir=SAVEDIR):
+ '''
+    Variation within specimens (not between specimens).
+ '''
+ stds = [['name', 'disp-std', 'speed-std', '1/2-time std']]
+ for manalyser in manalysers:
+ if wanted_imagefolders:
+ _image_folders = wanted_imagefolders.get(manalyser.name, [])
+ else:
+ _image_folders = manalyser.list_imagefolders()
+
+ for image_folder in _image_folders:
+
+ std = [np.std(z) for z in sigmoidal_fit(manalyser, image_folder)]
+
+ std[0] = magstd_over_repeats(manalyser, image_folder, maxmethod='mean_latterhalf')
+
+ std.insert(0, manalyser.name+'_'+image_folder)
+
+ stds.append(std)
+
+
+ os.makedirs(savedir, exist_ok=True)
+ write_CSV_cols(os.path.join(savedir, group_name+'.csv'), stds)
+
diff --git a/gonio-analysis/gonioanalysis/drosom/reports/stats.py b/gonio-analysis/gonioanalysis/drosom/reports/stats.py
new file mode 100644
index 0000000..bee596a
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/reports/stats.py
@@ -0,0 +1,66 @@
+
+import numpy as np
+from gonioanalysis.drosom.kinematics import magstd_over_repeats, mean_max_response
+
+
+def response_magnitude(manalysers, group_name='noname',
+ imagefolder_endswith=''):
+    Collect response-magnitude statistics into one row (list) containing:
+
+    - group name
+    - condition/stimulus name (imagefolder_endswith)
+    - mean response amplitude
+    - variation (std) between animals
+    - mean variation within animals
+    - the number N of animals
+ - the number N of animals
+ '''
+
+ cols = []
+
+ respmags = []
+ respstds = []
+
+ for manalyser in manalysers:
+ image_folders = manalyser.list_imagefolders(endswith=imagefolder_endswith, only_measured=True)
+
+ for image_folder in image_folders:
+ respmags.append( mean_max_response(manalyser, image_folder, maxmethod='mean_latterhalf') )
+ respstds.append( magstd_over_repeats(manalyser, image_folder, maxmethod='mean_latterhalf') )
+
+
+ cols.append(group_name)
+ cols.append(imagefolder_endswith)
+ cols.append(np.mean(respmags))
+ cols.append(np.std(respmags))
+ cols.append(np.mean(respstds))
+ cols.append(len(manalysers))
+
+ return cols
+
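+# Sketch of the row returned by response_magnitude (numbers made up
+# purely for illustration):
+#
+#   response_magnitude(manalysers, group_name='controls',
+#                      imagefolder_endswith='uv')
+#   # -> ['controls', 'uv', 5.2, 1.1, 0.6, 10]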
+
+def response_magnitudes(grouped_manalysers, stimuli = ['uv', 'green']):
+ '''
+ Statistics for manalyser groups.
+    See response_magnitude.
+
+ Arguments
+ ---------
+ grouped_manalysers : dict of lists of objects
+ Keys are group names, items are lists that contain the manalyser
+ objects.
+ '''
+ rows = []
+
+ for name, manalysers in grouped_manalysers.items():
+ for stimulus in stimuli:
+ rows.append( response_magnitude(manalysers, group_name=name,
+ imagefolder_endswith=stimulus) )
+
+
+ for row in rows:
+ print(row)
+
+ return rows
+
+
+
diff --git a/gonio-analysis/gonioanalysis/drosom/sinesweep.py b/gonio-analysis/gonioanalysis/drosom/sinesweep.py
new file mode 100644
index 0000000..98008ef
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/sinesweep.py
@@ -0,0 +1,357 @@
+'''
+Analysing sinusoidal sweep data.
+
+Currently supports the logarithmic sweep flash_types of Gonio Imsoft
+(sinelogsweep, squarelogsweep and 3steplogsweep).
+'''
+
+import os
+import csv
+import math
+
+import numpy as np
+import scipy.stats
+import scipy.signal
+import scipy.interpolate
+import matplotlib.pyplot as plt
+
+
+from gonioanalysis.directories import ANALYSES_SAVEDIR
+
+def _get_stimulus(flash_type, t, fs):
+ '''
+    Reconstruct the stimulus waveform of a logarithmic sweep.
+
+    Arguments
+    ---------
+    flash_type : string
+        Flash_type used in Gonio Imsoft, optionally with the start and end
+        frequencies appended, for example "sinelogsweep,0.5,100"
+    t : float
+        Duration of the stimulus in seconds
+    fs : float
+        Sampling rate of the stimulus in Hz
+ '''
+
+ if ',' in flash_type:
+ flash_type, f0, f1 = flash_type.split(',')
+ f0 = float(f0)
+ f1 = float(f1)
+ else:
+ f0 = 0.5
+ f1 = 100
+
+
+ timepoints = np.linspace(1/fs, t, int(t*fs))
+
+ stimulus_frequency = f0 * (f1/f0) ** (timepoints/t)
+ stimulus_amplitude = scipy.signal.chirp(timepoints, f0=f0, t1=t, f1=f1,
+ phi=-90, method='logarithmic')
+ if flash_type == 'squarelogsweep':
+ stimulus_amplitude[stimulus_amplitude>=0] = 1
+ stimulus_amplitude[stimulus_amplitude<0] = -1
+ elif flash_type == '3steplogsweep':
+ cstep = np.sin(np.pi/4)
+ stimulus_amplitude[np.abs(stimulus_amplitude) <= cstep] = 0
+ stimulus_amplitude[stimulus_amplitude > cstep] = 1
+ stimulus_amplitude[stimulus_amplitude < -cstep] = -1
+ else:
+ pass
+
+ stimulus_amplitude = (stimulus_amplitude+1)/2
+
+ return timepoints, stimulus_frequency, stimulus_amplitude
+
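+# Hedged usage sketch of _get_stimulus: reconstruct a 10 s square-wave
+# log sweep from 0.5 Hz to 100 Hz sampled at 1 kHz. The instantaneous
+# frequency rises logarithmically from f0 to f1, and the amplitude is
+# normalised to the range 0..1:
+#
+#   timepoints, freqs, amplitude = _get_stimulus('squarelogsweep,0.5,100',
+#                                                10.0, 1000.0)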
+
+
+def _find_zeroindices(stimulus):
+ '''
+    Poor man's iterative turning-point finding: return the indices where
+    the signal changes direction (local extrema), used as cycle cut points.
+ '''
+ zero_indices = []
+
+ previous_dir = 0
+
+ for i in range(len(stimulus)):
+ dirr = np.sign(stimulus[i] - stimulus[i-1])
+
+ if dirr != previous_dir:
+ zero_indices.append(i)
+
+ previous_dir = dirr
+ else:
+ pass
+ return zero_indices
+
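+# Worked example of _find_zeroindices on a small triangle wave. The
+# direction of change flips at the peak (index 3) and where the signal
+# turns back up (index 5); indices 0 and 1 appear because the first
+# difference wraps around to the last element:
+#
+#   _find_zeroindices([0, 1, 2, 1, 0, 1])
+#   # -> [0, 1, 3, 5]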
+
+def _sham_frequency_response(stimulus_timepoints, stimulus_frequencies, stimulus, response,
+ interpolate=True):
+ '''
+    Estimate an amplitude "frequency response" to a sinusoidal sweep signal
+    by taking the peak-to-peak response within each stimulus cycle.
+
+    Arguments
+    ---------
+    interpolate : bool
+        Interpolate the result back onto stimulus_frequencies
+ '''
+ #fn = '/home/joni/.gonioanalysis/final_results/sineweep_analysis/wtb_sinusoidal_07_right_sinelogsweep.csv'
+ #fn = '/home/joni/.gonioanalysis/final_results/sineweep_analysis/wtb_sinusoidal_07_right_squarewave_cam200Hz.csv'
+ #data = np.loadtxt(fn,
+ # skiprows=2, delimiter=',').T
+
+ #print(data)
+
+ #stimulus_timepoints = data[1][:-5]
+ #stimulus_frequencies = data[2][:-5]
+ #stimulus = data[3][:-5]
+ #response = data[4][:-5]
+
+ fs = 1 / (stimulus_timepoints[1]-stimulus_timepoints[0])
+
+ #b, a = scipy.signal.butter(1, 0.5, 'high', fs=fs)
+ #response = scipy.signal.filtfilt(b, a, response)
+
+ #calculate_frequency_response(stimulus_timepoints, stimulus)
+
+
+ cut_indices = _find_zeroindices(stimulus)[1::2]
+
+ #for indx in cut_indices:
+ # plt.plot(2*[stimulus_timepoints[indx]], [0, 1], '--', color='black')
+
+ #plt.plot(stimulus_timepoints, response, color='red')
+ #plt.plot(stimulus_timepoints, stimulus, color='blue')
+ #plt.show()
+
+ freqs = []
+ resps = []
+
+ for i1, i2 in zip(cut_indices[0:-1], cut_indices[1:]):
+ #plt.plot(stimulus_timepoints[i1:i2], stimulus[i1:i2], color='blue')
+ #plt.plot(stimulus_timepoints[i1:i2], response[i1:i2], color='red')
+ #plt.plot(stimulus_timepoints, response2, color='orange')
+ #plt.show()
+ chunk = response[i1:i2]
+ resp = max(chunk) - min(chunk)
+
+ freq = np.mean(stimulus_frequencies[i1:i2])
+
+ resps.append(resp)
+ freqs.append(freq)
+
+ #plt.plot(freqs, resps)
+ #plt.xscale('log')
+ #plt.show()
+
+ if interpolate:
+ f = scipy.interpolate.interp1d(freqs, resps, fill_value='extrapolate', bounds_error=False)
+ resps = f(stimulus_frequencies)
+ freqs = stimulus_frequencies
+
+ return freqs, resps
+
+
+
+def save_sinesweep_analysis_CSV(analysers, debug=False):
+ '''
+ Save X and Y components of the movement in a csv file
+ for the selected manalysers.
+
+    Columns
+    -------
+    i_camframe, time, stimulus_fs, mean_response, response_rep1, response_rep2, ...
+ '''
+
+ stimuli = {}
+
+ savedir = os.path.join(ANALYSES_SAVEDIR, 'sineweep_analysis')
+ os.makedirs(savedir, exist_ok=True)
+
+ final_cols = {}
+
+ if debug:
+ fig, ax = plt.subplots()
+
+ for analyser in analysers:
+ for eye in analyser.eyes:
+
+ for image_folder in analyser.list_imagefolders():
+
+ imagefolder_key = image_folder[3:]
+
+ try:
+ N_repeats = len(analyser.movements[eye][imagefolder_key])
+ except KeyError:
+ print('No data for {}'.format(analyser))
+ continue
+
+ # Get imaging parameters
+ im_fs = analyser.get_imaging_frequency(image_folder)
+
+ im_params = analyser.get_imaging_parameters(image_folder)
+ flash_type = im_params.get('flash_type', '')
+
+ if flash_type.split(',')[0] not in ['squarelogsweep', 'sinelogsweep', '3steplogsweep']:
+                    print('Unknown flash_type {}, skipping'.format(flash_type))
+ continue
+
+
+ # Get movement data
+ pixel_size = analyser.get_pixel_size(image_folder)
+ print('N_repeats {}'.format(N_repeats))
+ Xs = [np.array(analyser.movements[eye][imagefolder_key][i_repeat]['x'])*pixel_size for i_repeat in range(N_repeats)]
+ Ys = [np.array(analyser.movements[eye][imagefolder_key][i_repeat]['y'])*pixel_size for i_repeat in range(N_repeats)]
+
+ #mean_displacement = np.mean([np.sqrt(np.array(X)**2+np.array(Y)**2) for X, Y in zip(Xs, Ys)], axis=0)
+
+ meanX = np.mean(Xs, axis=0)
+ meanY = np.mean(Ys, axis=0)
+ meanX -= meanX[0]
+ meanY -= meanY[0]
+
+ mean_displacement = np.sqrt(meanX**2+meanY**2)
+
+ #slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(meanX, meanY)
+
+ # Rotate fit along xaxis
+ #rot = -math.atan(slope)
+ #c = math.cos(rot)
+ #s = math.sin(rot)
+ #pc1 = [c*x - s*y for x,y in zip(meanX, meanY)]
+ #pc2 = [s*x + c*y for x,y in zip(meanX, meanY)]
+
+ # Stimulus frequencies
+ #stim_fs = 1000
+
+
+ timepoints, stimulus_frequency, stimulus_amplitude = _get_stimulus(flash_type, len(Xs[0])/im_fs, im_fs)
+
+ if flash_type not in stimuli:
+ # FIXME 1kHz output in Imsoft but information not saved
+ stim_fs = 1000
+ dense = _get_stimulus(flash_type, len(Xs[0])/im_fs, stim_fs)
+
+ stimuli[flash_type] = dense
+
+ # "Frequency response"
+ fr_freqs, fr = _sham_frequency_response(timepoints, stimulus_frequency,
+ stimulus_amplitude, mean_displacement, interpolate=True)
+
+ # Save csv
+
+ fn = '{}_{}_{}.csv'.format(analyser.folder, eye, im_params['suffix'])
+
+                if debug:
+                    # The linear fit above is commented out, so compute it
+                    # here for the debug plot only
+                    slope, intercept, _, _, _ = scipy.stats.linregress(meanX, meanY)
+                    ax.clear()
+                    ax.scatter(meanX, meanY)
+                    ax.plot(meanX, intercept + slope*meanX)
+                    fig.savefig(os.path.join(savedir, fn.replace('.csv', '.png')))
+
+ if final_cols.get(flash_type, None) is None:
+ final_cols[flash_type] = {'time (s)': [], 'f_stimulus (Hz)': [],
+ 'displacement': [], 'frequency_response': [], 'specimen_name': []}
+
+
+ final_cols[flash_type]['time (s)'].append(timepoints)
+ final_cols[flash_type]['f_stimulus (Hz)'].append(stimulus_frequency)
+ final_cols[flash_type]['displacement'].append(mean_displacement)
+ final_cols[flash_type]['specimen_name'].append(analyser.folder)
+ final_cols[flash_type]['frequency_response'].append(fr)
+
+
+ with open(os.path.join(savedir, fn), 'w') as fp:
+ writer = csv.writer(fp)
+
+ writer.writerow(['Displacement = sqrt(X_mean**2+Y_mean**2)'])
+
+ row = ['i_camframe',
+ 'time (s)',
+ 'f_stimulus (Hz)',
+ 'instantaneous stimulus amplitude (0-1)',
+ #'pc1 (µm)',
+ #'displacement',
+ 'displacement2 (µm)',
+ #'pc2 (µm)'
+ #'X_mean (µm)',
+ #'Y_mean (µm)',
+ #'Displacement (µm)'
+ 'Frequency response (µm)']
+
+ for i_repeat, (x, y) in enumerate(zip(Xs, Ys)):
+ row.append('X rep_{}'.format(i_repeat))
+ row.append('Y rep_{}'.format(i_repeat))
+
+ writer.writerow(row)
+
+ for i in range(len(Xs[0])-10):
+
+ row = []
+ row.append(i)
+ row.append(timepoints[i])
+ row.append(stimulus_frequency[i])
+ row.append(stimulus_amplitude[i])
+ #row.append(pc1[i])
+ #row.append(pc2[i])
+ #row.append(meanX[i])
+ #row.append(meanY[i])
+ #row.append(math.sqrt(meanX[i]**2+meanY[i]**2))
+ row.append(mean_displacement[i])
+ row.append(fr[i])
+
+ for x, y in zip(Xs, Ys):
+ row.append(x[i])
+ row.append(y[i])
+
+ writer.writerow(row)
+
+
+ for flash_type in final_cols:
+ with open(os.path.join(savedir, 'mean_'+flash_type+'.csv'), 'w') as fp:
+ writer = csv.writer(fp)
+
+ row = []
+ row.append('i_camframe')
+ row.append('time (s)')
+ row.append('f_stimulus (Hz)')
+
+ N = len(final_cols[flash_type]['displacement'])
+
+ for k in range(N):
+ row.append('{} response (µm)'.format(final_cols[flash_type]['specimen_name'][k]))
+
+ row.append('mean response (µm)')
+
+ for k in range(N):
+ row.append('frequency response (µm)')
+
+
+ row.append('mean frequency response (µm)')
+
+ writer.writerow(row)
+
+ for i in range(len(final_cols[flash_type]['time (s)'][0])-10):
+
+ row = []
+ row.append(i)
+ row.append(final_cols[flash_type]['time (s)'][0][i])
+ row.append(final_cols[flash_type]['f_stimulus (Hz)'][0][i])
+
+
+ displacements = []
+ for j in range(N):
+ row.append(final_cols[flash_type]['displacement'][j][i])
+ displacements.append(row[-1])
+
+ row.append(np.mean(displacements))
+
+
+ fr = []
+ for j in range(N):
+ row.append(final_cols[flash_type]['frequency_response'][j][i])
+ fr.append(row[-1])
+ row.append(np.mean(fr))
+
+ writer.writerow(row)
+
+ for stimulus in stimuli:
+
+ with open(os.path.join(savedir, 'stimulus_{}.csv'.format(stimulus)), 'w') as fp:
+ writer = csv.writer(fp)
+            writer.writerow(['Time (s)', 'Frequency (Hz)', 'Amplitude (0-1)'])
+ for i in range(len(stimuli[stimulus][0])):
+ writer.writerow([data[i] for data in stimuli[stimulus]])
+
+
diff --git a/gonio-analysis/gonioanalysis/drosom/special/__init__.py b/gonio-analysis/gonioanalysis/drosom/special/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/gonio-analysis/gonioanalysis/drosom/special/norpa_rescues.py b/gonio-analysis/gonioanalysis/drosom/special/norpa_rescues.py
new file mode 100644
index 0000000..237b423
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/special/norpa_rescues.py
@@ -0,0 +1,771 @@
+'''
+Analysing and creating figures from the norpA rescues.
+'''
+
+import os
+import csv
+import glob
+
+import matplotlib.pyplot as plt
+import matplotlib.image as mati
+import matplotlib.patches as patches
+import matplotlib.colors as mplcolors
+from matplotlib.backends.backend_pdf import PdfPages
+import numpy as np
+#import pandas as pd
+#import seaborn as sns
+#from statannot import add_stat_annotation
+
+from gonioanalysis.directories import ANALYSES_SAVEDIR
+from gonioanalysis.image_tools import open_adjusted
+
+
+#from xray_erg.plotting import Animator
+#from xray_erg.interpolating import interpolate
+from biosystfiles import extract as bsextract
+#from mpl_steroids.scalebar import add_scalebar
+
+
+RESCUES = ['Rh1', 'Rh3', 'Rh4', 'Rh5', 'Rh6']
+STIMULI = ['uv', 'green', 'nostim']
+
+
+# FANCY NAMES
+STIMULUS_FANCY_NAMES = {STIMULI[0]: 'UV flash', STIMULI[1]: 'Green flash', STIMULI[2]: 'No stimulus',
+ STIMULI[0]+'_erg': 'UV flash', STIMULI[1]+'_erg': 'Green flash',
+ 'sens': ''}
+
+
+# COLORS
+PALETTE = ['violet', 'lime', 'gray']
+
+RESCUE_COLORS = {'Rh1': '#4ce801', 'Rh3': '#e201f0', 'Rh4': '#7e01f1', 'Rh5': '#01d2d3', 'Rh6': '#c4d201'}
+
+STIMULUS_COLORS = {'uv': 'purple', 'green': 'green', 'nostim': 'black'}
+STIMULUS_COLORS['uv_erg'] = STIMULUS_COLORS['uv']
+STIMULUS_COLORS['green_erg'] = STIMULUS_COLORS['green']
+
+
+EXPERIMENT_COLORS = {'ergs': (0.99,0.99,0.97), 'dpp': (0.97,0.99,0.99)}
+
+
+THIS_SAVEDIR = os.path.join(ANALYSES_SAVEDIR, 'norpa_rescues')
+
+
+# Blockdict for blacklisting specimens or single eyes
+BLOCKDICT = {'norpA_Rh1_02_manyrepeats': 'No ERGs',
+ 'norpA_Rh1_10_manyrepeats': 'Miniscule ERGs',
+ 'norpA_Rh4_06_manyrepeats': 'No ERGs',
+ 'norpA_Rh6_06_manyrepeats': 'ERGs not recorded',
+ 'norpA_Rh1_06_manyrepeats_right': 'Not responding to Green',
+ 'norpA_Rh3_01_manyrepeats_left': 'Not clearly responding to UV',
+ 'norpA_Rh3_03_manyrepeats_right': 'Not clearly responding to UV',
+ 'norpA_Rh3_04_manyrepeats_right': 'Not clearly responding to UV',
+ 'norpA_Rh3_07_manyrepeats_right': 'Not clearly responding to UV',
+ 'norpA_Rh5_03_manyrepeats_right': 'Not clearly responding to UV',
+ 'norpA_Rh5_05_manyrepeats_right': 'Not clearly responding to UV',
+ 'norpA_Rh5_09_manyrepeats_left': 'Not clearly responding to UV'
+ }
+
+# Temporary: pixel size; should be moved to analysing.py
+PXLSIZE=0.81741 # microns per pixel
+
+# Led spectrums
+LED_SPECTRUM_DIR = '/home/joni/data/DPP/DPP_cal_spectrometer_data/DPP_cal_10/'
+LED_SPECTRUM_FNS = ['green_center_8V_calculated.csv', 'uv_center_8V.csv', 'ergsetup_green_center_5V.csv', 'ergsetup_uv_center_5V.csv']
+LED_SPECTRUM_FNS = [os.path.join(LED_SPECTRUM_DIR, fn) for fn in LED_SPECTRUM_FNS]
+
+
+def norpa_rescue_manyrepeats(manalysers):
+ '''
+ Analyse norpA Rh{1,3,4,5,6} rescue mutants recorded with
+ experimental protocol manyrepeats.
+
+ In the so called manyrepeats protocol, each eye was imaged
+ with 25 repeats while flashing green, UV or no stimulus
+ at location vertical= -37 deg and horizontal= +- 28 deg.
+
+ The experiment was rotated almost without exception as follows:
+ 1) right eye UV
+ 2) right eye NOSTIM
+ 3) right eye GREEN
+ 4) left eye GREEN
+ 5) left eye NOSTIM
+        6) left eye UV
+
+ Specimens were named as norpA_Rh{i_rhodopsin}_{i_specimen}_manyrepeats,
+ where i_rhodopsin is 1, 3, 4, 5, or 6 and i_specimen 01, 02, ...
+ '''
+
+ results = {}
+
+ rescues = RESCUES
+ stimuli = STIMULI
+
+
+
+
+ for manalyser in manalysers:
+
+ specimen_name = manalyser.get_specimen_name()
+
+ if specimen_name in BLOCKDICT.keys():
+ print('Specimen {} on the block list because {}'.format(specimen_name, BLOCKDICT[specimen_name]))
+ continue
+
+ # Look which mutant we have
+ specimen_rescue = None
+ for rescue in rescues:
+ if rescue in specimen_name:
+
+ if specimen_rescue is not None:
+ raise ValueError('2 or more Rhi rescues fit the name {}'.format(specimen_name))
+
+ specimen_rescue = rescue
+ break
+
+ # If none of the rescues, then skip this Manalyser
+ if specimen_rescue is None:
+ continue
+
+
+ # Then iterate over the image folder
+ for image_folder in sorted(manalyser.list_imagefolders(list_special=False)):
+
+            # Image folders without a suffix were in my recordings always
+            # actually stimulated with UV; in the beginning I sometimes
+            # forgot to add the UV suffix. Recordings were always
+            # started with UV.
+ specimen_stimtype = 'uv'
+
+ for stimtype in stimuli:
+ if image_folder.endswith('_'+stimtype):
+ specimen_stimtype = stimtype
+
+ data = manalyser.get_movements_from_folder(image_folder)
+ eye = list(data.keys())[0]
+ data = data[eye]
+
+ print('Specimen {}_{}'.format(specimen_name, eye))
+ if '{}_{}'.format(specimen_name, eye) in BLOCKDICT.keys():
+ print(' Specimen {}_{} blocked'.format(specimen_name, eye))
+ continue
+
+ times = []
+ traces = []
+ responses = []
+
+ for i_repeat in range(len(data)):
+ X = np.asarray(data[i_repeat]['x'])
+ Y = np.asarray(data[i_repeat]['y'])
+
+ mag_trace = np.sqrt(X**2+Y**2)
+
+ A = np.mean(mag_trace[:4])
+ B = np.mean(mag_trace[-10:])
+ response = B-A
+
+ traces.append(mag_trace)
+ responses.append(response)
+ times.append(manalyser.get_recording_time(image_folder, i_rep=i_repeat))
+
+
+ if specimen_rescue not in results.keys():
+ results[specimen_rescue] = {}
+
+ if specimen_stimtype not in results[specimen_rescue].keys():
+ results[specimen_rescue][specimen_stimtype] = {'times': [], 'separate_traces': [], 'traces': [],
+ 'sexes': [], 'names': [], 'ages': [], 'responses': [], 'eyes': [], 'image_interval': []}
+
+ results[specimen_rescue][specimen_stimtype]['separate_traces'].append(traces)
+ results[specimen_rescue][specimen_stimtype]['traces'].append(np.mean(traces, axis=0))
+ results[specimen_rescue][specimen_stimtype]['responses'].append(responses)
+
+ results[specimen_rescue][specimen_stimtype]['times'].append( times[int(len(times)/2)] )
+ results[specimen_rescue][specimen_stimtype]['ages'].append( manalyser.get_specimen_age() )
+ results[specimen_rescue][specimen_stimtype]['sexes'].append( manalyser.get_specimen_sex() )
+ results[specimen_rescue][specimen_stimtype]['names'].append( manalyser.get_specimen_name() )
+ results[specimen_rescue][specimen_stimtype]['eyes'].append( eye )
+ results[specimen_rescue][specimen_stimtype]['image_interval'].append( manalyser.get_image_interval() )
+
+
+ #plot_individual(manalysers, results)
+ #plot_mean_timecourses(results)
+ #plot_mean_timecourses(results, sex='male')
+ #plot_box_summary(results)
+
+ export_mean_timecourses(results)
+
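+# The per-repeat response metric used above, shown standalone (a sketch
+# on a toy magnitude trace; the frame counts follow the code above):
+#
+#   import numpy as np
+#   mag_trace = np.sqrt(X**2 + Y**2)   # X, Y: pixel displacement arrays
+#   response = np.mean(mag_trace[-10:]) - np.mean(mag_trace[:4])
+#
+# i.e. the mean of the last 10 frames minus the mean of the first 4.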
+
+def plot_bar_summary(results):
+    # Deferred import: pandas is an optional dependency here
+    # (its module-level import is commented out above)
+    import pandas as pd
+
+    panda_data = []
+ for rescue in results.keys():
+ for stimtype in results[rescue].keys():
+ responses = results[rescue][stimtype]['responses']
+ #for i_repeat, value in enumerate(values):
+ panda_data.append([rescue, STIMULUS_FANCY_NAMES[stimtype], np.mean(responses), np.std(responses), responses])
+
+ df = pd.DataFrame(panda_data, columns=['norpA rescue', 'Stimulus type', 'mean', 'std', 'responses'])
+ print(df)
+
+
+ a = df.pivot('norpA rescue', 'Stimulus type', 'mean').plot(kind='bar',
+ yerr=df.pivot('norpA rescue', 'Stimulus type', 'std'))
+
+def plot_box_summary(results):
+    # Deferred imports: pandas, seaborn and statannot are optional
+    # dependencies here (their module-level imports are commented out)
+    import pandas as pd
+    import seaborn as sns
+    from statannot import add_stat_annotation
+
+    panda_data = []
+ for rescue in results.keys():
+ for stimtype in results[rescue].keys():
+ data = results[rescue][stimtype]
+ for i_eye in range(len(data['eyes'])):
+ panda_data.append([rescue, STIMULUS_FANCY_NAMES[stimtype],
+ data['names'][i_eye], data['eyes'][i_eye], np.mean(data['responses'][i_eye])])
+
+ df = pd.DataFrame(panda_data, columns=['norpA rescue', 'Stimulus type', 'name', 'eye', 'response'])
+ print(df)
+
+
+ #a = df.pivot('norpA rescue', 'Stimulus type', 'mean').plot(kind='bar',
+ # yerr=df.pivot('norpA rescue', 'Stimulus type', 'std'))
+
+ plt.figure()
+ ax = sns.boxplot(x='norpA rescue', y='response', hue='Stimulus type', data=df,
+ hue_order=[STIMULUS_FANCY_NAMES[stim] for stim in STIMULI], palette=PALETTE)
+
+ box_pairs = []
+ for rescue in results.keys():
+ for i in range(len(STIMULI)-1):
+ box_pairs.append(((rescue, STIMULUS_FANCY_NAMES[STIMULI[i]]), (rescue, STIMULUS_FANCY_NAMES[STIMULI[-1]])))
+
+ print(box_pairs)
+
+ add_stat_annotation(ax, data=df, x='norpA rescue', y='response', hue='Stimulus type',
+ box_pairs=box_pairs, test='Wilcoxon', loc='inside')
+ #ax = sns.swarmplot(x="norpA rescue", y="response", data=df, color=".25")
+
+def plot_mean_timecourses(results, sex=None):
+ '''
+ Create a figure with time courses.
+ '''
+
+
+ fig = plt.figure(figsize=(16,9))
+ subplots = {}
+
+ nrows = 5
+ ncols = 4
+
+ cols = ['', 'sens', 'uv', 'green', 'nostim', 'uv_erg', 'green_erg']
+ rows = ['colnames', 'Rh1', 'Rh3', 'Rh4', 'Rh5', 'Rh6']
+
+
+ maxval = 12.5
+
+
+ animator = Animator(fig)
+
+
+ for row in rows[::-1]:
+ for col in cols:
+ irow = rows.index(row)
+ icol = cols.index(col)
+
+ ax = fig.add_subplot(len(rows),len(cols), 1+len(cols)*irow+icol)
+
+ if row == rows[-1] and col == 'sens':
+ ax.set_xlabel('Wavelength (nm)')
+ ax.get_yaxis().set_visible(False)
+ ax.spines['top'].set_visible(False)
+ ax.spines['right'].set_visible(False)
+ ax.spines['left'].set_visible(False)
+ else:
+ ax.set_axis_off()
+
+ if col == 'sens' or row == 'colnames':
+ ax.set_xlim(300,650)
+ ax.set_ylim(0,1.05)
+
+ subplots[row+col] = ax
+
+ if icol == 1 and row!='colnames':
+ color = RESCUE_COLORS.get(row, 'black')
+ ax.text(0, 0.5, row, va='center', ha='right',
+ color=color, transform=ax.transAxes)
+
+ if irow == 0:
+ color = STIMULUS_COLORS.get(col, 'black')
+ ax.text(1-0.618, 0.618, STIMULUS_FANCY_NAMES.get(col, col), va='bottom', ha='center',
+ color=color, transform=ax.transAxes)
+
+ # Set titles ERG and DPP
+ subplots['colnames'+'green'].text(0.5, 1.3, 'DPP microsaccades',
+ ha='center', va='bottom', fontsize='large',
+ backgroundcolor=EXPERIMENT_COLORS['dpp'], transform=subplots['colnames'+'green'].transAxes)
+
+ subplots['colnames'+'green_erg'].text(0, 1.3, 'ERGs',
+ ha='center', va='bottom', fontsize='large',
+ backgroundcolor=EXPERIMENT_COLORS['ergs'], transform=subplots['colnames'+'green_erg'].transAxes)
+
+ # Title lines
+ #box1 = subplots['colnames'+'uv'].get_position()
+ #box2 = subplots['colnames'+'nostim'].get_position()
+ #arrow = patches.FancyArrow(box1.x0, box1.y1+box1.height/10, box2.x1-box1.x0, 0,
+ # shape='right', hatch='|', transform=fig.transFigure, figure=fig)
+ #fig.patches.extend([arrow])
+
+
+ # Plot norpA rescue illustrative images
+ imagedir = '/home/joni/Pictures/NorpA rescues/'
+ for fn in os.listdir(imagedir):
+ for row in rows:
+ if row in fn:
+ image = mati.imread(os.path.join(imagedir, fn))
+ h,w,d = image.shape
+
+ im = subplots[row+''].imshow(image)
+
+ im.set_clip_path( patches.Circle((int(w/2),int(h/2)),int(min(w,h)/2), transform=subplots[row].transData) )
+ continue
+
+ led_wavelengths, led_spectrums = _load_led_spectrums(LED_SPECTRUM_FNS)
+
+ # Plot spectral sensitivities
+ wave_axis, sensitivities = _load_spectral_sensitivities()
+ for rescue, sensitivity in zip([row for row in rows if row in RESCUES], sensitivities):
+ subplots[rescue+'sens'].plot(wave_axis, sensitivity, color=RESCUE_COLORS[rescue])
+
+ # Plot LED spectrums in each figure
+ subplots[rescue+'sens'].plot(led_wavelengths[0], led_spectrums[0], '--', color='green', lw=1)
+ subplots[rescue+'sens'].plot(led_wavelengths[0], led_spectrums[1], '--', color='purple', lw=1)
+
+
+ # Plot stimulus/LED spectral curves
+ subplots['colnames'+'green'].plot(led_wavelengths[0], led_spectrums[0], color='green')
+ subplots['colnames'+'uv'].plot(led_wavelengths[0], led_spectrums[1], color='purple')
+ subplots['colnames'+'nostim'].plot([led_wavelengths[0][0], led_wavelengths[0][-1]], [0,0], color='black')
+ subplots['colnames'+'green_erg'].plot(led_wavelengths[0], led_spectrums[2], color='green')
+ subplots['colnames'+'uv_erg'].plot(led_wavelengths[0], led_spectrums[3], color='purple')
+
+ # Plot ERGs
+
+ erg_data = _load_ergs()
+
+ ergs_min = np.inf
+ ergs_max = -np.inf
+ for rescue in results.keys():
+ for stimtype in results[rescue].keys():
+
+ erg_traces = []
+
+ # For this rescue/stimtype combination, go through every erg
+ for specimen_name in erg_data.keys():
+ if specimen_name in BLOCKDICT.keys():
+ print('Blocked {}'.format(specimen_name))
+ continue
+ # Looking for correct specimen
+ if rescue in specimen_name:
+ # and for
+ for ergs in erg_data[specimen_name]:
+ # correct stimulus type
+ if stimtype in ergs and '25' in ergs:
+ erg = ergs[0][0]
+ erg = erg - erg[0]
+
+ color = np.array(mplcolors.to_rgb(RESCUE_COLORS[rescue]))
+ color = np.mean([color, (1,1,1)], axis=0)
+ subplots[rescue+stimtype+'_erg'].plot(erg, color=color, lw=1)
+ erg_traces.append(erg)
+
+ ergs_min = min(ergs_min, np.min(erg))
+ ergs_max = max(ergs_max, np.max(erg))
+
+ if erg_traces:
+ color = np.array(mplcolors.to_rgb(RESCUE_COLORS[rescue])) * 0.75
+ subplots[rescue+stimtype+'_erg'].plot(np.mean(erg_traces, axis=0), color=color)
+
+ # Set ERG axis limits
+ for rescue in results.keys():
+ for stimtype in results[rescue].keys():
+ if rescue+stimtype+'_erg' in subplots.keys():
+ subplots[rescue+stimtype+'_erg'].set_ylim(ergs_min, ergs_max)
+
+
+ # Set DPP and ERG plots background color
+
+ box1 = subplots['Rh6'+'uv_erg'].get_position()
+ box2 = subplots['Rh1'+'green_erg'].get_position()
+ rect = patches.Rectangle((box1.x0, box1.y0), (box2.x0-box1.x0)+box2.width, (box2.y0-box1.y0)+box2.height, color=EXPERIMENT_COLORS['ergs'], zorder=-1)
+ fig.add_artist(rect)
+
+ box1 = subplots['Rh6'+'uv'].get_position()
+ box2 = subplots['Rh1'+'nostim'].get_position()
+ rect = patches.Rectangle((box1.x0, box1.y0), (box2.x0-box1.x0)+box2.width, (box2.y0-box1.y0)+box2.height, color=EXPERIMENT_COLORS['dpp'], zorder=-1)
+ fig.add_artist(rect)
+
+
+ # Add scale bars
+ add_scalebar(subplots['Rh6'+'nostim'], 50, 5, position=(1,5), xunits='ms', yunits='µm')
+ add_scalebar(subplots['Rh6'+'green_erg'], 300, -3, position=(100,-3), xunits='ms', yunits='mV')
+
+
+ # Plot DPP data
+ for rescue in results.keys():
+ for stimtype in results[rescue].keys():
+
+ ax = subplots[rescue+stimtype]
+
+ traces = results[rescue][stimtype]['traces']
+
+ # Time interval between the camera's consecutive images (in seconds)
+ image_interval = results[rescue][stimtype]['image_interval']
+
+ color = None
+ for i in range(len(traces)):
+ if sex is not None:
+ print(results[rescue][stimtype]['sexes'][i])
+ if sex == results[rescue][stimtype]['sexes'][i]:
+ color = 'red'
+ else:
+ color = 'gray'
+
+ x, y = interpolate(np.arange(0, len(traces[i])*image_interval[i]*1000, image_interval[i]*1000), traces[i]*PXLSIZE, len(traces[i]))
+ color = np.array(mplcolors.to_rgb(RESCUE_COLORS[rescue]))
+ color = np.mean([color, (1,1,1)], axis=0)
+ line = ax.plot(x, y, label=results[rescue][stimtype]['ages'][i], lw=1, color=color)
+
+ animator.add_animation(line[0], [x,y], hide=False)
+
+ color = np.array(mplcolors.to_rgb(RESCUE_COLORS[rescue])) * 0.75
+ ax.plot(x, np.mean(traces,axis=0)*PXLSIZE, label='mean', lw=2, color=color)
+
+ #ax.set_title(rescue+'_'+stimtype)
+ ax.set_ylim(0,maxval)
+
+
+ animator.frames += len(x)
+
+ os.makedirs(THIS_SAVEDIR, exist_ok=True)
+
+ animation = animator.get_animation(interval=40)
+ #animation.save(os.path.join(THIS_SAVEDIR, 'timecourses.mp4'), dpi=600)
+
+ plt.subplots_adjust(wspace=0.1, hspace=0.1)
+
+
+
+def export_mean_timecourses(results):
+ '''
+ Exports the plot_mean_timecourses as csv files for plotting/manipulating
+ with an external program.
+ '''
+
+ savedir = os.path.join(THIS_SAVEDIR, 'exports')
+ os.makedirs(savedir, exist_ok=True)
+
+ # Each item is for a file
+ csv_files = {}
+
+ # DPP data
+ for rescue in results.keys():
+ for stimtype in results[rescue].keys():
+
+ # Each item is a column
+ csv_file = {}
+
+ traces = results[rescue][stimtype]['traces']
+ image_interval = results[rescue][stimtype]['image_interval']
+
+ avg_traces = []
+
+ for i in range(len(traces)):
+ x, y = interpolate(np.arange(0, len(traces[i])*image_interval[i]*1000, image_interval[i]*1000), traces[i]*PXLSIZE, len(traces[i]))
+
+ if i == 0:
+ csv_file['time (ms)'] = x
+
+ specimen_name = results[rescue][stimtype]['names'][i]
+ eye = results[rescue][stimtype]['eyes'][i]
+ column_name = '{}_mean_{}'.format(specimen_name, eye)
+
+ csv_file[column_name] = y
+
+ avg_traces.append(y)
+
+
+ if csv_file:
+ # Add mean trace
+ csv_file['mean'] = np.mean(avg_traces, axis=0)
+ csv_files[rescue+'_'+stimtype] = csv_file
+
+ # ERGs
+ erg_data = _load_ergs()
+ for rescue in results.keys():
+ for stimtype in results[rescue].keys():
+
+ csv_file = {}
+
+ traces = []
+
+ # For this rescue/stimtype combination, go through every erg
+ for specimen_name in erg_data.keys():
+ if specimen_name in BLOCKDICT.keys():
+ print('Blocked {}'.format(specimen_name))
+ continue
+ # Looking for correct specimen
+ if rescue in specimen_name:
+ # and for
+ for ergs in erg_data[specimen_name]:
+ # correct stimulus type
+ if stimtype in ergs and '25' in ergs:
+ erg = ergs[0][0]
+ erg = erg - erg[0]
+ erg = erg.flatten()
+
+ if not csv_file:
+ fs = ergs[0][1]
+ dt = 1/fs
+ csv_file['time (ms)'] = 1000 * np.arange(0, len(erg)*dt, dt)
+
+ csv_file[specimen_name+'_erg'] = erg
+ traces.append(erg)
+ if csv_file:
+ csv_file['mean'] = np.mean(traces, axis=0)
+ csv_files[rescue+'_'+stimtype +'_erg'] = csv_file
+
+ # Measured spectrums for the LEDs
+ led_wavelengths, led_spectrums = _load_led_spectrums(LED_SPECTRUM_FNS)
+
+ for i_led, (wave, spec) in enumerate(zip(led_wavelengths, led_spectrums)):
+ csv_file = {}
+ csv_file['wavelength (nm)'] = wave
+ csv_file['relative_intensity'] = spec
+ csv_files[os.path.splitext(os.path.basename(LED_SPECTRUM_FNS[i_led]))[0]] = csv_file
+ print('i_led {}'.format(i_led))
+
+ # Spectral sensitivities
+ wave_axis, sensitivities = _load_spectral_sensitivities()
+ for rescue, sensitivity in zip(RESCUES, sensitivities):
+
+ csv_file = {}
+ csv_file['wavelength'] = wave_axis
+ csv_file['sensitivity'] = sensitivity
+
+ csv_files['sensitivity_{}'.format(rescue)] = csv_file
+
+
+ # Export
+ for csv_name, csv_file in csv_files.items():
+
+ with open(os.path.join(savedir, csv_name+'.csv'), 'w') as fp:
+
+ writer = csv.writer(fp)
+
+ column_names = sorted(list(csv_file.keys()))
+ if 'time (ms)' in column_names:
+ column_names.insert(0, column_names.pop(column_names.index('time (ms)')))
+
+ if 'mean' in column_names:
+ column_names.append(column_names.pop(column_names.index('mean')))
+
+
+ N = len(csv_file[column_names[0]])
+
+ print("{} len {}".format(csv_name, len(column_names)-2))
+
+ writer.writerow(column_names)
+
+ for i in range(0, N):
+ row = [csv_file[col][i] for col in column_names]
+
+ writer.writerow(row)
+
+
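+# Illustrative layout of one exported DPP csv file written above
+# (hypothetical specimen names and values):
+#
+# time (ms), specimenA_mean_left, specimenA_mean_right, mean
+# 0.0, 0.01, 0.02, 0.015
+# 10.0, 0.80, 1.00, 0.90
+#
+# The 'time (ms)' column is forced first and the 'mean' column last by
+# the column reordering calls in export_mean_timecourses.
+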
+
+
+def plot_ergs():
+ '''
+ Plot ERGs alone.
+
+ Not yet implemented; kept as a placeholder.
+ '''
+
+ ergs = _load_ergs()
+
+ for rescue in RESCUES:
+ for stimtype in STIMULI:
+ pass
+
+
+
+def _load_ergs():
+ '''
+ Fetches the ERGs of every specimen listed in the lab book.
+
+ Requirements
+ - ERGs are Biosyst-recorded .mat files.
+ - Also requires a lab book (csv) that links each specimen name to an
+ ERG file, and possibly other parameter values such as intensity,
+ repeats, UV/green etc.
+
+ Returns ergs {specimen_name: data}
+ where data is a list [[ergs, par1, par2, ...],[..],..]
+ ergs are np arrays
+ '''
+ ergs = {}
+
+ ergs_rootdir = '/home/joni/data/DPP_ERGs'
+ ergs_labbook = '/home/joni/data/DPP_ERGs/labbook_norpa_rescues.csv'
+
+ csvfile = []
+ with open(ergs_labbook, 'r') as fp:
+ reader = csv.reader(fp)
+ for row in reader:
+ csvfile.append(row)
+
+ previous_specimen = ''
+
+ for line in csvfile:
+ efn = line[1]
+ match = glob.glob(ergs_rootdir+'/**/'+efn)
+ if len(match) != 1:
+ print('{} not found'.format(efn))
+ #ergs.append(None)
+ else:
+ #print(efn + ' ' + match[0])
+
+ specimen = line[0]
+ if not specimen:
+ specimen = previous_specimen
+ previous_specimen = specimen
+
+ try:
+ ergs[specimen]
+ except KeyError:
+ ergs[specimen] = []
+
+ ergs[specimen].append([bsextract(match[0], 0), *line[2:]])
+
+ return ergs
+
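+# Illustrative shape of the dict returned by _load_ergs
+# (hypothetical specimen names and lab book columns):
+#
+# {'specimen1': [[bsextract_output, 'uv', '25', ...],
+# [bsextract_output, 'green', '25', ...]]}
+#
+# where bsextract_output is indexed as [erg_trace, fs] by the callers
+# earlier in this file.
+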
+
+def _load_spectral_sensitivities(fn='/home/joni/analyses/digitized_rh_rescues.csv'):
+ '''
+ Spectral sensitivities of different Rh opsins, digitized from a figure
+ into a csv file.
+
+ Returns
+ wavelengths, [rh1, ...]
+ where wavelengths is a 1D numpy array, as are rh1, rh3, ...
+ '''
+
+ data = np.loadtxt(fn, delimiter=' ', skiprows=1)
+ print(data.shape)
+ return data[:,0], [data[:, i] for i in range(1, data.shape[1])]
+
+
+
+def _load_led_spectrums(spectrometer_csv_files):
+ '''
+ Returns the LED spectrums.
+
+ spectrometer_csv_files A list of spectrometer csv files.
+
+ Returns wavelengths, spectrums
+ which are both 1d lists containing 1d numpy arrays
+ '''
+
+ wavelengths = []
+ spectrums = []
+
+ # Load spectrums
+ for fn in spectrometer_csv_files:
+ spectrum = np.loadtxt(fn, delimiter=',', skiprows=1)[:,1]
+
+ wavelength = np.loadtxt(fn, delimiter=',', skiprows=1)[:,0]
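+ # Quadratic mapping of the raw wavelength axis; presumably the
+ # spectrometer's device-specific wavelength calibration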
+ wavelength = [207.1545+0.3796126*w+0.00002822671*(w**2) for w in wavelength]
+
+ spectrums.append(spectrum)
+ wavelengths.append(wavelength)
+
+
+ # Load integration times from txt files
+ for i, fn in enumerate([os.path.splitext(fn)[0]+'.txt' for fn in spectrometer_csv_files]):
+ with open(fn, 'r') as fp:
+ integration_time = fp.readline()
+ integration_time = float(integration_time.split(' ')[-2]) # ms
+
+ spectrums[i] = spectrums[i] / integration_time
+
+ cmax = np.max(spectrums)
+
+ return wavelengths, [spectrum/cmax for spectrum in spectrums]
+
+
+
+
+def _get_results_keys(results, specimen_name):
+ '''
+ Find every (rescue, stimtype, index) combination in results that
+ belongs to the given specimen.
+ '''
+
+ matches = []
+
+ for rescue in results.keys():
+ for stimtype in results[rescue].keys():
+ try:
+ index = results[rescue][stimtype]['names'].index(specimen_name)
+ matches.append([rescue, stimtype, index])
+ except ValueError:
+ pass
+
+ return matches
+
+
+def plot_individual(manalysers, results):
+
+
+
+ pdf_savedir = os.path.join(ANALYSES_SAVEDIR, 'norpa_rescues')
+ os.makedirs(pdf_savedir, exist_ok=True)
+
+ ergs = _load_ergs()
+
+
+ with PdfPages(os.path.join(pdf_savedir, 'individual.pdf')) as pdf:
+
+ for manalyser in manalysers[:5]:
+ specimen_name = manalyser.get_specimen_name()
+ try:
+ ergs[specimen_name]
+ print('ERGS')
+ except KeyError:
+ print('No ERGs for {}'.format(specimen_name))
+
+ keys = _get_results_keys(results, specimen_name)
+ print(keys)
+
+ # Specimen info
+ fig, axes = plt.subplots(1+len(keys),2, figsize=(8.3,11.7))
+ plt.title(specimen_name)
+
+ # Plot face image
+ axes[0][0].imshow(open_adjusted(manalyser.get_snap()), cmap='gray')
+
+ # Plot DPP
+
+ for i, key in enumerate(keys):
+ axes[i][0].plot( results[key[0]][key[1]]['separate_traces'][key[2]] )
+
+
+ pdf.savefig()
+
+ plt.close()
+
+
+
+
+
+
+
diff --git a/gonio-analysis/gonioanalysis/drosom/special/paired.py b/gonio-analysis/gonioanalysis/drosom/special/paired.py
new file mode 100644
index 0000000..5451169
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/special/paired.py
@@ -0,0 +1,79 @@
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from gonioanalysis.droso import simple_select
+
+
+def cli_group_and_compare(manalysers):
+ '''
+ Command line user interface to create grouped data for
+ compare_before_after() function.
+ '''
+
+ grouped_data = []
+
+ print('For compare_before_after function, we have to group the data')
+
+ for i_manalyser, manalyser in enumerate(manalysers):
+ print('Specimen {}/{}'.format(i_manalyser+1, len(manalysers)))
+
+
+ image_folders = manalyser.list_imagefolders()
+ image_folders.sort()
+
+ for i_pair in range(0, int(len(image_folders)/2)):
+
+ print('Select a BEFORE experiment')
+ be = simple_select(image_folders)
+
+ image_folders.remove(be)
+
+ print('Select an AFTER experiment')
+ af = simple_select(image_folders)
+
+ image_folders.remove(af)
+
+ grouped_data.append([manalyser, be, af])
+
+
+ compare_before_after(grouped_data)
+
+
+def compare_before_after(grouped):
+ '''
+
+ Grouped: List where each element is
+ [manalyser, image_folder_before, image_folder_after]
+
+ '''
+
+ fig, axes = plt.subplots(1, 2, sharey=True, sharex=True)
+
+
+
+ colors = plt.cm.get_cmap('Dark2', len(grouped))
+
+
+ # the 'if_' prefix stands for image_folder
+ for i, (manalyser, if_before, if_after) in enumerate(grouped):
+
+ be = manalyser.get_displacements_from_folder(if_before)
+ af = manalyser.get_displacements_from_folder(if_after)
+
+ mbe = np.mean(be, axis=0)
+ maf = np.mean(af, axis=0)
+
+
+ axes[0].plot(mbe, color=colors(i))
+ axes[1].plot(maf, color=colors(i))
+
+
+ axes[0].set_title('Before')
+ axes[1].set_title('After')
+
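+# Minimal usage sketch (assumes manalyser is an MAnalyser instance; the
+# image folder names below are hypothetical):
+#
+# grouped = [[manalyser, 'pos(0,0)_before', 'pos(0,0)_after']]
+# compare_before_after(grouped)
+# plt.show()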
+
+
+
+
+
diff --git a/gonio-analysis/gonioanalysis/drosom/terminal.py b/gonio-analysis/gonioanalysis/drosom/terminal.py
new file mode 100644
index 0000000..935e22f
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/terminal.py
@@ -0,0 +1,318 @@
+#!/usr/bin/env python3
+'''
+Analyse Gonio Imsoft data and output the results.
+'''
+import sys
+import os
+import datetime
+import argparse
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from gonioanalysis.drosom import analyser_commands
+from gonioanalysis.drosom.analyser_commands import (
+ ANALYSER_CMDS,
+ DUALANALYSER_CMDS,
+ MULTIANALYSER_CMDS,
+ )
+from gonioanalysis.directories import ANALYSES_SAVEDIR, PROCESSING_TEMPDIR_BIGFILES
+from gonioanalysis.droso import DrosoSelect
+from gonioanalysis.antenna_level import AntennaLevelFinder
+from gonioanalysis.drosom.analysing import MAnalyser, MAverager
+from gonioanalysis.drosom.orientation_analysis import OAnalyser
+from gonioanalysis.drosom.optic_flow import FAnalyser
+from gonioanalysis.drosom.transmittance_analysis import TAnalyser
+from gonioanalysis.drosom import plotting
+from gonioanalysis.drosom.plotting.plotter import MPlotter
+from gonioanalysis.drosom.plotting import complete_flow_analysis, error_at_flight
+from gonioanalysis.drosom.special.norpa_rescues import norpa_rescue_manyrepeats
+from gonioanalysis.drosom.special.paired import cli_group_and_compare
+import gonioanalysis.drosom.reports as reports
+
+
+if '--tk_waiting_window' in sys.argv:
+ from gonioanalysis.tkgui.widgets import WaitingWindow
+
+
+
+Analysers = {'orientation': OAnalyser, 'motion': MAnalyser, 'flow': FAnalyser,
+ 'transmittance': TAnalyser}
+
+analyses = {**ANALYSER_CMDS, **DUALANALYSER_CMDS, **MULTIANALYSER_CMDS}
+
+
+def roimovement_video(analyser):
+ '''
+ Create a video where the imaging data is played and the analysed ROI is moved on the
+ image, tracking the moving feature.
+
+ Good for confirming visually that the movement analysis works.
+ '''
+
+ print(analyser.getFolderName())
+ images, ROIs, angles = analyser.get_time_ordered()
+
+ workdir = os.path.join(PROCESSING_TEMPDIR_BIGFILES, 'movie_{}'.format(str(datetime.datetime.now())))
+ os.makedirs(workdir, exist_ok=True)
+
+ newnames = [os.path.join(workdir, '{:>0}.jpg'.format(i)) for i in range(len(images))]
+
+
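+ # NOTE: ROIAdjuster and Encoder are not imported in this module; they
+ # must be importable from the project's image/video tooling for this
+ # function to run.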
+ adj = ROIAdjuster()
+ newnames = adj.writeAdjusted(images, ROIs, newnames, extend_factor=3, binning=1)
+
+ enc = Encoder()
+ fps = 25
+ enc.encode(newnames, os.path.join(ANALYSES_SAVEDIR, 'movies','{}_{}fps.mp4'.format(analyser.getFolderName(), fps)), fps)
+
+ for image in newnames:
+ os.remove(image)
+ try:
+ os.rmdir(workdir)
+ except OSError:
+ print("Temporal directory {} left behind because it's not empty".format(workdir))
+
+
+
+def export_optic_flow():
+ '''
+ Exports the optic flow vectors.
+ '''
+ import json
+ from gonioanalysis.coordinates import optimal_sampling
+ from gonioanalysis.drosom.optic_flow import flow_vectors
+ points = optimal_sampling(np.arange(-90, 90, 5), np.arange(-180, 180, 5))
+ vectors = flow_vectors(points)
+
+ with open('optic_flow_vectors.json', 'w') as fp:
+ json.dump({'points': np.array(points).tolist(), 'vectors': np.array(vectors).tolist()}, fp)
+
+
+
+
+def main(custom_args=None):
+
+ if custom_args is None:
+ custom_args = sys.argv[1:]
+
+ parser = argparse.ArgumentParser(description=__doc__)
+
+
+ # DATA ARGUMENTS
+ parser.add_argument('-D', '--data_directory', nargs='+',
+ help='Data directory')
+
+ parser.add_argument('-S', '--specimens', nargs='+',
+ help=('Comma separated list of specimen names.'
+ ' Separate groups by spaces when averaging is on.'
+ ' If needed, wanted image folders can be specified with semicolons,'
+ ' for example specimen1;imf1;imf2:specimen2;imf1'
+ ' (does not work with groups). When image folders are given with'
+ ' semicolons, use colons (:) to separate the specimens'))
+
+
+ # Analyser settings
+ parser.add_argument('-a', '--average', action='store_true',
+ help='Average and interpolate the results over the specimens')
+
+ parser.add_argument('--short-name', nargs=1,
+ help='Short name to set if --average is set')
+
+ parser.add_argument('-t', '--type', nargs='+',
+ help='Analyser type, for example "motion" or "orientation". Space separated for groups')
+
+ parser.add_argument('-r', '--reselect-rois', action='store_true',
+ help='Reselect ROIs')
+
+ parser.add_argument('-R', '--recalculate-movements', action='store_true',
+ help='Recalculate with Movemeter')
+
+ parser.add_argument('--reverse-directions', action='store_true',
+ help='Reverse movement directions')
+
+ parser.add_argument('--active-analysis', nargs='?', default='',
+ help='Name of the analysis')
+
+ # Other settings
+ parser.add_argument('--tk_waiting_window', action='store_true',
+ help='(internal) Launches a tkinter waiting window')
+ parser.add_argument('--dont-show', action='store_true',
+ help='Skips showing the plots')
+ parser.add_argument('--worker-info', nargs=2,
+ help='Worker id and total number of parallel workers. Only 3D video plotting now')
+
+ # Different analyses for separate specimens
+
+ parser.add_argument('-A', '--analysis', nargs=1,
+ choices=analyses.keys(),
+ help='Analysis method or action. Allowed analyses are '+', '.join(analyses.keys()))
+
+ args = parser.parse_args(custom_args)
+
+
+
+
+ if args.tk_waiting_window:
+ waiting_window = WaitingWindow('terminal.py', 'When ready, this window closes.')
+
+ if args.worker_info:
+ analyser_commands.I_WORKER = int(args.worker_info[0])
+ analyser_commands.N_WORKERS = int(args.worker_info[1])
+
+ # Getting the data directory
+ # --------------------------
+ if args.data_directory:
+ print('Using data directory {}'.format(args.data_directory[0]))
+
+ data_directories = args.data_directory
+ else:
+ data_directories = [input('Data directory >> ')]
+
+ # Check that the data directory exists
+ for directory in data_directories:
+ if not os.path.isdir(directory):
+ raise ValueError("{} is not a directory".format(directory))
+
+ # {specimen name : [image_folder_1, ...]}
+ wanted_imagefolders = {}
+
+ # Getting the specimens
+ # ---------------------
+ directory_groups = []
+ if args.specimens:
+
+ for group in args.specimens:
+ print('Using specimens {}'.format(group))
+
+ if group == 'none':
+ directory_groups.append(None)
+ continue
+
+ if ':' in group:
+ specimen_separator = ':'
+ else:
+ specimen_separator = ','
+
+ if ';' in group:
+
+ for specimen in group.split(specimen_separator):
+ if ';' in specimen:
+ splitted = specimen.split(';')
+ wanted_imagefolders[splitted[0]] = splitted[1:]
+
+ # Remove specified imagefolders
+ group = ','.join([z.split(';')[0] for z in group.split(specimen_separator)])
+
+
+ # Temporary workaround: treat any remaining colons as commas
+ # (specimen separators)
+ group = group.replace(':', ',')
+
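+ # As an illustrative example (hypothetical names), the argument
+ # 'specimen1;imf1;imf2:specimen2;imf1' ends up as
+ # wanted_imagefolders = {'specimen1': ['imf1', 'imf2'],
+ # 'specimen2': ['imf1']}
+ # and group = 'specimen1,specimen2' for the selector below.
+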
+
+ directories = []
+ for directory in data_directories:
+ selector = DrosoSelect(datadir=directory)
+ directories.extend( selector.parse_specimens(group) )
+ directory_groups.append(directories)
+ else:
+ selector = DrosoSelect(datadir=data_directories[0])
+ directories = selector.ask_user()
+
+
+ # Setting up analysers
+ # ---------------------
+
+ if not args.type:
+ args.type = ['motion' for i in directory_groups]
+
+ analyser_groups = []
+
+ for i_group, directories in enumerate(directory_groups):
+
+ analysers = []
+ Analyser = Analysers[args.type[i_group]]
+
+ print('Using {}'.format(Analyser.__name__))
+
+ if directories is None:
+ analysers.append(Analyser(None, None))
+ else:
+
+ for directory in directories:
+
+ path, folder_name = os.path.split(directory)
+ analyser = Analyser(path, folder_name)
+
+ if args.active_analysis:
+ analyser.active_analysis = args.active_analysis
+
+ analysers.append(analyser)
+
+ # Ask ROIs if not selected
+ for analyser in analysers:
+ if not analyser.are_rois_selected() or args.reselect_rois:
+ analyser.select_ROIs()
+
+ # Analyse movements if not analysed, otherwise load the existing results
+ for analyser in analysers:
+ if not analyser.is_measured() or args.recalculate_movements:
+ analyser.measure_movement(eye='left')
+ analyser.measure_movement(eye='right')
+ analyser.load_analysed_movements()
+
+
+ if args.reverse_directions:
+ for analyser in analysers:
+ analyser.receptive_fields = True
+
+
+
+ if args.average:
+
+ if len(analysers) >= 2:
+
+ avg_analyser = MAverager(analysers)
+ avg_analyser.setInterpolationSteps(5,5)
+
+ if args.short_name:
+ avg_analyser.set_short_name(args.short_name[0])
+
+ analysers = avg_analyser
+ else:
+ analysers = analysers[0]
+ else:
+ if len(analysers) == 1:
+ analysers = analysers[0]
+
+ analyser_groups.append(analysers)
+
+
+ function = analyses[args.analysis[0]]
+
+ print(analyser_groups)
+
+ kwargs = {}
+ if wanted_imagefolders:
+ kwargs['wanted_imagefolders'] = wanted_imagefolders
+
+ if function in MULTIANALYSER_CMDS.values():
+ for analysers in analyser_groups:
+ function(analysers, **kwargs)
+ elif args.average or function in DUALANALYSER_CMDS.values():
+ function(*analyser_groups, **kwargs)
+ else:
+ for analysers in analyser_groups:
+ if not isinstance(analysers, list):
+ analysers = [analysers]
+ for analyser in analysers:
+ function(analyser, **kwargs)
+
+ if args.tk_waiting_window:
+ waiting_window.close()
+
+ if not args.dont_show:
+ plt.show()
+
+
+if __name__ == "__main__":
+ main()
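+
+# Example invocations (hypothetical specimen and analysis names; the valid
+# -A choices come from ANALYSER_CMDS/DUALANALYSER_CMDS/MULTIANALYSER_CMDS):
+#
+# python terminal.py -D /data/drosom -S specimen1,specimen2 -A magtrace
+# python terminal.py -D /data/drosom -S group1 group2 --average -A magtrace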
diff --git a/gonio-analysis/gonioanalysis/drosom/transmittance_analysis.py b/gonio-analysis/gonioanalysis/drosom/transmittance_analysis.py
new file mode 100644
index 0000000..3a50e20
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosom/transmittance_analysis.py
@@ -0,0 +1,104 @@
+
+import os
+import json
+
+import numpy as np
+import tifffile
+
+from gonioanalysis.drosom.analysing import MAnalyser
+
+
+
+class TAnalyser(MAnalyser):
+ '''
+ Transmittance analyser.
+
+ Analyses the ROI's light throughput while the ROI tracks
+ its moving feature (i.e. using MAnalyser motion analysis results).
+
+ Mean (average) pixel value of the ROI is quantified.
+ '''
+
+ def __init__(self, *args, **kwargs):
+
+ super().__init__(*args, **kwargs)
+
+ self._real_movements_savefn = self.MOVEMENTS_SAVEFN
+
+ self._movements_skelefn = self._movements_skelefn.replace('movements_', 'transmittance_')
+ self.active_analysis = ''
+
+
+ def measure_movement(self, eye, *args, **kwargs):
+ '''
+ Analyse transmittance/brightness by calculating mean (average)
+ pixel value of the ROI in its time locations, and save results.
+ '''
+ self.movements = {}
+
+ intensities = {}
+
+ manalyser = MAnalyser(self.data_path, self.folder)
+ manalyser.active_analysis = self.active_analysis
+
+ for i, angle in enumerate(self.stacks):
+ print(' Image folder {}/{}'.format(i+1, len(self.stacks)))
+
+ roi = self.ROIs[eye].get(angle, None)
+
+ if roi is not None:
+
+ images = self.stacks[angle]
+
+ intensities[angle] = []
+
+ for i_repeat, repeat in enumerate(images):
+ ints = []
+
+ try:
+ _roi = manalyser.get_moving_ROIs(eye, angle, i_repeat)
+ except AttributeError:
+ _roi = None
+
+ i_frame = 0
+
+ for fn in repeat:
+
+ tiff = tifffile.TiffFile(fn)
+
+ for i_page in range(len(tiff.pages)):
+
+ frames = tiff.asarray(key=i_page)
+ if len(frames.shape) == 2:
+ frames = [frames]
+
+ for image in frames:
+ if _roi is None:
+ x,y,w,h = roi
+ else:
+ try:
+ x,y,w,h = [int(round(z)) for z in _roi[i_frame]]
+ except IndexError:
+ # No ROI movement, skip
+ break
+
+ intensity = np.mean(image[y:y+h,x:x+w])
+ ints.append(intensity)
+
+ i_frame += 1
+ print("fn {}: {}/{}".format(os.path.basename(fn), i_frame+1, len(tiff.pages)))
+
+ intensities[angle].append({'x': ints, 'y':ints})
+
+
+ self.movements[eye] = intensities
+
+ # Save movements
+ with open(self.MOVEMENTS_SAVEFN.format(eye), 'w') as fp:
+ json.dump(intensities, fp)
+
+
+ def is_measured(self):
+ fns = [self.MOVEMENTS_SAVEFN.format(eye) for eye in self.eyes]
+ return all([os.path.exists(fn) for fn in fns])
+
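+# Minimal usage sketch (same call pattern as MAnalyser; hypothetical paths):
+#
+# tanalyser = TAnalyser('/data/drosom', 'specimen1')
+# tanalyser.measure_movement('left')
+# tanalyser.load_analysed_movements()
+#
+# Each saved repeat stores the ROI's mean pixel value per frame, duplicated
+# into 'x' and 'y' so that MAnalyser-style downstream code keeps working.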
diff --git a/gonio-analysis/gonioanalysis/drosox/analysing.py b/gonio-analysis/gonioanalysis/drosox/analysing.py
new file mode 100644
index 0000000..cd33db5
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosox/analysing.py
@@ -0,0 +1,172 @@
+import os
+import json
+import time
+
+import matplotlib.pyplot as plt
+
+from gonioanalysis.directories import PROCESSING_TEMPDIR
+from gonioanalysis.drosox.loading import load_data
+from gonioanalysis.binary_search import (
+ binary_search_middle,
+ binary_search_left,
+ binary_search_right
+)
+from gonioanalysis.image_tools import ImageShower
+
+
+
+class XAnalyser:
+ '''
+ Analyses DrosoX data and provides access to the analysed results.
+ '''
+ def __init__(self, data_path, folder):
+ '''
+ data_path Path to the data folders
+ folder Name of the DrosoX folder
+ '''
+ self.data_path = data_path
+ self.folder = folder
+ self.fly = folder
+
+ # Move these away from here
+ self.males = ['DrosoX6', 'DrosoX7', 'DrosoX8', 'DrosoX9', 'DrosoX15']
+ self.females = ['DrosoX10', 'DrosoX11', 'DrosoX12', 'DrosoX13', 'DrosoX14']
+ self.skip_flies = ['DrosoX14']
+
+ # Set saving directories and create them
+ self.savedirs = {'overlap': 'binocular_overlap', 'level': 'vertical_correction'}
+ for key in self.savedirs:
+ self.savedirs[key] = os.path.join(PROCESSING_TEMPDIR,
+ 'XAnalyser_data',
+ self.savedirs[key])
+ os.makedirs(self.savedirs[key], exist_ok=True)
+
+
+ print('Initializing XAnalyser, datapath {}, folder {}'.format(data_path, folder))
+
+
+ def get_data(self):
+ '''
+ Calls drosox.loading.load_data for this fly.
+ '''
+ return load_data(os.path.join(self.data_path, self.folder))
+
+
+ def measure_overlap(self):
+ '''
+ Analyses binocular overlap by the binary search method, where
+ the user decides whether both pseudopupils are visible
+ or not.
+ '''
+ start_time = time.time()
+
+ data = load_data(os.path.join(self.data_path, self.folder))
+
+ fig, ax = plt.subplots()
+ shower = ImageShower(fig, ax)
+
+ # Try to open any previously analysed data
+ analysed_data = []
+ try:
+ with open(os.path.join(PROCESSING_TEMPDIR, 'binary_search', 'results_{}.json'.format(self.fly)), 'r') as fp:
+ analysed_data = json.load(fp)
+ except (FileNotFoundError, json.JSONDecodeError):
+ pass
+ analysed_pitches = [item['pitch'] for item in analysed_data]
+
+ print('Found {} pitches of previously analysed data'.format(len(analysed_data)))
+
+ for i, (pitch, hor_im) in enumerate(data):
+
+ # Skip if this pitch is already analysed
+ if pitch in analysed_pitches:
+ continue
+
+ horizontals = [x[0] for x in hor_im]
+ images = [x[1] for x in hor_im]
+
+ N = len(images)
+ shower.setImages(images)
+
+ # Ask user to determine middle, left, and right
+ i_m = binary_search_middle(N, shower)
+ if i_m == 'skip':
+ continue
+
+ i_l = binary_search_left(N, shower, i_m)
+ i_r = binary_search_right(N, shower, i_m)
+
+ analysed_data.append({})
+
+ analysed_data[-1]['N_images'] = N
+ analysed_data[-1]['pitch'] = pitch
+ analysed_data[-1]['horizontals']= horizontals
+ analysed_data[-1]['image_fns']= images
+
+ analysed_data[-1]['index_middle'] = i_m
+ analysed_data[-1]['index_left'] = i_l
+ analysed_data[-1]['index_right']= i_r
+
+ analysed_data[-1]['horizontal_middle'] = horizontals[i_m]
+ analysed_data[-1]['horizontal_left'] = horizontals[i_l]
+ analysed_data[-1]['horizontal_right']= horizontals[i_r]
+
+
+ print('Done {}/{} in time {} minutes'.format(i+1, len(data), int((time.time()-start_time)/60) ))
+
+ # Save on every round to avoid work loss
+ with open(os.path.join(self.savedirs['overlap'],
+ 'results_{}.json'.format(self.fly)), 'w') as fp:
+ json.dump(analysed_data, fp)
+
+
+
+ def get_overlaps(self, correct_antenna_level=True):
+ '''
+ Load the results of binary search.
+
+ correct_antenna_level Corrects with antenna level
+ '''
+ fn = os.path.join(self.savedirs['overlap'], 'results_{}.json'.format(self.fly))
+
+
+ with open(fn, 'r') as fp:
+ overlap_markings = json.load(fp)
+
+
+ if correct_antenna_level:
+ antenna_level = self.get_antenna_level()
+
+ for i in range(len(overlap_markings)):
+ overlap_markings[i]['pitch'] -= antenna_level
+
+
+ return overlap_markings
+
+
+
+ def get_antenna_level(self):
+ '''
+ Load pitch points where the pseudopupils align with antenna.
+ Returns 0 if there is no antenna level for the specimen.
+
+ Run antenna_levels.py first to find antenna levels.
+ '''
+ fn = os.path.join(self.savedirs['level'], '{}.txt'.format(self.fly))
+
+ if os.path.exists(fn):
+ with open(fn, 'r') as fp:
+ antenna_level = float(fp.read())
+
+ return antenna_level
+ else:
+ #raise OSError('Cannot find antenna level correction {}'.format(fn))
+ return 0.
+
+ def print_overlap(self):
+ for d in self.get_overlaps():
+ overlap = abs(d['horizontal_right']-d['horizontal_left'])
+ line = 'Vertical {} deg: overlap width {} deg'.format(d['pitch'],
+ overlap)
+
+ print(line)
diff --git a/gonio-analysis/gonioanalysis/drosox/loading.py b/gonio-analysis/gonioanalysis/drosox/loading.py
new file mode 100644
index 0000000..5ffc468
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosox/loading.py
@@ -0,0 +1,272 @@
+import os
+import json
+import warnings
+import csv
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+from gonioanalysis.directories import PROCESSING_TEMPDIR, ANALYSES_SAVEDIR
+from gonioanalysis.rotary_encoders import to_degrees
+
+
+def load_angle_pairs(fn):
+ '''
+ Loading angle pairs from a file.
+
+ Detached from gonio_imsoft.
+ '''
+ angles = []
+ with open(fn, 'r') as fp:
+ reader = csv.reader(fp)
+ for row in reader:
+ if row:
+ angles.append([int(a) for a in row])
+ return angles
+
+
+def load_data(folder, arl_fly=False):
+ '''
+ Loading a data folder.
+
+ Returns a list where horizontal angles are grouped by the pitch angle
+ and each horizontal angle is next to its image's filename.
+
+ grouped = [pit1, [[hor1, fn1], ...] ...]
+
+
+ INPUT ARGUMENTS DESCRIPTION
+ arl_fly Set True if normal DrosoX processing should be skipped
+ (meaning no outlier removal, pitch grouping etc.)
+
+ '''
+
+ if os.path.isdir(os.path.join(folder, 'rot')):
+ # DrosoX format (MM + trigger): Sequence saved images + anglepairs.txt
+ fns = [os.path.join(folder,'rot',fn) for fn in os.listdir(os.path.join(folder, 'rot')) if fn.endswith('.tif')]
+ fns.sort()
+
+ angles = load_angle_pairs(os.path.join(folder, 'anglepairs.txt'))
+
+ # FIXME: Cannot really load any stack to check how many images in it,
+ # takes too long if remote filesystem
+ # If saved as stack (much less images than angles)
+ if 10*len(fns) < len(angles):
+ fns = [fns[0]+'_{}'.format(i) for i in range(len(angles))]
+
+
+ else:
+ # DrosoM format (gonio imsoft): each position in own folder
+ # Here for DrosoX, use only the first image in each pos folder
+ # if many exists
+ _folders = [f for f in os.listdir(folder) if f.startswith('pos')]
+ fns = [[os.path.join(folder, f, fn) for fn in
+ os.listdir(os.path.join(folder, f)) if fn.endswith('.tiff')][0] for f in _folders]
+
+ angles = [ [int(n) for n in f.split('(')[1].split(')')[0].split(',')] for f in _folders]
+
+ to_degrees(angles)
+
+ print('Angles {} and images {}'.format(len(angles), len(fns)))
+ if abs(len(angles) - len(fns)) > 10:
+ warnings.warn("Large missmatch between the number of recorded the angles and images.", UserWarning)
+
+ fns, angles = _makeSameLength(fns, angles)
+
+ angles_and_images = _pitchGroupedHorizontalsAndImages(fns, angles, arl_fly=arl_fly)
+ if not arl_fly:
+
+ print('Determining pitches to be combined...')
+ angles_to_combine = _pitchesToBeCombined(angles_and_images, angles)
+
+
+ # GROUP NEAR PITCHES
+ print('Combining pitches...')
+ angles_and_images = _groupPitchesNew(angles_and_images, angles_to_combine)
+ print('After grouping: {}'.format(len(angles_and_images)))
+
+ # NO PROBLEM AFTER THIS
+ # -------------------------
+ print('Removing lonely outliers...')
+ angles_and_images = _removeOutliners(angles_and_images, 2)
+
+ #angles_and_images = self.removeShorts(angles_and_images)
+
+ # SORT PITCHES
+ angles_and_images.sort(key=lambda x: x[0], reverse=True)
+
+
+ # SORT HORIZONTALS
+ for i in range(len(angles_and_images)):
+ angles_and_images[i][1].sort(key=lambda x: x[0])
+
+ return angles_and_images
+
+
+def _removeOutliners(angles_and_images, degrees_threshold):
+ '''
+ Remove horizontal angles at the ends of a row that jump more than
+ degrees_threshold away from their only neighbour.
+ '''
+
+ for pitch, hor_im in angles_and_images:
+ remove_indices = []
+
+ for i in range(len(hor_im)):
+ center = hor_im[i][0]
+ try:
+ previous = hor_im[i-1][0]
+ except IndexError:
+ previous = None
+ try:
+ forward = hor_im[i+1][0]
+ except IndexError:
+ forward = None
+
+ if not (previous is None and forward is None):
+
+ if forward is None:
+ if abs(previous-center) > degrees_threshold:
+ remove_indices.append(i)
+ if previous is None:
+ if abs(forward-center) > degrees_threshold:
+ remove_indices.append(i)
+
+ #if previous != None and forward != None:
+ # if abs(previous-center) > degrees_threshold and abs(forward-center) > degrees_threshold:
+ # remove_indices.append(i)
+
+ for i in sorted(remove_indices, reverse=True):
+ hor_im.pop(i)
+
+ return angles_and_images
+
+
+def _getPitchIndex(pitch, angles_and_images):
+
+ for i in range(len(angles_and_images)):
+ if angles_and_images[i][0] == pitch:
+ return i
+ print('Warning: No pitch {} in angles_and_images'.format(pitch))
+ return None
+
+
+def _groupPitchesNew(angles_and_images, to_combine):
+ '''
+ Rotary encoders have some uncertainty, so the pitch reading can "flip"
+ to the adjacent value mid-scan. Each group of pitches in to_combine
+ is merged into a single entry at their mean pitch.
+ '''
+ grouped = []
+
+ for pitches in to_combine:
+
+ combinated = []
+ for pitch in pitches:
+ index = _getPitchIndex(pitch, angles_and_images)
+
+ combinated.extend(angles_and_images.pop(index)[1])
+
+ grouped.append([np.mean(pitches), combinated])
+
+
+ angles_and_images.extend(grouped)
+
+ return angles_and_images
+
+
+def _makeSameLength(lista, listb):
+ if len(lista) > len(listb):
+ lista = lista[0:len(listb)]
+ elif len(lista) < len(listb):
+ listb = listb[0:len(lista)]
+ return lista, listb
+
+
+def _pitchesToBeCombined(angles_and_images, angles):
+ '''
+ Assuming gonio scanning was done keeping pitch constant while
+ varying horizontal angle, it's better to group line scans together
+ because there may be slight drift in the pitch angle.
+ '''
+
+ pitches = [[]]
+ scan_direction = -10
+
+ anglefied_angles_and_images = []
+ for pitch, hor_im in angles_and_images:
+ for horizontal, fn in hor_im:
+ anglefied_angles_and_images.append([horizontal, pitch])
+
+ # Determine pitches that should be combined
+ for i in range(1, len(angles)-1):
+ if angles[i] in anglefied_angles_and_images:
+
+ direction = np.sign( angles[i][0] - angles[i-1][0] )
+ future_direction = np.sign(angles[i+1][0] - angles[i][0])
+
+ if direction != scan_direction and not future_direction == scan_direction:
+ pitches.append([])
+ scan_direction = direction
+
+ if direction == scan_direction or (scan_direction == 0 and future_direction == scan_direction):
+ if not angles[i][1] in pitches[-1]:
+ pitches[-1].append(angles[i][1])
+
+
+ pitches = [p for p in pitches if len(p)>=2 and len(p)<5]
+
+
+ # A pitch can appear more than once in the groups to be combined.
+ # This seems to usually happen in adjacent pitch groupings.
+ # Here, combine [a,b] [b,c] -> [a,b,c]
+ combine = []
+ for i in range(len(pitches)-1):
+ for j, pitch in enumerate(pitches[i]):
+ if pitch in pitches[i+1]:
+ combine.append([i, j])
+
+ for i, j in sorted(combine, reverse=True):
+ pitches[i].pop(j)
+ pitches[i] += pitches[i+1]
+ pitches.pop(i+1)
+ # -----------------------------------------------------
+
+ print("Pitches to be combined")
+ for p in pitches:
+ print(p)
+
+ return pitches
+
+
+def _pitchGroupedHorizontalsAndImages(image_fns, angles, arl_fly=False):
+ '''
+ Returns horizontal angles grouped by pitch (as groupHorizontals)
+ but also links image fn with each horizontal angle.
+
+ Note: image_fns and angles must have one to one correspondence.
+
+ IDEAL STRUCTURE TO WORK WITH
+
+ grouped = [pit1, [[hor1, fn1], ...] ...]
+ '''
+
+ grouped = []
+ pitches_in_grouped = []
+
+ for fn, (horizontal, pitch) in zip(image_fns, angles):
+
+ if not pitch in pitches_in_grouped:
+ pitches_in_grouped.append(pitch)
+ grouped.append([pitch, []])
+
+ i = pitches_in_grouped.index(pitch)
+ grouped[i][1].append([horizontal, fn])
+
+ # For each pitch angle there must be more than 10 images
+ # or the whole pitch is removed
+ if not arl_fly:
+ grouped = [x for x in grouped if len(x[1]) > 10]
+ else:
+ print('ARL fly, no capping of imaging rows.')
+
+ return grouped
+
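+# Illustrative shape of the grouped structure (hypothetical values):
+#
+# grouped = [
+# [10.5, [[-40.1, 'pos(-114,30)/image0.tiff'],
+# [-39.0, 'pos(-111,30)/image0.tiff']]],
+# [6.3, [[-40.4, 'pos(-115,18)/image0.tiff']]],
+# ]
+#
+# i.e. one entry per pitch angle, each holding [horizontal, image_fn] pairs.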
diff --git a/gonio-analysis/gonioanalysis/drosox/plotting.py b/gonio-analysis/gonioanalysis/drosox/plotting.py
new file mode 100644
index 0000000..f68b7dd
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosox/plotting.py
@@ -0,0 +1,423 @@
+'''
+Functions to create plots using matplotlib out of
+XAnalyser objects.
+'''
+
+import os
+import json
+import warnings
+
+import tifffile
+import matplotlib.pyplot as plt
+import matplotlib.patches
+
+from numpy import sign, mean
+import numpy as np
+import scipy.interpolate
+
+from gonioanalysis.directories import PROCESSING_TEMPDIR, ANALYSES_SAVEDIR
+
+
+def plot_1d_overlap(xanalysers):
+ '''
+ Plot binocular overlap so that the x-axis is the pitch angle (vertical angle)
+ and the y-axis is the width of binocular overlap.
+
+ xanalysers List of XAnalyser objects
+
+ Returns the created figure.
+ '''
+ X = []
+ Y = []
+ flies = []
+
+ figure = plt.figure()
+
+ for xanalyser in xanalysers:
+
+ X.append([])
+ Y.append([])
+
+ for marking in sorted(xanalyser.get_overlaps(), key=lambda x: x['pitch']):
+ Y[-1].append(abs(marking['horizontal_left'] - marking['horizontal_right']))
+ X[-1].append(marking['pitch'])
+
+ flies.append(xanalyser.fly)
+
+
+ # Interpolate the data; Required for the mean traces
+ # ----------------------------------------------------
+ intp_step = 1
+
+ XXs_span = np.arange(int(min(map(min, X))/intp_step)*intp_step, int(max(map(max, X))/intp_step)*intp_step, intp_step)
+
+ XX = []
+ YY = []
+
+ for fly, x, y in zip(flies, X,Y):
+ xx = np.arange(int(np.min(x)/intp_step)*intp_step, int(np.max(x)/intp_step)*intp_step, intp_step)
+ yy = np.interp(xx, x, y)
+ plt.scatter(xx, yy, s=5)
+ XX.append(xx)
+ YY.append(yy)
+
+
+ # Mean trace
+ # ----------------
+ mean_YY = []
+
+ for x in XXs_span:
+ yys_to_average = []
+
+ for yy, xx in zip(YY, XX):
+ try:
+ index = list(xx).index(x)
+ except ValueError:
+ continue
+
+ yys_to_average.append(yy[index])
+
+ if yys_to_average:
+ mean_YY.append(np.mean(yys_to_average))
+ else:
+ mean_YY.append(0)
+
+
+ plt.plot(XXs_span, mean_YY, linewidth=2)
+ plt.xlabel('Vertical angle (degrees)')
+ plt.ylabel('Binocular overlap (degrees)')
+
+ return figure
+
+
+def plot_matrix_overlap(xanalysers):
+ '''
+ Plot the binocular overlap in a kind of "matrix representation" by
+ discretizing the continuous data further and plotting colored squares.
+ '''
+
+ def _plotMatrix(matrix, newfig=False, subplot=111):
+ '''
+ Temporarily hidden here. Needs commenting.
+ '''
+ matrix_height, matrix_width = matrix.shape
+
+ if newfig == True:
+ plt.figure()
+ plt.subplot(subplot)
+
+ plt.imshow(matrix, cmap='coolwarm', interpolation='none',
+ extent=(hor_range[0], hor_range[1], ver_range[1], ver_range[0]),
+ aspect='auto')
+
+ ax = plt.gca()
+ ax.set_xticks(np.arange(hor_range[0]+hor_step, hor_range[1]+hor_step, hor_step), minor=True)
+ ax.set_yticks(np.arange(ver_range[0]+ver_step, ver_range[1]+ver_step, ver_step), minor=True)
+ ax.grid(which='minor', color='black', linestyle='-', linewidth=1)
+
+
+ hor_step = 1
+ ver_step = 4
+ hor_range = (-40, 40)
+ ver_range = (-90, 90)
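+ # With these values the matrix spans (40-(-40))/1 = 80 columns of
+ # 1 degree and (90-(-90))/4 = 45 rows of 4 degrees.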
+
+ matrices = []
+
+ matrix_width = int((hor_range[1]-hor_range[0])/hor_step)
+ matrix_height = int((ver_range[1]-ver_range[0])/ver_step)
+
+ for xanalyser in xanalysers:
+ fly = xanalyser.fly
+ markings = xanalyser.get_overlaps()
+
+ X = []
+ Y = []
+ midpoints = []
+
+ # FIXME or not?
+ # The following code for the matrix overlap plot is quite verbose.
+ # Maybe it could be made more readable? At least comment what's
+ # going on in each section. Seems to work nevertheless.
+
+ for marking in markings:
+
+ mid = marking['horizontal_middle']
+
+ row = []
+
+ marking['horizontals'].sort()
+
+ if marking['horizontal_right'] - marking['horizontal_left'] < hor_step:
+ row = [0]*matrix_width
+ row[int(matrix_width/2)] = 2
+
+ else:
+ for angle in range(hor_range[0], hor_range[1], hor_step):
+ if angle < marking['horizontal_left']:
+ row.append(0)
+ elif marking['horizontal_left'] <= angle <= marking['horizontal_right']:
+ row.append(1)
+ elif marking['horizontal_right'] < angle:
+ row.append(0)
+
+ midpoints.append( (marking['horizontal_middle']) / (int(hor_range[1]-hor_range[0])/2) )
+
+ if len(row) != matrix_width:
+ print(row)
+ print(marking['horizontal_left'])
+ print(marking['horizontal_right'])
+ raise UserWarning('Row length {} but matrix width {}'.format(len(row), matrix_width))
+
+
+ X.append(row)
+ Y.append(marking['pitch'])
+
+ matrix = np.zeros( (int((ver_range[1]-ver_range[0])/ver_step), int((hor_range[1]-hor_range[0])/hor_step)) )
+ matrix_i_midpoint = int(matrix.shape[0] / 2)
+ for j, pitch in enumerate(range(*ver_range, ver_step)):
+
+ indices = [y for y in Y if pitch-ver_step/2 <= y <= pitch+ver_step/2]
+ indices = [Y.index(y) for y in indices]
+
+ for index in indices:
+ i_midpoint = int((midpoints[index])*int(matrix_width/2))
+ shift = -1*(i_midpoint)
+ if shift >= 0:
+ matrix[j][shift:] += np.asarray(X[index])[0:matrix_width-shift]
+ elif shift < 0:
+ matrix[j][0:matrix_width+shift] += np.asarray(X[index])[-shift:]
+
+ matrix = np.round(matrix)
+ matrix = np.clip(matrix, 0, 1)
+ matrices.append(matrix)
+
+ avg_matrix = matrices[0] / len(matrices)
+ for i in range(1, len(matrices)):
+ avg_matrix += matrices[i] / len(matrices)
+
+
+ matrix = np.round(avg_matrix)
+
+ _plotMatrix(avg_matrix, newfig=True)
+
+ for j in range(0, avg_matrix.shape[0]):
+ row_max = np.max(avg_matrix[j])
+ if row_max > np.min(avg_matrix[j]):
+ avg_matrix[j] /= row_max
+
+ figure = plt.figure()
+
+ #FIXME
+ '''
+ for i, matrix in enumerate(matrices):
+ for j in range(0, matrix.shape[0]):
+ if not np.any(matrix[j]):
+ matrix[j] += 0.5
+
+ _plotMatrix(matrix, subplot=int('{}{}{}'.format(3,round(len(matrices)/3),i+1)))
+ '''
+ _plotMatrix(avg_matrix, newfig=True)
+
+ # Phiuw, we're out.
+ return figure
+
+
+def plot_experiment_illustration(xanalyser):
+ '''
+ Plot a video of how the fly was rotated while simultaneously reconstructing
+ the matrix plot.
+ '''
+
+ dpi = 300
+
+ savepath = os.path.join(ANALYSES_SAVEDIR, 'binocular_overlap_illustration', xanalyser.fly)
+ os.makedirs(savepath, exist_ok=True)
+
+ hor_step = 1
+ ver_step = 4
+ hor_range = (-40, 40)
+ ver_range = (-90, 90)
+
+ fig = plt.figure(figsize=(8, 4), dpi=dpi)
+ ax = fig.add_subplot(121)
+ #ax.set_axis_off()
+ ax.set_xlim(*hor_range)
+ ax.set_ylim(*ver_range)
+
+ imax = fig.add_subplot(122)
+ imax.set_axis_off()
+
+ markings = sorted(xanalyser.get_overlaps(), key=lambda x: x['pitch'])
+
+ # Variables to keep track of the steps
+ visited_pitches = []
+ i_image = 0
+ direction = True
+
+ antenna_level = xanalyser.get_antenna_level()
+
+ for pitch, horizontals_images in xanalyser.get_data():
+
+
+ visited_horizontals = []
+
+ if visited_pitches and abs(pitch - visited_pitches[-1]) < ver_step:
+ continue
+
+ print('Pitch {}'.format(pitch))
+
+ # Find the marking of this pitch
+ for marking in markings:
+ if marking['pitch'] == pitch - antenna_level:
+ break
+
+ hm = marking['horizontal_middle']
+ hl = marking['horizontal_left']
+ hr = marking['horizontal_right']
+
+ if visited_pitches:
+ if pitch > visited_pitches[-1]:
+ y = visited_pitches[-1] + ver_step/2 - 0.1
+ h = (pitch + ver_step/2) - y
+ else:
+ print('naan')
+ y2 = visited_pitches[-1] - ver_step/2
+ y1 = pitch - ver_step/2
+ y = y1
+ h = abs(y2 - y1)
+ else:
+ y = pitch - ver_step/2
+ h = ver_step
+
+ if direction:
+ horizontals_images.reverse()
+ direction = False
+ else:
+ direction = True
+
+ for horizontal, image_fn in horizontals_images:
+
+ if visited_horizontals and abs(horizontal - visited_horizontals[-1]) < hor_step:
+ continue
+
+ if not hor_range[0] <= horizontal <= hor_range[1]:
+ continue
+
+ # Decide color of drawing
+ if hl <= horizontal <= hr:
+ color = "purple"
+ elif horizontal < hl:
+ color = "red"
+ elif horizontal > hr:
+ color = 'blue'
+
+ if visited_horizontals:
+ if horizontal > visited_horizontals[-1]:
+ x = visited_horizontals[-1] + hor_step/2
+ w = (horizontal + hor_step/2) - x
+ else:
+ x2 = visited_horizontals[-1] - hor_step/2
+ x1 = horizontal - hor_step/2
+ x = x1
+ w = x2 - x1
+ else:
+ x = horizontal - hor_step/2
+ w = hor_step
+
+
+ rect = matplotlib.patches.Rectangle((-(x+w-hm), -(y+h-antenna_level)), w, h, color=color)
+ ax.add_patch(rect)
+
+ image = tifffile.imread(image_fn)
+ image = np.clip(image, 0, np.percentile(image, 95))
+ image = np.rot90(image) / np.max(image)
+
+ try:
+ imshow_obj.set_data(image)
+ except UnboundLocalError:
+ imshow_obj = imax.imshow(image, cmap='gray')
+
+
+ fig.savefig(os.path.join(savepath, 'image_{:08d}'.format(i_image)), dpi=dpi)
+ i_image += 1
+
+
+
+ visited_horizontals.append(horizontal)
+
+ visited_pitches.append(pitch)
+
+
+
+
+
+# FIXME or remove?
+'''
+def plot2DOverlap(xanalysers):
+
+ Xl = []
+ Yl = []
+ Xr = []
+ Yr = []
+ Xm = []
+ Ym = []
+
+
+
+ for marking in self.overlap_markings:
+
+ mid = marking['horizontal_middle']
+ #mid = 0
+ Xl.append(marking['horizontal_left']-mid)
+ Xr.append(marking['horizontal_right']-mid)
+ Xm.append(marking['horizontal_middle']-mid)
+ Yl.append(marking['pitch'])
+ Yr.append(marking['pitch'])
+ Ym.append(marking['pitch'])
+
+ plt.scatter(Xl, Yl, color='blue')
+ plt.scatter(Xr, Yr, color='red')
+ plt.scatter(Xm, Ym, color='yellow')
+
+
+ plt.show()
+
+
+def plotFancy2DOverlap():
+
+ X,Y,C = [[],[],[]]
+ for marking in self.overlap_markings:
+
+ mid = marking['horizontal_middle']
+
+ for i in range(len(marking['horizontals'])):
+
+ pitch = marking['pitch']
+ horizontal = marking['horizontals'][i]
+ L = min(marking['horizontal_left'], marking['horizontal_right'])
+ R = max(marking['horizontal_left'], marking['horizontal_right'])
+
+ if L < horizontal < R:
+ C.append(2)
+ else:
+ C.append(1)
+
+ X.append(horizontal-mid)
+ Y.append(pitch)
+
+ f_int = scipy.interpolate.interp2d(X, Y, C, fill_value=1)
+
+ X = np.linspace(np.min(X), np.max(X), 100)
+ Y = np.linspace(np.min(Y), np.max(Y), 100)
+ C = f_int(X, Y)
+
+ X, Y = np.meshgrid(X, Y)
+
+ C = np.around(C)
+
+ plt.pcolormesh(X, Y, C)
+
+ plt.show()
+'''
+
diff --git a/gonio-analysis/gonioanalysis/drosox/terminal.py b/gonio-analysis/gonioanalysis/drosox/terminal.py
new file mode 100644
index 0000000..885a007
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/drosox/terminal.py
@@ -0,0 +1,90 @@
+import os
+import argparse
+
+import matplotlib.pyplot as plt
+
+from gonioanalysis.droso import DrosoSelect, simple_select
+from gonioanalysis.drosox.analysing import XAnalyser
+from gonioanalysis.drosox.plotting import (
+ plot_1d_overlap,
+ plot_matrix_overlap,
+ plot_experiment_illustration
+)
+
+
+def main():
+
+ parser = argparse.ArgumentParser(description='DrosoX: Analyse DPP static imaging data')
+ parser.add_argument('--datadir',
+ help='Path to the specimens data directory or ASK')
+
+ parser.add_argument('--specimens',
+ help='Space separated list of specimens. Asked if not specified.',
+ nargs='*')
+
+
+ parser.add_argument('--measure-overlap',
+ help='(Re)measure binocular overlap using user-assisted binary search',
+ action='store_true')
+
+
+ # Overlap data
+ parser.add_argument('--print-overlap',
+ help='Print overlap data as text',
+ action='store_true')
+
+ # Plotting arguments
+ parser.add_argument('--plot-overlap',
+ help='Plot 1D binocular overlap',
+ action='store_true')
+ parser.add_argument('--plot-matrix-overlap',
+ help='Plot binocular overlap as discrete rectangles',
+ action='store_true')
+ parser.add_argument('--plot-experiment-illustration',
+ help='Plot illustrative video how the experiments were done',
+ action='store_true')
+
+
+
+ args = parser.parse_args()
+
+ if args.datadir is None or args.datadir.lower() == 'ask':
+ datadir = input("Input data directory >> ")
+ else:
+ datadir = args.datadir
+
+
+ if not args.specimens:
+ specimens = DrosoSelect(datadir=datadir).ask_user()
+ specimens = [os.path.basename(specimen) for specimen in specimens]
+ else:
+ specimens = args.specimens
+
+
+ xanalysers = [XAnalyser(datadir, specimen) for specimen in specimens]
+
+ # Perform the tasks that have to be performed per xanalyser
+ for xanalyser in xanalysers:
+ print(xanalyser.fly)
+ if args.print_overlap:
+ xanalyser.print_overlap()
+
+ if args.measure_overlap:
+ xanalyser.measure_overlap()
+
+ if args.plot_experiment_illustration:
+ plot_experiment_illustration(xanalyser)
+
+
+ if args.plot_overlap:
+ plot_1d_overlap(xanalysers)
+ elif args.plot_matrix_overlap:
+ plot_matrix_overlap(xanalysers)
+
+ if not args.measure_overlap:
+ plt.show()
+
+
+if __name__ == "__main__":
+ main()
+
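+# Example invocations (hypothetical paths and specimen names):
+#
+# python terminal.py --datadir /data/drosox --specimens DrosoX6 DrosoX7 --plot-overlap
+# python terminal.py --datadir ask --measure-overlap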
diff --git a/gonio-analysis/gonioanalysis/image_tools.py b/gonio-analysis/gonioanalysis/image_tools.py
new file mode 100644
index 0000000..ceabf5a
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/image_tools.py
@@ -0,0 +1,120 @@
+
+import numpy as np
+import tifffile
+import matplotlib.pyplot as plt
+
+
+def open_adjusted(fn):
+ '''
+ Images can have high glares. This function tries to
+ clip the image (adjust contrast) so that the DPP
+ is clearly visible.
+
+ Usually the glare is concentrated at one point, so this
+ exploits that fact.
+ '''
+
+ image = tifffile.imread(fn)
+
+ glare_max = np.max(image)
+ median = np.median(image)
+
+ image = np.clip(image, 0, int(np.mean([glare_max, median])/6) )
+
+ return image
+
+
+
+class ImageShower:
+ '''
+ From cascade training, a generalized image shower
+ '''
+
+ def __init__(self, fig, ax):
+
+ self.fig = fig
+ self.ax = ax
+
+ self.fns = None
+
+ self.buttonActions = []
+ self.image_brightness = 0
+ self.image_maxval = 1
+ self.cid = self.fig.canvas.mpl_connect('key_press_event', self.callbackButtonPressed)
+
+ self.pupils = -1
+
+ self.title = ''
+
+ self.fig.show()
+
+
+ def callbackButtonPressed(self, event):
+ '''
+ A callback function connecting to matplotlib's event manager.
+ '''
+
+ # Adjusting the image contrast and brightness
+ if event.key == 'z':
+ self.image_maxval -= 0.3
+ self.updateImage(strong=True)
+
+ elif event.key == 'x':
+ self.image_maxval += 0.3
+ self.updateImage(strong=True)
+
+ elif event.key == 'a':
+ self.image_brightness += 50
+ self.updateImage(strong=True)
+ elif event.key == 'c':
+ self.image_brightness += -50
+ self.updateImage(strong=True)
+
+ for button, action in self.buttonActions:
+ if event.key == button:
+ print(event.key)
+ action()
+
+
+ def setImages(self, image_fns):
+ '''
+ Set the images that ImageShower shows.
+ '''
+ self.fns = image_fns
+ self.cache = {}
+
+ def setImage(self, i):
+ fn = self.fns[i]
+ try:
+ self.image = self.cache[fn]
+ except KeyError:
+ self.image = tifffile.imread(fn)
+
+ self.updateImage()
+
+ def cacheImage(self, i):
+ '''
+ Loads image to cache for faster showup.
+ '''
+ fn = self.fns[i]
+ # Uncomment to enable caching
+ #self.cache[fn] = tifffile.imread(fn)
+
+ def setTitle(self, title):
+ self.title = title
+
+ def updateImage(self, strong=False):
+ capvals = (0, np.mean(self.image) *self.image_maxval)
+ self.ax.clear()
+ self.ax.set_title(self.title)
+ self.ax.imshow(self.image-self.image_brightness,cmap='gist_gray', interpolation='nearest', vmin=capvals[0], vmax=capvals[1])
+
+ if not strong:
+ self.fig.canvas.draw()
+ else:
+ self.fig.show()
+
+ def close(self):
+ plt.close(self.fig)
+
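+# Minimal usage sketch (hypothetical filenames; keys z/x scale the contrast
+# cap and a/c shift the brightness, as wired in callbackButtonPressed):
+#
+# fig, ax = plt.subplots()
+# shower = ImageShower(fig, ax)
+# shower.setImages(['im0.tif', 'im1.tif'])
+# shower.setImage(0) # show the first image
+# plt.show()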
+
diff --git a/gonio-analysis/gonioanalysis/images/dpp.tif b/gonio-analysis/gonioanalysis/images/dpp.tif
new file mode 100644
index 0000000..d752f97
Binary files /dev/null and b/gonio-analysis/gonioanalysis/images/dpp.tif differ
diff --git a/gonio-analysis/gonioanalysis/images/droso_roll.png b/gonio-analysis/gonioanalysis/images/droso_roll.png
new file mode 100644
index 0000000..0955bdd
Binary files /dev/null and b/gonio-analysis/gonioanalysis/images/droso_roll.png differ
diff --git a/gonio-analysis/gonioanalysis/images/from_mikko.png b/gonio-analysis/gonioanalysis/images/from_mikko.png
new file mode 100644
index 0000000..a733bc2
Binary files /dev/null and b/gonio-analysis/gonioanalysis/images/from_mikko.png differ
diff --git a/gonio-analysis/gonioanalysis/images/from_mikko_annotated.png b/gonio-analysis/gonioanalysis/images/from_mikko_annotated.png
new file mode 100644
index 0000000..8616b4e
Binary files /dev/null and b/gonio-analysis/gonioanalysis/images/from_mikko_annotated.png differ
diff --git a/gonio-analysis/gonioanalysis/images/mikkos_alternative_fly.png b/gonio-analysis/gonioanalysis/images/mikkos_alternative_fly.png
new file mode 100644
index 0000000..793d54a
Binary files /dev/null and b/gonio-analysis/gonioanalysis/images/mikkos_alternative_fly.png differ
diff --git a/gonio-analysis/gonioanalysis/images/rotation_roll.png b/gonio-analysis/gonioanalysis/images/rotation_roll.png
new file mode 100644
index 0000000..ed4b647
Binary files /dev/null and b/gonio-analysis/gonioanalysis/images/rotation_roll.png differ
diff --git a/gonio-analysis/gonioanalysis/images/rotation_yaw.png b/gonio-analysis/gonioanalysis/images/rotation_yaw.png
new file mode 100644
index 0000000..54cc489
Binary files /dev/null and b/gonio-analysis/gonioanalysis/images/rotation_yaw.png differ
diff --git a/gonio-analysis/gonioanalysis/images/side_aligning_pupil_antenna_whiteBG.jpg b/gonio-analysis/gonioanalysis/images/side_aligning_pupil_antenna_whiteBG.jpg
new file mode 100644
index 0000000..10aff63
Binary files /dev/null and b/gonio-analysis/gonioanalysis/images/side_aligning_pupil_antenna_whiteBG.jpg differ
diff --git a/gonio-analysis/gonioanalysis/rotary_encoders.py b/gonio-analysis/gonioanalysis/rotary_encoders.py
new file mode 100644
index 0000000..64b774e
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/rotary_encoders.py
@@ -0,0 +1,35 @@
+'''
+Transforming rotary encoder step values into degrees and vice versa.
+'''
+
+DEFAULT_STEPS_PER_REVOLUTION = 1024
+
+
+def to_degrees(angle_pairs):
+ '''
+ Transform angle pairs (rotary encoder step counts) into the
+ corresponding angles in degrees, in place.
+
+ angle_pairs List of (horizontal, vertical) step pairs
+ '''
+
+ for i in range(len(angle_pairs)):
+ angle_pairs[i][0] *= (360/DEFAULT_STEPS_PER_REVOLUTION)
+ angle_pairs[i][1] *= (360/DEFAULT_STEPS_PER_REVOLUTION)
+
+
+def step2degree(step, steps_per_revolution=DEFAULT_STEPS_PER_REVOLUTION):
+ '''
+ Transform a rotary encoder step count (such as 54 steps) into the
+ corresponding rotation in degrees (54 * 360/steps_per_revolution).
+ '''
+ return step * (360/steps_per_revolution)
+
+
+def degree2step(angle, steps_per_revolution=DEFAULT_STEPS_PER_REVOLUTION):
+ '''
+ Transform a rotation angle from degrees into the corresponding rotary encoder steps.
+ '''
+ return angle * (steps_per_revolution/360)
+
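+
+# A minimal usage sketch (assuming the default 1024-step encoders):
+#
+#     >>> step2degree(256)
+#     90.0
+#     >>> degree2step(90)
+#     256.0
+#     >>> pairs = [[512, 256]]
+#     >>> to_degrees(pairs)   # modifies pairs in place
+#     >>> pairs
+#     [[180.0, 90.0]]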
+
diff --git a/gonio-analysis/gonioanalysis/settings.py b/gonio-analysis/gonioanalysis/settings.py
new file mode 100644
index 0000000..a04ca5e
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/settings.py
@@ -0,0 +1,51 @@
+'''
+Gonioanalysis-wide settings.
+
+Attributes
+----------
+
+DEFAULT_SAVENAME
+'''
+
+import os
+import json
+
+from gonioanalysis.directories import GONIODIR
+
+
+DEFAULT_SAVENAME = "gonioanalysis-settings.json"
+
+
+def _load(fn):
+ fullfn = os.path.join(GONIODIR, fn)
+ if os.path.isfile(fullfn):
+ with open(fullfn, 'r') as fp:
+ return json.load(fp)
+ else:
+ return {}
+
+def _save(data, fn):
+ with open(os.path.join(GONIODIR, fn), 'w') as fp:
+ json.dump(data, fp)
+
+
+def set(key, value, fn=DEFAULT_SAVENAME):
+ '''
+ Set and save a setting.
+ '''
+ settings = _load(fn)
+
+ settings[key] = value
+
+ _save(settings, fn)
+
+
+def get(key, default=None, fn=DEFAULT_SAVENAME):
+ '''
+ Get a setting.
+ Specify default for the fallback value, like in Python's standard dict.get.
+ '''
+ settings = _load(fn)
+
+ return settings.get(key, default)
+
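+
+# A usage sketch (the key name here is arbitrary):
+#
+#     >>> set('last_datadir', '/home/user/data')
+#     >>> get('last_datadir', default='/home/user')
+#     '/home/user/data'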
diff --git a/gonio-analysis/gonioanalysis/tkgui/__init__.py b/gonio-analysis/gonioanalysis/tkgui/__init__.py
new file mode 100644
index 0000000..c9da0f8
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/tkgui/__init__.py
@@ -0,0 +1,2 @@
+
+from .examine import main as run
diff --git a/gonio-analysis/gonioanalysis/tkgui/__main__.py b/gonio-analysis/gonioanalysis/tkgui/__main__.py
new file mode 100644
index 0000000..b2e180e
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/tkgui/__main__.py
@@ -0,0 +1,4 @@
+from .examine import main
+
+if __name__ == "__main__":
+ main()
diff --git a/gonio-analysis/gonioanalysis/tkgui/core.py b/gonio-analysis/gonioanalysis/tkgui/core.py
new file mode 100644
index 0000000..23b7f7e
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/tkgui/core.py
@@ -0,0 +1,254 @@
+
+import os
+import subprocess
+import sys
+import platform
+
+from gonioanalysis.droso import SpecimenGroups
+from gonioanalysis.directories import CODE_ROOTDIR
+from gonioanalysis.drosom.analysing import MAnalyser
+from gonioanalysis.drosom.orientation_analysis import OAnalyser
+from gonioanalysis.drosom.transmittance_analysis import TAnalyser
+from gonioanalysis.directories import ANALYSES_SAVEDIR
+
+
+class Core:
+ '''
+ Tkinter independent functions, reusable for other GUI implementations.
+
+ Attributes
+ ----------
+ data_directory : list of strings
+ Current data directories
+ current_specimen : string
+ Name of the current specimen
+ analyser : object
+ MAnalyser (or OAnalyser) object of the current specimen
+ selected_recording : string
+ Selected recording name (image_folder)
+ analyser_class : class
+ Class of the new analysers to create (MAnalyser or OAnalyser)
+ analyser_classes: list of classes
+ List of available analyser classes for reference
+ active_analysis : string or None
+ Name of the active analysis
+ '''
+
+ def __init__(self):
+
+ self.data_directory = []
+ self.current_specimen = None
+ self.analyser = None
+ self.selected_recording = None
+
+ self.analyser_class = MAnalyser
+ self.analyser_classes = [MAnalyser, OAnalyser, TAnalyser]
+
+ self.active_analysis = None
+
+ self._folders = {}
+
+ self.groups = SpecimenGroups()
+
+
+ def set_data_directory(self, data_directory):
+ '''
+ Update Core's knowledge about the currently selected data_directory.
+
+ Arguments
+ ---------
+ data_directory : list of strings
+ List of paths to the data
+ '''
+ self.data_directory = data_directory
+
+ self._folders = {}
+ for data_directory in self.data_directory:
+ self._folders[data_directory] = os.listdir(data_directory)
+
+
+ def set_current_specimen(self, specimen_name):
+ '''
+ Update Core's knowledge about the currently selected specimen.
+ '''
+ self.current_specimen = specimen_name
+ self.analyser = self.get_manalyser(specimen_name)
+
+
+ def set_selected_recording(self, selected_recording):
+ self.selected_recording = selected_recording
+
+
+ def set_analyser_class(self, class_name):
+ index = [i for i, ac in enumerate(self.analyser_classes) if ac.__name__ == class_name]
+ self.analyser_class = self.analyser_classes[index[0]]
+
+ if self.data_directory:
+ self.update_gui(changed_specimens=True)
+
+
+ def list_specimens(self, with_rois=None, with_movements=None, with_correction=None):
+ '''
+ List specimens in the data directory. The list may also contain bad folders
+ (the contents are not checked).
+
+ With the following keyword arguments one can select only the specimens that
+ fulfill the conditions, by setting each keyword argument to True (has to
+ fulfill the condition), False (must not fulfill it) or None (the condition
+ is not considered).
+
+ with_rois Specimens with ROIs selected
+ with_movements Specimens with movements measured
+ with_correction Specimens with antenna_level (zero level) correction
+
+ For example, if you want the specimens with movements but without antenna_level
+ corrections, and you don't care about ROIs, set
+ with_rois=None, with_movements=True, with_correction=False
+
+ '''
+ specimens = []
+ for data_directory in self.data_directory:
+ specimens.extend( [fn for fn in os.listdir(data_directory) if os.path.isdir(os.path.join(data_directory, fn))] )
+
+ if with_rois is not None:
+ specimens = [specimen for specimen in specimens if self.get_manalyser(specimen, no_data_load=True).are_rois_selected() == with_rois]
+
+ if with_movements is not None:
+ specimens = [specimen for specimen in specimens if self.get_manalyser(specimen, no_data_load=True).is_measured() == with_movements]
+
+ if with_correction is not None:
+ specimens = [specimen for specimen in specimens if self.get_manalyser(specimen, no_data_load=True).get_antenna_level_correction() is not False]
+
+ return sorted(specimens)
+
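+
+ # A call sketch, e.g. specimens with ROIs selected but not yet measured:
+ #
+ #     core.list_specimens(with_rois=True, with_movements=False)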
+
+ def get_specimen_fullpath(self, specimen_name=None):
+ '''
+ Returns the full path of a specimen (data directory + specimen folder name).
+
+ Arguments
+ ---------
+ specimen_name : string or None
+ If None, use self.current_specimen
+ '''
+ if specimen_name is None:
+ specimen_name = self.current_specimen
+
+ for directory in self.data_directory:
+ if specimen_name in self._folders[directory]:
+ return os.path.join(directory, specimen_name)
+
+ raise ValueError("no specimen with name {}".format(specimen_name))
+
+
+ def _configure_analyser(self, analyser):
+ if self.active_analysis:
+ analyser.active_analysis = self.active_analysis
+ return analyser
+
+
+ def get_manalyser(self, specimen_name, **kwargs):
+ '''
+ Gets manalyser for the specimen specified by the given name.
+ '''
+ for directory in self.data_directory:
+ if specimen_name in self._folders[directory]:
+ break
+
+ analyser = self.analyser_class(directory, specimen_name, **kwargs)
+
+ return self._configure_analyser(analyser)
+
+
+ def get_manalysers(self, specimen_names, **kwargs):
+ '''
+ Like get_manalyser but returns a list of analyser objects and also
+ checks for specimen groups if a specimen cannot be found.
+ '''
+ analysers = []
+ for name in specimen_names:
+ try:
+ ans = [self.get_manalyser(name, **kwargs)]
+ except FileNotFoundError:
+ ans = [self.get_manalyser(n, **kwargs) for n in self.groups.groups.get(name, [])]
+
+ # Try again after reloading the groups
+ if not ans:
+ self.groups.load_groups()
+ ans = [self.get_manalyser(n, **kwargs) for n in self.groups.groups.get(name, [])]
+
+ if not ans:
+ raise FileNotFoundError('Cannot find specimen {}'.format(name))
+
+ for an in ans:
+ analysers.append( self._configure_analyser(an) )
+
+ return analysers
+
+
+ def adm_subprocess(self, specimens, terminal_args, open_terminal=False):
+ '''
+ Invokes drosom/terminal.py
+
+ Arguments
+ ---------
+ specimens : list of string
+ List of specimen names or 'current'
+ terminal_args : string
+ Arguments passed to the plotter (drosom/terminal.py)
+ open_terminal : bool
+ If true open in a cmd window (on Windows) or lxterm (on Linux)
+ '''
+
+ # 1) Find the Python executable and wrap it in quotation marks if it contains spaces
+ python = sys.executable
+ if ' ' in python:
+ python = '"' + python + '"'
+
+
+ # 2) Find the full path to the adm Python file in the gonio root
+ pyfile = os.path.join(CODE_ROOTDIR, 'drosom/terminal.py')
+
+ # Check for spaces in the filename. If there are spaces,
+ # we have to wrap the filename in quotation marks
+ if ' ' in pyfile:
+ pyfile = '"' + pyfile + '"'
+
+ # 3) Get the specimen names (colon-separated)
+ if specimens == 'current':
+ specimen_names = self.analyser.folder
+ else:
+ specimen_names = ':'.join(specimens)
+ if ':' not in specimen_names:
+ specimen_names += ':'
+
+ if self.active_analysis not in ['default', '', None]:
+ terminal_args += ' --active-analysis '+ self.active_analysis
+
+ arguments = '-D "{}" -S "{}" {}'.format(' '.join(self.data_directory), specimen_names, terminal_args)
+
+ if self.analyser_class is MAnalyser:
+ pass
+ elif self.analyser_class is OAnalyser:
+ arguments = '--type orientation ' + arguments
+ elif self.analyser_class is TAnalyser:
+ arguments = '--type transmittance ' + arguments
+ else:
+ raise NotImplementedError
+
+ command = '{} {} {} &'.format(python, pyfile, arguments)
+
+ if open_terminal:
+ if platform.system() == 'Linux':
+ command = 'lxterm -e ' + command
+ elif platform.system() == 'Windows':
+ command = 'start /wait ' + command
+ else:
+ raise OSError('Operating system not supported by gonio?')
+
+ print(command)
+
+ subprocess.run(command, shell=True)
+
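+ # A call sketch (the specimen name is hypothetical); opens an
+ # interactive vectormap for one specimen in a subprocess:
+ #
+ #     core.adm_subprocess(['specimen_01'], '-A vectormap')
+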
+
+ def update_gui(self, changed_specimens=False):
+ raise NotImplementedError("The GUI should override the update_gui method of Core (core.py)")
+
diff --git a/gonio-analysis/gonioanalysis/tkgui/examine.py b/gonio-analysis/gonioanalysis/tkgui/examine.py
new file mode 100644
index 0000000..85b4197
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/tkgui/examine.py
@@ -0,0 +1,649 @@
+'''
+
+TODO
+
+Features
++ video of the moving ROIs
++ no need for manual add to PythonPath on Windows
+- window to stretch to full screen
+- multiselect ROIs (if many separate recordings at the same site)
+
+Polishing
+- specimens control title, retain name specimen
+- image folders control title as specimens
+- after movement measure, update MAnalyser
+- displacement plot Y axis label
+- highlight specimens/image_folders:
+ red: no rois / movements
+ yellow: rois but no movements
+- x-axis from frames to time?
+
+
+'''
+import os
+import sys
+import ctypes
+import itertools
+
+import numpy as np
+import tkinter as tk
+from tkinter import filedialog
+
+from tk_steroids.routines import inspect_booleans
+from tk_steroids.elements import (
+ Listbox,
+ Tabs,
+ ButtonsFrame,
+ ColorExplanation,
+ TickboxFrame
+ )
+from tk_steroids.matplotlib import CanvasPlotter
+
+from gonioanalysis import __version__
+from gonioanalysis.directories import PROCESSING_TEMPDIR, GONIODIR
+from gonioanalysis.rotary_encoders import to_degrees
+from gonioanalysis.drosom.loading import angles_from_fn
+from gonioanalysis.drosom.plotting.common import save_3d_animation
+from gonioanalysis.drosom.plotting.basics import (
+ plot_1d_magnitude,
+ plot_xy_trajectory,
+ plot_3d_vectormap,
+ )
+from gonioanalysis.drosom.analyser_commands import ANALYSER_CMDS
+from gonioanalysis.tkgui.core import Core
+from gonioanalysis.tkgui.plotting import RecordingPlotter
+from gonioanalysis.tkgui.widgets import RepetitionSelector
+
+from gonioanalysis.tkgui.menu_commands import (
+ ModifiedMenuMaker,
+ FileCommands,
+ ImageFolderCommands,
+ SpecimenCommands,
+ ManySpecimenCommands,
+ OtherCommands
+ )
+
+
+
+class ExamineMenubar(tk.Frame):
+ '''
+ Menubar class for the examine GUI.
+ '''
+
+ def __init__(self, parent):
+ tk.Frame.__init__(self, parent)
+
+ self.parent = parent
+ self.root = parent.root
+ self.core = parent.core
+ self.menubar = tk.Menu(self)
+
+ # File command and menu
+ self.file_commands = FileCommands(self.parent, self.core, 'File')
+ self.file_commands._connect(self.menubar, tearoff=0)
+
+ self.menubar.add_command(label="|")
+
+ # Imagefolder command and menu
+ self.imagefolder_commands = ImageFolderCommands(self.parent, self.core, 'Image folder')
+ self.imagefolder_commands._connect(self.menubar, tearoff=0)
+
+ # Specimen commands and menu
+ self.specimen_commands = SpecimenCommands(self.parent, self.core, 'Specimen')
+ self.specimen_commands._connect(self.menubar, tearoff=0)
+
+ # Submenu: Add terminal commands
+ self.terminal_commands = ModifiedMenuMaker(self.parent, self.core, 'Terminal interface commands')
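+ # name=name binds the current loop value to each lambda; without it,
+ # every menu entry would run the command that came last in the loop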
+ for name in ANALYSER_CMDS:
+ setattr(self.terminal_commands, name, lambda name=name: self.core.adm_subprocess('current', "-A "+name) )
+ self.terminal_commands._connect(self.specimen_commands.tkmenu)
+
+ # Many specimen commands and menu
+ self.many_specimen_commands = ManySpecimenCommands(self.parent, self.core, 'Many specimens')
+ self.many_specimen_commands._connect(self.menubar, tearoff=0)
+
+ self.menubar.add_command(label="|")
+
+ # Other commands and menu
+ self.other_commands = OtherCommands(self.parent, self.core, 'Other')
+ self.other_commands._connect(self.menubar, tearoff=0)
+
+
+ self.winfo_toplevel().config(menu=self.menubar)
+
+
+
+
+class ExamineView(tk.Frame):
+ '''
+ The examine frame. Selection of
+ - data directory
+ - specimen
+ - recording
+ and plotting the intermediate result for each recording.
+
+ '''
+
+ def __init__(self, parent):
+
+ self.last_saveplotter_dir = GONIODIR
+
+ tk.Frame.__init__(self, parent)
+
+ self.core = Core()
+ self.core.update_gui = self.update_specimen
+
+ self.root = self.winfo_toplevel()
+
+ # Make canvas plotter to stretch
+ self.grid_rowconfigure(0, weight=1)
+ self.grid_columnconfigure(1, weight=3)
+ self.grid_columnconfigure(0, weight=1)
+ self.grid_columnconfigure(0, minsize=400)
+
+
+ #tk.Button(self, text='Set data directory...', command=self.set_data_directory).grid(row=0, column=0)
+
+ # Create the menubar
+ self.menu = ExamineMenubar(self)
+
+ # LEFTSIDE frame
+ self.leftside_frame = tk.Frame(self)
+ self.leftside_frame.grid(row=0, column=0, sticky='NSWE')
+ self.leftside_frame.grid_rowconfigure(4, weight=1)
+ self.leftside_frame.grid_columnconfigure(0, weight=1)
+ self.leftside_frame.grid_columnconfigure(1, weight=1)
+
+
+
+ # The 1st buttons frame, selecting root data directory
+ self.buttons_frame_1 = ButtonsFrame(self.leftside_frame, ['Set data directory'],
+ [self.menu.file_commands.set_data_directory, self.menu.file_commands.set_data_directory])
+ self.buttons_frame_1.grid(row=0, column=0, sticky='NW', columnspan=2)
+
+
+ self.specimen_control_frame = tk.LabelFrame(self.leftside_frame, text='Specimen')
+ self.specimen_control_frame.grid(row=1, column=0, sticky='NWES', columnspan=2)
+
+
+ # The 2nd buttons frame, ROIs and movements
+ self.buttons_frame_2 = ButtonsFrame(self.specimen_control_frame,
+ ['Select ROIs', 'Measure movement'],
+ [self.menu.specimen_commands.select_ROIs, self.menu.specimen_commands.measure_movement])
+ self.buttons_frame_2.grid(row=1, column=0, sticky='NW', columnspan=2)
+ self.button_rois, self.button_measure = self.buttons_frame_2.get_buttons()
+
+ # Subframe for 2nd buttons frame
+ #self.status_frame = tk.Frame(self.leftside_frame)
+ #self.status_frame.grid(row=2)
+
+ self.status_rois = tk.Label(self.specimen_control_frame, text='ROIs selected 0/0', font=('system', 8))
+ self.status_rois.grid(row=2, column=0, sticky='W')
+
+ self.status_antenna_level = tk.Label(self.specimen_control_frame, text='Zero corrected N/A', font=('system', 8))
+ #self.status_antenna_level.grid(row=3, column=0, sticky='W')
+
+ self.status_active_analysis = tk.Label(self.specimen_control_frame, text='Active analysis: default', font=('system', 8), justify=tk.LEFT)
+ self.status_active_analysis.grid(row=4, column=0, sticky='W')
+
+ self.tickbox_analyses = TickboxFrame(self.specimen_control_frame, [], ncols=4)
+ self.tickbox_analyses.grid(row=5, column=0, sticky='W')
+
+
+ # Image folder manipulations
+ self.folder_control_frame = tk.LabelFrame(self.leftside_frame, text='Image folder')
+ self.folder_control_frame.grid(row=2, column=0, sticky='NWES', columnspan=2)
+
+ self.buttons_frame_3 = ButtonsFrame(self.folder_control_frame,
+ ['Reselect ROI', 'Remeasure'],
+ [self.menu.imagefolder_commands.select_ROIs, self.menu.imagefolder_commands.measure_movement])
+ self.buttons_frame_3.grid(row=1, column=0, sticky='NW', columnspan=2)
+ self.button_one_roi = self.buttons_frame_2.get_buttons()[0]
+
+
+ self.status_horizontal = tk.Label(self.folder_control_frame, text='Horizontal angle N/A', font=('system', 8))
+ self.status_horizontal.grid(row=2, column=0, sticky='W')
+
+ self.status_vertical = tk.Label(self.folder_control_frame, text='Vertical angle N/A', font=('system', 8))
+ self.status_vertical.grid(row=3, column=0, sticky='W')
+
+
+
+ # Selecting the specimen
+ tk.Label(self.leftside_frame, text='Specimens').grid(row=3, column=0)
+ self.specimen_box = Listbox(self.leftside_frame, ['(select directory)'], self.on_specimen_selection)
+ self.specimen_box.grid(row=4, column=0, sticky='NSEW')
+
+
+ # Selecting the recording
+ tk.Label(self.leftside_frame, text='Image folders').grid(row=3, column=1)
+ self.recording_box = Listbox(self.leftside_frame, [''], self.on_recording_selection)
+ self.recording_box.grid(row=4, column=1, sticky='NSEW')
+
+
+ # Add color explanation frame in the bottom
+ ColorExplanation(self.leftside_frame, ['white', 'green', 'yellow'],
+ ['Movements measured', 'ROIs selected', 'No ROIs']).grid(row=5, column=0, sticky='NW')
+
+
+ # RIGHTSIDE frame
+ self.rightside_frame = tk.Frame(self)
+ self.rightside_frame.grid(row=0, column=1, sticky='NWES')
+
+
+ tab_kwargs = [{}, {}, {}, {'projection': '3d'}]
+ tab_names = ['ROI', 'Displacement', 'XY', '3D']
+ canvas_constructors = [lambda parent, kwargs=kwargs: CanvasPlotter(parent, visibility_button=False, **kwargs) for kwargs in tab_kwargs]
+ self.tabs = Tabs(self.rightside_frame, tab_names, canvas_constructors,
+ on_select_callback=self.update_plot)
+
+ self.tabs.grid(row=0, column=0, sticky='NWES')
+
+
+ # Make canvas plotter to stretch
+ self.rightside_frame.grid_rowconfigure(0, weight=1)
+ self.rightside_frame.grid_columnconfigure(0, weight=1)
+
+
+ self.canvases = self.tabs.get_elements()
+
+ # Controls for displacement plot (means etc)
+ displacementplot_options, displacementplot_defaults = inspect_booleans(
+ plot_1d_magnitude, exclude_keywords=['mean_imagefolders'])
+ self.displacement_ticks = TickboxFrame(self.canvases[1], displacementplot_options,
+ defaults=displacementplot_defaults, callback=lambda:self.update_plot(1))
+ self.displacement_ticks.grid()
+
+ xyplot_options, xyplot_defaults = inspect_booleans(
+ plot_xy_trajectory)
+ self.xy_ticks = TickboxFrame(self.canvases[2], xyplot_options,
+ defaults=xyplot_defaults, callback=lambda:self.update_plot(2))
+ self.xy_ticks.grid()
+
+
+
+ # Controls for the vector plot
+ vectorplot_options, vectorplot_defaults = inspect_booleans(
+ plot_3d_vectormap, exclude_keywords=[])
+ self.vectorplot_ticks = TickboxFrame(self.canvases[3], vectorplot_options,
+ defaults=vectorplot_defaults, callback=lambda:self.update_plot(3))
+ self.vectorplot_ticks.grid()
+
+ tk.Button(self.canvases[3], text='Save animation', command=self.save_3d_animation).grid()
+
+
+ self.default_button_bg = self.button_rois.cget('bg')
+
+ self.plotter = RecordingPlotter(self.core)
+
+ # Add buttons for selecting single repeats from a recording
+ self.repetition_selector = RepetitionSelector(self.rightside_frame, self.plotter, self.core,
+ update_command=lambda: self.on_recording_selection('current'))
+ self.repetition_selector.grid(row=1, column=0)
+
+
+ tk.Button(self.repetition_selector, text='Copy data',
+ command=self.copy_plotter_to_clipboard).grid(row=0, column=5)
+
+ tk.Button(self.repetition_selector, text='Save view...',
+ command=self.save_plotter_view).grid(row=0, column=6)
+
+
+
+
+ def _color_specimens(self, specimens):
+ '''
+ See _color_recordings for reference.
+ '''
+ colors = []
+ for specimen in specimens:
+ analyser = self.core.get_manalyser(specimen, no_data_load=True)
+ color = 'yellow'
+ if analyser.are_rois_selected():
+ color = 'green'
+ if analyser.is_measured():
+ color = 'white'
+
+ colors.append(color)
+ return colors
+
+
+ def save_3d_animation(self):
+
+ def callback():
+ self.canvases[3].update()
+
+ fig, ax = self.canvases[3].get_figax()
+ save_3d_animation(self.core.analyser, ax=ax, interframe_callback=callback)
+
+
+ def copy_to_csv(self, formatted):
+ with open(os.path.join(GONIODIR, 'clipboard.csv'), 'w') as fp:
+ fp.write(formatted.replace('\t', ','))
+
+
+ def specimen_traces_to_clipboard(self, mean=False):
+ '''
+ If mean==True, copy only the average trace.
+ Otherwise, copy all the traces of the fly.
+ '''
+
+ formatted = ''
+
+ # Always first clear clipboard; If something goes wrong, the user
+ # doesn't want to keep pasting old data thinking it's new.
+ self.root.clipboard_clear()
+
+ if self.core.selected_recording is None:
+ return None
+
+ data = []
+
+
+ for pos_folder in self.core.analyser.list_imagefolders():
+ all_movements = self.core.analyser.get_movements_from_folder(pos_folder)
+
+ for eye, movements in all_movements.items():
+ for repetition in range(len(movements)):
+ mag = np.sqrt(np.array(movements[repetition]['x'])**2 + np.array(movements[repetition]['y'])**2)
+ data.append(mag)
+
+ if mean:
+ data = [np.mean(data, axis=0)]
+
+ for i_frame in range(len(data[0])):
+ formatted += '\t'.join([str(data[i_repeat][i_frame]) for i_repeat in range(len(data)) ]) + '\n'
+
+ self.root.clipboard_append(formatted)
+ self.copy_to_csv(formatted)
+
+
+
+ def copy_plotter_to_clipboard(self, force_i_tab=None):
+ '''
+ Copies the data currently visible on the open plotter tab to the clipboard.
+
+ force_i_tab Copy from the specified tab index, instead of
+ the currently opened tab
+ '''
+ formatted = ''
+
+ # Always first clear clipboard; If something goes wrong, the user
+ # doesn't want to keep pasting old data thinking it's new.
+ self.root.clipboard_clear()
+
+ if self.core.selected_recording is None:
+ return None
+
+ if force_i_tab is not None:
+ i_tab = int(force_i_tab)
+ else:
+ i_tab = self.tabs.i_current
+
+ # Make sure we have the correct data in the plot by reissuing
+ # the plotting command
+ self.update_plot(i_tab)
+
+ # Select data based on where we want to copy
+ if i_tab == 0:
+ data = self.plotter.image
+ elif i_tab == 1:
+ data = self.plotter.magnitudes
+ elif i_tab == 2:
+ data = self.plotter.xys
+ data = list(itertools.chain(*data))
+ elif i_tab == 3:
+ raise NotImplementedError('Cannot yet clipboard vectormap data')
+
+ # Format the data for tkinter clipboard copy
+ for i_frame in range(len(data[0])):
+ formatted += '\t'.join([str(data[i_repeat][i_frame]) for i_repeat in range(len(data)) ]) + '\n'
+
+ self.root.clipboard_append(formatted)
+ self.copy_to_csv(formatted)
+
+
+
+ def save_plotter_view(self):
+ '''
+ Launches a save dialog for the current plotter view.
+ '''
+ fig, ax = self.canvases[self.tabs.i_current].get_figax()
+
+ dformats = fig.canvas.get_supported_filetypes()
+ formats = [(value, '*.'+key) for key, value in sorted(dformats.items())]
+
+ # Make png first
+ if 'png' in dformats.keys():
+ i = formats.index((dformats['png'], '*.png'))
+ formats.insert(0, formats.pop(i))
+
+ fn = filedialog.asksaveasfilename(title='Save current view',
+ initialdir=self.last_saveplotter_dir,
+ filetypes=formats)
+
+ if fn:
+ self.last_saveplotter_dir = os.path.dirname(fn)
+
+ fig.savefig(fn, dpi=1200)
+
+
+
+
+ def _color_recordings(self, recordings):
+ '''
+ Returns a list of colours, each corresponding to a recording
+ in recordings, based on whether the ROIs have been selected or
+ movements measured for the recording.
+
+ yellow No ROIs, no movements
+ green ROIs, no movements
+ white ROIs and movements
+ '''
+ colors = []
+ for recording in recordings:
+ color = 'yellow'
+ if self.core.analyser.folder_has_rois(recording):
+ color = 'green'
+ if self.core.analyser.folder_has_movements(recording):
+ color = 'white'
+
+ colors.append(color)
+ return colors
+
+
+ def on_specimen_selection(self, specimen):
+ '''
+ When a selection happens in the specimens listbox.
+ '''
+ self.specimen_control_frame.config(text=specimen)
+
+ self.core.set_current_specimen(specimen)
+
+
+ # Recordings box
+ recordings = self.core.analyser.list_imagefolders()
+ self.recording_box.enable()
+ self.recording_box.set_selections(recordings, colors=self._color_recordings(recordings))
+
+
+ # Logic to set the buttons active/inactive and update their texts
+ if self.core.analyser.are_rois_selected():
+
+ self.button_rois.config(text='Reselect ROIs')
+ self.button_rois.config(bg=self.default_button_bg)
+
+ self.button_measure.config(state=tk.NORMAL)
+
+ self.button_one_roi.config(state=tk.NORMAL)
+
+ # Enable image_folder buttons
+ for button in self.buttons_frame_3.get_buttons():
+ button.config(state=tk.NORMAL)
+
+ if self.core.analyser.is_measured():
+ self.button_measure.config(text='Remeasure movement')
+ self.button_measure.config(bg=self.default_button_bg)
+ else:
+ self.button_measure.config(text='Measure movement')
+ self.button_measure.config(bg='green')
+ else:
+ #self.recording_box.disable()
+
+ self.button_rois.config(text='Select ROIs')
+ self.button_rois.config(bg='yellow')
+
+ self.button_measure.config(state=tk.DISABLED)
+ self.button_measure.config(text='Measure movement')
+ self.button_measure.config(bg=self.default_button_bg)
+
+ self.button_one_roi.config(state=tk.DISABLED)
+
+ # Disable image_folder buttons
+ for button in self.buttons_frame_3.get_buttons():
+ button.config(state=tk.DISABLED)
+
+
+
+ if self.core.analyser.are_rois_selected():
+ self.core.analyser.load_ROIs()
+
+ # Loading cached analyses and setting the recordings listbox
+
+ if self.core.analyser.is_measured():
+ self.core.analyser.load_analysed_movements()
+ #self.recording_box.enable()
+
+
+ N_rois = self.core.analyser.count_roi_selected_folders()
+ N_image_folders = len(self.core.analyser.list_imagefolders())
+ self.status_rois.config(text='ROIs selected {}/{}'.format(N_rois, N_image_folders))
+
+ try:
+ self.correction = self.core.analyser.get_antenna_level_correction()
+ except Exception:
+ self.correction = False
+ if self.correction is not False:
+ self.status_antenna_level.config(text='Zero corrected, {:.2f} degrees'.format(self.correction))
+ else:
+ self.status_antenna_level.config(text='Not zero corrected')
+
+ self.status_active_analysis.config(text='Active analysis: {}'.format(self.core.analyser.active_analysis))
+
+
+ # FIXME Instead of destroying the tickbox, make changes to tk_steroids
+ # so that the selections can be reset
+ self.tickbox_analyses.grid_forget()
+ self.tickbox_analyses.destroy()
+ self.tickbox_analyses = TickboxFrame(self.specimen_control_frame, self.core.analyser.list_analyses(),
+ defaults=[self.core.analyser.active_analysis == an for an in self.core.analyser.list_analyses()],
+ ncols=4, callback=lambda: self.update_plot(None))
+ self.tickbox_analyses.grid(row=5, column=0, sticky='W')
+
+ self.button_rois.config(state=tk.NORMAL)
+
+
+
+ def on_recording_selection(self, selected_recording):
+ '''
+ When a selection happens in the recordings listbox.
+
+ selected_recording Name of the recording. If 'current', keeps the current
+ '''
+ if selected_recording == 'current':
+ selected_recording = self.core.selected_recording
+ else:
+ self.core.set_selected_recording(selected_recording)
+
+
+ print(self.core.analyser.get_recording_time(selected_recording))
+
+ angles = [list(angles_from_fn(selected_recording))]
+ to_degrees(angles)
+ horizontal, vertical = angles[0]
+ self.status_horizontal.config(text='Horizontal angle {:.2f} degrees'.format(horizontal))
+ self.status_vertical.config(text='Vertical angle {:.2f} degrees'.format(vertical))
+
+
+ # Plotting only the view we have currently open
+ self.update_plot(self.tabs.i_current)
+
+
+
+ def update_plot(self, i_plot):
+ '''
+ i_plot : int or None
+ Index of the plot (from 0 to N-1 tabs) or None just to update
+ '''
+ if self.core.selected_recording is None:
+ return None
+
+ if i_plot is None:
+ i_plot = self.tabs.i_current
+
+ fig, ax = self.canvases[i_plot].get_figax()
+
+ if i_plot == 0:
+ self.plotter.ROI(ax)
+ else:
+
+ ax.clear()
+
+ remember_analysis = self.core.analyser.active_analysis
+
+ for analysis in [name for name, state in self.tickbox_analyses.states.items() if state]:
+
+ self.core.analyser.active_analysis = analysis
+
+ if i_plot == 1:
+ self.plotter.magnitude(ax, **self.displacement_ticks.states)
+ elif i_plot == 2:
+ self.plotter.xy(ax, **self.xy_ticks.states)
+ elif i_plot == 3:
+ self.plotter.vectormap(ax, **self.vectorplot_ticks.states)
+
+ self.core.analyser.active_analysis = remember_analysis
+
+
+ self.canvases[i_plot].update()
+
+ self.repetition_selector.update_text()
+
+
+ def update_specimen(self, changed_specimens=False):
+ '''
+ Updates GUI colors, button states etc. to right values.
+
+ Call this if there has been changes to specimens/image_folders by an
+ external process or similar.
+ '''
+ if changed_specimens:
+ specimens = self.core.list_specimens()
+ self.specimen_box.set_selections(specimens, self._color_specimens(specimens))
+
+ if self.core.current_specimen is not None:
+ self.on_specimen_selection(self.core.current_specimen)
+
+
+
+
+
+def main():
+
+ # Test the prefix only; plain 'win' in sys.platform would also match 'darwin'
+ if sys.platform.startswith('win'):
+ ctypes.windll.shcore.SetProcessDpiAwareness(1)
+
+ root = tk.Tk()
+ root.title('Gonio analysis - Tkinter GUI - {}'.format(__version__))
+ root.columnconfigure(0, weight=1)
+ root.rowconfigure(0, weight=1)
+ root.minsize(800,600)
+ ExamineView(root).grid(sticky='NSWE')
+ root.mainloop()
+
+
+if __name__ == "__main__":
+ main()
+
diff --git a/gonio-analysis/gonioanalysis/tkgui/menu_commands.py b/gonio-analysis/gonioanalysis/tkgui/menu_commands.py
new file mode 100644
index 0000000..f0bd082
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/tkgui/menu_commands.py
@@ -0,0 +1,620 @@
+'''
+This module contains the menu bar command classes, that inherit from
+tk_steroids' MenuMaker for easy initialization.
+
+At the beginning of the module there are some helper functions.
+'''
+
+
+import os
+
+import numpy as np
+import tkinter as tk
+import tkinter.messagebox as messagebox
+import tkinter.filedialog as filedialog
+import tkinter.simpledialog as simpledialog
+
+from tk_steroids.dialogs import popup_tickselect, popup
+from tk_steroids.elements import DropdownList
+from tk_steroids.menumaker import MenuMaker
+from tk_steroids.datamanager import ListManager
+
+import gonioanalysis
+from gonioanalysis.directories import USER_HOMEDIR, ANALYSES_SAVEDIR
+from gonioanalysis.droso import SpecimenGroups
+from gonioanalysis.drosom import linked_data
+from gonioanalysis.drosom import kinematics
+from gonioanalysis.drosom import sinesweep
+from gonioanalysis.drosom.reports.left_right import left_right_displacements, lrfiles_summarise
+from gonioanalysis.drosom.reports.repeats import mean_repeats, repeat_stds
+from gonioanalysis.drosom.reports.stats import response_magnitudes
+from gonioanalysis.tkgui import settings
+from gonioanalysis.tkgui.run_measurement import MeasurementWindow
+from gonioanalysis.tkgui.widgets import (
+ select_specimens,
+ select_specimen_groups,
+ ZeroCorrect,
+ CompareVectormaps,
+ ImagefolderMultisel,
+ )
+
+
+
+
+def ask_string(title, prompt, tk_parent):
+ '''
+ Asks the user for a string.
+ '''
+ string = simpledialog.askstring(title, prompt, parent=tk_parent)
+ return string
+
+
+
+def prompt_result(tk_root, string, title='Message'):
+ '''
+ Shows the result and also copies it to the clipboard.
+ '''
+ tk_root.clipboard_clear()
+ tk_root.clipboard_append(string)
+
+ messagebox.showinfo(title=title, message=string)
+
+
+
+class ModifiedMenuMaker(MenuMaker):
+ '''
+ Modify the MenuMaker so that at object initialization, we pass
+ the common core to all of the command objects.
+ '''
+
+ def __init__(self, tk_root, core, *args, **kwargs):
+ '''
+ core An instance of the core.py Core Class
+ '''
+ super().__init__(*args, **kwargs)
+ self.core = core
+ self.tk_root = tk_root
+
+ self.replacement_dict['DASH'] = '-'
+ self.replacement_dict['_'] = ' '
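+
+ # With these replacements the menu labels are derived from method names:
+ # e.g. 'measure_movement_DASH_in_absolute_coordinates' presumably
+ # renders as 'measure movement - in absolute coordinates'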
+
+ def _message(self, message, **kwargs):
+ if message == 'nospecimen':
+ message = 'Select a specimen first'
+
+ prompt_result(self.tk_root, message, **kwargs)
+
+ def _ask_string(self, message, title='Input text'):
+ return ask_string(title, message, self.tk_root)
+
+
+class FileCommands(ModifiedMenuMaker):
+ '''
+ File menu commands for examine view.
+ '''
+
+ def _force_order(self):
+ '''
+ Let's force menu ordering. See the documentation from the
+ menu maker.
+ '''
+ menu = ['set_data_directory',
+ 'add_data_directory',
+ 'settings',
+ '.',
+ 'exit']
+ return menu
+
+
+ def set_data_directory(self, append=False):
+ '''
+ Asks user for the data directory and sets it active in Core.
+ '''
+ previousdir = settings.get('last_datadir', default=USER_HOMEDIR)
+
+ # Check if the previous data directory still exists (it could be on a USB drive, for example)
+ if not os.path.isdir(previousdir):
+ previousdir = USER_HOMEDIR
+
+ directory = filedialog.askdirectory(
+ parent=self.tk_root,
+ title='Select directory containing specimens',
+ mustexist=True,
+ initialdir=previousdir
+ )
+
+ if not directory:
+ return None
+
+ if not append:
+ self.core.set_data_directory([directory])
+ else:
+ self.core.set_data_directory(self.core.data_directory + [directory])
+ self.core.update_gui(changed_specimens=True)
+
+ settings.set('last_datadir', directory)
+
+
+ def add_data_directory(self):
+ '''
+ Like set_data_directory, but instead of replacing the data directory,
+ combines the entries from the previous and the new data directories.
+ '''
+ self.set_data_directory(append=True)
+
+
+ def settings(self):
+ pass
+
+
+ def exit(self):
+ self.tk_root.winfo_toplevel().destroy()
+
+
+
+
+class ImageFolderCommands(ModifiedMenuMaker):
+ '''
+ Commands for the currently selected image folder.
+ '''
+
+ def _force_order(self):
+ return ['select_ROIs', 'measure_movement',
+ 'measure_movement_DASH_in_absolute_coordinates',
+ '.',
+ 'max_of_the_mean_response',
+ 'half_rise_time',
+ 'latency']
+
+ def max_of_the_mean_response(self):
+
+ result = kinematics.mean_max_response(self.core.analyser, self.core.selected_recording)
+ prompt_result(self.tk_root, result)
+
+
+ def half_rise_time(self):
+ result = kinematics.sigmoidal_fit(self.core.analyser, self.core.selected_recording)[2]
+ prompt_result(self.tk_root, str(np.mean(result)))
+
+
+ def latency(self):
+ result = kinematics.latency(self.core.analyser, self.core.selected_recording)
+ prompt_result(self.tk_root, str(np.mean(result)))
+
+
+ def select_ROIs(self):
+ self.core.analyser.select_ROIs(callback_on_exit=self.core.update_gui,
+ reselect_fns=[self.core.selected_recording], old_markings=True)
+
+
+ def measure_movement(self, absolute_coordinates=False):
+ '''
+ Run Movemeter (cross-correlation) on the selected image folder.
+ '''
+ func = lambda stop: self.core.analyser.measure_both_eyes(only_folders=str(self.core.selected_recording), absolute_coordinates=absolute_coordinates, stop_event=stop)
+
+ MeasurementWindow(self.tk_root, [func], title='Measure movement', callback_on_exit=lambda: self.core.update_gui(changed_specimens=True))
+
+
+ def measure_movement_DASH_in_absolute_coordinates(self):
+ self.measure_movement(absolute_coordinates=True)
+
+
+
+class SpecimenCommands(ModifiedMenuMaker):
+ '''
+ Commands for the currently selected specimen.
+ '''
+
+ def _force_order(self):
+ return ['set_active_analysis', 'set_vector_rotation',
+ '.',
+ 'select_ROIs', 'measure_movement', 'set_vertical_zero_rotation',
+ '.',
+ 'measure_movement_DASH_in_absolute_coordinates',
+ '.',
+ 'mean_displacement_over_time',
+ '.']
+
+
+ def set_active_analysis(self):
+
+ name = ask_string('Active analysis', 'Give new or existing analysis name (empty for default)', self.tk_root)
+
+ self.core.active_analysis = name
+ if self.core.analyser:
+ self.core.analyser.active_analysis = name
+ self.tk_root.status_active_analysis.config(text='Active analysis: {}'.format(self.core.active_analysis))
+
+ self.core.update_gui(changed_specimens=True)
+
+
+ def set_vector_rotation(self):
+
+ rotation = ask_string('Vector rotation', 'Give the vector rotation in degrees (empty for none)', self.tk_root)
+
+ if rotation:
+ self.core.analyser.vector_rotation = float(rotation)
+ else:
+ self.core.analyser.vector_rotation = None
+
+
+
+ def select_ROIs(self):
+ '''
+ Select regions of interests (ROIs) for the currently selected specimen.
+ '''
+
+ # Ask confirmation if ROIs already selected
+ if self.core.analyser.are_rois_selected():
+ sure = messagebox.askokcancel('Reselect ROIs', 'Are you sure you want to reselect ROIs?')
+ if not sure:
+ return None
+
+ self.core.analyser.select_ROIs(callback_on_exit=lambda: self.core.update_gui(changed_specimens=True))
+
+
+
+ def measure_movement(self, absolute_coordinates=False):
+ '''
+ Run Movemeter (cross-correlation) on the specimen.
+ '''
+ if not self.core.current_specimen:
+ self._message('nospecimen')
+ return None
+
+ # Ask confirmation if ROIs already selected
+ if self.core.analyser.is_measured():
+ sure = messagebox.askokcancel('Remeasure movements', 'Are you sure you want to remeasure?')
+ if not sure:
+ return None
+
+ func = lambda stop: self.core.analyser.measure_both_eyes(absolute_coordinates=absolute_coordinates, stop_event=stop)
+
+ if self.core.analyser.__class__.__name__ != 'OAnalyser':
+ MeasurementWindow(self.tk_root, [func], title='Measure movement', callback_on_exit=lambda: self.core.update_gui(changed_specimens=True))
+ else:
+ # OAnalyser; Threading in MeasurementWindow would cause problems for plotting
+ func(stop=None)
+ self.core.update_gui(changed_specimens=True)
+
+
+ def measure_movement_DASH_in_absolute_coordinates(self):
+ self.measure_movement(absolute_coordinates=True)
+
+
+ def set_vertical_zero_rotation(self):
+ '''
+ Start antenna level search for the current specimen (zero correction)
+ '''
+
+ # Try to close and destroy if any other antenna_level
+ # windows are open (by accident)
+ try:
+ self.correct_window.destroy()
+ except Exception:
+ # Nothing to destroy
+ pass
+
+ if not self.core.current_specimen:
+ self._message("nospecimen")
+ else:
+
+ self.correct_window = tk.Toplevel()
+ self.correct_window.title('Zero correction - {}'.format(self.core.current_specimen))
+ self.correct_window.grid_columnconfigure(0, weight=1)
+ self.correct_window.grid_rowconfigure(0, weight=1)
+
+ def callback():
+ self.correct_window.destroy()
+ self.core.update_gui()
+
+ self.correct_frame = ZeroCorrect(self.correct_window,
+ self.core.get_specimen_fullpath(),
+ 'alr_data',
+ callback=callback)
+ self.correct_frame.grid(sticky='NSEW')
+
+
+ def vectormap_DASH_interactive_plot(self):
+ self.core.adm_subprocess('current', '-A vectormap')
+
+
+ def vectormap_DASH_rotating_video(self):
+ self.core.adm_subprocess('current', '--tk_waiting_window -A vectormap_video')
+
+
+ def vectormap_DASH_export_npy(self):
+ analysername = self.core.analyser.get_specimen_name()
+ fn = tk.filedialog.asksaveasfilename(initialfile=analysername, defaultextension='.npy')
+ if fn:
+ base = fn[:-len('.npy')] if fn.endswith('.npy') else fn
+ for eye in ['left', 'right']:
+ d3_vectors = self.core.analyser.get_3d_vectors(eye)
+ np.save(base+'_'+eye+'.npy', d3_vectors)
+
+
+ def mean_displacement_over_time(self):
+ self.core.adm_subprocess('current', '-A magtrace')
+
+
+ def mean_latency_by_sigmoidal_fit(self):
+ results_string = ''
+ for image_folder in self.core.analyser.list_imagefolders():
+ result = kinematics.sigmoidal_fit(self.core.analyser, image_folder)[2]
+ results_string += '{} {}\n'.format(image_folder, np.mean(result))
+
+
+ prompt_result(self.tk_root, results_string)
+
+
+class ManySpecimenCommands(ModifiedMenuMaker):
+ '''
+ Commands for all of the specimens in the current data directory.
+ Usually involves a checkbox to select the wanted specimens.
+ '''
+
+ def _force_order(self):
+ return ['measure_movements_DASH_list_all', 'measure_movements_DASH_list_only_unmeasured',
+ 'measure_movements_DASH_in_absolute_coordinates',
+ '.',
+ 'averaged_vectormap_DASH_interactive_plot', 'averaged_vectormap_DASH_rotating_video',
+ 'averaged_vectormap_DASH_rotating_video_DASH_set_title',
+ '.',
+ 'compare_vectormaps',
+ '.',
+ 'comparision_to_optic_flow_DASH_video',
+ '.',
+ 'export_LR_displacement_CSV',
+ 'export_LR_displacement_CSV_DASH_strong_weak_eye_division',
+ 'save_kinematics_analysis_CSV',
+ 'save_sinesweep_analysis_CSV',
+ '.',
+ 'detailed_export',]
+
+
+ def _batch_measure(self, specimens, absolute_coordinates=False):
+
+ # The lambda needs the specimen=specimen default argument; otherwise only
+ # the last specimen would get analysed N_specimens times
+ targets = [lambda stop, specimen=specimen: self.core.get_manalyser(specimen).measure_both_eyes(absolute_coordinates=absolute_coordinates, stop_event=stop) for specimen in specimens]
+
+
+ if self.core.analyser_class.__name__ != 'OAnalyser':
+ MeasurementWindow(self.parent_menu.winfo_toplevel(), targets, title='Measure movement',
+ callback_on_exit=lambda: self.core.update_gui(changed_specimens=True))
+ else:
+ # For OAnalyser; Threading in MeasurementWindow causes problems for plotting
+ for target in targets:
+ target(stop=None)
+ self.core.update_gui(changed_specimens=True)
+
+
+ def measure_movements_DASH_list_all(self):
+
+ select_specimens(self.core, self._batch_measure, with_rois=True)
+
+
+ def measure_movements_DASH_list_only_unmeasured(self):
+
+ select_specimens(self.core, self._batch_measure, with_rois=True, with_movements=False)
+
+
+ def measure_movements_DASH_in_absolute_coordinates(self):
+ func = lambda specimens: self._batch_measure(specimens, absolute_coordinates=True)
+ select_specimens(self.core, func, with_rois=True)
+
+
+
+ def averaged_vectormap_DASH_interactive_plot(self):
+ select_specimens(self.core, lambda specimens: self.core.adm_subprocess(specimens, '--tk_waiting_window --average -A vectormap'), with_movements=True)
+
+
+
+ def averaged_vectormap_DASH_rotating_video(self):
+ select_specimens(self.core, lambda specimens: self.core.adm_subprocess(specimens, '--tk_waiting_window --average -A vectormap_video'), with_movements=True)
+
+
+ def averaged_vectormap_DASH_rotating_video_multiprocessing(self):
+
+ def run_workers(specimens):
+ if len(specimens) > 0:
+ N_workers = os.cpu_count()
+ for i_worker in range(N_workers):
+ if i_worker != 0:
+ additional = '--dont-show'
+ else:
+ additional = ''
+ self.core.adm_subprocess(specimens, '{} --tk_waiting_window --worker-info {} {} --average -A vectormap_video'.format(additional, i_worker, N_workers))
+
+
+ select_specimens(self.core, run_workers, with_movements=True)
+
+
+ def averaged_vectormap_DASH_rotating_video_DASH_set_title(self):
+ title = ask_string('Set title', 'Give video title', self.tk_root)
+ if title:
+ select_specimens(self.core, lambda specimens: self.core.adm_subprocess(specimens, '--tk_waiting_window --average --short-name {} -A vectormap_video'.format(title)), with_movements=True)
+
+
+ def compare_vectormaps(self):
+ popup(self.tk_root, CompareVectormaps, args=[self.core],
+ title='Vectormap comparison')
+
+
+ def comparision_to_optic_flow_DASH_video(self):
+ select_specimens(self.core, lambda specimens: self.core.adm_subprocess(specimens, '--tk_waiting_window --average -A flow_analysis_pitch'), with_movements=True)
+
+
+
+ def export_LR_displacement_CSV(self, strong_weak_division=False):
+ '''
+ Grouped to left right
+ '''
+ def callback(specimens):
+ group_name = ask_string('Group name', 'Name the selected group of specimens', self.tk_root)
+ analysers = self.core.get_manalysers(specimens)
+ left_right_displacements(analysers, group_name,
+ strong_weak_division=strong_weak_division)
+
+ select_specimens(self.core, callback, with_movements=True)
+
+
+ def export_LR_displacement_CSV_DASH_strong_weak_eye_division(self):
+ '''
+ Grouped to strong vs weak eye.
+ '''
+ self.export_LR_displacement_CSV(strong_weak_division=True)
+
+
+ def save_kinematics_analysis_CSV(self):
+
+ def callback(specimens):
+
+ fn = tk.filedialog.asksaveasfilename(title='Save kinematics analysis', initialfile='latencies.csv')
+
+ if fn:
+ analysers = self.core.get_manalysers(specimens)
+ kinematics.save_sigmoidal_fit_CSV(analysers, fn)
+
+
+ select_specimens(self.core, callback, with_movements=True)
+
+
+ def save_sinesweep_analysis_CSV(self):
+ def callback(specimens):
+ analysers = self.core.get_manalysers(specimens)
+ sinesweep.save_sinesweep_analysis_CSV(analysers)
+
+
+ select_specimens(self.core, callback, with_movements=True)
+
+
+ def response_magnitude_stats(self):
+ def callback(grouped_manalysers):
+ response_magnitudes(grouped_manalysers)
+
+ select_specimen_groups(self.core, callback)
+
+
+ def LR_stats(self):
+
+ fns = filedialog.askopenfilenames(initialdir=ANALYSES_SAVEDIR)
+
+ if fns:
+ lrfiles_summarise(fns)
+
+ def LR_kinematics(self):
+
+ fns = filedialog.askopenfilenames(initialdir=ANALYSES_SAVEDIR)
+
+ if fns:
+ lrfiles_summarise(fns, point_type='kinematics')
+
+
+ def detailed_export(self):
+
+ def callback(wanted_imagefolders):
+
+ analysers = self.core.get_manalysers(list(wanted_imagefolders.keys()))
+
+ sel = self._export_selection.ticked[0]
+
+ if 'CSV' in sel:
+ group_name = ask_string('Group name', 'Name the selected group of specimens', self.tk_root)
+ if sel == 'Mean displacement curve CSV':
+ left_right_displacements(analysers, group_name,
+ wanted_imagefolders=wanted_imagefolders)
+ elif sel == 'Mean over repeats CSV':
+ mean_repeats(analysers, group_name,
+ wanted_imagefolders=wanted_imagefolders)
+ elif sel == 'Stds over repeats CSV':
+ repeat_stds(analysers, group_name,
+ wanted_imagefolders=wanted_imagefolders)
+ elif sel == 'Displacement probability TIFF':
+ specimens = [';'.join([specimen, *image_folders]) for specimen, image_folders in wanted_imagefolders.items()]
+ self.core.adm_subprocess(specimens, '-A magnitude_probability')
+ elif sel == 'XY trajectory plot':
+ specimens = [';'.join([specimen, *image_folders]) for specimen, image_folders in wanted_imagefolders.items()]
+ self.core.adm_subprocess(specimens, '-A xy_trajectory')
+ else:
+ raise ValueError('Invalid export type selection')
+
+ top, imagefolder_multisel = popup(self.tk_root, ImagefolderMultisel,
+ args=[self.core, callback], title='Detailed export...')
+
+ self._export_selection = DropdownList(top,
+ ['Mean displacement curve CSV', 'Mean over repeats CSV',
+ 'Stds over repeats CSV',
+ 'Displacement probability TIFF',
+ 'XY trajectory plot'])
+ self._export_selection.grid()
+
+
+
+class OtherCommands(ModifiedMenuMaker):
+ '''
+ All kinds of various commands and tools.
+ '''
+
+ def _force_order(self):
+ return ['manage_specimen_groups',
+ 'link_ERG_data_from_labbook',
+ '.',
+ 'change_Analyser_DASH_object',
+ '.',
+ 'about']
+
+
+ def manage_specimen_groups(self):
+ '''
+ Fixme: This is a little hacked together.
+ '''
+
+ def _preedit():
+ select_specimens(self.core, _postedit)
+
+ def _postedit(specimens):
+ self.dm.im2.set_data(specimens)
+ self.dm.im2.postchange_callback(self.dm.im2.data)
+
+ def onsave():
+ self.groups.groups = self.dm.im1.data
+ self.groups.save_groups()
+
+
+ def oncancel():
+ top.destroy()
+
+ self.groups = SpecimenGroups()
+
+ top = tk.Toplevel(self.tk_root)
+
+ self.dm = ListManager(top, start_data=self.groups.groups,
+ save_callback=onsave, cancel_callback=oncancel)
+ tk.Button(self.dm.im2.buttons, text='Select specimens', command=_preedit).grid(row=3, column=1)
+ self.dm.grid(row=1, column=1, sticky='NSWE')
+ top.rowconfigure(1, weight=1)
+ top.columnconfigure(1, weight=1)
+
+ top.mainloop()
+
+
+ def link_ERG_data_from_labbook(self):
+ select_specimens(self.core, linked_data.link_erg_labbook, command_args=[lambda: filedialog.askopenfilename(title='Select ERG'), lambda: filedialog.askdirectory(title='Select data folder')], return_manalysers=True )
+
+
+
+ def change_Analyser_DASH_object(self):
+
+ popup_tickselect(self.tk_root,
+ [c.__name__ for c in self.core.analyser_classes],
+ lambda selections: self.core.set_analyser_class(selections[0]),
+ ticked=[self.core.analyser_class],
+ single_select=True)
+
+
+ def about(self):
+ message = 'Gonio analysis'
+ message += "\nVersion {}".format(gonioanalysis.__version__)
+ message += '\n\nGPL-3.0 License'
+ tk.messagebox.showinfo(title='About', message=message)
+
diff --git a/gonio-analysis/gonioanalysis/tkgui/plotting.py b/gonio-analysis/gonioanalysis/tkgui/plotting.py
new file mode 100644
index 0000000..e0c73ef
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/tkgui/plotting.py
@@ -0,0 +1,204 @@
+import os
+import sys
+
+import numpy as np
+
+import matplotlib.pyplot as plt
+import matplotlib.cm
+import matplotlib.patches
+import matplotlib.widgets
+import tifffile
+
+from tk_steroids.matplotlib import CanvasPlotter
+
+from gonioanalysis.drosom.plotting.basics import (
+ plot_1d_magnitude,
+ plot_xy_trajectory,
+ plot_3d_vectormap
+ )
+
+class RecordingPlotter:
+ '''
+ Plotting single image folder data on the tk_steroids.matplotlib.CanvasPlotter.
+
+ Tkinter independent in the sense that CanvasPlotter could easily be reimplemented
+ on any other GUI toolkit (it's just a fusion of a tkinter Canvas and a matplotlib figure).
+
+ Attributes
+ ----------
+ self.core
+ Reference to the Core instance
+ self.selected_recording
+ The recording currently plotted at the plotter
+ self.i_repeat
+ None if show all traces, otherwise the index of repeat to be shown
+ self.N_repeats
+ The total number of repeats in the image folder.
+
+ '''
+
+ def __init__(self, core):
+ '''
+ core An instance of the Core class in core.py
+ '''
+
+ self.core = core
+
+ # Keeps internally track of the current recording on plot
+ self.selected_recording = None
+
+ self.colorbar = None
+ self.roi_rectangles = []
+
+ self.N_repeats = 0
+ self.i_repeat = None
+
+
+ def _check_recording(self, skip_datafetch=False):
+ '''
+ Check from the core if the selected recording has changed.
+ '''
+ selected_recording = self.core.selected_recording
+
+ if self.selected_recording != selected_recording:
+ self.i_repeat = None
+
+ self.selected_recording = selected_recording
+
+ if not skip_datafetch:
+ if self.core.analyser.is_measured():
+ self.movement_data = self.core.analyser.get_movements_from_folder(selected_recording)
+ self.N_repeats = len(next(iter(self.movement_data.values())))
+ else:
+ self.movement_data = {}
+ self.N_repeats = 0
+
+
+
+ def magnitude(self, ax, **kwargs):
+ '''
+ Plot a displacement over time of the current specimen/recording.
+ '''
+ self._check_recording(skip_datafetch=True)
+
+ ax, self.magnitudes, self.N_repeats = plot_1d_magnitude(self.core.analyser,
+ self.selected_recording,
+ i_repeat=self.i_repeat,
+ label='EYE-repIREPEAT',
+ ax=ax,
+ **kwargs)
+
+
+ def vectormap(self, ax, **kwargs):
+
+ self.N_repeats = 0
+
+ ax, self.vectors = plot_3d_vectormap(self.core.analyser,
+ ax=ax,
+ **kwargs)
+
+
+
+ def xy(self, ax, **kwargs):
+ '''
+ Plot (x, y) where time is encoded by color.
+ '''
+ self._check_recording()
+
+ ax, self.xys = plot_xy_trajectory([self.core.analyser],
+ {self.core.analyser.name: [self.selected_recording]},
+ i_repeat=self.i_repeat,
+ ax=ax,
+ **kwargs)
+
+
+ def ROI(self, ax):
+ '''
+ Plot specimen/recording image, and the ROIs and imaging parameters on top of it.
+ '''
+ self._check_recording(skip_datafetch=True)
+
+ self.roi_ax = ax
+ fig = ax.get_figure()
+
+ try:
+ self.slider_ax
+ except AttributeError:
+ self.slider_ax = fig.add_axes([0.2, 0.01, 0.6, 0.05])
+
+ # Get a list of image filenames and how many
+ image_fns = self.core.analyser.list_images(self.selected_recording)
+ self.N_repeats = len(image_fns)
+
+ if self.i_repeat:
+ i_frame = self.i_repeat
+ else:
+ i_frame = 0
+
+ image_fn = os.path.join(self.core.analyser.get_specimen_directory(), self.selected_recording, image_fns[i_frame])
+
+ self.image = tifffile.TiffFile(image_fn).asarray(key=0)
+
+ try:
+ self.range_slider
+ except AttributeError:
+ self.range_slider = matplotlib.widgets.Slider(self.slider_ax, 'Range %' , 0, 100, valinit=90, valstep=1)
+ self.range_slider.on_changed(self.update_ROI_plot)
+
+ # Draw ROI rectangles
+ for old_roi in self.roi_rectangles:
+ try:
+ old_roi.remove()
+ except NotImplementedError:
+ # This error occurs when something goes wrong between here and
+ # the ax.add_patch(roi) line. We can just ignore it, because then
+ # the ROI was never added to the ax
+ continue
+ self.roi_rectangles = []
+
+
+ for roi in self.core.analyser.get_rois(self.selected_recording):
+ patch = matplotlib.patches.Rectangle((roi[0], roi[1]), roi[2], roi[3],
+ fill=False, edgecolor='White')
+ self.roi_rectangles.append(patch)
+
+ self.update_ROI_plot(self.range_slider.val)
+
+ for roi in self.roi_rectangles:
+ ax.add_patch(roi)
+
+
+ def update_ROI_plot(self, slider_value):
+ '''
+ This gets called when the brightness cap slider is moved.
+ '''
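+ # Cap the pixel values at the given percentile, then normalise to the 0..1 range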
+ clipped = np.clip(self.image, 0, np.percentile(self.image, slider_value))
+ clipped /= np.max(clipped)
+
+ try:
+ self.roi_imshow.set_data(clipped)
+ except AttributeError:
+ self.roi_imshow = self.roi_ax.imshow(clipped, cmap='gray')
+
+ imaging_params = self.core.analyser.get_imaging_parameters(self.selected_recording)
+ if imaging_params:
+ text = '\n'.join(['{}: {}'.format(setting, value) for setting, value in imaging_params.items()])
+ else:
+ text = 'Unable to fetch imaging parameters'
+
+ try:
+ self.roi_text.set_text(text)
+ except AttributeError:
+ self.roi_text = self.roi_ax.text(0,1, text, ha='left', va='top', fontsize='small',
+ transform=self.roi_ax.transAxes)
+
+
+
+def main():
+ pass
+
+
+if __name__ == "__main__":
+ main()
+
diff --git a/gonio-analysis/gonioanalysis/tkgui/run_measurement.py b/gonio-analysis/gonioanalysis/tkgui/run_measurement.py
new file mode 100644
index 0000000..c0d67cb
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/tkgui/run_measurement.py
@@ -0,0 +1,116 @@
+
+import io
+import sys
+import threading
+
+import tkinter as tk
+from tk_steroids.elements import BufferShower
+
+class MeasurementWindow:
+ '''
+ Sets the manalyser to do the movement measurements and
+ opens a window showing the progress.
+
+ Because this temporarily redirects stdout to a StringIO, it's best
+ to run this in a subprocess.
+ '''
+
+ def __init__(self, tk_root, thread_targets, title='', callback_on_exit=None):
+ '''
+ tk_root Tkinter root object, needed for scheduling events with after-method
+ thread_targets List of callables; each is run in a thread and receives a threading.Event stop signal
+ '''
+ self.root = tk_root
+ self.title = title
+ self.callback_on_exit = callback_on_exit
+
+ self.i_target = -1
+ self.thread_targets = thread_targets
+ self.processes = []
+
+ self.all_run = False
+ self.exit = False
+ self.run()
+
+
+ def run(self):
+ self.top = tk.Toplevel()
+ self.top.title(self.title)
+
+ self.oldout = sys.stdout
+ sys.stdout = io.StringIO()
+
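+        # BufferShower polls this StringIO and mirrors anything printed
+        # into a text element inside the progress window.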
+ BufferShower(self.top, sys.stdout).grid()
+ self.cancel_button = tk.Button(self.top, text='Cancel', command=self.on_cancel)
+ self.cancel_button.grid()
+
+ self.check_finished()
+
+
+ def _run_next_target(self):
+ '''
+ Set next manalyser to work or return False if none left.
+ '''
+ self.i_target += 1
+
+ if self.i_target == len(self.thread_targets):
+ return False
+
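+        # Each target gets a fresh stop Event as its only argument so that
+        # pressing Cancel can request a cooperative stop of the worker.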
+ self.stop_event = threading.Event()
+
+ p = threading.Thread(target=self.thread_targets[self.i_target],
+ args=[self.stop_event,])
+
+ p.start()
+ self.processes.append(p)
+
+ return True
+
+
+ def alives(self):
+ return [process.is_alive() for process in self.processes]
+
+
+ def check_finished(self):
+ '''
+ Check if all the threads are finished and if there are more targets to run.
+ Reschedule every 1000 ms.
+ '''
+ if not self.exit:
+ if not any(self.alives()):
+ targets_left = self._run_next_target()
+ else:
+ targets_left = True
+
+ if targets_left:
+ self.root.after(1000, self.check_finished)
+ else:
+ self.cancel_button.config(text='Ok')
+ self.all_run = True
+
+
+ def on_cancel(self):
+ self.exit = True
+
+        # Cancelled; only signal the worker thread to stop
+ self.stop_event.set()
+
+ if self.all_run:
+ self.callback_on_exit()
+
+ if not all(self.alives()):
+ self.top.destroy()
+ sys.stdout = self.oldout
+ else:
+ self.root.after(1000, self.on_cancel)
+
+
+def main():
+ '''
+ Read data directory and specimen name from sys.argv
+ and run movement measurement for that manalyser.
+ '''
+ pass
+
+if __name__ == "__main__":
+ main()
diff --git a/gonio-analysis/gonioanalysis/tkgui/settings.py b/gonio-analysis/gonioanalysis/tkgui/settings.py
new file mode 100644
index 0000000..f18b44b
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/tkgui/settings.py
@@ -0,0 +1,10 @@
+import gonioanalysis.settings as settings
+
+DEFAULT_SAVEFN = 'gonioanalysis-tkgui-settings.json'
+
+def set(*args, fn=DEFAULT_SAVEFN, **kwargs):
+ return settings.set(*args, fn=fn, **kwargs)
+
+def get(*args, fn=DEFAULT_SAVEFN, **kwargs):
+ return settings.get(*args, fn=fn, **kwargs)
+
diff --git a/gonio-analysis/gonioanalysis/tkgui/widgets.py b/gonio-analysis/gonioanalysis/tkgui/widgets.py
new file mode 100644
index 0000000..3eb6189
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/tkgui/widgets.py
@@ -0,0 +1,738 @@
+'''
+Gonio-analysis tkinter GUI widgets.
+'''
+import os
+import numpy as np
+import threading
+
+import tkinter as tk
+from tkinter import filedialog
+import tifffile
+
+from tk_steroids.elements import (Tabs,
+ ButtonsFrame,
+ TickboxFrame,
+ Listbox,
+ )
+from tk_steroids.matplotlib import CanvasPlotter
+from tk_steroids.dialogs import TickSelect
+from tk_steroids.routines import inspect_booleans
+from tk_steroids.menumaker import MenuMaker
+
+from gonioanalysis.droso import SpecimenGroups
+from gonioanalysis.drosom.analysing import MAverager
+from gonioanalysis.antenna_level import (
+ load_drosom,
+ save_antenna_level_correction,
+ load_reference_fly,
+ )
+from gonioanalysis.drosom.plotting.basics import (
+ plot_3d_vectormap,
+ plot_3d_differencemap,
+ )
+
+
+
+
+
+
+
+def select_specimens(core, command, with_rois=None, with_movements=None, with_correction=None,
+ command_args=[], execute_callable_args=True, breaking_args=[()],
+ return_manalysers=False):
+ '''
+ Opens a specimen selection window and after ok runs command using
+ selected specimens list as the only input argument.
+
+    command Command to call after specimen selection
+ with_rois List specimens with ROIs selected if True
+ with_movements List specimens with movements measured if True
+ command_args A list of arguments passed to the command
+ execute_callable_args Callable command_args will get executed and return
+ value is used instead
+ breaking_args If command_args callable return value in this list,
+ halt the command
+ return_manalysers Instead of passing the list of the specimen names as the first
+ argument to the command, already construct MAnalyser objects and pass those
+ '''
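+    # Evaluate callable command_args now; if any result is listed in
+    # breaking_args, the whole selection is aborted.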
+ parsed_args = []
+ for arg in command_args:
+ if execute_callable_args and callable(arg):
+ result = arg()
+ if result in breaking_args:
+ # Halting
+ return None
+ parsed_args.append(result)
+ else:
+ parsed_args.append(arg)
+
+
+ top = tk.Toplevel()
+ top.title('Select specimens')
+ top.grid_columnconfigure(1, weight=1)
+ top.grid_rowconfigure(1, weight=1)
+
+
+ if with_rois or with_movements or with_correction:
+ notify_string = 'Listing specimens with '
+ notify_string += ' and '.join([string for onoff, string in zip([with_rois, with_movements, with_correction],
+ ['ROIs', 'movements', 'correction']) if onoff ])
+ tk.Label(top, text=notify_string).grid(row=0, column=1)
+
+ specimens = core.list_specimens(with_rois=with_rois, with_movements=with_movements, with_correction=with_correction)
+
+ groups = list(SpecimenGroups().groups.keys())
+
+ if return_manalysers:
+        # Wrap the command so that the selected specimen names are first
+        # converted into MAnalyser objects before the command is called.
+ def commandx(specimens, *args, **kwargs):
+ manalysers = core.get_manalysers(specimens)
+ return command(manalysers, *args, **kwargs)
+ else:
+ commandx = command
+
+ tabs = Tabs(top, ['Specimens', 'Groups'])
+
+ for tab, selectable in zip(tabs.tabs, [specimens, groups]):
+ selector = TickSelect(tab, selectable, commandx, callback_args=parsed_args)
+ selector.grid(sticky='NSEW', row=1, column=1)
+
+ tk.Button(selector, text='Close', command=top.destroy).grid(row=2, column=1)
+
+ tabs.grid(row=1, column=1,sticky='NSEW')
+
+
+def select_specimen_groups(core, command):
+ '''
+ command gets the following dictionary
+ {'group1_name': [manalyser1_object, ...], ...}
+ '''
+ top = tk.Toplevel()
+ top.title('Select specimen groups')
+ top.grid_columnconfigure(0, weight=1)
+ top.grid_rowconfigure(1, weight=1)
+
+
+ gm = SpecimenGroups()
+ gm.load_groups()
+
+ def commandx(group_names):
+ grouped = {}
+ for group_name in group_names:
+ print(gm.groups[group_name])
+ manalysers = [core.get_manalyser(specimen) for specimen in gm.groups[group_name]]
+ grouped[group_name] = manalysers
+ command(grouped)
+
+ selector = TickSelect(top, list(gm.groups.keys()), commandx)
+ selector.grid(sticky='NSEW')
+
+ tk.Button(selector, text='Close', command=top.destroy).grid(row=2, column=1)
+
+
+
+class ImagefolderMultisel(tk.Frame):
+ '''
+ Widget to select image folders from the specimens
+
+ Attributes
+ ----------
+ core
+ specimens_listbox
+ imagefolders_listbox
+ buttons_frame
+ '''
+
+ def __init__(self, tk_parent, core, callback, **kwargs):
+ '''
+        **kwargs are passed to core.list_specimens
+ '''
+ tk.Frame.__init__(self, tk_parent)
+
+ self.tk_parent = tk_parent
+ self.core = core
+ self.callback = callback
+ self._separator = ';'
+
+ specimens = core.list_specimens(**kwargs)
+ self.specimens_listbox = Listbox(self, specimens, self.on_specimen_selection)
+ self.specimens_listbox.grid(row=0, column=0, sticky='NSWE')
+
+ self.imagefolders_listbox = Listbox(self, [''], None)
+ self.imagefolders_listbox.grid(row=0, column=1, sticky='NSWE')
+
+ self.buttons_frame = ButtonsFrame(self,
+ button_names=['Add', 'Remove', 'Ok'], horizontal=False,
+ button_commands=[self.on_add_press, self.on_remove_press, self.on_ok])
+ self.buttons_frame.grid(row=0, column=2)
+
+ self.selected_listbox = Listbox(self, [], None)
+ self.selected_listbox.grid(row=0, column=3, sticky='NSWE')
+
+ for i in [0, 1, 3]:
+ self.grid_columnconfigure(i, weight=1)
+ self.grid_rowconfigure(0, weight=1)
+
+ def on_specimen_selection(self, name):
+ analyser = self.core.get_manalyser(name)
+ image_folders = analyser.list_imagefolders()
+ self.imagefolders_listbox.set_selections(image_folders)
+
+
+ def on_add_press(self):
+ image_folder = self.imagefolders_listbox.current
+ if image_folder:
+ sel = self.specimens_listbox.current + self._separator + image_folder
+
+ selections = self.selected_listbox.selections + [sel]
+ self.selected_listbox.set_selections(selections)
+
+
+ def on_remove_press(self):
+ to_remove = self.selected_listbox.current
+ if to_remove:
+ selections = self.selected_listbox.selections
+ selections.remove(to_remove)
+
+ self.selected_listbox.set_selections(selections)
+
+
+ def on_ok(self):
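+        # Selections are "specimen;imagefolder" strings; regroup them into
+        # a dictionary of {specimen: [imagefolder, ...]}.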
+ image_folders = {}
+ for z in self.selected_listbox.selections:
+ s, i = z.split(self._separator)
+ if s not in image_folders:
+ image_folders[s] = []
+ image_folders[s].append(i)
+
+ self.callback(image_folders)
+ self.tk_parent.destroy()
+
+
+
+
+class WaitingFrame(tk.Frame):
+
+ def __init__(self, tk_master, text):
+ tk.Frame.__init__(self, tk_master)
+ tk.Label(self, text=text).grid()
+
+
+class WaitingWindow():
+ '''
+    Spawns a new tkinter root window, so use this only
+    if there is no tkinter root beforehand.
+ '''
+ def __init__(self, title, text):
+
+ self.root = tk.Tk()
+ self.root.title(title)
+ WaitingFrame(self.root, text).grid()
+ self.root.update()
+
+ def close(self):
+ self.root.destroy()
+
+
+
+class ZeroCorrect(tk.Frame):
+ '''
+    Creates a frame where one can perform zero correction (antenna level search)
+    for a specimen using a reference specimen (alr, antenna level reference).
+
+    This is a (maybe better) alternative to the command line tool, antenna_level.py,
+    which uses a binary search to find the match.
+ '''
+
+ def __init__(self, tk_parent, specimen_path, alr_data_path, callback=None):
+ tk.Frame.__init__(self, tk_parent)
+ self.parent = tk_parent
+ self.callback = callback
+
+ self.specimen_name = os.path.basename(specimen_path)
+
+ # Load data
+ self.specimen_pitches, self.specimen_images = load_drosom(specimen_path)
+ #self.reference_pitches, self.reference_images = {fn: pitch for pitch, fn in loadReferenceFly(alr_data_path).items()}
+
+ try:
+ alr_data = load_reference_fly(alr_data_path)
+ except FileNotFoundError:
+ alr_data = {}
+
+        self.reference_pitches, self.reference_images = [], []
+ for pitch, fn in sorted(alr_data.items(), key=lambda x: float(x[0])):
+ self.reference_pitches.append(pitch)
+ self.reference_images.append(fn)
+
+ # Set plotters
+ self.specimen_plotter = CanvasPlotter(self, text=specimen_path)
+ self.specimen_plotter.grid(row=1, column=0, sticky='NSWE')
+
+ self.reference_plotter = CanvasPlotter(self, text='Reference fly')
+ self.reference_plotter.grid(row=1, column=1, sticky='NSEW')
+
+ if not alr_data:
+ self.reference_plotter.ax.text(0.1, 0.1, (
+ "No reference data created. Options:\n"
+ " A) Set correction manually\n"
+ " B) Run scripts/create_alr_data.py"),
+ color='red')
+
+ self.grid_columnconfigure(0, weight=1)
+ self.grid_columnconfigure(1, weight=1)
+ self.grid_rowconfigure(1, weight=1)
+
+
+ # Help text
+ tk.Label(self, text='Rotate the reference until it matches the specimen and press Next image.\nAlternatively, set manual correction.').grid(row=0, column=0, columnspan=2)
+
+ # Set buttons
+ buttons_frame = tk.LabelFrame(self, text='Rotate reference')
+ buttons_frame.grid(row=2, column=0, columnspan=2)
+ steps = [-20, -5, -3, -1, 1, 3, 5, 20]
+ for i_column, step in enumerate(steps):
+ button = tk.Button(buttons_frame, text=str(step), command=lambda step=step: self.rotate_reference(step))
+ button.grid(row=1, column=i_column)
+
+ self.set_button = tk.Button(self, text='Next image', command=self.set_image)
+ self.set_button.grid(row=3, column=0, columnspan=2)
+
+ self.set_manual_button = tk.Button(self, text='Set manual correction...', command=self.set_manual)
+ self.set_manual_button.grid(row=3, column=1, sticky='E')
+
+
+ # Loop variables
+ self.i_specimen = 0
+ self.i_reference = 0
+
+ # Offset between each specimen-reference image is saved here.
+ self.offsets = []
+
+ self.update_plots()
+
+
+ def rotate_reference(self, steps):
+ '''
+ When user clicks to rotate the reference fly.
+ '''
+ self.i_reference += steps
+
+ if self.i_reference >= len(self.reference_pitches):
+ self.i_reference = len(self.reference_pitches) - 1
+ elif self.i_reference < 0:
+ self.i_reference = 0
+
+ self.update_plots()
+
+
+ def set_image(self):
+ '''
+ When user sets the current reference rotation as the best match
+ '''
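+        # Record the pitch difference between the specimen image and the
+        # matched reference image; report() averages these at the end.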
+ offset = float(self.specimen_pitches[self.i_specimen]) - float(self.reference_pitches[self.i_reference])
+ self.offsets.append(offset)
+ self.i_specimen += 1
+
+ if self.i_specimen == len(self.specimen_pitches):
+ self.report()
+ else:
+ self.update_plots()
+
+
+ def update_plots(self):
+ '''
+ Call to update imshow plots.
+ '''
+ if self.reference_images:
+ self.reference_image = tifffile.imread(self.reference_images[self.i_reference])
+ self.reference_plotter.imshow(self.reference_image, cmap='gray', slider=True)
+
+ self.specimen_image = tifffile.imread(self.specimen_images[self.i_specimen])
+ self.specimen_plotter.imshow(self.specimen_image, cmap='gray', slider=True)
+
+
+ def set_manual(self):
+ '''
+ Let the user specify a manual correction, skipping the rotation process.
+ '''
+ value = tk.simpledialog.askstring("Manual correction value",
+ "The vertical angle when the deep\npseudopupils align with the antenna?", parent=self)
+
+ if value:
+ self.offsets = float(value)
+ self.report()
+
+
+ def report(self):
+ '''
+ Report the results with a pop up window
+ '''
+ message = 'Correction value set as {}'.format(np.mean(self.offsets))
+ tk.messagebox.showinfo('Zero correction ready', message, parent=self)
+
+ save_antenna_level_correction(self.specimen_name, np.mean(self.offsets))
+
+ if self.callback:
+ self.callback()
+
+ self.destroy()
+
+
+class RepetitionSelector(tk.Frame):
+
+ def __init__(self, tk_master, RecordingPlotter, core, update_command):
+ '''
+
+ update_command Callable that updates the plots, no input arguments
+ '''
+
+ self.update_command = update_command
+
+ tk.Frame.__init__(self, tk_master)
+ self.core = core
+
+ self.plotter = RecordingPlotter
+
+ self.text = tk.StringVar()
+ self.infotext = tk.Label(self, textvariable = self.text)
+ self.infotext.grid(row=0, column=0)
+
+ self.all = tk.Button(self, text='Show all', command=lambda: self.move_selection(None))
+ self.all.grid(row=0, column=1)
+
+ self.previous = tk.Button(self, text='Previous', command=lambda: self.move_selection(-1))
+ self.previous.grid(row=0, column=2)
+
+ self.next = tk.Button(self, text='Next', command=lambda: self.move_selection(1))
+ self.next.grid(row=0, column=3)
+
+        # Use a separate attribute name so the Button does not shadow the method
+        self.mark_bad_button = tk.Button(self, text='Mark bad', command=self.mark_bad)
+        self.mark_bad_button.grid(row=0, column=4)
+
+ def mark_bad(self):
+ im_folder = self.core.selected_recording
+
+        if self.plotter.i_repeat is not None:
+            self.core.analyser.mark_bad(im_folder, self.plotter.i_repeat)
+
+
+ def move_selection(self, direction):
+ '''
+ None sets plotter to show all repeats
+ 1 or -1 Move to next/previous repetition
+ '''
+
+        if direction is None:
+            self.plotter.i_repeat = None
+        else:
+            if self.plotter.i_repeat is None:
+                self.plotter.i_repeat = 0
+            else:
+                self.plotter.i_repeat += direction
+
+ if self.plotter.i_repeat < 0:
+ self.plotter.i_repeat = 0
+ elif self.plotter.i_repeat >= self.plotter.N_repeats:
+ self.plotter.i_repeat = self.plotter.N_repeats -1
+
+ self.update_text()
+
+ print(self.plotter.i_repeat)
+
+ if self.update_command:
+ self.update_command()
+
+ def update_text(self):
+ if self.plotter.i_repeat is None:
+ isel = None
+ else:
+ isel = self.plotter.i_repeat + 1
+ self.text.set("{}/{}".format(isel, self.plotter.N_repeats))
+
+
+
+class RotationButtons(tk.Frame):
+ '''
+ Create buttons to set a matplotlib 3D plot rotations
+
+    Attributes
+ ----------
+ axes : list
+ Associated matplotlib axes that get rotated
+ buttons_frame : object
+ Buttons frame object containing the tkinter under the buttons attribute
+ rotation_offset : tuple
+ (elev, azim) offset in rotations
+ callback : callable or None
+ Additional callback to be called after changing rotation.
+ '''
+
+ def __init__(self, tk_parent, axes, rotations, callback=None,
+ label='', hide_none=True, rotation_offset=(0,0)):
+ '''
+ tk_parent : object
+ Tkinter parent object
+ axes : list
+ List of matplotlib axes
+ rotations : list of tuples
+ Rotations [(elev, azim), ...]. If any None, keeps the corresponding
+ rotation as it is.
+ callback : None or callable
+ Callback after each rotation update
+ hide_none : bool
+ When one of the rotations is None, hide the None from
+ button text
+ rotation_offset : tuple
+        Offset in elevation and azimuth, respectively, in degrees
+ '''
+ tk.Frame.__init__(self, tk_parent)
+
+ self.axes = axes
+ self.rotation_offset = rotation_offset
+
+ if hide_none:
+ names = []
+ for rotation in rotations:
+ if None in rotation:
+ for r in rotation:
+ if r is not None:
+ names.append(r)
+ else:
+ names.append(rotation)
+ else:
+ names = rotations
+
+ commands = [lambda rot=rot: self.set_rotation(*rot) for rot in rotations]
+ self.buttons_frame = ButtonsFrame(self, names, commands, label=label)
+ self.buttons_frame.grid(row=1, column=2)
+
+ self.callback = callback
+
+
+ def set_rotation(self, elev, azim):
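+        # A None elevation or azimuth keeps the axis' current angle;
+        # otherwise the rotation_offset is added to the requested angle.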
+ for ax in self.axes:
+ if elev is None:
+ uelev = ax.elev
+ else:
+ uelev = elev + self.rotation_offset[0]
+
+ if azim is None:
+ uazim = ax.azim
+ else:
+ uazim = azim + self.rotation_offset[1]
+
+ ax.view_init(uelev, uazim)
+
+ if callable(self.callback):
+ self.callback()
+
+
+
+class CompareVectormaps(tk.Frame):
+ '''
+ Widget to compare two vectormaps interactively to
+ each other.
+
+ Attributes
+ ----------
+ tk_parent : object
+ Parent widget
+ plot_functions : list
+ List of plot functions,
+ default [plot_3d_vectormap, plot_3d_vectormap, plot_3d_differencemap]
+ canvases : list
+ CanvasPlotter objects, canvas2 to show difference
+ tickbox_frames : list
+ List of tickbox frame objects
+ buttons : list
+ Corresponding tk.Buttons under the canvases
+ analysers : list
+ Analyser objects selected by the user (len == 2)
+ '''
+
+
+ def __init__(self, tk_parent, core):
+ class FileMenu(MenuMaker):
+
+ def __init__(self, main_widget, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.main_widget = main_widget
+
+ def save_all_views(self):
+ self.main_widget.savefig()
+
+ def close_window(self):
+ self.main_widget.tk_parent.destroy()
+
+ tk.Frame.__init__(self, tk_parent)
+ self.tk_parent = tk_parent
+
+ self.core = core
+
+ self.plot_functions = [plot_3d_vectormap, plot_3d_vectormap,
+ plot_3d_differencemap]
+ self.canvases = []
+ self.tickbox_frames = []
+ self.buttons = []
+ self.analysers = [None, None]
+
+ self.grid_rowconfigure(0, weight=1)
+
+ axes = []
+ for i in range(3):
+ canvas = CanvasPlotter(self, projection='3d')
+ canvas.grid(row=3, column=i, sticky='NSWE')
+ self.canvases.append(canvas)
+
+ axes.append(canvas.ax)
+ axes[-1].elev = 15
+ axes[-1].azim = 60
+
+ # Plot settings
+ if i in [0,1]:
+ cmd = lambda i=i: self.set_vectormap(i_canvas=i)
+ else:
+ cmd = self.plot_difference
+ options, defaults = inspect_booleans(self.plot_functions[i])
+ tickboxes = TickboxFrame(self, options, defaults=defaults,
+ callback=cmd)
+ tickboxes.grid(row=4, column=i, sticky='NSWE')
+ self.tickbox_frames.append(tickboxes)
+
+ # Main buttons
+ if i in [0, 1]:
+ cmd = lambda i=i: self.select_specimens(i_canvas=i)
+ txt = 'Select specimens...'
+ else:
+ cmd = self.plot_difference
+ txt = 'Compare'
+
+ button = tk.Button(self, text=txt, command=cmd)
+ button.grid(row=45, column=i)
+ self.buttons.append(button)
+
+ self.grid_columnconfigure(i, weight=1)
+
+
+ hors = [-80, -60, -50, -30, -15, 0, 30, 15, 50, 60, 80]
+ verts = hors
+
+ hors = [(None, hor) for hor in hors]
+ verts = [(ver, None) for ver in verts]
+
+ for i, (name, rotations) in enumerate(zip(['Horizontal', 'Vertical'], [hors, verts])):
+ self.rotation_buttons = RotationButtons(self, axes, rotations,
+ label=name+' rotation', callback=self._update_canvases,
+ rotation_offset=(0,90))
+ self.rotation_buttons.grid(row=i+1, column=0, columnspan=3)
+
+ self.menubar = tk.Menu()
+ self.filemenu = FileMenu(self, 'File')
+ self.filemenu._connect(self.menubar)
+ self.winfo_toplevel().config(menu=self.menubar)
+
+
+ def _update_canvases(self):
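+        # Keep the 3D camera distance fixed so all three views stay comparable.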
+ for i in range(3):
+ ax = self.canvases[i].ax
+ if ax.dist != 8.5:
+ ax.dist = 8.5
+
+ self.canvases[i].update()
+
+ def select_specimens(self, i_canvas):
+ select_specimens(self.core, self.set_vectormap,
+ command_args=[i_canvas], return_manalysers=True,
+ with_movements=True)
+
+
+ def set_vectormap(self, manalysers=None, i_canvas=None):
+ import time
+ start_time = time.time()
+
+ canvas = self.canvases[i_canvas]
+ ax = canvas.ax
+
+ if manalysers is None:
+ analyser = self.analysers[i_canvas]
+ if analyser is None:
+ return None
+ else:
+ if len(manalysers) > 1:
+ analyser = MAverager(manalysers)
+ else:
+ analyser = manalysers[0]
+
+ azim, elev = (ax.azim, ax.elev)
+ ax.clear()
+
+
+ kwargs = self.tickbox_frames[i_canvas].states
+
+ plot_3d_vectormap(analyser, ax=ax, azim=azim, elev=elev,
+ mutation_scale=6, scale_length=1.2, **kwargs)
+
+ if ax.dist != 8.5:
+ ax.dist = 8.5
+
+ canvas.update()
+
+ self.analysers[i_canvas] = analyser
+
+ print('took {} seconds'.format(time.time()-start_time))
+
+
+ def plot_difference(self):
+
+ if any([an is None for an in self.analysers]):
+ return None
+
+ kwargs = self.tickbox_frames[-1].states
+
+ ax = self.canvases[-1].ax
+ ax.clear()
+ plot_3d_differencemap(*self.analysers[0:2], ax=ax, **kwargs)
+
+ if ax.dist != 8.5:
+ ax.dist = 8.5
+
+ self.canvases[-1].update()
+
+
+ def savefig(self, i_canvas=None, fn=None):
+ '''
+ Save current images visible on the canvases
+
+ i_canvas : int or None
+ If none, save all views by inserting index of the canvas
+ to the end of the saved filename.
+ fn : string or None
+ If not given, save name is asked from the user.
+ '''
+
+ if i_canvas is None:
+ iterate = range(len(self.canvases))
+        elif isinstance(i_canvas, int):
+ iterate = [i_canvas]
+ else:
+ raise ValueError('wrong type for i_canvas: {}'.format(i_canvas))
+
+ if fn is None:
+
+            if i_canvas is None:
+ text = 'Select common save name for the views'
+ else:
+ text = 'Save image on a view'
+
+ fn = filedialog.asksaveasfilename(title=text)
+
+            if fn and '.' not in os.path.basename(fn):
+ fn = fn + '.png'
+
+ if fn:
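+            # Insert the canvas index just before the file extension so each
+            # view gets its own file.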
+ for i_canvas in iterate:
+ efn = '.'.join(fn.split('.')[:-1]+[str(i_canvas)]+fn.split('.')[-1:])
+ self.canvases[i_canvas].figure.savefig(efn, dpi=600)
diff --git a/gonio-analysis/gonioanalysis/version.py b/gonio-analysis/gonioanalysis/version.py
new file mode 100644
index 0000000..906d362
--- /dev/null
+++ b/gonio-analysis/gonioanalysis/version.py
@@ -0,0 +1 @@
+__version__ = "0.6.0"
diff --git a/gonio-analysis/scripts/add_gridlines.py b/gonio-analysis/scripts/add_gridlines.py
new file mode 100644
index 0000000..440ddd2
--- /dev/null
+++ b/gonio-analysis/scripts/add_gridlines.py
@@ -0,0 +1,62 @@
+'''
+Add grid lines to the selected GHS-DPP images.
+Useful, for example, for still-image series illustrations.
+'''
+import os
+
+import numpy as np
+from tkinter import filedialog, simpledialog
+import cv2
+
+def main():
+
+ fns = filedialog.askopenfilenames()
+
+ if fns:
+
+ print(fns)
+
+ pixel_size = 0.817
+ every = 10
+
+ directory = os.path.dirname(fns[0])
+
+ newdir = os.path.join(directory, 'gridded')
+ os.makedirs(newdir, exist_ok=True)
+
+ for fn in fns:
+ image = cv2.imread(fn)
+
+
+        # Horizontal grid lines every `every` units (red in cv2's BGR order)
+        for j in np.arange(0, image.shape[0]-1, every/pixel_size):
+            j = int(j)
+            image[j:j+1, :, 2] = 255
+            image[j:j+1, :, 0] = 0
+            image[j:j+1, :, 1] = 0
+
+        # Vertical grid lines every `every` units (red in cv2's BGR order)
+        for i in np.arange(0, image.shape[1]-1, every/pixel_size):
+            i = int(i)
+            image[:, i:i+1, 2] = 255
+            image[:, i:i+1, 0] = 0
+            image[:, i:i+1, 1] = 0
+
+ cv2.imwrite(os.path.join(newdir, os.path.basename(fn)), image)
+
+
+if __name__ == "__main__":
+ main()
+
+
diff --git a/gonio-analysis/scripts/compress_datadir.py b/gonio-analysis/scripts/compress_datadir.py
new file mode 100644
index 0000000..dcfbff2
--- /dev/null
+++ b/gonio-analysis/scripts/compress_datadir.py
@@ -0,0 +1,54 @@
+
+import os
+import tifffile
+import cv2
+from multiprocessing import Pool
+
+from videowrapper import VideoWrapper
+from gonioanalysis.drosom.loading import arange_fns, split_to_repeats
+
+datadir = input("DATADIR TO COMPRESS (av1 encode): ")
+newdir = datadir+'_av1compressed'
+print("New data will be saved in {}".format(newdir))
+os.makedirs(newdir, exist_ok=True)
+
+
+def process_dir(root):
+ directory = root
+
+ fns = [fn for fn in os.listdir(directory) if fn.endswith('.tiff')]
+ fns = arange_fns(fns)
+ fns = split_to_repeats(fns)
+
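+    # Encode each repeat into its own video clip, skipping clips that
+    # already exist in the destination directory.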
+ for repeat in fns:
+ common = os.path.commonprefix(repeat)
+ savedir = root.replace(os.path.basename(datadir), os.path.basename(newdir))
+
+ movie_fn = os.path.join(savedir, common+'stack.mp4')
+
+ if os.path.exists(movie_fn):
+ continue
+
+ os.makedirs(savedir, exist_ok=True)
+ video = VideoWrapper()
+ video.images_to_video([os.path.join(root, fn) for fn in repeat], movie_fn, 1)
+
+
+def main():
+
+ tiffdirs = []
+ for root, dirs, fns in os.walk(datadir):
+
+ if os.path.basename(root) == "snaps":
+ continue
+
+ hastiffs = any([fn.endswith('.tiff') for fn in fns])
+
+ if hastiffs:
+ tiffdirs.append(root)
+
+ with Pool() as p:
+ p.map(process_dir, tiffdirs)
+
+if __name__ == "__main__":
+ main()
diff --git a/gonio-analysis/scripts/create_alr_data.py b/gonio-analysis/scripts/create_alr_data.py
new file mode 100644
index 0000000..89d5624
--- /dev/null
+++ b/gonio-analysis/scripts/create_alr_data.py
@@ -0,0 +1,153 @@
+'''
+Extension of find antenna levels to create a reference fly.
+
+Ideally, this script has to be run only once on DrosoALR flies, creating
+an antenna level reference fly that can be used for other flies as well.
+'''
+
+import os
+
+import numpy as np
+import tifffile
+
+from gonioanalysis.antenna_level import AntennaLevelFinder
+from gonioanalysis.directories import PROCESSING_TEMPDIR_BIGFILES
+
+from imalyser.averaging import Templater
+from imalyser.aligning import Aligner
+from imalyser.common import imwrite
+
+
+DROSO_DATADIR = input('Input data directory >> ')
+
+
+def loadReferenceFly(folder):
+ '''
+ Returns the reference fly data, dictionary with pitch angles as keys and
+ image filenames as items.
+ '''
+ pitches = []
+ with open(os.path.join(folder, 'pitch_angles.txt'), 'r') as fp:
+ for line in fp:
+ pitches.append(line)
+
+ images = [os.path.join(folder, fn) for fno in s.listdir(folder) if fn.endswith('.tif') or fn.endswith('.tiff')]
+
+ return {pitch: fn for pitch,fn in zip(pitches, fns)}
+
+
+class ReferenceCreator:
+ '''
+    Create a reference fly that can be loaded like an ALR (antenna level reference) fly.
+ '''
+
+ def __init__(self, name):
+ self.name = name
+ self.savedir = 'alr_data'
+ os.makedirs(self.savedir, exist_ok=True)
+
+ def _loadData(self):
+ '''
+ Loads all the present ALR flies.
+ '''
+
+ reference_data = []
+
+
+ alr_flies = [fn for fn in os.listdir(DROSO_DATADIR) if 'DrosoALR' in fn]
+ alr_folders = [os.path.join(DROSO_DATADIR, fn)for fn in alr_flies]
+
+ lfinder = AntennaLevelFinder()
+
+ for folder,fly in zip(alr_folders, alr_flies):
+ reference_data.append( lfinder._load_drosox_reference(folder, fly) )
+
+ return reference_data
+
+
+
+ def _restructureData(self, reference_data):
+ '''
+ Restructures data from self._loadData to work in self.createReferenceFly.
+        Pitch angles that are not covered by all the flies are removed.
+
+ Input data = [fly1_dict, fly2_dict, ...] where
+ flyi_dict = {image_fn_1: pitch_1, ...}
+
+        Output data = {pitch_1: [image_fn_1_fly1,...]}
+ '''
+ restructured_data = {}
+
+ step_size = 1 # degree
+ N = len(reference_data)
+
+ # 1) MAKE ALL FLIES TO SPAN THE SAME ANGLES
+ angles = [[angle for image_fn, angle in reference_data[i].items()] for i in range(N)]
+
+ mins = [np.min(angles[i]) for i in range(N)]
+ maxs = [np.max(angles[i]) for i in range(N)]
+
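+        # Common span covered by all flies: from the largest per-fly minimum
+        # up to the smallest per-fly maximum.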
+ limits = (int(np.max(mins)), int(np.min(maxs)))
+
+
+
+ # 2) BIN together based on step_size
+
+ for iter_pitch in range(*limits, step_size):
+
+ restructured_data[str(iter_pitch)] = []
+
+ for fly_i_data in reference_data:
+ for image_fn, pitch in fly_i_data.items():
+
+ if iter_pitch <= pitch < iter_pitch + step_size:
+ restructured_data[str(iter_pitch)].append( image_fn )
+
+ return restructured_data
+
+ def createReferenceFly(self):
+
+ data = self._loadData()
+ data = self._restructureData(data)
+
+ templater = Templater()
+ aligner = Aligner()
+
+ templates = []
+ angles_order = []
+
+ for i, (angle, images) in enumerate(sorted(data.items(), key=lambda x:float(x[0]))):
+
+ print('Templating for pitch {} degrees ({}/{}), {} images to align and average together.'.format(angle, i+1, len(data), len(images)))
+
+ template = templater.template(images)
+ templates.append(template)
+ #imwrite(fn, template)
+
+ angles_order.append(angle)
+
+ print('Aligning all template images...')
+ offsets = aligner.calcOffsets(templates)
+ templates = aligner.getAligned(templates, offsets)
+
+ for i, template in enumerate(templates):
+
+
+ fn = os.path.join(self.savedir, 'im{:0>5}.tif'.format(i))
+ print('Saving {}'.format(fn))
+ tifffile.imwrite(fn, template)
+
+ with open(os.path.join(self.savedir, 'pitch_angles.txt'), 'w') as fp:
+ for pitch in angles_order:
+ fp.write(pitch+'\n')
+
+
+
+
+def main():
+
+ creator = ReferenceCreator('Drosophila')
+ creator.createReferenceFly()
+
+if __name__ == "__main__":
+ main()
diff --git a/gonio-analysis/scripts/drosox_spin.py b/gonio-analysis/scripts/drosox_spin.py
new file mode 100644
index 0000000..4d19f12
--- /dev/null
+++ b/gonio-analysis/scripts/drosox_spin.py
@@ -0,0 +1,118 @@
+'''
+A script to make a video where DrosoM format saved fly
+is spinned over the horizontal rotation (binocular overlap control).
+'''
+
+import sys
+import os
+
+
+from PIL import Image
+import numpy as np
+import tifffile
+import tkinter as tk
+
+from tk_steroids.matplotlib import SequenceImshow
+
+def main():
+ '''
+ usage
+ python3 drosox_spin.py dir1 dir2 dir3 ...
+ '''
+
+ order = ['nostop', 'ownstop', 'extstop']
+
+ #ycrop = 300
+ #yshift = -50
+    ycrop, yshift = (1, 0)
+ data = []
+ clip = 95
+
+ folders = []
+ for name in order:
+ for arg in sys.argv[1:]:
+ if name in arg:
+ folders.append(arg)
+ break
+
+ print(folders)
+
+ # get data
+ for arg in folders:
+ print(arg)
+        subdirs = [f for f in os.listdir(arg) if os.path.isdir(os.path.join(arg, f)) and 'snap' not in f]
+        hors = [int( f.split('(')[1].split(',')[0] ) for f in subdirs]
+
+        hors, subdirs = zip(*sorted(zip(hors, subdirs), key=lambda x: x[0]))
+
+
+        imfns = [[os.path.join(arg, f, fn) for fn in os.listdir(os.path.join(arg, f)) if fn.endswith('.tiff')][0] for f in subdirs]
+
+ data.append([np.array(hors), imfns])
+
+
+ # min max
+ hor_start = max([hors[0] for hors, imfns in data])
+ hor_stop = min([hors[-1] for hors, imfns in data])
+
+ print('hor_start {}, hor_stop {}'.format(hor_start, hor_stop))
+
+
+
+    # Select center points: inspect the first dataset interactively
+    hors, imfns = data[0]
+    loaded = []
+    for i_fn, fn in enumerate(imfns):
+        print('{}/{}'.format(i_fn, len(imfns)))
+        loaded.append(tifffile.imread(fn))
+    print(len(loaded))
+ tkroot = tk.Tk()
+ imshow = SequenceImshow(tkroot)
+ imshow.imshow(loaded, cmap='gray', slider=True)
+ imshow.grid(row=1, column=1, sticky='NSWE')
+ tkroot.columnconfigure(1, weight=1)
+ tkroot.rowconfigure(1, weight=1)
+ tkroot.mainloop()
+
+ # normalization values
+ normalization = []
+ for hors, imfns in data:
+ image = tifffile.imread(imfns[0])[ycrop+yshift:-ycrop+yshift:,]
+ normalization.append( np.percentile(image, clip) )
+
+ print(normalization)
+
+ remove_fns = []
+
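+    # For each horizontal angle, pick the closest frame from every dataset,
+    # normalize it, and stack the frames vertically into one image.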
+ for i_image, hor in enumerate(range(hor_start, hor_stop+1, 1)):
+ images = []
+ for i_data, (hors, imfns) in enumerate(data):
+ i_closest = np.argmin(np.abs(hors-hor))
+
+ image = np.clip(tifffile.imread(imfns[i_closest])[ycrop+yshift:-ycrop+yshift:,], 0, normalization[i_data])
+ image = image - np.min(image)
+            image = 255*image.astype(float) / np.max(image)
+ images.append(image)
+
+ image = Image.fromarray( np.concatenate((*images,),axis=0).astype(np.uint8) )
+
+ #imfn = 'images/im{0:03d}.jpg'.format(i_image)
+ imfn = 'images/im{0:03d}_horXZ.jpg'.format(i_image).replace('XZ', '{}').format(hor)
+ image.save(imfn)
+
+ remove_fns.append(imfn)
+
+
+ # encode mp4
+ os.chdir('images')
+ command = 'ffmpeg -framerate 20 -i im%03d.jpg -c:v libx264 -vf fps=20 -pix_fmt yuv420p -preset slow -crf 28 -vf "pad=ceil(iw/2)*2:ceil(ih/2)*2" {}.mp4'.format('out')
+
+ #os.system(command)
+ os.chdir('..')
+
+ #for fn in remove_fns:
+ # os.remove(fn)
+
+
+
+if __name__ == "__main__":
+ main()
+
diff --git a/gonio-analysis/scripts/framepuller.py b/gonio-analysis/scripts/framepuller.py
new file mode 100644
index 0000000..b2282ba
--- /dev/null
+++ b/gonio-analysis/scripts/framepuller.py
@@ -0,0 +1,57 @@
+
+import subprocess
+import tifffile
+import io
+import matplotlib.pyplot as plt
+import numpy as np
+from PIL import Image
+import os
+
+class FramePuller:
+ '''
+ Read image frames from a video file.
+ Uses ffmpeg's piping and Pillow to read.
+ '''
+ def __init__(self, fn):
+ self.fn = fn
+
+ def _get_cmd(self, vframes):
+ return ["ffmpeg", "-hide_banner", "-loglevel", "error", "-i", self.fn, "-r", "1/1", "-c:v", "tiff", "-vframes", str(vframes), '-pixel_format', 'gray16le', "-f", "image2pipe", "-"]
+
+ def get_frames(self):
+ process = subprocess.Popen(self._get_cmd(20), stdout=subprocess.PIPE)
+ output = process.communicate()[0]
+
+ barr = bytearray(output)
+
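+        # ffmpeg wrote concatenated TIFFs to the pipe; the first four bytes
+        # are the TIFF header magic, so split the stream on that marker.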
+ b1 = barr[0:4]
+ parts = barr.split(b1)
+
+ images = []
+
+ for part in parts[1:]:
+ part = bytes(b1 + part)
+
+ f = io.BytesIO()
+ f.write(part)
+
+ images.append(Image.open(f))
+
+ return np.asarray(images)
+
+
+def main():
+
+    fn = TESTVIDEOFILE  # path to a test video file; define before running
+
+ images = FramePuller(fn).get_frames()
+
+ print(images.shape)
+
+ i = 4000
+ for image in images:
+ tifffile.imsave(os.path.join(os.path.dirname(fn), os.path.basename(fn)+str(i)+'.tiff'), np.array(image))
+ i+=1
+
+if __name__ == "__main__":
+ main()
diff --git a/gonio-analysis/scripts/make_cmds.py b/gonio-analysis/scripts/make_cmds.py
new file mode 100644
index 0000000..c017641
--- /dev/null
+++ b/gonio-analysis/scripts/make_cmds.py
@@ -0,0 +1,20 @@
+'''
+A script to autogenerate .cmd files for Windows in the bin.
+'''
+
+import os
+
+def main():
+
+ os.chdir('bin')
+
+ pyfiles = [fn for fn in os.listdir() if fn.endswith('.py')]
+
+ for pyfile in pyfiles:
+ # Here assuming there's only one . in the filenames
+ cmd_fn = pyfile.split('.')[0] + '.cmd'
+ with open(cmd_fn, 'w') as fp:
+ fp.write('python {}'.format(pyfile))
+
+if __name__ == "__main__":
+ main()
diff --git a/gonio-analysis/scripts/optimal_sampling.py b/gonio-analysis/scripts/optimal_sampling.py
new file mode 100644
index 0000000..0bb88fb
--- /dev/null
+++ b/gonio-analysis/scripts/optimal_sampling.py
@@ -0,0 +1,58 @@
+
+from math import sin, cos, tan, sqrt, radians, pi
+
+import numpy as np
+import matplotlib.pyplot as plt
+from mpl_toolkits import mplot3d
+
+from gonioanalysis.coordinates import camera2Fly, optimal_sampling
+
+
+def test():
+ fig = plt.figure()
+ ax = fig.add_subplot(111, projection='3d')
+
+ verticals = [1, 30, 45, 60, 90]
+ colors = ['red', 'orange', 'yellow', 'green', 'blue']
+ for vertical, color in zip(verticals, colors):
+ horizontals = np.linspace(-90, 90)
+
+ for horizontal in horizontals:
+ x,y,z = camera2Fly(horizontal, vertical)
+ ax.scatter(x,y,z, color=color)
+
+ plt.show()
+
+def plot_optimal(points):
+
+
+ print(len(points))
+
+ fig = plt.figure()
+ ax = fig.add_subplot(111, projection='3d')
+
+ ax.scatter(*list(zip(*points)))
+
+ plt.show()
+
+
+
+if __name__ == "__main__":
+
+ steps = (10, 10)
+
+
+ # For Gabor
+ #horizontals = np.arange(-90,40+0.01, steps[0])
+ #verticals = np.arange(-20,180+0.01, steps[1])
+
+ # For vector map
+ horizontals = np.arange(-50,40+0.01, steps[0])
+ verticals = np.arange(-20,180+0.01, steps[1])
+
+ # Full sphere just for illustration
+ #horizontals = np.arange(-90,90+0.01, steps[0])
+ #verticals = np.arange(0,360+0.01, steps[1])
+
+ points = optimal_sampling(horizontals, verticals, steps)
+ plot_optimal(points)
diff --git a/gonio-analysis/scripts/plot_spatial_calibration.py b/gonio-analysis/scripts/plot_spatial_calibration.py
new file mode 100644
index 0000000..cf151f5
--- /dev/null
+++ b/gonio-analysis/scripts/plot_spatial_calibration.py
@@ -0,0 +1,51 @@
+'''
+Plot the results from spatial 2D calibration where the pinhole/fibre was
+moved in the camera's (x, y) coordinates while only the green stimulus
+LED was turned on and the fibre was connected to the spectrometer.
+'''
+
+import tifffile
+import numpy as np
+import matplotlib.pyplot as plt
+
+from movemeter import Movemeter
+from marker import Marker
+
+def get_xy_coordinates(image_fn, match_image=None):
+ '''
+    Takes in an image of the pinhole and returns the coordinates
+ of the pinhole.
+
+ image_fn Image where we look for the match
+ match_image Matching target
+ '''
+
+ image = tifffile.imread(image_fn)
+
+ movemeter = Movemeter()
+ movemeter.set_data(image, )
+ #plt.imshow(np.clip(image, np.min(image), np.percentile(image, 50)))
+ #plt.show()
+
+
+
+def main():
+ image_fn = '/home/joni/smallbrains-nas1/array1/pseudopupil_joni/Spectrometer/DPP_cal_1_ty2/snap_2020-02-21_14.15.08.088000_0.tiff'
+
+ fig, ax = plt.subplots()
+ marker = Marker(fig, ax, [image_fn], None)
+ pinhole = marker.run()
+
+    crop = pinhole[image_fn]
+ pinhole_image = image_fn
+
+
+    coordinates = get_xy_coordinates(image_fn)
+
+
+
+if __name__ == "__main__":
+ main()
+
+
+
diff --git a/gonio-analysis/scripts/videowrapper.py b/gonio-analysis/scripts/videowrapper.py
new file mode 100644
index 0000000..7dc324b
--- /dev/null
+++ b/gonio-analysis/scripts/videowrapper.py
@@ -0,0 +1,87 @@
+'''
+Simple tool (Encoder) to transform an image recording into a movie clip.
+'''
+
+import os
+import datetime
+import subprocess
+
+PROCESSING_TEMPDIR = '/tmp/videowrapper_tmp'
+
+
+class VideoWrapper:
+ '''
+ Simple Python interface for encoding images into a movie clip
+ using ffmpeg (required to be installed).
+
+    See VideoWrapper.images_to_video for usage.
+ '''
+
+ def __init__(self):
+
+ # Ensure that ffmpeg is installed
+ try:
+ subprocess.check_output('ffmpeg -version', shell=True)
+ except subprocess.CalledProcessError:
+            raise RuntimeError('ffmpeg is not installed (required by VideoWrapper)')
+
+        # Temporary files directory
+ self.tempdir = os.path.join(PROCESSING_TEMPDIR, 'movie-encoder')
+ os.makedirs(self.tempdir, exist_ok=True)
+
+
+ def _makeImageList(self, images, fps):
+ '''
+ PRIVATE Used from self.encode.
+
+ Creates a textfile that contains all the image filenames and is passed to ffmpeg.
+ '''
+ imagelist_fn = os.path.join(self.tempdir, 'imagelist_{}.txt'.format(str(datetime.datetime.now()).replace(' ', '_')))
+ imagelist_fn = os.path.abspath(imagelist_fn)
+ with open(imagelist_fn, 'w') as fp:
+ for image in images:
+ fp.write("file '"+os.path.abspath(image)+"'\n")
+ fp.write('duration {}\n'.format(float(1/fps)))
+
+ return imagelist_fn
+
+
+ def images_to_video(self, images, output_fn, fps, codec='libaom-av1'):
+ '''
+ Encoding a video clip using ffmpeg.
+
+ INPUT ARGUMENTS DESCRIPTION
+ images Ordered list of images; Movie will be encoded in this order.
+ output_fn Output name of the movie.
+        fps Desired frames per second.
+
+        By default the libaom-av1 codec with crf 7 is used (see the codec argument).
+ '''
+
+        # Check that the input is correct and that the images actually exist (avoids much trouble).
+
+ options = {}
+
+        if not isinstance(output_fn, str):
+            raise TypeError('output_fn has to be a string (required by VideoWrapper)')
+
+ try:
+ float(fps)
+        except (TypeError, ValueError):
+ raise TypeError('fps has to be floatable')
+
+ for image in images:
+ if not os.path.exists(image):
+                raise FileNotFoundError('Precheck in VideoWrapper failed. File {} does not exist'.format(image))
+
+
+
+        # Check that the directory for output_fn exists; if not, create it.
+        # os.path.dirname returns '' for bare filenames, so guard against that.
+        dirname = os.path.dirname(output_fn)
+        if dirname:
+            os.makedirs(dirname, exist_ok=True)
+
+ imagelist_fn = self._makeImageList(images, fps)
+ print(imagelist_fn)
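+        # Use ffmpeg's concat demuxer (-f concat -safe 0) to read the image
+        # list file; -safe 0 permits the absolute paths written above.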
+ command = 'ffmpeg -r {} -f concat -safe 0 -i "{}" -c:v {} -pix_fmt gray12 -crf 7 -b:v 0 "{}"'.format(fps,imagelist_fn, codec, output_fn)
+ subprocess.check_output(command, shell=True)
+
+ os.remove(imagelist_fn)
diff --git a/gonio-analysis/setup.py b/gonio-analysis/setup.py
new file mode 100644
index 0000000..54aba7d
--- /dev/null
+++ b/gonio-analysis/setup.py
@@ -0,0 +1,38 @@
+import setuptools
+
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+
+# Version number to __version__ variable
+exec(open("gonioanalysis/version.py").read())
+
+install_requires = [
+ 'numpy',
+ 'scipy',
+ 'tifffile',
+ 'matplotlib',
+ 'tk-steroids>=0.6.0',
+ 'roimarker>=0.2.0',
+ 'movemeter>=0.4.0',
+ 'python-biosystfiles',
+ ]
+
+
+setuptools.setup(
+ name="gonio-analysis",
+ version=__version__,
+ author="Joni Kemppainen",
+ author_email="jjtkemppainen1@sheffield.ac.uk",
+ description="Spatial motion analysis program",
+ long_description=long_description,
+ long_description_content_type="text/markdown",
+ url="https://github.com/jkemppainen/gonio-analysis",
+ packages=setuptools.find_packages(),
+ install_requires=install_requires,
+ classifiers=[
+ "Programming Language :: Python :: 3",
+ "License :: OSI Approved :: GNU General Public License v3 (GPLv3) ",
+ "Operating System :: OS Independent",
+ ],
+ python_requires='>=3.0',
+)
diff --git a/gonio-analysis/tests/test_terminal.py b/gonio-analysis/tests/test_terminal.py
new file mode 100644
index 0000000..840ad4f
--- /dev/null
+++ b/gonio-analysis/tests/test_terminal.py
@@ -0,0 +1,30 @@
+import os
+import unittest
+
+import gonioanalysis.drosom.analyser_commands as ac
+import gonioanalysis.drosom.terminal as terminal
+
+TESTPATH = os.path.dirname(__file__)
+
+datadir = os.path.join(TESTPATH, 'test_data')
+testspecimen = 'test_specimen_01'
+
+class TestTerminal(unittest.TestCase):
+
+ def test_analysis_targets(self):
+ '''
+        Test all the analysis targets in terminal.py, i.e. essentially
+
+ terminal.py -D datadir -S testspecimen analysis1
+ terminal.py -D datadir -S testspecimen analysis2
+ ...
+
+ only checking if they can be run successfully without errors.
+ '''
+
+ targets = ac.ANALYSER_CMDS
+
+ for target in targets:
+ with self.subTest(target=target):
+ args = ['--dont-show','-D', datadir, '-S', testspecimen, target]
+ terminal.main(custom_args=args)
diff --git a/gonio-analysis/windows_installer/get_tkdeps.py b/gonio-analysis/windows_installer/get_tkdeps.py
new file mode 100644
index 0000000..dfee1a1
--- /dev/null
+++ b/gonio-analysis/windows_installer/get_tkdeps.py
@@ -0,0 +1,21 @@
+
+import os
+import sys
+import shutil
+
+def main():
+ workdir = os.getcwd()
+ pdir = os.path.dirname(sys.executable)
+
+ shutil.copytree(os.path.join(pdir, 'tcl'), 'lib')
+ os.makedirs('pynsist_pkgs', exist_ok=True)
+
+ copyfiles = [os.path.join(pdir, 'DLLs', fn) for fn in ['_tkinter.pyd', 'tcl86t.dll', 'tk86t.dll']]
+ copyfiles.append(os.path.join(pdir, 'libs', '_tkinter.lib'))
+
+ for origfile in copyfiles:
+ newfile = os.path.join('pynsist_pkgs', os.path.basename(origfile))
+ shutil.copy(origfile, newfile)
+
+if __name__ == "__main__":
+ main()
diff --git a/gonio-analysis/windows_installer/logo_colored.ico b/gonio-analysis/windows_installer/logo_colored.ico
new file mode 100644
index 0000000..39b1679
Binary files /dev/null and b/gonio-analysis/windows_installer/logo_colored.ico differ
diff --git a/gonio-analysis/windows_installer/make_installer.py b/gonio-analysis/windows_installer/make_installer.py
new file mode 100644
index 0000000..63e94ab
--- /dev/null
+++ b/gonio-analysis/windows_installer/make_installer.py
@@ -0,0 +1,82 @@
+'''
+A script to make an all-in-one Windows installer for gonio-analysis using
+pip, pynsist and NSIS.
+
+You have to
+ - be on the Windows platform (64-bit)
+ - have pynsist and NSIS installed
+ - have the same major.minor version of Python as PYTHONVERSION in here
+
+Because the wheels embedded in the installer are fetched from PyPi, this script
+can make installers only for PyPi released versions of gonio-analysis.
+
+Attributes
+----------
+GONIOVERSION : string
+ Version of the Gonio Analysis to use.
+ By default, it is assumed that we are in a git work copy.
+PYTHONVERSION : string
+ Version of the Python interpreter to use
+'''
+
+import os
+import sys
+import shutil
+
+try:
+ # Version number to __version__ variable
+ exec(open("../gonioanalysis/version.py").read())
+except Exception:
+    __version__ = input('gonioanalysis version to use (for example 0.1.2) >>')
+
+GONIOVERSION = __version__
+PYTHONVERSION = '{}.{}.{}'.format(*sys.version_info[0:3])
+
+def fetch_wheels():
+
+ if os.path.isdir('wheels'):
+ shutil.rmtree('wheels')
+ os.makedirs('wheels')
+
+ os.chdir('wheels')
+ os.system('pip download gonio-analysis=='+GONIOVERSION)
+ os.chdir('..')
+
+
+
+def build(gonioversion, pythonversion):
+
+ os.system('get_tkdeps.py')
+
+ fetch_wheels()
+ wheels = [os.path.join('wheels', fn) for fn in os.listdir('wheels') if fn.endswith('.whl')]
+
+ str_wheels = '\n '.join(wheels)
+
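+    # Parse the movemeter version from its wheel filename (name-version-...whl)
+    # for the Movemeter shortcut entry in the installer config.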
+ moveversion = [fn for fn in wheels if 'movemeter-' in fn][0]
+ moveversion = moveversion.split('-')[1]
+
+ cfg_file = []
+ with open('pynsist_template.cfg', 'r') as fp:
+ for line in fp:
+ edited = line.replace('GONIOVERSION', gonioversion)
+ edited = edited.replace('PYTHONVERSION', pythonversion)
+ edited = edited.replace('LOCAL_WHEELS', str_wheels)
+ edited = edited.replace('MOVEVERSION', moveversion)
+ cfg_file.append(edited)
+
+ with open('pynsist_temp.cfg', 'w') as fp:
+ for line in cfg_file:
+ fp.write(line)
+
+ print(cfg_file)
+
+ os.system('pynsist pynsist_temp.cfg')
+ #shutil.rmtree('wheels')
+
+def main():
+ build(GONIOVERSION, PYTHONVERSION)
+
+if __name__ == "__main__":
+ main()
+
diff --git a/gonio-analysis/windows_installer/movemeter_logo.ico b/gonio-analysis/windows_installer/movemeter_logo.ico
new file mode 100644
index 0000000..36d8b97
Binary files /dev/null and b/gonio-analysis/windows_installer/movemeter_logo.ico differ
diff --git a/gonio-analysis/windows_installer/pynsist_template.cfg b/gonio-analysis/windows_installer/pynsist_template.cfg
new file mode 100644
index 0000000..e6656b2
--- /dev/null
+++ b/gonio-analysis/windows_installer/pynsist_template.cfg
@@ -0,0 +1,28 @@
+[Application]
+name=Gonio Analysis GONIOVERSION
+version=GONIOVERSION
+entry_point=gonioanalysis.tkgui.__main__:main
+icon=logo_colored.ico
+
+[Shortcut Movemeter MOVEVERSION]
+entry_point=movemeter.tkgui:main
+icon=movemeter_logo.ico
+
+[Command gonioanalysis]
+entry_point=gonioanalysis.drosom.terminal:main
+
+[Python]
+version=PYTHONVERSION
+bitness=64
+
+[Include]
+
+# Tkinter workaround
+# https://pynsist.readthedocs.io/en/latest/faq.html#packaging-with-tkinter
+packages =
+ tkinter
+ _tkinter
+files=lib
+
+# Wheels that we fetch with pip (see make_installer.py)
+local_wheels = LOCAL_WHEELS
diff --git a/gonio-imsoft/.gitignore b/gonio-imsoft/.gitignore
new file mode 100644
index 0000000..b6e4761
--- /dev/null
+++ b/gonio-imsoft/.gitignore
@@ -0,0 +1,129 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
diff --git a/gonio-imsoft/LICENSE b/gonio-imsoft/LICENSE
new file mode 100644
index 0000000..53d1f3d
--- /dev/null
+++ b/gonio-imsoft/LICENSE
@@ -0,0 +1,675 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
+
diff --git a/gonio-imsoft/README.md b/gonio-imsoft/README.md
new file mode 100755
index 0000000..3b1a39b
--- /dev/null
+++ b/gonio-imsoft/README.md
@@ -0,0 +1,75 @@
+# Goniometric imaging software
+
+Gonio Imsoft is a command-line Python program designed to control
+goniometric high-speed imaging experiments in which
+
+* rotary encoder values are read over serial (pySerial)
+* NI-DAQmx is used for general input/output (nidaqmx)
+* the camera is controlled over MicroManager (MMCorePy)
+
+It was developed for imaging 200 distinct rotations
+(eye locations) per specimen quickly, requiring
+only the space bar to be pressed between the rotations.
+
+For general imaging, it is more convenient
+to use MicroManager or a similar tool.
+
+
+## Required hardware and current limitations
+
+* Any MicroManager-supported imaging device
+* Any National Instruments input/output board (the NI specificity could be
+  lifted in the future by using PyVISA or similar)
+* Any serial device reporting rotation values in the format "pos1,pos2\n"
+
+There are, however, currently some limitations (to be fixed soon):
+
+1) Imsoft yet lacks dialogs to select and configure
+ devices in a user-friendly manner.
+   Currently, the same can be achieved by modifying
+ `camera_server.Camera.__init__`, `core.Dynamic.analog_output`
+ and `arduino_serial.ArduinoReader`.
+
+1) At least previously, MicroManager shipped only Python 2 bindings,
+   and because of this *camera_server.py*
+   has to be run with Python 2 and the rest of the software with
+   Python 3.
+
+1) Some parts only work on Windows (nidaqmx and msvcrt modules)
+
+
+## How to install
+
+### Rotary encoders
+
+We connected two 1024-step rotary encoders to two perpendicular
+rotation stages (goniometers), and used an Arduino for readout.
+
+When using a setup similar to ours, you can modify and flash
+`arduino/angle_sensors/angle_sensors.ino`, and use the Serial Monitor
+in the Arduino IDE to confirm that the readout works.
+
+Alternatively, any serial device reporting rotations in the format
+"pos1,pos2\n", where pos1 and pos2 are the rotation steps of the two
+encoders, will do; see the parsing sketch below.
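+
+As a sanity check, such a stream can be parsed with a few lines of
+Python (a hypothetical standalone sketch, not part of Imsoft; the port
+name matches the default in `arduino_serial.py`):
+
+```
+import serial  # pySerial
+
+with serial.Serial(port='COM4', baudrate=9600, timeout=0.01) as ser:
+    line = ser.readline().decode('utf-8')   # e.g. "12,-3\n"
+    if line:
+        pos1, pos2 = (int(v) for v in line.strip().split(','))
+        print(pos1, pos2)
+```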
+
+
+### Main software
+
+First, please make sure that you have
+* a MicroManager installation with a working camera
+* National Instruments cards configured with the names *Dev1* and *Dev2*
+  for input and output, respectively
+* Python 3 and Python 2
+
+Then, with Python 3's pip
+
+```
+pip install gonio-imsoft
+```
+
+## How to use
+
+```
+python -m gonioimsoft.tui
+```
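+
+This opens the text-based user interface. The camera server
+(*camera_server.py*) runs under Python 2; it can be started from the
+client side (`CameraClient.startServer`) or, assuming you are in the
+package directory, by hand:
+
+```
+python2 gonioimsoft/camera_server.py
+```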
+
diff --git a/gonio-imsoft/gonioimsoft/__init__.py b/gonio-imsoft/gonioimsoft/__init__.py
new file mode 100644
index 0000000..8d1c8b6
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/__init__.py
@@ -0,0 +1 @@
+
diff --git a/gonio-imsoft/gonioimsoft/anglepairs.py b/gonio-imsoft/gonioimsoft/anglepairs.py
new file mode 100755
index 0000000..da3e311
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/anglepairs.py
@@ -0,0 +1,48 @@
+'''
+Working with the anglepairs.txt file
+'''
+
+import csv
+
+
+def saveAnglePairs(fn, angles):
+ '''
+ Saving angle pairs to a file.
+ '''
+ with open(fn, 'w') as fp:
+ writer = csv.writer(fp)
+ for angle in angles:
+ writer.writerow(angle)
+
+def loadAnglePairs(fn):
+ '''
+ Loading angle pairs from a file.
+ '''
+ angles = []
+ with open(fn, 'r') as fp:
+ reader = csv.reader(fp)
+ for row in reader:
+ if row:
+ angles.append([int(a) for a in row])
+ return angles
+
+def toDegrees(angles):
+    '''
+    Transform 'angles' (that here are just rotary encoder steps) in place
+    to actual angle values in degrees, assuming 1024-step encoders.
+    '''
+    for i in range(len(angles)):
+        angles[i][0] *= (360/1024)
+        angles[i][1] *= (360/1024)
+
+
+def step2degree(step):
+    '''
+    Transform rotary encoder steps to an angle in degrees.
+    '''
+    return step * (360/1024)
+
+
+def degrees2steps(angle):
+ '''
+    Transform an angle (degrees) to the corresponding rotary encoder steps.
+ '''
+ return angle*(1024/360)
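+
+
+# Example (with the 1024-step encoders used here):
+# step2degree(256) == 90.0 and degrees2steps(90) == 256.0,
+# i.e. the two conversions are inverses of each other.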
+
diff --git a/gonio-imsoft/gonioimsoft/arduino/angle_sensors/angle_sensors.ino b/gonio-imsoft/gonioimsoft/arduino/angle_sensors/angle_sensors.ino
new file mode 100644
index 0000000..b8c78c3
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/arduino/angle_sensors/angle_sensors.ino
@@ -0,0 +1,63 @@
+/* angle_sensors (Part of gonio-imsoft)
+
+Reading 2 rotary encoders' values and reporting
+them to a computer through serial.
+
+Reporting format: 'pos1,pos2\n'
+
+Based on brianlow's rotary encoder library for Arduino
+and its examples.
+https://github.com/brianlow/Rotary (GNU GPL Version 3)
+*/
+
+
+#include <Rotary.h>
+
+Rotary r1 = Rotary(2, 3);
+Rotary r2 = Rotary(4, 5);
+
+unsigned char result1;
+unsigned char result2;
+
+// Keeping record of the rotary encoders' positions
+int pos1 = 0;
+int pos2 = 0;
+
+// Evaluate the change in step: -1, 0, or +1
+int stepChange(unsigned char result) {
+  if (result == DIR_CW) {
+    return -1;
+  }
+  if (result == DIR_CCW) {
+    return 1;
+  }
+  return 0;
+}
+
+void setup() {
+ Serial.begin(9600);
+ r1.begin();
+ r2.begin();
+
+}
+
+void loop() {
+
+  // Update the 1st rotary encoder
+  result1 = r1.process();
+  if (result1) {
+    pos1 += stepChange(result1);
+  }
+
+  // Update the 2nd rotary encoder
+  result2 = r2.process();
+  if (result2) {
+    pos2 += stepChange(result2);
+  }
+
+  // Print if either has changed
+  if (result1 or result2) {
+    Serial.println((String) pos1 + ',' + pos2);
+  }
+}
diff --git a/gonio-imsoft/gonioimsoft/arduino/combined_sensors_motors/combined_sensors_motors.ino b/gonio-imsoft/gonioimsoft/arduino/combined_sensors_motors/combined_sensors_motors.ino
new file mode 100644
index 0000000..8d249d3
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/arduino/combined_sensors_motors/combined_sensors_motors.ino
@@ -0,0 +1,282 @@
+/*
+combined_sensors_motors (Part of gonio-imsoft)
+
+Combined reading of 2 rotary encoders' values and
+controlling stepper motors.
+
+1) READING ROTARY ENCODERS
+   Reporting format: 'pos1,pos2\n'
+
+ Based on brianlow's rotary encoder library for Arduino
+ and its examples.
+ https://github.com/brianlow/Rotary (GNU GPL Version 3)
+
+
+2) CONTROLLING STEPPER MOTORS (using a middleman)
+   This doesn't actually control the stepper motors; the
+   circuitry "just" simulates button presses.
+
+   This is because we're using the stepper motor units built by
+   Mick Swan that come with their own controller boxes, and we're
+   just hacking the remote control part.
+
+
+TODO) Change the motor control to use an object-oriented interface
+      rather than the current mess of if blocks.
+
+*/
+
+
+#include <Rotary.h>
+
+// ---------------------------------------------
+// ROTARY ENCODER SETUP
+Rotary r1 = Rotary(2, 3);
+Rotary r2 = Rotary(4, 5);
+
+unsigned char result1;
+unsigned char result2;
+
+// Keeping record of the rotary encoders' positions
+int pos1 = 0;
+int pos2 = 0;
+
+// Evaluate the change in step: -1, 0, or +1
+int stepChange(unsigned char result) {
+  if (result == DIR_CW) {
+    return -1;
+  }
+  if (result == DIR_CCW) {
+    return 1;
+  }
+  return 0;
+}
+// ----------------------------------------------
+// ----------------------------------------------
+
+// ----------------------------------------------
+// STEPPER MOTOR CONTROL PART
+int pin_a = 12;
+int pin_A = 11;
+unsigned long a_time = 0;
+unsigned long A_time = 0;
+
+
+int pin_b = 10;
+int pin_B = 9;
+unsigned long b_time = 0;
+unsigned long B_time = 0;
+
+
+int pin_c = 8;
+int pin_C = 7;
+unsigned long c_time = 0;
+unsigned long C_time = 0;
+
+// How long motor pin stays in HIGH position
+unsigned long on_time_ms = 100;
+
+int action;
+
+int get_action()
+{
+ if(Serial.available() > 0)
+ {
+ return Serial.read();
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+// Checks whether a timer has expired
+bool is_experied(unsigned long atime)
+{
+  return atime < millis();
+}
+
+
+// ----------------------------------------------
+// ----------------------------------------------
+
+
+
+
+void setup() {
+ Serial.begin(9600);
+
+ // ROTARY ENCODER SENSORS
+ r1.begin();
+ r2.begin();
+
+ // STEPPER MOTOR PART
+ pinMode(pin_a, OUTPUT);
+ pinMode(pin_A, OUTPUT);
+ pinMode(pin_b, OUTPUT);
+ pinMode(pin_B, OUTPUT);
+ pinMode(pin_c, OUTPUT);
+ pinMode(pin_C, OUTPUT);
+
+ digitalWrite(pin_a, LOW);
+ digitalWrite(pin_A, LOW);
+
+ digitalWrite(pin_b, LOW);
+ digitalWrite(pin_B, LOW);
+
+ digitalWrite(pin_c, LOW);
+ digitalWrite(pin_C, LOW);
+
+
+
+
+}
+
+
+
+void loop() {
+
+  // Update the 1st rotary encoder
+  result1 = r1.process();
+  if (result1) {
+    pos1 += stepChange(result1);
+  }
+
+  // Update the 2nd rotary encoder
+  result2 = r2.process();
+  if (result2) {
+    pos2 += stepChange(result2);
+  }
+
+  // Print if either has changed
+  // Notice: this is what gets sent over serial
+  if (result1 or result2) {
+    Serial.println((String) pos1 + ',' + pos2);
+  }
+
+
+  // READ SERIAL INPUT: WHICH MOTOR PINS TO SET HIGH
+ do {
+ action = get_action();
+
+ if (action == 'a')
+ {
+
+ if (is_experied(a_time))
+ {
+ digitalWrite(pin_a, HIGH);
+ a_time = millis() + on_time_ms;
+ }
+ else
+ {
+ a_time += on_time_ms;
+ }
+ }
+ else if (action == 'A')
+ {
+ if (is_experied(A_time))
+ {
+ digitalWrite(pin_A, HIGH);
+ A_time = millis() + on_time_ms;
+ }
+ else
+ {
+ A_time += on_time_ms;
+ }
+
+ }
+ else if (action == 'b')
+ {
+ if (is_experied(b_time))
+ {
+ digitalWrite(pin_b, HIGH);
+ b_time = millis() + on_time_ms;
+ }
+ else
+ {
+ b_time += on_time_ms;
+ }
+
+ }
+ else if (action == 'B')
+ {
+ if (is_experied(B_time))
+ {
+ digitalWrite(pin_B, HIGH);
+ B_time = millis() + on_time_ms;
+ }
+ else
+ {
+ B_time += on_time_ms;
+ }
+
+ }
+ else if (action == 'c')
+ {
+ if (is_experied(c_time))
+ {
+ digitalWrite(pin_c, HIGH);
+ c_time = millis() + on_time_ms;
+ }
+ else
+ {
+ c_time += on_time_ms;
+ }
+
+ }
+ else if (action == 'C')
+ {
+ if (is_experied(C_time))
+ {
+ digitalWrite(pin_C, HIGH);
+ C_time = millis() + on_time_ms;
+ }
+ else
+ {
+ C_time += on_time_ms;
+ }
+
+ }
+ else
+ {
+ action = 0;
+ }
+
+ } while(action != 0);
+
+  // CHECK IF SOME MOTOR PINS HAVE TO BE SET LOW ALREADY
+ if (is_experied(a_time))
+ {
+ digitalWrite(pin_a, LOW);
+ }
+ if (is_experied(A_time))
+ {
+ digitalWrite(pin_A, LOW);
+ }
+ if (is_experied(b_time))
+ {
+ digitalWrite(pin_b, LOW);
+ }
+ if (is_experied(B_time))
+ {
+ digitalWrite(pin_B, LOW);
+ }
+ if (is_experied(c_time))
+ {
+ digitalWrite(pin_c, LOW);
+ }
+ if (is_experied(C_time))
+ {
+ digitalWrite(pin_C, LOW);
+ }
+
+
+}
diff --git a/gonio-imsoft/gonioimsoft/arduino/focus/focus.ino b/gonio-imsoft/gonioimsoft/arduino/focus/focus.ino
new file mode 100644
index 0000000..36c1c0e
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/arduino/focus/focus.ino
@@ -0,0 +1,56 @@
+/*
+focus (part of gonio-imsoft)
+
+Writing to digital pins to control the microscope focus
+through a stepper motor.
+*/
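+
+// Example of the serial protocol implemented below: sending the byte
+// 'c' drives pin_closer HIGH (for at least 100 ms); 'f' does the same
+// for pin_further; any other byte sets both pins LOW again.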
+
+int pin_closer = 8;
+int pin_further = 12;
+
+int action;
+
+int get_action()
+{
+ if(Serial.available() > 0)
+ {
+ return Serial.read();
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+
+void setup()
+{
+ Serial.begin(9600);
+ pinMode(pin_closer, OUTPUT);
+ pinMode(pin_further, OUTPUT);
+
+ digitalWrite(pin_closer, LOW);
+ digitalWrite(pin_further, LOW);
+}
+
+
+void loop()
+{
+ action = get_action();
+
+ if(action == 'c')
+ {
+ digitalWrite(pin_closer, HIGH);
+ delay(100);
+ }
+ if(action == 'f')
+ {
+ digitalWrite(pin_further, HIGH);
+ delay(100);
+ }
+ if(action != 'f' and action != 'c')
+ {
+ digitalWrite(pin_further, LOW);
+ digitalWrite(pin_closer, LOW);
+ }
+}
diff --git a/gonio-imsoft/gonioimsoft/arduino_serial.py b/gonio-imsoft/gonioimsoft/arduino_serial.py
new file mode 100755
index 0000000..1a3f101
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/arduino_serial.py
@@ -0,0 +1,123 @@
+'''
+Reading rotation encoder signals from an Arduino board,
+and controlling stepper motors through it.
+'''
+
+try:
+ import serial
+except ModuleNotFoundError:
+ serial = None
+
+DEFAULT_PORT_NAME = 'COM4'
+
+class ArduinoReader:
+ '''
+ Class for reading angle pairs (states of the rotation encoders) from Arduino.
+ '''
+
+ def __init__(self, port=DEFAULT_PORT_NAME):
+        '''
+        port On Windows, "COM4" or similar. Note that the name may
+             change if other serial devices are added or removed.
+        '''
+
+ if serial:
+ self.serial = serial.Serial(port=port, baudrate=9600, timeout=0.01)
+ else:
+ self.serial = None
+
+ self.latest_angle = (0,0)
+ self.offset = (0,0)
+
+ def _offset_correct(self, angles):
+ '''
+        Return the offset (zero-point) corrected angle pair.
+ '''
+ return (angles[0] - self.offset[0], angles[1] - self.offset[1])
+
+
+ def read_angles(self):
+ '''
+        Read the oldest unread angle pair that the Arduino has sent over serial.
+
+ Returns angle pair, (horizontal_angle, vertical_angle).
+ '''
+ if self.serial is None:
+ return (0,0)
+
+ read_string = self.serial.readline().decode("utf-8")
+ if read_string:
+ angles = read_string.split(',')
+ self.latest_angle = tuple(map(int, angles))
+
+ return self._offset_correct(self.latest_angle)
+
+ def get_latest(self):
+ '''
+        Returns the latest angle pair that has been read from the Arduino.
+        (the Arduino sends an angle pair only when it has changed)
+ '''
+ return self._offset_correct(self.latest_angle)
+
+
+ def close_connection(self):
+ '''
+ If it is required to manually close the serial connection.
+ '''
+ if self.serial:
+ self.serial.close()
+
+ def current_as_zero(self):
+ '''
+ Sets the current angle pair value to (0,0)
+ '''
+ self.offset = self.latest_angle
+
+
+ def move_motor(self, i_motor, direction, time=1):
+        '''
+        Move motor i_motor in the given direction for the given time
+        (default 1 s).
+
+        Communication with the Arduino board controlling the motor states
+        happens by sending characters over serial; each sent character
+        ideally makes the motor move for 100 ms. The letters are
+            a for motor 0 in the + direction
+            A for motor 0 in the - direction
+            b for motor 1 in the + direction
+            ....
+
+        Input arguments
+        i_motor Index number of the motor
+        direction Positive for the + direction, negative for the -
+                  direction; 0 does nothing
+        time In seconds
+        '''
+ if self.serial is None:
+ print('Pretending to drive motor {}'.format(i_motor))
+ return None
+
+ motor_letters = ['a', 'b', 'c', 'd', 'e']
+
+ letter = motor_letters[i_motor]
+
+        if direction != 0:
+            if direction > 0:
+                letter = letter.lower()
+            else:
+                letter = letter.upper()
+
+            N = round(time * 10)
+            self.serial.write(bytearray((letter * N).encode()))
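+
+    # Example (illustrative values): move_motor(0, +1, time=0.5) writes
+    # b'aaaaa' to the serial port, i.e. five ideally-100-ms pulses in
+    # motor 0's + direction.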
+
+ def get_sensor(self, i_sensor):
+ '''
+        Yet another way to read the angle pair, one sensor at a time.
+ '''
+ angles = self.get_latest()
+ return angles[i_sensor]
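+
+
+# A minimal usage sketch (assuming an Arduino on the default port COM4,
+# running arduino/angle_sensors/angle_sensors.ino):
+#
+#     reader = ArduinoReader()
+#     horizontal, vertical = reader.read_angles()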
+
diff --git a/gonio-imsoft/gonioimsoft/camera_client.py b/gonio-imsoft/gonioimsoft/camera_client.py
new file mode 100755
index 0000000..eedd543
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/camera_client.py
@@ -0,0 +1,156 @@
+'''
+Camera client software.
+
+Meant to be run with Python 3.
+'''
+
+import socket
+import time
+import os
+import subprocess
+import platform
+
+from gonioimsoft.directories import CODE_ROOTDIR
+import gonioimsoft.camera_communication as cac
+
+MAX_RETRIES = 100
+RETRY_INTERVAL = 1
+
+class CameraClient:
+ '''
+ Connecting to the CameraServer and sending imaging commands.
+
+ No data is transmitted over the socket connection, only commands (strings).
+ It's CameraServer's job to store the images.
+
+ Attributes
+ -----------
+ python2 : string
+        Command used to invoke Python 2. If an empty string, the default
+        "C:\Python27\python.exe" (Windows) or "python2" (other) is used.
+ '''
+ def __init__(self):
+ '''
+ Initialization of the CameraClient
+ '''
+ self.host = cac.SERVER_HOSTNAME
+ self.port = cac.PORT
+
+ self._python2 = ''
+
+ @property
+ def python2(self):
+ if self._python2:
+ cmd = self._python2
+ else:
+ if platform.system() == 'Windows':
+                cmd = r'C:\Python27\python.exe'
+ else:
+ cmd = 'python2'
+ return cmd
+
+ @python2.setter
+ def python2(self, string):
+ self._python2 = string
+
+
+ def sendCommand(self, command_string, retries=MAX_RETRIES):
+ '''
+ Send an arbitrary command to the CameraServer.
+ All the methods of the Camera class (see camera_server.py) are supported.
+
+        INPUT ARGUMENTS     DESCRIPTION
+        command_string      function;param1:param2:... (colon-separated)
+                            For example "acquireSingle;True:subdir"
+
+        This is where a socket connection to the server is formed. After
+        the command_string has been sent, the socket terminates.
+ '''
+
+ tries = 0
+
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+
+ while True:
+ try:
+ s.connect((self.host, self.port))
+ break
+ except ConnectionRefusedError:
+ tries += 1
+ if tries > retries:
+ raise ConnectionRefusedError('Cannot connect to the camera server')
+ print('Camera server connection refused, retrying...')
+ time.sleep(RETRY_INTERVAL)
+
+ s.sendall(command_string.encode())
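+
+    # For example (an illustrative call): sendCommand('ping;hello') makes
+    # the server print 'hello' through its ping handler.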
+
+
+ def acquireSeries(self, exposure_time, image_interval, N_frames, label, subdir, trigger_direction):
+ '''
+ Acquire a time series of images.
+ For more see camera_server.py.
+
+        Notice that it is important to give a new label every time,
+        or to change the data saving directory; otherwise images may
+        be overwritten (or an error raised).
+ '''
+ function = 'acquireSeries;'
+ parameters = "{}:{}:{}:{}:{}:{}".format(exposure_time, image_interval, N_frames, label, subdir, trigger_direction)
+ message = function+parameters
+
+ self.sendCommand(message)
+
+
+ def acquireSingle(self, save, subdir):
+ self.sendCommand('acquireSingle;{}:{}'.format(str(save), subdir))
+
+
+ def setSavingDirectory(self, saving_directory):
+ self.sendCommand('setSavingDirectory;'+saving_directory)
+
+
+ def saveDescription(self, filename, string):
+ self.sendCommand('saveDescription;'+filename+':'+string)
+
+ def set_roi(self, roi):
+ self.sendCommand('set_roi;{}:{}:{}:{}'.format(*roi))
+
+ def set_save_stack(self, boolean):
+ self.sendCommand('set_save_stack;{}'.format(boolean))
+
+ def isServerRunning(self):
+ try:
+ self.sendCommand('ping;Client wants to know if server is running', retries=0)
+ except ConnectionRefusedError:
+ return False
+ return True
+
+
+ def startServer(self):
+ '''
+ Start a local camera server instance.
+ '''
+
+ subprocess.Popen([self.python2, os.path.join(CODE_ROOTDIR, 'camera_server.py')],
+ stdout=open(os.devnull, 'w'))
+
+
+ def close_server(self):
+ '''
+ Sends an exit message to the server, to which the server should respond
+ by closing itself down.
+ '''
+ try:
+ self.sendCommand('exit;'+'None', retries=0)
+ except ConnectionRefusedError:
+ pass
+
+
+
+def test():
+ cam = CameraClient()
+    cam.acquireSeries(0.01, 0, 5, 'test', '', 'send')
+
+
+if __name__ == "__main__":
+ test()
diff --git a/gonio-imsoft/gonioimsoft/camera_communication.py b/gonio-imsoft/gonioimsoft/camera_communication.py
new file mode 100644
index 0000000..5fcf039
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/camera_communication.py
@@ -0,0 +1,18 @@
+'''
+Common settings for camera_server.py (Python 2) and camera_client.py (Python 3)
+'''
+
+# Hostname (or IP address) of the server. This is the address that the client
+# tries to connect to
+SERVER_HOSTNAME = '127.0.0.1'
+
+PORT = 50075
+
+# If for some reason the default port is in use / does not work,
+# try in order some other from this list
+# FIXME NOT IMPLEMENTED YET
+# ALTERNATIVE_PORTS = list(range(50071, 50080))
+
+
+
+SAVING_DRIVE = 'D:\\'
diff --git a/gonio-imsoft/gonioimsoft/camera_server.py b/gonio-imsoft/gonioimsoft/camera_server.py
new file mode 100755
index 0000000..49cf496
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/camera_server.py
@@ -0,0 +1,525 @@
+'''
+Image acquisition using Micro-Manager's Python (2) bindings (Camera class)
+and a server program (CameraServer class).
+
+On Windows, MM builds come compiled with Python 2 support only, so in this
+solution a Python 2 server program controls the camera and image saving,
+and the client end can be run with Python 3.
+'''
+
+import os
+import time
+import datetime
+import socket
+import threading
+import multiprocessing
+
+import MMCorePy
+import tifffile
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib.animation import FuncAnimation
+from matplotlib.widgets import RectangleSelector
+
+import camera_communication as cac
+from camera_communication import SAVING_DRIVE
+
+DEFAULT_SAVING_DIRECTORY = r"D:\imaging_data"
+
+
+
+class ImageShower:
+ '''
+    Showing images on the screen in its own window.
+
+ In future, may be used to select ROIs as well to allow
+ higher frame rate imaging / less data.
+
+ ------------------
+ Working principle
+ ------------------
+ Image shower works so that self.loop is started as a separate process
+    using the multiprocessing library.
+
+ -------
+ Methods
+ -------
+ self.loop Set this as multiprocessing target
+
+ '''
+ def __init__(self):
+ self.fig = plt.figure()
+ self.ax = self.fig.add_subplot(111)
+ self.close = False
+
+ #self.cid = self.fig.canvas.mpl_connect('key_press_event', self.callbackButtonPressed)
+
+ self.image_brightness = 0
+ self.image_maxval = 1
+
+ self.selection = None
+
+ self.image_size = None
+
+    def callbackButtonPressed(self, event):
+        # Note: this handler is not connected by default (see the
+        # commented-out mpl_connect call in __init__)
+        if event.key == 'r':
+            self.image_maxval -= 0.05
+            self._updateImage(0)
+
+        elif event.key == 't':
+            self.image_maxval += 0.05
+            self._updateImage(0)
+
+
+
+ def __onSelectRectangle(self, eclick, erelease):
+
+ # Get selection box coordinates and set the box inactive
+ x1, y1 = eclick.xdata, eclick.ydata
+ x2, y2 = erelease.xdata, erelease.ydata
+ #self.rectangle.set_active(False)
+
+ x = int(min((x1, x2)))
+ y = int(min((y1, y2)))
+ width = int(abs(x2-x1))
+ height = int(abs(y2-y1))
+
+ self.selection = [x, y, width, height]
+
+ def _updateImage(self, i):
+
+ data = None
+ while not self.queue.empty():
+ # Get the latest image in the queue
+ data = self.queue.get(True, timeout=0.01)
+ if data is None:
+ return self.im, ''
+ elif data == 'close':
+ self.close = True
+ return self.im, ''
+
+ if self.selection and data.size != self.image_size:
+ self.selection = None
+
+ if self.selection:
+ x,y,w,h = self.selection
+ if w<1 or h<1:
+ # If selection box empty (accidental click on the image)
+ # use the whole image instead
+ inspect_area = data
+ else:
+ inspect_area = data[y:y+h, x:x+w]
+ else:
+ inspect_area = data
+
+
+ per95 = np.percentile(inspect_area, 95)
+ data = np.clip(data, np.percentile(inspect_area, 5), per95)
+
+ data = data - np.min(data)
+ data_max = np.max(data)
+ data = data.astype(float)/data_max
+
+ self.image_size = data.size
+
+
+ self.im.set_array(data)
+ self.fig.suptitle('Selection 95th percentile: {}'.format(per95), fontsize=10)
+ text = ''
+ return self.im, text
+
+
+ def loop(self, queue):
+ '''
+ Runs the ImageShower by reading images from the given queue.
+ Set this as a multiprocessing target.
+
+ queue Multiprocessing queue with a get method.
+ '''
+ self.queue = queue
+ self.rectangle = RectangleSelector(self.ax, self.__onSelectRectangle, useblit=True)
+
+ image = queue.get()
+ self.im = plt.imshow(1000*image/np.max(image), cmap='gray', vmin=0, vmax=1, interpolation='none', aspect='auto')
+ self.ani = FuncAnimation(plt.gcf(), self._updateImage, frames=range(100), interval=5, blit=False)
+
+ plt.show(block=False)
+
+ while not self.close:
+ plt.pause(1)
+
+
+class DummyCamera:
+ '''
+ A dummy camera class, used when unable to load the real Camera class
+    due to the camera being off or something similar.
+ '''
+ def acquire_single(self, save, subdir):
+ pass
+ def acquire_series(self, exposure_time, image_interval, N_frames, label, subdir, trigger_direction):
+ pass
+    def save_images(self, images, label, metadata, savedir):
+ pass
+ def set_saving_directory(self, saving_directory):
+ pass
+ def set_binning(self, binning):
+ pass
+ def save_description(self, filename, string):
+ pass
+ def close(self):
+ pass
+
+
+class Camera:
+ '''
+ Controlling ORCA FLASH 4.0 camera using Micro-Manager's
+ Python (2) bindings.
+ '''
+
+ def __init__(self, saving_directory=DEFAULT_SAVING_DIRECTORY):
+
+ self.set_saving_directory(saving_directory)
+
+ self.mmc = MMCorePy.CMMCore()
+ self.mmc.loadDevice('Camera', 'HamamatsuHam', 'HamamatsuHam_DCAM')
+ self.mmc.initializeAllDevices()
+ self.mmc.setCameraDevice('Camera')
+
+ self.settings = {'binning': '1x1'}
+
+ self.mmc.prepareSequenceAcquisition('Camera')
+ #self.mmc.setCircularBufferMemoryFootprint(4000)
+ self.live_queue= False
+
+ self.shower = ImageShower()
+
+ # Description file string
+ self.description_string = ''
+
+ self.save_stack = False
+
+
+
+ def acquire_single(self, save, subdir):
+ '''
+ Acquire a single image.
+
+ save 'True' or 'False'
+ subdir Subdirectory for saving
+ '''
+
+ exposure_time = 0.01
+ binning = '2x2'
+
+ self.set_binning(binning)
+ self.mmc.setExposure(exposure_time*1000)
+
+ start_time = str(datetime.datetime.now())
+
+ self.mmc.snapImage()
+ image = self.mmc.getImage()
+
+ if not self.live_queue:
+ self.live_queue = multiprocessing.Queue()
+ self.live_queue.put(image)
+
+ self.livep = multiprocessing.Process(target=self.shower.loop, args=(self.live_queue,))
+ self.livep.start()
+
+ self.live_queue.put(image)
+
+ if save == 'True':
+ metadata = {'exposure_time_s': exposure_time, 'binning': binning, 'function': 'acquireSingle', 'start_time': start_time}
+
+ save_thread = threading.Thread(target=self.save_images,args=([image],'snap_{}'.format(start_time.replace(':','.').replace(' ','_')), metadata,os.path.join(self.saving_directory, subdir)))
+ save_thread.start()
+
+
+
+ def acquire_series(self, exposure_time, image_interval, N_frames, label, subdir, trigger_direction):
+ '''
+ Acquire a series of images
+
+ exposure_time How many seconds to expose each image
+ image_interval How many seconds to wait in between the exposures
+ N_frames How many images to take
+ label Label for saving the images (part of the filename later)
+ subdir
+ trigger_direction "send" (camera sends a trigger pulse when it's ready) or "receive" (camera takes an image for every trigger pulse)
+ '''
+
+ exposure_time = float(exposure_time)
+ image_interval = float(image_interval)
+ N_frames = int(N_frames)
+ label = str(label)
+
+        print "Now acquire_series with label " + label
+ print "- IMAGING PARAMETERS -"
+ print " exposure time " + str(exposure_time) + " seconds"
+ print " image interval " + str(image_interval) + " seconds"
+ print " N_frames " + str(N_frames)
+ print "- CAMERA SETTINGS"
+
+ self.set_binning('2x2')
+ print " Pixel binning 2x2"
+
+ if trigger_direction == 'send':
+ print " Camera sending a trigger pulse"
+ self.mmc.setProperty('Camera', "OUTPUT TRIGGER KIND[0]","EXPOSURE")
+ self.mmc.setProperty('Camera', "OUTPUT TRIGGER POLARITY[0]","NEGATIVE")
+        elif trigger_direction == 'receive':
+            print " Camera receiving / waiting for a trigger pulse"
+ self.mmc.setProperty('Camera', "TRIGGER SOURCE","EXTERNAL")
+ self.mmc.setProperty('Camera', "TriggerPolarity","POSITIVE")
+ else:
+ raise ValueError('trigger_direction has to be {} or {}, not {}'.format('receive', 'send', trigger_direction))
+
+
+ print "Circular buffer " + str(self.mmc.getCircularBufferMemoryFootprint()) + " MB"
+
+ self.mmc.setExposure(exposure_time*1000)
+
+
+ self.wait_for_client()
+
+
+ start_time = str(datetime.datetime.now())
+ self.mmc.startSequenceAcquisition(N_frames, image_interval, False)
+
+
+ while self.mmc.isSequenceRunning():
+ time.sleep(exposure_time)
+
+ images = []
+
+ for i in range(N_frames):
+ while True:
+ try:
+ image = self.mmc.popNextImage()
+ break
+ except MMCorePy.CMMError:
+ time.sleep(exposure_time)
+
+ images.append(image)
+
+
+ metadata = {'exposure_time_s': exposure_time, 'image_interval_s': image_interval,
+ 'N_frames': N_frames, 'label': label, 'function': 'acquireSeries', 'start_time': start_time}
+ metadata.update(self.settings)
+
+ save_thread = threading.Thread(target=self.save_images, args=(images,label,metadata,os.path.join(self.saving_directory, subdir)))
+ save_thread.start()
+
+ self.mmc.setProperty('Camera', "TRIGGER SOURCE","INTERNAL")
+ print('acquired')
+
+
+ def save_images(self, images, label, metadata, savedir):
+ '''
+ Save given images as grayscale tiff images.
+ '''
+ if not os.path.isdir(savedir):
+ os.makedirs(savedir)
+
+ if self.save_stack == False:
+ # Save separate images
+ for i, image in enumerate(images):
+ fn = '{}_{}.tiff'.format(label, i)
+ tifffile.imsave(os.path.join(savedir, fn), image, metadata=metadata)
+ else:
+ # Save a stack
+ fn = '{}_stack.tiff'.format(label)
+ tifffile.imsave(os.path.join(savedir, fn), np.asarray(images), metadata=metadata)
+
+ self.save_description(os.path.join(savedir, 'description'), self.description_string, internal=True)
+
+
+ def set_saving_directory(self, saving_directory):
+ '''
+ Sets where the specimen folders are saved and if the directory
+ does not yet exist, creates it.
+ '''
+ saving_directory = os.path.join(SAVING_DRIVE, saving_directory)
+ if not os.path.isdir(saving_directory):
+ os.makedirs(saving_directory)
+
+ self.saving_directory = saving_directory
+
+
+ def set_save_stack(self, boolean):
+ '''
+ If boolean == "True", save images as stacks instead of separate images.
+ '''
+ if boolean == 'True':
+ self.save_stack = True
+ elif boolean == 'False':
+ self.save_stack = False
+ else:
+            print("Did not understand whether to save stacks. Given: {}".format(boolean))
+
+ def set_binning(self, binning):
+ '''
+ Binning '2x2' for example.
+ '''
+ if not self.settings['binning'] == binning:
+ self.mmc.setProperty('Camera', 'Binning', binning)
+ self.settings['binning'] = binning
+
+ def set_roi(self, x,y,w,h):
+        '''
+        Set the region of interest, in binned pixels.
+        roi (x,y,w,h); w == 0 or h == 0 clears the ROI
+        '''
+ x = int(x)
+ y = int(y)
+ w = int(w)
+ h = int(h)
+
+ if w == 0 or h==0:
+ self.mmc.clearROI()
+ else:
+ self.mmc.setROI(x,y,w,h)
+
+
+ def save_description(self, specimen_name, desc_string, internal=False):
+ '''
+ Allows saving a small descriptive text file into the main saving directory.
+ Filename should be the same as the folder where it's saved.
+
+        Overwrites any previous description file.
+
+ specimen_name DrosoM42 for example, name of the specimen folder
+ desc_string String, what to write in the file
+ internal If true, specimen_name becomes filename of the file
+ '''
+ if internal:
+ fn = specimen_name
+ else:
+ fn = os.path.join(self.saving_directory, specimen_name, specimen_name)
+
+        # Create the containing folder if it does not exist yet
+        if not os.path.exists(os.path.dirname(fn)):
+            os.makedirs(os.path.dirname(fn))
+
+        with open(fn+'.txt', 'w') as fp:
+            fp.write(desc_string)
+
+        print("Wrote file " + fn + '.txt')
+
+
+ self.description_string = desc_string
+
+
+ def close(self):
+ self.live_queue.put('close')
+ self.lifep.join()
+
+ def wait_for_client(self):
+ pass
+
+
+class CameraServer:
+    '''
+    The camera server listens for incoming connections and
+    controls the camera through the Camera class.
+    '''
+ def __init__(self):
+
+ PORT = cac.PORT
+ HOST = '' # This is not cac.SERVER_HOSTNAME, leave empty
+
+ self.running = False
+
+ self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.socket.bind((HOST, PORT))
+ self.socket.listen(1)
+
+ try:
+ self.cam = Camera()
+ self.cam.wait_for_client = self.wait_for_client
+        except Exception as e:
+            print(e)
+            print("Using DUMMY camera instead")
+            self.cam = DummyCamera()
+
+ self.functions = {'acquireSeries': self.cam.acquire_series,
+ 'setSavingDirectory': self.cam.set_saving_directory,
+ 'acquireSingle': self.cam.acquire_single,
+ 'saveDescription': self.cam.save_description,
+ 'set_roi': self.cam.set_roi,
+ 'set_save_stack': self.cam.set_save_stack,
+ 'ping': self.ping,
+ 'exit': self.stop}
+
+    def ping(self, message):
+        print(message)
+
+
+
+ def wait_for_client(self):
+ '''
+ Waits until client confirms that it is ready
+ '''
+ conn, addr = self.socket.accept()
+ string = ''
+ while True:
+ data = conn.recv(1024)
+ if not data: break
+ string += data
+ conn.close()
+        print("Client ready")
+
+
+
+ def run(self):
+ '''
+ Loop waiting for incoming connections.
+ Each established connection can give one command and then the connection
+ is closed.
+ '''
+ self.running = True
+ while self.running:
+ conn, addr = self.socket.accept()
+ string = ''
+ while True:
+ data = conn.recv(1024)
+ if not data: break
+ string += data
+
+ conn.close()
+            print('Received command "'+string+'" at time '+str(time.time()))
+            if string:
+                func, parameters = string.split(';')
+                parameters = parameters.split(':')
+                self.functions[func](*parameters)
+
+    def stop(self, placeholder):
+        '''
+        Stop running the camera server.
+        '''
+        self.cam.close()
+        self.running = False
+
+
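+# A minimal client-side sketch of the wire protocol that CameraServer.run()
+# implements: each connection carries a single "function;param1:param2:..."
+# string and the connection is closed right after. The host value below is
+# an assumption for illustration; cac.PORT is the shared port constant.
+#
+#   import socket
+#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+#   s.connect(('localhost', cac.PORT))
+#   s.sendall('ping;hello')
+#   s.close()
+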
+def test_camera():
+    cam = Camera()
+    images = cam.acquire_series(0.01, 1, 5, 'testing')
+
+ for image in images:
+ plt.imshow(image, cmap='gray')
+ plt.show()
+
+
+
+def run_server():
+ '''
+ Running the server.
+ '''
+ cam_server = CameraServer()
+ cam_server.run()
+
+
+if __name__ == "__main__":
+ run_server()
diff --git a/gonio-imsoft/gonioimsoft/core.py b/gonio-imsoft/gonioimsoft/core.py
new file mode 100644
index 0000000..9435559
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/core.py
@@ -0,0 +1,607 @@
+'''
+Pupil Imsoft's core module; Classes for static and dynamic imaging
+'''
+
+import os
+import sys
+import time
+import datetime
+import copy
+
+import numpy as np
+
+try:
+ import nidaqmx
+except ModuleNotFoundError:
+ nidaqmx = None
+
+from gonioimsoft.anglepairs import saveAnglePairs, loadAnglePairs, toDegrees
+from gonioimsoft.arduino_serial import ArduinoReader
+from gonioimsoft.camera_client import CameraClient
+from gonioimsoft.camera_communication import SAVING_DRIVE
+from gonioimsoft.motors import Motor
+from gonioimsoft.imaging_parameters import (
+ DEFAULT_DYNAMIC_PARAMETERS,
+ load_parameters,
+ getModifiedParameters)
+from gonioimsoft.stimulus import StimulusBuilder
+import gonioimsoft.macro as macro
+
+
+
+class Dynamic:
+ '''
+ Dynamic imaging procedure.
+ '''
+
+
+ def __init__(self, dynamic_parameters=DEFAULT_DYNAMIC_PARAMETERS):
+ '''
+ Sets up ArduinoReader, CameraClient/Server.
+ '''
+
+ # Angle pairs reader (rotary encoder values)
+ self.reader = ArduinoReader()
+
+ # Initiate camera client/server
+ self.camera = CameraClient()
+ if not self.camera.isServerRunning():
+ print('Camera server not running')
+ # self.camera.startServer()
+
+        # Details about the preparation (name, sex, age) are stored here
+ self.preparation = {'name': 'test', 'sex': '', 'age': ''}
+
+ self.dynamic_parameters = dynamic_parameters
+ self.locked_parameters = {}
+
+ self.previous_angle = None
+
+ # Suffix to be appended in the subfolders, see set_subfolder_suffix method
+ self.suffix = ''
+
+        # Set up the motors as follows:
+ # 0) Horizontal + sensor
+ # 1) Vertical + sensor
+ # 2) Microscope focus (no sensor)
+ self.motors = []
+ for i_motor, i_sensor in zip([0,1,2],[0,1,None]):
+ self.motors.append(Motor(self.reader, i_motor, i_sensor))
+
+
+        # Macro imaging: Automatically move motors and take images.
+        # self.macro is a list of anglepairs to move the horizontal/vertical
+        # motors to, take-image "commands" (the string 'image') and other
+        # functions. For more details, see the tick method of this class.
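+        # Example macro (illustrative):
+        #   [(10, 0), 'wait 1.0', (20, 0)]
+        # moves to (10, 0), waits one second, then moves to (20, 0).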
+ self.macro = None
+ self.i_macro = 0
+ self.waittime = 0
+
+ self.trigger_rotation = False
+
+ self.trigger_signal = np.array([5,5,5,0])
+ self.triggered_anglepairs = None
+
+ self.data_savedir = None
+
+
+ def analog_output(self, channels, stimuli, fs, wait_trigger, camera=True):
+        '''
+        Write stimuli to analog output channels and wait until done.
+
+        channels        List of channel names; an entry can also be a list of
+                        subchannel names that share the same stimulus
+        stimuli         List of 1D numpy arrays, one per entry in channels
+        fs              Sampling frequency of the stimuli
+        wait_trigger : bool
+            If True, start the task only on a rising edge on /Dev1/PFI0
+        camera : bool
+            If True, send the camera server a "ready" command
+        '''
+
+        if nidaqmx is None:
+            print(' pretending analog_output on channels {}'.format(channels))
+            return None
+
+        with nidaqmx.Task() as task:
+            # Expand grouped channels; a list of subchannels shares one stimulus
+            expanded_stimuli = []
+            for channel, stim in zip(channels, stimuli):
+                if isinstance(channel, str):
+                    task.ao_channels.add_ao_voltage_chan(channel)
+                    expanded_stimuli.append(stim)
+                else:
+                    for subchan in channel:
+                        task.ao_channels.add_ao_voltage_chan(subchan)
+                        expanded_stimuli.append(stim)
+            stimuli = expanded_stimuli
+
+ task.timing.cfg_samp_clk_timing(float(fs), samps_per_chan=len(stimuli[0]))
+
+
+ if len(stimuli) > 1:
+ stimulus = stimuli[0]
+ for s in stimuli[1:]:
+ stimulus = np.vstack((stimulus, s))
+ else:
+ stimulus = stimuli[0]
+
+ task.write(stimulus)
+
+ if wait_trigger:
+ task.triggers.start_trigger.cfg_dig_edge_start_trig("/Dev1/PFI0", trigger_edge=nidaqmx.constants.Edge.RISING)
+
+
+ task.start()
+
+ if camera:
+ self.camera.sendCommand('ready')
+
+ task.wait_until_done(timeout=(len(stimuli[0])/fs)*1.5+20)
+
+
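+    # Example call of analog_output (channel names are illustrative); the
+    # grouped IR channels share the same waveform:
+    #   self.analog_output(['Dev1/ao0', ['Dev2/ao0', 'Dev2/ao1']],
+    #                      [stimulus, irwave], fs=1000, wait_trigger=True)
+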
+ def send_trigger(self):
+ '''
+        Send a short trigger pulse (3 samples high at 5 V, then 0 V, at 1 kHz)
+        on trigger_out_channel and record the current angle pair.
+ '''
+ chan = self.dynamic_parameters.get('trigger_out_channel', None)
+ if chan:
+ with nidaqmx.Task() as task:
+ task.ao_channels.add_ao_voltage_chan(chan)
+ task.timing.cfg_samp_clk_timing(1000., samps_per_chan=4)
+ task.write(self.trigger_signal)
+ task.start()
+ task.wait_until_done(timeout=1.)
+ else:
+ print('trigger_out_channel not specified, no trigger out')
+
+ self.triggered_anglepairs.append(self.reader.get_latest())
+
+
+ def set_led(self, device, value, wait_trigger=False):
+ '''
+ Set an output channel to a specific voltage value.
+
+ INPUT ARGUMENTS DESCRIPTION
+ device A string (single device) or a list of strings (many devices at once)
+ '''
+ with nidaqmx.Task() as task:
+
+            if isinstance(device, str):
+ # If there's only a single device
+ task.ao_channels.add_ao_voltage_chan(device)
+ else:
+ # If device is actually a list of devices
+ for dev in device:
+ task.ao_channels.add_ao_voltage_chan(dev)
+ value = [value for i in range(len(device))]
+
+ if wait_trigger:
+ task.timing.cfg_samp_clk_timing(10000)
+ task.triggers.start_trigger.cfg_dig_edge_start_trig("/Dev1/PFI0", trigger_edge=nidaqmx.constants.Edge.FALLING)
+ task.write(value)
+
+
+
+ def wait_for_trigger(self):
+ '''
+ Doesn't return until trigger signal is received.
+ '''
+ with nidaqmx.Task() as task:
+            task.ai_channels.add_ai_voltage_chan('Dev1/ai0')
+
+ task.timing.cfg_samp_clk_timing(10000)
+
+ task.triggers.start_trigger.cfg_dig_edge_start_trig("/Dev1/PFI0", trigger_edge=nidaqmx.constants.Edge.RISING)
+ task.read(number_of_samples_per_channel=1)
+
+
+
+ #
+ # IMAGING METHODS
+ #
+
+ def take_snap(self, save=False):
+ '''
+ Takes a snap image.
+
+ save Whether to save the image directly or not.
+ '''
+ if save:
+ self.set_led(self.dynamic_parameters['ir_channel'], self.dynamic_parameters['ir_imaging'])
+ time.sleep(0.3)
+ self.camera.acquireSingle(True, os.path.join(self.preparation['name'], 'snaps'))
+ self.set_led(self.dynamic_parameters['ir_channel'], self.dynamic_parameters['ir_livefeed'])
+ time.sleep(0.2)
+
+ print('A snap image taken')
+ else:
+ self.camera.acquireSingle(False, '')
+ time.sleep(0.1)
+
+
+
+
+    def image_trigger_softhard(self, dynamic_parameters, builder, label, N_frames, image_directory, set_led=True):
+        '''
+        For dynamic imaging of pseudopupil movements.
+
+        How this works:
+        CameraClient (self.camera) sends a message to CameraServer to start image
+        acquisition. Starting image acquisition takes some time, so to synchronize,
+        we wait for a trigger signal from the camera before turning the LED on.
+        This is essentially half-software, half-hardware triggering.
+        Not ideal but seems to work.
+        '''
+
+        self.set_led(dynamic_parameters['ir_channel'], dynamic_parameters['ir_imaging'])
+        time.sleep(0.5)
+
+        self.camera.acquireSeries(dynamic_parameters['frame_length'], 0, N_frames, label, image_directory, 'send')
+
+        self.wait_for_trigger()
+        time.sleep(dynamic_parameters['pre_stim'])
+
+        # The stimulus intensity of the current repeat is carried by the builder
+        self.set_led(dynamic_parameters['flash_channel'], builder.stimulus_intensity)
+        time.sleep(dynamic_parameters['stim'])
+
+        self.set_led(dynamic_parameters['flash_channel'], dynamic_parameters['flash_off'])
+
+        time.sleep(dynamic_parameters['post_stim'])
+
+        if set_led:
+            self.set_led(dynamic_parameters['ir_channel'], dynamic_parameters['ir_waiting'])
+
+
+    def image_trigger_hard_cameraslave(self, dynamic_parameters, builder, label, N_frames, image_directory, set_led=True):
+        '''
+        Where the camera is triggered by a square wave.
+        Since the camera cannot be run this way at 100 Hz at full frame,
+        image_trigger_hard_cameramaster is used instead.
+        '''
+
+        stimulus = builder.get_stimulus_pulse()
+        illumination = builder.get_illumination()
+        camera = builder.get_camera()
+
+        self.camera.acquireSeries(dynamic_parameters['frame_length'], 0, N_frames, label, image_directory, 'receive')
+
+        time.sleep(5)
+
+        self.analog_output([dynamic_parameters['flash_channel'],
+                dynamic_parameters['ir_channel'],
+                dynamic_parameters['trigger_channel']],
+                [stimulus, illumination, camera], builder.fs, wait_trigger=False)
+
+
+
+ def image_series(self, trigger='hard_cameramaster', inter_loop_callback=None):
+ '''
+        Contains the common steps of all imaging protocols.
+
+        trigger         "softhard", "hard_cameraslave" or "hard_cameramaster"
+
+        Returns True if finished properly and False if the user cancelled.
+ '''
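+        # Example (illustrative): run the default hardware-master protocol,
+        # polling a callback between repeats:
+        #   self.image_series(trigger='hard_cameramaster',
+        #                     inter_loop_callback=lambda label, i: True)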
+
+
+
+ exit_imaging = False
+
+ print('Starting dynamic imaging using {} triggering'.format(trigger))
+ if trigger == 'softhard':
+ imaging_function = self.image_trigger_softhard
+ elif trigger == 'hard_cameraslave':
+ imaging_function = self.image_trigger_hard_cameraslave
+ elif trigger == 'hard_cameramaster':
+ imaging_function = self.image_trigger_hard_cameramaster
+
+ # Wait ISI period here, if it has not passed since the last series imaging
+ # Otherwise, continue.
+ try:
+ if time.time() < self.isi_slept_time:
+                print('Waiting for the ISI to be fulfilled from the last run...')
+ time.sleep(self.isi_slept_time - time.time())
+ print('READY')
+ except AttributeError:
+ pass
+
+ dynamic_parameters = copy.deepcopy(self.dynamic_parameters)
+
+ # Check that certain variables are actually lists (used for intensity series etc.)
+ # Check also for correct length if a list
+ for param in ['isi', 'flash_on']:
+            if not isinstance(dynamic_parameters[param], list):
+ dynamic_parameters[param] = [dynamic_parameters[param]] * dynamic_parameters['repeats']
+
+ elif len(dynamic_parameters[param]) != int(dynamic_parameters['repeats']):
+ print('Warning! Dynamic parameter {} length is {} but repeats is set to {}'.format(param,
+ len(dynamic_parameters[param]), dynamic_parameters['repeats'] ))
+ dynamic_parameters[param] = [dynamic_parameters[param][0]] * dynamic_parameters['repeats']
+
+ # Set stack save option
+ self.camera.set_save_stack(dynamic_parameters.get('save_stack', False))
+
+
+ # Get the current rotation stage angles and use this through the repeating
+ # (even if it would change during imaging)
+ imaging_angle = self.reader.get_latest()
+
+ # Prepare some variables that stay constant over imaging
+ image_directory = os.path.join(self.preparation['name'], 'pos{}{}'.format(imaging_angle, dynamic_parameters['suffix']+self.suffix))
+ N_frames = int((dynamic_parameters['pre_stim']+dynamic_parameters['stim']+dynamic_parameters['post_stim'])/dynamic_parameters['frame_length'])
+
+
+ for i in range(dynamic_parameters['repeats']):
+
+ label = 'im_pos{}_rep{}'.format(imaging_angle, i)
+
+ # INTER_LOOP_CALLBACK for showing info to the user and for exiting
+ if callable(inter_loop_callback) and inter_loop_callback(label, i) == False:
+ exit_imaging = True
+ if exit_imaging:
+ break
+
+ fs = 1000
+ builder = StimulusBuilder(dynamic_parameters['stim'],
+ dynamic_parameters['pre_stim'], dynamic_parameters['post_stim'],
+ dynamic_parameters['frame_length'], dynamic_parameters['flash_on'][i],
+ dynamic_parameters['ir_imaging'], fs,
+ stimulus_finalval=dynamic_parameters['flash_off'],
+ illumination_finalval=dynamic_parameters['ir_waiting'],
+ wtype=dynamic_parameters['flash_type'])
+
+ if dynamic_parameters.get('biosyst_stimulus', ''):
+ bsstim, fs = builder.overload_biosyst_stimulus(dynamic_parameters['biosyst_stimulus'], dynamic_parameters['biosyst_channel'])
+ N_frames = int(round((len(bsstim)/fs) / dynamic_parameters['frame_length']))
+
+ if i==0 and dynamic_parameters['avgint_adaptation']:
+ self.set_led(dynamic_parameters['flash_channel'], np.mean(builder.get_stimulus_pulse()))
+ time.sleep(dynamic_parameters['avgint_adaptation'])
+
+ imaging_function(dynamic_parameters, builder, label, N_frames, image_directory, set_led=bool(dynamic_parameters['isi'][i]))
+
+ # Wait the total imaging period; If ISI is short and imaging period is long, we would
+ # start the second imaging even before the camera is ready
+ # Better would be wait everything clear signal from the camera.
+ #total_imaging_time = dynamic_parameters['pre_stim'] + dynamic_parameters['stim'] + dynamic_parameters['post_stim']
+ #print("Total imaging time " + str(total_imaging_time))
+ # Do the actual waiting together with ISI, see just below
+
+ # WAITING ISI PERIOD
+ if i+1 == dynamic_parameters['repeats']:
+ self.isi_slept_time = time.time() + dynamic_parameters['isi'][i]
+ else:
+ wakeup_time = time.time() + dynamic_parameters['isi'][i] #+ total_imaging_time
+
+ while wakeup_time > time.time():
+ if callable(inter_loop_callback) and inter_loop_callback(None, i) == False:
+ exit_imaging = True
+ break
+ time.sleep(0.01)
+
+ self.set_led(dynamic_parameters['flash_channel'], dynamic_parameters['flash_off'])
+ self.set_led(dynamic_parameters['ir_channel'], dynamic_parameters['ir_livefeed'])
+ print('DONE!')
+
+ if exit_imaging:
+ return False
+ else:
+ return True
+
+
+ def image_trigger_hard_cameramaster(self, dynamic_parameters, builder, label, N_frames, image_directory, set_led=True):
+ '''
+ When starting the imaging, the camera sends a trigger pulse to NI board, leading to onset
+ of the stimulus (hardware triggering by the camera).
+
+ Illumination IR light is hardware triggered together with the stimulus.
+
+ set_led Set IR to ir_waiting in between (if long enough ISI)
+ '''
+ if set_led:
+ self.set_led(dynamic_parameters['ir_channel'], dynamic_parameters['ir_imaging'])
+ time.sleep(0.5)
+
+ fs = builder.fs
+
+ stimulus = builder.get_stimulus_pulse()
+
+ irwave = dynamic_parameters['ir_imaging'] * np.ones(stimulus.shape)
+ if set_led:
+ irwave[-1] = dynamic_parameters['ir_waiting']
+
+
+ self.camera.acquireSeries(dynamic_parameters['frame_length'], 0, N_frames, label, image_directory, 'send')
+
+ self.analog_output([dynamic_parameters['flash_channel'], dynamic_parameters['ir_channel']], [stimulus,irwave], fs, wait_trigger=True)
+
+
+
+
+ def set_savedir(self, savedir, camera=True):
+ '''
+ Set the directory where the taken images are saved.
+
+ camera : bool
+ If False, do not attempt to update save folder to the camera server
+ '''
+ if camera:
+ self.camera.setSavingDirectory(savedir)
+ self.data_savedir = savedir
+
+
+ def set_subfolder_suffix(self, suffix):
+ '''
+ Set any suffix to a data folder containing the images.
+ For example
+ pos(-14,0) would become pos(-14,0)_highmag if suffix == "highmag"
+ '''
+ if suffix:
+ self.suffix = '_'+suffix
+ else:
+ self.suffix = ''
+
+ def _update_descriptions_file(self):
+ # Saving description file
+ desc_string = "name {}\nsex {}\nage {}".format(self.preparation['name'], self.preparation['sex'], self.preparation['age'])
+ desc_string += "\n\n#DYNAMIC PROTOCOL PARAMETERS\n"
+ for name, value in self.dynamic_parameters.items():
+ desc_string += '{} {}\n'.format(name, value)
+ print(desc_string)
+ self.camera.saveDescription(self.preparation['name'], desc_string)
+
+
+ def initialize(self, name, sex, age, camera=False):
+ '''
+ Call this to initialize the experiments.
+
+        name, sex, age      Can be '' (empty string)
+ '''
+ # Preparations, ask droso name
+ if name != '':
+ self.preparation['name'] = name
+ if sex != '':
+ self.preparation['sex'] = sex
+ if age != '':
+ self.preparation['age'] = age
+
+ self.dynamic_parameters = getModifiedParameters(
+ locked_parameters=self.locked_parameters)
+ print('Preparation name set as {}, sex {}, age {} days.'.format(self.preparation['name'], self.preparation['sex'], self.preparation['age']))
+
+ if camera:
+ self._update_descriptions_file()
+ else:
+ self.triggered_anglepairs = []
+
+ self.set_led(self.dynamic_parameters['ir_channel'], self.dynamic_parameters['ir_livefeed'])
+ self.set_led(self.dynamic_parameters['flash_channel'], self.dynamic_parameters['flash_off'])
+
+
+ def load_preset(self, preset_name):
+ fn = os.path.join('presets', preset_name)
+ self.dynamic_parameters = {**load_parameters(fn), **self.locked_parameters}
+ self._update_descriptions_file()
+
+
+
+ def tick(self, horizontal_trigger=True, vertical_trigger=False):
+        '''
+        Updates the current angle. In the future it may do other housekeeping
+        functions as well.
+
+        Call this repeatedly in a loop to keep the angles updated.
+        '''
+
+ change = False
+
+ while True:
+
+ # Update current angle and echo it to the console
+ current_angle = [list(self.reader.read_angles())]
+ toDegrees(current_angle)
+
+ if self.previous_angle != current_angle:
+ horchanged = self.previous_angle and self.previous_angle[0][0] != current_angle[0][0]
+ verchanged = self.previous_angle and self.previous_angle[0][1] != current_angle[0][1]
+ if (horizontal_trigger and horchanged) or (vertical_trigger and verchanged):
+ self.trigger_rotation = True
+
+ print("Horizontal-vertical is {}".format(current_angle[0]))
+ self.previous_angle = current_angle
+ change = True
+ else:
+ break
+
+ if not change:
+ self.trigger_rotation = False
+
+ # Run macro if set
+ if self.macro:
+
+ next_macro_step = False
+
+ action = self.macro[self.i_macro]
+ print(action)
+
+
+            if isinstance(action, tuple):
+ # Move motors only if they have reached their positions
+ if all([self.motors[i].reached_target() for i in [0,1]]):
+ self.motors[0].move_to(action[0])
+ self.motors[1].move_to(action[1])
+ next_macro_step = True
+            elif isinstance(action, str) and action.startswith('wait'):
+ self.waittime = time.time() + float(action.split(' ')[-1])
+ next_macro_step = True
+
+ if next_macro_step and self.waittime < time.time():
+ self.i_macro += 1
+ if self.i_macro == len(self.macro):
+ self.macro = None
+ self.i_macro = 0
+
+ def set_zero(self):
+ '''
+ Define the current angle pair as the zero point
+        (like the tare button on scales)
+ '''
+ self.reader.current_as_zero()
+
+
+
+ def finalize(self):
+ '''
+        Finalize the experiments; housekeeping stuff.
+ '''
+ self.set_led(self.dynamic_parameters['ir_channel'], 0)
+ self.set_led(self.dynamic_parameters['flash_channel'], 0)
+
+ for motor in self.motors:
+ motor.move_to(0)
+
+ if self.triggered_anglepairs:
+ fn = os.path.join(SAVING_DRIVE, self.data_savedir, self.preparation['name'], 'anglepairs.txt')
+ os.makedirs(os.path.dirname(fn), exist_ok=True)
+
+ print(fn)
+
+ with open(fn, 'w') as fp:
+ for line in self.triggered_anglepairs:
+ fp.write(str(line).strip('()').replace(' ', '')+'\n')
+
+ self.triggered_anglepairs = None
+
+
+ def exit(self):
+ self.camera.close_server()
+
+ #
+ # CONTROLLING LEDS, MOTORS ETC
+ #
+
+    def move_motor(self, *args, **kwargs):
+        self.reader.move_motor(*args, **kwargs)
+
+
+ #
+ # PROTOCOL QUEUE / MACRO
+ #
+
+
+ @staticmethod
+ def list_macros():
+ return macro.list_macros()
+
+
+ def run_macro(self, macro_name):
+ self.macro = macro.load(macro_name)
+ self.i_macro = 0
+
+
+
+
diff --git a/gonio-imsoft/gonioimsoft/directories.py b/gonio-imsoft/gonioimsoft/directories.py
new file mode 100644
index 0000000..3a439f8
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/directories.py
@@ -0,0 +1,14 @@
+'''
+Places where to save and load temporary files.
+'''
+
+import os
+import platform
+
+CODE_ROOTDIR = os.path.dirname(os.path.realpath(__file__))
+USER_HOMEDIR = os.path.expanduser('~')
+
+if platform.system() == "Windows":
+ PUPILDIR = os.path.join(USER_HOMEDIR, 'GonioImsoft')
+else:
+ PUPILDIR = os.path.join(USER_HOMEDIR, '.gonioimsoft')
diff --git a/gonio-imsoft/gonioimsoft/imaging_parameters.py b/gonio-imsoft/gonioimsoft/imaging_parameters.py
new file mode 100644
index 0000000..9223d6c
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/imaging_parameters.py
@@ -0,0 +1,312 @@
+'''
+Default dynamic parameters and ParameterEditor for letting the user
+modify them in the program.
+'''
+import os
+import time
+import ast
+import json
+
+from gonioimsoft.directories import PUPILDIR
+
+
+DEFAULT_DYNAMIC_PARAMETERS = {'isi': 10.0, 'repeats': 1, 'pre_stim': 0.000,
+ 'stim': 0.200, 'post_stim': 0.00, 'frame_length' : 0.010,
+ 'ir_imaging': 5, 'ir_waiting': 0, 'ir_livefeed': 1,
+ 'flash_on': 8, 'flash_off': 0,
+ 'ir_channel': "Dev1/ao1", 'flash_channel': "Dev1/ao0",
+ 'suffix': '', 'trigger_channel': "/Dev1/PFI0",
+ 'trigger_out_channel': "Dev2/ao0",
+ 'biosyst_stimulus': '',
+ 'biosyst_channel': 2,
+ 'avgint_adaptation': 0,
+ 'flash_type': 'square',
+ 'save_stack': False}
+
+DYNAMIC_PARAMETERS_TYPES = {'seconds': ['isi', 'pre_stim', 'stim', 'post_stim', 'frame_length', 'avgint_adaptation'],
+ 'voltage': ['ir_imaging', 'ir_waiting', 'ir_livefeed', 'flash_on', 'flash_off'],
+ 'channel': ['ir_channel', 'flash_channel', 'trigger_channel', 'trigger_out_channel'],
+ 'integer': ['repeats', 'biosyst_channel'],
+ 'string': ['suffix', 'biosyst_stimulus', 'flash_type'],
+ 'boolean': ['save_stack']}
+
+
+DYNAMIC_PARAMETERS_HELP = {'isi': 'Inter stimulus interval [s]',
+        'repeats': 'How many times the protocol is repeated',
+        'pre_stim': 'How long to image before the pulse [s]',
+        'stim': 'Stimulus (step pulse) length [s]',
+        'post_stim': 'How long to image after the pulse [s]',
+        'frame_length': 'Exposure time / inter-frame interval [s]',
+        'ir_imaging': 'IR brightness during image acquisition',
+        'ir_waiting': 'IR brightness when waiting ISI',
+        'ir_livefeed': 'IR brightness while updating the live image',
+        'flash_on': 'Flash brightness during stim',
+        'flash_off': 'Flash brightness during image acquisition',
+        'ir_channel': 'NI channel for IR',
+        'flash_channel': 'NI channel for Flash',
+        'trigger_channel': 'Trigger receive/in channel',
+        'trigger_out_channel': 'Trigger send/out channel',
+        'suffix': 'Tag added to the saved folders',
+        'biosyst_stimulus': 'Override the square pulse by a Biosyst stimulus',
+        'biosyst_channel': 'Channel of the Biosyst stimulus',
+        'avgint_adaptation': 'Time to show stimulus mean value before imaging [s]',
+        'flash_type': '"square", "sinelogsweep", "squarelogsweep" or "3steplogsweep"',
+        'save_stack': 'If true, save a stack instead of separate images'}
+
+
+def getRightType(parameter_name, string_value):
+    '''
+    Convert a user-input string to the correct parameter value based on
+    DYNAMIC_PARAMETERS_TYPES.
+
+    TODO - channel checking: check that the channel is a proper NI channel
+    '''
+ if parameter_name in DYNAMIC_PARAMETERS_TYPES['integer']:
+ return int(string_value)
+
+
+ if parameter_name in DYNAMIC_PARAMETERS_TYPES['seconds']:
+ if string_value.startswith('[') and string_value.endswith(']'):
+ seconds = ast.literal_eval(string_value)
+
+            for s in seconds:
+                if s < 0:
+                    raise ValueError('Time values have to be non-negative.')
+        else:
+            seconds = float(string_value)
+            if seconds < 0:
+                raise ValueError('Time values have to be non-negative.')
+ return seconds
+
+ if parameter_name in DYNAMIC_PARAMETERS_TYPES['voltage']:
+ if string_value.startswith('[') and string_value.endswith(']'):
+ voltages = ast.literal_eval(string_value)
+ for voltage in voltages:
+ if not -10<=voltage<=10:
+ raise ValueError('Voltage value range -10 to 10 V exceeded.')
+ return voltages
+ else:
+ voltage = float(string_value)
+ if not -10<=voltage<=10:
+ raise ValueError('Voltage value range -10 to 10 V exceeded.')
+ return voltage
+
+ if parameter_name in DYNAMIC_PARAMETERS_TYPES['channel']:
+        if isinstance(string_value, str):
+            if string_value.startswith('[') and string_value.endswith(']'):
+ return ast.literal_eval(string_value)
+ else:
+ return string_value
+
+ if parameter_name in DYNAMIC_PARAMETERS_TYPES['string']:
+ return str(string_value)
+
+ if parameter_name in DYNAMIC_PARAMETERS_TYPES['boolean']:
+ if string_value.lower() == 'true':
+ return True
+ elif string_value.lower() == 'false':
+ return False
+ else:
+            raise ValueError('Boolean value has to be either "True" or "False"')
+
+    raise NotImplementedError('Add {} correctly to DYNAMIC_PARAMETERS_TYPES in imaging_parameters.py'.format(parameter_name))
+
+
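+# Example conversions with getRightType (illustrative):
+#   getRightType('repeats', '5')        -> 5
+#   getRightType('isi', '[1.0, 2.0]')   -> [1.0, 2.0]
+#   getRightType('flash_on', '8')       -> 8.0
+#   getRightType('save_stack', 'true')  -> True
+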
+
+def load_parameters(fn):
+ '''
+ Loading imaging parameters, saved as a json file.
+ '''
+ with open(fn, 'r') as fp:
+ data = json.load(fp)
+ return data
+
+
+def save_parameters(fn, parameters):
+ '''
+    Saving imaging parameters as a json file.
+ '''
+ with open(fn, 'w') as fp:
+ json.dump(parameters, fp)
+
+
+
+class ParameterEditor:
+ '''
+ Dictionary editor on command line with ability to load and save presets.
+ '''
+ def __init__(self, dynamic_parameters, locked_parameters={}):
+ '''
+ dynamic_parameters Dictionary of the dynamic imaging parameters.
+ '''
+ self.dynamic_parameters = dynamic_parameters
+ self.parameter_names = sorted(self.dynamic_parameters.keys())
+
+ self.presets_savedir = os.path.join(PUPILDIR, 'presets')
+ self.presets = self.load_presets(self.presets_savedir)
+
+ self.locked_parameters = locked_parameters
+
+
+ def load_presets(self, directory):
+
+ presets = {}
+
+ if os.path.isdir(directory):
+ files = [os.path.join(directory, fn) for fn in os.listdir(directory)]
+ else:
+ files = []
+
+ for afile in files:
+ try:
+ preset = load_parameters(afile)
+        except Exception:
+ print("Couldn't load preset {}".format(afile))
+ continue
+
+ # If older files lack some parameters, use default parameters
+ for key in self.parameter_names:
+ if not key in preset:
+ preset[key] = DEFAULT_DYNAMIC_PARAMETERS[key]
+
+ presets[os.path.basename(afile)] = preset
+
+ return presets
+
+
+ def print_preset(self, preset):
+ '''
+ Prints the current parameters and the help strings.
+ '''
+
+ parameter_names = sorted(self.dynamic_parameters.keys())
+
+ print()
+
+ print('{:<20} {:<40} {}'.format('PARAMETER NAME', 'VALUE', 'DESCRIPTION'))
+ for parameter in parameter_names:
+ if parameter in self.locked_parameters:
+ lck = ' (LOCKED to {})'.format(self.locked_parameters[parameter])
+ else:
+ lck = ''
+ print('{:<20} {:<40} {}'.format(parameter, str(preset[parameter])+lck,
+ DYNAMIC_PARAMETERS_HELP[parameter]))
+ print()
+
+
+ def getModified(self):
+ '''
+ Ask user to edit the parameters until happy and then return
+ the parameters.
+ '''
+
+ while True:
+ print('MODIFYING IMAGING PARAMETERS')
+ self.print_preset(self.dynamic_parameters)
+ parameter = input('Parameter name or (list/save/load) (Enter to continue) >> ')
+
+ # If breaking free
+ if parameter == '':
+ break
+
+
+ self.presets = self.load_presets(self.presets_savedir)
+
+
+ # If saving preset
+ if parameter.lower() == 'save':
+ name = input('Save current parameters under preset name (if empty == suffix) >> ')
+ if name == '' and self.dynamic_parameters['suffix'] != '':
+ name = self.dynamic_parameters['suffix']
+
+ if os.path.isdir(PUPILDIR):
+ os.makedirs(self.presets_savedir, exist_ok=True)
+ save_parameters(os.path.join(self.presets_savedir, name), self.dynamic_parameters)
+ else:
+ print('Saving the preset failed, {} does not exist'.format(PUPILDIR))
+
+ continue
+
+ if parameter.lower() == 'list':
+                if not self.presets:
+ print('There are no existing presets!')
+ else:
+ print('These are the existing presets:')
+ for preset in self.presets.keys():
+ print(' '+preset)
+ print('')
+ continue
+
+ if parameter.lower() == 'load':
+ # If parameter is actually a preset
+
+ while True:
+
+ preset_names = sorted(self.presets.keys())
+
+ for i, name in enumerate(preset_names):
+ print('{}) {}'.format(i+1, name))
+
+ sel = input('>> ')
+
+ try:
+ to_load = preset_names[int(sel)-1]
+ break
+                    except (ValueError, IndexError):
+ print('Invalid preset.')
+
+ parameter = to_load
+
+ if parameter in self.presets.keys():
+ self.print_preset(self.presets[parameter])
+
+ confirm = input('Load this (y/n)>> ').lower()
+ if confirm and confirm[0] == 'y':
+ self.dynamic_parameters = self.presets[parameter]
+ else:
+ print('Answer not yes, loading of the preset cancelled')
+ continue
+
+
+
+ try:
+ self.dynamic_parameters[parameter]
+ except KeyError:
+ print('Invalid input, not a parameter name')
+ time.sleep(1)
+ continue
+
+ while True:
+
+ value = input('Value for {} >> '.format(parameter))
+
+ if value == '':
+ break
+
+ try:
+ value = getRightType(parameter, value)
+ except ValueError as e:
+ print(str(e))
+                print('Could not convert the value to the right type. Try again or press Enter to skip.')
+ continue
+
+ self.dynamic_parameters[parameter] = value
+ break
+
+ return {**self.dynamic_parameters, **self.locked_parameters}
+
+
+def getModifiedParameters(**kwargs):
+ '''
+    Take the DEFAULT parameters defined at the beginning of this file
+    and let the user modify them using the text-based ParameterEditor.
+ '''
+ editor = ParameterEditor(DEFAULT_DYNAMIC_PARAMETERS, **kwargs)
+ return editor.getModified()
+
+
+
+if __name__ == "__main__":
+ print(getModifiedParameters())
+
+
diff --git a/gonio-imsoft/gonioimsoft/imsoft.cmd b/gonio-imsoft/gonioimsoft/imsoft.cmd
new file mode 100644
index 0000000..d7f5392
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/imsoft.cmd
@@ -0,0 +1 @@
+python tui.py
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/logo.ico b/gonio-imsoft/gonioimsoft/logo.ico
new file mode 100644
index 0000000..806b04e
Binary files /dev/null and b/gonio-imsoft/gonioimsoft/logo.ico differ
diff --git a/gonio-imsoft/gonioimsoft/logo.tif b/gonio-imsoft/gonioimsoft/logo.tif
new file mode 100644
index 0000000..9a10502
Binary files /dev/null and b/gonio-imsoft/gonioimsoft/logo.tif differ
diff --git a/gonio-imsoft/gonioimsoft/macro.py b/gonio-imsoft/gonioimsoft/macro.py
new file mode 100644
index 0000000..1b2ab56
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/macro.py
@@ -0,0 +1,37 @@
+
+import os
+import ast
+
+def list_macros():
+ '''
+ Lists all available macros by names that can be
+ directly passed to load function in this module.
+ '''
+
+ return [fn[:-4] for fn in os.listdir('macros') if fn.endswith('.txt')]
+
+def load(macro_name):
+ '''
+ Loads a macro from text file.
+
+ macro_name is the name of a macro file inside macros folder,
+ without the file ending.
+
+ Loaded macro that is returned is a list whose items
+ are macro commands understood by DynamicImaging in core.py
+ '''
+
+ macro = []
+
+    with open(os.path.join('macros', macro_name+'.txt'), 'r') as fp:
+        for line in fp:
+            if line.strip():
+                try:
+                    macro.append(ast.literal_eval(line))
+                except (ValueError, SyntaxError):
+                    # Plain commands such as "wait 1.0" are stored unquoted
+                    macro.append(line.strip())
+
+ return macro
+
+def save(macro_name, macro):
+ with open(os.path.join('macros', macro_name+'.txt'), 'w') as fp:
+ for line in macro:
+ fp.write(str(line)+'\n')
+
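+
+if __name__ == "__main__":
+    # Minimal round-trip sketch (assumes a 'macros' directory exists in the
+    # working directory); 'example' is an illustrative name.
+    example = [(10, 0), 'wait 1.0', (0, 0)]
+    save('example', example)
+    print(load('example'))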
diff --git a/gonio-imsoft/gonioimsoft/macros/default.txt b/gonio-imsoft/gonioimsoft/macros/default.txt
new file mode 100644
index 0000000..7e9f1cb
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/macros/default.txt
@@ -0,0 +1,162 @@
+(30, -20)
+(20, -20)
+(10, -20)
+(0, -20)
+(-10, -20)
+(-20, -20)
+(-30, -20)
+(-40, -20)
+(-50, -20)
+(-50, -10)
+(-40, -10)
+(-30, -10)
+(-20, -10)
+(-10, -10)
+(0, -10)
+(10, -10)
+(20, -10)
+(30, -10)
+(30, 0)
+(20, 0)
+(10, 0)
+(0, 0)
+(-10, 0)
+(-20, 0)
+(-30, 0)
+(-40, 0)
+(-50, 0)
+(-50, 10)
+(-40, 10)
+(-30, 10)
+(-20, 10)
+(-10, 10)
+(0, 10)
+(10, 10)
+(20, 10)
+(30, 10)
+(30, 20)
+(20, 20)
+(10, 20)
+(0, 20)
+(-10, 20)
+(-20, 20)
+(-30, 20)
+(-40, 20)
+(-50, 20)
+(-50, 30)
+(-40, 30)
+(-30, 30)
+(-20, 30)
+(-10, 30)
+(0, 30)
+(10, 30)
+(20, 30)
+(30, 30)
+(30, 40)
+(20, 40)
+(10, 40)
+(0, 40)
+(-10, 40)
+(-20, 40)
+(-30, 40)
+(-40, 40)
+(-50, 40)
+(-50, 50)
+(-40, 50)
+(-30, 50)
+(-20, 50)
+(-10, 50)
+(0, 50)
+(10, 50)
+(20, 50)
+(30, 50)
+(30, 60)
+(20, 60)
+(10, 60)
+(0, 60)
+(-10, 60)
+(-20, 60)
+(-30, 60)
+(-40, 60)
+(-50, 60)
+(-50, 70)
+(-40, 70)
+(-30, 70)
+(-20, 70)
+(-10, 70)
+(0, 70)
+(10, 70)
+(20, 70)
+(30, 70)
+(30, 80)
+(20, 80)
+(10, 80)
+(0, 80)
+(-10, 80)
+(-20, 80)
+(-30, 80)
+(-40, 80)
+(-50, 80)
+(-50, 90)
+(-40, 90)
+(-30, 90)
+(-20, 90)
+(-10, 90)
+(0, 90)
+(10, 90)
+(20, 90)
+(30, 90)
+(30, 100)
+(20, 100)
+(10, 100)
+(0, 100)
+(-10, 100)
+(-20, 100)
+(-30, 100)
+(-40, 100)
+(-50, 100)
+(-50, 110)
+(-40, 110)
+(-30, 110)
+(-20, 110)
+(-10, 110)
+(0, 110)
+(10, 110)
+(20, 110)
+(30, 110)
+(30, 120)
+(20, 120)
+(10, 120)
+(0, 120)
+(-10, 120)
+(-20, 120)
+(-30, 120)
+(-40, 120)
+(-50, 120)
+(-50, 130)
+(-40, 130)
+(-30, 130)
+(-20, 130)
+(-10, 130)
+(0, 130)
+(10, 130)
+(20, 130)
+(30, 130)
+(30, 140)
+(20, 140)
+(10, 140)
+(0, 140)
+(-10, 140)
+(-20, 140)
+(-30, 140)
+(-40, 140)
+(-50, 140)
+(-50, 150)
+(-40, 150)
+(-30, 150)
+(-20, 150)
+(-10, 150)
+(0, 150)
+(10, 150)
+(20, 150)
+(30, 150)
diff --git a/gonio-imsoft/gonioimsoft/make_macro.py b/gonio-imsoft/gonioimsoft/make_macro.py
new file mode 100644
index 0000000..ae7980c
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/make_macro.py
@@ -0,0 +1,22 @@
+import numpy as np
+
+import macro
+
+
+def main():
+ horizontals = [-50, -40, -30, -20, -10, 0, 10, 20, 30]
+ verticals = np.arange(-20, 160, 10)
+
+ new_macro = []
+
+ for i, vertical in enumerate(verticals):
+ horizontals.reverse()
+
+ for horizontal in horizontals:
+ new_macro.append((horizontal, vertical))
+ new_macro.append('wait 1')
+
+ macro.save('default', new_macro)
+
+if __name__ == "__main__":
+ main()
diff --git a/gonio-imsoft/gonioimsoft/motors.py b/gonio-imsoft/gonioimsoft/motors.py
new file mode 100644
index 0000000..8f01fce
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/motors.py
@@ -0,0 +1,158 @@
+
+import math
+import time
+import atexit
+import threading
+
+from gonioimsoft.anglepairs import degrees2steps
+
+class Motor:
+ '''
+ Moving motors with limits.
+ '''
+
+ def __init__(self, ArduinoReader, i_motor, i_sensor):
+ '''
+        ArduinoReader   The shared ArduinoReader instance
+        i_motor         Index number of the motor
+        i_sensor        None or the index number of the motor's sensor
+ '''
+ self.reader = ArduinoReader
+ self.i_motor = i_motor
+ self.i_sensor = i_sensor
+
+ # If no sensor is connected with the motor (i_sensor == None),
+ # at least we keep track how many times have we moved.
+ self.position = 0
+
+ self.limits = [-math.inf, math.inf]
+
+ # Moving motor specific place using a sensor
+ # maxmis = Maximum allowed error when using move_to
+ self.maxmis = 6
+ self.thread = None
+ self._stop = False
+
+ atexit.register(self.move_to, 0)
+
+
+ def get_position(self):
+ '''
+ Returns the current position of the motor
+ '''
+ if self.i_sensor is None:
+ return self.position
+ else:
+ return self.reader.get_sensor(self.i_sensor)
+
+ def move_raw(self, direction, time=1):
+ '''
+        Returns False if the movement wasn't allowed, otherwise True.
+ '''
+ curpos = self.get_position()
+
+        # Only move so that we don't go over the limits; positive direction
+        # is taken to increase the position (matching the bookkeeping below)
+        if ((curpos < self.limits[1] and direction >= 0) or
+                (curpos > self.limits[0] and direction < 0)):
+
+ self.reader.move_motor(self.i_motor, direction, time=time)
+ self.position += time*direction
+
+ return True
+ else:
+ return False
+
+
+ def move_to(self, motor_position):
+ '''
+ Move motor to specific position.
+
+ If self.i_sensor == None:
+ motor_position is measured in time units, hoping that the motor
+ has constant speed
+
+ otherwise
+ motor_position is measured in degrees from the zero position
+ '''
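+        # Example (illustrative):
+        #   motor_with_sensor.move_to(15)     # degrees; runs in a thread
+        #   motor_without_sensor.move_to(2)   # time units, constant speed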
+ print('Driving motor {} to {}'.format(self.i_motor, motor_position))
+ if self.i_sensor is None:
+ # If no extra sensor connected to the motor we'll just move
+ # based on where we think we are
+ position = self.get_position()
+ time = position - motor_position
+ if time >= 0:
+ direction = 1
+ else:
+ direction = -1
+ time = -time
+ self.move_raw(direction, time=time)
+ else:
+ # If we have a sensor we should move towards it, launch a thread
+ # that runs in background until the task finished
+
+ if not self.thread:
+ callable_getpos = lambda: self.reader.get_sensor(self.i_sensor)
+
+ self.thread = threading.Thread(target=self._move_to_thread,
+ args=(degrees2steps(motor_position), callable_getpos))
+ self._stop = False
+ self.thread.start()
+ print('started thread')
+
+
+ def _move_to_thread(self, target, callable_getpos):
+        '''
+        Thread target: drives the motor towards the target position until
+        it is reached, stop() is called, or a limit is hit.
+
+        callable_getpos     A callable that returns the current position
+                            of the motor
+        '''
+ print('got to thread')
+
+ while not self._stop:
+
+ pos = callable_getpos()
+
+ if target-self.maxmis/2 < pos < target+self.maxmis/2:
+ break
+
+ direction = pos-target
+
+ if not self.move_raw(direction, time=0.1):
+ # If hitting the limits stop here
+ break
+
+ # The thread can sleep 95 ms while waiting the motor to move
+ time.sleep(0.095)
+
+ self.thread = None
+
+ def stop(self):
+ self._stop = True
+
+ def reached_target(self):
+ '''
+ Returns True if the motor has reached its target set at move_to.
+ '''
+ if self.thread:
+ return False
+ else:
+ return True
+
+ def set_upper_limit(self):
+ '''
+ Sets current position as the upper limit
+ '''
+ self.limits[1] = self.position
+
+
+ def set_lower_limit(self):
+ '''
+ Sets current position as the lower limit
+ '''
+ self.limits[0] = self.position
+
+ def get_limits(self):
+ return self.limits
+
+
diff --git a/gonio-imsoft/gonioimsoft/presets/3step_100ms b/gonio-imsoft/gonioimsoft/presets/3step_100ms
new file mode 100644
index 0000000..5ba3575
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/3step_100ms
@@ -0,0 +1 @@
+{"isi": 20.0, "repeats": 25, "pre_stim": 0.0, "stim": 10.0, "post_stim": 0.0, "frame_length": 0.005, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 8, "flash_off": 4.0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "3step_100ms", "trigger_channel": "Dev2/ao3", "biosyst_stimulus": "", "biosyst_channel": 2, "avgint_adaptation": 0.0, "flash_type": "3steplogsweep,2.5,2.5", "save_stack": true}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/default b/gonio-imsoft/gonioimsoft/presets/default
new file mode 100644
index 0000000..003eb37
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/default
@@ -0,0 +1 @@
+{"isi": 10.0, "repeats": 1, "pre_stim": 0.0, "stim": 0.2, "post_stim": 0.0, "frame_length": 0.01, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 8, "flash_off": 0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "default", "trigger_channel": "Dev2/ao3", "biosyst_stimulus": "", "biosyst_channel": 2, "avgint_adaptation": 0, "flash_type": "square", "save_stack": false}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/intensity_series_10p_log b/gonio-imsoft/gonioimsoft/presets/intensity_series_10p_log
new file mode 100644
index 0000000..f974568
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/intensity_series_10p_log
@@ -0,0 +1 @@
+{"isi": 10.0, "repeats": 10, "pre_stim": 0.0, "stim": 0.2, "post_stim": 0.0, "frame_length": 0.01, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": [0.1, 0.16, 0.28, 0.46, 0.77, 1.3, 2.2, 3.6, 6, 9], "flash_off": 0, "ir_channel": ["Dev2/ao0", "Dev2/ao1"], "flash_channel": "Dev1/ao0", "suffix": "intensity_series_10p_log"}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/intensser_25p b/gonio-imsoft/gonioimsoft/presets/intensser_25p
new file mode 100644
index 0000000..40326a4
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/intensser_25p
@@ -0,0 +1 @@
+{"isi": 10.0, "repeats": 25, "pre_stim": 0.0, "stim": 0.2, "post_stim": 0.0, "frame_length": 0.01, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": [0.008, 0.010668171457306592, 0.014226235280311384, 0.018970989645293243, 0.025298221281347035, 0.03373572027428658, 0.044987306015227935, 0.05999153674659646, 0.08000000000000002, 0.10668171457306592, 0.14226235280311383, 0.18970989645293243, 0.2529822128134704, 0.3373572027428658, 0.4498730601522793, 0.5999153674659647, 0.8, 1.0668171457306592, 1.4226235280311383, 1.8970989645293241, 2.529822128134704, 3.373572027428658, 4.4987306015227935, 5.999153674659647, 8.0], "flash_off": 0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "intensser_25p", "trigger_channel": "Dev2/ao3"}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/intensser_25p_isi2s b/gonio-imsoft/gonioimsoft/presets/intensser_25p_isi2s
new file mode 100644
index 0000000..c515226
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/intensser_25p_isi2s
@@ -0,0 +1 @@
+{"isi": 2.0, "repeats": 25, "pre_stim": 0.0, "stim": 0.2, "post_stim": 0.0, "frame_length": 0.01, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": [0.008, 0.010668171457306592, 0.014226235280311384, 0.018970989645293243, 0.025298221281347035, 0.03373572027428658, 0.044987306015227935, 0.05999153674659646, 0.08000000000000002, 0.10668171457306592, 0.14226235280311383, 0.18970989645293243, 0.2529822128134704, 0.3373572027428658, 0.4498730601522793, 0.5999153674659647, 0.8, 1.0668171457306592, 1.4226235280311383, 1.8970989645293241, 2.529822128134704, 3.373572027428658, 4.4987306015227935, 5.999153674659647, 8.0], "flash_off": 0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "intensser_25p_isi2s", "trigger_channel": "Dev2/ao3"}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/isisser_25p b/gonio-imsoft/gonioimsoft/presets/isisser_25p
new file mode 100644
index 0000000..8f52726
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/isisser_25p
@@ -0,0 +1 @@
+{"isi": [1.0, 1.2115276586285884, 1.4677992676220695, 1.7782794100389228, 2.154434690031884, 2.610157215682537, 3.1622776601683795, 3.831186849557287, 4.641588833612778, 5.623413251903491, 6.812920690579611, 8.254041852680183, 10.0, 12.115276586285882, 14.67799267622069, 17.78279410038923, 21.544346900318832, 26.10157215682536, 31.622776601683793, 38.311868495572874, 46.41588833612777, 56.23413251903491, 68.12920690579611, 82.54041852680182, 100.0], "repeats": 25, "pre_stim": 0.0, "stim": 0.2, "post_stim": 0.0, "frame_length": 0.01, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 8, "flash_off": 0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "isisser_25p", "trigger_channel": "Dev2/ao3"}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/manyrepeats b/gonio-imsoft/gonioimsoft/presets/manyrepeats
new file mode 100644
index 0000000..4667d91
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/manyrepeats
@@ -0,0 +1 @@
+{"isi": 10.0, "repeats": 25, "pre_stim": 0.0, "stim": 0.2, "post_stim": 0.0, "frame_length": 0.01, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 8, "flash_off": 0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "manyrepeats", "trigger_channel": "Dev2/ao3", "avgint_adaptation": 0, "biosyst_channel": 2, "biosyst_stimulus": "", "flash_type": "square", "save_stack": false}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/manyrepeats_isi2s b/gonio-imsoft/gonioimsoft/presets/manyrepeats_isi2s
new file mode 100644
index 0000000..860a87a
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/manyrepeats_isi2s
@@ -0,0 +1 @@
+{"isi": 2.0, "repeats": 25, "pre_stim": 0.0, "stim": 0.2, "post_stim": 0.0, "frame_length": 0.01, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 8, "flash_off": 0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "manyrepeats_isi2s", "trigger_channel": "Dev2/ao3", "avgint_adaptation": 0, "biosyst_channel": 2, "biosyst_stimulus": "", "flash_type": "square", "save_stack": false}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/manyrepeats_isi2s_frombg b/gonio-imsoft/gonioimsoft/presets/manyrepeats_isi2s_frombg
new file mode 100644
index 0000000..c89bd57
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/manyrepeats_isi2s_frombg
@@ -0,0 +1 @@
+{"isi": 2.0, "repeats": 25, "pre_stim": 0.0, "stim": 0.2, "post_stim": 0.0, "frame_length": 0.01, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 8, "flash_off": 2.0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "manyrepeats_isi2s_frombg", "trigger_channel": "Dev2/ao3"}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/manyrepeats_isi2s_todark b/gonio-imsoft/gonioimsoft/presets/manyrepeats_isi2s_todark
new file mode 100644
index 0000000..bf438fd
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/manyrepeats_isi2s_todark
@@ -0,0 +1 @@
+{"isi": 2.0, "repeats": 25, "pre_stim": 0.0, "stim": 0.2, "post_stim": 0.0, "frame_length": 0.01, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 0.0, "flash_off": 2.0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "manyrepeats_isi2s_todark", "trigger_channel": "Dev2/ao3"}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/prettyimages b/gonio-imsoft/gonioimsoft/presets/prettyimages
new file mode 100644
index 0000000..7173fcf
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/prettyimages
@@ -0,0 +1 @@
+{"isi": 1.0, "repeats": 1, "pre_stim": 0.0, "stim": 0.2, "post_stim": 0.0, "frame_length": 0.02, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 0.0, "flash_off": 0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "prettyimages", "trigger_channel": "Dev2/ao3", "biosyst_stimulus": "", "biosyst_channel": 2, "avgint_adaptation": 0}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/rotation_trigger b/gonio-imsoft/gonioimsoft/presets/rotation_trigger
new file mode 100644
index 0000000..2d9de2d
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/rotation_trigger
@@ -0,0 +1 @@
+{"isi": 0.0, "repeats": 1, "pre_stim": 0.0, "stim": 0.1, "post_stim": 0.0, "frame_length": 0.1, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 0.0, "flash_off": 0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "", "trigger_channel": "Dev2/ao3", "biosyst_stimulus": "", "biosyst_channel": 2, "avgint_adaptation": 0, "flash_type": "square", "save_stack": false}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/sinelogsweep b/gonio-imsoft/gonioimsoft/presets/sinelogsweep
new file mode 100644
index 0000000..340b1e8
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/sinelogsweep
@@ -0,0 +1 @@
+{"isi": 60.0, "repeats": 25, "pre_stim": 0.0, "stim": 10.0, "post_stim": 0.0, "frame_length": 0.01, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 8, "flash_off": 4.0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "sinelogsweep", "trigger_channel": "Dev2/ao3", "biosyst_stimulus": "", "biosyst_channel": 2, "avgint_adaptation": 10.0, "flash_type": "sinelogsweep"}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/sinelogsweep_cam200Hz b/gonio-imsoft/gonioimsoft/presets/sinelogsweep_cam200Hz
new file mode 100644
index 0000000..5f1b1fc
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/sinelogsweep_cam200Hz
@@ -0,0 +1 @@
+{"isi": 20.0, "repeats": 25, "pre_stim": 0.0, "stim": 10.0, "post_stim": 0.0, "frame_length": 0.005, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 8, "flash_off": 4.0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "sinelogsweep_cam200Hz", "trigger_channel": "Dev2/ao3", "biosyst_stimulus": "", "biosyst_channel": 2, "avgint_adaptation": 0.0, "flash_type": "sinelogsweep", "save_stack": true}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/squarelogsweep_cam200Hz b/gonio-imsoft/gonioimsoft/presets/squarelogsweep_cam200Hz
new file mode 100644
index 0000000..d3a6ad8
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/squarelogsweep_cam200Hz
@@ -0,0 +1 @@
+{"isi": 20.0, "repeats": 25, "pre_stim": 0.0, "stim": 10.0, "post_stim": 0.0, "frame_length": 0.005, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 8, "flash_off": 4.0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "squarelogsweep_cam200Hz", "trigger_channel": "Dev2/ao3", "biosyst_stimulus": "", "biosyst_channel": 2, "avgint_adaptation": 0.0, "flash_type": "squarelogsweep", "save_stack": true}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/presets/squarewave_cam200Hz b/gonio-imsoft/gonioimsoft/presets/squarewave_cam200Hz
new file mode 100644
index 0000000..a8bc1b7
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/presets/squarewave_cam200Hz
@@ -0,0 +1 @@
+{"isi": 20.0, "repeats": 25, "pre_stim": 0.0, "stim": 10.0, "post_stim": 0.0, "frame_length": 0.005, "ir_imaging": 5, "ir_waiting": 0, "ir_livefeed": 1, "flash_on": 8, "flash_off": 4.0, "ir_channel": "Dev1/ao1", "flash_channel": "Dev1/ao0", "suffix": "squarewave_cam200Hz", "trigger_channel": "Dev2/ao3", "biosyst_stimulus": "", "biosyst_channel": 2, "avgint_adaptation": 0.0, "flash_type": "squarelogsweep,10,10", "save_stack": true}
\ No newline at end of file
diff --git a/gonio-imsoft/gonioimsoft/stimulus.py b/gonio-imsoft/gonioimsoft/stimulus.py
new file mode 100644
index 0000000..2f0cf43
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/stimulus.py
@@ -0,0 +1,156 @@
+
+import os
+
+import numpy as np
+import scipy.signal
+
+try:
+    from biosystfiles import extract as bsextract
+except ImportError:
+    # Optional dependency; only needed for overload_biosyst_stimulus
+    bsextract = None
+
+class StimulusBuilder:
+ '''
+ Get various stimulus waveforms
+ - to the stimulus LED
+ - and on pulse for illumination LED
+ - and square wave for triggering the camera.
+
+
+ '''
+
+ def __init__(self, stim_time, prestim_time, poststim_time, frame_length,
+ stimulus_intensity, illumination_intensity, fs,
+ stimulus_finalval=0, illumination_finalval=0,
+ wtype='square'):
+ '''
+ stim_time The time stimulus LED is on
+ prestim_time The time the camera is running and illumination is on before the stimulus
+ poststim_time The time the camera is running and illumination is on after the stimulus
+        stimulus_intensity      Brightness of the stimulus, in output-voltage units
+        illumination_intensity  Brightness of the illumination lights, in output-voltage units
+        wtype                   "square", "sinelogsweep", "squarelogsweep" or "3steplogsweep"
+
+ '''
+
+ self.stim_time = stim_time
+ self.prestim_time = prestim_time
+ self.poststim_time = poststim_time
+ self.frame_length = frame_length
+ self.stimulus_intensity = stimulus_intensity
+ self.illumination_intensity = illumination_intensity
+ self.fs = fs
+ self.stimulus_finalval = stimulus_finalval
+ self.illumination_finalval = illumination_finalval
+
+ self.wtype = wtype
+
+ self.N_frames = int(round((stim_time+prestim_time+poststim_time)/frame_length))
+
+ self.overload_stimulus = None
+
+
+
+ def overload_biosyst_stimulus(self, fn, channel=0):
+ '''
+ Loads a Biosyst stimulus that gets returned then at
+ get_stimulus_pulse instead.
+
+ Returns the overload stimulus and new fs
+ '''
+        if bsextract is None:
+            raise ImportError('biosystfiles is required to load Biosyst stimuli')
+        ffn = os.path.join('biosyst_stimuli', fn)
+        self.overload_stimulus, self.fs = bsextract(ffn, channel)
+ self.overload_stimulus = self.overload_stimulus.flatten()
+ print(self.overload_stimulus.shape)
+ print(np.max(self.overload_stimulus))
+
+ return self.overload_stimulus, self.fs
+
+ def get_stimulus_pulse(self):
+ '''
+ Constant value pulse
+
+ _________stimulus_intensity
+ | |
+ ________| |__________
+ prestim stim poststim
+ '''
+
+ if self.overload_stimulus is not None:
+ return self.overload_stimulus
+
+ N0_samples = int(self.prestim_time*self.fs)
+ N1_samples = int(self.stim_time*self.fs)
+ N2_samples = int(self.poststim_time*self.fs)
+
+ if self.wtype == 'square':
+ stimulus = np.concatenate( (np.zeros(N0_samples), np.ones(N1_samples), np.zeros(N2_samples)) )
+ elif 'logsweep' in self.wtype:
+            try:
+                wtype, f0, f1 = self.wtype.split(',')
+                f0 = float(f0)
+                f1 = float(f1)
+            except ValueError:
+                print("Doing logsweep from 0.5 Hz to 100 Hz")
+                f0 = 0.5
+                f1 = 100
+                wtype = self.wtype
+
+ times = np.linspace(0, self.stim_time, N1_samples)
+ active = scipy.signal.chirp(times, f0=f0, f1=f1, t1=self.stim_time, phi=-90, method='logarithmic')
+
+ if wtype == 'squarelogsweep':
+ active[active>0] = 1
+ active[active<0] = -1
+ elif wtype == '3steplogsweep':
+ cstep = np.sin(np.pi/4)
+ active[np.abs(active) <= cstep] = 0
+ active[active > cstep] = 1
+ active[active < -cstep] = -1
+
+ elif wtype == 'sinelogsweep':
+ pass
+ else:
+                raise ValueError('Unknown flash_type {}'.format(wtype))
+
+ # Join with pre and post 0.5 values
+ # and move and scale between 0 and 1 (from - 1 and 1)
+ stimulus = np.concatenate( (np.ones(N0_samples)/2, (active+1)/2, np.ones(N2_samples)/2) )
+
+ else:
+            raise ValueError('Invalid wtype; has to be "square", "sinelogsweep", "squarelogsweep" or "3steplogsweep"')
+
+ stimulus = self.stimulus_intensity * stimulus
+
+
+ stimulus[-1] = self.stimulus_finalval
+
+ return stimulus
+
+
+
+ def get_illumination(self):
+ '''
+ Returns 1D np.array.
+ '''
+ illumination = np.ones( int((self.stim_time+self.prestim_time+self.poststim_time)*self.fs) )
+ illumination = self.illumination_intensity * illumination
+
+ illumination[-1] = self.illumination_finalval
+
+ return illumination
+
+
+
+ def get_camera(self):
+ '''
+ Get square wave camera triggering stimulus.
+
+ Returns 1D np.array.
+ '''
+
+        samples_per_frame = int(self.frame_length * self.fs / 2)
+
+ camera = np.concatenate( ( np.ones((samples_per_frame, self.N_frames)), np.zeros((samples_per_frame, self.N_frames)) ) ).T.flatten()
+ camera = 5*camera
+
+ camera[-1] = 0
+
+ return camera
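+
+
+if __name__ == "__main__":
+    # Minimal sketch with illustrative values: a 200 ms square pulse sampled
+    # at 1 kHz with 50 ms pre- and post-stimulus periods; all three waveforms
+    # come out 300 samples long.
+    builder = StimulusBuilder(0.2, 0.05, 0.05, 0.01, 8.0, 5.0, 1000)
+    print(builder.get_stimulus_pulse().shape)   # (300,)
+    print(builder.get_illumination().shape)     # (300,)
+    print(builder.get_camera().shape)           # (300,)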
diff --git a/gonio-imsoft/gonioimsoft/tui.py b/gonio-imsoft/gonioimsoft/tui.py
new file mode 100644
index 0000000..694932f
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/tui.py
@@ -0,0 +1,641 @@
+
+import os
+import copy
+import platform
+import string
+import time
+import json
+
+
+OS = platform.system()
+if OS == 'Windows':
+ import msvcrt
+else:
+ import sys
+
+from gonioimsoft.version import __version__
+from gonioimsoft.directories import PUPILDIR
+import gonioimsoft.core as core
+from gonioimsoft.imaging_parameters import (
+ DEFAULT_DYNAMIC_PARAMETERS,
+ ParameterEditor,
+ )
+
+
+help_string = """List of commands and their options\n
+GENERAL
+  help                        Prints this message
+  suffix [SUFFIX]             Add a suffix SUFFIX to saved image folders
+MOTORS
+  where [i_motor]             Prints the position of the motor with index i_motor
+  drive [i_motor] [pos]       Drive i_motor to the given position (float)
+  limitset [side] [i_motor]   Set the current position as the upper/lower limit
+  limitget [i_motor]          Print the current limits of i_motor
+MACROS
+  macro run [name]            Run the named macro
+  macro list                  List the available macros
+  macro stop                  Stop all motors
+"""
+
+help_limit = """Usage of the limit commands
+limitset [upper/lower] [i_motor]    Set the current position as a limit
+limitget [i_motor]                  Print the motor's current limits"""
+
+
+class Console:
+ '''
+ Operation console for TUI or other user interfaces.
+
+ Capabilities:
+ - changing imaging parameters
+ - setting save suffix
+ - controlling motors and setting their limits
+
+ In tui, this console can be opened by pressing ` (the keyboard button next to 1)
+ '''
+ def __init__(self, core_dynamic):
+ '''
+ core_dynamic An instance of core.Dynamic class.
+ '''
+ self.dynamic = core_dynamic
+
+
+ def enter(self, command):
+ '''
+ Calling a command
+ '''
+ command_name = command.split(' ')[0]
+ args = command.split(' ')[1:]
+
+ if hasattr(self, command_name):
+ method = getattr(self, command_name)
+ try:
+ method(*args)
+ except TypeError as e:
+ print(e)
+ self.help()
+
+ else:
+            print('Command {} does not exist'.format(command_name))
+ self.help()
+
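+    # Example (illustrative): enter('drive 0 15.0') dispatches to
+    # self.drive('0', '15.0'); arguments arrive as strings and each
+    # command does its own type conversions.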
+
+ def help(self):
+ '''
+ Print the help string on screen.
+ '''
+ print(help_string)
+
+
+ def suffix(self, suffix):
+        '''
+        Sets the suffix appended to the saved image folders.
+        '''
+ # Replaces spaces by underscores
+ if ' ' in suffix:
+ suffix = suffix.replace(' ', '_')
+ print('Info: Replaced spaces in the suffix with underscores')
+
+ # Replace illegal characters by x
+ legal_suffix = ""
+ for letter in suffix:
+ if letter in string.ascii_letters+'_()-'+'0123456789.':
+ legal_suffix += letter
+ else:
+ print('Replacing illegal character {} with x'.format(letter))
+ legal_suffix += 'x'
+
+ print('Setting suffix {}'.format(legal_suffix))
+ self.dynamic.set_subfolder_suffix(legal_suffix)
+
+
+    def limitset(self, side, i_motor):
+        '''
+        Sets the current position as a limit.
+
+        side       "upper" or "lower"
+        i_motor    0, 1, 2, ...
+        '''
+        i_motor = int(i_motor)
+
+        if side == 'upper':
+            self.dynamic.motors[i_motor].set_upper_limit()
+        elif side == 'lower':
+            self.dynamic.motors[i_motor].set_lower_limit()
+        else:
+            print(help_limit)
+
+
+    def limitget(self, i_motor):
+        '''
+        Prints the current limits of a motor.
+        '''
+        mlim = self.dynamic.motors[int(i_motor)].get_limits()
+        print('  Motor {} limited at {} lower and {} upper'.format(i_motor, *mlim))
+
+
+    def where(self, i_motor):
+        # Print the motor's current position
+        i_motor = int(i_motor)
+        mpos = self.dynamic.motors[i_motor].get_position()
+        print('  Motor {} at {}'.format(i_motor, mpos))
+
+
+    def drive(self, i_motor, position):
+        self.dynamic.motors[int(i_motor)].move_to(float(position))
+
+
+    def macro(self, command, macro_name=''):
+        '''
+        Runs, lists and stops macros (automated imaging sequences).
+        '''
+ if command == 'run':
+ self.dynamic.run_macro(macro_name)
+ elif command == 'list':
+
+            print('The following macros are available')
+ for line in self.dynamic.list_macros():
+ print(line)
+
+ elif command == 'stop':
+ for motor in self.dynamic.motors:
+ motor.stop()
+
+
+    def set_roi(self, x, y, w, h):
+        # Arguments arrive as strings from the console; assume integer pixels
+        self.dynamic.camera.set_roi(tuple(int(v) for v in (x, y, w, h)))
+
+
+    def eternal_repeat(self, isi):
+        '''
+        Repeats the imaging series until the user interrupts (Enter),
+        with an inter-series interval of isi seconds.
+        '''
+        isi = float(isi)
+        print('Repeating the image series every {} s until Enter is pressed'.format(isi))
+
+        suffix = "eternal_repeat_isi{}s".format(isi)
+        suffix = suffix + "_rep{}"
+        i_repeat = 0
+
+        while True:
+            self.suffix(suffix.format(i_repeat))
+
+            start_time = time.time()
+
+            if self.dynamic.image_series(inter_loop_callback=self.image_series_callback) is False:
+                break
+            i_repeat += 1
+
+            # Sleep only the remainder so that successive series start
+            # isi seconds apart (when the series itself is shorter)
+            sleep_time = isi - float(time.time() - start_time)
+            if sleep_time > 0:
+                time.sleep(sleep_time)
+
+
+ def chain_presets(self, delay, *preset_names):
+        '''
+        Runs multiple presets one after another at a fixed
+        (horizontal, vertical) location.
+
+        delay      In seconds, how long to wait between presets
+        '''
+ delay = float(delay)
+ original_parameters = copy.copy(self.dynamic.dynamic_parameters)
+
+
+ print('Repeating presets {}'.format(preset_names))
+ for preset_name in preset_names:
+ print('Preset {}'.format(preset_name))
+
+ self.dynamic.load_preset(preset_name)
+
+            if self.dynamic.image_series(inter_loop_callback=self.image_series_callback) is False:
+ break
+
+ time.sleep(delay)
+
+ print('Finished repeating presets')
+ self.dynamic.dynamic_parameters = original_parameters
+
+
+    def set_rotation(self, horizontal, vertical):
+        '''
+        Makes the current rotary-encoder reading correspond to the given
+        horizontal and vertical angles by adjusting the reader offset.
+        '''
+        ho = int(horizontal)
+        ve = int(vertical)
+        cho, cve = self.dynamic.reader.latest_angle
+
+        # Offset = current reading - desired angle
+        self.dynamic.reader.offset = (cho-ho, cve-ve)
+
+
+
+class TextUI:
+    '''
+    A simple text-based user interface for goniometric imaging.
+
+    Attributes
+    ----------
+    console : object
+    choices : list
+        Main menu choices as [label, callback] pairs
+    quit_requested : bool
+        When set True, the main menu loop exits.
+    expfn : string
+        Filename of the experimenters.json file
+    glofn : string
+        Filename of the locked parameters settings file
+
+    '''
+ def __init__(self):
+ self.dynamic = core.Dynamic()
+
+        # Get the experimenters list or, if not present, use the default
+        self.expfn = os.path.join(PUPILDIR, 'experimenters.json')
+        if os.path.exists(self.expfn):
+            try:
+                with open(self.expfn, 'r') as fp:
+                    self.experimenters = json.load(fp)
+            except (ValueError, OSError):
+                self.experimenters = ['gonioims']
+        else:
+            self.experimenters = ['gonioims']
+
+
+        # Get the locked parameters
+        self.glofn = os.path.join(PUPILDIR, 'locked_parameters.json')
+        if os.path.exists(self.glofn):
+            try:
+                with open(self.glofn, 'r') as fp:
+                    self.locked_parameters = json.load(fp)
+            except (ValueError, OSError):
+                self.locked_parameters = {}
+        else:
+            self.locked_parameters = {}
+
+
+
+ self.choices = [['Static imaging', self.loop_static],
+ ['Dynamic imaging', self.loop_dynamic],
+ ['Trigger only (external software for camera)', self.loop_trigger],
+ ['', None],
+ ['Edit locked parameters', self.locked_parameters_edit],
+ ['', None],
+ ['Quit', self.quit],
+ ['', None],
+ ['Start camera server (local)', self.dynamic.camera.startServer],
+ ['Stop camera server', self.dynamic.camera.close_server],
+ ['Set Python2 command (current {})', self.set_python2]]
+
+
+        self.quit_requested = False
+
+ self.console = Console(self.dynamic)
+ self.console.image_series_callback = self.image_series_callback
+
+
+ @property
+ def menutext(self):
+
+ # Check camera server status
+ if self.dynamic.camera.isServerRunning():
+ cs = 'ON'
+ else:
+ cs = 'OFF'
+
+        # Check serial (Arduino) status
+        ser = self.dynamic.reader.serial
+        if ser is None:
+            ar = 'Serial UNAVAILABLE'
+        else:
+            if ser.is_open:
+                ar = 'Serial OPEN ({} @{} Bd)'.format(ser.port, ser.baudrate)
+            else:
+                ar = 'Serial CLOSED'
+
+ # Check DAQ
+ if core.nidaqmx is None:
+ daq = 'UNAVAILABLE'
+ else:
+ daq = 'AVAILABLE'
+
+ status = "\n CamServer {} | {} | nidaqmx {}".format(cs, ar, daq)
+
+ menutext = "Pupil Imsoft - Version {}".format(__version__)
+ menutext += "\n" + max(len(menutext), len(status)) * "-"
+ menutext += status
+ return menutext + "\n"
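+
+    # Illustrative menutext output (statuses depend on the hardware found):
+    #
+    #   Pupil Imsoft - Version 0.0.2
+    #   -----------------------------------------------------
+    #    CamServer OFF | Serial UNAVAILABLE | nidaqmx AVAILABLE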
+
+
+    @staticmethod
+    def _readKey():
+        if OS == 'Windows':
+            if msvcrt.kbhit():
+                return msvcrt.getwch()
+            return ''
+        else:
+            # Unlike the Windows branch, this blocks until a character
+            # is available
+            return sys.stdin.read(1)
+
+
+ @staticmethod
+ def _clearScreen():
+ if os.name == 'posix':
+ os.system('clear')
+ elif os.name == 'nt':
+ os.system('cls')
+
+
+ @staticmethod
+ def _print_lines(lines):
+
+ for text in lines:
+ print(text)
+
+
+    def _selectItem(self, items):
+        '''
+        Selects an item from a list by typing its number and pressing Enter.
+
+        Empty string items are printed as blank separator lines and
+        cannot be selected.
+        '''
+ real_items = []
+ i = 0
+ for item in items:
+ if item != '':
+ print('{}) {}'.format(i+1, item))
+ real_items.append(item)
+ i += 1
+ else:
+ print()
+
+        selection = ''
+        while True:
+            new_char = self._readKey()
+            if new_char:
+                selection += new_char
+                print(selection)
+            if selection.endswith('\r') or selection.endswith('\n'):
+                try:
+                    selection = int(selection)
+                    if not 1 <= selection <= len(real_items):
+                        raise IndexError
+                    break
+                except (ValueError, IndexError):
+                    print('Invalid input')
+                    selection = ''
+        return real_items[selection-1]
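+
+    # Illustrative use: _selectItem(['Start', '', 'Quit']) prints
+    #   1) Start
+    #   (a blank separator line)
+    #   2) Quit
+    # and returns the chosen item string once a valid number and Enter arrive.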
+
+ def set_python2(self):
+ print('Current Python2 command: {}'.format(self.dynamic.camera.python2))
+ sel = input('Change (yes/no)').lower()
+ if sel == 'yes':
+ newp = input('>>')
+            print('Changing...')
+ self.dynamic.camera.python2 = newp
+ input('press enter to continue')
+ else:
+ print('No changes!')
+
+ def loop_trigger(self):
+        '''
+        Only sends an NI trigger when the rotary encoders change, leaving
+        camera control to an external software (the original static loop).
+
+        Space toggles the triggering.
+        '''
+ self.loop_dynamic(static=True, camera=False)
+
+
+ def loop_static(self):
+ '''
+ Running the static imaging protocol.
+ '''
+ self.loop_dynamic(static=True)
+
+
+ def image_series_callback(self, label, i_repeat):
+ '''
+ Callback passed to image_series
+ '''
+ if label:
+ print(label)
+
+ key = self._readKey()
+
+        if key == '\r':
+            # If Enter was pressed, return False to stop the imaging
+            print('User pressed enter, stopping imaging')
+            return False
+ else:
+ return True
+
+
+ def loop_dynamic(self, static=False, camera=True):
+ '''
+ Running the dynamic imaging protocol.
+
+        static : bool
+            If False, run normal imaging where pressing space runs the imaging protocol.
+            If True, run imaging when the rotary encoders change (toggled with space).
+        camera : bool
+            If True, control the camera.
+            If False, assume an external program controls the camera, and only send a trigger.
+        '''
+ trigger = False
+
+ self.dynamic.locked_parameters = self.locked_parameters
+
+        self.dynamic.set_savedir('imaging_data_'+self.experimenter, camera=camera)
+ name = input('Name ({})>> '.format(self.dynamic.preparation['name']))
+ sex = input('Sex ({})>> '.format(self.dynamic.preparation['sex']))
+ age = input('Age ({})>> '.format(self.dynamic.preparation['age']))
+ self.dynamic.initialize(name, sex, age, camera=camera)
+
+ upper_lines = ['-','Dynamic imaging', '-', 'Help F1', 'Space ']
+
+ while True:
+
+ lines = upper_lines
+
+ key = self._readKey()
+
+ if static:
+ if trigger and self.dynamic.trigger_rotation:
+ if camera:
+ self.dynamic.image_series(inter_loop_callback=self.image_series_callback)
+ else:
+ self.dynamic.send_trigger()
+ if key == ' ':
+ trigger = not trigger
+ print('Rotation triggering now set to {}'.format(trigger))
+ else:
+ if key == ' ':
+ if camera:
+ self.dynamic.image_series(inter_loop_callback=self.image_series_callback)
+ else:
+ self.dynamic.send_trigger()
+
+            if key == '0':
+                self.dynamic.set_zero()
+ elif key == 's':
+ if camera:
+ self.dynamic.take_snap(save=True)
+ elif key == '\r':
+ # If user hits enter we'll exit
+ break
+
+ elif key == '[':
+ self.dynamic.motors[0].move_raw(-1)
+ elif key == ']':
+ self.dynamic.motors[0].move_raw(1)
+
+ elif key == 'o':
+ self.dynamic.motors[1].move_raw(-1)
+ elif key == 'p':
+ self.dynamic.motors[1].move_raw(1)
+
+ elif key == 'l':
+ self.dynamic.motors[2].move_raw(-1)
+ elif key == ';':
+ self.dynamic.motors[2].move_raw(1)
+
+ elif key == '`':
+ user_input = input("Type command >> ")
+ self.console.enter(user_input)
+
+ elif key == '' and not (static and self.dynamic.trigger_rotation):
+ if camera:
+ # When there's no input just update the live feed
+ self.dynamic.take_snap(save=False)
+
+
+ #self._clearScreen()
+ #self._print_lines(lines)
+
+ self.dynamic.tick()
+
+ self.dynamic.finalize()
+
+
+ def run(self):
+ '''
+        Runs the TUI until the user quits.
+ '''
+ # Check if userdata directory settings exists
+ if not os.path.isdir(PUPILDIR):
+            print('\nFIRST RUN NOTICE\n------------------')
+            print(('Pupil Imsoft needs a location where it can '
+                   'save user files\n - list of experimenters\n - settings'
+                   '\n - created protocol files'))
+            print('This is not where the imaging data gets saved (no big files)')
+            print('\nCreate {}?'.format(PUPILDIR))
+
+ while True:
+                sel = input('(yes/no) >> ').lower()
+ if sel == 'yes':
+ os.makedirs(PUPILDIR)
+                    print('Success!')
+ time.sleep(2)
+ break
+ elif sel == 'no':
+ print('Warning! Cannot save any changes')
+ time.sleep(2)
+ break
+ else:
+                    print('Invalid selection, please try again')
+ time.sleep(1)
+
+
+ self._clearScreen()
+
+ print(self.menutext)
+
+ print('Select experimenter\n--------------------')
+ while True:
+ extra_options = [' (Add new)', ' (Remove old)', ' (Save current list)']
+            experimenter = self._selectItem(self.experimenters+extra_options).strip().lower()
+
+ # Select operation
+ if experimenter == '(add new)':
+ name = input('Name >>')
+ self.experimenters.append(name)
+
+ elif experimenter == '(remove old)':
+ print('Select who to remove (data remains)')
+ name = self._selectItem(self.experimenters+['..back (no deletion)'])
+
+ if name in self.experimenters:
+ self.experimenters.pop(self.experimenters.index(name))
+ elif experimenter == '(save current list)':
+ if os.path.isdir(PUPILDIR):
+ with open(self.expfn, 'w') as fp: json.dump(self.experimenters, fp)
+ print('Saved!')
+ else:
+ print('Saving failed (no {})'.format(PUPILDIR))
+ time.sleep(2)
+ else:
+ # Got a name
+ break
+
+ self._clearScreen()
+
+ self.experimenter = experimenter
+ self._clearScreen()
+
+        self.quit_requested = False
+        while not self.quit_requested:
+ print(self.menutext)
+
+ menuitems = [x[0] for x in self.choices]
+ menuitems[-1] = menuitems[-1].format(self.dynamic.camera.python2)
+
+ selection = self._selectItem(menuitems)
+ self.choices[menuitems.index(selection)][1]()
+
+ time.sleep(1)
+
+ self._clearScreen()
+
+ self.dynamic.exit()
+ time.sleep(1)
+
+
+ def locked_parameters_edit(self):
+
+ while True:
+ self._clearScreen()
+ print(self.menutext)
+            print('Here, any of the imaging parameters can be locked,')
+            print('overriding any preset values set at imaging time.')
+
+            print('\nCurrently locked parameters:')
+ if not self.locked_parameters:
+ print(' (NONE)')
+ for name in self.locked_parameters:
+ print(' {}'.format(name))
+ print()
+
+ sel = self._selectItem(['Add locked', 'Remove locked', 'Modify values', '.. back (and save)'])
+
+ if sel == 'Add locked':
+ choices = list(DEFAULT_DYNAMIC_PARAMETERS.keys())
+ sel2 = self._selectItem(choices+[' ..back'])
+
+ if sel2 in choices:
+ self.locked_parameters[sel2] = DEFAULT_DYNAMIC_PARAMETERS[sel2]
+ elif sel == 'Remove locked':
+ choices = list(self.locked_parameters.keys())
+ sel2 = self._selectItem(choices+[' ..back'])
+
+ if sel2 in choices:
+ del self.locked_parameters[sel2]
+
+ elif sel == 'Modify values':
+ self.locked_parameters = ParameterEditor(self.locked_parameters).getModified()
+
+ elif sel == '.. back (and save)':
+ if os.path.isdir(PUPILDIR):
+ with open(self.glofn, 'w') as fp: json.dump(self.locked_parameters, fp)
+ break
+
+
+    def quit(self):
+        self.quit_requested = True
+
+
+
+def main():
+ tui = TextUI()
+ tui.run()
+
+if __name__ == "__main__":
+ main()
diff --git a/gonio-imsoft/gonioimsoft/version.py b/gonio-imsoft/gonioimsoft/version.py
new file mode 100644
index 0000000..c545adc
--- /dev/null
+++ b/gonio-imsoft/gonioimsoft/version.py
@@ -0,0 +1 @@
+__version__ = "0.0.2"
diff --git a/gonio-imsoft/setup.py b/gonio-imsoft/setup.py
new file mode 100644
index 0000000..36d67db
--- /dev/null
+++ b/gonio-imsoft/setup.py
@@ -0,0 +1,37 @@
+import setuptools
+
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+
+# Version number to __version__ variable
+exec(open("gonioimsoft/version.py").read())
+
+install_requires = [
+ 'numpy',
+ 'matplotlib',
+ 'tifffile',
+ 'nidaqmx',
+ 'pyserial',
+ 'python-biosystfiles',
+ ]
+
+setuptools.setup(
+ name="gonio-imsoft",
+ version=__version__,
+ author="Joni Kemppainen",
+ author_email="jjtkemppainen1@sheffield.ac.uk",
+ description="Goniometric imaging software",
+ long_description=long_description,
+ long_description_content_type="text/markdown",
+ url="https://github.com/jkemppainen/gonio-imsoft",
+ packages=setuptools.find_packages(),
+ install_requires=install_requires,
+ classifiers=[
+ "Programming Language :: Python :: 3",
+ "License :: OSI Approved :: GNU General Public License v3 (GPLv3) ",
+ "Operating System :: Microsoft :: Windows",
+ 'Intended Audience :: Science/Research',
+ "Environment :: Console",
+ ],
+ python_requires='>=3.0',
+)
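+
+# Install sketch (standard pip usage; run from this directory):
+#   pip install -e .     # editable/development install
+#   pip install .        # regular install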