diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 7f46c9593..01bdd87f1 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -113,4 +113,4 @@ jobs: - name: Test with pytest run: | cd brainpy - pytest _src/ + pytest _src/ -p no:faulthandler diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml deleted file mode 100644 index 0c515d77a..000000000 --- a/.github/workflows/docs.yml +++ /dev/null @@ -1,43 +0,0 @@ -# This workflow will install Python dependencies, run tests and lint with a variety of Python versions -# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions - -name: Make documentation - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - - -jobs: - make_docs: - runs-on: - group: Default - labels: self-hosted - - steps: - - uses: actions/checkout@v4 - - uses: conda-incubator/setup-miniconda@v3 - with: - auto-update-conda: true - python-version: "3.10" - miniconda-version: "latest" - - name: Conda info - shell: bash -el {0} - run: conda info - - name: Install dependencies - shell: bash -el {0} - run: | - conda activate - python -m pip install --upgrade pip - python -m pip install flake8 pytest - if [ -f requirements-doc.txt ]; then pip install -r requirements-doc.txt; fi - pip uninstall brainpy -y - python setup.py install - - name: Make docs - shell: bash -el {0} - run: | - conda activate - cd ~/brainpy_docs/docs - make html \ No newline at end of file diff --git a/brainpy-changelog.md b/brainpy-changelog.md new file mode 100644 index 000000000..c949b7010 --- /dev/null +++ b/brainpy-changelog.md @@ -0,0 +1,2663 @@ +# Release notes (``brainpy``) + + +## brainpy>2.3.x + + +### Version 2.5.0 + + +This release contains many new features and fixes. It is the first release with a mature solution for Brain Dynamics Operator Customization on both CPU and GPU platforms. + + +#### New Features + +1. Add synapse projection with Delta synapse models through ``brainpy.dyn.HalfProjDelta`` and ``brainpy.dyn.FullProjDelta``. +2. Add ``brainpy.math.exprel``, and change the code in the corresponding HH neuron models to improve numerical computation accuracy. These changes can significantly improve the numerical integration accuracy of HH-like models under x32 computation. +3. Add ``brainpy.reset_level()`` decorator so that the state resetting order can be customized by users. +4. Add ``brainpy.math.ein_rearrange``, ``brainpy.math.ein_reduce``, and ``brainpy.math.ein_repeat`` functions +5. Add ``brainpy.math.scan`` transformation. +6. Rebase all customized operators using Taichi JIT compiler. On the CPU platform, the speed performance can be boosted ten to hundred times. On the GPU platforms, the flexibility can be greatly improved. +7. Many bug fixes. +8. A new version of ``brainpylib>=0.2.4`` has been released, supporting operator customization through the Taichi compiler. The supported backends include Linux, Windows, MacOS Intel, and MacOS M1 platforms. 
For tutorials, please see https://brainpy.readthedocs.io/en/latest/tutorial_advanced/operator_custom_with_taichi.html
+
+#### What's Changed
+* [docs] Add taichi customized operators tutorial by @Routhleck in https://github.com/brainpy/BrainPy/pull/545
+* [docs] Optimize tutorial code in `operator_custom_with_taichi.ipynb` of documentations by @Routhleck in https://github.com/brainpy/BrainPy/pull/546
+* [running] fix multiprocessing bugs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/547
+* [docs] Fix typo in docs by @Routhleck in https://github.com/brainpy/BrainPy/pull/549
+* :arrow_up: Bump conda-incubator/setup-miniconda from 2 to 3 by @dependabot in https://github.com/brainpy/BrainPy/pull/551
+* updates by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/550
+* ``brainpy.math.defjvp`` and ``brainpy.math.XLACustomOp.defjvp`` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/554
+* :arrow_up: Bump actions/setup-python from 4 to 5 by @dependabot in https://github.com/brainpy/BrainPy/pull/555
+* Fix ``brainpy.math.ifelse`` bugs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/556
+* [math & dyn] add ``brainpy.math.exprel``, and change the code in the corresponding HH neuron models to improve numerical computation accuracy by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/557
+* Update README by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/558
+* [doc] add conductance neuron model tutorial by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/559
+* Doc by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/560
+* add `brainpy.math.functional_vector_grad` and `brainpy.reset_level()` decorator by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/561
+* [math] change the internal implementation of surrogate function by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/562
+* Math by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/563
+* [doc] update citations by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/564
+* add support for multi-class margin loss by @charlielam0615 in https://github.com/brainpy/BrainPy/pull/566
+* Support for Delta synapse projections by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/568
+* [math] Add taichi customized operators(event csrmv, csrmv, jitconn event mv, jitconn mv) by @Routhleck in https://github.com/brainpy/BrainPy/pull/553
+* fix doc by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/571
+* Fix default math parameter setting bug by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/572
+* fix bugs in `brainpy.math.random.truncated_normal` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/574
+* [doc] fix doc by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/576
+* fix bugs in truncated_normal; add TruncatedNormal init. 
by @charlielam0615 in https://github.com/brainpy/BrainPy/pull/575
+* [Dyn] Fix alpha synapse bugs by @ztqakita in https://github.com/brainpy/BrainPy/pull/578
+* fix `brainpy.math.softplus` and `brainpy.dnn.SoftPlus` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/581
+* add `TruncatedNormal` to `initialize.py` by @charlielam0615 in https://github.com/brainpy/BrainPy/pull/583
+* Fix `_format_shape` in `random_inits.py` by @charlielam0615 in https://github.com/brainpy/BrainPy/pull/584
+* fix bugs in `truncated_normal` by @charlielam0615 in https://github.com/brainpy/BrainPy/pull/585
+* [dyn] fix warning of reset_state by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/587
+* [math] upgrade variable retrival by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/589
+* [math & dnn] add `brainpy.math.unflatten` and `brainpy.dnn.Unflatten` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/588
+* [math] add ``ein_rearrange``, ``ein_reduce``, and ``ein_repeat`` functions by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/590
+* [math] Support taichi customized op with metal cpu backend by @Routhleck in https://github.com/brainpy/BrainPy/pull/579
+* Doc fix and standardize Dual Exponential model again by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/591
+* update doc, upgrade reset_state, update projection models by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/592
+* [taichi] Make taichi caches more transparent and Add clean caches function by @Routhleck in https://github.com/brainpy/BrainPy/pull/596
+* [test] remove test skip on macos, since brainpylib supports taichi interface on macos by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/597
+* [dyn] add `clear_input` in the `step_run` function. by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/601
+* [math] Refactor taichi operators by @Routhleck in https://github.com/brainpy/BrainPy/pull/598
+* [math] fix `brainpy.math.scan` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/604
+* ``disable_ jit`` support in ``brainpy.math.scan`` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/606
+* [math] Remove the logs that `taichi.init()` print by @Routhleck in https://github.com/brainpy/BrainPy/pull/609
+* Version control in Publish.yml CI by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/610
+
+#### New Contributors
+* @charlielam0615 made their first contribution in https://github.com/brainpy/BrainPy/pull/566
+
+**Full Changelog**: https://github.com/brainpy/BrainPy/compare/V2.4.6...V2.5.0
+
+
+
+
+### Version 2.4.6
+
+This release contains more than 130 commits and provides several new features.
+
+
+#### New Features
+
+
+##### 1. Surrogate gradient functions are more transparent.
+
+New instances can be used to compute the surrogate gradients. For example:
+
+```python
+import brainpy.math as bm
+fun = bm.surrogate.Sigmoid()
+
+# forward function
+spk = fun(membrane_potential)
+
+# backward function
+dV = fun.surrogate_grad(1., membrane_potential)
+
+# surrogate forward function
+surro_spk = fun.surrogate_fun(membrane_potential)
+```
+
+##### 2. Add ``brainpy.math.eval_shape`` for evaluating all the dynamical variables used in the target function.
+
+This function is similar to ``jax.eval_shape``: it costs no FLOPs, while it can extract all variables used in the target function. For example:
+
+```python
+net = ...  # any dynamical system
+inputs = ... 
# inputs to the dynamical system
+variables, outputs = bm.eval_shape(net, inputs)
+# "variables" are all variables used in the target "net"
+```
+
+In the future, this function will be used everywhere to transform all JAX transformations into BrainPy's OO transformations.
+
+##### 3. Generalize tools and interfaces for state management.
+
+For a single object:
+- The ``.reset_state()`` defines the state resetting of all local variables in this node.
+- The ``.load_state()`` defines the state loading from external disks (typically, a dict is passed into this ``.load_state()`` function).
+- The ``.save_state()`` defines the state saving to external disks (typically, the ``.save_state()`` function generates a dict containing all variable values).
+
+Here is an example to define a full class of ``brainpy.DynamicalSystem``.
+
+```python
+import brainpy as bp
+import brainpy.math as bm
+
+class YourDynSys(bp.DynamicalSystem):
+  def __init__(self):  # define parameters
+    self.par1 = ...
+    self.num = ...
+
+  def reset_state(self, batch_or_mode=None):  # define variables
+    self.a = bp.init.variable_(bm.zeros, (self.num,), batch_or_mode)
+
+  def load_state(self, state_dict):  # load states from an external dict
+    self.a.value = bm.as_jax(state_dict['a'])
+
+  def save_state(self):  # save states as an external dict
+    return {'a': self.a.value}
+```
+
+
+For a complex network model, BrainPy provides a unified state management interface for initializing, saving, and loading states.
+- The ``brainpy.reset_state()`` defines the state resetting of all variables in this node and its children nodes.
+- The ``brainpy.load_state()`` defines the state loading from external disks of all variables in the node and its children.
+- The ``brainpy.save_state()`` defines the state saving to external disks of all variables in the node and its children.
+- The ``brainpy.clear_input()`` defines the clearing of all input variables in the node and its children.
+
+
+
+
+##### 4. Unified brain simulation and brain-inspired computing interface through automatic membrane scaling.
+
+The same model used in brain simulation can be easily transformed into the one used for brain-inspired computing for training. For example,
+
+
+```python
+import brainpy as bp
+import brainpy.math as bm
+
+class EINet(bp.DynSysGroup):
+  def __init__(self):
+    super().__init__()
+    self.N = bp.dyn.LifRefLTC(4000, V_rest=-60., V_th=-50., V_reset=-60., tau=20., tau_ref=5.,
+                              V_initializer=bp.init.Normal(-55., 2.))
+    self.delay = bp.VarDelay(self.N.spike, entries={'I': None})
+    self.E = bp.dyn.ProjAlignPost1(
+      comm=bp.dnn.EventCSRLinear(bp.conn.FixedProb(0.02, pre=3200, post=4000), weight=bp.init.Normal(0.6, 0.01)),
+      syn=bp.dyn.Expon(size=4000, tau=5.),
+      out=bp.dyn.COBA(E=0.),
+      post=self.N
+    )
+    self.I = bp.dyn.ProjAlignPost1(
+      comm=bp.dnn.EventCSRLinear(bp.conn.FixedProb(0.02, pre=800, post=4000), weight=bp.init.Normal(6.7, 0.01)),
+      syn=bp.dyn.Expon(size=4000, tau=10.),
+      out=bp.dyn.COBA(E=-80.),
+      post=self.N
+    )
+
+  def update(self, input):
+    spk = self.delay.at('I')
+    self.E(spk[:3200])
+    self.I(spk[3200:])
+    self.delay(self.N(input))
+    return self.N.spike.value
+
+
+# used for brain simulation
+with bm.environment(mode=bm.nonbatching_mode):
+  net = EINet()
+
+
+# used for brain-inspired computing
+# define the `membrane_scaling` parameter
+with bm.environment(mode=bm.TrainingMode(128), membrane_scaling=bm.Scaling.transform([-60., -50.])):
+  net = EINet()
+```
+
+
+
+##### 5. New APIs for operator customization on CPU and GPU devices through ``brainpy.math.XLACustomOp``. 
+
+Starting from this release, brainpy introduces [Taichi](https://github.com/taichi-dev/taichi) for operator customization. Now, users can write CPU and GPU operators through Numba and Taichi syntax on CPU devices, and Taichi syntax on GPU devices. Particularly, to define an operator, users can use:
+
+```python
+
+import numba as nb
+import taichi as ti
+import numpy as np
+import jax
+import brainpy.math as bm
+
+
+@nb.njit
+def numba_cpu_fun(a, b, out_a, out_b):
+  out_a[:] = a
+  out_b[:] = b
+
+
+@ti.kernel
+def taichi_gpu_fun(a, b, out_a, out_b):
+  for i in range(a.size):
+    out_a[i] = a[i]
+  for i in range(b.size):
+    out_b[i] = b[i]
+
+
+prim = bm.XLACustomOp(cpu_kernel=numba_cpu_fun, gpu_kernel=taichi_gpu_fun)
+a2, b2 = prim(np.random.random(1000), np.random.random(1000),
+              outs=[jax.ShapeDtypeStruct(1000, dtype=np.float32),
+                    jax.ShapeDtypeStruct(1000, dtype=np.float32)])
+
+```
+
+##### 6. Generalized STDP models which are compatible with diverse synapse models.
+
+See https://github.com/brainpy/BrainPy/blob/master/brainpy/_src/dyn/projections/tests/test_STDP.py
+
+
+#### What's Changed
+* [bug] fix compatible bug by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/508
+* [docs] add low-level op customization by @ztqakita in https://github.com/brainpy/BrainPy/pull/507
+* Compatible with `jax==0.4.16` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/511
+* updates for parallelization support by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/514
+* Upgrade surrogate gradient functions by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/516
+* [doc] update operator customization by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/517
+* Updates for OO transforma and surrogate functions by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/519
+* [dyn] add neuron scaling by @ztqakita in https://github.com/brainpy/BrainPy/pull/520
+* State saving, loading, and resetting by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/521
+* [delay] rewrite previous delay APIs so that they are compatible with new brainpy version by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/522
+* [projection] upgrade projections so that APIs are reused across different models by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/523
+* [math] the interface for operator registration by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/524
+* FIx bug in Delay by @ztqakita in https://github.com/brainpy/BrainPy/pull/525
+* Fix bugs in membrane scaling by @ztqakita in https://github.com/brainpy/BrainPy/pull/526
+* [math] Implement taichi op register by @Routhleck in https://github.com/brainpy/BrainPy/pull/527
+* Link libtaichi_c_api.so when import brainpylib by @Routhleck in https://github.com/brainpy/BrainPy/pull/528
+* update taichi op customization by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/529
+* Fix error message by @HoshinoKoji in https://github.com/brainpy/BrainPy/pull/530
+* [math] remove the hard requirement of `taichi` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/531
+* [math] Resolve encoding of source kernel when ti.func is nested in ti… by @Routhleck in https://github.com/brainpy/BrainPy/pull/532
+* [math] new abstract function for XLACustomOp, fix its bugs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/534
+* [math] fix numpy array priority by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/533
+* [brainpy.share] add category shared info by @chaoming0625 in 
https://github.com/brainpy/BrainPy/pull/535
+* [doc] update documentations by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/536
+* [doc] update doc by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/537
+* [dyn] add `brainpy.reset_state()` and `brainpy.clear_input()` for more consistent and flexible state managements by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/538
+* [math] simplify the taichi AOT operator customization interface by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/540
+* [dyn] add `save_state`, `load_state`, `reset_state`, and `clear_input` helpers by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/542
+* [dyn] update STDP APIs on CPUs and fix bugs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/543
+
+#### New Contributors
+* @HoshinoKoji made their first contribution in https://github.com/brainpy/BrainPy/pull/530
+
+**Full Changelog**: https://github.com/brainpy/BrainPy/compare/V2.4.5...V2.4.6
+
+
+
+
+
+### Version 2.4.5
+
+
+#### New Features
+
+- A new version of ``brainpylib==0.1.10`` has been released. In this release, we have fixed some bugs in BrainPy's dedicated GPU operators. Users can freely use them in any application.
+- Correspondingly, dedicated operators in ``brainpy.math`` have been refined.
+- ``.tracing_variable()`` has been created to support tracing ``Variable``s during computations and compilations. For example usage, please see #472.
+- Add a new random API for creating multiple random keys: ``brainpy.math.random.split_keys()``.
+- Fix bugs, including
+  - ``brainpy.dnn.AllToAll`` module
+  - RandomState.
+  - ``brainpy.math.cond`` and ``brainpy.math.while_loop`` when variables are used in both branches
+
+#### What's Changed
+* Creat random key automatically when it is detected by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/461
+* [encoding] upgrade encoding methods by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/464
+* fix #466 by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/467
+* Update operators for compatible with ``brainpylib>=0.1.10`` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/468
+* Support tracing ``Variable`` during computation and compilation by using ``tracing_variable()`` function by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/472
+* Add code of conduct and contributing guides by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/473
+* add Funding and Development roadmap by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/475
+* Create SECURITY.md by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/474
+* Create dependabot.yml by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/476
+* update maintainence info in README by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/479
+* :arrow_up: Bump actions/setup-python from 2 to 4 by @dependabot in https://github.com/brainpy/BrainPy/pull/477
+* :arrow_up: Bump actions/checkout from 2 to 4 by @dependabot in https://github.com/brainpy/BrainPy/pull/478
+* ad acknowledgment.md by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/482
+* update quickstart of `simulating a brain dynamics model` with new APIs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/483
+* update advanced tutorials by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/484
+* [docs] Update installation.rst by @Routhleck in https://github.com/brainpy/BrainPy/pull/485
+* update requirements by @chaoming0625 in 
https://github.com/brainpy/BrainPy/pull/486
+* [doc] update docs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/487
+* [doc] update docs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/488
+* Decouple Online and Offline training algorithms as ``brainpy.mixin.SupportOnline`` and `brainpy.mixin.SupportOffline` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/489
+* [dyn] add STDP_Song2000 LTP model by @ztqakita in https://github.com/brainpy/BrainPy/pull/481
+* update STDP by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/491
+* [doc] update the API of `brainpy.dyn` module & add synaptic plasticity module by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/492
+* fix bug by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/493
+* [math] fix bugs in `cond` and `while_loop` when same variables are used in both branches by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/494
+* [docs] add BrainPy docker and docs by @ztqakita in https://github.com/brainpy/BrainPy/pull/496
+* [docs] update README and installation by @ztqakita in https://github.com/brainpy/BrainPy/pull/499
+* :arrow_up: Bump docker/build-push-action from 4 to 5 by @dependabot in https://github.com/brainpy/BrainPy/pull/498
+* :arrow_up: Bump docker/login-action from 2 to 3 by @dependabot in https://github.com/brainpy/BrainPy/pull/497
+* Add strings in bp._src.dyn.bio_models and abstract_models by @AkitsuFaye in https://github.com/brainpy/BrainPy/pull/500
+* [reset] update logics of state reset in `DynamicalSystem` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/501
+* [doc] upgrade docs with the latest APIs, fix #463 by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/502
+* [doc] add synapse model documentations by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/503
+* Changed the order of code blocks in the docs of hh models and lif models by @AkitsuFaye in https://github.com/brainpy/BrainPy/pull/505
+* [mode] move recurrent models in brainpy.dnn model into `brainpy.dyn` module by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/506
+
+#### New Contributors
+* @dependabot made their first contribution in https://github.com/brainpy/BrainPy/pull/477
+
+**Full Changelog**: https://github.com/brainpy/BrainPy/compare/V2.4.4...V2.4.5
+
+
+
+
+
+
+### Version 2.4.4
+
+
+
+This release has fixed several bugs and updated the documentation. 
+
+#### What's Changed
+* [mixin] abstract the behavior of supporting input projection by ``brainpy.mixin.ReceiveInputProj`` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/428
+* Update delays, models, and projections by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/429
+* Compatible with `jax=0.4.14` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/431
+* Add new tests by @yygf123 in https://github.com/brainpy/BrainPy/pull/430
+* Add NonBatchingMode function by @yygf123 in https://github.com/brainpy/BrainPy/pull/433
+* [connect] Complete `FixedTotalNum` class and fix bugs by @Routhleck in https://github.com/brainpy/BrainPy/pull/434
+* Update the document "Concept 2: Dynamical System" by @yygf123 in https://github.com/brainpy/BrainPy/pull/435
+* [docs] Update three part of tutorial toolbox by @Routhleck in https://github.com/brainpy/BrainPy/pull/436
+* [docs] Update index.rst for surrogate gradient by @Routhleck in https://github.com/brainpy/BrainPy/pull/437
+* Reconstruct BrainPy documentations by @ztqakita in https://github.com/brainpy/BrainPy/pull/438
+* Renew doc requirements.txt by @ztqakita in https://github.com/brainpy/BrainPy/pull/441
+* Compatibility updates by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/442
+* update docs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/443
+* Update optimizer by @yygf123 in https://github.com/brainpy/BrainPy/pull/451
+* [docs] Update custom saving and loading by @Routhleck in https://github.com/brainpy/BrainPy/pull/439
+* [doc] add new strings in bp._src.dyn.hh.py and bp._src.dyn.lif.py by @AkitsuFaye in https://github.com/brainpy/BrainPy/pull/454
+* Serveral updates by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/452
+* Update doc bug in index.rst by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/458
+* add `brainpy.dyn.Alpha` synapse model by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/459
+* [doc] update ODE doc by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/460
+
+#### New Contributors
+* @AkitsuFaye made their first contribution in https://github.com/brainpy/BrainPy/pull/454
+
+**Full Changelog**: https://github.com/brainpy/BrainPy/compare/V2.4.3...V2.4.4
+
+
+
+
+
+### Version 2.4.3
+
+
+This release has standardized the modeling of DNN and SNN models by two interrelated packages: ``brainpy.dnn`` and ``brainpy.dyn``.
+
+Overall, the modeling of brain dynamics in this release has the following advantages:
+
+- automatic merging of duplicate synapses, keeping device memory minimal
+- easy model and data parallelization across multiple devices
+- easy integration with artificial neural networks
+- a new abstraction that decouples dynamics from communication
+- the unified ``DynamicalSystem`` interface
+
+#### New Features
+
+1. Support for defining ion channel models that rely on multiple ions. For example,
+
+```python
+
+class HH(bp.dyn.CondNeuGroup):
+  def __init__(self, size):
+    super().__init__(size)
+    self.k = bp.dyn.PotassiumFixed(size)
+    self.ca = bp.dyn.CalciumFirstOrder(size)
+
+    self.kca = bp.dyn.mix_ions(self.k, self.ca)  # an ion that mixes Potassium and Calcium
+    self.kca.add_elem(ahp=bp.dyn.IAHP_De1994v2(size))  # a channel that relies on both Potassium and Calcium
+
+```
+
+2. New style ``.update()`` function in ``brainpy.DynamicalSystem``, which resolves all compatibility issues. Starting from this version, ``update()`` functions no longer need to receive a global shared argument such as ``tdi``. 
+
+```python
+
+class YourDynSys(bp.DynamicalSystem):
+  def update(self, x):
+    t = bp.share['t']
+    dt = bp.share['dt']
+    i = bp.share['i']
+    ...
+
+```
+
+3. Optimize the connection-building process when using ``brainpy.conn.ScaleFreeBA``, ``brainpy.conn.ScaleFreeBADual``, and ``brainpy.conn.PowerLaw``.
+
+4. New dual exponential model ``brainpy.dyn.DualExponV2``, which can be aligned with the post-synaptic dimension.
+
+5. More synaptic projection abstractions, including
+   - ``brainpy.dyn.VanillaProj``
+   - ``brainpy.dyn.ProjAlignPostMg1``
+   - ``brainpy.dyn.ProjAlignPostMg2``
+   - ``brainpy.dyn.ProjAlignPost1``
+   - ``brainpy.dyn.ProjAlignPost2``
+   - ``brainpy.dyn.ProjAlignPreMg1``
+   - ``brainpy.dyn.ProjAlignPreMg2``
+
+6. Fix compatibility issues, fix unexpected bugs, and improve the model tests.
+
+
+
+#### What's Changed
+* [connect] Optimize the connector about ScaleFreeBA, ScaleFreeBADual, PowerLaw by @Routhleck in https://github.com/brainpy/BrainPy/pull/412
+* [fix] bug of `connect.base.py`'s `require` function by @Routhleck in https://github.com/brainpy/BrainPy/pull/413
+* Many Updates by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/414
+* Update docs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/415
+* fix conflict by @yygf123 in https://github.com/brainpy/BrainPy/pull/416
+* add a new implementation of Dual Exponential Synapse model which can be aligned post. by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/417
+* Enable test when pull requests by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/418
+* Add random.seed() by @yygf123 in https://github.com/brainpy/BrainPy/pull/419
+* Remove windows CI because it always generates strange errors by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/420
+* Recent updates by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/421
+* upgrade Runner and Trainer for new style of ``DynamicalSystem.update()`` function by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/422
+* update docs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/424
+* fix ``lif`` model bugs and support two kinds of spike reset: ``soft`` and ``hard`` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/423
+* rewrite old synapses with decomposed components by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/425
+* fix autograd bugs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/426
+
+#### New Contributors
+* @yygf123 made their first contribution in https://github.com/brainpy/BrainPy/pull/416
+
+**Full Changelog**: https://github.com/brainpy/BrainPy/compare/V2.4.2...V2.4.3
+
+
+
+
+
+
+
+
+### Version 2.4.2
+
+
+
+We are very excited to release this new version of BrainPy V2.4.2. In this new update, we cover several exciting features:
+#### New Features
+* Reorganize the model to decouple dynamics and communication (a sketch follows this list).
+* Add `brainpy.dyn` for dynamics models and `brainpy.dnn` for the ANN layer and connection structures.
+* Add documentation for dedicated operators and common bugs of BrainPy.
+* Fix many bugs. 
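+
+As a sketch of the decoupling idea above: dynamics (neurons, synaptic dynamics, output models) live in `brainpy.dyn`, while the communication structure (connectivity and weights) lives in `brainpy.dnn`. The snippet below follows the EINet example shown in the Version 2.4.6 notes above; treat the exact class names as an assumption for this release rather than its definitive API:
+
+```python
+import brainpy as bp
+
+# dynamics: a neuron group from `brainpy.dyn`
+neu = bp.dyn.LifRefLTC(100, V_rest=-60., V_th=-50., V_reset=-60., tau=20., tau_ref=5.)
+
+# a projection that decouples communication (`comm`, from `brainpy.dnn`)
+# from synaptic dynamics (`syn`) and the synaptic output model (`out`)
+proj = bp.dyn.ProjAlignPost1(
+  comm=bp.dnn.EventCSRLinear(bp.conn.FixedProb(0.1, pre=100, post=100), weight=0.6),
+  syn=bp.dyn.Expon(size=100, tau=5.),
+  out=bp.dyn.COBA(E=0.),
+  post=neu,
+)
+```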
+ +#### What's Changed +* [ANN] add more activation functions by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/379 +* Optimize Gaussian Decay initializer by @Routhleck in https://github.com/brainpy/BrainPy/pull/381 +* [update] new loss functions, surrograte base class, Array built-in functions by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/383 +* [parallelization] new module of ``brainpy.pnn`` for auto parallelization of brain models by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/385 +* [fix] fix the bug of loading states by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/388 +* [math] support `jax.disable_jit()` for debugging by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/389 +* [initialize] speed up ``brainpy.init.DOGDecay`` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/390 +* [doc] fix doc build by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/391 +* Add deprecations for deprecated APIs or functions by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/393 +* [math] enable debugging for new style of transformations in BrainPy by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/392 +* [math] flow control updates by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/396 +* Test of rates by @shangyangli in https://github.com/brainpy/BrainPy/pull/386 +* Add math docs: NumPy-like operations and Dedicated operators by @c-xy17 in https://github.com/brainpy/BrainPy/pull/395 +* [doc] documentation about ``how to debug`` and ``common gotchas`` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/397 +* Update requirements-doc.txt by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/399 +* debug (images not displayed) by @c-xy17 in https://github.com/brainpy/BrainPy/pull/400 +* Decouple dynamics and comminucations by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/401 +* [fix] bugs of control flows by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/404 +* Test for channels, neurons and synapses. by @ztqakita in https://github.com/brainpy/BrainPy/pull/403 +* Implement function to visualize connection matrix by @Routhleck in https://github.com/brainpy/BrainPy/pull/405 +* Optimize GaussianProb by @Routhleck in https://github.com/brainpy/BrainPy/pull/406 +* [dyn] add reduce models, HH-type models and channels by @ztqakita in https://github.com/brainpy/BrainPy/pull/408 +* [dnn] add various linear layers by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/407 +* [delay] `VariableDelay` and `DataDelay` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/409 +* [dyn] add COBA examples using the interface of new `brainpy.dyn` module by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/410 +* [dyn] Update dyn.neurons docs and fix several bugs by @ztqakita in https://github.com/brainpy/BrainPy/pull/411 + +#### New Contributors +* @shangyangli made their first contribution in https://github.com/brainpy/BrainPy/pull/386 + +**Full Changelog**: https://github.com/brainpy/BrainPy/compare/V2.4.1...V2.4.2 + + + + +### Version 2.4.1 + + +#### New Features + +1. [math] Support the error report when modifying a `brainpy.math.Array` during compilation +2. [math] add `brainpy.math.event`, `brainpy.math.sparse` and `brainpy.math.jitconn` module, needs ``brainpylib >= 0.1.9`` +3. [interoperation] add apis and docs for `brainpy.layers.FromFlax` and `brainpy.layer.ToFlaxRNNCell` +4. 
[fix] Bug fixes:
+   - fix WilsonCowan bug
+   - fix `brainpy.connect.FixedProb` bug
+   - fix analysis jit bug
+
+
+
+#### What's Changed
+* Update structures by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/364
+* create blocksparse matrix matrix multiplication opearator by @Routhleck in https://github.com/brainpy/BrainPy/pull/365
+* commit by @grysgreat in https://github.com/brainpy/BrainPy/pull/367
+* Fix bugs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/368
+* [math] update dedicated operators by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/370
+* fix bugs by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/371
+* [bug] fix merging bug by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/372
+* [structure] update package structure by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/369
+* [test] update csrmv tests by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/373
+* [interoperation] add apis and docs for `brainpy.layers.FromFlax` and `brainpy.layer.ToFlaxRNNCell` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/374
+* [doc] update documentation by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/375
+* [bug] fix `brainpy.connect.FixedProb` bug by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/376
+* [bug] fix analysis jit bug by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/377
+* update brainpylib requirements by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/378
+
+#### New Contributors
+* @Routhleck made their first contribution in https://github.com/brainpy/BrainPy/pull/365
+* @grysgreat made their first contribution in https://github.com/brainpy/BrainPy/pull/367
+
+**Full Changelog**: https://github.com/brainpy/BrainPy/compare/V2.4.0...V2.4.1
+
+
+
+
+
+### Version 2.4.0
+
+This branch of releases (``brainpy==2.4.x``) is going to support large-scale modeling of brain dynamics.
+
+As a start, this release provides support for automatic object-oriented (OO) transformations.
+
+
+#### What's New
+
+
+1. Automatic OO transformations no longer need to take ``dyn_vars`` or ``child_objs`` information.
+   These transformations are capable of automatically inferring the underlying dynamical variables.
+   Specifically, they include:
+
+   - ``brainpy.math.grad`` and other autograd functionalities
+   - ``brainpy.math.jit``
+   - ``brainpy.math.for_loop``
+   - ``brainpy.math.while_loop``
+   - ``brainpy.math.ifelse``
+   - ``brainpy.math.cond``
+
+2. Update documentation
+3. Fix several bugs
+
+#### What's Changed
+* reorganize operators in `brainpy.math` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/357
+* Automatic transformations any function/object using `brainpy.math.Variable` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/358
+* New OO transforms support ``jax.disable_jit`` mode by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/359
+* [oo transform] Enable new style of jit transformation to support `static_argnums` and `static_argnames` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/360
+* [documentation] update documentation to brainpy>=2.4.0 by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/361
+
+
+**Full Changelog**: https://github.com/brainpy/BrainPy/compare/V2.3.8...V2.4.0
+
+
+
+
+
+
+
+### Version 2.3.8
+
+
+This release continues to add support for improving the usability of BrainPy.
+
+
+#### New Features
+
+
+1. New data structures for object-oriented transformations. 
- ``NodeList`` and ``NodeDict`` for a list/tuple/dict of ``BrainPyObject`` instances.
+   - ``ListVar`` and ``DictVar`` for a list/tuple/dict of brainpy data.
+2. `Clip` transformation for brainpy initializers.
+3. All ``brainpylib`` operators are accessible in the ``brainpy.math`` module. In particular, there are some dedicated operators for scaling up million-neuron networks. For an example, see [Simulating 1-million-neuron networks with 1GB GPU memory](https://brainpy-examples.readthedocs.io/en/latest/large_scale_modeling/EI_net_with_1m_neurons.html).
+4. Enable monitoring GPU models on CPU when setting ``DSRunner(..., memory_efficient=True)``. This setting can usually reduce memory usage substantially.
+5. ``brainpylib`` wheels on the Linux platform support the GPU operators. Users can install the GPU version of ``brainpylib`` (requires ``brainpylib>=0.1.7``) directly by ``pip install brainpylib``. @ztqakita
+
+#### What's Changed
+* Fix bugs and add more variable structures: `ListVar` and `DictVar` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/345
+* add CI for testing various models by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/346
+* Update docs and tests by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/347
+* Fix `Runner(jit=False)`` bug by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/348
+* Compatible with jax>=0.4.7 by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/349
+* Updates by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/350
+* reconstruct BrainPy by merging brainpylib by @ztqakita in https://github.com/brainpy/BrainPy/pull/351
+* Intergate brainpylib operators into brainpy by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/352
+* fix `brainpylib` call bug by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/354
+* Enable memory-efficient ``DSRunner`` by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/355
+* fix `Array` transform bug by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/356
+
+
+**Full Changelog**: https://github.com/brainpy/BrainPy/compare/V2.3.7...V2.3.8
+
+
+
+### Version 2.3.7
+
+- Fix bugs on population models in the ``brainpy.rate`` module
+- Fix bug on ``brainpy.LoopOverTime``
+- Add more synaptic models, including the DualExponential model and Alpha model, in the ``brainpy.experimental`` module
+- Support calling a module through the right-shift operator, such as ``data >> module1 >> module2``
+
+
+### Version 2.3.6
+
+This release continues to add support for brain-inspired computation.
+
+
+#### New Features
+
+##### More flexible customization of surrogate gradient functions.
+
+- brainpy.math.surrogate.Sigmoid
+- brainpy.math.surrogate.PiecewiseQuadratic
+- brainpy.math.surrogate.PiecewiseExp
+- brainpy.math.surrogate.SoftSign
+- brainpy.math.surrogate.Arctan
+- brainpy.math.surrogate.NonzeroSignLog
+- brainpy.math.surrogate.ERF
+- brainpy.math.surrogate.PiecewiseLeakyRelu
+- brainpy.math.surrogate.SquarewaveFourierSeries
+- brainpy.math.surrogate.S2NN
+- brainpy.math.surrogate.QPseudoSpike
+- brainpy.math.surrogate.LeakyRelu
+- brainpy.math.surrogate.LogTailedRelu
+- brainpy.math.surrogate.ReluGrad
+- brainpy.math.surrogate.GaussianGrad
+- brainpy.math.surrogate.InvSquareGrad
+- brainpy.math.surrogate.MultiGaussianGrad
+- brainpy.math.surrogate.SlayerGrad
+
+##### Fix bugs
+
+- ``brainpy.LoopOverTime``
+
+
+
+
+
+### Version 2.3.5
+
+
+
+This release continues to add support for brain-inspired computation.
+
+
+#### New Features
+
+
+##### 1. 
``brainpy.share`` for sharing data across submodules
+
+In this release, we abstract the shared data as a ``brainpy.share`` object.
+
+This object, together with the ``brainpy.Delay`` introduced below, constitutes the support that enables us to define SNN models in the same way as ANN ones.
+
+
+##### 2. ``brainpy.Delay`` for delay processing
+
+``Delay`` is abstracted as a dynamical system, which can be updated/retrieved by users.
+
+```python
+import brainpy as bp
+
+class EINet(bp.DynamicalSystemNS):
+  def __init__(self, scale=1.0, e_input=20., i_input=20., delay=None):
+    super().__init__()
+
+    self.bg_exc = e_input
+    self.bg_inh = i_input
+
+    # network size
+    num_exc = int(3200 * scale)
+    num_inh = int(800 * scale)
+
+    # neurons
+    pars = dict(V_rest=-60., V_th=-50., V_reset=-60., tau=20., tau_ref=5.,
+                V_initializer=bp.init.Normal(-55., 2.), input_var=False)
+    self.E = bp.neurons.LIF(num_exc, **pars)
+    self.I = bp.neurons.LIF(num_inh, **pars)
+
+    # synapses
+    we = 0.6 / scale  # excitatory synaptic weight (voltage)
+    wi = 6.7 / scale  # inhibitory synaptic weight
+    self.E2E = bp.experimental.Exponential(
+      bp.conn.FixedProb(0.02, pre=self.E.size, post=self.E.size),
+      g_max=we, tau=5., out=bp.experimental.COBA(E=0.)
+    )
+    self.E2I = bp.experimental.Exponential(
+      bp.conn.FixedProb(0.02, pre=self.E.size, post=self.I.size),
+      g_max=we, tau=5., out=bp.experimental.COBA(E=0.)
+    )
+    self.I2E = bp.experimental.Exponential(
+      bp.conn.FixedProb(0.02, pre=self.I.size, post=self.E.size),
+      g_max=wi, tau=10., out=bp.experimental.COBA(E=-80.)
+    )
+    self.I2I = bp.experimental.Exponential(
+      bp.conn.FixedProb(0.02, pre=self.I.size, post=self.I.size),
+      g_max=wi, tau=10., out=bp.experimental.COBA(E=-80.)
+    )
+    self.delayE = bp.Delay(self.E.spike, entries={'E': delay})
+    self.delayI = bp.Delay(self.I.spike, entries={'I': delay})
+
+  def update(self):
+    e_spike = self.delayE.at('E')
+    i_spike = self.delayI.at('I')
+    e_inp = self.E2E(e_spike, self.E.V) + self.I2E(i_spike, self.E.V) + self.bg_exc
+    i_inp = self.I2I(i_spike, self.I.V) + self.E2I(e_spike, self.I.V) + self.bg_inh
+    self.delayE(self.E(e_inp))
+    self.delayI(self.I(i_inp))
+
+```
+
+
+
+##### 3. ``brainpy.checkpoints.save_pytree`` and ``brainpy.checkpoints.load_pytree`` for saving/loading a target to/from a given file path
+
+Now we can directly use ``brainpy.checkpoints.save_pytree`` to save a network state into the file path we specified.
+
+Similarly, we can use ``brainpy.checkpoints.load_pytree`` to load states from the given file path.
+
+
+##### 4. More ANN layers
+
+
+- brainpy.layers.ConvTranspose1d
+- brainpy.layers.ConvTranspose2d
+- brainpy.layers.ConvTranspose3d
+- brainpy.layers.Conv1dLSTMCell
+- brainpy.layers.Conv2dLSTMCell
+- brainpy.layers.Conv3dLSTMCell
+
+
+##### 5. 
More compatible dense operators
+
+PyTorch operators:
+
+- brainpy.math.Tensor
+- brainpy.math.flatten
+- brainpy.math.cat
+- brainpy.math.abs
+- brainpy.math.absolute
+- brainpy.math.acos
+- brainpy.math.arccos
+- brainpy.math.acosh
+- brainpy.math.arccosh
+- brainpy.math.add
+- brainpy.math.addcdiv
+- brainpy.math.addcmul
+- brainpy.math.angle
+- brainpy.math.asin
+- brainpy.math.arcsin
+- brainpy.math.asinh
+- brainpy.math.arcsinh
+- brainpy.math.atan
+- brainpy.math.arctan
+- brainpy.math.atan2
+- brainpy.math.atanh
+
+
+TensorFlow operators:
+
+- brainpy.math.concat
+- brainpy.math.reduce_sum
+- brainpy.math.reduce_max
+- brainpy.math.reduce_min
+- brainpy.math.reduce_mean
+- brainpy.math.reduce_all
+- brainpy.math.reduce_any
+- brainpy.math.reduce_logsumexp
+- brainpy.math.reduce_prod
+- brainpy.math.reduce_std
+- brainpy.math.reduce_variance
+- brainpy.math.reduce_euclidean_norm
+- brainpy.math.unsorted_segment_sqrt_n
+- brainpy.math.segment_mean
+- brainpy.math.unsorted_segment_sum
+- brainpy.math.unsorted_segment_prod
+- brainpy.math.unsorted_segment_max
+- brainpy.math.unsorted_segment_min
+- brainpy.math.unsorted_segment_mean
+- brainpy.math.segment_sum
+- brainpy.math.segment_prod
+- brainpy.math.segment_max
+- brainpy.math.segment_min
+- brainpy.math.clip_by_value
+- brainpy.math.cast
+
+
+##### Others
+
+- Remove the hard requirements of ``brainpylib`` and ``numba``.
+
+
+
+
+### Version 2.3.4
+
+
+This release mainly focuses on the compatibility with other frameworks:
+
+1. Fix JAX import error when `jax>=0.4.2`
+2. Backward compatibility of the `brainpy.dyn` module
+3. Start to implement and be compatible with operators in PyTorch and TensorFlow, so that users' PyTorch/TensorFlow models can be easily migrated to BrainPy
+
+
+**Full Changelog**: https://github.com/brainpy/BrainPy/compare/V2.3.3...V2.3.4
+
+
+
+
+### Version 2.3.3
+
+
+Improve backward compatibility:
+
+- monitors and inputs in ``DSRunner``
+- models in ``brainpy.dyn``
+- constants and functions in ``brainpy.analysis``
+
+
+### Version 2.3.2
+
+This release (under the branch of ``brainpy==2.3.x``) continues to add support for brain-inspired computation.
+
+
+#### New Features
+
+
+##### 1. New package structure for stable API release
+
+Unstable APIs are all hosted in the ``brainpy._src`` module.
+Other APIs are stable and will be maintained for a long time.
+
+
+##### 2. New schedulers
+
+- `brainpy.optim.CosineAnnealingWarmRestarts`
+- `brainpy.optim.CosineAnnealingLR`
+- `brainpy.optim.ExponentialLR`
+- `brainpy.optim.MultiStepLR`
+- `brainpy.optim.StepLR`
+
+
+##### 3. Others
+
+- support `static_argnums` in `brainpy.math.jit`
+- fix bugs of `reset_state()` and `clear_input()` in `brainpy.channels`
+- fix jit error checking
+
+
+
+
+
+
+### Version 2.3.1
+
+This release (under the release branch of ``brainpy==2.3.x``) continues to add support for brain-inspired computation.
+
+
+
+```python
+import brainpy as bp
+import brainpy.math as bm
+```
+
+
+
+#### Backwards Incompatible Changes
+
+
+
+###### 1. Error: module 'brainpy' has no attribute 'datasets'
+
+``brainpy.datasets`` module is now published as an independent package ``brainpy_datasets``. 
Please change your dataset access from
+
+```python
+bp.datasets.xxxxx
+```
+
+to
+
+```python
+import brainpy_datasets as bd
+
+bd.chaos.XXX
+bd.vision.XXX
+```
+
+For a chaotic data series,
+
+```python
+# old version
+data = bp.datasets.double_scroll_series(t_warmup + t_train + t_test, dt=dt)
+x_var = data['x']
+y_var = data['y']
+z_var = data['z']
+
+# new version
+data = bd.chaos.DoubleScrollEq(t_warmup + t_train + t_test, dt=dt)
+x_var = data.xs
+y_var = data.ys
+z_var = data.zs
+```
+
+For a vision dataset,
+
+```python
+# old version
+dataset = bp.datasets.FashionMNIST(root, train=True, download=True)
+
+# new version
+dataset = bd.vision.FashionMNIST(root, split='train', download=True)
+```
+
+
+
+###### 2. Error: DSTrainer must receive an instance with BatchingMode
+
+This error will happen when using ``brainpy.OnlineTrainer``, ``brainpy.OfflineTrainer``, ``brainpy.BPTT``, or ``brainpy.BPFF``.
+
+From version 2.3.1, BrainPy explicitly considers the computing mode of each model. For trainers, all training targets should be models with ``BatchingMode`` or ``TrainingMode``.
+
+If you are training a model with ``OnlineTrainer`` or ``OfflineTrainer``,
+
+```python
+# old version
+class NGRC(bp.DynamicalSystem):
+  def __init__(self, num_in):
+    super(NGRC, self).__init__()
+    self.r = bp.layers.NVAR(num_in, delay=2, order=3)
+    self.di = bp.layers.Dense(self.r.num_out, num_in)
+
+  def update(self, sha, x):
+    di = self.di(sha, self.r(sha, x))
+    return x + di
+
+
+# new version
+bm.set_environment(mode=bm.batching_mode)
+
+class NGRC(bp.DynamicalSystem):
+  def __init__(self, num_in):
+    super(NGRC, self).__init__()
+    self.r = bp.layers.NVAR(num_in, delay=2, order=3)
+    self.di = bp.layers.Dense(self.r.num_out, num_in, mode=bm.training_mode)
+
+  def update(self, sha, x):
+    di = self.di(sha, self.r(sha, x))
+    return x + di
+```
+
+If you are training models with ``BPTrainer``, add the following line at the top of the script:
+
+```python
+bm.set_environment(mode=bm.training_mode)
+```
+
+
+
+###### 3. Error: inputs_are_batching is no longer supported.
+
+This is because if the training target is in ``batching`` mode, this already indicates that the inputs should be batching.
+
+Simply removing ``inputs_are_batching`` from your call of ``.predict()`` will solve the issue.
+
+
+
+
+
+#### New Features
+
+
+
+##### 1. ``brainpy.math`` module upgrade
+
+###### ``brainpy.math.surrogate`` module for surrogate gradient functions.
+
+Currently, we support
+
+- `brainpy.math.surrogate.arctan`
+- `brainpy.math.surrogate.erf`
+- `brainpy.math.surrogate.gaussian_grad`
+- `brainpy.math.surrogate.inv_square_grad`
+- `brainpy.math.surrogate.leaky_relu`
+- `brainpy.math.surrogate.log_tailed_relu`
+- `brainpy.math.surrogate.multi_gaussian_grad`
+- `brainpy.math.surrogate.nonzero_sign_log`
+- `brainpy.math.surrogate.one_input`
+- `brainpy.math.surrogate.piecewise_exp`
+- `brainpy.math.surrogate.piecewise_leaky_relu`
+- `brainpy.math.surrogate.piecewise_quadratic`
+- `brainpy.math.surrogate.q_pseudo_spike`
+- `brainpy.math.surrogate.relu_grad`
+- `brainpy.math.surrogate.s2nn`
+- `brainpy.math.surrogate.sigmoid`
+- `brainpy.math.surrogate.slayer_grad`
+- `brainpy.math.surrogate.soft_sign`
+- `brainpy.math.surrogate.squarewave_fourier_series`
+
+
+
+###### New transformation function ``brainpy.math.to_dynsys``
+
+New transformation function ``brainpy.math.to_dynsys`` supports transforming a pure Python function into a ``DynamicalSystem``. 
This will be useful when running a `DynamicalSystem` with arbitrary customized inputs.
+
+```python
+import brainpy as bp
+import brainpy.math as bm
+
+hh = bp.neurons.HH(1)
+
+@bm.to_dynsys(child_objs=hh)
+def run_hh(tdi, x=None):
+  if x is not None:
+    hh.input += x
+
+runner = bp.DSRunner(run_hh, monitors={'v': hh.V})
+runner.run(inputs=bm.random.uniform(3, 6, 1000))
+```
+
+
+
+###### Default data types
+
+Default data types `brainpy.math.int_`, `brainpy.math.float_` and `brainpy.math.complex_` are initialized according to the default `x64` settings. Then, these data types can be set or retrieved via the `brainpy.math.set_*` or `brainpy.math.get_*` syntaxes.
+
+Take the default integer type ``int_`` as an example,
+
+```python
+# set the default integer type
+bm.set_int_(jax.numpy.int64)
+
+# get the default integer type
+a1 = bm.asarray([1], dtype=bm.int_)
+a2 = bm.asarray([1], dtype=bm.get_int())  # equivalent
+```
+
+Default data types are changed according to the `x64` setting of JAX. For instance,
+
+```python
+bm.enable_x64()
+assert bm.int_ == jax.numpy.int64
+bm.disable_x64()
+assert bm.int_ == jax.numpy.int32
+```
+
+``brainpy.math.float_`` and ``brainpy.math.complex_`` behave similarly to ``brainpy.math.int_``.
+
+
+
+###### Environment context manager
+
+This release introduces a new concept, the ``computing environment``, in BrainPy. A computing environment is a default setting for current computation jobs, including the default data type (``int_``, ``float_``, ``complex_``), the default numerical integration precision (``dt``), and the default computing mode (``mode``). All models, arrays, and computations using the default setting will be carried out under the environment setting.
+
+Users can set a default environment through
+
+```python
+brainpy.math.set_environment(mode, dt, x64)
+```
+
+However, one can also construct models or perform computation through a temporary environment context manager; this can be implemented through:
+
+```python
+# constructing a HH model with dt=0.1 and x64 precision
+with bm.environment(mode, dt=0.1, x64=True):
+  hh1 = bp.neurons.HH(1)
+
+# constructing a HH model with dt=0.05 and x32 precision
+with bm.environment(mode, dt=0.05, x64=False):
+  hh2 = bp.neurons.HH(1)
+```
+
+Usually, users construct models for either brain-inspired computing (``training mode``) or brain simulation (``nonbatching mode``); therefore, there are shortcut context managers for setting a training environment or batching environment:
+
+```python
+with bm.training_environment(dt, x64):
+  pass
+
+with bm.batching_environment(dt, x64):
+  pass
+```
+
+
+
+##### 2. ``brainpy.dyn`` module
+
+
+
+###### ``brainpy.dyn.transform`` module for transforming a ``DynamicalSystem`` instance to a callable ``BrainPyObject``.
+
+Specifically, we provide
+
+- `LoopOverTime` for unrolling a dynamical system over time.
+- `NoSharedArg` for removing the dependency of shared arguments.
+
+
+
+
+
+##### 3. Running supports in BrainPy
+
+
+
+###### All ``brainpy.Runner`` are now subclasses of ``BrainPyObject``
+
+This means that all ``brainpy.Runner`` can be used as a part of the high-level program or transformation.
+
+
+
+###### Enable the continuous running of a differential equation (ODE, SDE, FDE, DDE, etc.) with `IntegratorRunner`. 
For example,
+
+```python
+import brainpy as bp
+
+# differential equation
+a, b, tau = 0.7, 0.8, 12.5
+dV = lambda V, t, w, Iext: V - V * V * V / 3 - w + Iext
+dw = lambda w, t, V: (V + a - b * w) / tau
+fhn = bp.odeint(bp.JointEq([dV, dw]), method='rk4', dt=0.1)
+
+# differential integrator runner
+runner = bp.IntegratorRunner(fhn, monitors=['V', 'w'], inits=[1., 1.])
+
+# run 1
+Iext, duration = bp.inputs.section_input([0., 1., 0.5], [200, 200, 200], return_length=True)
+runner.run(duration, dyn_args=dict(Iext=Iext))
+bp.visualize.line_plot(runner.mon.ts, runner.mon['V'], legend='V')
+
+# run 2
+Iext, duration = bp.inputs.section_input([0.5], [200], return_length=True)
+runner.run(duration, dyn_args=dict(Iext=Iext))
+bp.visualize.line_plot(runner.mon.ts, runner.mon['V'], legend='V-run2', show=True)
+
+```
+
+
+
+###### Enable calling a customized function during the fitting of ``brainpy.BPTrainer``.
+
+This customized function (provided through ``fun_after_report``) will be useful for saving a checkpoint during training. For instance,
+
+```python
+class CheckPoint:
+  def __init__(self, path='path/to/directory/'):
+    self.max_acc = 0.
+    self.path = path
+
+  def __call__(self, idx, metrics, phase):
+    if phase == 'test' and metrics['acc'] > self.max_acc:
+      self.max_acc = metrics['acc']
+      bp.checkpoints.save(self.path, net.state_dict(), idx)
+
+trainer = bp.BPTT()
+trainer.fit(..., fun_after_report=CheckPoint())
+```
+
+
+
+###### Enable data with the ``data_first_axis`` format when predicting or fitting in a ``brainpy.DSRunner`` and ``brainpy.DSTrainer``.
+
+Previous versions of BrainPy only support data with the batch dimension at the first axis. Currently, ``brainpy.DSRunner`` and ``brainpy.DSTrainer`` can support data with the time dimension at the first axis. This can be set through ``data_first_axis='T'`` when initializing a runner or trainer.
+
+```python
+runner = bp.DSRunner(..., data_first_axis='T')
+trainer = bp.DSTrainer(..., data_first_axis='T')
+```
+
+
+
+##### 4. Utility in BrainPy
+
+
+
+###### ``brainpy.encoding`` module for encoding rate values into spike trains
+
+Currently, we support
+
+- `brainpy.encoding.LatencyEncoder`
+- `brainpy.encoding.PoissonEncoder`
+- `brainpy.encoding.WeightedPhaseEncoder`
+
+
+
+###### ``brainpy.checkpoints`` module for model state serialization.
+
+This version of BrainPy supports saving a checkpoint of the model onto the physical disk. Inspired by the Flax API, we provide the following checkpoint APIs:
+
+- ``brainpy.checkpoints.save()`` for saving a checkpoint of the model.
+- ``brainpy.checkpoints.multiprocess_save()`` for saving a checkpoint of the model in a multi-process environment.
+- ``brainpy.checkpoints.load()`` for loading the last or best checkpoint from the given checkpoint path.
+- ``brainpy.checkpoints.load_latest()`` for retrieving the path of the latest checkpoint in a directory.
+
+
+
+
+
+#### Deprecations
+
+
+
+##### 1. Deprecations in the running supports of BrainPy
+
+###### ``func_monitors`` is no longer supported in all ``brainpy.Runner`` subclasses.
+
+We will remove its support in version 2.4.0. Instead, monitoring with a dict of callable functions can be set in ``monitors``. 
For example,
+
+```python
+# old version
+
+runner = bp.DSRunner(model,
+                     monitors={'sps': model.spike, 'vs': model.V},
+                     func_monitors={'sp10': model.spike[10]})
+```
+
+```python
+# new version
+runner = bp.DSRunner(model,
+                     monitors={'sps': model.spike,
+                               'vs': model.V,
+                               'sp10': model.spike[10]})
+```
+
+
+
+###### ``func_inputs`` is no longer supported in all ``brainpy.Runner`` subclasses.
+
+Instead, giving inputs with a callable function should be done with ``inputs``.
+
+```python
+# old version
+
+net = EINet()
+
+def f_input(tdi):
+  net.E.input += 10.
+
+runner = bp.DSRunner(net, fun_inputs=f_input, inputs=('I.input', 10.))
+```
+
+```python
+# new version
+
+def f_input(tdi):
+  net.E.input += 10.
+  net.I.input += 10.
+runner = bp.DSRunner(net, inputs=f_input)
+```
+
+
+
+###### ``inputs_are_batching`` is deprecated.
+
+``inputs_are_batching`` is deprecated in ``predict()``/``.run()`` of all ``brainpy.Runner`` subclasses.
+
+
+
+###### ``args`` and ``dyn_args`` are now deprecated in ``IntegratorRunner``.
+
+Instead, users should specify ``args`` and ``dyn_args`` when calling the ``IntegratorRunner.run()`` function.
+
+```python
+dV = lambda V, t, w, I: V - V * V * V / 3 - w + I
+dw = lambda w, t, V, a, b: (V + a - b * w) / 12.5
+integral = bp.odeint(bp.JointEq([dV, dw]), method='exp_auto')
+
+# old version
+runner = bp.IntegratorRunner(
+  integral,
+  monitors=['V', 'w'],
+  inits={'V': bm.random.rand(10), 'w': bm.random.normal(size=10)},
+  args={'a': 1., 'b': 1.},  # CHANGE
+  dyn_args={'I': bp.inputs.ramp_input(0, 4, 100)},  # CHANGE
+)
+runner.run(100.)
+
+```
+
+```python
+# new version
+runner = bp.IntegratorRunner(
+  integral,
+  monitors=['V', 'w'],
+  inits={'V': bm.random.rand(10), 'w': bm.random.normal(size=10)},
+)
+runner.run(100.,
+           args={'a': 1., 'b': 1.},
+           dyn_args={'I': bp.inputs.ramp_input(0, 4, 100)})
+```
+
+
+
+##### 2. Deprecations in ``brainpy.math`` module
+
+###### `ditype()` and `dftype()` are deprecated.
+
+`brainpy.math.ditype()` and `brainpy.math.dftype()` are deprecated. Use `brainpy.math.int_` and `brainpy.math.float_` instead.
+
+
+
+###### ``brainpy.modes`` module is now moved into ``brainpy.math``
+
+The correspondences are listed as follows:
+
+- ``brainpy.modes.Mode`` => ``brainpy.math.Mode``
+- ``brainpy.modes.NormalMode`` => ``brainpy.math.NonBatchingMode``
+- ``brainpy.modes.BatchingMode`` => ``brainpy.math.BatchingMode``
+- ``brainpy.modes.TrainingMode`` => ``brainpy.math.TrainingMode``
+- ``brainpy.modes.normal`` => ``brainpy.math.nonbatching_mode``
+- ``brainpy.modes.batching`` => ``brainpy.math.batching_mode``
+- ``brainpy.modes.training`` => ``brainpy.math.training_mode``
+
+
+
+
+
+
+### Version 2.3.0
+
+This branch of releases aims to provide a unified computing framework for brain simulation and brain-inspired computing.
+
+#### New features
+
+1. ``brainpy.BPTT`` supports `train_data` and `test_data` with general Python iterators. For instance, one can train a model with a PyTorch dataloader or TensorFlow datasets.
+
+```python
+import torchvision
+from torch.utils.data import DataLoader
+data = torchvision.datasets.CIFAR10("./CIFAR10", train=False, transform=torchvision.transforms.ToTensor())
+loader = DataLoader(dataset=data, batch_size=4, shuffle=True, num_workers=0, drop_last=False)
+
+# any generator can be used for train_data or test_data
+trainer = bp.BPTT()
+trainer.fit(loader)
+```
+
+2. Consolidated object-oriented transformation in the ``brainpy.math.object_transform`` module. 
3. New [documentation](https://brainpy.readthedocs.io/en/latest/tutorial_math/brainpy_transform_concept.html) is now online, introducing the consolidated BrainPy concept of object-oriented transformation.

4. Change ``brainpy.math.JaxArray`` to ``brainpy.math.Array``.



#### Deprecations

1. The ``brainpy.datasets`` module is no longer supported. New APIs will be moved into the [``brainpy-datasets`` package](https://github.com/brainpy/datasets).
2. ``brainpy.train.BPTT`` no longer supports receiving the train data as `[X, Y]`. Instead, users should provide a data generator, such as a PyTorch dataset or a TensorFlow dataset.
3. The update function of ``brainpy.math.TimeDelay`` no longer receives a `time` index. Instead, update the new data directly with ``TimeDelay.update(data)`` rather than `TimeDelay.update(time, data)`.
4. Fix the monitoring error of delay differential equations with ``brainpy.integrators.IntegratorRunner``.

#### Bug Fixes

1. Fix the bug on ``One2One`` connection.
2. Fix the bug in the ``eprop`` example.
3. Fix the `ij2csr` transformation error.
4. Fix test bugs.

#### What's Changed
* fix eprop example error by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/305
* minor updates on API and DOC by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/306
* Add new optimizers by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/307
* add documentation of for random number generation by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/308
* consolidate the concept of OO transformation by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/309
* Upgrade documetations by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/310
* Ready for publish by @chaoming0625 in https://github.com/brainpy/BrainPy/pull/311


**Full Changelog**: https://github.com/brainpy/BrainPy/compare/V2.2.4.0...V2.3.0


## brainpy 2.2.x

BrainPy 2.2.x is a complete re-design of the framework, tackling the shortcomings of the brainpy 2.1.x generation and bringing it in line with research needs and standards.




### Version 2.2.4

This release updates many functionalities and fixes several bugs in BrainPy.

#### New Features

1. More ANN layers, including ``brainpy.layers.Flatten`` and ``brainpy.layers.Activation``.
2. Optimized connection building for the ``brainpy.connect`` module.
3. Add the CIFAR dataset.
4. Enhanced APIs and documentation for parallel simulations via ``brainpy.running.cpu_ordered_parallel``, ``brainpy.running.cpu_unordered_parallel``, ``brainpy.running.jax_vectorize_map`` and ``brainpy.running.jax_parallelize_map`` (see the sketch below).
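For instance, a batch of independent simulations can be dispatched over CPU processes. A minimal sketch, assuming ``cpu_ordered_parallel`` takes a callable, a list of per-parameter argument lists, and a process count:

```python
import brainpy as bp

def run_trial(a, b):
  # Build and simulate one model for the parameter pair (a, b);
  # a placeholder computation stands in for the real simulation.
  return a + b

# Results are collected in the same order as the submitted arguments.
results = bp.running.cpu_ordered_parallel(run_trial,
                                          [[1., 2., 3.], [10., 20., 30.]],
                                          num_process=3)
```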
+


#### What's Changed
* add Activation and Flatten class by @LuckyHFC in https://github.com/PKU-NIP-Lab/BrainPy/pull/291
* optimizes the connect time when using gpu by @MamieZhu in https://github.com/PKU-NIP-Lab/BrainPy/pull/293
* datasets::vision: add cifar dataset by @hbelove in https://github.com/PKU-NIP-Lab/BrainPy/pull/292
* fix #294: remove VariableView in `dyn_vars` of a runner by @chaoming0625 in https://github.com/PKU-NIP-Lab/BrainPy/pull/295
* update issue template by @chaoming0625 in https://github.com/PKU-NIP-Lab/BrainPy/pull/296
* add multiprocessing functions for batch running of BrainPy functions by @chaoming0625 in https://github.com/PKU-NIP-Lab/BrainPy/pull/298
* upgrade connection apis by @chaoming0625 in https://github.com/PKU-NIP-Lab/BrainPy/pull/299
* fix #300: update parallelization api documentation by @chaoming0625 in https://github.com/PKU-NIP-Lab/BrainPy/pull/302
* update doc by @chaoming0625 in https://github.com/PKU-NIP-Lab/BrainPy/pull/303

#### New Contributors
* @LuckyHFC made their first contribution in https://github.com/PKU-NIP-Lab/BrainPy/pull/291
* @MamieZhu made their first contribution in https://github.com/PKU-NIP-Lab/BrainPy/pull/293
* @hbelove made their first contribution in https://github.com/PKU-NIP-Lab/BrainPy/pull/292

**Full Changelog**: https://github.com/PKU-NIP-Lab/BrainPy/compare/V2.2.3.6...V2.2.4




### Version 2.2.1 (2022.09.09)

This release fixes bugs found in the codebase and improves the usability and functionality of BrainPy.

#### Bug fixes

1. Fix the bug of operator customization in `brainpy.math.XLACustomOp` and `brainpy.math.register_op`. Now, operator customization is supported through the NumPy and Numba interfaces. For instance,

``` python
import brainpy.math as bm

def abs_eval(events, indices, indptr, post_val, values):
  return post_val

def con_compute(outs, ins):
  post_val = outs
  events, indices, indptr, _, values = ins
  for i in range(events.size):
    if events[i]:
      for j in range(indptr[i], indptr[i + 1]):
        index = indices[j]
        old_value = post_val[index]
        post_val[index] = values + old_value

event_sum = bm.XLACustomOp(eval_shape=abs_eval, con_compute=con_compute)
```

2. Fix the bug of `brainpy.tools.DotDict`. Now, it is compatible with the transformations of JAX. For instance,

``` python
import brainpy as bp
from jax import vmap

@vmap
def multiple_run(I):
  hh = bp.neurons.HH(1)
  runner = bp.dyn.DSRunner(hh, inputs=('input', I), numpy_mon_after_run=False)
  runner.run(100.)
  return runner.mon

mon = multiple_run(bp.math.arange(2, 10, 2))
```

#### New features

1. Add numpy operators `brainpy.math.mat`, `brainpy.math.matrix`, `brainpy.math.asmatrix`.
2. Improve the translation rules of brainpylib operators and their running speed.
3. Support `DSView` of a `DynamicalSystem` instance. Now, defining models with a slice view of a DS instance is supported.
For example,

``` python
import brainpy as bp
import brainpy.math as bm


class EINet_V2(bp.dyn.Network):
  def __init__(self, scale=1.0, method='exp_auto'):
    super(EINet_V2, self).__init__()

    # network size
    num_exc = int(3200 * scale)
    num_inh = int(800 * scale)

    # neurons
    self.N = bp.neurons.LIF(num_exc + num_inh,
                            V_rest=-60., V_th=-50., V_reset=-60., tau=20., tau_ref=5.,
                            method=method, V_initializer=bp.initialize.Normal(-55., 2.))

    # synapses
    we = 0.6 / scale  # excitatory synaptic weight (voltage)
    wi = 6.7 / scale  # inhibitory synaptic weight
    self.Esyn = bp.synapses.Exponential(pre=self.N[:num_exc], post=self.N,
                                        conn=bp.connect.FixedProb(0.02),
                                        g_max=we, tau=5.,
                                        output=bp.synouts.COBA(E=0.),
                                        method=method)
    self.Isyn = bp.synapses.Exponential(pre=self.N[num_exc:], post=self.N,
                                        conn=bp.connect.FixedProb(0.02),
                                        g_max=wi, tau=10.,
                                        output=bp.synouts.COBA(E=-80.),
                                        method=method)

net = EINet_V2(scale=1., method='exp_auto')
# simulation
runner = bp.dyn.DSRunner(
    net,
    monitors={'spikes': net.N.spike},
    inputs=[(net.N.input, 20.)]
)
runner.run(100.)

# visualization
bp.visualize.raster_plot(runner.mon.ts, runner.mon['spikes'], show=True)
```

### Version 2.2.0 (2022.08.12)

This release provides important improvements to BrainPy, including usability, speed, and functionality.

#### Backwards Incompatible changes

1. The `brainpy.nn` module is no longer supported and has been removed in version 2.2.0. Instead, users should use the `brainpy.train` module for training with BP, online learning, or offline learning algorithms, and the `brainpy.algorithms` module for online/offline training algorithms.
2. The `update()` function for the model definition has been changed:

```
>>> # 2.1.x
>>>
>>> import brainpy as bp
>>>
>>> class SomeModel(bp.dyn.DynamicalSystem):
>>>   def __init__(self, ):
>>>     ......
>>>   def update(self, t, dt):
>>>     pass
>>> # 2.2.x
>>>
>>> import brainpy as bp
>>>
>>> class SomeModel(bp.dyn.DynamicalSystem):
>>>   def __init__(self, ):
>>>     ......
>>>   def update(self, tdi):
>>>     t, dt = tdi.t, tdi.dt
>>>     pass
```

where `tdi` can be defined with other names, like `sha`, to represent
the shared argument across modules.

#### Deprecations

1. `brainpy.dyn.xxx (neurons)` and `brainpy.dyn.xxx (synapse)` are no longer supported. Please use the `brainpy.neurons` and `brainpy.synapses` modules.
2. `brainpy.running.monitor` has been removed.
3. `brainpy.nn` module has been removed.

#### New features

1. `brainpy.math.Variable` receives a `batch_axis` setting to represent
   the batch axis of the data.

```
>>> import brainpy.math as bm
>>> a = bm.Variable(bm.zeros((1, 4, 5)), batch_axis=0)
>>> a.value = bm.zeros((2, 4, 5))  # success
>>> a.value = bm.zeros((1, 2, 5))  # failed
MathError: The shape of the original data is (2, 4, 5), while we got (1, 2, 5) with batch_axis=0.
```

2. `brainpy.train` provides `brainpy.train.BPTT` for back-propagation
   algorithms, `brainpy.train.OnlineTrainer` for online training
   algorithms, and `brainpy.train.OfflineTrainer` for offline training
   algorithms.
3. The `brainpy.Base` class supports the `_excluded_vars` setting to ignore
   variables when retrieving variables with the `Base.vars()` method.
+

```
>>> class OurModel(bp.Base):
>>>   _excluded_vars = ('a', 'b')
>>>   def __init__(self):
>>>     super(OurModel, self).__init__()
>>>     self.a = bm.Variable(bm.zeros(10))
>>>     self.b = bm.Variable(bm.ones(20))
>>>     self.c = bm.Variable(bm.random.random(10))
>>>
>>> model = OurModel()
>>> model.vars().keys()
dict_keys(['OurModel0.c'])
```

4. `brainpy.analysis.SlowPointFinder` supports directly analyzing an
   instance of `brainpy.dyn.DynamicalSystem`.

```
>>> hh = bp.neurons.HH(1)
>>> finder = bp.analysis.SlowPointFinder(hh, target_vars={'V': hh.V, 'm': hh.m, 'h': hh.h, 'n': hh.n})
```

5. `brainpy.datasets` supports MNIST, FashionMNIST, and other datasets.
6. Supports defining conductance-based neuron models.

```
>>> # here `channels` refers to the channel model module (e.g., brainpy.dyn.channels)
>>> class HH(bp.dyn.CondNeuGroup):
>>>   def __init__(self, size):
>>>     super(HH, self).__init__(size)
>>>
>>>     self.INa = channels.INa_HH1952(size, )
>>>     self.IK = channels.IK_HH1952(size, )
>>>     self.IL = channels.IL(size, E=-54.387, g_max=0.03)
```

7. The `brainpy.layers` module provides commonly used models for DNNs and
   reservoir computing.
8. Support composable definitions of synaptic models by using
   `TwoEndConn`, `SynOut`, `SynSTP` and `SynLTP`.

```
>>> bp.synapses.Exponential(self.E, self.E, bp.conn.FixedProb(prob),
>>>                         g_max=0.03 / scale, tau=5,
>>>                         output=bp.synouts.COBA(E=0.),
>>>                         stp=bp.synplast.STD())
```

9. Provide commonly used surrogate gradient functions for spike
   generation, including
   - `brainpy.math.spike_with_sigmoid_grad`
   - `brainpy.math.spike_with_linear_grad`
   - `brainpy.math.spike_with_gaussian_grad`
   - `brainpy.math.spike_with_mg_grad`
10. Provide shortcuts for GPU memory management via
    `brainpy.math.disable_gpu_memory_preallocation()` and
    `brainpy.math.clear_buffer_memory()` (see the sketch below).
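A minimal sketch of the memory-management shortcuts in item 10 (both functions take no required arguments):

```python
import brainpy.math as bm

# Ask JAX not to pre-allocate most of the GPU memory at startup.
bm.disable_gpu_memory_preallocation()

# ... build and run simulations ...

# Release the buffers cached by earlier runs.
bm.clear_buffer_memory()
```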
+ +#### What\'s Changed + +- fix [#207](https://github.com/PKU-NIP-Lab/BrainPy/issues/207): + synapses update first, then neurons, finally delay variables by + [\@chaoming0625](https://github.com/chaoming0625) in + [#219](https://github.com/PKU-NIP-Lab/BrainPy/pull/219) +- docs: add logos by [\@ztqakita](https://github.com/ztqakita) in + [#218](https://github.com/PKU-NIP-Lab/BrainPy/pull/218) +- Add the biological NMDA model by + [\@c-xy17](https://github.com/c-xy17) in + [#221](https://github.com/PKU-NIP-Lab/BrainPy/pull/221) +- docs: fix mathjax problem by + [\@ztqakita](https://github.com/ztqakita) in + [#222](https://github.com/PKU-NIP-Lab/BrainPy/pull/222) +- Add the parameter R to the LIF model by + [\@c-xy17](https://github.com/c-xy17) in + [#224](https://github.com/PKU-NIP-Lab/BrainPy/pull/224) +- new version of brainpy: V2.2.0-rc1 by + [\@chaoming0625](https://github.com/chaoming0625) in + [#226](https://github.com/PKU-NIP-Lab/BrainPy/pull/226) +- update training apis by + [\@chaoming0625](https://github.com/chaoming0625) in + [#227](https://github.com/PKU-NIP-Lab/BrainPy/pull/227) +- Update quickstart and the analysis module by + [\@c-xy17](https://github.com/c-xy17) in + [#229](https://github.com/PKU-NIP-Lab/BrainPy/pull/229) +- Eseential updates for montors, analysis, losses, and examples by + [\@chaoming0625](https://github.com/chaoming0625) in + [#230](https://github.com/PKU-NIP-Lab/BrainPy/pull/230) +- add numpy op tests by [\@ztqakita](https://github.com/ztqakita) in + [#231](https://github.com/PKU-NIP-Lab/BrainPy/pull/231) +- Integrated simulation, simulaton and analysis by + [\@chaoming0625](https://github.com/chaoming0625) in + [#232](https://github.com/PKU-NIP-Lab/BrainPy/pull/232) +- update docs by [\@chaoming0625](https://github.com/chaoming0625) in + [#233](https://github.com/PKU-NIP-Lab/BrainPy/pull/233) +- unify `brainpy.layers` with other modules in `brainpy.dyn` by + [\@chaoming0625](https://github.com/chaoming0625) in + [#234](https://github.com/PKU-NIP-Lab/BrainPy/pull/234) +- fix bugs by [\@chaoming0625](https://github.com/chaoming0625) in + [#235](https://github.com/PKU-NIP-Lab/BrainPy/pull/235) +- update apis, docs, examples and others by + [\@chaoming0625](https://github.com/chaoming0625) in + [#236](https://github.com/PKU-NIP-Lab/BrainPy/pull/236) +- fixes by [\@chaoming0625](https://github.com/chaoming0625) in + [#237](https://github.com/PKU-NIP-Lab/BrainPy/pull/237) +- fix: add dtype promotion = standard by + [\@ztqakita](https://github.com/ztqakita) in + [#239](https://github.com/PKU-NIP-Lab/BrainPy/pull/239) +- updates by [\@chaoming0625](https://github.com/chaoming0625) in + [#240](https://github.com/PKU-NIP-Lab/BrainPy/pull/240) +- update training docs by + [\@chaoming0625](https://github.com/chaoming0625) in + [#241](https://github.com/PKU-NIP-Lab/BrainPy/pull/241) +- change doc path/organization by + [\@chaoming0625](https://github.com/chaoming0625) in + [#242](https://github.com/PKU-NIP-Lab/BrainPy/pull/242) +- Update advanced docs by + [\@chaoming0625](https://github.com/chaoming0625) in + [#243](https://github.com/PKU-NIP-Lab/BrainPy/pull/243) +- update quickstart docs & enable jit error checking by + [\@chaoming0625](https://github.com/chaoming0625) in + [#244](https://github.com/PKU-NIP-Lab/BrainPy/pull/244) +- update apis and examples by + [\@chaoming0625](https://github.com/chaoming0625) in + [#245](https://github.com/PKU-NIP-Lab/BrainPy/pull/245) +- update apis and tests by + [\@chaoming0625](https://github.com/chaoming0625) in + 
[#246](https://github.com/PKU-NIP-Lab/BrainPy/pull/246)
- Docs update and bugs fixed by [\@ztqakita](https://github.com/ztqakita) in [#247](https://github.com/PKU-NIP-Lab/BrainPy/pull/247)
- version 2.2.0 by [\@chaoming0625](https://github.com/chaoming0625) in [#248](https://github.com/PKU-NIP-Lab/BrainPy/pull/248)
- add norm and pooling & fix bugs in operators by [\@ztqakita](https://github.com/ztqakita) in [#249](https://github.com/PKU-NIP-Lab/BrainPy/pull/249)

**Full Changelog**:
[V2.1.12\...V2.2.0](https://github.com/PKU-NIP-Lab/BrainPy/compare/V2.1.12...V2.2.0)

## brainpy 2.1.x

### Version 2.1.12 (2022.05.17)

#### Highlights

This release brings several important improvements.

1. Provide dozens of random sampling functions from NumPy that are not
   supported in JAX, such as `brainpy.math.random.bernoulli`,
   `brainpy.math.random.lognormal`, `brainpy.math.random.binomial`,
   `brainpy.math.random.chisquare`, `brainpy.math.random.dirichlet`,
   `brainpy.math.random.geometric`, `brainpy.math.random.f`,
   `brainpy.math.random.hypergeometric`,
   `brainpy.math.random.logseries`, `brainpy.math.random.multinomial`,
   `brainpy.math.random.multivariate_normal`,
   `brainpy.math.random.negative_binomial`,
   `brainpy.math.random.noncentral_chisquare`,
   `brainpy.math.random.noncentral_f`, `brainpy.math.random.power`,
   `brainpy.math.random.rayleigh`, `brainpy.math.random.triangular`,
   `brainpy.math.random.vonmises`, `brainpy.math.random.wald`,
   `brainpy.math.random.weibull`
2. Make efficient checking on numerical values. Instead of direct
   `id_tap()` checking, which has a large overhead,
   `brainpy.tools.check_error_in_jit()` is now highly efficient.
3. Fix `JaxArray` operator errors on `None`
4. Improve OO-to-function transformation speed
5. Make the `io` module work: `.save_states()` and `.load_states()` (see the sketch below)
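A minimal sketch of this state saving/loading (the model class and the ``.npz`` file name are hypothetical; the set of supported file formats depends on the installed backends):

```python
import brainpy as bp
import brainpy.math as bm

class MyModel(bp.Base):
    def __init__(self):
        super(MyModel, self).__init__()
        self.w = bm.Variable(bm.zeros(10))

model = MyModel()
model.save_states('./model_states.npz')  # write all variables to disk
model.load_states('./model_states.npz')  # restore them later
```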
#### What's Changed

- support dtype setting in array interchange functions by [\@chaoming0625](https://github.com/chaoming0625) in [#209](https://github.com/PKU-NIP-Lab/BrainPy/pull/209)
- fix [#144](https://github.com/PKU-NIP-Lab/BrainPy/issues/144): operations on None raise errors by [\@chaoming0625](https://github.com/chaoming0625) in [#210](https://github.com/PKU-NIP-Lab/BrainPy/pull/210)
- add tests and new functions for random sampling by [\@c-xy17](https://github.com/c-xy17) in [#213](https://github.com/PKU-NIP-Lab/BrainPy/pull/213)
- feat: fix `io` for brainpy.Base by [\@chaoming0625](https://github.com/chaoming0625) in [#211](https://github.com/PKU-NIP-Lab/BrainPy/pull/211)
- update advanced tutorial documentation by [\@chaoming0625](https://github.com/chaoming0625) in [#212](https://github.com/PKU-NIP-Lab/BrainPy/pull/212)
- fix [#149](https://github.com/PKU-NIP-Lab/BrainPy/issues/149) (dozens of random samplings in NumPy) and fix JaxArray op errors by [\@chaoming0625](https://github.com/chaoming0625) in [#216](https://github.com/PKU-NIP-Lab/BrainPy/pull/216)
- feat: efficient checking on numerical values by [\@chaoming0625](https://github.com/chaoming0625) in [#217](https://github.com/PKU-NIP-Lab/BrainPy/pull/217)

**Full Changelog**:
[V2.1.11\...V2.1.12](https://github.com/PKU-NIP-Lab/BrainPy/compare/V2.1.11...V2.1.12)

### Version 2.1.11 (2022.05.15)

#### What\'s Changed

- fix: cross-correlation bug by [\@ztqakita](https://github.com/ztqakita) in [#201](https://github.com/PKU-NIP-Lab/BrainPy/pull/201)
- update apis, test and docs of numpy ops by [\@chaoming0625](https://github.com/chaoming0625) in [#202](https://github.com/PKU-NIP-Lab/BrainPy/pull/202)
- docs: add sphinx_book_theme by [\@ztqakita](https://github.com/ztqakita) in [#203](https://github.com/PKU-NIP-Lab/BrainPy/pull/203)
- fix: add requirements-doc.txt by [\@ztqakita](https://github.com/ztqakita) in [#204](https://github.com/PKU-NIP-Lab/BrainPy/pull/204)
- update control flow, integrators, operators, and docs by [\@chaoming0625](https://github.com/chaoming0625) in [#205](https://github.com/PKU-NIP-Lab/BrainPy/pull/205)
- improve oo-to-function transformation speed by [\@chaoming0625](https://github.com/chaoming0625) in [#208](https://github.com/PKU-NIP-Lab/BrainPy/pull/208)

**Full Changelog**:
[V2.1.10\...V2.1.11](https://github.com/PKU-NIP-Lab/BrainPy/compare/V2.1.10...V2.1.11)

### Version 2.1.10 (2022.05.05)

#### What\'s Changed

- update control flow APIs and Docs by [\@chaoming0625](https://github.com/chaoming0625) in [#192](https://github.com/PKU-NIP-Lab/BrainPy/pull/192)
- doc: update docs of dynamics simulation by [\@chaoming0625](https://github.com/chaoming0625) in [#193](https://github.com/PKU-NIP-Lab/BrainPy/pull/193)
- fix [#125](https://github.com/PKU-NIP-Lab/BrainPy/issues/125): add channel models and two-compartment Pinsky-Rinzel model by [\@chaoming0625](https://github.com/chaoming0625) in [#194](https://github.com/PKU-NIP-Lab/BrainPy/pull/194)
- JIT errors do not change Variable values by [\@chaoming0625](https://github.com/chaoming0625) in [#195](https://github.com/PKU-NIP-Lab/BrainPy/pull/195)
- fix a bug in math.activations.py by [\@c-xy17](https://github.com/c-xy17) in [#196](https://github.com/PKU-NIP-Lab/BrainPy/pull/196)
- Functionalinaty improvements by [\@chaoming0625](https://github.com/chaoming0625) in [#197](https://github.com/PKU-NIP-Lab/BrainPy/pull/197)
- update rate docs by [\@chaoming0625](https://github.com/chaoming0625) in [#198](https://github.com/PKU-NIP-Lab/BrainPy/pull/198)
- update brainpy.dyn 
doc by + [\@chaoming0625](https://github.com/chaoming0625) in + [#199](https://github.com/PKU-NIP-Lab/BrainPy/pull/199) + +**Full Changelog**: +[V2.1.8\...V2.1.10](https://github.com/PKU-NIP-Lab/BrainPy/compare/V2.1.8...V2.1.10) + +### Version 2.1.8 (2022.04.26) + +#### What\'s Changed + +- Fix [#120](https://github.com/PKU-NIP-Lab/BrainPy/issues/120) by + [\@chaoming0625](https://github.com/chaoming0625) in + [#178](https://github.com/PKU-NIP-Lab/BrainPy/pull/178) +- feat: brainpy.Collector supports addition and subtraction by + [\@chaoming0625](https://github.com/chaoming0625) in + [#179](https://github.com/PKU-NIP-Lab/BrainPy/pull/179) +- feat: delay variables support \"indices\" and \"reset()\" function + by [\@chaoming0625](https://github.com/chaoming0625) in + [#180](https://github.com/PKU-NIP-Lab/BrainPy/pull/180) +- Support reset functions in neuron and synapse models by + [\@chaoming0625](https://github.com/chaoming0625) in + [#181](https://github.com/PKU-NIP-Lab/BrainPy/pull/181) +- `update()` function on longer need `_t` and `_dt` by + [\@chaoming0625](https://github.com/chaoming0625) in + [#183](https://github.com/PKU-NIP-Lab/BrainPy/pull/183) +- small updates by [\@chaoming0625](https://github.com/chaoming0625) + in [#188](https://github.com/PKU-NIP-Lab/BrainPy/pull/188) +- feat: easier control flows with `brainpy.math.ifelse` by + [\@chaoming0625](https://github.com/chaoming0625) in + [#189](https://github.com/PKU-NIP-Lab/BrainPy/pull/189) +- feat: update delay couplings of `DiffusiveCoupling` and + `AdditiveCouping` by + [\@chaoming0625](https://github.com/chaoming0625) in + [#190](https://github.com/PKU-NIP-Lab/BrainPy/pull/190) +- update version and changelog by + [\@chaoming0625](https://github.com/chaoming0625) in + [#191](https://github.com/PKU-NIP-Lab/BrainPy/pull/191) + +**Full Changelog**: +[V2.1.7\...V2.1.8](https://github.com/PKU-NIP-Lab/BrainPy/compare/V2.1.7...V2.1.8) + +### Version 2.1.7 (2022.04.22) + +#### What\'s Changed + +- synapse models support heterogeneuos weights by + [\@chaoming0625](https://github.com/chaoming0625) in + [#170](https://github.com/PKU-NIP-Lab/BrainPy/pull/170) +- more efficient synapse implementation by + [\@chaoming0625](https://github.com/chaoming0625) in + [#171](https://github.com/PKU-NIP-Lab/BrainPy/pull/171) +- fix input models in brainpy.dyn by + [\@chaoming0625](https://github.com/chaoming0625) in + [#172](https://github.com/PKU-NIP-Lab/BrainPy/pull/172) +- fix: np array astype by [\@ztqakita](https://github.com/ztqakita) in + [#173](https://github.com/PKU-NIP-Lab/BrainPy/pull/173) +- update README: \'brain-py\' to \'brainpy\' by + [\@chaoming0625](https://github.com/chaoming0625) in + [#174](https://github.com/PKU-NIP-Lab/BrainPy/pull/174) +- fix: fix the updating rules in the STP model by + [\@c-xy17](https://github.com/c-xy17) in + [#176](https://github.com/PKU-NIP-Lab/BrainPy/pull/176) +- Updates and fixes by + [\@chaoming0625](https://github.com/chaoming0625) in + [#177](https://github.com/PKU-NIP-Lab/BrainPy/pull/177) + +**Full Changelog**: +[V2.1.5\...V2.1.7](https://github.com/PKU-NIP-Lab/BrainPy/compare/V2.1.5...V2.1.7) + +### Version 2.1.5 (2022.04.18) + +#### What\'s Changed + +- `brainpy.math.random.shuffle` is numpy like by + [\@chaoming0625](https://github.com/chaoming0625) in + [#153](https://github.com/PKU-NIP-Lab/BrainPy/pull/153) +- update LICENSE by [\@chaoming0625](https://github.com/chaoming0625) + in [#155](https://github.com/PKU-NIP-Lab/BrainPy/pull/155) +- docs: add m1 warning by 
[\@ztqakita](https://github.com/ztqakita) in + [#154](https://github.com/PKU-NIP-Lab/BrainPy/pull/154) +- compatible apis of \'brainpy.math\' with those of \'jax.numpy\' in + most modules by [\@chaoming0625](https://github.com/chaoming0625) in + [#156](https://github.com/PKU-NIP-Lab/BrainPy/pull/156) +- Important updates by + [\@chaoming0625](https://github.com/chaoming0625) in + [#157](https://github.com/PKU-NIP-Lab/BrainPy/pull/157) +- Updates by [\@chaoming0625](https://github.com/chaoming0625) in + [#159](https://github.com/PKU-NIP-Lab/BrainPy/pull/159) +- Add LayerNorm, GroupNorm, and InstanceNorm as nn_nodes in + normalization.py by [\@c-xy17](https://github.com/c-xy17) in + [#162](https://github.com/PKU-NIP-Lab/BrainPy/pull/162) +- feat: add conv & pooling nodes by + [\@ztqakita](https://github.com/ztqakita) in + [#161](https://github.com/PKU-NIP-Lab/BrainPy/pull/161) +- fix: update setup.py by [\@ztqakita](https://github.com/ztqakita) in + [#163](https://github.com/PKU-NIP-Lab/BrainPy/pull/163) +- update setup.py by [\@chaoming0625](https://github.com/chaoming0625) + in [#165](https://github.com/PKU-NIP-Lab/BrainPy/pull/165) +- fix: change trigger condition by + [\@ztqakita](https://github.com/ztqakita) in + [#166](https://github.com/PKU-NIP-Lab/BrainPy/pull/166) +- fix: add build_conn() function by + [\@ztqakita](https://github.com/ztqakita) in + [#164](https://github.com/PKU-NIP-Lab/BrainPy/pull/164) +- update synapses by [\@chaoming0625](https://github.com/chaoming0625) + in [#167](https://github.com/PKU-NIP-Lab/BrainPy/pull/167) +- get the deserved name: brainpy by + [\@chaoming0625](https://github.com/chaoming0625) in + [#168](https://github.com/PKU-NIP-Lab/BrainPy/pull/168) +- update tests by [\@chaoming0625](https://github.com/chaoming0625) in + [#169](https://github.com/PKU-NIP-Lab/BrainPy/pull/169) + +**Full Changelog**: +[V2.1.4\...V2.1.5](https://github.com/PKU-NIP-Lab/BrainPy/compare/V2.1.4...V2.1.5) + +### Version 2.1.4 (2022.04.04) + +#### What\'s Changed + +- fix doc parsing bug by + [\@chaoming0625](https://github.com/chaoming0625) in + [#127](https://github.com/PKU-NIP-Lab/BrainPy/pull/127) +- Update overview_of_dynamic_model.ipynb by + [\@c-xy17](https://github.com/c-xy17) in + [#129](https://github.com/PKU-NIP-Lab/BrainPy/pull/129) +- Reorganization of `brainpylib.custom_op` and adding interface in + `brainpy.math` by [\@ztqakita](https://github.com/ztqakita) in + [#128](https://github.com/PKU-NIP-Lab/BrainPy/pull/128) +- Fix: modify `register_op` and brainpy.math interface by + [\@ztqakita](https://github.com/ztqakita) in + [#130](https://github.com/PKU-NIP-Lab/BrainPy/pull/130) +- new features about RNN training and delay differential equations by + [\@chaoming0625](https://github.com/chaoming0625) in + [#132](https://github.com/PKU-NIP-Lab/BrainPy/pull/132) +- Fix [#123](https://github.com/PKU-NIP-Lab/BrainPy/issues/123): Add + low-level operators docs and modify register_op by + [\@ztqakita](https://github.com/ztqakita) in + [#134](https://github.com/PKU-NIP-Lab/BrainPy/pull/134) +- feat: add generate_changelog by + [\@ztqakita](https://github.com/ztqakita) in + [#135](https://github.com/PKU-NIP-Lab/BrainPy/pull/135) +- fix [#133](https://github.com/PKU-NIP-Lab/BrainPy/issues/133), + support batch size training with offline algorithms by + [\@chaoming0625](https://github.com/chaoming0625) in + [#136](https://github.com/PKU-NIP-Lab/BrainPy/pull/136) +- fix [#84](https://github.com/PKU-NIP-Lab/BrainPy/issues/84): support + online training algorithms by + 
[\@chaoming0625](https://github.com/chaoming0625) in + [#137](https://github.com/PKU-NIP-Lab/BrainPy/pull/137) +- feat: add the batch normalization node by + [\@c-xy17](https://github.com/c-xy17) in + [#138](https://github.com/PKU-NIP-Lab/BrainPy/pull/138) +- fix: fix shape checking error by + [\@chaoming0625](https://github.com/chaoming0625) in + [#139](https://github.com/PKU-NIP-Lab/BrainPy/pull/139) +- solve [#131](https://github.com/PKU-NIP-Lab/BrainPy/issues/131), + support efficient synaptic computation for special connection types + by [\@chaoming0625](https://github.com/chaoming0625) in + [#140](https://github.com/PKU-NIP-Lab/BrainPy/pull/140) +- feat: update the API and test for batch normalization by + [\@c-xy17](https://github.com/c-xy17) in + [#142](https://github.com/PKU-NIP-Lab/BrainPy/pull/142) +- Node is default trainable by + [\@chaoming0625](https://github.com/chaoming0625) in + [#143](https://github.com/PKU-NIP-Lab/BrainPy/pull/143) +- Updates training apis and docs by + [\@chaoming0625](https://github.com/chaoming0625) in + [#145](https://github.com/PKU-NIP-Lab/BrainPy/pull/145) +- fix: add dependencies and update version by + [\@ztqakita](https://github.com/ztqakita) in + [#147](https://github.com/PKU-NIP-Lab/BrainPy/pull/147) +- update requirements by + [\@chaoming0625](https://github.com/chaoming0625) in + [#146](https://github.com/PKU-NIP-Lab/BrainPy/pull/146) +- data pass of the Node is default SingleData by + [\@chaoming0625](https://github.com/chaoming0625) in + [#148](https://github.com/PKU-NIP-Lab/BrainPy/pull/148) + +**Full Changelog**: +[V2.1.3\...V2.1.4](https://github.com/PKU-NIP-Lab/BrainPy/compare/V2.1.3...V2.1.4) + +### Version 2.1.3 (2022.03.27) + +This release improves the functionality and usability of BrainPy. Core +changes include + +- support customization of low-level operators by using Numba +- fix bugs + +#### What\'s Changed + +- Provide custom operators written in numba for jax jit by + [\@ztqakita](https://github.com/ztqakita) in + [#122](https://github.com/PKU-NIP-Lab/BrainPy/pull/122) +- fix DOGDecay bugs, add more features by + [\@chaoming0625](https://github.com/chaoming0625) in + [#124](https://github.com/PKU-NIP-Lab/BrainPy/pull/124) +- fix bugs by [\@chaoming0625](https://github.com/chaoming0625) in + [#126](https://github.com/PKU-NIP-Lab/BrainPy/pull/126) + +**Full Changelog** : +[V2.1.2\...V2.1.3](https://github.com/PKU-NIP-Lab/BrainPy/compare/V2.1.2...V2.1.3) + +### Version 2.1.2 (2022.03.23) + +This release improves the functionality and usability of BrainPy. Core +changes include + +- support rate-based whole-brain modeling +- add more neuron models, including rate neurons/synapses +- support Python 3.10 +- improve delays etc. 
APIs

#### What\'s Changed

- fix matplotlib dependency on \"brainpy.analysis\" module by [\@chaoming0625](https://github.com/chaoming0625) in [#110](https://github.com/PKU-NIP-Lab/BrainPy/pull/110)
- Sync master to brainpy-2.x branch by [\@ztqakita](https://github.com/ztqakita) in [#111](https://github.com/PKU-NIP-Lab/BrainPy/pull/111)
- add py3.6 test & delete multiple macos env by [\@ztqakita](https://github.com/ztqakita) in [#112](https://github.com/PKU-NIP-Lab/BrainPy/pull/112)
- Modify ci by [\@ztqakita](https://github.com/ztqakita) in [#113](https://github.com/PKU-NIP-Lab/BrainPy/pull/113)
- Add py3.10 test by [\@ztqakita](https://github.com/ztqakita) in [#115](https://github.com/PKU-NIP-Lab/BrainPy/pull/115)
- update python version by [\@chaoming0625](https://github.com/chaoming0625) in [#114](https://github.com/PKU-NIP-Lab/BrainPy/pull/114)
- add brainpylib mac py3.10 by [\@ztqakita](https://github.com/ztqakita) in [#116](https://github.com/PKU-NIP-Lab/BrainPy/pull/116)
- Enhance measure/input/brainpylib by [\@chaoming0625](https://github.com/chaoming0625) in [#117](https://github.com/PKU-NIP-Lab/BrainPy/pull/117)
- fix [#105](https://github.com/PKU-NIP-Lab/BrainPy/issues/105): Add customize connections docs by [\@ztqakita](https://github.com/ztqakita) in [#118](https://github.com/PKU-NIP-Lab/BrainPy/pull/118)
- fix bugs by [\@chaoming0625](https://github.com/chaoming0625) in [#119](https://github.com/PKU-NIP-Lab/BrainPy/pull/119)
- Whole brain modeling by [\@chaoming0625](https://github.com/chaoming0625) in [#121](https://github.com/PKU-NIP-Lab/BrainPy/pull/121)

**Full Changelog**:
[V2.1.1\...V2.1.2](https://github.com/PKU-NIP-Lab/BrainPy/compare/V2.1.1...V2.1.2)

### Version 2.1.1 (2022.03.18)

This release continues to update the functionality of BrainPy. Core
changes include

- numerical solvers for fractional differential equations
- more standard `brainpy.nn` interfaces

#### New Features

- Numerical solvers for fractional differential equations:
  - `brainpy.fde.CaputoEuler`
  - `brainpy.fde.CaputoL1Schema`
  - `brainpy.fde.GLShortMemory`
- Fractional neuron models:
  - `brainpy.dyn.FractionalFHR`
  - `brainpy.dyn.FractionalIzhikevich`
- support `shared_kwargs` in `RNNTrainer` and `RNNRunner`

### Version 2.1.0 (2022.03.14)

#### Highlights

We are excited to announce the release of BrainPy 2.1.0. This release is
composed of nearly 270 commits since 2.0.2, made by [Chaoming
Wang](https://github.com/chaoming0625), [Xiaoyu
Chen](mailto:c-xy17@tsinghua.org.cn), and [Tianqiu
Zhang](mailto:tianqiuakita@gmail.com).

BrainPy 2.1.0 updates are focused on improving the usability, functionality,
and stability of BrainPy. Highlights of version 2.1.0 include:

- New module `brainpy.dyn` for dynamics building and simulation. It is
  composed of many neuron models, synapse models, and others.
- New module `brainpy.nn` for neural network building and training. It
  supports defining reservoir models, artificial neural networks,
  ridge regression training, and back-propagation-through-time training.
- New module `brainpy.datasets` for convenient dataset construction
  and initialization.
- New module `brainpy.integrators.dde` for numerical integration of
  delay differential equations.
- Add more numpy-like operators in the `brainpy.math` module.
- Add automatic continuous integration on Linux, Windows, and MacOS
  platforms.
- Fully update brainpy documentation.
+
- Fix bugs on `brainpy.analysis` and `brainpy.math.autograd`

#### Incompatible changes

- Remove `brainpy.math.numpy` module.
- Remove numba requirements
- Remove matplotlib requirements
- Remove `steps` in `brainpy.dyn.DynamicalSystem`
- Remove travis CI

#### New Features

- `brainpy.ddeint` for numerical integration of delay differential
  equations; the supported methods include Euler, MidPoint, Heun2,
  Ralston2, RK2, RK3, Heun3, Ralston3, SSPRK3, RK4, Ralston4, and
  RK4Rule38.

- Set default int/float/complex types:
  - `brainpy.math.set_dfloat()`
  - `brainpy.math.set_dint()`
  - `brainpy.math.set_dcomplex()`

- Delay variables:
  - `brainpy.math.FixedLenDelay`
  - `brainpy.math.NeutralDelay`

- Dedicated operators:
  - `brainpy.math.sparse_matmul()`

- More numpy-like operators

- Neural network building: `brainpy.nn`

- Dynamics model building and simulation: `brainpy.dyn`

### Version 2.0.2 (2022.02.11)

There are important updates by [Chaoming
Wang](https://github.com/chaoming0625) in BrainPy 2.0.2.

- provide the `pre2post_event_prod` operator
- support array creation from a list/tuple of JaxArray in
  `brainpy.math.asarray` and `brainpy.math.array`
- update `brainpy.ConstantDelay`, add `.latest` and `.oldest`
  attributes
- add `brainpy.IntegratorRunner` support for efficient simulation of
  brainpy integrators
- support automatically finding the `RandomState` when JIT-compiling SDE
  integrators
- fix bugs in the SDE `exponential_euler` method
- move `parallel` running APIs into `brainpy.simulation`
- add `brainpy.math.syn2post_mean`, `brainpy.math.syn2post_softmax`,
  `brainpy.math.pre2post_mean` and `brainpy.math.pre2post_softmax`
  operators

### Version 2.0.1 (2022.01.31)

Today we release BrainPy 2.0.1. This release is composed of over 70
commits since 2.0.0, made by [Chaoming
Wang](https://github.com/chaoming0625), [Xiaoyu
Chen](mailto:c-xy17@tsinghua.org.cn), and [Tianqiu
Zhang](mailto:tianqiuakita@gmail.com).

BrainPy 2.0.1 updates are focused on improving documentation and
operators. Core changes include:

- Improve `brainpylib` operators
- Complete documentation for the programming system
- Add more numpy APIs
- Add `jaxfwd` in the autograd module
- And other changes

### Version 2.0.0.1 (2022.01.05)

- Add progress bar in `brainpy.StructRunner`

### Version 2.0.0 (2021.12.31)

Start a new version of BrainPy.

#### Highlights

We are excited to announce the release of BrainPy 2.0.0. This release is
composed of over 260 commits since 1.1.7, made by [Chaoming
Wang](https://github.com/chaoming0625), [Xiaoyu
Chen](mailto:c-xy17@tsinghua.org.cn), and [Tianqiu
Zhang](mailto:tianqiuakita@gmail.com).

BrainPy 2.0.0 updates are focused on improving the performance, usability
and consistency of BrainPy. All computations are migrated into JAX.
Model `building`, `simulation`, `training` and `analysis` are all based
on JAX. Highlights of version 2.0.0 include:

- [brainpylib](https://pypi.org/project/brainpylib/) provides
  dedicated operators for brain dynamics programming.
- Connection APIs in the `brainpy.conn` module are more efficient.
- Update analysis tools for low-dimensional and high-dimensional
  systems in the `brainpy.analysis` module.
- Support more general Exponential Euler methods based on automatic
  differentiation.
- Improve the usability and consistency of the `brainpy.math` module.
- Remove JIT compilation based on Numba.
- Separate brain building from brain simulation.
+

#### Incompatible changes

- remove `brainpy.math.use_backend()`
- remove `brainpy.math.numpy` module
- no longer support `.run()` in `brainpy.DynamicalSystem` (see New
  Features)
- remove `brainpy.analysis.PhasePlane` (see New Features)
- remove `brainpy.analysis.Bifurcation` (see New Features)
- remove `brainpy.analysis.FastSlowBifurcation` (see New Features)

#### New Features

- Exponential Euler method based on automatic differentiation:
  - `brainpy.ode.ExpEulerAuto`
- Numerical-optimization-based low-dimensional analyzers:
  - `brainpy.analysis.PhasePlane1D`
  - `brainpy.analysis.PhasePlane2D`
  - `brainpy.analysis.Bifurcation1D`
  - `brainpy.analysis.Bifurcation2D`
  - `brainpy.analysis.FastSlow1D`
  - `brainpy.analysis.FastSlow2D`
- Numerical-optimization-based high-dimensional analyzer:
  - `brainpy.analysis.SlowPointFinder`
- Dedicated operators in the `brainpy.math` module:
  - `brainpy.math.pre2post_event_sum`
  - `brainpy.math.pre2post_sum`
  - `brainpy.math.pre2post_prod`
  - `brainpy.math.pre2post_max`
  - `brainpy.math.pre2post_min`
  - `brainpy.math.pre2syn`
  - `brainpy.math.syn2post`
  - `brainpy.math.syn2post_prod`
  - `brainpy.math.syn2post_max`
  - `brainpy.math.syn2post_min`
- Conversion APIs in the `brainpy.math` module:
  - `brainpy.math.as_device_array()`
  - `brainpy.math.as_variable()`
  - `brainpy.math.as_jaxarray()`
- New autograd APIs in the `brainpy.math` module:
  - `brainpy.math.vector_grad()`
- Simulation runners:
  - `brainpy.ReportRunner`
  - `brainpy.StructRunner`
  - `brainpy.NumpyRunner`
- Commonly used models in the `brainpy.models` module:
  - `brainpy.models.LIF`
  - `brainpy.models.Izhikevich`
  - `brainpy.models.AdExIF`
  - `brainpy.models.SpikeTimeInput`
  - `brainpy.models.PoissonInput`
  - `brainpy.models.DeltaSynapse`
  - `brainpy.models.ExpCUBA`
  - `brainpy.models.ExpCOBA`
  - `brainpy.models.AMPA`
  - `brainpy.models.GABAa`
- Naming cache clean: `brainpy.clear_name_cache`
- add safe in-place operations of the `update()` method and `.value`
  assignment for JaxArray

#### Documentation

- Complete tutorials for quickstart
- Complete tutorials for dynamics building
- Complete tutorials for dynamics simulation
- Complete tutorials for dynamics training
- Complete tutorials for dynamics analysis
- Complete tutorials for API documentation

## brainpy 1.1.x

If you are using `brainpy==1.x`, you can find the *documentation*,
*examples*, and *models* through the following links:

- **Documentation:**
- **Examples from papers**:

- **Canonical brain models**:


### Version 1.1.7 (2021.12.13)

- fix bugs on `numpy_array()` conversion in the
  `brainpy.math.utils` module

### Version 1.1.5 (2021.11.17)

**API changes:**

- fix bugs on ndarray import in `brainpy.base.function.py`
- convenient 'get_param' interface in
  `brainpy.simulation.layers`
- add more weight initialization methods

**Doc changes:**

- add more examples in README

### Version 1.1.4

**API changes:**

- add `.struct_run()` in DynamicalSystem
- add `numpy_array()` conversion in the `brainpy.math.utils`
  module
- add `Adagrad`, `Adadelta`, `RMSProp` optimizers
- remove `setting` methods in
  the `brainpy.math.jax` module
- remove the import of jax in `brainpy.__init__.py` and
  enable jax settings (see the sketch after this list), including
  - `enable_x64()`
  - `set_platform()`
  - `set_host_device_count()`
- enable `b=None` as no bias in
  `brainpy.simulation.layers`
- set `int_` and `float_` as default 32 bits
- remove the `dtype` setting in the Initializer constructor
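A minimal sketch of these JAX settings through `brainpy.math` (the device count `4` is an arbitrary illustrative value):

```python
import brainpy.math as bm

bm.enable_x64()              # compute with 64-bit precision
bm.set_platform('cpu')       # choose 'cpu', 'gpu', or 'tpu'
bm.set_host_device_count(4)  # expose multiple host devices to JAX
```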
**Doc changes:**

- add `optimizer` in \"Math Foundation\"
- add `dynamics training` docs
- improve others

### Version 1.1.3

- fix bugs of JAX parallel API imports
- fix bugs of `post_slice` structure construction
- update docs

### Version 1.1.2

- add `pre2syn` and `syn2post` operators
- add `verbose` and `check` options to
  `Base.load_states()`
- fix bugs on JIT DynamicalSystem (numpy backend)

### Version 1.1.1

- fix bugs on symbolic analysis: model trajectory
- change the `absolute` access in the variable saving and
  loading to the `relative` access
- add UnexpectedTracerError hints in JAX transformation functions

### Version 1.1.0 (2021.11.08)

This package releases a new version of BrainPy.

Highlights of core changes:

#### `math` module

- support numpy backend
- support JAX backend
- support `jit`, `vmap` and `pmap` on class objects on JAX backend
- support `grad`, `jacobian`, `hessian` on class objects on JAX
  backend
- support `make_loop`, `make_while`, and `make_cond` on JAX backend
- support `jit` (based on numba) on class objects on numpy backend
- unified numpy-like ndarray operation APIs
- numpy-like random sampling APIs
- FFT functions
- gradient descent optimizers
- activation functions
- loss functions
- backend settings

#### `base` module

- `Base` for the whole BrainPy ecosystem
- `Function` to wrap functions
- `Collector` and `TensorCollector` to collect variables, integrators,
  nodes and others

#### `integrators` module

- class integrators for ODE numerical methods
- class integrators for SDE numerical methods

#### `simulation` module

- support modular and composable programming
- support multi-scale modeling
- support large-scale modeling
- support simulation on GPUs
- fix bugs on `firing_rate()`
- remove `_i` in the `update()` function and replace it with `_dt`, meaning
  the dynamical system has the canonical equation form of
  $dx/dt = f(x, t, dt)$
- reimplement the `input_step` and `monitor_step` in a more intuitive
  way
- support setting `dt` at the single-object level (i.e., for a
  single instance of DynamicalSystem)
- commonly used DNN layers
- weight initializations
- refine synaptic connections

## brainpy 1.0.x

### Version 1.0.3 (2021.08.18)

Fix bugs on

- firing rate measurement
- stability analysis

### Version 1.0.2

This release continues to improve user-friendliness.

Highlights of core changes:

- Remove support for the Numba-CUDA backend
- Super initialization `super(XXX, self).__init__()`
  can be done anywhere (not required to be at the bottom of the
  `__init__()` function).
- Add the output message of the step function running error.
+
- More powerful support for Monitoring
- More powerful support for running order scheduling
- Remove `unsqueeze()` and `squeeze()`
  operations in `brainpy.ops`
- Add `reshape()` operation in `brainpy.ops`
- Improve docs for numerical solvers
- Improve tests for numerical solvers
- Add keywords checking in ODE numerical solvers
- Add more unified operations in `brainpy.ops`
- Support \"@every\" in steps and monitor functions
- Fix ODE solver bugs for class-bounded functions
- Add build phase in Monitor

### Version 1.0.1

- Fix bugs

### Version 1.0.0

- **NEW VERSION OF BRAINPY**
- Change the coding style to object-oriented programming
- Systematically improve the documentation

## brainpy 0.x

### Version 0.3.5

- Add \'timeout\' in the sympy solver in neuron dynamics analysis
- Reconstruct and generalize phase plane analysis
- Generalize the repeat mode of `Network` to different running
  durations between two runs
- Update benchmarks
- Update detailed documentation

### Version 0.3.1

- Add a more flexible way for NeuState/SynState initialization
- Fix bugs of \"is_multi_return\"
- Add \"hand_overs\", \"requires\" and \"satisfies\".
- Update documentation
- Auto-transform `range` to `numba.prange`
- Support `_obj_i`, `_pre_i`,
  `_post_i` for more flexible operation in scalar-based
  models

### Version 0.3.0

#### Computation API

- Rename \"brainpy.numpy\" to \"brainpy.backend\"
- Delete \"pytorch\", \"tensorflow\" backends
- Add \"numba\" requirement
- Add GPU support

#### Profile setting

- Delete \"backend\" profile setting, add \"jit\"

#### Core systems

- Delete \"autopepe8\" requirement
- Delete the format code prefix
- Change keywords `_t_`, `_dt_`, `_i_` to `_t`, `_dt`, `_i`
- Move the \"ST\" declaration out of \"requires\"
- Add \"repeat\" mode run in Network
- Change \"vector-based\" to \"mode\" in NeuType and SynType
  definitions

#### Package installation

- Remove \"pypi\" installation; installation now relies only on
  \"conda\"

### Version 0.2.4

#### API changes

- Fix bugs

### Version 0.2.3

#### API changes

- Add \"animate_1D\" in the `visualization` module
- Add \"PoissonInput\", \"SpikeTimeInput\" and \"FreqInput\" in the
  `inputs` module
- Update phase_portrait_analyzer.py

#### Models and examples

- Add CANN examples

### Version 0.2.2

#### API changes

- Redesign visualization
- Redesign connectivity
- Update docs

### Version 0.2.1

#### API changes

- Fix bugs in numba import
- Fix bugs in `numpy` mode with `scalar` models

### Version 0.2.0

#### API changes

- For computation: `numpy`, `numba`
- For model definition: `NeuType`, `SynConn`
- For model running: `Network`, `NeuGroup`, `SynConn`, `Runner`
- For numerical integration: `integrate`, `Integrator`, `DiffEquation`
- For connectivity: `One2One`, `All2All`, `GridFour`, `grid_four`,
  `GridEight`, `grid_eight`, `GridN`, `FixedPostNum`, `FixedPreNum`,
  `FixedProb`, `GaussianProb`, `GaussianWeight`, `DOG`
- For visualization: `plot_value`, `plot_potential`, `plot_raster`,
  `animation_potential`
- For measurement: `cross_correlation`, `voltage_fluctuation`,
  `raster_plot`, `firing_rate`
- For inputs: `constant_current`, `spike_current`, `ramp_current`.
+

#### Models and examples

- Neuron models: `HH model`, `LIF model`, `Izhikevich model`
- Synapse models: `AMPA`, `GABA`, `NMDA`, `STP`, `GapJunction`
- Network models: `gamma oscillation`
diff --git a/brainpy/__init__.py b/brainpy/__init__.py
index a3a1de694..79aa216ba 100644
--- a/brainpy/__init__.py
+++ b/brainpy/__init__.py
@@ -61,6 +61,10 @@
   Sequential as Sequential,
   Dynamic as Dynamic,  # category
   Projection as Projection,
+  receive_update_input,  # decorators
+  receive_update_output,
+  not_receive_update_input,
+  not_receive_update_output,
 )
 DynamicalSystemNS = DynamicalSystem
 Network = DynSysGroup
@@ -84,7 +88,6 @@
                 load_state as load_state,
                 clear_input as clear_input)
 
-
 # Part: Running #
 # --------------- #
 from brainpy._src.runners import (DSRunner as DSRunner)
diff --git a/brainpy/_src/delay.py b/brainpy/_src/delay.py
index ee0be5763..66530a5b1 100644
--- a/brainpy/_src/delay.py
+++ b/brainpy/_src/delay.py
@@ -28,7 +28,21 @@
 ]
 
 
-delay_identifier = '_*_delay_*_'
+delay_identifier = '_*_delay_of_'
+
+
+def _get_delay(delay_time, delay_step):
+  if delay_time is None:
+    if delay_step is None:
+      return None, None
+    else:
+      assert isinstance(delay_step, int), '"delay_step" should be an integer.'
+      delay_time = delay_step * bm.get_dt()
+  else:
+    assert delay_step is None, '"delay_step" should be None if "delay_time" is given.'
+    assert isinstance(delay_time, (int, float))
+    delay_step = math.ceil(delay_time / bm.get_dt())
+  return delay_time, delay_step
 
 
 class Delay(DynamicalSystem, ParamDesc):
@@ -97,13 +111,15 @@ def __init__(
   def register_entry(
       self,
       entry: str,
-      delay_time: Optional[Union[float, bm.Array, Callable]],
+      delay_time: Optional[Union[float, bm.Array, Callable]] = None,
+      delay_step: Optional[int] = None
   ) -> 'Delay':
     """Register an entry to access the data.
 
     Args:
       entry: str. The entry to access the delay data.
       delay_time: The delay time of the entry (can be a float).
+      delay_step: The delay step of the entry (must be an int). ``delay_step = delay_time / dt``.
 
     Returns:
       Return the self.
@@ -237,13 +253,15 @@ def __init__(
   def register_entry(
       self,
       entry: str,
-      delay_time: Optional[Union[int, float]],
+      delay_time: Optional[Union[int, float]] = None,
+      delay_step: Optional[int] = None,
   ) -> 'Delay':
     """Register an entry to access the data.
 
     Args:
       entry: str. The entry to access the delay data.
       delay_time: The delay time of the entry (can be a float).
+      delay_step: The delay step of the entry (must be an int). ``delay_step = delay_time / dt``.
 
     Returns:
       Return the self.
@@ -258,12 +276,7 @@ def register_entry(
     assert delay_time.size == 1 and delay_time.ndim == 0
     delay_time = delay_time.item()
 
-    if delay_time is None:
-      delay_step = None
-      delay_time = 0.
-    else:
-      assert isinstance(delay_time, (int, float))
-      delay_step = math.ceil(delay_time / bm.get_dt())
+    _, delay_step = _get_delay(delay_time, delay_step)
 
     # delay variable
     if delay_step is not None:
@@ -354,6 +367,8 @@ def update(
     """Update delay variable with the new data. 
""" if self.data is not None: + # jax.debug.print('last value == target value {} ', jnp.allclose(latest_value, self.target.value)) + # get the latest target value if latest_value is None: latest_value = self.target.value @@ -361,17 +376,20 @@ def update( # update the delay data at the rotation index if self.method == ROTATE_UPDATE: i = share.load('i') - idx = bm.as_jax((-i - 1) % self.max_length, dtype=jnp.int32) - self.data[idx] = latest_value + idx = bm.as_jax(-i % self.max_length, dtype=jnp.int32) + self.data[jax.lax.stop_gradient(idx)] = latest_value # update the delay data at the first position elif self.method == CONCAT_UPDATE: if self.max_length > 1: latest_value = bm.expand_dims(latest_value, 0) - self.data.value = bm.concat([latest_value, self.data[1:]], axis=0) + self.data.value = bm.concat([latest_value, self.data[:-1]], axis=0) else: self.data[0] = latest_value + else: + raise ValueError(f'Unknown updating method "{self.method}"') + def reset_state(self, batch_size: int = None, **kwargs): """Reset the delay data. """ diff --git a/brainpy/_src/dnn/conv.py b/brainpy/_src/dnn/conv.py index deead1f3b..e4b6e25d2 100644 --- a/brainpy/_src/dnn/conv.py +++ b/brainpy/_src/dnn/conv.py @@ -160,7 +160,7 @@ def update(self, x): nonbatching = False if x.ndim == self.num_spatial_dims + 1: nonbatching = True - x = x.unsqueeze(0) + x = bm.unsqueeze(x, 0) w = self.w.value if self.mask is not None: try: @@ -190,6 +190,9 @@ def __repr__(self): class Conv1d(_GeneralConv): """One-dimensional convolution. + The input should a 2d array with the shape of ``[H, C]``, or + a 3d array with the shape of ``[B, H, C]``, where ``H`` is the feature size. + Parameters ---------- in_channels: int @@ -282,6 +285,9 @@ def _check_input_dim(self, x): class Conv2d(_GeneralConv): """Two-dimensional convolution. + The input should a 3d array with the shape of ``[H, W, C]``, or + a 4d array with the shape of ``[B, H, W, C]``. + Parameters ---------- in_channels: int @@ -375,6 +381,9 @@ def _check_input_dim(self, x): class Conv3d(_GeneralConv): """Three-dimensional convolution. + The input should a 3d array with the shape of ``[H, W, D, C]``, or + a 4d array with the shape of ``[B, H, W, D, C]``. 
+ Parameters ---------- in_channels: int diff --git a/brainpy/_src/dnn/tests/test_activation.py b/brainpy/_src/dnn/tests/test_activation.py index ba2a49efd..7a0fa57af 100644 --- a/brainpy/_src/dnn/tests/test_activation.py +++ b/brainpy/_src/dnn/tests/test_activation.py @@ -1,5 +1,5 @@ -from absl.testing import parameterized from absl.testing import absltest +from absl.testing import parameterized import brainpy as bp import brainpy.math as bm diff --git a/brainpy/_src/dnn/tests/test_conv_layers.py b/brainpy/_src/dnn/tests/test_conv_layers.py index 3c9fdfa87..05f523622 100644 --- a/brainpy/_src/dnn/tests/test_conv_layers.py +++ b/brainpy/_src/dnn/tests/test_conv_layers.py @@ -1,17 +1,15 @@ # -*- coding: utf-8 -*- -from unittest import TestCase -from absl.testing import absltest import jax.numpy as jnp -import brainpy.math as bm +from absl.testing import absltest from absl.testing import parameterized + import brainpy as bp import brainpy.math as bm class TestConv(parameterized.TestCase): def test_Conv2D_img(self): - bm.random.seed() img = jnp.zeros((2, 200, 198, 4)) for k in range(4): x = 30 + 60 * k @@ -24,6 +22,7 @@ def test_Conv2D_img(self): strides=(2, 1), padding='VALID', groups=4) out = net(img) print("out shape: ", out.shape) + self.assertEqual(out.shape, (2, 99, 196, 32)) # print("First output channel:") # plt.figure(figsize=(10, 10)) # plt.imshow(np.array(img)[0, :, :, 0]) @@ -31,7 +30,6 @@ def test_Conv2D_img(self): bm.clear_buffer_memory() def test_conv1D(self): - bm.random.seed() with bp.math.training_environment(): model = bp.layers.Conv1d(in_channels=3, out_channels=32, kernel_size=(3,)) @@ -39,6 +37,7 @@ def test_conv1D(self): out = model(input) print("out shape: ", out.shape) + self.assertEqual(out.shape, (2, 5, 32)) # print("First output channel:") # plt.figure(figsize=(10, 10)) # plt.imshow(np.array(out)[0, :, :]) @@ -54,6 +53,7 @@ def test_conv2D(self): out = model(input) print("out shape: ", out.shape) + self.assertEqual(out.shape, (2, 5, 5, 32)) # print("First output channel:") # plt.figure(figsize=(10, 10)) # plt.imshow(np.array(out)[0, :, :, 31]) @@ -67,6 +67,7 @@ def test_conv3D(self): input = bp.math.ones((2, 5, 5, 5, 3)) out = model(input) print("out shape: ", out.shape) + self.assertEqual(out.shape, (2, 5, 5, 5, 32)) bm.clear_buffer_memory() diff --git a/brainpy/_src/dnn/tests/test_function.py b/brainpy/_src/dnn/tests/test_function.py index 269fec441..9ad15938d 100644 --- a/brainpy/_src/dnn/tests/test_function.py +++ b/brainpy/_src/dnn/tests/test_function.py @@ -1,12 +1,10 @@ # -*- coding: utf-8 -*- -from unittest import TestCase - -import jax.numpy as jnp -import brainpy.math as bm from absl.testing import absltest from absl.testing import parameterized + import brainpy as bp +import brainpy.math as bm class TestFunction(parameterized.TestCase): diff --git a/brainpy/_src/dnn/tests/test_normalization.py b/brainpy/_src/dnn/tests/test_normalization.py index fdc5b34e3..e76b3616b 100644 --- a/brainpy/_src/dnn/tests/test_normalization.py +++ b/brainpy/_src/dnn/tests/test_normalization.py @@ -1,7 +1,8 @@ -import brainpy.math as bm from absl.testing import parameterized from absl.testing import absltest + import brainpy as bp +import brainpy.math as bm class Test_Normalization(parameterized.TestCase): diff --git a/brainpy/_src/dnn/tests/test_pooling_layers.py b/brainpy/_src/dnn/tests/test_pooling_layers.py index 34f8f5cd5..5748edd8b 100644 --- a/brainpy/_src/dnn/tests/test_pooling_layers.py +++ b/brainpy/_src/dnn/tests/test_pooling_layers.py @@ -3,8 +3,8 @@ import jax 
import jax.numpy as jnp import numpy as np -from absl.testing import parameterized from absl.testing import absltest +from absl.testing import parameterized import brainpy as bp import brainpy.math as bm diff --git a/brainpy/_src/dynold/neurons/biological_models.py b/brainpy/_src/dynold/neurons/biological_models.py index 43b2c2a56..8daa7acdb 100644 --- a/brainpy/_src/dynold/neurons/biological_models.py +++ b/brainpy/_src/dynold/neurons/biological_models.py @@ -196,15 +196,11 @@ def __init__( self, *args, input_var: bool = True, - noise: Union[float, ArrayType, Initializer, Callable] = None, **kwargs, ): self.input_var = input_var super().__init__(*args, **kwargs, init_var=False) - self.noise = init_noise(noise, self.varshape, num_vars=4) - if self.noise is not None: - self.integral = sdeint(method=self.method, f=self.derivative, g=self.noise) self.reset_state(self.mode) def reset_state(self, batch_size=None): @@ -302,14 +298,10 @@ def __init__( self, *args, input_var: bool = True, - noise: Union[float, ArrayType, Initializer, Callable] = None, **kwargs, ): self.input_var = input_var super().__init__(*args, **kwargs, init_var=False) - self.noise = init_noise(noise, self.varshape, num_vars=2) - if self.noise is not None: - self.integral = sdeint(method=self.method, f=self.derivative, g=self.noise) self.reset_state(self.mode) def reset_state(self, batch_size=None): @@ -808,14 +800,11 @@ def __init__( self, *args, input_var: bool = True, - noise: Union[float, ArrayType, Initializer, Callable] = None, + **kwargs, ): self.input_var = input_var super().__init__(*args, **kwargs, init_var=False) - self.noise = init_noise(noise, self.varshape, num_vars=3) - if self.noise is not None: - self.integral = sdeint(method=self.method, f=self.derivative, g=self.noise) self.reset_state(self.mode) def reset_state(self, batch_size=None): diff --git a/brainpy/_src/dynold/synapses/abstract_models.py b/brainpy/_src/dynold/synapses/abstract_models.py index c7a902f01..2e214ed29 100644 --- a/brainpy/_src/dynold/synapses/abstract_models.py +++ b/brainpy/_src/dynold/synapses/abstract_models.py @@ -115,12 +115,7 @@ def __init__( self.g_max, self.conn_mask = self._init_weights(g_max, comp_method=comp_method, sparse_data='csr') # register delay - self.pre.register_local_delay("spike", self.name, delay_step) - - def reset_state(self, batch_size=None): - self.output.reset_state(batch_size) - if self.stp is not None: - self.stp.reset_state(batch_size) + self.pre.register_local_delay("spike", self.name, delay_step=delay_step) def update(self, pre_spike=None): # pre-synaptic spikes @@ -232,7 +227,6 @@ class Exponential(TwoEndConn): method: str The numerical integration methods. 
- """ def __init__( @@ -283,17 +277,16 @@ def __init__( else: raise ValueError(f'Does not support {comp_method}, only "sparse" or "dense".') - # variables - self.g = self.syn.g - # delay - self.pre.register_local_delay("spike", self.name, delay_step) + self.pre.register_local_delay("spike", self.name, delay_step=delay_step) - def reset_state(self, batch_size=None): - self.syn.reset_state(batch_size) - self.output.reset_state(batch_size) - if self.stp is not None: - self.stp.reset_state(batch_size) + @property + def g(self): + return self.syn.g + + @g.setter + def g(self, value): + self.syn.g = value def update(self, pre_spike=None): # delays diff --git a/brainpy/_src/dynold/synapses/base.py b/brainpy/_src/dynold/synapses/base.py index 55bac7111..5ceeb4e23 100644 --- a/brainpy/_src/dynold/synapses/base.py +++ b/brainpy/_src/dynold/synapses/base.py @@ -10,8 +10,7 @@ from brainpy._src.dyn.base import NeuDyn from brainpy._src.dynsys import DynamicalSystem from brainpy._src.initialize import parameter -from brainpy._src.mixin import (ParamDesc, JointType, - SupportAutoDelay, BindCondData, ReturnInfo) +from brainpy._src.mixin import (ParamDesc, JointType, SupportAutoDelay, BindCondData, ReturnInfo) from brainpy.errors import UnsupportedError from brainpy.types import ArrayType @@ -47,9 +46,6 @@ def isregistered(self, val: bool): raise ValueError('Must be an instance of bool.') self._registered = val - def reset_state(self, batch_size=None): - pass - def register_master(self, master: SynConn): if not isinstance(master, SynConn): raise TypeError(f'master must be instance of {SynConn.__name__}, but we got {type(master)}') @@ -296,7 +292,7 @@ def __init__( mode=mode) # delay - self.pre.register_local_delay("spike", self.name, delay_step) + self.pre.register_local_delay("spike", self.name, delay_step=delay_step) # synaptic dynamics self.syn = syn @@ -340,11 +336,5 @@ def g_max(self, v): UserWarning) self.comm.weight = v - def reset_state(self, *args, **kwargs): - self.syn.reset(*args, **kwargs) - self.comm.reset(*args, **kwargs) - self.output.reset(*args, **kwargs) - if self.stp is not None: - self.stp.reset(*args, **kwargs) diff --git a/brainpy/_src/dynsys.py b/brainpy/_src/dynsys.py index cb086b10d..a6fcc16a7 100644 --- a/brainpy/_src/dynsys.py +++ b/brainpy/_src/dynsys.py @@ -93,17 +93,41 @@ def __init__( # Attribute for "SupportInputProj" # each instance of "SupportInputProj" should have a "cur_inputs" attribute - self.current_inputs = bm.node_dict() - self.delta_inputs = bm.node_dict() + self._current_inputs: Optional[Dict[str, Callable]] = None + self._delta_inputs: Optional[Dict[str, Callable]] = None # the before- / after-updates used for computing # added after the version of 2.4.3 - self.before_updates: Dict[str, Callable] = bm.node_dict() - self.after_updates: Dict[str, Callable] = bm.node_dict() + self._before_updates: Optional[Dict[str, Callable]] = None + self._after_updates: Optional[Dict[str, Callable]] = None # super initialization super().__init__(name=name) + @property + def current_inputs(self): + if self._current_inputs is None: + self._current_inputs = bm.node_dict() + return self._current_inputs + + @property + def delta_inputs(self): + if self._delta_inputs is None: + self._delta_inputs = bm.node_dict() + return self._delta_inputs + + @property + def before_updates(self): + if self._before_updates is None: + self._before_updates = bm.node_dict() + return self._before_updates + + @property + def after_updates(self): + if self._after_updates is None: + self._after_updates = 
bm.node_dict()
+    return self._after_updates
+
  def add_bef_update(self, key: Any, fun: Callable):
    """Add the before update into this node"""
    if key in self.before_updates:
@@ -220,25 +244,32 @@ def register_local_delay(
      self,
      var_name: str,
      delay_name: str,
-      delay: Union[numbers.Number, ArrayType] = None,
+      delay_time: Union[numbers.Number, ArrayType] = None,
+      delay_step: Union[numbers.Number, ArrayType] = None,
  ):
    """Register a local delay at the given delay time.

    Args:
      var_name: str. The name of the delay target variable.
      delay_name: str. The name of the current delay data.
-      delay: The delay time.
+      delay_time: The delay time. Float.
+      delay_step: The delay step. Int. ``delay_step`` and ``delay_time`` are mutually exclusive; ``delay_step = delay_time / dt``.
    """
    delay_identifier, init_delay_by_return = _get_delay_tool()
    delay_identifier = delay_identifier + var_name
+    # check that "var_name" exists as an attribute of this node
    try:
      target = getattr(self, var_name)
    except AttributeError:
      raise AttributeError(f'This node {self} does not have the attribute "{var_name}".')
    if not self.has_aft_update(delay_identifier):
-      self.add_aft_update(delay_identifier, init_delay_by_return(target))
+      # add a model to receive the return of the target model;
+      # moreover, the model should not receive the return of the update function
+      model = not_receive_update_output(init_delay_by_return(target))
+      # register the model
+      self.add_aft_update(delay_identifier, model)
    delay_cls = self.get_aft_update(delay_identifier)
-    delay_cls.register_entry(delay_name, delay)
+    delay_cls.register_entry(delay_name, delay_time=delay_time, delay_step=delay_step)

  def get_local_delay(self, var_name, delay_name):
    """Get the delay at the given identifier (`name`).
@@ -381,14 +412,20 @@ def __call__(self, *args, **kwargs):

    # ``before_updates``
    for model in self.before_updates.values():
-      model()
+      if hasattr(model, '_receive_update_input'):
+        model(*args, **kwargs)
+      else:
+        model()

    # update the model self
    ret = self.update(*args, **kwargs)

    # ``after_updates``
    for model in self.after_updates.values():
-      model(ret)
+      if hasattr(model, '_not_receive_update_output'):
+        model()
+      else:
+        model(ret)
    return ret

  def __rrshift__(self, other):
@@ -832,3 +869,75 @@ def _slice_to_num(slice_: slice, length: int):
      start += step
    num += 1
  return num
+
+
+def receive_update_output(cls: object):
+  """
+  The decorator to mark an object (as an after-update hook) to receive the output of the update function.
+
+  That is, the `aft_update` will receive the return of the update function::
+
+    ret = model.update(*args, **kwargs)
+    for fun in model.aft_updates:
+      fun(ret)
+
+  """
+  # assert isinstance(cls, DynamicalSystem), 'The input class should be instance of DynamicalSystem.'
+  if hasattr(cls, '_not_receive_update_output'):
+    delattr(cls, '_not_receive_update_output')
+  return cls
+
+
+def not_receive_update_output(cls: object):
+  """
+  The decorator to mark an object (as an after-update hook) to not receive the output of the update function.
+
+  That is, the `aft_update` will not receive the return of the update function::
+
+    ret = model.update(*args, **kwargs)
+    for fun in model.aft_updates:
+      fun()
+
+  """
+  # assert isinstance(cls, DynamicalSystem), 'The input class should be instance of DynamicalSystem.'
+  cls._not_receive_update_output = True
+  return cls
+
+
+def receive_update_input(cls: object):
+  """
+  The decorator to mark an object (as a before-update hook) to receive the input of the update function.
+
+  That is, the `bef_update` will receive the input of the update function::
+
+    for fun in model.bef_updates:
+      fun(*args, **kwargs)
+    model.update(*args, **kwargs)
+
+  """
+  # assert isinstance(cls, DynamicalSystem), 'The input class should be instance of DynamicalSystem.'
+  cls._receive_update_input = True
+  return cls
+
+
+def not_receive_update_input(cls: object):
+  """
+  The decorator to mark an object (as a before-update hook) to not receive the input of the update function.
+
+  That is, the `bef_update` will not receive the input of the update function::
+
+    for fun in model.bef_updates:
+      fun()
+    model.update()
+
+  """
+  # assert isinstance(cls, DynamicalSystem), 'The input class should be instance of DynamicalSystem.'
+  if hasattr(cls, '_receive_update_input'):
+    delattr(cls, '_receive_update_input')
+  return cls
diff --git a/brainpy/_src/math/delayvars.py b/brainpy/_src/math/delayvars.py
index eb8e27c8f..676e4286b 100644
--- a/brainpy/_src/math/delayvars.py
+++ b/brainpy/_src/math/delayvars.py
@@ -11,7 +11,7 @@
 from brainpy import check
 from brainpy.check import is_float, is_integer, jit_error
 from brainpy.errors import UnsupportedError
-from .compat_numpy import vstack, broadcast_to
+from .compat_numpy import broadcast_to, expand_dims, concatenate
 from .environment import get_dt, get_float
 from .interoperability import as_jax
 from .ndarray import ndarray, Array
@@ -472,7 +472,7 @@ def update(self, value: Union[numbers.Number, Array, jax.Array] = None):
     elif self.update_method == CONCAT_UPDATE:
       if self.num_delay_step >= 2:
-        self.data.value = vstack([broadcast_to(value, self.data.shape[1:]), self.data[1:]])
+        self.data.value = concatenate([expand_dims(value, 0), self.data[:-1]], axis=0)
       else:
         self.data[:] = value
diff --git a/brainpy/_src/math/jitconn/event_matvec.py b/brainpy/_src/math/jitconn/event_matvec.py
index 279980380..a22aac757 100644
--- a/brainpy/_src/math/jitconn/event_matvec.py
+++ b/brainpy/_src/math/jitconn/event_matvec.py
@@ -1157,4 +1157,4 @@ def _define_event_mv_prob_normal_prim(cpu_kernel, gpu_kernel):
 _event_mv_prob_normal_p = _define_event_mv_prob_normal_prim(
   cpu_kernel=_event_mv_prob_normal_cpu,
   gpu_kernel=_event_mv_prob_normal_gpu
-)
+)
\ No newline at end of file
diff --git a/brainpy/_src/math/object_transform/autograd.py b/brainpy/_src/math/object_transform/autograd.py
index f5e091675..ad8a5ccf6 100644
--- a/brainpy/_src/math/object_transform/autograd.py
+++ b/brainpy/_src/math/object_transform/autograd.py
@@ -28,10 +28,8 @@
                    get_stack_cache,
                    cache_stack)
 from .base import (BrainPyObject, ObjectTransform)
-from .variables import (Variable,
-                        VariableStack,
-                        current_transform_number,
-                        new_transform)
+from .variables import (Variable, VariableStack)
+from .tools import eval_shape

 __all__ = [
   'grad',  # gradient of scalar function
@@ -203,36 +201,21 @@ def __call__(self, *args, **kwargs):
     elif not self._eval_dyn_vars:  # evaluate dynamical variables
       stack = get_stack_cache(self.target)
       if stack is None:
-        with new_transform(self):
-          with VariableStack() as stack:
-            if current_transform_number() > 1:
-              rets = self._transform(
-                [v.value for v in self._grad_vars],  # variables for gradients
-                {},  # dynamical variables
-                *args,
-                **kwargs
-              )
-            else:
-              rets = jax.eval_shape(
-                self._transform,
-                [v.value for v in self._grad_vars],  # variables for
gradients - {}, # dynamical variables - *args, - **kwargs - ) + with VariableStack() as stack: + rets = eval_shape(self._transform, + [v.value for v in self._grad_vars], # variables for gradients + {}, # dynamical variables + *args, + **kwargs) cache_stack(self.target, stack) - self._dyn_vars = stack - self._dyn_vars.remove_by_id(*[id(v) for v in self._grad_vars]) - self._eval_dyn_vars = True + self._dyn_vars = stack + self._dyn_vars.remove_by_id(*[id(v) for v in self._grad_vars]) + self._eval_dyn_vars = True - # if not the outermost transformation - if current_transform_number(): - return self._return(rets) - else: - self._dyn_vars = stack - self._dyn_vars.remove_by_id(*[id(v) for v in self._grad_vars]) - self._eval_dyn_vars = True + # if not the outermost transformation + if not stack.is_first_stack(): + return self._return(rets) rets = self._transform( [v.value for v in self._grad_vars], # variables for gradients diff --git a/brainpy/_src/math/object_transform/base.py b/brainpy/_src/math/object_transform/base.py index b21ed2af3..de64f94e7 100644 --- a/brainpy/_src/math/object_transform/base.py +++ b/brainpy/_src/math/object_transform/base.py @@ -12,9 +12,9 @@ import jax import numpy as np -from jax._src.tree_util import _registry from jax.tree_util import register_pytree_node_class +from brainpy._src.math import defaults from brainpy._src.math.modes import Mode from brainpy._src.math.ndarray import (Array, ) from brainpy._src.math.object_transform.collectors import (ArrayCollector, Collector) @@ -23,10 +23,10 @@ from brainpy._src.math.object_transform.variables import (Variable, VariableView, TrainVar, VarList, VarDict) from brainpy._src.math.sharding import BATCH_AXIS -from brainpy._src.math import defaults variable_ = None StateLoadResult = namedtuple('StateLoadResult', ['missing_keys', 'unexpected_keys']) +registered = set() __all__ = [ 'BrainPyObject', 'Base', 'FunAsObject', 'ObjectTransform', @@ -91,8 +91,9 @@ def __init__(self, name=None): super().__init__() if defaults.bp_object_as_pytree: - if self.__class__ not in _registry: + if self.__class__ not in registered: register_pytree_node_class(self.__class__) + registered.add(self.__class__) # check whether the object has a unique name. 
self._name = None @@ -101,11 +102,23 @@ def __init__(self, name=None): # Used to wrap the implicit variables # which cannot be accessed by self.xxx - self.implicit_vars: ArrayCollector = ArrayCollector() + self._implicit_vars: Optional[ArrayCollector] = None # Used to wrap the implicit children nodes # which cannot be accessed by self.xxx - self.implicit_nodes: Collector = Collector() + self._implicit_nodes: Optional[Collector] = None + + @property + def implicit_vars(self): + if self._implicit_vars is None: + self._implicit_vars = ArrayCollector() + return self._implicit_vars + + @property + def implicit_nodes(self): + if self._implicit_nodes is None: + self._implicit_nodes = Collector() + return self._implicit_nodes def setattr(self, key: str, value: Any) -> None: super().__setattr__(key, value) @@ -223,7 +236,7 @@ def tree_flatten(self): static_values = [] for k, v in self.__dict__.items(): if isinstance(v, (BrainPyObject, Variable, NodeList, NodeDict, VarList, VarDict)): - # if isinstance(v, (BrainPyObject, Variable)): + # if isinstance(v, (BrainPyObject, Variable)): dynamic_names.append(k) dynamic_values.append(v) else: diff --git a/brainpy/_src/math/object_transform/controls.py b/brainpy/_src/math/object_transform/controls.py index 032a0fab6..3edeb08e8 100644 --- a/brainpy/_src/math/object_transform/controls.py +++ b/brainpy/_src/math/object_transform/controls.py @@ -21,17 +21,12 @@ cache_stack ) from .tools import ( - evaluate_dyn_vars, + eval_shape, dynvar_deprecation, node_deprecation, abstract ) -from .variables import ( - Variable, - VariableStack, - new_transform, - current_transform_number, -) +from .variables import (Variable, VariableStack) __all__ = [ 'make_loop', @@ -542,15 +537,13 @@ def cond( node_deprecation(child_objs) dyn_vars = get_stack_cache((true_fun, false_fun)) - if not jax.config.jax_disable_jit: - if dyn_vars is None: - with new_transform('cond'): - dyn_vars1, rets = evaluate_dyn_vars(true_fun, *operands, use_eval_shape=current_transform_number() <= 1) - dyn_vars2, rets = evaluate_dyn_vars(false_fun, *operands, use_eval_shape=current_transform_number() <= 1) - dyn_vars = dyn_vars1 + dyn_vars2 - cache_stack((true_fun, false_fun), dyn_vars) - if current_transform_number() > 0: - return rets + if not jax.config.jax_disable_jit and dyn_vars is None: + with VariableStack() as dyn_vars: + rets = eval_shape(true_fun, *operands, with_stack=True)[1] + _ = eval_shape(false_fun, *operands, with_stack=True) + cache_stack((true_fun, false_fun), dyn_vars) + if not dyn_vars.is_first_stack(): + return rets dyn_vars = VariableStack() if dyn_vars is None else dyn_vars dyn_values, res = _get_cond_transform(dyn_vars, pred, true_fun, false_fun)(operands) for k in dyn_values.keys(): @@ -681,20 +674,16 @@ def ifelse( else: dyn_vars = get_stack_cache(tuple(branches)) if dyn_vars is None: - with new_transform('ifelse'): - with VariableStack() as dyn_vars: - if current_transform_number() > 1: - rets = [branch(*operands) for branch in branches] - else: - rets = [jax.eval_shape(branch, *operands) for branch in branches] - trees = [jax.tree_util.tree_structure(ret) for ret in rets] - if not _all_equal(trees): - msg = 'All returns in branches should have the same tree structure. 
But we got:\n' - for tree in trees: - msg += f'- {tree}\n' - raise TypeError(msg) + with VariableStack() as dyn_vars: + rets = [eval_shape(fun, *operands, with_stack=True)[1] for fun in branches] + trees = [jax.tree_util.tree_structure(ret) for ret in rets] + if not _all_equal(trees): + msg = 'All returns in branches should have the same tree structure. But we got:\n' + for tree in trees: + msg += f'- {tree}\n' + raise TypeError(msg) cache_stack(tuple(branches), dyn_vars) - if current_transform_number(): + if not dyn_vars.is_first_stack(): return rets[0] branches = [_cond_transform_fun(fun, dyn_vars) for fun in branches] @@ -880,28 +869,23 @@ def for_loop( if jit is None: # jax disable jit jit = not jax.config.jax_disable_jit - dyn_vars = get_stack_cache((body_fun, unroll_kwargs)) + stack = get_stack_cache((body_fun, unroll_kwargs)) if jit: - if dyn_vars is None: + if stack is None: + transform = _get_for_loop_transform(body_fun, VariableStack(), bar, progress_bar, + remat, reverse, unroll, unroll_kwargs) # TODO: better cache mechanism? - with new_transform('for_loop'): - with VariableStack() as dyn_vars: - transform = _get_for_loop_transform(body_fun, VariableStack(), bar, - progress_bar, remat, reverse, unroll, - unroll_kwargs) - if current_transform_number() > 1: - rets = transform(operands) - else: - rets = jax.eval_shape(transform, operands) - cache_stack((body_fun, unroll_kwargs), dyn_vars) # cache - if current_transform_number(): + with VariableStack() as stack: + rets = eval_shape(transform, operands) + cache_stack((body_fun, unroll_kwargs), stack) # cache + if not stack.is_first_stack(): return rets[1] del rets else: - dyn_vars = VariableStack() + stack = VariableStack() # TODO: cache mechanism? - transform = _get_for_loop_transform(body_fun, dyn_vars, bar, + transform = _get_for_loop_transform(body_fun, stack, bar, progress_bar, remat, reverse, unroll, unroll_kwargs) if jit: @@ -909,11 +893,11 @@ def for_loop( else: with jax.disable_jit(): dyn_vals, out_vals = transform(operands) - for key in dyn_vars.keys(): - dyn_vars[key]._value = dyn_vals[key] + for key in stack.keys(): + stack[key]._value = dyn_vals[key] if progress_bar: bar.close() - del dyn_vals, dyn_vars + del dyn_vals, stack return out_vals @@ -1011,26 +995,21 @@ def scan( num_total = min([op.shape[0] for op in jax.tree_util.tree_flatten(operands)[0]]) bar = tqdm(total=num_total) - dyn_vars = get_stack_cache(body_fun) - if not jax.config.jax_disable_jit: - if dyn_vars is None: - with new_transform('scan'): - with VariableStack() as dyn_vars: - transform = _get_scan_transform(body_fun, VariableStack(), bar, progress_bar, remat, reverse, unroll) - if current_transform_number() > 1: - rets = transform(init, operands) - else: - rets = jax.eval_shape(transform, init, operands) - cache_stack(body_fun, dyn_vars) # cache - if current_transform_number(): - return rets[0][1], rets[1] - del rets - - dyn_vars = VariableStack() if dyn_vars is None else dyn_vars - transform = _get_scan_transform(body_fun, dyn_vars, bar, progress_bar, remat, reverse, unroll) + stack = get_stack_cache(body_fun) + if not jax.config.jax_disable_jit and stack is None: + transform = _get_scan_transform(body_fun, VariableStack(), bar, progress_bar, remat, reverse, unroll) + with VariableStack() as stack: + rets = eval_shape(transform, init, operands) + cache_stack(body_fun, stack) # cache + if not stack.is_first_stack(): + return rets[0][1], rets[1] + del rets + + stack = VariableStack() if stack is None else stack + transform = _get_scan_transform(body_fun, 
stack, bar, progress_bar, remat, reverse, unroll) (dyn_vals, carry), out_vals = transform(init, operands) - for key in dyn_vars.keys(): - dyn_vars[key]._value = dyn_vals[key] + for key in stack.keys(): + stack[key]._value = dyn_vals[key] if progress_bar: bar.close() return carry, out_vals @@ -1129,7 +1108,6 @@ def while_loop( No longer need to provide ``child_objs``. This function is capable of automatically collecting the children objects used in the target ``func``. - """ dynvar_deprecation(dyn_vars) node_deprecation(child_objs) @@ -1137,18 +1115,16 @@ def while_loop( if not isinstance(operands, (list, tuple)): operands = (operands,) - dyn_vars = get_stack_cache((body_fun, cond_fun)) - if not jax.config.jax_disable_jit: - if dyn_vars is None: - with new_transform('while_loop'): - dyn_vars1, _ = evaluate_dyn_vars(cond_fun, *operands, use_eval_shape=current_transform_number() <= 1) - dyn_vars2, rets = evaluate_dyn_vars(body_fun, *operands, use_eval_shape=current_transform_number() <= 1) - dyn_vars = dyn_vars1 + dyn_vars2 - cache_stack((body_fun, cond_fun), dyn_vars) - if current_transform_number(): - return rets - dyn_vars = VariableStack() if dyn_vars is None else dyn_vars - dyn_values, out = _get_while_transform(cond_fun, body_fun, dyn_vars)(operands) - for k, v in dyn_vars.items(): + stack = get_stack_cache((body_fun, cond_fun)) + if not jax.config.jax_disable_jit and stack is None: + with VariableStack() as stack: + _ = eval_shape(cond_fun, *operands, with_stack=True) + rets = eval_shape(body_fun, *operands, with_stack=True)[1] + cache_stack((body_fun, cond_fun), stack) + if not stack.is_first_stack(): + return rets + stack = VariableStack() if stack is None else stack + dyn_values, out = _get_while_transform(cond_fun, body_fun, stack)(operands) + for k, v in stack.items(): v._value = dyn_values[k] return out diff --git a/brainpy/_src/math/object_transform/jit.py b/brainpy/_src/math/object_transform/jit.py index 7bb36f4e2..551a0949c 100644 --- a/brainpy/_src/math/object_transform/jit.py +++ b/brainpy/_src/math/object_transform/jit.py @@ -11,23 +11,15 @@ from typing import Callable, Union, Optional, Sequence, Dict, Any, Iterable import jax -from jax.sharding import Sharding from brainpy import tools, check -from .tools import (dynvar_deprecation, - node_deprecation, - evaluate_dyn_vars_with_cache, - evaluate_dyn_vars, - _partial_fun) from .base import BrainPyObject, ObjectTransform from .naming import get_stack_cache, cache_stack +from .tools import (dynvar_deprecation, + node_deprecation, + eval_shape) +from .variables import (Variable, VariableStack) from ..ndarray import Array -from .variables import (Variable, - VariableStack, - outermost_transform, - transform_stack, - current_transform_number, - new_transform) RandomState = None @@ -96,6 +88,17 @@ def _seq_of_str(static_argnames): return static_argnames +def _jit_call_take_care_of_rngs(transform, stack, *args, **kwargs): + # call the transformed function + rng_keys = stack.call_on_subset(_is_rng, _rng_split_key) + changes, out = transform(stack.dict_data(), *args, **kwargs) + for key, v in changes.items(): + stack[key]._value = v + for key, v in rng_keys.items(): + stack[key]._value = v + return out + + class JITTransform(ObjectTransform): """Object-oriented JIT transformation in BrainPy.""" @@ -142,25 +145,21 @@ def __init__( # OO transformation parameters self._transform = None self._dyn_vars = None - - def _transform_function(self, variable_data: Dict, *args, **kwargs): - for key, v in self._dyn_vars.items(): - v._value = 
variable_data[key] - out = self.fun(*args, **kwargs) - changes = self._dyn_vars.dict_data_of_subset(_is_not_rng) - return changes, out + # + # def _transform_function(self, variable_data: Dict, *args, **kwargs): + # for key, v in self._dyn_vars.items(): + # v._value = variable_data[key] + # out = self.fun(*args, **kwargs) + # changes = self._dyn_vars.dict_data_of_subset(_is_not_rng) + # return changes, out def _get_transform(self, *args, **kwargs): - with new_transform(self): - self._dyn_vars, rets = evaluate_dyn_vars( - self.fun, - *args, - static_argnums=self._static_argnums, - static_argnames=self._static_argnames, - use_eval_shape=current_transform_number() <= 1, - **kwargs - ) - + with VariableStack() as self._dyn_vars: + rets = eval_shape(self.fun, + *args, + **kwargs, + static_argnums=self._static_argnums, + static_argnames=self._static_argnames) # in_shardings if self._in_shardings is None: in_shardings = None @@ -186,18 +185,18 @@ def _get_transform(self, *args, **kwargs): _dyn_vars_sharing = get_shardings(self._dyn_vars.subset_by_not_instance(RandomState)) out_shardings = (_dyn_vars_sharing,) + out_shardings - # jit - self._transform = jax.jit( - self._transform_function, - static_argnums=jax.tree_util.tree_map(lambda a: a + 1, self._static_argnums), - static_argnames=self._static_argnames, - donate_argnums=self._donate_argnums, - inline=self._inline, - keep_unused=self._keep_unused, - abstracted_axes=self._abstracted_axes, - in_shardings=in_shardings, - out_shardings=out_shardings, - ) + # jit + self._transform = jax.jit( + _make_transform(self.fun, self._dyn_vars), + static_argnums=jax.tree_util.tree_map(lambda a: a + 1, self._static_argnums), + static_argnames=self._static_argnames, + donate_argnums=self._donate_argnums, + inline=self._inline, + keep_unused=self._keep_unused, + abstracted_axes=self._abstracted_axes, + in_shardings=in_shardings, + out_shardings=out_shardings, + ) return rets def __call__(self, *args, **kwargs): @@ -207,17 +206,11 @@ def __call__(self, *args, **kwargs): if self._transform is None: # initialize the transformation rets = self._get_transform(*args, **kwargs) # if not the outermost transformation - if current_transform_number(): + if not self._dyn_vars.is_first_stack(): return rets # call the transformed function - rng_keys = self._dyn_vars.call_on_subset(_is_rng, _rng_split_key) - changes, out = self._transform(self._dyn_vars.dict_data(), *args, **kwargs) - for key, v in changes.items(): - self._dyn_vars[key]._value = v - for key, v in rng_keys.items(): - self._dyn_vars[key]._value = v - return out + return _jit_call_take_care_of_rngs(self._transform, self._dyn_vars, *args, **kwargs) def __repr__(self): name = self.__class__.__name__ @@ -314,7 +307,7 @@ def jit( Examples -------- - You can JIT any object in which all dynamical variables are defined as :py:class:`~.Variable`. + You can JIT any object in which all dynamical variables are defined as :py:class:`~.Variable`. >>> import brainpy as bp >>> class Hello(bp.BrainPyObject): @@ -401,12 +394,12 @@ def cls_jit( **kwargs ) -> Callable: """Just-in-time compile a function and then the jitted function as the bound method for a class. - + Examples -------- - + This transformation can be put on any class function. 
For example, - + >>> import brainpy as bp >>> import brainpy.math as bm >>> @@ -415,7 +408,7 @@ def cls_jit( >>> super(SomeProgram, self).__init__() >>> self.a = bm.zeros(2) >>> self.b = bm.Variable(bm.ones(2)) - >>> + >>> >>> @bm.cls_jit(inline=True) >>> def __call__(self, *args, **kwargs): >>> a = bm.random.uniform(size=2) @@ -424,7 +417,7 @@ def cls_jit( >>> >>> program = SomeProgram() >>> program() - + Parameters ---------- {jit_pars} @@ -477,15 +470,8 @@ def call_fun(self, *args, **kwargs): cache = get_stack_cache(hash_v) # TODO: better cache mechanism if cache is None: fun2 = partial(fun, self) - - with jax.ensure_compile_time_eval(): - if len(static_argnums) or len(static_argnames): - fun3, args_, kwargs_ = _partial_fun(fun2, args, kwargs, static_argnums, static_argnames) - else: - args_, kwargs_, fun3 = args, kwargs, fun2 - with VariableStack() as stack: - _ = jax.eval_shape(fun3, *args_, **kwargs_) - del args_, kwargs_ + with VariableStack() as stack: + out = eval_shape(fun2, *args, **kwargs, static_argnums=static_argnums, static_argnames=static_argnames) _transform = jax.jit( _make_transform(fun2, stack), static_argnums=jax.tree_util.tree_map(lambda a: a + 1, static_argnums), @@ -497,25 +483,22 @@ def call_fun(self, *args, **kwargs): **jit_kwargs ) cache_stack(hash_v, (stack, _transform)) # cache "variable stack" and "transform function" - + if not stack.is_first_stack(): + return out else: stack, _transform = cache - del cache - out, changes = _transform(stack.dict_data(), *args, **kwargs) - for key, v in stack.items(): - v._value = changes[key] - return out + return _jit_call_take_care_of_rngs(_transform, stack, *args, **kwargs) return call_fun def _make_transform(fun, stack): @wraps(fun) - def _transform_function(variable_data: dict, *args, **kwargs): + def _transform_function(variable_data: Dict, *args, **kwargs): for key, v in stack.items(): v._value = variable_data[key] out = fun(*args, **kwargs) - changes = stack.dict_data() - return out, changes + changes = stack.dict_data_of_subset(_is_not_rng) + return changes, out return _transform_function diff --git a/brainpy/_src/math/object_transform/naming.py b/brainpy/_src/math/object_transform/naming.py index 6326929c4..1181e003b 100644 --- a/brainpy/_src/math/object_transform/naming.py +++ b/brainpy/_src/math/object_transform/naming.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -import gc + import warnings from brainpy import errors @@ -11,7 +11,6 @@ _name2id = dict() _typed_names = {} -_fun2stack = dict() def check_name_uniqueness(name, obj): @@ -42,7 +41,7 @@ def get_unique_name(type_: str): return name -def clear_name_cache(ignore_warn=False): +def clear_name_cache(ignore_warn=True): """Clear the cached names.""" _name2id.clear() _typed_names.clear() @@ -50,14 +49,17 @@ def clear_name_cache(ignore_warn=False): warnings.warn(f'All named models and their ids are cleared.', UserWarning) +_fun2stack = dict() + + def cache_stack(func, stack): _fun2stack[func] = stack def clear_stack_cache(): + """Clear the cached stack.""" for k in tuple(_fun2stack.keys()): del _fun2stack[k] - gc.collect() def get_stack_cache(func): diff --git a/brainpy/_src/math/object_transform/parallels.py b/brainpy/_src/math/object_transform/parallels.py deleted file mode 100644 index 1eddce048..000000000 --- a/brainpy/_src/math/object_transform/parallels.py +++ /dev/null @@ -1,460 +0,0 @@ -# -*- coding: utf-8 -*- - -""" -The parallel compilation tools for JAX backend. - -1. Vectorize compilation is implemented by the 'vmap()' function -2. 
Parallel compilation is implemented by the 'pmap()' function - -""" - - -import functools - -import jax -import jax.numpy as jnp -import numpy as np -from jax.interpreters.partial_eval import DynamicJaxprTracer -from jax.interpreters.partial_eval import JaxprTracer -from jax.interpreters.pxla import ShardedDeviceArray - -try: - from jax.errors import UnexpectedTracerError -except ImportError: - from jax.core import UnexpectedTracerError - -from brainpy import errors -from brainpy._src.math.random import RandomState -from brainpy._src.math.ndarray import Array -from brainpy.tools import change_func_name -from .base import BrainPyObject, ArrayCollector - -__all__ = [ - 'vmap', - 'pmap', -] - - -def _make_vmap(func, nonbatched_vars, batched_vars, in_axes, out_axes, - batch_idx, axis_name, f_name=None): - @functools.partial(jax.vmap, in_axes=in_axes, out_axes=out_axes, axis_name=axis_name) - def vmapped_func(nonbatched_data, batched_data, *args, **kwargs): - nonbatched_vars.assign(nonbatched_data) - batched_vars.assign(batched_data) - out = func(*args, **kwargs) - nonbatched_changes = nonbatched_vars.dict() - batched_changes = batched_vars.dict() - return nonbatched_changes, batched_changes, out - - def call(*args, **kwargs): - n = args[batch_idx[0]].shape[batch_idx[1]] - nonbatched_data = nonbatched_vars.dict() - batched_data = {key: val.split_keys(n) for key, val in batched_vars.items()} - try: - out, dyn_changes, rand_changes = vmapped_func(nonbatched_data, batched_data, *args, **kwargs) - except UnexpectedTracerError as e: - nonbatched_vars.assign(nonbatched_data) - batched_vars.assign(batched_data) - raise errors.JaxTracerError() from e - # for key, v in dyn_changes.items(): - # dyn_vars[key] = reduce_func(v) - # for key, v in rand_changes.items(): - # rand_vars[key] = reduce_func(v) - return out - - return change_func_name(name=f_name, f=call) if f_name else call - - -def vmap(func, dyn_vars=None, batched_vars=None, - in_axes=0, out_axes=0, axis_name=None, - reduce_func=None, auto_infer=False): - """Vectorization compilation for class objects. - - Vectorized compile a function or a module to run in parallel on a single device. - - Examples - -------- - - Parameters - ---------- - func : BrainPyObject, function, callable - The function or the module to compile. - dyn_vars : dict, sequence - batched_vars : dict - in_axes : optional, int, sequence of int - Specify which input array axes to map over. If each positional argument to - ``obj_or_func`` is an array, then ``in_axes`` can be an integer, a None, - or a tuple of integers and Nones with length equal to the number of - positional arguments to ``obj_or_func``. An integer or ``None`` - indicates which array axis to map over for all arguments (with ``None`` - indicating not to map any axis), and a tuple indicates which axis to map - for each corresponding positional argument. Axis integers must be in the - range ``[-ndim, ndim)`` for each array, where ``ndim`` is the number of - dimensions (axes) of the corresponding input array. - - If the positional arguments to ``obj_or_func`` are container types, the - corresponding element of ``in_axes`` can itself be a matching container, - so that distinct array axes can be mapped for different container - elements. ``in_axes`` must be a container tree prefix of the positional - argument tuple passed to ``obj_or_func``. - - At least one positional argument must have ``in_axes`` not None. The sizes - of the mapped input axes for all mapped positional arguments must all be - equal. 
- - Arguments passed as keywords are always mapped over their leading axis - (i.e. axis index 0). - out_axes : optional, int, tuple/list/dict - Indicate where the mapped axis should appear in the output. All outputs - with a mapped axis must have a non-None ``out_axes`` specification. Axis - integers must be in the range ``[-ndim, ndim)`` for each output array, - where ``ndim`` is the number of dimensions (axes) of the array returned - by the :func:`vmap`-ed function, which is one more than the number of - dimensions (axes) of the corresponding array returned by ``obj_or_func``. - axis_name : optional - - Returns - ------- - obj_or_func : Any - Batched/vectorized version of ``obj_or_func`` with arguments that correspond to - those of ``obj_or_func``, but with extra array axes at positions indicated by - ``in_axes``, and a return value that corresponds to that of ``obj_or_func``, but - with extra array axes at positions indicated by ``out_axes``. - - """ - # if isinstance(func, DynamicalSystem): - # if len(func.steps): # DynamicalSystem has step functions - # - # # dynamical variables - # dyn_vars = (dyn_vars or func.vars().unique()) - # dyn_vars, rand_vars = ArrayCollector(), ArrayCollector() - # for key, val in dyn_vars.items(): - # if isinstance(val, RandomState): - # rand_vars[key] = val - # else: - # dyn_vars[key] = val - # - # # in axes - # if in_axes is None: - # in_axes = {key: (None, 0) for key in func.steps.keys()} - # elif isinstance(in_axes, int): - # in_axes = {key: (None, 0, in_axes) for key in func.steps.keys()} - # elif isinstance(in_axes, (tuple, list)): - # in_axes = {key: (None, 0) + tuple(in_axes) for key in func.steps.keys()} - # elif isinstance(in_axes, dict): - # keys = list(func.steps.keys()) - # if keys[0] not in in_axes: - # in_axes = {key: (None, 0, in_axes) for key in keys} - # else: - # in_axes = {key: (None, 0) + tuple(in_axes[key]) for key in keys} - # assert isinstance(in_axes, dict) - # - # # batch size index - # batch_idx = {} - # for key, axes in in_axes.items(): - # for i, axis in enumerate(axes[2:]): - # if axis is not None: - # batch_idx[key] = (i, axis) - # break - # else: - # raise ValueError(f'Found no batch axis: {axes}.') - # - # # out axes - # if out_axes is None: - # out_axes = {key: 0 for key in func.steps.keys()} - # elif isinstance(out_axes, int): - # out_axes = {key: out_axes for key in func.steps.keys()} - # elif isinstance(out_axes, (tuple, list)): - # out_axes = {key: tuple(out_axes) + (0, 0) for key in func.steps.keys()} - # elif isinstance(out_axes, dict): - # keys = list(func.steps.keys()) - # if keys[0] not in out_axes: - # out_axes = {key: (out_axes, 0, 0) for key in keys} - # else: - # out_axes = {key: tuple(out_axes[key]) + (0, 0) for key in keys} - # assert isinstance(out_axes, dict) - # - # # reduce_func - # if reduce_func is None: - # reduce_func = lambda x: x.mean(axis=0) - # - # # vectorized map functions - # for key in func.steps.keys(): - # func.steps[key] = _make_vmap(func=func.steps[key], - # dyn_vars=dyn_vars, - # rand_vars=rand_vars, - # in_axes=in_axes[key], - # out_axes=out_axes[key], - # axis_name=axis_name, - # batch_idx=batch_idx[key], - # reduce_func=reduce_func, - # f_name=key) - # - # return func - - if callable(func): - if auto_infer: - if dyn_vars is not None: - dyn_vars = dyn_vars - elif isinstance(func, BrainPyObject): # BrainPyObject has '__call__()' implementation - dyn_vars = func.vars().unique() - elif hasattr(func, '__self__'): - if isinstance(func.__self__, BrainPyObject): - dyn_vars = 
func.__self__.vars().unique() - - if dyn_vars is None: - return jax.vmap(func, - in_axes=in_axes, - out_axes=out_axes, - axis_name=axis_name) - - else: - if isinstance(dyn_vars, Array): - dyn_vars = [dyn_vars] - if isinstance(dyn_vars, (tuple, list)): - dyn_vars = {f'_vmap_v{i}': v for i, v in enumerate(dyn_vars)} - assert isinstance(dyn_vars, dict) - - # dynamical variables - _dyn_vars, _rand_vars = ArrayCollector(), ArrayCollector() - for key, val in dyn_vars.items(): - if isinstance(val, RandomState): - _rand_vars[key] = val - else: - _dyn_vars[key] = val - - # in axes - if in_axes is None: - in_axes = (None, 0) - elif isinstance(in_axes, (int, dict)): - in_axes = (None, 0, in_axes) - elif isinstance(in_axes, (tuple, list)): - in_axes = (None, 0) + tuple(in_axes) - assert isinstance(in_axes, (tuple, list)) - - # batch size index - batch_idx = {} - for key, axes in batch_idx.items(): - for i, axis in enumerate(axes[2:]): - if axis is not None: - batch_idx[key] = (i, axis) - break - else: - raise ValueError(f'Found no batch axis: {axes}.') - - # out axes - if out_axes is None: - out_axes = 0 - elif isinstance(out_axes, (int, dict)): - out_axes = (out_axes, 0, 0) - elif isinstance(out_axes, (tuple, list)): - out_axes = tuple(out_axes) + (0, 0) - assert isinstance(out_axes, (list, tuple)) - - # reduce_func - if reduce_func is None: - reduce_func = lambda x: x.mean(axis=0) - - # jit function - return _make_vmap(func=func, - nonbatched_vars=_dyn_vars, - batched_vars=_rand_vars, - in_axes=in_axes, - out_axes=out_axes, - axis_name=axis_name, - batch_idx=batch_idx) - - else: - raise errors.BrainPyError(f'Only support instance of {BrainPyObject.__name__}, or a callable ' - f'function, but we got {type(func)}.') - - -def _device_reshape(x): - """Reshape an input array in order to broadcast to multiple devices.""" - num_device = jax.local_device_count() - - if not hasattr(x, 'ndim'): - raise errors.BrainPyError(f'Expected Array, got {type(x)}. If you are trying to pass a scalar to ' - f'parallel, first convert it to a Array, for example np.float(0.5)') - if x.ndim == 0: - return np.broadcast_to(x, [num_device]) - if x.shape[0] % num_device != 0: - raise errors.BrainPyError(f'Must be able to equally divide batch {x.shape} among ' - f'{num_device} devices, but does not go equally.') - return x.reshape((num_device, x.shape[0] // num_device) + x.shape[1:]) - - -def _make_pmap(func, dyn_vars, rand_vars, reduce_func, axis_name=None, in_axes=0, - out_axes=0, static_broadcasted_argnums=(), devices=None, backend=None, - axis_size=None, donate_argnums=(), global_arg_shapes=None, f_name=None): - @functools.partial(jax.pmap, in_axes=in_axes, out_axes=out_axes, axis_name=axis_name, - static_broadcasted_argnums=static_broadcasted_argnums, devices=devices, - backend=backend, axis_size=axis_size, donate_argnums=donate_argnums, - global_arg_shapes=global_arg_shapes) - def pmapped_func(dyn_data, rand_data, *args, **kwargs): - dyn_vars.assign(dyn_data) - rand_vars.assign(rand_data) - out = func(*args, **kwargs) - dyn_changes = dyn_vars.dict() - rand_changes = rand_vars.dict() - return out, dyn_changes, rand_changes - - def call(*args): - un_replicated = [k for k, v in dyn_vars.items() - if not isinstance(v.value, (ShardedDeviceArray, JaxprTracer, DynamicJaxprTracer))] - if len(un_replicated): - raise errors.BrainPyError(f'Some variables were not replicated: {un_replicated}.' 
- f'did you forget to call xx.replicate() on them?') - _args = [] - for i, x in enumerate(args): - if i + 2 in static_broadcasted_argnums: - _args.append(x) - else: - _args.append(jax.tree_map(_device_reshape, [x])[0]) - dyn_data = dyn_vars.dict() - rand_data = rand_vars.dict() - output, dyn_changes, rand_changes = pmapped_func(dyn_data, rand_data, *_args) - dyn_vars.assign(dyn_changes) - rand_vars.assign(rand_changes) - return jax.tree_map(reduce_func, output) - - return change_func_name(name=f_name, f=call) if f_name else call - - -def pmap(func, dyn_vars=None, axis_name=None, in_axes=0, out_axes=0, static_broadcasted_argnums=(), - devices=None, backend=None, axis_size=None, donate_argnums=(), global_arg_shapes=None, - reduce_func=None): - """Parallel compilation for class objects. - - Parallel compile a function or a module to run on multiple devices in parallel. - - Parameters - ---------- - func - axis_name - in_axes - out_axes - static_broadcasted_argnums - devices - backend - axis_size - donate_argnums - global_arg_shapes - - Returns - ------- - - - Examples - -------- - - - """ - - # if isinstance(func, DynamicalSystem): - # if len(func.steps): # DynamicalSystem has step functions - # - # # dynamical variables - # all_vars = (dyn_vars or func.vars().unique()) - # dyn_vars = ArrayCollector() - # rand_vars = ArrayCollector() - # for key, val in all_vars.items(): - # if isinstance(val, RandomState): - # rand_vars[key] = val - # else: - # dyn_vars[key] = val - # - # # reduce function - # if reduce_func is None: - # reduce_func = jnp.concatenate - # - # # static broadcast-ed arguments - # if static_broadcasted_argnums is None: - # static_broadcasted_argnums = () - # elif isinstance(static_broadcasted_argnums, int): - # static_broadcasted_argnums = (static_broadcasted_argnums + 2,) - # elif isinstance(static_broadcasted_argnums, (tuple, list)): - # static_broadcasted_argnums = tuple(argnum + 2 for argnum in static_broadcasted_argnums) - # assert isinstance(static_broadcasted_argnums, (tuple, list)) - # - # # jit functions - # for key in func.steps.keys(): - # step = func.steps[key] - # func.steps[key] = _make_pmap(dyn_vars=dyn_vars, - # rand_vars=rand_vars, - # func=step, - # axis_name=axis_name, - # in_axes=in_axes, - # out_axes=out_axes, - # static_broadcasted_argnums=static_broadcasted_argnums, - # devices=devices, - # backend=backend, - # axis_size=axis_size, - # donate_argnums=donate_argnums, - # global_arg_shapes=global_arg_shapes, - # reduce_func=reduce_func, - # f_name=key) - # return func - - if callable(func): - if dyn_vars is not None: - dyn_vars = dyn_vars - elif isinstance(func, BrainPyObject): # BrainPyObject has '__call__()' implementation - dyn_vars = func.vars().unique() - elif hasattr(func, '__self__'): - if isinstance(func.__self__, BrainPyObject): - dyn_vars = func.__self__.vars().unique() - - if dyn_vars is None: - return jax.pmap(func, - axis_name=axis_name, - in_axes=in_axes, - out_axes=out_axes, - static_broadcasted_argnums=static_broadcasted_argnums, - devices=devices, - backend=backend, - axis_size=axis_size, - donate_argnums=donate_argnums, - global_arg_shapes=global_arg_shapes) - else: - # dynamical variables - dyn_vars = ArrayCollector() - rand_vars = ArrayCollector() - for key, val in dyn_vars.items(): - if isinstance(val, RandomState): - rand_vars[key] = val - else: - dyn_vars[key] = val - - # static broadcast-ed arguments - if static_broadcasted_argnums is None: - static_broadcasted_argnums = () - elif isinstance(static_broadcasted_argnums, int): - 
static_broadcasted_argnums = (static_broadcasted_argnums + 2,) - elif isinstance(static_broadcasted_argnums, (tuple, list)): - static_broadcasted_argnums = tuple(argnum + 2 for argnum in static_broadcasted_argnums) - assert isinstance(static_broadcasted_argnums, (tuple, list)) - - # reduce function - if reduce_func is None: - reduce_func = jnp.concatenate - - # jit function - func.__call__ = _make_pmap(dyn_vars=dyn_vars, - rand_vars=rand_vars, - func=func, - axis_name=axis_name, - in_axes=in_axes, - out_axes=out_axes, - static_broadcasted_argnums=static_broadcasted_argnums, - devices=devices, - backend=backend, - axis_size=axis_size, - donate_argnums=donate_argnums, - global_arg_shapes=global_arg_shapes, - reduce_func=reduce_func) - return func - - else: - raise errors.BrainPyError(f'Only support instance of {BrainPyObject.__name__}, or a callable function, ' - f'but we got {type(func)}.') diff --git a/brainpy/_src/math/object_transform/tools.py b/brainpy/_src/math/object_transform/tools.py index 7b519590a..632c6d79e 100644 --- a/brainpy/_src/math/object_transform/tools.py +++ b/brainpy/_src/math/object_transform/tools.py @@ -132,19 +132,65 @@ def evaluate_dyn_vars_with_cache( return stack +def _partial_fun2( + fun: Callable, + args: tuple, + kwargs: dict, + static_argnums: Sequence[int] = (), + static_argnames: Sequence[str] = () +): + num_args = len(args) + + # arguments + static_args = dict() + dyn_args = [] + dyn_arg_ids = dict() + static_argnums = list(static_argnums) + dyn_i = 0 + for i in range(num_args): + if i in static_argnums: + static_argnums.remove(i) + static_args[i] = args[i] + else: + dyn_args.append(args[i]) + dyn_arg_ids[i] = dyn_i + dyn_i += 1 + if len(static_argnums) > 0: + raise ValueError(f"Invalid static_argnums: {static_argnums}") + + # keyword arguments + static_kwargs, dyn_kwargs = {}, {} + for k, arg in kwargs.items(): + if k in static_argnames: + static_kwargs[k] = arg + else: + dyn_kwargs[k] = arg + del args, kwargs, static_argnums, static_argnames + + @wraps(fun) + def new_fun(*dynargs, **dynkwargs): + return fun(*[dynargs[dyn_arg_ids[id_]] if id_ in dyn_arg_ids else static_args[id_] for id_ in range(num_args)], + **static_kwargs, + **dynkwargs) + + return new_fun, dyn_args, dyn_kwargs + + def eval_shape( fun: Callable, *args, static_argnums: Sequence[int] = (), static_argnames: Sequence[str] = (), + with_stack: bool = False, **kwargs ): """Compute the shape/dtype of ``fun`` without any FLOPs. Args: fun: The callable function. - *args: - **kwargs: + *args: The positional arguments. + **kwargs: The keyword arguments. + with_stack: Whether evaluate the function within a local variable stack. static_argnums: The static argument indices. static_argnames: The static argument names. 
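
A minimal sketch (not part of the patch) of how the new ``_partial_fun2`` helper above is intended to behave: the arguments named by ``static_argnums``/``static_argnames`` are frozen into the returned closure, and only the dynamic ones are handed back for tracing. The function ``f`` and its argument values are hypothetical:

    from brainpy._src.math.object_transform.tools import _partial_fun2

    def f(x, y, mode):  # 'mode' plays the role of a static (non-traced) argument
      return x + y if mode == 'add' else x - y

    # freeze positional argument 2 ('add'); only x and y stay dynamic
    new_fun, dyn_args, dyn_kwargs = _partial_fun2(f, (1, 2, 'add'), {}, static_argnums=(2,))
    assert new_fun(*dyn_args, **dyn_kwargs) == 3  # same result as f(1, 2, 'add')
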
@@ -153,21 +199,30 @@ def eval_shape( """ # reorganize the function if len(static_argnums) or len(static_argnames): - f2, args, kwargs = _partial_fun(fun, args, kwargs, - static_argnums=static_argnums, - static_argnames=static_argnames) + f2, args, kwargs = _partial_fun2(fun, args, kwargs, static_argnums=static_argnums, static_argnames=static_argnames) else: - f2, args, kwargs = fun, args, kwargs + f2 = fun # evaluate the function fun_in_eval_shape.append(fun) try: - with jax.ensure_compile_time_eval(): + if with_stack: with VariableStack() as stack: if len(fun_in_eval_shape) > 1: - returns = fun(*args, **kwargs) + returns = f2(*args, **kwargs) else: - returns = jax.eval_shape(fun, *args, **kwargs) + returns = jax.eval_shape(f2, *args, **kwargs) + else: + stack = None + if len(fun_in_eval_shape) > 1: + returns = f2(*args, **kwargs) + else: + returns = jax.eval_shape(f2, *args, **kwargs) finally: fun_in_eval_shape.pop() - return stack, returns + del f2 + if with_stack: + return stack, returns + else: + return returns + diff --git a/brainpy/_src/math/object_transform/variables.py b/brainpy/_src/math/object_transform/variables.py index 5014da0bf..b7babae8d 100644 --- a/brainpy/_src/math/object_transform/variables.py +++ b/brainpy/_src/math/object_transform/variables.py @@ -1,4 +1,3 @@ -from contextlib import contextmanager from typing import Optional, Any, List, Callable, Sequence, Union, Dict, Tuple import jax @@ -190,6 +189,14 @@ def remove_by_id(self, *ids, error_when_absent=False): remove_var_by_id = remove_by_id + @classmethod + def num_of_stack(self): + return len(var_stack_list) + + @classmethod + def is_first_stack(self): + return len(var_stack_list) == 0 + def __enter__(self) -> 'VariableStack': self.collect_values() # recollect the original value of each variable var_stack_list.append(self) @@ -210,42 +217,6 @@ def __add__(self, other: dict): var_stack_list: List[VariableStack] = [] -transform_stack: List[Callable] = [] - - -@contextmanager -def new_transform(transform: Any): - transform_stack.append(transform) - try: - yield - finally: - transform_stack.pop() - - -def outermost_stack(): - if len(var_stack_list): - return var_stack_list[0] - else: - return None - - -def outermost_transform(): - if len(transform_stack): - return transform_stack[0] - else: - return None - - -def current_transform_number(): - return len(transform_stack) - - -def _stack_add_read(var: 'Variable'): - pass - - -def _stack_add_write(var: 'Variable'): - pass @register_pytree_node_class diff --git a/brainpy/_src/math/op_register/taichi_aot_based.py b/brainpy/_src/math/op_register/taichi_aot_based.py index 595460ea0..2a8cb3b60 100644 --- a/brainpy/_src/math/op_register/taichi_aot_based.py +++ b/brainpy/_src/math/op_register/taichi_aot_based.py @@ -324,11 +324,12 @@ def _preprocess_kernel_call_gpu( kernel_path = os.path.join(kernels_aot_path, source_md5_encode) # other args + param_total_num = len(ins) + len(outs) in_out_num = [len(ins), len(outs)] - in_out_type_list = [0] * 8 - in_out_dim_count_list = [0] * 8 - in_out_elem_count_list = [0] * 8 - in_out_shape_list = [0] * 64 + in_out_type_list = [0] * param_total_num + in_out_dim_count_list = [0] * param_total_num + in_out_elem_count_list = [0] * param_total_num + in_out_shape_list = [0] * param_total_num * 8 for i, value in enumerate(ins.values()): in_out_type_list[i] = type_number_map[value[0]] diff --git a/brainpy/_src/tests/test_base_classes.py b/brainpy/_src/tests/test_base_classes.py index 9c095a30e..3534f0a48 100644 --- 
a/brainpy/_src/tests/test_base_classes.py +++ b/brainpy/_src/tests/test_base_classes.py @@ -3,6 +3,7 @@ import unittest import brainpy as bp +import brainpy.math as bm class TestDynamicalSystem(unittest.TestCase): @@ -17,4 +18,53 @@ def test_delay(self): runner = bp.DSRunner(net,) runner.run(10.) + bm.clear_buffer_memory() + + def test_receive_update_output(self): + def aft_update(inp): + assert inp is not None + + hh = bp.dyn.HH(1) + hh.add_aft_update('aft_update', aft_update) + bp.share.save(i=0, t=0.) + hh(1.) + + bm.clear_buffer_memory() + + def test_do_not_receive_update_output(self): + def aft_update(): + pass + + hh = bp.dyn.HH(1) + hh.add_aft_update('aft_update', bp.not_receive_update_output(aft_update)) + bp.share.save(i=0, t=0.) + hh(1.) + + bm.clear_buffer_memory() + + def test_not_receive_update_input(self): + def bef_update(): + pass + + hh = bp.dyn.HH(1) + hh.add_bef_update('bef_update', bef_update) + bp.share.save(i=0, t=0.) + hh(1.) + + bm.clear_buffer_memory() + + def test_receive_update_input(self): + def bef_update(inp): + assert inp is not None + + hh = bp.dyn.HH(1) + hh.add_bef_update('bef_update', bp.receive_update_input(bef_update)) + bp.share.save(i=0, t=0.) + hh(1.) + + bm.clear_buffer_memory() + + + + diff --git a/brainpy/_src/tests/test_delay.py b/brainpy/_src/tests/test_delay.py index 20d49937c..b7bd44ead 100644 --- a/brainpy/_src/tests/test_delay.py +++ b/brainpy/_src/tests/test_delay.py @@ -1,13 +1,15 @@ +import unittest + +import jax.numpy as jnp import brainpy as bp -import unittest class TestVarDelay(unittest.TestCase): def test_delay1(self): bp.math.random.seed() a = bp.math.Variable((10, 20)) - delay = bp.VarDelay(a,) + delay = bp.VarDelay(a, ) delay.register_entry('a', 1.) delay.register_entry('b', 2.) delay.register_entry('c', None) @@ -15,8 +17,44 @@ def test_delay1(self): delay.register_entry('c', 10.) bp.math.clear_buffer_memory() + def test_rotation_delay(self): + a = bp.math.Variable((1,)) + rotation_delay = bp.VarDelay(a) + t0 = 0. + t1, n1 = 1., 10 + t2, n2 = 2., 20 + + rotation_delay.register_entry('a', t0) + rotation_delay.register_entry('b', t1) + rotation_delay.register_entry('c', t2) + + print() + for i in range(100): + bp.share.save(i=i) + a.value = jnp.ones((1,)) * i + rotation_delay() + self.assertTrue(jnp.allclose(rotation_delay.at('a'), jnp.ones((1,)) * i)) + self.assertTrue(jnp.allclose(rotation_delay.at('b'), jnp.maximum(jnp.ones((1,)) * i - n1 + 1, 0.))) + self.assertTrue(jnp.allclose(rotation_delay.at('c'), jnp.maximum(jnp.ones((1,)) * i - n2 + 1, 0.))) + bp.math.clear_buffer_memory() - - - - + def test_concat_delay(self): + a = bp.math.Variable((1,)) + rotation_delay = bp.VarDelay(a, method='concat') + t0 = 0. 
+ t1, n1 = 1., 10 + t2, n2 = 2., 20 + + rotation_delay.register_entry('a', t0) + rotation_delay.register_entry('b', t1) + rotation_delay.register_entry('c', t2) + + print() + for i in range(100): + bp.share.save(i=i) + a.value = jnp.ones((1,)) * i + rotation_delay() + self.assertTrue(jnp.allclose(rotation_delay.at('a'), jnp.ones((1,)) * i)) + self.assertTrue(jnp.allclose(rotation_delay.at('b'), jnp.maximum(jnp.ones((1,)) * i - n1 + 1, 0.))) + self.assertTrue(jnp.allclose(rotation_delay.at('c'), jnp.maximum(jnp.ones((1,)) * i - n2 + 1, 0.))) + bp.math.clear_buffer_memory() diff --git a/brainpy/_src/tests/test_mixin.py b/brainpy/_src/tests/test_mixin.py index 962b76cb9..e864fd647 100644 --- a/brainpy/_src/tests/test_mixin.py +++ b/brainpy/_src/tests/test_mixin.py @@ -42,7 +42,7 @@ class TestDelayRegister(unittest.TestCase): def test2(self): bp.share.save(i=0) lif = bp.dyn.Lif(10) - lif.register_local_delay('spike', 'a', 10.) + lif.register_local_delay('spike', 'a', delay_time=10.) data = lif.get_local_delay('spike', 'a') self.assertTrue(bm.allclose(data, bm.zeros(10))) diff --git a/brainpy/_src/tools/functions.py b/brainpy/_src/tools/functions.py new file mode 100644 index 000000000..cbc710dba --- /dev/null +++ b/brainpy/_src/tools/functions.py @@ -0,0 +1,192 @@ +import inspect +from functools import partial +from operator import attrgetter +from types import MethodType + +__all__ = [ + 'compose', 'pipe' +] + + +def identity(x): + """ Identity function. Return x + + >>> identity(3) + 3 + """ + return x + + +def instanceproperty(fget=None, fset=None, fdel=None, doc=None, classval=None): + """ Like @property, but returns ``classval`` when used as a class attribute + + >>> class MyClass(object): + ... '''The class docstring''' + ... @instanceproperty(classval=__doc__) + ... def __doc__(self): + ... return 'An object docstring' + ... @instanceproperty + ... def val(self): + ... return 42 + ... + >>> MyClass.__doc__ + 'The class docstring' + >>> MyClass.val is None + True + >>> obj = MyClass() + >>> obj.__doc__ + 'An object docstring' + >>> obj.val + 42 + """ + if fget is None: + return partial(instanceproperty, fset=fset, fdel=fdel, doc=doc, + classval=classval) + return InstanceProperty(fget=fget, fset=fset, fdel=fdel, doc=doc, + classval=classval) + + +class InstanceProperty(property): + """ Like @property, but returns ``classval`` when used as a class attribute + + Should not be used directly. Use ``instanceproperty`` instead. + """ + + def __init__(self, fget=None, fset=None, fdel=None, doc=None, + classval=None): + self.classval = classval + property.__init__(self, fget=fget, fset=fset, fdel=fdel, doc=doc) + + def __get__(self, obj, type=None): + if obj is None: + return self.classval + return property.__get__(self, obj, type) + + def __reduce__(self): + state = (self.fget, self.fset, self.fdel, self.__doc__, self.classval) + return InstanceProperty, state + + +class Compose(object): + """ A composition of functions + + See Also: + compose + """ + __slots__ = 'first', 'funcs' + + def __init__(self, funcs): + funcs = tuple(reversed(funcs)) + self.first = funcs[0] + self.funcs = funcs[1:] + + def __call__(self, *args, **kwargs): + ret = self.first(*args, **kwargs) + for f in self.funcs: + ret = f(ret) + return ret + + def __getstate__(self): + return self.first, self.funcs + + def __setstate__(self, state): + self.first, self.funcs = state + + @instanceproperty(classval=__doc__) + def __doc__(self): + def composed_doc(*fs): + """Generate a docstring for the composition of fs. 
+ """ + if not fs: + # Argument name for the docstring. + return '*args, **kwargs' + + return '{f}({g})'.format(f=fs[0].__name__, g=composed_doc(*fs[1:])) + + try: + return ( + 'lambda *args, **kwargs: ' + + composed_doc(*reversed((self.first,) + self.funcs)) + ) + except AttributeError: + # One of our callables does not have a `__name__`, whatever. + return 'A composition of functions' + + @property + def __name__(self): + try: + return '_of_'.join( + (f.__name__ for f in reversed((self.first,) + self.funcs)) + ) + except AttributeError: + return type(self).__name__ + + def __repr__(self): + return '{.__class__.__name__}{!r}'.format( + self, tuple(reversed((self.first,) + self.funcs))) + + def __eq__(self, other): + if isinstance(other, Compose): + return other.first == self.first and other.funcs == self.funcs + return NotImplemented + + def __ne__(self, other): + equality = self.__eq__(other) + return NotImplemented if equality is NotImplemented else not equality + + def __hash__(self): + return hash(self.first) ^ hash(self.funcs) + + # Mimic the descriptor behavior of python functions. + # i.e. let Compose be called as a method when bound to a class. + # adapted from + # docs.python.org/3/howto/descriptor.html#functions-and-methods + def __get__(self, obj, objtype=None): + return self if obj is None else MethodType(self, obj) + + # introspection with Signature is only possible from py3.3+ + @instanceproperty + def __signature__(self): + base = inspect.signature(self.first) + last = inspect.signature(self.funcs[-1]) + return base.replace(return_annotation=last.return_annotation) + + __wrapped__ = instanceproperty(attrgetter('first')) + + +def compose(*funcs): + """ Compose functions to operate in series. + + Returns a function that applies other functions in sequence. + + Functions are applied from right to left so that + ``compose(f, g, h)(x, y)`` is the same as ``f(g(h(x, y)))``. + + If no arguments are provided, the identity function (f(x) = x) is returned. + + >>> inc = lambda i: i + 1 + >>> compose(str, inc)(3) + '4' + """ + if not funcs: + return identity + if len(funcs) == 1: + return funcs[0] + else: + return Compose(funcs) + + +def pipe(*funcs): + """ Pipe a value through a sequence of functions + + I.e. 
``pipe(f, g, h)(data)`` is equivalent to ``h(g(f(data)))``
+
+  We think of the value as progressing through a pipe of several
+  transformations, much like pipes in UNIX
+
+
+  >>> double = lambda i: 2 * i
+  >>> pipe(double, str)(3)
+  '6'
+  """
+  return compose(*reversed(funcs))
diff --git a/brainpy/_src/tools/tests/test_functions.py b/brainpy/_src/tools/tests/test_functions.py
new file mode 100644
index 000000000..c285e561a
--- /dev/null
+++ b/brainpy/_src/tools/tests/test_functions.py
@@ -0,0 +1,24 @@
+
+import unittest
+
+import brainpy as bp
+import brainpy.math as bm
+
+
+class TestFunction(unittest.TestCase):
+  def test_compose(self):
+    f = lambda a: a + 1
+    g = lambda a: a * 10
+    fun1 = bp.tools.compose(f, g)
+    fun2 = bp.tools.pipe(g, f)
+
+    arr = bm.random.randn(10)
+    r1 = fun1(arr)
+    r2 = fun2(arr)
+    groundtruth = f(g(arr))
+    self.assertTrue(bm.allclose(r1, r2))
+    self.assertTrue(bm.allclose(r1, groundtruth))
+    bm.clear_buffer_memory()
+
+
+
diff --git a/brainpy/math/compat_pytorch.py b/brainpy/math/compat_pytorch.py
index e4570f6fd..3b0c3f517 100644
--- a/brainpy/math/compat_pytorch.py
+++ b/brainpy/math/compat_pytorch.py
@@ -12,7 +12,7 @@
   arccos as arccos,
   acosh as acosh,
   arccosh as arccosh,
-  add as add,
+  # add as add,
   addcdiv as addcdiv,
   addcmul as addcmul,
   angle as angle,
diff --git a/brainpy/math/oo_transform.py b/brainpy/math/oo_transform.py
index 548a987d0..a488e0742 100644
--- a/brainpy/math/oo_transform.py
+++ b/brainpy/math/oo_transform.py
@@ -58,4 +58,6 @@
 from brainpy._src.math.object_transform.tools import (
   eval_shape as eval_shape,
 )
-
+from brainpy._src.math.object_transform.variables import (
+  VariableStack as VariableStack,
+)
diff --git a/brainpy/tools.py b/brainpy/tools.py
index 0f3a4c0ef..35e98f6d6 100644
--- a/brainpy/tools.py
+++ b/brainpy/tools.py
@@ -43,6 +43,10 @@
 from brainpy._src.tools.install import (
   jaxlib_install_info,
 )

+from brainpy._src.tools.functions import (
+  compose as compose,
+  pipe as pipe,
+)
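The two helpers exported here apply functions in opposite orders. A minimal usage sketch (not part of the patch itself; it assumes this branch of BrainPy is installed):

```python
import brainpy as bp

inc = lambda x: x + 1
double = lambda x: 2 * x

# compose applies right-to-left: compose(f, g)(x) == f(g(x))
assert bp.tools.compose(inc, double)(3) == 7  # inc(double(3))

# pipe applies left-to-right: pipe(f, g)(x) == g(f(x))
assert bp.tools.pipe(inc, double)(3) == 8     # double(inc(3))
```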
diff --git a/brainpylib-changelog.md b/brainpylib-changelog.md
new file mode 100644
index 000000000..bce0ac138
--- /dev/null
+++ b/brainpylib-changelog.md
@@ -0,0 +1,64 @@
+# Release notes (``brainpylib``)
+
+## Version 0.3.0
+
+- Support `brainpy>=2.5.0`
+- Fix bugs on the Windows platform
+- Remove all customized C++ and CUDA operators
+
+## Version 0.2.8
+
+- Support `brainpy>=2.5.0`
+- Fix a bug where the DLL could not be loaded correctly when Windows lacks a C++ runtime environment
+
+## ~~Version 0.2.7 (YANKED)~~
+
+## Version 0.2.6
+
+- Support `brainpy>=2.5.0`
+- Fix bugs in the Taichi call function for single results
+
+## Version 0.2.5
+
+- Add a new Taichi call function for single results on the CPU backend
+
+## Version 0.2.4
+
+- Add Taichi customized operator calls on the arm64 backend
+
+## ~~Version 0.2.3 (YANKED)~~
+
+## Version 0.2.2
+
+- Fix bugs in just-in-time connectivity operators on CPU devices
+
+## Version 0.2.1
+
+- Fix bugs in Taichi AOT calls on the GPU backend by zeroing CUDA arrays with ``cudaMemset()``
+
+## Version 0.2.0
+
+- Add XLA custom calls from [Taichi](https://github.com/taichi-dev/taichi) AOT (ahead-of-time) operators on both CPU and
+  GPU platforms
+
+## Version 0.0.5
+
+- Support operator customization on GPU by ``numba``
+
+## Version 0.0.4
+
+- Support operator customization on CPU by ``numba``
+
+## Version 0.0.3
+
+- Support ``event_sum()`` operator on GPU
+- Support ``event_prod()`` operator on CPU
+- Support ``atomic_sum()`` operator on GPU
+- Support ``atomic_prod()`` operator on CPU and GPU
+
+## Version 0.0.2
+
+- Support ``event_sum()`` operator on CPU
+- Support ``event_sum2()`` operator on CPU
+- Support ``atomic_sum()`` operator on CPU
+
diff --git a/changelog.rst b/changelog.rst
deleted file mode 100644
index c54357f8c..000000000
--- a/changelog.rst
+++ /dev/null
@@ -1,1083 +0,0 @@
-Release notes (brainpy)
-#######################
-
-
-
-
-.. note::
-
-   All history release notes please see `GitHub releases `_.
-
-
-
-
-brainpy 2.2.x
-*************
-
-BrainPy 2.2.x is a complete re-design of the framework,
-tackling the shortcomings of brainpy 2.1.x generation,
-effectively bringing it to research needs and standards.
-
-
-
-Version 2.2.1 (2022.09.09)
-==========================
-
-This release fixes bugs found in the codebase and improves the usability and functions of BrainPy.
-
-Bug fixes
-~~~~~~~~~~~~~~
-
-
-#. Fix the bug of operator customization in ``brainpy.math.XLACustomOp`` and ``brainpy.math.register_op``. Now, it supports operator customization by using NumPy and Numba interface. For instance,
-
-.. code-block:: python
-
-   import brainpy.math as bm
-
-   def abs_eval(events, indices, indptr, post_val, values):
-     return post_val
-
-   def con_compute(outs, ins):
-     post_val = outs
-     events, indices, indptr, _, values = ins
-     for i in range(events.size):
-       if events[i]:
-         for j in range(indptr[i], indptr[i + 1]):
-           index = indices[j]
-           old_value = post_val[index]
-           post_val[index] = values + old_value
-
-   event_sum = bm.XLACustomOp(eval_shape=abs_eval, con_compute=con_compute)
-
-
-#. Fix the bug of ``brainpy.tools.DotDict``. Now, it is compatible with the transformations of JAX. For instance,
-
-.. code-block:: python
-
-   import brainpy as bp
-   from jax import vmap
-
-   @vmap
-   def multiple_run(I):
-     hh = bp.neurons.HH(1)
-     runner = bp.dyn.DSRunner(hh, inputs=('input', I), numpy_mon_after_run=False)
-     runner.run(100.)
-     return runner.mon
-
-   mon = multiple_run(bp.math.arange(2, 10, 2))
-
-New features
-~~~~~~~~~~~~~~
-
-
-#. Add numpy operators ``brainpy.math.mat``\ , ``brainpy.math.matrix``\ , ``brainpy.math.asmatrix``.
-#. Improve translation rules of brainpylib operators, improve its running speeds.
-#. Support ``DSView`` of ``DynamicalSystem`` instance. Now, it supports defining models with a slice view of a DS instance. For example,
-
-.. code-block:: python
-
-   import brainpy as bp
-   import brainpy.math as bm
-
-
-   class EINet_V2(bp.dyn.Network):
-     def __init__(self, scale=1.0, method='exp_auto'):
-       super(EINet_V2, self).__init__()
-
-       # network size
-       num_exc = int(3200 * scale)
-       num_inh = int(800 * scale)
-
-       # neurons
-       self.N = bp.neurons.LIF(num_exc + num_inh,
-                               V_rest=-60., V_th=-50., V_reset=-60., tau=20., tau_ref=5.,
-                               method=method, V_initializer=bp.initialize.Normal(-55., 2.))
-
-       # synapses
-       we = 0.6 / scale  # excitatory synaptic weight (voltage)
-       wi = 6.7 / scale  # inhibitory synaptic weight
-       self.Esyn = bp.synapses.Exponential(pre=self.N[:num_exc], post=self.N,
-                                           conn=bp.connect.FixedProb(0.02),
-                                           g_max=we, tau=5.,
-                                           output=bp.synouts.COBA(E=0.),
-                                           method=method)
-       self.Isyn = bp.synapses.Exponential(pre=self.N[num_exc:], post=self.N,
-                                           conn=bp.connect.FixedProb(0.02),
-                                           g_max=wi, tau=10.,
-                                           output=bp.synouts.COBA(E=-80.),
-                                           method=method)
-
-   net = EINet_V2(scale=1., method='exp_auto')
-   # simulation
-   runner = bp.dyn.DSRunner(
-     net,
-     monitors={'spikes': net.N.spike},
-     inputs=[(net.N.input, 20.)]
-   )
-   runner.run(100.)
- - # visualization - bp.visualize.raster_plot(runner.mon.ts, runner.mon['spikes'], show=True) - - - - -Version 2.2.0 (2022.08.12) -========================== - - - -This release has provided important improvements for BrainPy, including usability, speed, functions, and others. - -Backwards Incompatible changes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - -1. ``brainpy.nn`` module is no longer supported and has been removed since version 2.2.0. Instead, users should use ``brainpy.train`` module for the training of BP algorithms, online learning, or offline learning algorithms, and ``brainpy.algorithms`` module for online / offline training algorithms. -2. The ``update()`` function for the model definition has been changed: - -.. code-block:: - - >>> # 2.1.x - >>> - >>> import brainpy as bp - >>> - >>> class SomeModel(bp.dyn.DynamicalSystem): - >>> def __init__(self, ): - >>> ...... - >>> def update(self, t, dt): - >>> pass - >>> # 2.2.x - >>> - >>> import brainpy as bp - >>> - >>> class SomeModel(bp.dyn.DynamicalSystem): - >>> def __init__(self, ): - >>> ...... - >>> def update(self, tdi): - >>> t, dt = tdi.t, tdi.dt - >>> pass - -where ``tdi`` can be defined with other names, like ``sha``\ , to represent the shared argument across modules. - -Deprecations -~~~~~~~~~~~~~~~~~~~~ - - -#. ``brainpy.dyn.xxx (neurons)`` and ``brainpy.dyn.xxx (synapse)`` are no longer supported. Please use ``brainpy.neurons``\ , ``brainpy.synapses`` modules. -#. ``brainpy.running.monitor`` has been removed. -#. ``brainpy.nn`` module has been removed. - -New features -~~~~~~~~~~~~~~~~~~~~ - - -1. ``brainpy.math.Variable`` receives a ``batch_axis`` setting to represent the batch axis of the data. - -.. code-block:: - - >>> import brainpy.math as bm - >>> a = bm.Variable(bm.zeros((1, 4, 5)), batch_axis=0) - >>> a.value = bm.zeros((2, 4, 5)) # success - >>> a.value = bm.zeros((1, 2, 5)) # failed - MathError: The shape of the original data is (2, 4, 5), while we got (1, 2, 5) with batch_axis=0. - - -2. ``brainpy.train`` provides ``brainpy.train.BPTT`` for back-propagation algorithms, ``brainpy.train.Onlinetrainer`` for online training algorithms, ``brainpy.train.OfflineTrainer`` for offline training algorithms. -3. ``brainpy.Base`` class supports ``_excluded_vars`` setting to ignore variables when retrieving variables by using ``Base.vars()`` method. - -.. code-block:: - - >>> class OurModel(bp.Base): - >>> _excluded_vars = ('a', 'b') - >>> def __init__(self): - >>> super(OurModel, self).__init__() - >>> self.a = bm.Variable(bm.zeros(10)) - >>> self.b = bm.Variable(bm.ones(20)) - >>> self.c = bm.Variable(bm.random.random(10)) - >>> - >>> model = OurModel() - >>> model.vars().keys() - dict_keys(['OurModel0.c']) - - -4. ``brainpy.analysis.SlowPointFinder`` supports directly analyzing an instance of ``brainpy.dyn.DynamicalSystem``. - -.. code-block:: - - >>> hh = bp.neurons.HH(1) - >>> finder = bp.analysis.SlowPointFinder(hh, target_vars={'V': hh.V, 'm': hh.m, 'h': hh.h, 'n': hh.n}) - - -5. ``brainpy.datasets`` supports MNIST, FashionMNIST, and other datasets. -6. Supports defining conductance-based neuron models``. - -.. code-block:: - - >>> class HH(bp.dyn.CondNeuGroup): - >>> def __init__(self, size): - >>> super(HH, self).__init__(size) - >>> - >>> self.INa = channels.INa_HH1952(size, ) - >>> self.IK = channels.IK_HH1952(size, ) - >>> self.IL = channels.IL(size, E=-54.387, g_max=0.03) - - -7. ``brainpy.layers`` module provides commonly used models for DNN and reservoir computing. -8. 
Support composable definition of synaptic models by using ``TwoEndConn``\ , ``SynOut``\ , ``SynSTP`` and ``SynLTP``. - -.. code-block:: - - >>> bp.synapses.Exponential(self.E, self.E, bp.conn.FixedProb(prob), - >>> g_max=0.03 / scale, tau=5, - >>> output=bp.synouts.COBA(E=0.), - >>> stp=bp.synplast.STD()) - - -9. Provide commonly used surrogate gradient function for spiking generation, including - - * ``brainpy.math.spike_with_sigmoid_grad`` - * ``brainpy.math.spike_with_linear_grad`` - * ``brainpy.math.spike_with_gaussian_grad`` - * ``brainpy.math.spike_with_mg_grad`` - -10. Provide shortcuts for GPU memory management via ``brainpy.math.disable_gpu_memory_preallocation()``\ , and ``brainpy.math.clear_buffer_memory()``. - -What's Changed -~~~~~~~~~~~~~~~~~~~~ - - -* fix `#207 `_\ : synapses update first, then neurons, finally delay variables by `@chaoming0625 `_ in `#219 `_ -* docs: add logos by `@ztqakita `_ in `#218 `_ -* Add the biological NMDA model by `@c-xy17 `_ in `#221 `_ -* docs: fix mathjax problem by `@ztqakita `_ in `#222 `_ -* Add the parameter R to the LIF model by `@c-xy17 `_ in `#224 `_ -* new version of brainpy: V2.2.0-rc1 by `@chaoming0625 `_ in `#226 `_ -* update training apis by `@chaoming0625 `_ in `#227 `_ -* Update quickstart and the analysis module by `@c-xy17 `_ in `#229 `_ -* Eseential updates for montors, analysis, losses, and examples by `@chaoming0625 `_ in `#230 `_ -* add numpy op tests by `@ztqakita `_ in `#231 `_ -* Integrated simulation, simulaton and analysis by `@chaoming0625 `_ in `#232 `_ -* update docs by `@chaoming0625 `_ in `#233 `_ -* unify ``brainpy.layers`` with other modules in ``brainpy.dyn`` by `@chaoming0625 `_ in `#234 `_ -* fix bugs by `@chaoming0625 `_ in `#235 `_ -* update apis, docs, examples and others by `@chaoming0625 `_ in `#236 `_ -* fixes by `@chaoming0625 `_ in `#237 `_ -* fix: add dtype promotion = standard by `@ztqakita `_ in `#239 `_ -* updates by `@chaoming0625 `_ in `#240 `_ -* update training docs by `@chaoming0625 `_ in `#241 `_ -* change doc path/organization by `@chaoming0625 `_ in `#242 `_ -* Update advanced docs by `@chaoming0625 `_ in `#243 `_ -* update quickstart docs & enable jit error checking by `@chaoming0625 `_ in `#244 `_ -* update apis and examples by `@chaoming0625 `_ in `#245 `_ -* update apis and tests by `@chaoming0625 `_ in `#246 `_ -* Docs update and bugs fixed by `@ztqakita `_ in `#247 `_ -* version 2.2.0 by `@chaoming0625 `_ in `#248 `_ -* add norm and pooling & fix bugs in operators by `@ztqakita `_ in `#249 `_ - -**Full Changelog**: `V2.1.12...V2.2.0 `_ - - - - -brainpy 2.1.x -************* - - - -Version 2.1.12 (2022.05.17) -=========================== - - -Highlights -~~~~~~~~~~ - -This release is excellent. We have made important improvements. - -1. 
We provide dozens of random sampling in NumPy which are not - supportted in JAX, such as ``brainpy.math.random.bernoulli``, - ``brainpy.math.random.lognormal``, ``brainpy.math.random.binomial``, - ``brainpy.math.random.chisquare``, ``brainpy.math.random.dirichlet``, - ``brainpy.math.random.geometric``, ``brainpy.math.random.f``, - ``brainpy.math.random.hypergeometric``, - ``brainpy.math.random.logseries``, - ``brainpy.math.random.multinomial``, - ``brainpy.math.random.multivariate_normal``, - ``brainpy.math.random.negative_binomial``, - ``brainpy.math.random.noncentral_chisquare``, - ``brainpy.math.random.noncentral_f``, ``brainpy.math.random.power``, - ``brainpy.math.random.rayleigh``, ``brainpy.math.random.triangular``, - ``brainpy.math.random.vonmises``, ``brainpy.math.random.wald``, - ``brainpy.math.random.weibull`` -2. make efficient checking on numerical values. Instead of direct - ``id_tap()`` checking which has large overhead, currently - ``brainpy.tools.check_erro_in_jit()`` is highly efficient. -3. Fix ``JaxArray`` operator errors on ``None`` -4. improve oo-to-function transformation speeds -5. ``io`` works: ``.save_states()`` and ``.load_states()`` - -What’s Changed -~~~~~~~~~~~~~~ - -- support dtype setting in array interchange functions by - [@chaoming0625](https://github.com/chaoming0625) in - `#209 `__ -- fix `#144 `__: - operations on None raise errors by - [@chaoming0625](https://github.com/chaoming0625) in - `#210 `__ -- add tests and new functions for random sampling by - [@c-xy17](https://github.com/c-xy17) in - `#213 `__ -- feat: fix ``io`` for brainpy.Base by - [@chaoming0625](https://github.com/chaoming0625) in - `#211 `__ -- update advanced tutorial documentation by - [@chaoming0625](https://github.com/chaoming0625) in - `#212 `__ -- fix `#149 `__ - (dozens of random samplings in NumPy) and fix JaxArray op errors by - [@chaoming0625](https://github.com/chaoming0625) in - `#216 `__ -- feat: efficient checking on numerical values by - [@chaoming0625](https://github.com/chaoming0625) in - `#217 `__ - -**Full Changelog**: -`V2.1.11...V2.1.12 `__ - - - -Version 2.1.11 (2022.05.15) -=========================== - - -What's Changed -~~~~~~~~~~~~~~ - -* fix: cross-correlation bug by `@ztqakita `_ in `#201 `_ -* update apis, test and docs of numpy ops by `@chaoming0625 `_ in `#202 `_ -* docs: add sphinx_book_theme by `@ztqakita `_ in `#203 `_ -* fix: add requirements-doc.txt by `@ztqakita `_ in `#204 `_ -* update control flow, integrators, operators, and docs by `@chaoming0625 `_ in `#205 `_ -* improve oo-to-function transformation speed by `@chaoming0625 `_ in `#208 `_ - -**Full Changelog**\ : `V2.1.10...V2.1.11 `_ - - - -Version 2.1.10 (2022.05.05) -=========================== - - -What's Changed -~~~~~~~~~~~~~~ - -* update control flow APIs and Docs by `@chaoming0625 `_ in `#192 `_ -* doc: update docs of dynamics simulation by `@chaoming0625 `_ in `#193 `_ -* fix `#125 `_: add channel models and two-compartment Pinsky-Rinzel model by `@chaoming0625 `_ in `#194 `_ -* JIT errors do not change Variable values by `@chaoming0625 `_ in `#195 `_ -* fix a bug in math.activations.py by `@c-xy17 `_ in `#196 `_ -* Functionalinaty improvements by `@chaoming0625 `_ in `#197 `_ -* update rate docs by `@chaoming0625 `_ in `#198 `_ -* update brainpy.dyn doc by `@chaoming0625 `_ in `#199 `_ - -**Full Changelog**\ : `V2.1.8...V2.1.10 `_ - - - -Version 2.1.8 (2022.04.26) -========================== - - -What's Changed -~~~~~~~~~~~~~~ - -* Fix `#120 `_ by `@chaoming0625 `_ in `#178 `_ -* 
feat: brainpy.Collector supports addition and subtraction by `@chaoming0625 `_ in `#179 `_ -* feat: delay variables support "indices" and "reset()" function by `@chaoming0625 `_ in `#180 `_ -* Support reset functions in neuron and synapse models by `@chaoming0625 `_ in `#181 `_ -* ``update()`` function on longer need ``_t`` and ``_dt`` by `@chaoming0625 `_ in `#183 `_ -* small updates by `@chaoming0625 `_ in `#188 `_ -* feat: easier control flows with ``brainpy.math.ifelse`` by `@chaoming0625 `_ in `#189 `_ -* feat: update delay couplings of ``DiffusiveCoupling`` and ``AdditiveCouping`` by `@chaoming0625 `_ in `#190 `_ -* update version and changelog by `@chaoming0625 `_ in `#191 `_ - -**Full Changelog**\ : `V2.1.7...V2.1.8 `_ - - - -Version 2.1.7 (2022.04.22) -========================== - - -What's Changed -~~~~~~~~~~~~~~ - -* synapse models support heterogeneuos weights by `@chaoming0625 `_ in `#170 `_ -* more efficient synapse implementation by `@chaoming0625 `_ in `#171 `_ -* fix input models in brainpy.dyn by `@chaoming0625 `_ in `#172 `_ -* fix: np array astype by `@ztqakita `_ in `#173 `_ -* update README: 'brain-py' to 'brainpy' by `@chaoming0625 `_ in `#174 `_ -* fix: fix the updating rules in the STP model by `@c-xy17 `_ in `#176 `_ -* Updates and fixes by `@chaoming0625 `_ in `#177 `_ - -**Full Changelog**\ : `V2.1.5...V2.1.7 `_ - - -Version 2.1.5 (2022.04.18) -========================== - - -What's Changed -~~~~~~~~~~~~~~ - -* ``brainpy.math.random.shuffle`` is numpy like by `@chaoming0625 `_ in `#153 `_ -* update LICENSE by `@chaoming0625 `_ in `#155 `_ -* docs: add m1 warning by `@ztqakita `_ in `#154 `_ -* compatible apis of 'brainpy.math' with those of 'jax.numpy' in most modules by `@chaoming0625 `_ in `#156 `_ -* Important updates by `@chaoming0625 `_ in `#157 `_ -* Updates by `@chaoming0625 `_ in `#159 `_ -* Add LayerNorm, GroupNorm, and InstanceNorm as nn_nodes in normalization.py by `@c-xy17 `_ in `#162 `_ -* feat: add conv & pooling nodes by `@ztqakita `_ in `#161 `_ -* fix: update setup.py by `@ztqakita `_ in `#163 `_ -* update setup.py by `@chaoming0625 `_ in `#165 `_ -* fix: change trigger condition by `@ztqakita `_ in `#166 `_ -* fix: add build_conn() function by `@ztqakita `_ in `#164 `_ -* update synapses by `@chaoming0625 `_ in `#167 `_ -* get the deserved name: brainpy by `@chaoming0625 `_ in `#168 `_ -* update tests by `@chaoming0625 `_ in `#169 `_ - -**Full Changelog**\ : `V2.1.4...V2.1.5 `_ - - - -Version 2.1.4 (2022.04.04) -========================== - - -What's Changed -~~~~~~~~~~~~~~ - -* fix doc parsing bug by `@chaoming0625 `_ in `#127 `_ -* Update overview_of_dynamic_model.ipynb by `@c-xy17 `_ in `#129 `_ -* Reorganization of ``brainpylib.custom_op`` and adding interface in ``brainpy.math`` by `@ztqakita `_ in `#128 `_ -* Fix: modify ``register_op`` and brainpy.math interface by `@ztqakita `_ in `#130 `_ -* new features about RNN training and delay differential equations by `@chaoming0625 `_ in `#132 `_ -* Fix `#123 `_\ : Add low-level operators docs and modify register_op by `@ztqakita `_ in `#134 `_ -* feat: add generate_changelog by `@ztqakita `_ in `#135 `_ -* fix `#133 `_\ , support batch size training with offline algorithms by `@chaoming0625 `_ in `#136 `_ -* fix `#84 `_\ : support online training algorithms by `@chaoming0625 `_ in `#137 `_ -* feat: add the batch normalization node by `@c-xy17 `_ in `#138 `_ -* fix: fix shape checking error by `@chaoming0625 `_ in `#139 `_ -* solve `#131 `_\ , support efficient synaptic computation for special 
connection types by `@chaoming0625 `_ in `#140 `_ -* feat: update the API and test for batch normalization by `@c-xy17 `_ in `#142 `_ -* Node is default trainable by `@chaoming0625 `_ in `#143 `_ -* Updates training apis and docs by `@chaoming0625 `_ in `#145 `_ -* fix: add dependencies and update version by `@ztqakita `_ in `#147 `_ -* update requirements by `@chaoming0625 `_ in `#146 `_ -* data pass of the Node is default SingleData by `@chaoming0625 `_ in `#148 `_ - -**Full Changelog**\ : `V2.1.3...V2.1.4 `_ - - - -Version 2.1.3 (2022.03.27) -========================== - -This release improves the functionality and usability of BrainPy. Core changes include - -* support customization of low-level operators by using Numba -* fix bugs - -What's Changed -~~~~~~~~~~~~~~ - -* Provide custom operators written in numba for jax jit by `@ztqakita `_ in `#122 `_ -* fix DOGDecay bugs, add more features by `@chaoming0625 `_ in `#124 `_ -* fix bugs by `@chaoming0625 `_ in `#126 `_ - -**Full Changelog** : `V2.1.2...V2.1.3 `_ - - - - -Version 2.1.2 (2022.03.23) -========================== - -This release improves the functionality and usability of BrainPy. Core changes include - -- support rate-based whole-brain modeling -- add more neuron models, including rate neurons/synapses -- support Python 3.10 -- improve delays etc. APIs - - -What's Changed -~~~~~~~~~~~~~~ - -* fix matplotlib dependency on "brainpy.analysis" module by `@chaoming0625 `_ in `#110 `_ -* Sync master to brainpy-2.x branch by `@ztqakita `_ in `#111 `_ -* add py3.6 test & delete multiple macos env by `@ztqakita `_ in `#112 `_ -* Modify ci by `@ztqakita `_ in `#113 `_ -* Add py3.10 test by `@ztqakita `_ in `#115 `_ -* update python version by `@chaoming0625 `_ in `#114 `_ -* add brainpylib mac py3.10 by `@ztqakita `_ in `#116 `_ -* Enhance measure/input/brainpylib by `@chaoming0625 `_ in `#117 `_ -* fix `#105 `_\ : Add customize connections docs by `@ztqakita `_ in `#118 `_ -* fix bugs by `@chaoming0625 `_ in `#119 `_ -* Whole brain modeling by `@chaoming0625 `_ in `#121 `_ - -**Full Changelog**: `V2.1.1...V2.1.2 `_ - - -Version 2.1.1 (2022.03.18) -========================== - -This release continues to update the functionality of BrainPy. Core changes include - -- numerical solvers for fractional differential equations -- more standard ``brainpy.nn`` interfaces - - -New Features -~~~~~~~~~~~~ - -- Numerical solvers for fractional differential equations - - ``brainpy.fde.CaputoEuler`` - - ``brainpy.fde.CaputoL1Schema`` - - ``brainpy.fde.GLShortMemory`` -- Fractional neuron models - - ``brainpy.dyn.FractionalFHR`` - - ``brainpy.dyn.FractionalIzhikevich`` -- support ``shared_kwargs`` in `RNNTrainer` and `RNNRunner` - - -Version 2.1.0 (2022.03.14) -========================== - - -Highlights -~~~~~~~~~~ - -We are excited to announce the release of BrainPy 2.1.0. This release is composed of nearly -270 commits since 2.0.2, made by `Chaoming Wang `_, -`Xiaoyu Chen `_, and `Tianqiu Zhang `_ . - -BrainPy 2.1.0 updates are focused on improving usability, functionality, and stability of BrainPy. -Highlights of version 2.1.0 include: - -- New module ``brainpy.dyn`` for dynamics building and simulation. It is composed of many - neuron models, synapse models, and others. -- New module ``brainpy.nn`` for neural network building and training. It supports to - define reservoir models, artificial neural networks, ridge regression training, - and back-propagation through time training. 
-- New module ``brainpy.datasets`` for convenient dataset construction and initialization. -- New module ``brainpy.integrators.dde`` for numerical integration of delay differential equations. -- Add more numpy-like operators in ``brainpy.math`` module. -- Add automatic continuous integration on Linux, Windows, and MacOS platforms. -- Fully update brainpy documentation. -- Fix bugs on ``brainpy.analysis`` and ``brainpy.math.autograd`` - - -Incompatible changes -~~~~~~~~~~~~~~~~~~~~ - -- Remove ``brainpy.math.numpy`` module. -- Remove numba requirements -- Remove matplotlib requirements -- Remove `steps` in ``brainpy.dyn.DynamicalSystem`` -- Remove travis CI - - -New Features -~~~~~~~~~~~~ - -- ``brainpy.ddeint`` for numerical integration of delay differential equations, - the supported methods include: - - Euler - - MidPoint - - Heun2 - - Ralston2 - - RK2 - - RK3 - - Heun3 - - Ralston3 - - SSPRK3 - - RK4 - - Ralston4 - - RK4Rule38 -- set default int/float/complex types - - ``brainpy.math.set_dfloat()`` - - ``brainpy.math.set_dint()`` - - ``brainpy.math.set_dcomplex()`` -- Delay variables - - ``brainpy.math.FixedLenDelay`` - - ``brainpy.math.NeutralDelay`` -- Dedicated operators - - ``brainpy.math.sparse_matmul()`` -- More numpy-like operators -- Neural network building ``brainpy.nn`` -- Dynamics model building and simulation ``brainpy.dyn`` - - -Version 2.0.2 (2022.02.11) -========================== - -There are important updates by `Chaoming Wang `_ -in BrainPy 2.0.2. - -- provide ``pre2post_event_prod`` operator -- support array creation from a list/tuple of JaxArray in ``brainpy.math.asarray`` and ``brainpy.math.array`` -- update ``brainpy.ConstantDelay``, add ``.latest`` and ``.oldest`` attributes -- add ``brainpy.IntegratorRunner`` support for efficient simulation of brainpy integrators -- support auto finding of RandomState when JIT SDE integrators -- fix bugs in SDE ``exponential_euler`` method -- move ``parallel`` running APIs into ``brainpy.simulation`` -- add ``brainpy.math.syn2post_mean``, ``brainpy.math.syn2post_softmax``, - ``brainpy.math.pre2post_mean`` and ``brainpy.math.pre2post_softmax`` operators - - - -Version 2.0.1 (2022.01.31) -========================== - -Today we release BrainPy 2.0.1. This release is composed of over -70 commits since 2.0.0, made by `Chaoming Wang `_, -`Xiaoyu Chen `_, and -`Tianqiu Zhang `_ . - -BrainPy 2.0.0 updates are focused on improving documentation and operators. -Core changes include: - -- Improve ``brainpylib`` operators -- Complete documentation for programming system -- Add more numpy APIs -- Add ``jaxfwd`` in autograd module -- And other changes - - -Version 2.0.0.1 (2022.01.05) -============================ - -- Add progress bar in ``brainpy.StructRunner`` - - -Version 2.0.0 (2021.12.31) -========================== - -Start a new version of BrainPy. - -Highlight -~~~~~~~~~ - -We are excited to announce the release of BrainPy 2.0.0. This release is composed of over -260 commits since 1.1.7, made by `Chaoming Wang `_, -`Xiaoyu Chen `_, and `Tianqiu Zhang `_ . - -BrainPy 2.0.0 updates are focused on improving performance, usability and consistence of BrainPy. -All the computations are migrated into JAX. Model ``building``, ``simulation``, ``training`` -and ``analysis`` are all based on JAX. Highlights of version 2.0.0 include: - -- `brainpylib `_ are provided to dedicated operators for - brain dynamics programming -- Connection APIs in ``brainpy.conn`` module are more efficient. 
-- Update analysis tools for low-dimensional and high-dimensional systems in ``brainpy.analysis`` module. -- Support more general Exponential Euler methods based on automatic differentiation. -- Improve the usability and consistence of ``brainpy.math`` module. -- Remove JIT compilation based on Numba. -- Separate brain building with brain simulation. - - -Incompatible changes -~~~~~~~~~~~~~~~~~~~~ - -- remove ``brainpy.math.use_backend()`` -- remove ``brainpy.math.numpy`` module -- no longer support ``.run()`` in ``brainpy.DynamicalSystem`` (see New Features) -- remove ``brainpy.analysis.PhasePlane`` (see New Features) -- remove ``brainpy.analysis.Bifurcation`` (see New Features) -- remove ``brainpy.analysis.FastSlowBifurcation`` (see New Features) - - -New Features -~~~~~~~~~~~~ - -- Exponential Euler method based on automatic differentiation - - ``brainpy.ode.ExpEulerAuto`` -- Numerical optimization based low-dimensional analyzers: - - ``brainpy.analysis.PhasePlane1D`` - - ``brainpy.analysis.PhasePlane2D`` - - ``brainpy.analysis.Bifurcation1D`` - - ``brainpy.analysis.Bifurcation2D`` - - ``brainpy.analysis.FastSlow1D`` - - ``brainpy.analysis.FastSlow2D`` -- Numerical optimization based high-dimensional analyzer: - - ``brainpy.analysis.SlowPointFinder`` -- Dedicated operators in ``brainpy.math`` module: - - ``brainpy.math.pre2post_event_sum`` - - ``brainpy.math.pre2post_sum`` - - ``brainpy.math.pre2post_prod`` - - ``brainpy.math.pre2post_max`` - - ``brainpy.math.pre2post_min`` - - ``brainpy.math.pre2syn`` - - ``brainpy.math.syn2post`` - - ``brainpy.math.syn2post_prod`` - - ``brainpy.math.syn2post_max`` - - ``brainpy.math.syn2post_min`` -- Conversion APIs in ``brainpy.math`` module: - - ``brainpy.math.as_device_array()`` - - ``brainpy.math.as_variable()`` - - ``brainpy.math.as_jaxarray()`` -- New autograd APIs in ``brainpy.math`` module: - - ``brainpy.math.vector_grad()`` -- Simulation runners: - - ``brainpy.ReportRunner`` - - ``brainpy.StructRunner`` - - ``brainpy.NumpyRunner`` -- Commonly used models in ``brainpy.models`` module - - ``brainpy.models.LIF`` - - ``brainpy.models.Izhikevich`` - - ``brainpy.models.AdExIF`` - - ``brainpy.models.SpikeTimeInput`` - - ``brainpy.models.PoissonInput`` - - ``brainpy.models.DeltaSynapse`` - - ``brainpy.models.ExpCUBA`` - - ``brainpy.models.ExpCOBA`` - - ``brainpy.models.AMPA`` - - ``brainpy.models.GABAa`` -- Naming cache clean: ``brainpy.clear_name_cache`` -- add safe in-place operations of ``update()`` method and ``.value`` assignment for JaxArray - - -Documentation -~~~~~~~~~~~~~ - -- Complete tutorials for quickstart -- Complete tutorials for dynamics building -- Complete tutorials for dynamics simulation -- Complete tutorials for dynamics training -- Complete tutorials for dynamics analysis -- Complete tutorials for API documentation - - -brainpy 1.1.x -************* - - -If you are using ``brainpy==1.x``, you can find *documentation*, *examples*, and *models* through the following links: - -- **Documentation:** https://brainpy.readthedocs.io/en/brainpy-1.x/ -- **Examples from papers**: https://brainpy-examples.readthedocs.io/en/brainpy-1.x/ -- **Canonical brain models**: https://brainmodels.readthedocs.io/en/brainpy-1.x/ - - -Version 1.1.7 (2021.12.13) -========================== - -- fix bugs on ``numpy_array()`` conversion in `brainpy.math.utils` module - - -Version 1.1.5 (2021.11.17) -========================== - -**API changes:** - -- fix bugs on ndarray import in `brainpy.base.function.py` -- convenient 'get_param' interface 
`brainpy.simulation.layers` -- add more weight initialization methods - -**Doc changes:** - -- add more examples in README - - -Version 1.1.4 -============= - -**API changes:** - -- add ``.struct_run()`` in DynamicalSystem -- add ``numpy_array()`` conversion in `brainpy.math.utils` module -- add ``Adagrad``, ``Adadelta``, ``RMSProp`` optimizers -- remove `setting` methods in `brainpy.math.jax` module -- remove import jax in `brainpy.__init__.py` and enable jax setting, including - - - ``enable_x64()`` - - ``set_platform()`` - - ``set_host_device_count()`` -- enable ``b=None`` as no bias in `brainpy.simulation.layers` -- set `int_` and `float_` as default 32 bits -- remove ``dtype`` setting in Initializer constructor - -**Doc changes:** - -- add ``optimizer`` in "Math Foundation" -- add ``dynamics training`` docs -- improve others - - -Version 1.1.3 -============= - -- fix bugs of JAX parallel API imports -- fix bugs of `post_slice` structure construction -- update docs - - -Version 1.1.2 -============= - -- add ``pre2syn`` and ``syn2post`` operators -- add `verbose` and `check` option to ``Base.load_states()`` -- fix bugs on JIT DynamicalSystem (numpy backend) - - -Version 1.1.1 -============= - -- fix bugs on symbolic analysis: model trajectory -- change `absolute` access in the variable saving and loading to the `relative` access -- add UnexpectedTracerError hints in JAX transformation functions - - -Version 1.1.0 (2021.11.08) -========================== - -This package releases a new version of BrainPy. - -Highlights of core changes: - -``math`` module -~~~~~~~~~~~~~~~ - -- support numpy backend -- support JAX backend -- support ``jit``, ``vmap`` and ``pmap`` on class objects on JAX backend -- support ``grad``, ``jacobian``, ``hessian`` on class objects on JAX backend -- support ``make_loop``, ``make_while``, and ``make_cond`` on JAX backend -- support ``jit`` (based on numba) on class objects on numpy backend -- unified numpy-like ndarray operation APIs -- numpy-like random sampling APIs -- FFT functions -- gradient descent optimizers -- activation functions -- loss function -- backend settings - - -``base`` module -~~~~~~~~~~~~~~~ - -- ``Base`` for whole Version ecosystem -- ``Function`` to wrap functions -- ``Collector`` and ``TensorCollector`` to collect variables, integrators, nodes and others - - -``integrators`` module -~~~~~~~~~~~~~~~~~~~~~~ - -- class integrators for ODE numerical methods -- class integrators for SDE numerical methods - -``simulation`` module -~~~~~~~~~~~~~~~~~~~~~ - -- support modular and composable programming -- support multi-scale modeling -- support large-scale modeling -- support simulation on GPUs -- fix bugs on ``firing_rate()`` -- remove ``_i`` in ``update()`` function, replace ``_i`` with ``_dt``, - meaning the dynamic system has the canonic equation form - of :math:`dx/dt = f(x, t, dt)` -- reimplement the ``input_step`` and ``monitor_step`` in a more intuitive way -- support to set `dt` in the single object level (i.e., single instance of DynamicSystem) -- common used DNN layers -- weight initializations -- refine synaptic connections - - -brainpy 1.0.x -************* - -Version 1.0.3 (2021.08.18) -========================== - -Fix bugs on - -- firing rate measurement -- stability analysis - - -Version 1.0.2 -============= - -This release continues to improve the user-friendliness. 
- -Highlights of core changes: - -* Remove support for Numba-CUDA backend -* Super initialization `super(XXX, self).__init__()` can be done at anywhere - (not required to add at the bottom of the `__init__()` function). -* Add the output message of the step function running error. -* More powerful support for Monitoring -* More powerful support for running order scheduling -* Remove `unsqueeze()` and `squeeze()` operations in ``brainpy.ops`` -* Add `reshape()` operation in ``brainpy.ops`` -* Improve docs for numerical solvers -* Improve tests for numerical solvers -* Add keywords checking in ODE numerical solvers -* Add more unified operations in brainpy.ops -* Support "@every" in steps and monitor functions -* Fix ODE solver bugs for class bounded function -* Add build phase in Monitor - - -Version 1.0.1 -============= - -- Fix bugs - - -Version 1.0.0 -============= - -- **NEW VERSION OF BRAINPY** -- Change the coding style into the object-oriented programming -- Systematically improve the documentation - - -brainpy 0.x -*********** - -Version 0.3.5 -============= - -- Add 'timeout' in sympy solver in neuron dynamics analysis -- Reconstruct and generalize phase plane analysis -- Generalize the repeat mode of ``Network`` to different running duration between two runs -- Update benchmarks -- Update detailed documentation - - -Version 0.3.1 -============= - -- Add a more flexible way for NeuState/SynState initialization -- Fix bugs of "is_multi_return" -- Add "hand_overs", "requires" and "satisfies". -- Update documentation -- Auto-transform `range` to `numba.prange` -- Support `_obj_i`, `_pre_i`, `_post_i` for more flexible operation in scalar-based models - - - -Version 0.3.0 -============= - -Computation API -~~~~~~~~~~~~~~~ - -- Rename "brainpy.numpy" to "brainpy.backend" -- Delete "pytorch", "tensorflow" backends -- Add "numba" requirement -- Add GPU support - -Profile setting -~~~~~~~~~~~~~~~ - -- Delete "backend" profile setting, add "jit" - -Core systems -~~~~~~~~~~~~ - -- Delete "autopepe8" requirement -- Delete the format code prefix -- Change keywords "_t_, _dt_, _i_" to "_t, _dt, _i" -- Change the "ST" declaration out of "requires" -- Add "repeat" mode run in Network -- Change "vector-based" to "mode" in NeuType and SynType definition - -Package installation -~~~~~~~~~~~~~~~~~~~~ - -- Remove "pypi" installation, installation now only rely on "conda" - - - -Version 0.2.4 -============= - -API changes -~~~~~~~~~~~ - -- Fix bugs - - -Version 0.2.3 -============= - -API changes -~~~~~~~~~~~ - -- Add "animate_1D" in ``visualization`` module -- Add "PoissonInput", "SpikeTimeInput" and "FreqInput" in ``inputs`` module -- Update phase_portrait_analyzer.py - - -Models and examples -~~~~~~~~~~~~~~~~~~~ - -- Add CANN examples - - -Version 0.2.2 -============= - -API changes -~~~~~~~~~~~ - -- Redesign visualization -- Redesign connectivity -- Update docs - - -Version 0.2.1 -============= - -API changes -~~~~~~~~~~~ - -- Fix bugs in `numba import` -- Fix bugs in `numpy` mode with `scalar` model - - -Version 0.2.0 -============= - -API changes -~~~~~~~~~~~ - -- For computation: ``numpy``, ``numba`` -- For model definition: ``NeuType``, ``SynConn`` -- For model running: ``Network``, ``NeuGroup``, ``SynConn``, ``Runner`` -- For numerical integration: ``integrate``, ``Integrator``, ``DiffEquation`` -- For connectivity: ``One2One``, ``All2All``, ``GridFour``, ``grid_four``, - ``GridEight``, ``grid_eight``, ``GridN``, ``FixedPostNum``, ``FixedPreNum``, - ``FixedProb``, ``GaussianProb``, 
``GaussianWeight``, ``DOG`` -- For visualization: ``plot_value``, ``plot_potential``, ``plot_raster``, - ``animation_potential`` -- For measurement: ``cross_correlation``, ``voltage_fluctuation``, - ``raster_plot``, ``firing_rate`` -- For inputs: ``constant_current``, ``spike_current``, ``ramp_current``. - - -Models and examples -~~~~~~~~~~~~~~~~~~~ - -- Neuron models: ``HH model``, ``LIF model``, ``Izhikevich model`` -- Synapse models: ``AMPA``, ``GABA``, ``NMDA``, ``STP``, ``GapJunction`` -- Network models: ``gamma oscillation`` - diff --git a/docs/advanced_tutorials.rst b/docs/advanced_tutorials.rst index 5c8cba0fd..0b78315ab 100644 --- a/docs/advanced_tutorials.rst +++ b/docs/advanced_tutorials.rst @@ -3,13 +3,52 @@ Advanced Tutorials This section contains tutorials that illustrate more advanced features of BrainPy. +Advanced Math +------------- .. toctree:: - :maxdepth: 2 + :maxdepth: 1 + + tutorial_advanced/compilation.ipynb + tutorial_advanced/differentiation.ipynb + + +Interoperation +-------------- + +.. toctree:: + :maxdepth: 1 + + tutorial_advanced/integrate_flax_into_brainpy.ipynb + tutorial_advanced/integrate_bp_lif_into_flax.ipynb + tutorial_advanced/integrate_bp_convlstm_into_flax.ipynb + + +Brain Dynamics Dedicated Operators +---------------------------------- + +.. toctree:: + :maxdepth: 1 + + tutorial_advanced/operator_custom_with_numba.ipynb + tutorial_advanced/operator_custom_with_taichi.ipynb + + +Developer Guides +---------------- + +.. toctree:: + :maxdepth: 1 + + tutorial_advanced/contributing.md + + +Others +------ + +.. toctree:: + :maxdepth: 1 + + tutorial_advanced/advanced_lowdim_analysis.ipynb - tutorial_advanced/1_advanced_math.rst - tutorial_advanced/2_interoperation.rst - tutorial_advanced/3_dedicated_operators.rst - tutorial_advanced/4_developer_guides.rst - tutorial_advanced/5_others.rst diff --git a/docs/api.rst b/docs/api.rst index 076ce48c9..4e0bc42d1 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -4,7 +4,8 @@ API Documentation .. toctree:: :maxdepth: 1 - apis/auto/changelog.rst + apis/auto/brainpy-changelog.md + apis/auto/brainpylib-changelog.md apis/brainpy.rst apis/math.rst apis/dnn.rst diff --git a/docs/apis/brainpy.math.oo_transform.rst b/docs/apis/brainpy.math.oo_transform.rst index 754e0d81d..9ed9cf46a 100644 --- a/docs/apis/brainpy.math.oo_transform.rst +++ b/docs/apis/brainpy.math.oo_transform.rst @@ -77,4 +77,5 @@ Helpers for Object-oriented Transformations :template: classtemplate.rst eval_shape + VariableStack diff --git a/docs/conf.py b/docs/conf.py index 19b1ab5bc..1ff612cb0 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -40,7 +40,8 @@ # sys.exit() changelogs = [ - ('../changelog.rst', 'apis/auto/changelog.rst'), + ('../brainpy-changelog.md', 'apis/auto/brainpy-changelog.md'), + ('../brainpylib-changelog.md', 'apis/auto/brainpylib-changelog.md'), ] for source, dest in changelogs: if os.path.exists(dest): diff --git a/docs/index.rst b/docs/index.rst index 732b27aa2..00271b41c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -17,21 +17,28 @@ Installation .. code-block:: bash - pip install -U brainpy brainpylib # windows, linux, macos + pip install -U brainpy[cpu] # windows, linux, macos - .. tab-item:: GPU (CUDA-11x) + .. tab-item:: GPU (CUDA 11.0) .. code-block:: bash - pip install -U brainpy brainpylib-cu11x # only on linux + pip install -U brainpy[cuda11] -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html - .. tab-item:: GPU (CUDA-12x) + .. tab-item:: GPU (CUDA 12.0) .. 
code-block:: bash

-         pip install -U brainpy brainpylib-cu12x # only on linux
+         pip install -U brainpy[cuda12] -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html

-For more information about supported accelerators and platforms, and for other installation details, please see `installation `_ section.
+    .. tab-item:: TPU
+
+       .. code-block:: bash
+
+          pip install -U brainpy[tpu] -f https://storage.googleapis.com/jax-releases/libtpu_releases.html
+
+
+For more information, please see `installation `_ section.

 ----

diff --git a/docs/quickstart/installation.rst b/docs/quickstart/installation.rst
index 46ce3822f..6931a1e3d 100644
--- a/docs/quickstart/installation.rst
+++ b/docs/quickstart/installation.rst
@@ -10,8 +10,18 @@ Installation
    Linux, and MacOS. It only relies on Python libraries.

-Minimum requirements
---------------------
+Minimum requirements (without dependencies)
+-------------------------------------------
+
+To install brainpy with minimum requirements (assuming ``jax`` and ``jaxlib`` have already been installed), you can use:
+
+.. code-block:: bash
+
+   pip install brainpy
+
+
+Minimum requirements (with dependencies)
+----------------------------------------

 To install brainpy with minimum requirements (only depends on ``jax``), you can use:

@@ -21,8 +31,12 @@ To install brainpy with minimum requirements (only depends on ``jax``), you can

    # or

-   pip install brainpy[cuda_mini] -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html # for GPU (Linux only)
+   pip install brainpy[cuda11_mini] -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html # for CUDA 11.0
+   pip install brainpy[cuda12_mini] -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html # for CUDA 12.0
+
+   # or
+
+   pip install brainpy[tpu] -f https://storage.googleapis.com/jax-releases/libtpu_releases.html # for google TPU


 CPU with all dependencies
@@ -49,32 +63,36 @@ To install a GPU-only version of BrainPy, you can run



-``brainpylib``
---------------
+TPU with all dependencies
+-------------------------

+BrainPy supports Google Cloud TPU. To install BrainPy along with appropriate versions of jax,
+you can run the following in your cloud TPU VM:

-``brainpylib`` defines a set of useful operators for building and simulating spiking neural networks.
+.. code-block:: bash

+   pip install brainpy[tpu] -f https://storage.googleapis.com/jax-releases/libtpu_releases.html # for google TPU

-To install the ``brainpylib`` package on CPU devices, you can run

-.. code-block:: bash

-   pip install brainpylib
+``brainpylib``
+--------------

-To install the ``brainpylib`` package on CUDA 11, you can run
+``brainpylib`` defines a set of useful operators for building and simulating spiking neural networks.
+
+To install the ``brainpylib`` package on CPU devices, you can run

 .. code-block:: bash

-   pip install brainpylib-cu11x
+   pip install brainpylib

-To install the ``brainpylib`` package on CUDA 12, you can run
+To install the ``brainpylib`` package on CUDA (Linux only), you can run

 .. code-block:: bash

-   pip install brainpylib-cu12x
+   pip install brainpylib
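Whichever variant above is installed, a quick sanity check confirms the active backend (a minimal sketch, not part of the patch; ``bp.__version__`` and ``jax.devices()`` are standard APIs):

```python
import jax
import brainpy as bp

print(bp.__version__)  # the installed BrainPy version
print(jax.devices())   # lists CPU / CUDA / TPU devices, confirming which backend is active
```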
diff --git a/docs/toolboxes.rst b/docs/toolboxes.rst
index 11bf53115..cc3a38575 100644
--- a/docs/toolboxes.rst
+++ b/docs/toolboxes.rst
@@ -1,7 +1,16 @@
 BDP Toolboxes
 ==================
+
+
+
 This section contains detailed toolboxes BrainPy uses for brain dynamics modeling.
+
+
+Differential Equations
+-----------------------
+
+
 .. toctree::
    :maxdepth: 1

@@ -10,11 +19,34 @@ This section contains detailed toolboxes BrainPy uses for brain dynamics modelin
    tutorial_toolbox/fde_numerical_solvers
    tutorial_toolbox/dde_numerical_solvers
    tutorial_toolbox/joint_equations
+
+
+Toolbox for Modeling
+--------------------
+
+.. toctree::
+   :maxdepth: 1
+
    tutorial_toolbox/synaptic_connections
    tutorial_toolbox/synaptic_weights
+   tutorial_toolbox/inputs
+
+
+Toolbox for Training
+--------------------
+
+.. toctree::
+   :maxdepth: 1
+
    tutorial_toolbox/optimizers
-   tutorial_toolbox/state_saving_and_loading.ipynb
-   tutorial_toolbox/state_resetting.ipynb
    tutorial_toolbox/surrogate_gradient
-   tutorial_toolbox/inputs
+
+State Resetting, Saving and Loading
+-----------------------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   tutorial_toolbox/state_saving_and_loading.ipynb
+   tutorial_toolbox/state_resetting.ipynb
\ No newline at end of file
diff --git a/docs/tutorials.rst b/docs/tutorials.rst
index 7c9a1c876..57d18332b 100644
--- a/docs/tutorials.rst
+++ b/docs/tutorials.rst
@@ -3,11 +3,76 @@ BDP Tutorials
 This section contains tutorials on how to use BrainPy to accomplish model building, simulation, training, and analysis.

+
+Math Foundation
+---------------
+
+.. toctree::
+   :maxdepth: 1
+
+   tutorial_math/variables
+   tutorial_math/control_flows
+   tutorial_math/Numpy_like_Operations.ipynb
+   tutorial_math/Dedicated_Operators.ipynb
+   tutorial_math/einops_in_brainpy.ipynb
+
+
+Model Building with Existing Modules
+------------------------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   tutorial_building/overview_of_dynamic_model
+   tutorial_building/build_conductance_neurons_v2.ipynb
+   tutorial_building/phenon_synapse_models.ipynb
+   tutorial_building/kinetic_synapse_models.ipynb
+   tutorial_building/build_network_models
+
+
+Model Building by Customizing New Modules
+-----------------------------------------
+
+.. toctree::
+   :maxdepth: 1
+
+   tutorial_building/customize_neuron_models
+   tutorial_building/customize_synapse_models
+   tutorial_building/how_to_customze_a_synapse.ipynb
+
+
+Model Simulation
+----------------
+
+.. toctree::
+   :maxdepth: 1
+
+   tutorial_simulation/simulation_dsrunner.ipynb
+   tutorial_simulation/parallel_for_parameter_exploration.ipynb
+   tutorial_simulation/monitor_per_multiple_steps.ipynb
+
+
+Model Training
+--------------
+
+These tutorials show how to train a dynamical system from data or tasks.
+
+.. toctree::
+   :maxdepth: 1
+
+   tutorial_training/build_training_models.ipynb
+   tutorial_training/offline_training.ipynb
+   tutorial_training/online_training.ipynb
+   tutorial_training/bp_training.ipynb
+   tutorial_training/esn_introduction.ipynb
+
+
+Model Analysis
+--------------
+
 .. toctree::
-   :maxdepth: 2

-   tutorial_math/index
-   tutorial_building/index
-   tutorial_simulation/index
-   tutorial_training/index
-   tutorial_analysis/index
+   tutorial_analysis/lowdim_analysis
+   tutorial_analysis/highdim_analysis
+   tutorial_analysis/decision_making_model
diff --git a/examples/operator_customization/event_ell.py b/examples/operator_customization/event_ell.py
new file mode 100644
index 000000000..0c5e7f8a5
--- /dev/null
+++ b/examples/operator_customization/event_ell.py
@@ -0,0 +1,41 @@
+import jax
+import jax.numpy as jnp
+import taichi as ti
+
+import brainpy.math as bm
+
+
+@ti.kernel
+def event_ell_cpu(indices: ti.types.ndarray(ndim=2),
+                  vector: ti.types.ndarray(ndim=1),
+                  weight: ti.types.ndarray(ndim=1),
+                  out: ti.types.ndarray(ndim=1)):
+  weight_val = weight[0]
+  num_rows, num_cols = indices.shape
+  ti.loop_config(serialize=True)
+  for i in range(num_rows):
+    if vector[i]:
+      for j in range(num_cols):
+        out[indices[i, j]] += weight_val
+
+
+prim = bm.XLACustomOp(cpu_kernel=event_ell_cpu)
+
+
+def try_taichi_op_register():
+  s = 1000
+  indices = bm.random.randint(0, s, (s, 100))
+  vector = bm.random.rand(s) < 0.1
+  weight = bm.array([1.0])
+
+  out = prim(indices, vector, weight, outs=[jax.ShapeDtypeStruct((s,), dtype=jnp.float32)])
+
+  # a second identical call; presumably exercises the already-compiled, cached kernel
+  out = prim(indices, vector, weight, outs=[jax.ShapeDtypeStruct((s,), dtype=jnp.float32)])
+
+  # print(out)
+  bm.clear_buffer_memory()
+
+
+# bm.clear_taichi_aot_caches()
+try_taichi_op_register()
diff --git a/setup.py b/setup.py
index 885bbf57b..55f948e4b 100644
--- a/setup.py
+++ b/setup.py
@@ -69,16 +69,15 @@
     ],
     extras_require={
       'cpu': ['jaxlib>=0.4.13', 'brainpylib', 'numba', 'taichi==1.7.0'],
-      'cuda11': ['jaxlib[cuda11_pip]', 'brainpylib-cu11x', 'numba', 'taichi==1.7.0'],
-      'cuda12': ['jaxlib[cuda12_pip]', 'brainpylib-cu12x', 'numba', 'taichi==1.7.0'],
+      'cuda11': ['jaxlib[cuda11_pip]', 'brainpylib', 'numba', 'taichi==1.7.0'],
+      'cuda12': ['jaxlib[cuda12_pip]', 'brainpylib', 'numba', 'taichi==1.7.0'],
       'tpu': ['jaxlib[tpu]', 'numba',],
       'cpu_mini': ['jaxlib>=0.4.13'],
-      'cuda_mini': ['jaxlib[cuda12_pip]'],
+      'cuda11_mini': ['jaxlib[cuda11_pip]'],
+      'cuda12_mini': ['jaxlib[cuda12_pip]'],
     },
     keywords=('computational neuroscience, '
               'brain-inspired computation, '
-              'dynamical systems, '
-              'differential equations, '
              'brain modeling, '
              'brain dynamics modeling, '
              'brain dynamics programming'),
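For reference, a dense NumPy sketch of what the ``event_ell_cpu`` Taichi kernel in ``examples/operator_customization/event_ell.py`` above computes, useful for cross-checking results. The ``event_ell_dense`` helper is hypothetical and not part of the diff; ``weight`` is passed as a plain scalar here rather than a one-element array:

```python
import numpy as np

def event_ell_dense(indices, vector, weight):
  # For every "active" row i (vector[i] is True), scatter-add the scalar
  # `weight` into `out` at that row's column indices -- the same update
  # rule as the Taichi kernel above.
  out = np.zeros(indices.shape[0], dtype=np.float32)
  for i in range(indices.shape[0]):
    if vector[i]:
      for j in indices[i]:
        out[j] += weight
  return out

# example check on random inputs matching the shapes used in the example
indices = np.random.randint(0, 1000, (1000, 100))
vector = np.random.rand(1000) < 0.1
print(event_ell_dense(indices, vector, 1.0)[:10])
```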