Made kf functions work with univariate form.
update() and predict() can now take floating point numbers,
and correctly compute the univariate form of the filter.
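
A minimal sketch of the new univariate usage (the values and the direct module import are illustrative, not part of this commit):

    from filterpy.kalman.kalman_filter import predict, update

    x, P = 0., 500.                        # scalar state estimate and variance
    for z in (1.1, 1.9, 3.2):              # scalar measurements
        x, P = predict(x, P, F=1., Q=0.02)
        x, P = update(x, P, z, R=10.)      # H defaults to 1
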
rlabbe committed Jan 24, 2016
1 parent c533dc3 commit 53c78f1
Showing 6 changed files with 111 additions and 33 deletions.
2 changes: 2 additions & 0 deletions creating_a_release.rst
@@ -21,6 +21,8 @@ Steps to Create Release
* You need to manually update the documentation code at pythonhosted, PyPi's documentation server.

cd /docs/html
zip -r filterpy.zip *.*

add all files to a zip file (index.html must be at base)
go to https://pypi.python.org/pypi?%3Aaction=pkg_edit&name=filterpy
scroll to bottom, add the zip file you just made
26 changes: 24 additions & 2 deletions docs/kalman/KalmanFilter.rst
@@ -1,5 +1,5 @@
Kalman Filter
=============
KalmanFilter
============

Implements a linear Kalman filter. For now the best documentation
is my free book Kalman and Bayesian Filters in Python [1]_
@@ -180,6 +180,21 @@ while some_condition_is_true:
do_something_with_estimate (f.x)
Procedural Form
===============

This module also contains stand-alone functions to perform Kalman filtering.
Use these if you are not a fan of objects.

**Example**

.. code::

    while True:
        z, R = read_sensor()
        x, P = predict(x, P, F, Q)
        x, P = update(x, P, z, R, H)

**References**


@@ -206,3 +221,10 @@ Kalman filter
:members:

.. automethod:: __init__


.. autofunction:: update
.. autofunction:: predict
.. autofunction:: batch_filter


2 changes: 1 addition & 1 deletion filterpy/__init__.py
@@ -14,4 +14,4 @@
for more information.
"""

__version__ = "0.1.1"
__version__ = "0.1.2"
12 changes: 10 additions & 2 deletions filterpy/changelog.txt
@@ -1,5 +1,13 @@
Version 0.1.10
==============
Version 0.1.2
=============

Modified the update() and predict() functions to work in the
univariate case. You can pass ints/floats into the equations
and get floats back.
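
A minimal sketch of what this enables (values are illustrative; the F=1, Q=0 defaults come from the new predict() signature):

    from filterpy.kalman.kalman_filter import predict, update

    x, P = predict(1., 500.)             # scalar prior; F and Q default to 1 and 0
    x, P = update(x, P, z=3., R=10.)     # scalar measurement; H defaults to 1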


Version 0.1.1
=============

* Added discrete_bayes module, which supports discrete Bayesian filtering.

92 changes: 69 additions & 23 deletions filterpy/kalman/kalman_filter.py
@@ -85,7 +85,7 @@ def __init__(self, dim_x, dim_z, dim_u=0):
not give you a functional filter.
Parameters
----------
----------
dim_x : int
Number of state variables for the Kalman filter. For example, if
you are tracking the position and velocity of an object in two
@@ -199,8 +199,8 @@ def update(self, z, R=None, H=None):
self._K = K

# compute log likelihood
mean = np.array(dot(H, x)).flatten()
flatz = np.array(z).flatten()
mean = np.asarray(dot(H, x)).flatten()
flatz = np.asarray(z).flatten()
self.log_likelihood = multivariate_normal.logpdf(
flatz, mean, cov=S, allow_singular=True)

@@ -270,8 +270,8 @@ def update_correlated(self, z, R=None, H=None):
self._K = K

# compute log likelihood
mean = np.array(dot(H, x)).flatten()
flatz = np.array(z).flatten()
mean = np.asarray(dot(H, x)).flatten()
flatz = np.asarray(z).flatten()

self.log_likelihood = multivariate_normal.logpdf(
flatz, mean, cov=S, allow_singular=True)
@@ -786,28 +786,41 @@ def S(self):



def kf_update(x, P, z, R, H):
def update(x, P, z, R, H=None, return_all=False):
"""
Add a new measurement (z) to the Kalman filter. If z is None, nothing
is changed.

This can handle either the multidimensional or unidimensional case. If
all parameters are floats instead of arrays, the filter will still work,
and return floats for x and P.

update(1, 2, 1, 1, 1)     # univariate
update(x, P, 1, 1)        # univariate; H defaults to 1
Parameters
----------
x : numpy.array(dim_x, 1)
x : numpy.array(dim_x, 1), or float
State estimate vector
P : numpy.array(dim_x, dim_x)
P : numpy.array(dim_x, dim_x), or float
Covariance matrix
z : numpy.array
z : numpy.array(dim_z, 1), or float
measurement for this update.
R : numpy.array(dim_z, dim_z)
R : numpy.array(dim_z, dim_z), or float
Measurement noise matrix
H : numpy.array(dim_x, dim_x)
Measurement function
H : numpy.array(dim_z, dim_x), or float, optional
Measurement function. If not provided, a value of 1 is assumed.
return_all : bool, default False
If True, y, K, S, and log_likelihood are returned in addition to x
and P; otherwise only x and P are returned.
Returns
-------
@@ -831,8 +844,27 @@ def kf_update(x, P, z, R, H):
log likelihood of the measurement
"""


if z is None:
return x, P, None, None, None, None
if return_all:
return x, P, None, None, None, None
else:
return x, P

if H is None:
H = np.array([1])

if np.isscalar(H):
H = np.array([H])

if not np.isscalar(x):
# handle special case: if z is in form [[z]] but x is not a column
# vector dimensions will not match
if x.ndim==1 and shape(z) == (1,1):
z = z[0]

if shape(z) == (): # is it scalar, e.g. z=3 or z=np.array(3)
z = np.asarray([z])

# error (residual) between measurement and prediction
y = z - dot(H, x)
@@ -842,26 +874,38 @@ def kf_update(x, P, z, R, H):


# map system uncertainty into kalman gain
K = dot3(P, H.T, linalg.inv(S))
try:
K = dot3(P, H.T, linalg.inv(S))
except:
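# linalg.inv() failed; in the univariate case S is a scalar, so invert by division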
K = dot3(P, H.T, 1/S)


# predict new x with residual scaled by the kalman gain
x = x + dot(K, y)

# P = (I-KH)P(I-KH)' + KRK'
KH = dot(K, H)
I_KH = np.eye(KH.shape[0]) - KH


try:
I_KH = np.eye(KH.shape[0]) - KH
except:
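# KH is a scalar in the univariate case, so np.eye(KH.shape[0]) fails; use scalar arithmetic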
I_KH = np.array(1 - KH)
P = dot3(I_KH, P, I_KH.T) + dot3(K, R, K.T)

# compute log likelihood
mean = np.array(dot(H, x)).flatten()
flatz = np.array(z).flatten()
flatz = np.asarray(z).flatten()
log_likelihood = multivariate_normal.logpdf(flatz, mean, cov=S, allow_singular=True)

return x, P, y, K, S, log_likelihood
if return_all:
return x, P, y, K, S, log_likelihood
else:
return x, P



def kf_predict(x, P, F, Q, u=0, B=0, alpha=1.):
def predict(x, P, F=1, Q=0, u=0, B=1, alpha=1.):
""" Predict next position using the Kalman filter state propagation
equations.
@@ -905,14 +949,16 @@ def kf_predict(x, P, F, Q, u=0, B=0, alpha=1.):
Prior covariance matrix
"""

if np.isscalar(F):
F = np.array(F)
x = dot(F, x) + dot(B, u)
P = (alpha * alpha) * dot3(F, P, F.T) + Q

return x, P



def kf_batch_filter(x, P, zs, Fs, Qs, Hs, Rs, Bs=None, us=None, update_first=False):
def batch_filter(x, P, zs, Fs, Qs, Hs, Rs, Bs=None, us=None, update_first=False):
""" Batch processes a sequences of measurements.
Parameters
@@ -1021,21 +1067,21 @@ def kf_batch_filter(x, P, zs, Fs, Qs, Hs, Rs, Bs=None, us=None, update_first=Fal
if update_first:
for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):

x, P, _, _, _, _ = kf_update(x, P, z, R=R, H=H)
x, P = update(x, P, z, R=R, H=H)
means[i,:] = x
covariances[i,:,:] = P

x, P = kf_predict(x, P, u=u, B=B, F=F, Q=Q)
x, P = predict(x, P, u=u, B=B, F=F, Q=Q)
means_p[i,:] = x
covariances_p[i,:,:] = P
else:
for i, (z, F, Q, H, R, B, u) in enumerate(zip(zs, Fs, Qs, Hs, Rs, Bs, us)):

x, P = kf_predict(x, P, u=u, B=B, F=F, Q=Q)
x, P = predict(x, P, u=u, B=B, F=F, Q=Q)
means_p[i,:] = x
covariances_p[i,:,:] = P

x, P, _, _, _, _ = kf_update(x, P, z, R=R, H=H)
x, P = update(x, P, z, R=R, H=H)
means[i,:] = x
covariances[i,:,:] = P

10 changes: 5 additions & 5 deletions filterpy/kalman/tests/test_kf.py
@@ -308,10 +308,10 @@ def test_procedure_form():
pos += 100

# perform kalman filtering
x, P = kf_predict(x, P, F, Q)
x, P = predict(x, P, F, Q)
kf.predict()
assert norm(x - kf.x) < 1.e-12
x, P, _, _, _, _ = kf_update(x, P, z, R, H)
x, P, _, _, _, _ = update(x, P, z, R, H, True)
kf.update(z)
assert norm(x - kf.x) < 1.e-12

@@ -359,7 +359,7 @@ def test_procedural_batch_filter():

n = len(zs)
# test both list of matrices, and single matrix forms
mp, cp, _, _ = kf_batch_filter(x, P, zs, F, Q, [H]*n, R)
mp, cp, _, _ = batch_filter(x, P, zs, F, Q, [H]*n, R)

for x1, x2 in zip(m, mp):
assert abs(sum(sum(x1))) - abs(sum(sum(x2))) < 1.e-12
@@ -389,8 +389,8 @@ def proc_form():
pos += 100

# perform kalman filtering
x, P = kf_predict(x, P, F, Q)
x, P, _ = kf_update(z, R, x, P, H)
x, P = predict(x, P, F, Q)
x, P = update(x, P, z, R, H)


def class_form():
