
Commit

suggestions fix
Emile Mathieu committed Aug 11, 2017
1 parent 698cef6 commit 1cdf507
Showing 1 changed file with 11 additions and 12 deletions.
23 changes: 11 additions & 12 deletions edward/inferences/hmcda.py
@@ -54,15 +54,15 @@ def initialize(self, n_adapt, delta=0.65, Lambda=0.15, *args, **kwargs):
     Parameters
     ----------
     n_adapt : float
-      Number of samples with adaption for epsilon
+      Number of samples with adaptation for epsilon
     delta : float, optional
       Target accept rate
     Lambda : float, optional
       Target leapfrog length
     """
     self.scope_iter = 0  # a convenient counter for log joint calculations
 
-    # Find intial epsilon
+    # Find initial epsilon
     step_size = self.find_good_eps()
     sess = get_session()
     init_op = tf.global_variables_initializer()
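For context on the `find_good_eps` call above: the heuristic referenced later as Algorithm 4 (Hoffman & Gelman, 2014) repeatedly doubles or halves a trial step size until the acceptance probability of a single leapfrog step crosses 1/2. A standalone NumPy sketch of that heuristic follows; it is not the Edward implementation, and `log_prob`, `grad_log_prob`, and `leapfrog` are hypothetical helpers standing in for the model's log joint and its gradient.

import numpy as np

def leapfrog(q, p, eps, grad_log_prob):
  # One leapfrog step for H(q, p) = -log_prob(q) + p . p / 2.
  p = p + 0.5 * eps * grad_log_prob(q)
  q = q + eps * p
  p = p + 0.5 * eps * grad_log_prob(q)
  return q, p

def find_reasonable_epsilon(q0, log_prob, grad_log_prob):
  # Algorithm 4: scale eps by 2 or 1/2 until the log acceptance ratio of a
  # single leapfrog step crosses log(1/2).
  eps = 1.0
  p0 = np.random.normal(size=np.shape(q0))

  def log_ratio(eps):
    q, p = leapfrog(q0, p0, eps, grad_log_prob)
    return (log_prob(q) - 0.5 * np.dot(p, p)) - \
           (log_prob(q0) - 0.5 * np.dot(p0, p0))

  a = 1.0 if log_ratio(eps) > np.log(0.5) else -1.0
  while a * log_ratio(eps) > -a * np.log(2.0):
    eps *= 2.0 ** a
  return eps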
@@ -89,15 +89,14 @@ def initialize(self, n_adapt, delta=0.65, Lambda=0.15, *args, **kwargs):
   def build_update(self):
     """Simulate Hamiltonian dynamics using a numerical integrator.
     Correct for the integrator's discretization error using an
-    acceptance ratio. The initial value of espilon is heuristically chosen
-    with Algorithm 4
+    acceptance ratio. The initial value of epsilon is heuristically chosen
+    with Algorithm 4.
 
     Notes
     -----
     The updates assume each Empirical random variable is directly
     parameterized by ``tf.Variable``s.
     """
-
     old_sample = {z: tf.gather(qz.params, tf.maximum(self.t - 1, 0))
                   for z, qz in six.iteritems(self.latent_vars)}
     old_sample = OrderedDict(old_sample)
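In HMC with dual averaging, the number of leapfrog steps is derived from the target trajectory length Lambda rather than fixed: L = max(1, round(Lambda / epsilon)). A rough NumPy sketch of one transition is below, reusing the hypothetical `leapfrog` helper from the previous sketch; it only illustrates the algorithm and does not mirror the TensorFlow graph that `build_update` constructs.

def hmcda_transition(q_old, eps, Lambda, log_prob, grad_log_prob):
  # Leapfrog step count from the target trajectory length (Algorithm 5).
  n_steps = max(1, int(round(Lambda / eps)))

  p_old = np.random.normal(size=np.shape(q_old))
  q, p = q_old, p_old
  for _ in range(n_steps):
    q, p = leapfrog(q, p, eps, grad_log_prob)

  # Log Metropolis ratio corrects the integrator's discretization error.
  ratio = (log_prob(q) - 0.5 * np.dot(p, p)) - \
          (log_prob(q_old) - 0.5 * np.dot(p_old, p_old))
  alpha = min(1.0, np.exp(ratio))  # statistic fed to the step-size adaptation
  accept = np.log(np.random.uniform()) < ratio
  return (q if accept else q_old), alpha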
@@ -123,7 +122,7 @@ def build_update(self):
     # Accept or reject sample.
     u = Uniform().sample()
     alpha = tf.minimum(1.0, tf.exp(ratio))
-    accept = u < alpha
+    accept = tf.log(u) < ratio
 
     sample_values = tf.cond(accept, lambda: list(six.itervalues(new_sample)),
                             lambda: list(six.itervalues(old_sample)))
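The change from `u < alpha` to `tf.log(u) < ratio` performs the Metropolis test in log space, which avoids relying on `tf.exp(ratio)` (and its potential overflow) in the accept decision itself; the two tests accept with the same probability, since for u in (0, 1), u < min(1, exp(ratio)) exactly when log(u) < ratio. A quick NumPy check of that equivalence, purely illustrative:

import numpy as np

u = np.random.uniform(size=100000)
ratio = -0.3  # an arbitrary log acceptance ratio
accept_linear = u < np.minimum(1.0, np.exp(ratio))
accept_log = np.log(u) < ratio
print(np.mean(accept_linear), np.mean(accept_log))  # both near exp(-0.3) ~= 0.74
print(np.all(accept_linear == accept_log))          # identical decisions for the same u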
@@ -137,8 +136,8 @@ def build_update(self):
     # Use Dual Averaging to adapt epsilon
     should_adapt = self.t <= self.n_adapt
     assign_ops = tf.cond(should_adapt,
-                         lambda: self.adapt_step_size(alpha),
-                         lambda: self.do_not_adapt_step_size(alpha))
+                         lambda: self._adapt_step_size(alpha),
+                         lambda: self._do_not__adapt_step_size(alpha))
 
     # Update Empirical random variables.
     for z, qz in six.iteritems(self.latent_vars):
@@ -149,15 +148,15 @@ def build_update(self):
     assign_ops.append(self.n_accept.assign_add(tf.where(accept, 1, 0)))
     return tf.group(*assign_ops)
 
-  def do_not_adapt_step_size(self, alpha):
+  def _do_not__adapt_step_size(self, alpha):
     # Do not adapt step size but assign last running averaged epsilon to epsilon
     assign_ops = []
-    assign_ops.append(self.H_B.assign_add(0.0).op)
-    assign_ops.append(self.epsilon_B.assign_add(0.0).op)
+    assign_ops.append(tf.assign(self.H_B, self.H_B).op)
+    assign_ops.append(tf.assign(self.epsilon_B, self.epsilon_B).op)
     assign_ops.append(tf.assign(self.epsilon, self.epsilon_B).op)
     return assign_ops
 
-  def adapt_step_size(self, alpha):
+  def _adapt_step_size(self, alpha):
     # Adapt step size as described in Algorithm 5
     assign_ops = []
 
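For reference, the dual-averaging recursion that `_adapt_step_size` builds into the graph (Algorithm 5, Hoffman & Gelman, 2014) can be written compactly in NumPy. This is a paper-level sketch using the paper's default constants (gamma = 0.05, t0 = 10, kappa = 0.75) and generic names; it is not a transcription of the Edward variables `H_B` and `epsilon_B`.

import numpy as np

def dual_averaging_step(m, alpha, delta, mu, H_bar, log_eps_bar,
                        gamma=0.05, t0=10.0, kappa=0.75):
  # One adaptation step at iteration m: drive the average acceptance
  # statistic alpha toward the target delta by adjusting log(epsilon).
  eta = 1.0 / (m + t0)
  H_bar = (1.0 - eta) * H_bar + eta * (delta - alpha)
  log_eps = mu - (np.sqrt(m) / gamma) * H_bar
  w = m ** (-kappa)
  log_eps_bar = w * log_eps + (1.0 - w) * log_eps_bar
  return np.exp(log_eps), H_bar, log_eps_bar

# mu is typically log(10 * eps0), with eps0 from the Algorithm 4 heuristic;
# once m exceeds n_adapt, the step size is frozen at exp(log_eps_bar).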
