diff --git a/pymdp/agent.py b/pymdp/agent.py
index b28d26e9..de8363c8 100644
--- a/pymdp/agent.py
+++ b/pymdp/agent.py
@@ -604,74 +604,6 @@ def _infer_states_test(self, observation, distr_obs=False):
             return qs, xn, vn
         else:
             return qs
-
-    def infer_policies_old(self):
-        """
-        Perform policy inference by optimizing a posterior (categorical) distribution over policies.
-        This distribution is computed as the softmax of ``G * gamma + lnE`` where ``G`` is the negative expected
-        free energy of policies, ``gamma`` is a policy precision and ``lnE`` is the (log) prior probability of policies.
-        This function returns the posterior over policies as well as the negative expected free energy of each policy.
-
-        Returns
-        ----------
-        q_pi: 1D ``numpy.ndarray``
-            Posterior beliefs over policies, i.e. a vector containing one posterior probability per policy.
-        G: 1D ``numpy.ndarray``
-            Negative expected free energies of each policy, i.e. a vector containing one negative expected free energy per policy.
-        """
-
-        if self.inference_algo == "VANILLA":
-            q_pi, G = control.update_posterior_policies(
-                self.qs,
-                self.A,
-                self.B,
-                self.C,
-                self.policies,
-                self.use_utility,
-                self.use_states_info_gain,
-                self.use_param_info_gain,
-                self.pA,
-                self.pB,
-                E=self.E,
-                I=self.I,
-                gamma=self.gamma
-            )
-        elif self.inference_algo == "MMP":
-            if self.factorized:
-                raise NotImplementedError("Factorized inference not implemented for MMP")
-
-            if self.sophisticated:
-                raise NotImplementedError("Sophisticated inference not implemented for MMP")
-
-
-            future_qs_seq = self.get_future_qs()
-
-            q_pi, G = control.update_posterior_policies_full(
-                future_qs_seq,
-                self.A,
-                self.B,
-                self.C,
-                self.policies,
-                self.use_utility,
-                self.use_states_info_gain,
-                self.use_param_info_gain,
-                self.latest_belief,
-                self.pA,
-                self.pB,
-                F = self.F,
-                E = self.E,
-                I=self.I,
-                gamma = self.gamma
-            )
-
-        if hasattr(self, "q_pi_hist"):
-            self.q_pi_hist.append(q_pi)
-            if len(self.q_pi_hist) > self.inference_horizon:
-                self.q_pi_hist = self.q_pi_hist[-(self.inference_horizon-1):]
-
-        self.q_pi = q_pi
-        self.G = G
-        return q_pi, G
 
     def infer_policies(self):
         """