Skip to content

Commit

Permalink
Merge pull request #91 from ucl-cssb/FixingIssue84
Browse files Browse the repository at this point in the history
  • Loading branch information
Fontanapink authored Oct 23, 2024
2 parents d3f2214 + 23a267a commit 72859c0
Show file tree
Hide file tree
Showing 7 changed files with 306 additions and 167 deletions.
2 changes: 1 addition & 1 deletion README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,7 @@ For Windows

.. code-block:: bash
conda activate mimic_env_windows
conda activate mimic_env
4. **Install the Package**

Expand Down
171 changes: 104 additions & 67 deletions docs/source/notebooks/VAR/examples-bayes-VAR.ipynb

Large diffs are not rendered by default.

179 changes: 108 additions & 71 deletions examples/VAR/examples-bayes-VAR.ipynb

Large diffs are not rendered by default.

18 changes: 17 additions & 1 deletion mimic/data_imputation/base_imputator.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,22 @@ class BaseImputer(ABC):
with methods to save and load data and a structure for defining imputation methods.
"""

def __init__(self):
def __init__(self, debug: Optional[str] = None):
self.data = None
self._debug = None
self.debug = debug # Set the debug level through the property

@property
def debug(self) -> Optional[str]:
    """Gets the current debug level.

    Returns:
        Optional[str]: The active debug level — one of ``None``, ``"low"``,
        or ``"high"`` (the setter rejects any other value).
    """
    return self._debug

@debug.setter
def debug(self, value: Optional[str]) -> None:
    """Validate and store the debug level.

    Accepts only ``None``, ``"low"``, or ``"high"``; any other value
    raises ``ValueError`` and leaves the current level untouched.
    """
    allowed_levels = (None, "low", "high")
    if value in allowed_levels:
        self._debug = value
    else:
        raise ValueError("Debug level must be None, 'low', or 'high'.")

@abstractmethod
def impute_missing_values(
Expand All @@ -30,7 +44,9 @@ def impute_missing_values(
:param dataset: The dataset containing missing values.
:param feature_columns: List of feature columns to use in the imputation.
:param output_columns: List of columns to store imputed values.
:param target_column: The target column where missing values are imputed.
:param kernel: Optional kernel parameter for imputation.
:return: Dataset with imputed values in the target column.
"""
raise NotImplementedError("Subclasses must implement this method")
Expand Down
18 changes: 17 additions & 1 deletion mimic/model_infer/base_infer.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,30 @@ class BaseInfer(ABC):
results (Any | None): Results of the inference process.
"""

def __init__(self):
def __init__(self, debug: Optional[str] = None):
# Initialize the debug level with a default of None
self._debug = None
self.debug = debug # Set the debug level through the property
# Initialize priors as an empty dictionary
self.priors: Dict[str, Any] = {}
# Other initializations
self.data: Optional[np.ndarray] = None
self.dataS: Optional[np.ndarray] = None
self.model: Optional[object] = None
self.results: Optional[Any] = None

@property
def debug(self) -> Optional[str]:
    """Gets the current debug level.

    Returns:
        Optional[str]: The active debug level — one of ``None``, ``"low"``,
        or ``"high"`` (the setter rejects any other value).
    """
    return self._debug

@debug.setter
def debug(self, value: Optional[str]) -> None:
    """Validate and store the debug level.

    Accepts only ``None``, ``"low"``, or ``"high"``; any other value
    raises ``ValueError`` and leaves the current level untouched.
    """
    allowed_levels = (None, "low", "high")
    if value in allowed_levels:
        self._debug = value
    else:
        raise ValueError("Debug level must be None, 'low', or 'high'.")

def _validate_data(self, data):
"""
Validates and converts the input data to a numpy array.
Expand Down
22 changes: 15 additions & 7 deletions mimic/model_infer/infer_VAR_bayes.py
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,9 @@ def run_inference(self, **kwargs) -> None:
self.last_trace = trace
self.last_data = (data,)

print(az.summary(trace, var_names=["x0", "A"]))
# print if `debug` is set to 'high' or 'low'
if self.debug in ["high", "low"]:
print(az.summary(trace, var_names=["x0", "A"]))

az.plot_posterior(trace, var_names=["x0", "A"])
plt.savefig("posterior_plot.pdf")
Expand Down Expand Up @@ -257,7 +259,7 @@ def run_inference_large(self, samples=4000, tune=2000, cores=4) -> None:
c2 = pm.InverseGamma("c2", 2, 8)
tau = pm.HalfCauchy("tau", beta=tau0)
lam = pm.HalfCauchy("lam", beta=1, shape=(ndim, ndim))
A = pm.Normal('A', mu=A_prior_mu, sigma=tau * lam * \
A = pm.Normal('A', mu=A_prior_mu, sigma=tau * lam *
at.sqrt(c2 / (c2 + tau**2 * lam**2)), shape=(ndim, ndim))

# If noise covariance is provided, use it as a prior
Expand All @@ -278,7 +280,9 @@ def run_inference_large(self, samples=4000, tune=2000, cores=4) -> None:
with var_model:
trace = pm.sample(draws=samples, tune=tune, cores=cores)

print(az.summary(trace, var_names=["A"]))
# print if `debug` is set to 'high' or 'low'
if self.debug in ["high", "low"]:
print(az.summary(trace, var_names=["A"]))

# Plotting the posterior distributions
az.plot_posterior(trace, var_names=[
Expand Down Expand Up @@ -349,7 +353,9 @@ def run_inference_xs(self, samples=2000, tune=1000, cores=2) -> None:
with var_model:
idata = pm.sample(draws=samples, tune=tune, cores=cores)

print(az.summary(idata, var_names=["Ah", "Bh"]))
# print if `debug` is set to 'high' or 'low'
if self.debug in ["high", "low"]:
print(az.summary(idata, var_names=["Ah", "Bh"]))

az.plot_posterior(idata, var_names=["Ah", "Bh"])
plt.savefig("posterior_plot.pdf")
Expand Down Expand Up @@ -402,14 +408,14 @@ def run_inference_large_xs(self, samples=4000, tune=2000, cores=4) -> None:
c2_A = pm.InverseGamma("c2_A", 2, 1)
tau_A = pm.HalfCauchy("tau_A", beta=tau0_A)
lam_A = pm.HalfCauchy("lam_A", beta=1, shape=(nX, nX))
Ah = pm.Normal('Ah', mu=A_prior_mu, sigma=tau_A * lam_A * \
Ah = pm.Normal('Ah', mu=A_prior_mu, sigma=tau_A * lam_A *
at.sqrt(c2_A / (c2_A + tau_A**2 * lam_A**2)), shape=(nX, nX))

tau0_B = (DB0 / (DB - DB0)) * 0.1 / np.sqrt(N)
c2_B = pm.InverseGamma("c2_B", 2, 1)
tau_B = pm.HalfCauchy("tau_B", beta=tau0_B)
lam_B = pm.HalfCauchy("lam_B", beta=1, shape=(nS, nX))
Bh = pm.Normal('Bh', mu=0, sigma=tau_B * lam_B * \
Bh = pm.Normal('Bh', mu=0, sigma=tau_B * lam_B *
at.sqrt(c2_B / (c2_B + tau_B**2 * lam_B**2)), shape=(nS, nX))

if noise_cov_prior is not None:
Expand All @@ -432,7 +438,9 @@ def run_inference_large_xs(self, samples=4000, tune=2000, cores=4) -> None:
with var_model:
trace = pm.sample(draws=samples, tune=tune, cores=cores)

print(az.summary(trace, var_names=["Ah", "Bh"]))
# print if `debug` is set to 'high' or 'low'
if self.debug in ["high", "low"]:
print(az.summary(trace, var_names=["Ah", "Bh"]))

az.plot_posterior(trace, var_names=["Ah", "Bh"])
plt.savefig("posterior_plot.pdf")
Expand Down
63 changes: 44 additions & 19 deletions mimic/model_simulate/base_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,15 +61,38 @@ class BaseModel(ABC):
Updates class attributes based on the current parameters dictionary.
"""

def __init__(self):
def __init__(self, debug: Optional[str] = None):
"""
Initializes the BaseModel with default values.
"""
self.data: Optional[np.ndarray] = None
self.model: Optional[object] = None
self._debug = None # Initialize the private attribute
self.debug = debug # Set the debug level through the property
self.parameters: Optional[Dict[str, Union[int,
float, None, np.ndarray, str, Any]]] = None

# The debug property is a getter and setter for the private attribute
# _debug.
@property
def debug(self) -> Optional[str]:
    """Gets the current debug level.

    Returns:
        Optional[str]: The active debug level — one of ``None``, ``"low"``,
        or ``"high"`` (the setter rejects any other value).
    """
    return self._debug

# The setter for the debug property only allows setting the debug level to
# None, 'low', or 'high'.
@debug.setter
def debug(self, value: Optional[str]) -> None:
    """Validate and store the debug level.

    Accepts only ``None``, ``"low"``, or ``"high"``; any other value
    raises ``ValueError`` and leaves the current level untouched.
    """
    allowed_levels = (None, "low", "high")
    if value in allowed_levels:
        self._debug = value
    else:
        raise ValueError("Debug level must be None, 'low', or 'high'.")

# Example usage of the debug property:
# model = BaseModel(debug="low") # Valid
# model.debug = "high" # Valid
# model.debug = "invalid" # Raises ValueError

# check if params are set, else print a warning and use the default values
# for each simulation type

Expand Down Expand Up @@ -178,8 +201,9 @@ def check_params(

# Check if no parameters were provided and warn the user
if params is None:
print(
f"Warning: No parameters provided for {sim_type} simulation. Using default values.")
if self.debug in ["low", "high"]:
print(
f"Warning: No parameters provided for {sim_type} simulation. Using default values.")
else:
# Identify missing or None parameters
missing_params = [
Expand All @@ -192,8 +216,9 @@ def check_params(
if value is not None:
default_params[key] = value

print(
f"Using the following parameters for {sim_type} simulation: {default_params}")
if self.debug == "high":
print(
f"Using the following parameters for {sim_type} simulation: {default_params}")
self.parameters = default_params
self.update_attributes()

Expand Down Expand Up @@ -224,20 +249,20 @@ def print_parameters(self, precision: int = 2) -> None:
Parameters:
precision (int): Precision for formatting numpy array elements.
"""
print("Model parameters:")
print(f"Model: {self.model}")
if self.parameters is not None:
parameters = {
k: self._custom_array_to_string(
v,
precision) if isinstance(
v,
np.ndarray) else v for k,
v in self.parameters.items()}
for param, value in parameters.items():
print(f"{param}: {value}")
else:
print("No parameters to print.")
# Check the class-level debug level
if self.debug in ["low", "high"]:
print("Model parameters:")
print(f"Model: {self.model}")
if self.parameters is not None:
parameters = {
k: self._custom_array_to_string(
v, precision) if isinstance(v, np.ndarray) else v
for k, v in self.parameters.items()
}
for param, value in parameters.items():
print(f"{param}: {value}")
else:
print("No parameters to print.")

def save_parameters(self,
filepath: str,
Expand Down

0 comments on commit 72859c0

Please sign in to comment.