diff --git a/Cargo.toml b/Cargo.toml
index 02d9c37..6fd785a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,7 +19,7 @@ rand = ">=0.8.5"
nalgebra = ">=0.31.4"
rug = { version = ">=1.18.0", features = ["float", "integer"] }
sysinfo = ">=0.28.1"
-search_trail = ">=0.1.2"
+search_trail = ">=0.1.3"
peak_alloc = ">=0.2.0"
chrono = ">=0.4.31"
rayon = ">=1.8.0"
diff --git a/src/common.rs b/src/common.rs
index 02c95ef..3bd9664 100644
--- a/src/common.rs
+++ b/src/common.rs
@@ -13,7 +13,7 @@
//
//You should have received a copy of the GNU Affero General Public License
//along with this program. If not, see <http://www.gnu.org/licenses/>.
-macro_rules! f128 {
+macro_rules! F128 {
($v:expr) => {
Float::with_val(113, $v)
};
@@ -21,4 +21,4 @@ macro_rules! f128 {
pub const FLOAT_CMP_THRESHOLD: f64 = 0.0000001;
-pub(crate) use f128;
+pub(crate) use F128;
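
Side note on the renamed macro: `Float::with_val(113, $v)` constructs a `rug::Float` with a 113-bit significand, the significand width of IEEE 754 binary128, which is where the `F128` name comes from. A minimal usage sketch, assuming only the `rug` crate:

```rust
use rug::Float;

fn main() {
    // What F128!(0.1) expands to: a Float with 113 bits of precision,
    // the significand width of IEEE 754 binary128 (vs. 53 bits for f64).
    let p = Float::with_val(113, 0.1);
    println!("{}", p);
}
```
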
diff --git a/src/diagrams/dac/dac.rs b/src/diagrams/dac/dac.rs
index a8badeb..fc00942 100644
--- a/src/diagrams/dac/dac.rs
+++ b/src/diagrams/dac/dac.rs
@@ -170,7 +170,7 @@ impl Dac
pub fn solution(&self) -> Solution {
let p = self.circuit_probability().to_f64();
- Solution::new(f128!(p), f128!(p), self.compile_time)
+ Solution::new(F128!(p), F128!(p), self.compile_time)
}
/// Updates the values of the distributions to the given values
@@ -190,9 +190,9 @@ impl Dac
/// Resets the path value of each node
pub fn zero_paths(&mut self) {
for node in (0..self.nodes.len()-1).map(NodeIndex) {
- self[node].set_path_value(f128!(0.0));
+ self[node].set_path_value(F128!(0.0));
}
- self.nodes.last_mut().unwrap().set_path_value(f128!(1.0));
+ self.nodes.last_mut().unwrap().set_path_value(F128!(1.0));
}
/// Evaluates the circuits, layer by layer (starting from the input distribution, then layer 0)
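
The `zero_paths` convention above is the usual backpropagation seed: the root node (the last one) gets path value 1, since d(root)/d(root) = 1, and every other node starts at 0 and accumulates contributions during a backward sweep. A toy sketch of that sweep over plain f64 (hypothetical types, not the crate's own `Dac`; it assumes nodes are topologically ordered with the root last, as in `zero_paths`):

```rust
struct ToyNode {
    value: f64,         // forward value of the node
    path_value: f64,    // d(root)/d(this node), filled in backward
    inputs: Vec<usize>, // indices of child nodes
    is_product: bool,   // product node or sum node
}

fn backward(nodes: &mut [ToyNode]) {
    // Seed exactly as zero_paths() does: root = 1.0, everything else = 0.0.
    for n in nodes.iter_mut() {
        n.path_value = 0.0;
    }
    nodes.last_mut().unwrap().path_value = 1.0;
    // Walk from the root toward the inputs.
    for i in (0..nodes.len()).rev() {
        let (pv, val, is_prod) = (nodes[i].path_value, nodes[i].value, nodes[i].is_product);
        for j in nodes[i].inputs.clone() {
            // Sum node: d(parent)/d(child) = 1.
            // Product node: d(parent)/d(child) = product of the other
            // children, i.e. parent value / child value; a value-0 child
            // contributes 0, matching the comment in learner.rs.
            let factor = if is_prod {
                if nodes[j].value != 0.0 { val / nodes[j].value } else { 0.0 }
            } else {
                1.0
            };
            nodes[j].path_value += pv * factor;
        }
    }
}
```
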
diff --git a/src/diagrams/dac/node.rs b/src/diagrams/dac/node.rs
index 532f5f2..80cc984 100644
--- a/src/diagrams/dac/node.rs
+++ b/src/diagrams/dac/node.rs
@@ -69,7 +69,7 @@ impl Node
number_inputs: 0,
layer: 0,
to_remove: true,
- path_value: f128!(1.0),
+ path_value: F128!(1.0),
}
}
@@ -86,7 +86,7 @@ impl Node
number_inputs: 0,
layer: 0,
to_remove: true,
- path_value: f128!(1.0),
+ path_value: F128!(1.0),
}
}
@@ -103,7 +103,7 @@ impl Node
number_inputs: 0,
layer: 0,
to_remove: true,
- path_value: f128!(1.0),
+ path_value: F128!(1.0),
}
}
@@ -120,7 +120,7 @@ impl Node
number_inputs: 0,
layer: 0,
to_remove: true,
- path_value: f128!(1.0),
+ path_value: F128!(1.0),
}
}
diff --git a/src/diagrams/semiring.rs b/src/diagrams/semiring.rs
index e48785c..6c8ea6e 100644
--- a/src/diagrams/semiring.rs
+++ b/src/diagrams/semiring.rs
@@ -24,7 +24,7 @@
use rug::Float;
#[cfg(feature = "tensor")]
use tch::Tensor;
-use crate::common::f128;
+use crate::common::F128;
use std::ops::{AddAssign, MulAssign};
use rug::Assign;
@@ -58,7 +58,7 @@ pub trait SemiRing: AddAssign + MulAssign + Send + Sized + std::fmt::Display {
impl SemiRing for Float {
fn from_f64(value: f64) -> Self {
- f128!(value)
+ F128!(value)
}
fn to_f64(&self) -> f64 {
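
For orientation: `SemiRing` abstracts the value domain the circuit is evaluated over, with `Float` (and, behind the `tensor` feature, `tch::Tensor`) as instances. A self-contained sketch of the same shape for plain `f64`, assuming the trait has no members beyond those visible in this diff:

```rust
use std::ops::{AddAssign, MulAssign};

// Hypothetical sketch: supertraits copied from the hunk header above,
// plus the two conversions shown. The real trait may have more members.
pub trait SemiRing: AddAssign + MulAssign + Send + Sized + std::fmt::Display {
    fn from_f64(value: f64) -> Self;
    fn to_f64(&self) -> f64;
}

impl SemiRing for f64 {
    fn from_f64(value: f64) -> Self { value }
    fn to_f64(&self) -> f64 { *self }
}
```
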
diff --git a/src/learning/learner.rs b/src/learning/learner.rs
index 4ac7fa0..983010c 100644
--- a/src/learning/learner.rs
+++ b/src/learning/learner.rs
@@ -41,7 +41,7 @@ use crate::parser::*;
use crate::Branching;
use rayon::prelude::*;
use super::Learning;
-use crate::common::f128;
+use crate::common::F128;
use super::utils::*;
use super::*;
use rug::{Assign, Float};
@@ -75,7 +75,7 @@ impl Learner {
for distribution in distributions.iter() {
let unsoftmaxed_vector = distribution.iter().map(|p| p.log(std::f64::consts::E)).collect::<Vec<f64>>();
unsoftmaxed_distributions.push(unsoftmaxed_vector);
- grads.push(vec![f128!(0.0); distribution.len()]);
+ grads.push(vec![F128!(0.0); distribution.len()]);
}
// Retrieves which distributions are learned
let learned_distributions = learned_distributions_from_cnf(&inputs[0]);
@@ -92,13 +92,13 @@ impl Learner {
let d = train_dacs.pop().unwrap();
let expected = expected_outputs.pop().unwrap();
train_data.push(d);
- train_expected.push(f128!(expected));
+ train_expected.push(F128!(expected));
}
while !test_dacs.is_empty() {
let d = test_dacs.pop().unwrap();
let expected = expected_test.pop().unwrap();
test_data.push(d);
- test_expected.push(f128!(expected));
+ test_expected.push(F128!(expected));
}
let train_dataset = Dataset::new(train_data, train_expected);
let test_dataset = Dataset::new(test_data, test_expected);
@@ -201,7 +201,7 @@ impl Learner {
// If it is a product node, we need to divide the path value by the value of the child
// This is equivalent to multiplying the values of the other children
// If the value of the child is 0, then the path value is simply 0
- let mut val = f128!(0.0);
+ let mut val = F128!(0.0);
if self.train[query_id][child].value().to_f64() != 0.0 {
val = path_val.clone() * &value / self.train[query_id][child].value().to_f64();
}
@@ -223,7 +223,7 @@ impl Learner {
}
// Compute the gradient contribution for the value used in the node
// and all the other possible values of the distribution (derivative of the softmax)
- let mut sum_other_w = f128!(0.0);
+ let mut sum_other_w = F128!(0.0);
let child_w = self.get_probability(d, v);
for params in (0..self.unsoftmaxed_distributions[d].len()).filter(|i| *i != v) {
let weight = self.get_probability(d, params);
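
The "derivative of the softmax" mentioned in the comment is the standard Jacobian

$$\frac{\partial s_i}{\partial w_j} = s_i\,(\delta_{ij} - s_j), \qquad s_i = \frac{e^{w_i}}{\sum_k e^{w_k}},$$

so the weight actually used in the node (i = j = v) contributes with factor $s_v(1 - s_v)$, while every other parameter contributes $-s_v\,s_j$; that diagonal/off-diagonal split is what `child_w` and the `sum_other_w` loop over the remaining parameters compute.
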
diff --git a/src/learning/utils.rs b/src/learning/utils.rs
index 50251c3..4ae870e 100644
--- a/src/learning/utils.rs
+++ b/src/learning/utils.rs
@@ -23,7 +23,7 @@ use crate::core::components::ComponentExtractor;
use crate::propagator::Propagator;
use crate::parser::*;
use crate::solvers::*;
-use crate::{common::f128, diagrams::{semiring::SemiRing, dac::dac::Dac}};
+use crate::{common::F128, diagrams::{semiring::SemiRing, dac::dac::Dac}};
/// Calculates the softmax (the normalized exponential) function, which is a generalization of the
/// logistic function to multiple dimensions.
@@ -34,7 +34,7 @@ use crate::{common::f128, diagrams::{semiring::SemiRing, dac::dac::Dac}};
/// From https://docs.rs/compute/latest/src/compute/functions/statistical.rs.html#43-46
pub fn softmax(x: &[f64]) -> Vec<Float> {
let sum_exp: f64 = x.iter().map(|i| i.exp()).sum();
- x.iter().map(|i| f128!(i.exp() / sum_exp)).collect()
+ x.iter().map(|i| F128!(i.exp() / sum_exp)).collect()
}
/// Generates a vector of optional Dacs from a list of input files
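
A quick numeric check of the `softmax` behavior, written against plain `f64` so it runs without `rug` (the real function returns 113-bit `Float`s):

```rust
// Plain-f64 analogue of the softmax above, for illustration only.
fn softmax_f64(x: &[f64]) -> Vec<f64> {
    let sum_exp: f64 = x.iter().map(|i| i.exp()).sum();
    x.iter().map(|i| i.exp() / sum_exp).collect()
}

fn main() {
    let s = softmax_f64(&[1.0, 2.0, 3.0]);
    // Entries are positive and sum to 1; here roughly [0.090, 0.245, 0.665].
    assert!((s.iter().sum::<f64>() - 1.0).abs() < 1e-12);
    println!("{:?}", s);
}
```

One caveat worth noting: both versions exponentiate directly, so inputs of large magnitude can overflow; the usual remedy is to subtract `max(x)` from every entry first, which leaves the result unchanged.
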
diff --git a/src/preprocess.rs b/src/preprocess.rs
index 7d1cba4..0f8631f 100644
--- a/src/preprocess.rs
+++ b/src/preprocess.rs
@@ -20,7 +20,7 @@ use crate::core::components::{ComponentExtractor, ComponentIndex};
use crate::core::problem::*;
use crate::propagator::Propagator;
use rug::Float;
-use crate::common::f128;
+use crate::common::F128;
pub struct Preprocessor<'b>
{
@@ -48,7 +48,7 @@ where
}
pub fn preprocess(&mut self) -> Option<Float> {
- let mut p = f128!(1.0);
+ let mut p = F128!(1.0);
for variable in self.problem.variables_iter() {
if self.problem[variable].is_probabilitic() && self.problem[variable].weight().unwrap() == 1.0 {
diff --git a/src/propagator.rs b/src/propagator.rs
index 57ba275..56710cc 100644
--- a/src/propagator.rs
+++ b/src/propagator.rs
@@ -47,7 +47,7 @@
use search_trail::{StateManager, UsizeManager, ReversibleUsize};
-use crate::common::f128;
+use crate::common::F128;
use crate::core::components::{ComponentIndex, ComponentExtractor};
use crate::core::problem::{ClauseIndex, DistributionIndex, Problem, VariableIndex};
use rug::{Assign, Float};
@@ -81,7 +81,7 @@ impl Propagator {
assignments: vec![],
base_assignments: state.manage_usize(0),
unconstrained_distributions: vec![],
- propagation_prob: f128!(0.0),
+ propagation_prob: F128!(0.0),
forced: 0,
}
}
@@ -149,7 +149,7 @@ impl Propagator {
g[distribution].set_unconstrained(state);
if g[distribution].number_false(state) != 0 {
self.unconstrained_distributions.push(distribution);
- let mut p = f128!(0.0);
+ let mut p = F128!(0.0);
for weight in g[distribution].iter_variables().filter(|v| !g[*v].is_fixed(state)).map(|v| g[v].weight().unwrap()) {
p += weight;
}
diff --git a/src/solvers/solver.rs b/src/solvers/solver.rs
index ddaeb52..347bc78 100644
--- a/src/solvers/solver.rs
+++ b/src/solvers/solver.rs
@@ -132,7 +132,7 @@ impl Solver {
let mut preprocessor = Preprocessor::new(&mut self.problem, &mut self.state, &mut self.propagator, &mut self.component_extractor);
let preproc = preprocessor.preprocess();
if preproc.is_none() {
- return Solution::new(f128!(0.0), f128!(0.0), self.start.elapsed().as_secs());
+ return Solution::new(F128!(0.0), F128!(0.0), self.start.elapsed().as_secs());
}
self.preproc_in = Some(preproc.unwrap());
self.preproc_out = Some(1.0 - self.problem.distributions_iter().map(|d| {
@@ -142,7 +142,7 @@ impl Solver {
self.state.restore_state();
if self.problem.number_clauses() == 0 {
let lb = self.preproc_in.clone().unwrap();
- let ub = f128!(1.0 - self.preproc_out.unwrap());
+ let ub = F128!(1.0 - self.preproc_out.unwrap());
return Solution::new(lb, ub, self.start.elapsed().as_secs());
}
let max_probability = self.problem.distributions_iter().map(|d| self.problem[d].remaining(&self.state)).product::<f64>();
@@ -185,9 +185,9 @@ impl Solver {
self.search_cache.clear();
}
self.state.save_state();
- let mut p_in = f128!(1.0);
- let mut p_out = f128!(1.0);
- let mut maximum_probability = f128!(1.0);
+ let mut p_in = F128!(1.0);
+ let mut p_out = F128!(1.0);
+ let mut maximum_probability = F128!(1.0);
for distribution in self.component_extractor.component_distribution_iter(component) {
if self.problem[distribution].is_constrained(&self.state) {
maximum_probability *= self.problem[distribution].remaining(&self.state);
@@ -206,14 +206,14 @@ impl Solver {
// remaining of the components. This way we always produce a valid lower/upper
// bound.
if self.start.elapsed().as_secs() >= self.timeout {
- return ((f128!(0.0), f128!(0.0)), level - 1);
+ return ((F128!(0.0), F128!(0.0)), level - 1);
}
let sub_maximum_probability = self.component_extractor[sub_component].max_probability();
assert!(0.0 <= sub_maximum_probability && sub_maximum_probability <= 1.0);
let (sub_problem, backtrack_level) = self.get_bounds_from_cache(sub_component, new_bound_factor, level, discrepancy);
if backtrack_level != level {
self.restore();
- return ((f128!(0.0), maximum_probability), backtrack_level);
+ return ((F128!(0.0), maximum_probability), backtrack_level);
}
// If any of the component is not fully explored, then so is the node
let (sub_p_in, sub_p_out) = sub_problem.bounds();
@@ -265,9 +265,9 @@ impl Solver {
self.statistics.or_node();
let maximum_probability = self.component_extractor[component].max_probability();
// Stores the accumulated probability of the found models in the sub-problem
- let mut p_in = f128!(0.0);
+ let mut p_in = F128!(0.0);
// Stores the accumulated probability of the found non-models in the sub-problem
- let mut p_out = f128!(0.0);
+ let mut p_out = F128!(0.0);
// When a sub-problem is UNSAT, this is the factor that must be used for the
// computation of p_out
let unsat_factor = maximum_probability / self.problem[distribution].remaining(&self.state);
@@ -296,7 +296,7 @@ impl Solver {
// The clause learning scheme tells us that we need to backtrack
// non-chronologically. There are no models in this sub-problem
self.restore();
- return (SearchCacheEntry::new((f128!(0.0), f128!(maximum_probability)), usize::MAX, Some(distribution)), backtrack_level);
+ return (SearchCacheEntry::new((F128!(0.0), F128!(maximum_probability)), usize::MAX, Some(distribution)), backtrack_level);
}
},
Ok(_) => {
@@ -320,7 +320,7 @@ impl Solver {
let ((child_p_in, child_p_out), backtrack_level) = self.solve_components(component, level + 1, bound_factor, new_discrepancy);
if backtrack_level != level {
self.restore();
- return (SearchCacheEntry::new((f128!(0.0), f128!(maximum_probability)), usize::MAX, Some(distribution)), backtrack_level);
+ return (SearchCacheEntry::new((F128!(0.0), F128!(maximum_probability)), usize::MAX, Some(distribution)), backtrack_level);
}
p_in += child_p_in * &p;
p_out += child_p_out * &p;
@@ -337,7 +337,7 @@ impl Solver {
let cache_entry = SearchCacheEntry::new((p_in, p_out), discrepancy, Some(distribution));
(cache_entry, level)
} else {
- (SearchCacheEntry::new((f128!(1.0), f128!(0.0)), usize::MAX, None), level)
+ (SearchCacheEntry::new((F128!(1.0), F128!(0.0)), usize::MAX, None), level)
}
}
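
Taken together, the `p_in`/`p_out` bookkeeping in this file implements anytime bounds: within a (sub-)problem of total mass M (`maximum_probability`), `p_in` accumulates the probability mass of proven models and `p_out` that of proven non-models, so the true probability p always satisfies

$$p_{\text{in}} \;\le\; p \;\le\; M - p_{\text{out}}.$$

That reading (an inference from this diff alone, but consistent with `ub = F128!(1.0 - self.preproc_out.unwrap())`, where M = 1) explains why the timeout branch can return `(F128!(0.0), F128!(0.0))` immediately: zero found mass on both sides still yields the valid, if trivial, bounds [0, M].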