
Commit

update f128! macro name to F128! since newer versions of Rust have an f128 (unstable) type
AlexandreDubray committed May 6, 2024
1 parent d905f38 commit 239ed84
Showing 10 changed files with 37 additions and 37 deletions.
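
For context, the clash the commit title refers to: recent nightly toolchains reserve f128 as an unstable primitive type, so a lowercase f128! macro is easy to confuse with it. A minimal sketch of the naming overlap (an assumption for illustration, not code from this repository; requires a nightly toolchain):

#![feature(f128)]

fn main() {
    // `f128` now names a primitive type, while this project used `f128!`
    // as a macro; renaming the macro to `F128!` avoids the visual collision.
    let x: f128 = 1.0;
    let _ = x;
}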
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -19,7 +19,7 @@ rand = ">=0.8.5"
 nalgebra = ">=0.31.4"
 rug = { version = ">=1.18.0", features = ["float", "integer"] }
 sysinfo = ">=0.28.1"
-search_trail = ">=0.1.2"
+search_trail = ">=0.1.3"
 peak_alloc = ">=0.2.0"
 chrono = ">=0.4.31"
 rayon = ">=1.8.0"
4 changes: 2 additions & 2 deletions src/common.rs
@@ -13,12 +13,12 @@
 //
 //You should have received a copy of the GNU Affero General Public License
 //along with this program. If not, see <http://www.gnu.org/licenses/>.
-macro_rules! f128 {
+macro_rules! F128 {
     ($v:expr) => {
         Float::with_val(113, $v)
     };
 }
 
 pub const FLOAT_CMP_THRESHOLD: f64 = 0.0000001;
 
-pub(crate) use f128;
+pub(crate) use F128;
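
A standalone sketch of what the renamed macro provides (illustrative usage, not part of the commit; rug is already a dependency per Cargo.toml above). The 113 bits match the significand precision of an IEEE 754 binary128 value, which is what the name alludes to:

use rug::Float;

macro_rules! F128 {
    ($v:expr) => {
        // a 113-bit-precision rug Float, i.e. quad-precision arithmetic
        Float::with_val(113, $v)
    };
}

fn main() {
    let p = F128!(0.5);
    let q = p * F128!(0.25);
    assert_eq!(q.to_f64(), 0.125); // exactly representable, so this holds
}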
6 changes: 3 additions & 3 deletions src/diagrams/dac/dac.rs
@@ -170,7 +170,7 @@ impl<R> Dac<R>
 
     pub fn solution(&self) -> Solution {
         let p = self.circuit_probability().to_f64();
-        Solution::new(f128!(p), f128!(p), self.compile_time)
+        Solution::new(F128!(p), F128!(p), self.compile_time)
     }
 
     /// Updates the values of the distributions to the given values
@@ -190,9 +190,9 @@ impl<R> Dac<R>
     /// Resets the path value of each node
     pub fn zero_paths(&mut self) {
         for node in (0..self.nodes.len()-1).map(NodeIndex) {
-            self[node].set_path_value(f128!(0.0));
+            self[node].set_path_value(F128!(0.0));
         }
-        self.nodes.last_mut().unwrap().set_path_value(f128!(1.0));
+        self.nodes.last_mut().unwrap().set_path_value(F128!(1.0));
     }
 
     /// Evaluates the circuits, layer by layer (starting from the input distribution, then layer 0)
8 changes: 4 additions & 4 deletions src/diagrams/dac/node.rs
@@ -69,7 +69,7 @@ impl<R> Node<R>
             number_inputs: 0,
             layer: 0,
             to_remove: true,
-            path_value: f128!(1.0),
+            path_value: F128!(1.0),
         }
     }
 
@@ -86,7 +86,7 @@ impl<R> Node<R>
             number_inputs: 0,
             layer: 0,
             to_remove: true,
-            path_value: f128!(1.0),
+            path_value: F128!(1.0),
         }
     }
 
@@ -103,7 +103,7 @@ impl<R> Node<R>
             number_inputs: 0,
             layer: 0,
             to_remove: true,
-            path_value: f128!(1.0),
+            path_value: F128!(1.0),
         }
     }
 
@@ -120,7 +120,7 @@ impl<R> Node<R>
             number_inputs: 0,
             layer: 0,
             to_remove: true,
-            path_value: f128!(1.0),
+            path_value: F128!(1.0),
         }
     }
 
4 changes: 2 additions & 2 deletions src/diagrams/semiring.rs
@@ -24,7 +24,7 @@
 use rug::Float;
 #[cfg(feature = "tensor")]
 use tch::Tensor;
-use crate::common::f128;
+use crate::common::F128;
 use std::ops::{AddAssign, MulAssign};
 use rug::Assign;
 
@@ -58,7 +58,7 @@ pub trait SemiRing: AddAssign + MulAssign + Send + Sized + std::fmt::Display {
 impl SemiRing for Float {
 
     fn from_f64(value: f64) -> Self {
-        f128!(value)
+        F128!(value)
     }
 
     fn to_f64(&self) -> f64 {
12 changes: 6 additions & 6 deletions src/learning/learner.rs
@@ -41,7 +41,7 @@ use crate::parser::*;
 use crate::Branching;
 use rayon::prelude::*;
 use super::Learning;
-use crate::common::f128;
+use crate::common::F128;
 use super::utils::*;
 use super::*;
 use rug::{Assign, Float};
@@ -75,7 +75,7 @@ impl <const S: bool> Learner<S> {
         for distribution in distributions.iter() {
             let unsoftmaxed_vector = distribution.iter().map(|p| p.log(std::f64::consts::E)).collect::<Vec<f64>>();
             unsoftmaxed_distributions.push(unsoftmaxed_vector);
-            grads.push(vec![f128!(0.0); distribution.len()]);
+            grads.push(vec![F128!(0.0); distribution.len()]);
         }
         // Retrieves which distributions are learned
         let learned_distributions = learned_distributions_from_cnf(&inputs[0]);
@@ -92,13 +92,13 @@ impl <const S: bool> Learner<S> {
             let d = train_dacs.pop().unwrap();
             let expected = expected_outputs.pop().unwrap();
             train_data.push(d);
-            train_expected.push(f128!(expected));
+            train_expected.push(F128!(expected));
         }
         while !test_dacs.is_empty() {
             let d = test_dacs.pop().unwrap();
             let expected = expected_test.pop().unwrap();
             test_data.push(d);
-            test_expected.push(f128!(expected));
+            test_expected.push(F128!(expected));
         }
         let train_dataset = Dataset::new(train_data, train_expected);
         let test_dataset = Dataset::new(test_data, test_expected);
@@ -201,7 +201,7 @@ impl <const S: bool> Learner<S> {
                 // If it is a product node, we need to divide the path value by the value of the child
                 // This is equivalent to multiplying the values of the other children
                 // If the value of the child is 0, then the path value is simply 0
-                let mut val = f128!(0.0);
+                let mut val = F128!(0.0);
                 if self.train[query_id][child].value().to_f64() != 0.0 {
                     val = path_val.clone() * &value / self.train[query_id][child].value().to_f64();
                 }
@@ -223,7 +223,7 @@ impl <const S: bool> Learner<S> {
                 }
                 // Compute the gradient contribution for the value used in the node
                 // and all the other possible values of the distribution (derivative of the softmax)
-                let mut sum_other_w = f128!(0.0);
+                let mut sum_other_w = F128!(0.0);
                 let child_w = self.get_probability(d, v);
                 for params in (0..self.unsoftmaxed_distributions[d].len()).filter(|i| *i != v) {
                     let weight = self.get_probability(d, params);
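
The "derivative of the softmax" mentioned in the comment above is the standard Jacobian identity: for p = softmax(w), dp[i]/dw[j] = p[i] * (delta(i,j) - p[j]). A self-contained sketch of that identity (illustrative only, not code from this commit):

// For p = softmax(w): d p[i] / d w[j] = p[i] * ((i == j ? 1 : 0) - p[j]).
fn softmax_jacobian(p: &[f64]) -> Vec<Vec<f64>> {
    (0..p.len())
        .map(|i| {
            (0..p.len())
                .map(|j| p[i] * (if i == j { 1.0 } else { 0.0 } - p[j]))
                .collect()
        })
        .collect()
}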
4 changes: 2 additions & 2 deletions src/learning/utils.rs
@@ -23,7 +23,7 @@ use crate::core::components::ComponentExtractor;
 use crate::propagator::Propagator;
 use crate::parser::*;
 use crate::solvers::*;
-use crate::{common::f128, diagrams::{semiring::SemiRing, dac::dac::Dac}};
+use crate::{common::F128, diagrams::{semiring::SemiRing, dac::dac::Dac}};
 
 /// Calculates the softmax (the normalized exponential) function, which is a generalization of the
 /// logistic function to multiple dimensions.
@@ -34,7 +34,7 @@ use crate::{common::F128, diagrams::{semiring::SemiRing, dac::dac::Dac}};
 /// From https://docs.rs/compute/latest/src/compute/functions/statistical.rs.html#43-46
 pub fn softmax(x: &[f64]) -> Vec<Float> {
     let sum_exp: f64 = x.iter().map(|i| i.exp()).sum();
-    x.iter().map(|i| f128!(i.exp() / sum_exp)).collect()
+    x.iter().map(|i| F128!(i.exp() / sum_exp)).collect()
 }
 
 /// Generates a vector of optional Dacs from a list of input files
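
A quick sanity check of the softmax above (illustrative usage, assuming the function is in scope; rug's Float provides to_f64):

fn main() {
    let probs = softmax(&[0.0, 1.0, 2.0]);
    let total: f64 = probs.iter().map(|p| p.to_f64()).sum();
    assert!((total - 1.0).abs() < 1e-9); // normalized exponentials sum to 1
}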
4 changes: 2 additions & 2 deletions src/preprocess.rs
@@ -20,7 +20,7 @@ use crate::core::components::{ComponentExtractor, ComponentIndex};
 use crate::core::problem::*;
 use crate::propagator::Propagator;
 use rug::Float;
-use crate::common::f128;
+use crate::common::F128;
 
 pub struct Preprocessor<'b>
 {
@@ -48,7 +48,7 @@ where
     }
 
     pub fn preprocess(&mut self) -> Option<Float> {
-        let mut p = f128!(1.0);
+        let mut p = F128!(1.0);
 
         for variable in self.problem.variables_iter() {
             if self.problem[variable].is_probabilitic() && self.problem[variable].weight().unwrap() == 1.0 {
6 changes: 3 additions & 3 deletions src/propagator.rs
@@ -47,7 +47,7 @@
 
 use search_trail::{StateManager, UsizeManager, ReversibleUsize};
 
-use crate::common::f128;
+use crate::common::F128;
 use crate::core::components::{ComponentIndex, ComponentExtractor};
 use crate::core::problem::{ClauseIndex, DistributionIndex, Problem, VariableIndex};
 use rug::{Assign, Float};
@@ -81,7 +81,7 @@ impl Propagator {
             assignments: vec![],
             base_assignments: state.manage_usize(0),
             unconstrained_distributions: vec![],
-            propagation_prob: f128!(0.0),
+            propagation_prob: F128!(0.0),
             forced: 0,
         }
     }
@@ -149,7 +149,7 @@ impl Propagator {
         g[distribution].set_unconstrained(state);
         if g[distribution].number_false(state) != 0 {
             self.unconstrained_distributions.push(distribution);
-            let mut p = f128!(0.0);
+            let mut p = F128!(0.0);
             for weight in g[distribution].iter_variables().filter(|v| !g[*v].is_fixed(state)).map(|v| g[v].weight().unwrap()) {
                 p += weight;
             }
24 changes: 12 additions & 12 deletions src/solvers/solver.rs
@@ -132,7 +132,7 @@ impl<B: BranchingDecision, const S: bool> Solver<B, S> {
         let mut preprocessor = Preprocessor::new(&mut self.problem, &mut self.state, &mut self.propagator, &mut self.component_extractor);
         let preproc = preprocessor.preprocess();
         if preproc.is_none() {
-            return Solution::new(f128!(0.0), f128!(0.0), self.start.elapsed().as_secs());
+            return Solution::new(F128!(0.0), F128!(0.0), self.start.elapsed().as_secs());
         }
         self.preproc_in = Some(preproc.unwrap());
         self.preproc_out = Some(1.0 - self.problem.distributions_iter().map(|d| {
@@ -142,7 +142,7 @@ impl<B: BranchingDecision, const S: bool> Solver<B, S> {
         self.state.restore_state();
         if self.problem.number_clauses() == 0 {
             let lb = self.preproc_in.clone().unwrap();
-            let ub = f128!(1.0 - self.preproc_out.unwrap());
+            let ub = F128!(1.0 - self.preproc_out.unwrap());
             return Solution::new(lb, ub, self.start.elapsed().as_secs());
         }
         let max_probability = self.problem.distributions_iter().map(|d| self.problem[d].remaining(&self.state)).product::<f64>();
@@ -185,9 +185,9 @@ impl<B: BranchingDecision, const S: bool> Solver<B, S> {
             self.search_cache.clear();
         }
         self.state.save_state();
-        let mut p_in = f128!(1.0);
-        let mut p_out = f128!(1.0);
-        let mut maximum_probability = f128!(1.0);
+        let mut p_in = F128!(1.0);
+        let mut p_out = F128!(1.0);
+        let mut maximum_probability = F128!(1.0);
         for distribution in self.component_extractor.component_distribution_iter(component) {
             if self.problem[distribution].is_constrained(&self.state) {
                 maximum_probability *= self.problem[distribution].remaining(&self.state);
@@ -206,14 +206,14 @@ impl<B: BranchingDecision, const S: bool> Solver<B, S> {
                 // remaining of the components. This way we always produce a valid lower/upper
                 // bound.
                 if self.start.elapsed().as_secs() >= self.timeout {
-                    return ((f128!(0.0), f128!(0.0)), level - 1);
+                    return ((F128!(0.0), F128!(0.0)), level - 1);
                 }
                 let sub_maximum_probability = self.component_extractor[sub_component].max_probability();
                 assert!(0.0 <= sub_maximum_probability && sub_maximum_probability <= 1.0);
                 let (sub_problem, backtrack_level) = self.get_bounds_from_cache(sub_component, new_bound_factor, level, discrepancy);
                 if backtrack_level != level {
                     self.restore();
-                    return ((f128!(0.0), maximum_probability), backtrack_level);
+                    return ((F128!(0.0), maximum_probability), backtrack_level);
                 }
                 // If any of the component is not fully explored, then so is the node
                 let (sub_p_in, sub_p_out) = sub_problem.bounds();
@@ -265,9 +265,9 @@ impl<B: BranchingDecision, const S: bool> Solver<B, S> {
         self.statistics.or_node();
         let maximum_probability = self.component_extractor[component].max_probability();
         // Stores the accumulated probability of the found models in the sub-problem
-        let mut p_in = f128!(0.0);
+        let mut p_in = F128!(0.0);
         // Stores the accumulated probability of the found non-models in the sub-problem
-        let mut p_out = f128!(0.0);
+        let mut p_out = F128!(0.0);
         // When a sub-problem is UNSAT, this is the factor that must be used for the
         // computation of p_out
         let unsat_factor = maximum_probability / self.problem[distribution].remaining(&self.state);
@@ -296,7 +296,7 @@ impl<B: BranchingDecision, const S: bool> Solver<B, S> {
                     // The clause learning scheme tells us that we need to backtrack
                     // non-chronologically. There are no models in this sub-problem
                     self.restore();
-                    return (SearchCacheEntry::new((f128!(0.0), f128!(maximum_probability)), usize::MAX, Some(distribution)), backtrack_level);
+                    return (SearchCacheEntry::new((F128!(0.0), F128!(maximum_probability)), usize::MAX, Some(distribution)), backtrack_level);
                 }
             },
             Ok(_) => {
@@ -320,7 +320,7 @@ impl<B: BranchingDecision, const S: bool> Solver<B, S> {
                 let ((child_p_in, child_p_out), backtrack_level) = self.solve_components(component, level + 1, bound_factor, new_discrepancy);
                 if backtrack_level != level {
                     self.restore();
-                    return (SearchCacheEntry::new((f128!(0.0), f128!(maximum_probability)), usize::MAX, Some(distribution)), backtrack_level);
+                    return (SearchCacheEntry::new((F128!(0.0), F128!(maximum_probability)), usize::MAX, Some(distribution)), backtrack_level);
                 }
                 p_in += child_p_in * &p;
                 p_out += child_p_out * &p;
@@ -337,7 +337,7 @@ impl<B: BranchingDecision, const S: bool> Solver<B, S> {
             let cache_entry = SearchCacheEntry::new((p_in, p_out), discrepancy, Some(distribution));
             (cache_entry, level)
         } else {
-            (SearchCacheEntry::new((f128!(1.0), f128!(0.0)), usize::MAX, None), level)
+            (SearchCacheEntry::new((F128!(1.0), F128!(0.0)), usize::MAX, None), level)
         }
     }
