Sparse optimizations for UniformABW sampling #329

Merged
15 commits merged on Aug 21, 2024
52 changes: 50 additions & 2 deletions include/convex_bodies/hpolytope.h
@@ -567,6 +567,44 @@ class HPolytope {
}



template <typename update_parameters, typename set_type, typename AA_type>
std::pair<NT, int> line_positive_intersect(Point const& r,
                                           VT& Ar,
                                           VT& Av,
                                           NT const& lambda_prev,
                                           set_type& distances_set,
                                           AA_type const& AA,
                                           update_parameters& params) const
{
    NT inner_prev = params.inner_vi_ak;

    NT* Av_data = Av.data();
    const NT* b_data = b.data();

    // Only the facets with a nonzero entry in column facet_prev of AA have their
    // inner product with v changed by the reflection, so a single sparse column
    // scan is enough to refresh the cached quantities.
    for (Eigen::SparseMatrix<double>::InnerIterator it(AA, params.facet_prev); it; ++it) {

        // update the cached entry of A*v for the affected facet
        *(Av_data + it.row()) += (-2.0 * inner_prev) * it.value();
        // update that facet's stored boundary distance incrementally
        NT val = distances_set.get_val(it.row());
        val += (val - params.moved_dist) * 2.0 * inner_prev * it.value() / *(Av_data + it.row());
        distances_set.change_val(it.row(), val, params.moved_dist);
    }

    // the facet with the smallest stored distance is the next one to be hit
    std::pair<NT, int> ans = distances_set.get_min();
    ans.first -= params.moved_dist;

    params.inner_vi_ak = *(Av_data + ans.second);
    params.facet_prev = ans.second;

/*if(ans.first < 0.00000001) {
std::cout << "distance of 0 found" << std::endl;
exit(0);
}*/
Review comment (Member): Do we need these lines?


return ans;
}
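For context, the gain here comes from the fact that a reflection on facet facet_prev only changes the inner products a_i * v of facets whose normals are not orthogonal to a_{facet_prev}, i.e. the nonzeros of one column of AA. A minimal standalone sketch of that column scan, assuming AA stores A * A^T with unit-normalized rows (the helper name and signature are illustrative, not the library API):

#include <Eigen/Dense>
#include <Eigen/Sparse>

using SpMat = Eigen::SparseMatrix<double>;

// After reflecting v on facet k, refresh the cached products A*v by visiting
// only the nonzeros of column k of AA = A * A^T (rows of A assumed unit-normalized).
void refresh_Av_after_reflection(const SpMat& AA, Eigen::VectorXd& Av, int k)
{
    const double inner_vk = Av[k];                    // a_k . v before the reflection
    for (SpMat::InnerIterator it(AA, k); it; ++it) {  // nonzeros of column k only
        // a_i . v' = a_i . v - 2 (a_k . v) (a_i . a_k)
        Av[it.row()] -= 2.0 * inner_vk * it.value();
    }
}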


template <typename update_parameters>
std::pair<NT, int> line_positive_intersect(Point const& r,
Point const& v,
@@ -953,12 +991,22 @@
}

template <typename update_parameters>
void compute_reflection(Point &v, const Point &, update_parameters const& params) const {

void compute_reflection(Point &v, Point const&, update_parameters const& params) const {
Point a((-2.0 * params.inner_vi_ak) * A.row(params.facet_prev));
v += a;
}

template <typename update_parameters>
auto compute_reflection(Point &v, Point &p, update_parameters const& params) const
    -> std::enable_if_t<std::is_same_v<MT, Eigen::SparseMatrix<NT, Eigen::RowMajor>> && !std::is_same_v<update_parameters, int>, void> { // MT must be in RowMajor format
    NT* v_data = v.pointerToData();
    NT* p_data = p.pointerToData();
    // Walk only the nonzeros of the facet row a_{facet_prev}: reflect v on that
    // facet and shift p so that the lazily tracked position p + moved_dist * v
    // is left unchanged by the reflection.
    for(Eigen::SparseMatrix<double, Eigen::RowMajor>::InnerIterator it(A, params.facet_prev); it; ++it) {
        *(v_data + it.col()) += (-2.0 * params.inner_vi_ak) * it.value();
        *(p_data + it.col()) -= (-2.0 * params.inner_vi_ak * params.moved_dist) * it.value();
    }
}
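A side note on the RowMajor requirement flagged in the comment above: Eigen's InnerIterator walks a sparse matrix along its storage order, so visiting exactly the nonzeros of one facet row a_{facet_prev} in time proportional to that row's nonzeros requires MT to be row-major. A small self-contained illustration (not part of the library):

#include <Eigen/Sparse>
#include <iostream>

int main() {
    Eigen::SparseMatrix<double, Eigen::RowMajor> A(2, 3);
    A.insert(0, 0) = 1.0;
    A.insert(0, 2) = -2.0;
    A.insert(1, 1) = 3.0;
    A.makeCompressed();

    // For a row-major matrix, InnerIterator(A, 0) visits the nonzeros of row 0;
    // with the default column-major layout it would visit column 0 instead.
    for (Eigen::SparseMatrix<double, Eigen::RowMajor>::InnerIterator it(A, 0); it; ++it) {
        std::cout << "A(0," << it.col() << ") = " << it.value() << "\n";
    }
    return 0;
}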

template <typename update_parameters>
NT compute_reflection(Point &v, const Point &, DenseMT const &AE, VT const &AEA, NT const &vEv, update_parameters const &params) const {

20 changes: 10 additions & 10 deletions include/preprocess/rounding_util_functions.hpp
@@ -57,7 +57,7 @@ initialize_chol(MT const& mat)
if constexpr (std::is_same<MT, DenseMT>::value)
{
return std::make_unique<Eigen::LLT<MT>>();
} else if constexpr (std::is_same<MT, SparseMT>::value)
} else if constexpr (std::is_base_of<Eigen::SparseMatrixBase<MT>, MT >::value)
{
auto llt = std::make_unique<Eigen::SimplicialLLT<MT>>();
llt->analyzePattern(mat);
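The recurring change in this file replaces the exact type match std::is_same<MT, SparseMT> with a base-class test, so that any Eigen sparse matrix type (for example a row-major one) is routed to the sparse branch. A condensed sketch of the dispatch pattern, with an illustrative helper name rather than the file's actual API:

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <memory>
#include <type_traits>

// Pick a dense or sparse Cholesky solver at compile time based on the matrix type.
template <typename MT>
auto make_llt()
{
    using DenseMT = Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic>;
    if constexpr (std::is_same<MT, DenseMT>::value)
    {
        return std::make_unique<Eigen::LLT<MT>>();            // dense path
    } else if constexpr (std::is_base_of<Eigen::SparseMatrixBase<MT>, MT>::value)
    {
        return std::make_unique<Eigen::SimplicialLLT<MT>>();  // any sparse matrix type
    }
}

// auto dense_llt  = make_llt<Eigen::MatrixXd>();
// auto sparse_llt = make_llt<Eigen::SparseMatrix<double>>();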
@@ -78,7 +78,7 @@ initialize_chol(MT const& A_trans, MT const& A)
if constexpr (std::is_same<MT, DenseMT>::value)
{
return std::make_unique<Eigen::LLT<MT>>();
} else if constexpr (std::is_same<MT, SparseMT>::value)
} else if constexpr (std::is_base_of<Eigen::SparseMatrixBase<MT>, MT >::value)
{
MT mat = A_trans * A;
return initialize_chol<NT>(mat);
@@ -99,7 +99,7 @@ inline static VT solve_vec(std::unique_ptr<Eigen_lltMT> const& llt,
{
llt->compute(H);
return llt->solve(b);
} else if constexpr (std::is_same<MT, SparseMT>::value)
} else if constexpr (std::is_base_of<Eigen::SparseMatrixBase<MT>, MT >::value)
{
llt->factorize(H);
return llt->solve(b);
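The sparse branch calls factorize(H) rather than compute(H) because the symbolic step (analyzePattern) was already done once in initialize_chol, so only the numeric factorization has to be repeated when the values of H change. A minimal sketch of that reuse, assuming the pattern of H has already been analyzed on the passed-in solver:

#include <Eigen/Dense>
#include <Eigen/Sparse>

using SpMat = Eigen::SparseMatrix<double>;

// Numeric refactorization and solve, reusing the previously analyzed sparsity pattern.
Eigen::VectorXd solve_with_reused_pattern(Eigen::SimplicialLLT<SpMat>& llt,
                                          const SpMat& H,
                                          const Eigen::VectorXd& b)
{
    llt.factorize(H);   // numeric factorization only; analyzePattern(H) was called earlier
    return llt.solve(b);
}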
@@ -122,7 +122,7 @@ solve_mat(std::unique_ptr<Eigen_lltMT> const& llt,
llt->compute(H);
logdetE = llt->matrixL().toDenseMatrix().diagonal().array().log().sum();
return llt->solve(mat);
} else if constexpr (std::is_same<MT, SparseMT>::value)
} else if constexpr (std::is_base_of<Eigen::SparseMatrixBase<MT>, MT >::value)
{
llt->factorize(H);
logdetE = llt->matrixL().nestedExpression().diagonal().array().log().sum();
@@ -143,7 +143,7 @@ inline static void update_Atrans_Diag_A(MT &H, MT const& A_trans,
if constexpr (std::is_same<MT, DenseMT>::value)
{
H.noalias() = A_trans * D * A;
} else if constexpr (std::is_same<MT, SparseMT>::value)
} else if constexpr (std::is_base_of<Eigen::SparseMatrixBase<MT>, MT >::value)
{
H = A_trans * D * A;
} else
@@ -161,7 +161,7 @@ inline static void update_Diag_A(MT &H, diag_MT const& D, MT const& A)
if constexpr (std::is_same<MT, DenseMT>::value)
{
H.noalias() = D * A;
} else if constexpr (std::is_same<MT, SparseMT>::value)
} else if constexpr (std::is_base_of<Eigen::SparseMatrixBase<MT>, MT >::value)
{
H = D * A;
} else
@@ -179,7 +179,7 @@ inline static void update_A_Diag(MT &H, MT const& A, diag_MT const& D)
if constexpr (std::is_same<MT, DenseMT>::value)
{
H.noalias() = A * D;
} else if constexpr (std::is_same<MT, SparseMT>::value)
} else if constexpr (std::is_base_of<Eigen::SparseMatrixBase<MT>, MT >::value)
{
H = A * D;
} else
@@ -198,7 +198,7 @@ get_mat_prod_op(MT const& E)
if constexpr (std::is_same<MT, DenseMT>::value)
{
return std::make_unique<Spectra::DenseSymMatProd<NT>>(E);
} else if constexpr (std::is_same<MT, SparseMT>::value)
} else if constexpr (std::is_base_of<Eigen::SparseMatrixBase<MT>, MT >::value)
{
return std::make_unique<Spectra::SparseSymMatProd<NT>>(E);
} else
Expand Down Expand Up @@ -249,7 +249,7 @@ init_Bmat(MT &B, int const n, MT const& A_trans, MT const& A)
if constexpr (std::is_same<MT, DenseMT>::value)
{
B.resize(n+1, n+1);
} else if constexpr (std::is_same<MT, SparseMT>::value)
} else if constexpr (std::is_base_of<Eigen::SparseMatrixBase<MT>, MT >::value)
{
// Initialize the structure of matrix B
typedef Eigen::Triplet<NT> triplet;
@@ -299,7 +299,7 @@ update_Bmat(MT &B, VT const& AtDe, VT const& d,
B.block(n, 0, 1, n).noalias() = AtDe.transpose();
B(n, n) = d.sum();
B.noalias() += 1e-14 * MT::Identity(n + 1, n + 1);
} else if constexpr (std::is_same<MT, SparseMT>::value)
} else if constexpr (std::is_base_of<Eigen::SparseMatrixBase<MT>, MT >::value)
{
MT AtD_A = AtD * A;
int k = 0;
@@ -73,7 +73,7 @@ struct GaussianAcceleratedBilliardWalk

void computeLcov(const E_type E)
{
if constexpr (std::is_same<E_type, Eigen::SparseMatrix<NT> >::value) {
if constexpr (std::is_base_of<Eigen::SparseMatrixBase<E_type>, E_type >::value) {
Eigen::SimplicialLLT<E_type> lltofE;
lltofE.compute(E);
if (lltofE.info() != Eigen::Success) {
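To round this out, the sparse case of computeLcov factors the covariance E with SimplicialLLT and checks info() before using the factor. A standalone sketch of that pattern on a tiny SPD matrix (illustrative only, not the walk's code); nestedExpression() is the same accessor the sparse solve_mat branch earlier in this diff relies on:

#include <Eigen/Dense>
#include <Eigen/Sparse>
#include <iostream>

int main() {
    using SpMat = Eigen::SparseMatrix<double>;
    SpMat E(2, 2);
    E.insert(0, 0) = 4.0;   // a small symmetric positive definite covariance
    E.insert(0, 1) = 1.0;
    E.insert(1, 0) = 1.0;
    E.insert(1, 1) = 3.0;
    E.makeCompressed();

    Eigen::SimplicialLLT<SpMat> lltOfE;
    lltOfE.compute(E);
    if (lltOfE.info() != Eigen::Success) {
        std::cerr << "Cholesky factorization of the covariance failed\n";
        return 1;
    }
    // Lower-triangular factor L with E = L * L^T.
    SpMat L = lltOfE.matrixL().nestedExpression();
    std::cout << Eigen::MatrixXd(L) << std::endl;
    return 0;
}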