Skip to content

Commit

Permalink
hopefully some more speed
Browse files Browse the repository at this point in the history
  • Loading branch information
clairevalva committed Jan 30, 2024
1 parent 70366c9 commit 54a944f
Show file tree
Hide file tree
Showing 3 changed files with 31 additions and 14 deletions.
10 changes: 7 additions & 3 deletions src/domodel.jl
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ struct eigsNLSA
w::Vector{Float64}
end

function doNLSA(params::paramsNLSA)
function doNLSA(params::paramsNLSA, kernel_choice)
X = params.srcdata
NN = params.NN
candidate_ϵs = params.candidate_ϵs
Expand All @@ -40,8 +40,12 @@ function doNLSA(params::paramsNLSA)
D, DN = distNN(X, NN)
print("computing bandwidth")
useϵ, m̂ = tune_bandwidth(D, DN, NN_bw, nT, candidate_ϵs)
print("sparceW")
W = sparseW_sepband(X, useϵ, m̂, D, DN, NN = NN, sym = true)
print("sparseW")
if kernel_choice == "cone"
W = sparseW_cone(X, useϵ, m̂, D, DN, NN = NN, sym = true)
else
W = sparseW_sepband(X, useϵ, m̂, D, DN, NN = NN, sym = true)
end
W = sparse(W)
print("normW")
P = normW(W)
Expand Down
11 changes: 8 additions & 3 deletions src/modelcomponents.jl
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
# Public API of this module: bandwidth tuning, sparse kernel-matrix
# constructors, operator normalization, and the diffusion-eigenfunction
# pipeline (eigendecomposition, SVD, projection utilities).
export tune_bandwidth
export sparseW_sepband
export sparseW_cone
export normW
export computeDiffusionEig
export diffSVD
export diffProjection
export projectDiffEig


"""
tune_bandwidth(D::Matrix{Float64},
Expand Down Expand Up @@ -146,8 +148,9 @@ end
- sym: boolean for optional operator symmetrization
"""

function sparseW_cone(X::Matrix{Float64}, eps::Float64, m̂::Float64,
D::Matrix{Float64}, N::Matrix{Integer}, dt::Float64; NN::Integer = 0, sym::Bool = true )
D::Matrix{Float64}, N::Matrix{Integer}; NN::Integer = 0, sym::Bool = true )
# get distances
# D, N = distNN(X, NN, usenorm = usenorm)

Expand All @@ -158,18 +161,20 @@ function sparseW_cone(X::Matrix{Float64}, eps::Float64, m̂::Float64,

# point_density, _ = est_ind_bandwidth(D, NN, nT)
# m = m̂ / 2
# should be moving things appropriately for this kernel

# W = zeros(Float64, nT, nT)
# believing reddit for sparse matrices purposes
rows = Int64[]
cols = Int64[]
vals = Float64[]

for i = 1:nT
for i = 2:nT
for j = 1:NN
k = N[i,j]
push!(rows, i)
push!(cols, k)
iszero(k - 1) && continue
if i != k
# wik = sepbw_kernel(X[i,:], X[k,:], point_density[i], point_density[k], m, γ = eps)

Expand All @@ -187,7 +192,7 @@ function sparseW_cone(X::Matrix{Float64}, eps::Float64, m̂::Float64,
end
end

W = dropzeros(sparse(rows, cols, vals, nT, nT))
W = dropzeros(sparse(rows, cols, vals, nT - 1, nT - 1))

if sym
if ~all(isapprox.(W - W', 0; rtol=1e-16))
Expand Down
24 changes: 16 additions & 8 deletions src/utils.jl
Original file line number Diff line number Diff line change
Expand Up @@ -56,23 +56,31 @@ end
- usenorm: specify the distance norm to use, defaults to l2
"""
function distNN(X::Matrix{Float64}, NN::Integer = 0; usenorm::Function = norm)
function distNN(X::Matrix{Float64}, NN::Integer = 0; usenorm = euclidean)
_, nT = size(X)
D = zeros(Float64, nT, nT)
N = zeros(Int, nT, nT)

if NN == 0
# if no nearest neighbors specified, keep all of them
println("here")
NN = nT
end
for i = 1:nT
N[:,i] = collect(1:nT)
for j = 1:(i-1)
D[j,i] = euclidean(X[:,j], X[:,i])
end
end
else

for i = 1:nT
d = zeros(Float64, nT)
for j = 1:nT
d[j] = usenorm(X[:, i] .- X[:, j])
for i = 1:nT
d = zeros(Float64, nT)
for j = 1:nT
d[j] = usenorm(X[:, i] .- X[:, j])
end
inds = sortperm(d)[1:NN]
D[i,1:NN], N[i,1:NN] = d[inds], inds
end
inds = sortperm(d)[1:NN]
D[i,1:NN], N[i,1:NN] = d[inds], inds
end
# then have that D(i,j), d(N(j))
return D, N
Expand Down

0 comments on commit 54a944f

Please sign in to comment.