Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Make python3 friendly. #3

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
228 changes: 114 additions & 114 deletions A_opt.py

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -72,10 +72,10 @@ while not converged:
# nij = A_optimize( sij) # call this if not using iterative optimization.
# nij[nij < ncut] = 0 # Omit pairs with impractically small allocations.
nij = round_to_integers( nij)
for i in xrange(nmols):
for i in range(nmols):
if nij[i,i] > 0:
__individual_free_energy__( mols[i], nsamples=nij[i,i], ...)
for j in xrange(i+1, nmols):
for j in range(i+1, nmols):
if nij[i,j] == 0: continue
# Compute the relative free energy between i and j,
# using nij[i,j] number of samples.
Expand Down
44 changes: 22 additions & 22 deletions analysis.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -186,8 +186,8 @@
"# K0 = 5\n",
"# x0 = np.arange( 1., K0+1, 1)\n",
"sij0 = np.diag( x0)\n",
"for i in xrange(K0):\n",
" for j in xrange(i+1, K0):\n",
"for i in range(K0):\n",
" for j in range(i+1, K0):\n",
" sij0[i,j] = sij0[j,i] = x0[j] - x0[i]\n",
"sij0 = matrix( sij0) "
]
Expand Down Expand Up @@ -267,7 +267,7 @@
" u = np.zeros( K)\n",
" u[0] = x0[0]/np.sqrt(K)\n",
" s = np.sqrt(K)*x0[0]\n",
" for i in xrange( 1, K):\n",
" for i in range( 1, K):\n",
" u[i] = u[i-1] + (x0[i] - x0[i-1])/np.sqrt(K-i)\n",
" s += (x0[i] - x0[i-1])*np.sqrt(K-i)\n",
" return u*s"
Expand All @@ -284,7 +284,7 @@
"def distnet_minTrC( xs):\n",
" K = len(xs)\n",
" trC = np.sqrt(K)*xs[0]\n",
" for i in xrange( 1, K):\n",
" for i in range( 1, K):\n",
" trC += np.sqrt(K-i)*(xs[i] - xs[i-1])\n",
" return trC**2"
]
Expand Down Expand Up @@ -765,7 +765,7 @@
" else:\n",
" sCOX2rel[i,i] = sCOX2[a, ROF]\n",
" origins[i] = 'R'\n",
" for j in xrange(i+1, len(allmols)):\n",
" for j in range(i+1, len(allmols)):\n",
" b = allmols[j]\n",
" sCOX2rel[i,j] = sCOX2rel[j,i] = sCOX2[a,b]\n",
" return matrix(sCOX2rel), origins\n",
Expand Down Expand Up @@ -1285,7 +1285,7 @@
" stats2 = results[val][o2]\n",
" ratio = stats2/stats1\n",
" bl = len(ratio)/blocks\n",
" bavg = [ np.mean( ratio[b*bl:(b+1)*bl]) for b in xrange(blocks)]\n",
" bavg = [ np.mean( ratio[b*bl:(b+1)*bl]) for b in range(blocks)]\n",
" return np.mean(ratio), np.std( bavg)/np.sqrt(blocks)"
]
},
Expand Down Expand Up @@ -1829,7 +1829,7 @@
}
],
"source": [
"for j in xrange(4): \n",
"for j in range(4): \n",
" plt.plot( x0[j::4], xML[j::4], 'o')\n",
"plt.gca().set_aspect( 1)\n",
"plt.gca().set_xlabel( r'$x_0$')\n",
Expand Down Expand Up @@ -1945,9 +1945,9 @@
" '''\n",
" K = len(xtrue)\n",
" xijtrue = np.zeros( (K, K), dtype=float)\n",
" for i in xrange( K):\n",
" for i in range( K):\n",
" xijtrue[i,i] = xtrue[i]\n",
" for j in xrange( i+1, K):\n",
" for j in range( i+1, K):\n",
" xijtrue[i,j] = xtrue[i] - xtrue[j]\n",
" xijtrue[j,i] = -xijtrue[i,j]\n",
" xij = np.zeros( (K, K), dtype=float)\n",
Expand All @@ -1958,8 +1958,8 @@
" for t, nsofar in enumerate( ntotals):\n",
" nactual = np.asarray(fij*(nsofar - nprev), dtype=int)\n",
" invsij2 = np.zeros( (K, K), dtype=float)\n",
" for i in xrange(K):\n",
" for j in xrange(i, K):\n",
" for i in range(K):\n",
" for j in range(i, K):\n",
" if nactual[i,j] > 0:\n",
" gij = np.random.normal( xijtrue[i,j], strue[i,j], nactual[i,j])\n",
" xijnew = np.mean( gij)\n",
Expand Down Expand Up @@ -1994,9 +1994,9 @@
" np.random.seed( rseed)\n",
" K = len(xtrue)\n",
" xijtrue = np.zeros( (K, K), dtype=float)\n",
" for i in xrange( K):\n",
" for i in range( K):\n",
" xijtrue[i,i] = xtrue[i]\n",
" for j in xrange( i+1, K):\n",
" for j in range( i+1, K):\n",
" xijtrue[i,j] = xtrue[i] - xtrue[j]\n",
" xijtrue[j,i] = -xijtrue[i,j]\n",
"\n",
Expand All @@ -2020,8 +2020,8 @@
" \n",
" smin, smax = np.inf, 0\n",
" invsij2 = np.zeros( (K, K), dtype=float)\n",
" for i in xrange( K):\n",
" for j in xrange( i, K):\n",
" for i in range( K):\n",
" for j in range( i, K):\n",
" if nactual[i,j] > 0:\n",
" gij = np.random.normal( xijtrue[i,j], strue[i,j], nactual[i,j])\n",
" xijnew = np.mean( gij)\n",
Expand All @@ -2041,8 +2041,8 @@
" if sij[i,j] < smin: smin = sij[i,j]\n",
" \n",
" # For pairs where a difference has not been estimated, we set sij to be a random number between smin and smax\n",
" for i in xrange( K):\n",
" for j in xrange( i, K):\n",
" for i in range( K):\n",
" for j in range( i, K):\n",
"        if nij[i,j] <= 1: # Need at least 2 samples to have meaningful sij estimate\n",
" z = np.random.rand( 1)\n",
" sij[i,j] = smin*(1 - z) + smax*z\n",
Expand Down Expand Up @@ -3285,7 +3285,7 @@
"nijCOX2trj = []\n",
"dGCOX2MSTtrj = []\n",
"dGCOX2Aopttrj = []\n",
"for t in xrange( TRIALS):\n",
"for t in range( TRIALS):\n",
" _dgtrj, _sijtrj, _trCtrj, _nijtrj = iterative_diffnet( dGCOX2, sCOX2, N=10000, rseed=2019 + t)\n",
" dGCOX2trj.append( _dgtrj)\n",
" sCOX2trj.append( _sijtrj)\n",
Expand Down Expand Up @@ -3314,7 +3314,7 @@
}
],
"source": [
"[ dn.sum_upper_triangle( matrix(nijCOX2trj[i][-1])) for i in xrange(TRIALS) ]"
"[ dn.sum_upper_triangle( matrix(nijCOX2trj[i][-1])) for i in range(TRIALS) ]"
]
},
{
Expand Down Expand Up @@ -3351,8 +3351,8 @@
" for t, nij in enumerate( nijs):\n",
" nij = nij/dn.sum_upper_triangle( matrix(nij))\n",
" d = 0.\n",
" for i in xrange( K):\n",
" for j in xrange( i, K):\n",
" for i in range( K):\n",
" for j in range( i, K):\n",
" if nij[i,j]>0: \n",
" d += nij[i,j]*np.log( nij[i,j]/(nopt[i,j] + epsilon))\n",
" D[t] = d\n",
Expand Down Expand Up @@ -3503,7 +3503,7 @@
" sstd = np.zeros( len(ns))\n",
" for k, n in enumerate(ns):\n",
" n = int(n)\n",
" sigma = np.array([ np.std( x[b*n:(b+1)*n]) for b in xrange( blocks) ])\n",
" sigma = np.array([ np.std( x[b*n:(b+1)*n]) for b in range( blocks) ])\n",
" smean[k] = np.mean(sigma)\n",
" sstd[k] = np.std( sigma)\n",
" return ns, smean, sstd\n",
Expand Down
72 changes: 36 additions & 36 deletions diffnet.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,8 @@ def sum_upper_triangle( x):
'''
if not isinstance(x, matrix): x = matrix( x)
s = 0.
for i in xrange( x.size[0]):
for j in xrange( i, x.size[1]):
for i in range( x.size[0]):
for j in range( i, x.size[1]):
s += x[i,j]
return s

Expand All @@ -78,10 +78,10 @@ def lndetC( sij, x, hessian=False):
K = sij.size[0]
M = K*(K+1)/2
F = matrix( 0., (K, K))
for i in xrange( K):
for i in range( K):
# n_{ii}*v_{ii}.v_{ii}^t
F[i,i] += x[i]/(sij[i,i]*sij[i,i])
for j in xrange( i+1, K):
for j in range( i+1, K):
m = measurement_index( i, j, K)
v2 = x[m]/(sij[i,j]*sij[i,j])
F[i,i] += v2
Expand All @@ -90,32 +90,32 @@ def lndetC( sij, x, hessian=False):
C = linalg.inv( F)
fval = -np.log(linalg.det( F))
df = matrix( 0., (1, M))
for i in xrange( K):
for i in range( K):
df[i] = -C[i,i]/(sij[i,i]*sij[i,i])
for j in xrange( i+1, K):
for j in range( i+1, K):
m = measurement_index( i, j, K)
df[m] = (2*C[i,j] - C[i,i] - C[j,j])/(sij[i,j]*sij[i,j])
if not hessian:
return (fval, df)
# Compute the Hessian
d2f = matrix( 0., (M, M))
for i in xrange( K):
for j in xrange( i, K):
for i in range( K):
for j in range( i, K):
# d^2/dx_i dx_j = C_{ij}^2/(s_{ii}^2 s_{jj}^2)
d2f[i, j] = C[i,j]*C[i,j]/(sij[i,i]*sij[i,i]*sij[j,j]*sij[j,j])
d2f[j, i] = d2f[i, j]
for i2 in xrange( K):
for j2 in xrange( i2+1, K):
for i2 in range( K):
for j2 in range( i2+1, K):
m2 = measurement_index( i2, j2, K)
# d^2/dx_id_x(i',j') = (C_{ii'}-C_{ji'})^2/(s_{i'i'}^2 s_{ij}^2)
dC = C[i2,i] - C[j2,i]
d2f[i, m2] = dC*dC/(sij[i,i]*sij[i,i]*sij[i2,j2]*sij[i2,j2])
d2f[m2, i] = d2f[i, m2]
for j in xrange( i+1, K):
for j in range( i+1, K):
m = measurement_index( i, j, K)
invs2 = 1/(sij[i,j]*sij[i,j])
for i2 in xrange( i, K):
for j2 in xrange( i2+1, K):
for i2 in range( i, K):
for j2 in range( i2+1, K):
m2 = measurement_index( i2, j2, K)
# d^2/dx_{ij}dx_{i'j'} =
# (C_{ii'}+C_{jj'}-C_{ji'}-C_{ij'})^2/(s_{i'j'}^2 s_{ij}^2)
Expand Down Expand Up @@ -173,7 +173,7 @@ def A_optimize( sij, nadd=1., nsofar=None, delta=None,
only_include_measurements)
else:
if delta is not None:
raise ValueError, 'Currently delta values are only supported in A-optimal by the conelp method.'
raise ValueError('Currently delta values are only supported in A-optimal by the conelp method.')
if nsofar is None:
nij = A_optimize_sdp( sij)
nij *= nadd
Expand Down Expand Up @@ -212,9 +212,9 @@ def D_optimize( sij):

def F( x=None, z=None):
if x is None:
x0 = matrix( [ sij[i,i] for i in xrange( K) ] +
[ sij[i,j] for i in xrange( K)
for j in xrange( i+1, K) ], (M, 1))
x0 = matrix( [ sij[i,i] for i in range( K) ] +
[ sij[i,j] for i in range( K)
for j in range( i+1, K) ], (M, 1))
return (0, x0)
if z is None:
return lndetC( sij, x)
Expand Down Expand Up @@ -246,8 +246,8 @@ def constant_relative_error( si):
K = len(si)
si = np.sort( si)
sij = np.diag( si)
for i in xrange( K):
for j in xrange( i+1, K):
for i in range( K):
for j in range( i+1, K):
sij[i,j] = sij[j,i] = si[j] - si[i]
return matrix( sij)

Expand All @@ -260,7 +260,7 @@ def A_optimize_const_relative_error( si):

nij = np.zeros( (K, K), dtype=float)
N = nij[0,0] = np.sqrt( K)*si[0]
for i in xrange(K-1):
for i in range(K-1):
nij[i+1, i] = nij[i, i+1] = np.sqrt(K - (i+1))*(si[i+1] - si[i])
N += nij[i, i+1]

Expand All @@ -278,7 +278,7 @@ def D_optimize_const_relative_error( si):
iK = 1./K
nij = np.zeros( (K, K), dtype=float)
nij[0,0] = iK
for i in xrange(K-1):
for i in range(K-1):
nij[i,i+1] = nij[i+1,i] = iK

return matrix( nij)
Expand Down Expand Up @@ -323,10 +323,10 @@ def E_optimize( sij):
# G[i*K + j] = (v_m.v_m^t)[i,j]
G = matrix( 0., (K*K, M+1))
h = matrix( 0., (K, K))
for i in xrange( K):
for i in range( K):
G[i*(K+1), i] = 1./(sij[i,i]*sij[i,i])
G[i*(K+1), M] = 1. # The column-major identity matrix for t.
for j in xrange( i+1, K):
for j in range( i+1, K):
m = measurement_index( i, j, K)
v2 = 1./(sij[i,j]*sij[i,j])
G[j*K + i, m] = G[i*K + j, m] = -v2
Expand Down Expand Up @@ -366,7 +366,7 @@ def Dijkstra_shortest_path( sij):

while q:
d, u = heapq.heappop( q)
for v in xrange( K):
for v in range( K):
suv = sij[u,v] if u != K else sij[v,v]
dp = d + suv
if dp < dist[v]:
Expand Down Expand Up @@ -407,7 +407,7 @@ def E_optimal_tree( sij):
# For each node, compute \sum_{j \elem T_i} a_j, where j runs over
# the set of nodes in the subtree T_i rooted at i, including i itself.
suma = np.zeros( K, dtype=float)
for v in xrange( K):
for v in range( K):
suma[v] += a[v]
u = prev[v]
# Follow up the tree until the root at the origin
Expand All @@ -417,7 +417,7 @@ def E_optimal_tree( sij):

# n_{i\mu_i} = s_{i\mu_i} \lambda \sum_{j\elem T_i} a_j
nij = matrix( 0., (K, K))
for i in xrange( K):
for i in range( K):
j = prev[i]
if j!=K: # not the origin
nij[i,j] = sij[i,j]*suma[i]
Expand Down Expand Up @@ -513,7 +513,7 @@ def MLestimate( xij, invsij2, x0=None, di2=None):
# z_i = \sigma_i^{-2} x_i + \sum_{j\neq i} \sigma_{ij}^{-2} x_{ij}
z = np.sum( invsij2*xij, axis=1)
if di2 is not None and x0 is not None:
for i in xrange( len(z)):
for i in range( len(z)):
if x0[i] is not None and x0[i] is not np.nan and di2[i] != 0:
z[i] += di2[i]*x0[i]

Expand Down Expand Up @@ -574,7 +574,7 @@ def covariance( invv, delta=None):
if np.all( np.diag(f) == 0) and delta is None:
return covariance_singular_Fisher( f)
F = np.diag( np.sum(f, axis=1) )
for k in xrange(K): f[k,k] = 0
for k in range(K): f[k,k] = 0
F -= f
if delta is not None:
di2 = np.square( 1/delta)
Expand Down Expand Up @@ -632,15 +632,15 @@ def round_to_integers( n):
nceilsum = 0
edges = []
nint = np.zeros( (K, K), dtype=int)
for i in xrange(K):
for j in xrange(i, K):
for i in range(K):
for j in range(i, K):
nsum += n[i,j]
nint[i,j] = nint[j,i] = int(np.ceil( n[i,j]))
nceilsum += nint[i,j]
heapq.heappush( edges, (-n[i,j], (i,j)))
nsum = int(np.floor(nsum + 0.5))
surplus = nceilsum - nsum
for k in xrange( surplus):
for k in range( surplus):
d, (i, j) = heapq.heappop( edges)
nint[i,j] -= 1
nint[j,i] = nint[i,j]
Expand Down Expand Up @@ -747,9 +747,9 @@ def weight( i,j):
G.add_node( 'O')
edges = []

for i in xrange(K):
for i in range(K):
edges.append( ('O', i, weight( i,i)))
for j in xrange(i+1, K):
for j in range(i+1, K):
edges.append( (i, j, weight(i,j)))
edges = list(nx.k_edge_augmentation( G, k=connectivity, partial=True))

Expand All @@ -771,13 +771,13 @@ def weight( i,j):
# network.
if (len(only_include_measurements) < n_measure):
indices = []
for i in xrange(K):
for j in xrange(i, K):
for i in range(K):
for j in range(i, K):
if (i,j) in only_include_measurements:
continue
heapq.heappush( indices, (weight(i,j), (i,j)))
addition = []
for m in xrange(n_measure - len(only_include_measurements)):
for m in range(n_measure - len(only_include_measurements)):
_w, (i,j) = heapq.heappop( indices)
addition.append( (i,j))
only_include_measurements.update( addition)
Expand Down
Loading