updated poetry lock #193

Merged 3 commits on Dec 23, 2024
Binary file modified .DS_Store
Binary file not shown.
1,074 changes: 573 additions & 501 deletions poetry.lock

Large diffs are not rendered by default.

Binary file modified symmer/.DS_Store
Binary file not shown.
142 changes: 71 additions & 71 deletions symmer/utils.py
@@ -224,77 +224,77 @@ def gram_schmidt_from_quantum_state(state:Union[np.array, list, QuantumState]) -

    return M

def get_sparse_matrix_large_pauliwordop(P_op: PauliwordOp) -> csr_matrix:
    """
    To build the sparse matrix of operators too large to convert directly (e.g. above 18
    qubits), this function goes through each Pauli term, divides it into two equally sized
    tensor factors, finds the sparse matrix of each factor, and then takes a sparse kron
    product to obtain the large matrix.

    TODO: could also expose how many chunks to split the problem into (e.g. three/four/... tensor products).

    Args:
        P_op (PauliwordOp): Pauli operator to convert into a sparse matrix
    Returns:
        mat (csr_matrix): sparse matrix of P_op
    """
    nq = P_op.n_qubits
    if nq < 16:
        mat = P_op.to_sparse_matrix
    else:
        # n_cpus = mp.cpu_count()
        # P_op_chunks_inds = np.rint(np.linspace(0, P_op.n_terms, min(n_cpus, P_op.n_terms))).astype(set).astype(int)
        #
        # # miss the zero index out (as it gives an empty list)
        # P_op_chunks = [P_op[P_op_chunks_inds[ind_i]: P_op_chunks_inds[ind_i+1]] for ind_i, _ in enumerate(P_op_chunks_inds[1:])]
        # with mp.Pool(n_cpus) as pool:
        #     tracker = pool.map(_get_sparse_matrix_large_pauliwordop, P_op_chunks)

        n_chunks = os.cpu_count()
        if (n_chunks <= 1) or (P_op.n_terms <= 1):
            # no multiprocessing possible
            mat = get(_get_sparse_matrix_large_pauliwordop.remote(P_op))
        else:
            # plus one below due to indexing (the actual number of chunks ignores this value)
            n_chunks += 1
            P_op_chunks_inds = np.rint(np.linspace(0, P_op.n_terms, min(n_chunks, P_op.n_terms + 1))).astype(set).astype(int)
            P_op_chunks = [P_op[P_op_chunks_inds[ind_i]: P_op_chunks_inds[ind_i + 1]]
                           for ind_i, _ in enumerate(P_op_chunks_inds[1:])]
            tracker = np.array(get(
                [_get_sparse_matrix_large_pauliwordop.remote(op) for op in P_op_chunks]))
            mat = reduce(lambda x, y: x + y, tracker)

    return mat

@remote(num_cpus=os.cpu_count(),
        runtime_env={
            "env_vars": {
                "NUMBA_NUM_THREADS": os.getenv("NUMBA_NUM_THREADS"),
                # "OMP_NUM_THREADS": str(os.cpu_count()),
                "OMP_NUM_THREADS": os.getenv("NUMBA_NUM_THREADS"),
                "NUMEXPR_MAX_THREADS": str(os.cpu_count())
            }
        }
        )
def _get_sparse_matrix_large_pauliwordop(P_op: PauliwordOp) -> csr_matrix:
    """
    Ray worker for get_sparse_matrix_large_pauliwordop: splits each Pauli term of P_op
    in half, builds the sparse matrix of each half, and sums the sparse kron products.
    """
    nq = P_op.n_qubits
    mat = csr_matrix(([], ([], [])), shape=(2**nq, 2**nq))
    for op in P_op:
        left_tensor = np.hstack((op.X_block[:, :nq // 2],
                                 op.Z_block[:, :nq // 2]))
        left_coeff = op.coeff_vec

        right_tensor = np.hstack((op.X_block[:, nq // 2:],
                                  op.Z_block[:, nq // 2:]))
        right_coeff = np.array([1])

        mat += sparse_kron(PauliwordOp(left_tensor, left_coeff).to_sparse_matrix,
                           PauliwordOp(right_tensor, right_coeff).to_sparse_matrix,
                           format='csr')  # setting the format makes this faster!

    return mat
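
A note on the split-and-kron trick described in the docstring above: the matrix of an n-qubit Pauli string factorises as the Kronecker product of the matrices of its left and right halves, so only 2^(n/2)-dimensional factors ever need to be built directly. Below is a minimal illustrative sketch in plain NumPy/SciPy; the PAULI table and pauli_string_matrix helper are invented for the example and are not part of symmer.

import numpy as np
from functools import reduce
from scipy.sparse import csr_matrix, kron as sparse_kron

# Single-qubit Pauli matrices as sparse CSR (illustrative helper, not symmer API).
PAULI = {
    'I': csr_matrix(np.eye(2, dtype=complex)),
    'X': csr_matrix(np.array([[0, 1], [1, 0]], dtype=complex)),
    'Y': csr_matrix(np.array([[0, -1j], [1j, 0]], dtype=complex)),
    'Z': csr_matrix(np.array([[1, 0], [0, -1]], dtype=complex)),
}

def pauli_string_matrix(word):
    # Kron the single-qubit factors together, keeping everything sparse.
    return reduce(lambda a, b: sparse_kron(a, b, format='csr'),
                  (PAULI[p] for p in word))

word = 'XZIY'
half = len(word) // 2
full = pauli_string_matrix(word)
# The Kronecker product of the two half-strings reproduces the full matrix.
split = sparse_kron(pauli_string_matrix(word[:half]),
                    pauli_string_matrix(word[half:]),
                    format='csr')
assert np.allclose(full.toarray(), split.toarray())

By associativity of the Kronecker product the same identity holds for any split point, which is what lets the worker above build two half-size PauliwordOp matrices instead of one full-size one.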
# def get_sparse_matrix_large_pauliwordop(P_op: PauliwordOp) -> csr_matrix:
#     """
#     To build the sparse matrix of operators too large to convert directly (e.g. above 18
#     qubits), this function goes through each Pauli term, divides it into two equally sized
#     tensor factors, finds the sparse matrix of each factor, and then takes a sparse kron
#     product to obtain the large matrix.

#     TODO: could also expose how many chunks to split the problem into (e.g. three/four/... tensor products).

#     Args:
#         P_op (PauliwordOp): Pauli operator to convert into a sparse matrix
#     Returns:
#         mat (csr_matrix): sparse matrix of P_op
#     """
#     nq = P_op.n_qubits
#     if nq < 16:
#         mat = P_op.to_sparse_matrix
#     else:
#         # n_cpus = mp.cpu_count()
#         # P_op_chunks_inds = np.rint(np.linspace(0, P_op.n_terms, min(n_cpus, P_op.n_terms))).astype(set).astype(int)
#         #
#         # # miss the zero index out (as it gives an empty list)
#         # P_op_chunks = [P_op[P_op_chunks_inds[ind_i]: P_op_chunks_inds[ind_i+1]] for ind_i, _ in enumerate(P_op_chunks_inds[1:])]
#         # with mp.Pool(n_cpus) as pool:
#         #     tracker = pool.map(_get_sparse_matrix_large_pauliwordop, P_op_chunks)

#         n_chunks = os.cpu_count()
#         if (n_chunks <= 1) or (P_op.n_terms <= 1):
#             # no multiprocessing possible
#             mat = get(_get_sparse_matrix_large_pauliwordop.remote(P_op))
#         else:
#             # plus one below due to indexing (the actual number of chunks ignores this value)
#             n_chunks += 1
#             P_op_chunks_inds = np.rint(np.linspace(0, P_op.n_terms, min(n_chunks, P_op.n_terms + 1))).astype(set).astype(int)
#             P_op_chunks = [P_op[P_op_chunks_inds[ind_i]: P_op_chunks_inds[ind_i + 1]]
#                            for ind_i, _ in enumerate(P_op_chunks_inds[1:])]
#             tracker = np.array(get(
#                 [_get_sparse_matrix_large_pauliwordop.remote(op) for op in P_op_chunks]))
#             mat = reduce(lambda x, y: x + y, tracker)

#     return mat

# @remote(num_cpus=os.cpu_count(),
#         runtime_env={
#             "env_vars": {
#                 "NUMBA_NUM_THREADS": os.getenv("NUMBA_NUM_THREADS"),
#                 # "OMP_NUM_THREADS": str(os.cpu_count()),
#                 "OMP_NUM_THREADS": os.getenv("NUMBA_NUM_THREADS"),
#                 "NUMEXPR_MAX_THREADS": str(os.cpu_count())
#             }
#         }
#         )
# def _get_sparse_matrix_large_pauliwordop(P_op: PauliwordOp) -> csr_matrix:
#     """
#     Ray worker for get_sparse_matrix_large_pauliwordop: splits each Pauli term of P_op
#     in half, builds the sparse matrix of each half, and sums the sparse kron products.
#     """
#     nq = P_op.n_qubits
#     mat = csr_matrix(([], ([], [])), shape=(2**nq, 2**nq))
#     for op in P_op:
#         left_tensor = np.hstack((op.X_block[:, :nq // 2],
#                                  op.Z_block[:, :nq // 2]))
#         left_coeff = op.coeff_vec

#         right_tensor = np.hstack((op.X_block[:, nq // 2:],
#                                   op.Z_block[:, nq // 2:]))
#         right_coeff = np.array([1])

#         mat += sparse_kron(PauliwordOp(left_tensor, left_coeff).to_sparse_matrix,
#                            PauliwordOp(right_tensor, right_coeff).to_sparse_matrix,
#                            format='csr')  # setting the format makes this faster!

#     return mat
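
The parallel branch of get_sparse_matrix_large_pauliwordop is a map-reduce over chunks of terms: Ray workers each build a partial sparse matrix, and the partials are summed with reduce. Here is a hedged sketch of that pattern using the real Ray primitives ray.remote and ray.get; the words list, the chunking via np.array_split, and the pauli_string_matrix helper (repeated from the previous sketch) are illustrative assumptions, not symmer code.

import numpy as np
import ray
from functools import reduce
from scipy.sparse import csr_matrix, kron as sparse_kron

# Illustrative helper repeated from the sketch above (not symmer API).
PAULI = {
    'I': csr_matrix(np.eye(2, dtype=complex)),
    'X': csr_matrix(np.array([[0, 1], [1, 0]], dtype=complex)),
    'Y': csr_matrix(np.array([[0, -1j], [1j, 0]], dtype=complex)),
    'Z': csr_matrix(np.array([[1, 0], [0, -1]], dtype=complex)),
}

def pauli_string_matrix(word):
    return reduce(lambda a, b: sparse_kron(a, b, format='csr'),
                  (PAULI[p] for p in word))

ray.init(ignore_reinit_error=True)

@ray.remote
def partial_sum(chunk):
    # Map step: each worker builds and sums the matrices for its chunk of terms.
    return reduce(lambda a, b: a + b, (pauli_string_matrix(w) for w in chunk))

words = ['XZIY', 'ZZII', 'IXXI', 'YIIZ', 'IIZZ', 'XYZI']
# np.array_split yields nearly equal chunks, playing the role of the
# np.linspace index construction in the function above.
chunks = [list(c) for c in np.array_split(np.array(words), 3)]
# Reduce step: sum the partial sparse matrices returned by the workers.
partials = ray.get([partial_sum.remote(c) for c in chunks])
total = reduce(lambda a, b: a + b, partials)

Chunking by terms keeps each worker's peak memory at a single 2^n-dimensional CSR accumulator, which is the point of splitting the term list rather than the qubit register across workers.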


def matrix_allclose(A: Union[csr_matrix, np.array], B: Union[csr_matrix, np.array], tol: float = 1e-15) -> bool:
Binary file modified tests/.DS_Store
Binary file not shown.
72 changes: 36 additions & 36 deletions tests/test_symmer_utils.py
@@ -1,7 +1,7 @@
from symmer.operators import PauliwordOp, QuantumState
from symmer.utils import (exact_gs_energy, random_anitcomm_2n_1_PauliwordOp,
                          tensor_list, gram_schmidt_from_quantum_state, product_list,
                          get_sparse_matrix_large_pauliwordop, matrix_allclose)
                          matrix_allclose)
import numpy as np
from openfermion import QubitOperator

@@ -445,38 +445,38 @@ def test_gram_schmidt_from_quantum_state_numpy_array():
    assert np.allclose(U_gram[:, 0], psi_norm), 'first column of U_gram not correct'
    assert np.allclose(U_gram @ U_gram.conj().T, np.eye(2 ** nq)), 'U_gram not unitary'

def test_get_sparse_matrix_large_pauliwordop():
    for nq in range(2, 6):
        n_terms = 10 * nq
        random_P = PauliwordOp.random(nq, n_terms)
        sparse_mat = get_sparse_matrix_large_pauliwordop(random_P)
        assert np.allclose(random_P.to_sparse_matrix.toarray(),
                           sparse_mat.toarray())

def test_matrix_allclose_sparse():
    for nq in range(2, 6):
        n_terms = 10 * nq
        random_P = PauliwordOp.random(nq, n_terms)
        sparse_mat = get_sparse_matrix_large_pauliwordop(random_P)
        assert matrix_allclose(random_P.to_sparse_matrix,
                               sparse_mat)

    # assert false output
    Pop_XI = PauliwordOp.from_list(['XI']).to_sparse_matrix
    Pop_ZI = PauliwordOp.from_list(['ZI']).to_sparse_matrix
    assert not matrix_allclose(Pop_XI,
                               Pop_ZI)

def test_matrix_allclose_dense():
    for nq in range(2, 6):
        n_terms = 10 * nq
        random_P = PauliwordOp.random(nq, n_terms)
        sparse_mat = get_sparse_matrix_large_pauliwordop(random_P)
        assert matrix_allclose(random_P.to_sparse_matrix.toarray(),
                               sparse_mat.toarray())

    # assert false output
    Pop_XI = PauliwordOp.from_list(['XI']).to_sparse_matrix
    Pop_ZI = PauliwordOp.from_list(['ZI']).to_sparse_matrix
    assert not matrix_allclose(Pop_XI.toarray(),
                               Pop_ZI.toarray())
# def test_get_sparse_matrix_large_pauliwordop():
#     for nq in range(2, 6):
#         n_terms = 10 * nq
#         random_P = PauliwordOp.random(nq, n_terms)
#         sparse_mat = get_sparse_matrix_large_pauliwordop(random_P)
#         assert np.allclose(random_P.to_sparse_matrix.toarray(),
#                            sparse_mat.toarray())

# def test_matrix_allclose_sparse():
#     for nq in range(2, 6):
#         n_terms = 10 * nq
#         random_P = PauliwordOp.random(nq, n_terms)
#         sparse_mat = get_sparse_matrix_large_pauliwordop(random_P)
#         assert matrix_allclose(random_P.to_sparse_matrix,
#                                sparse_mat)

#     # assert false output
#     Pop_XI = PauliwordOp.from_list(['XI']).to_sparse_matrix
#     Pop_ZI = PauliwordOp.from_list(['ZI']).to_sparse_matrix
#     assert not matrix_allclose(Pop_XI,
#                                Pop_ZI)

# def test_matrix_allclose_dense():
#     for nq in range(2, 6):
#         n_terms = 10 * nq
#         random_P = PauliwordOp.random(nq, n_terms)
#         sparse_mat = get_sparse_matrix_large_pauliwordop(random_P)
#         assert matrix_allclose(random_P.to_sparse_matrix.toarray(),
#                                sparse_mat.toarray())

#     # assert false output
#     Pop_XI = PauliwordOp.from_list(['XI']).to_sparse_matrix
#     Pop_ZI = PauliwordOp.from_list(['ZI']).to_sparse_matrix
#     assert not matrix_allclose(Pop_XI.toarray(),
#                                Pop_ZI.toarray())