From 8a9a911b2fa0d274d14c33ce0a9eff6f40247c61 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Zolt=C3=A1n=20Kolarovszki?=
Date: Mon, 17 Jul 2023 19:29:32 +0200
Subject: [PATCH] feat: Batch processing of pure Fock states

Batch processing of pure Fock states has been implemented. Multiple
states are stored in a tensor of shape
`(state_vector_size, number_of_batches)` in the new
`BatchPureFockState` class. Note that `BatchPureFockState` supports
only a subset of the methods of the original `PureFockState`.
---
 piquasso/__init__.py                          |  10 +
 piquasso/_backends/fock/__init__.py           |   1 +
 piquasso/_backends/fock/pure/batch_state.py   | 149 +++++
 piquasso/_backends/fock/pure/calculations.py  | 114 +++-
 piquasso/_backends/fock/pure/simulator.py     |   6 +-
 piquasso/_backends/fock/pure/state.py         |  32 +-
 piquasso/_math/fock.py                        |  25 +-
 piquasso/api/instruction.py                   |   4 +
 piquasso/api/simulator.py                     |  18 +-
 piquasso/instructions/batch.py                |  36 ++
 scripts/profile_cvnn_layer_batch.py           | 161 ++++++
 tests/backends/fock/pure/test_batch.py        | 284 ++++++++++
 .../tensorflow/test_batch_gradient.py         | 526 ++++++++++++++++++
 13 files changed, 1316 insertions(+), 50 deletions(-)
 create mode 100644 piquasso/_backends/fock/pure/batch_state.py
 create mode 100644 piquasso/instructions/batch.py
 create mode 100644 scripts/profile_cvnn_layer_batch.py
 create mode 100644 tests/backends/fock/pure/test_batch.py
 create mode 100644 tests/backends/tensorflow/test_batch_gradient.py

diff --git a/piquasso/__init__.py b/piquasso/__init__.py
index 177294b8..54cdb66b 100644
--- a/piquasso/__init__.py
+++ b/piquasso/__init__.py
@@ -38,6 +38,7 @@
 from piquasso._backends.fock import (
     FockState,
     PureFockState,
+    BatchPureFockState,
     FockSimulator,
     PureFockSimulator,
 )
@@ -90,6 +91,11 @@
     LossyInterferometer,
 )
 
+from .instructions.batch import (
+    BatchPrepare,
+    BatchApply,
+)
+
 
 __all__ = [
     # API
@@ -115,6 +121,7 @@
     "SamplingState",
     "FockState",
     "PureFockState",
+    "BatchPureFockState",
     # Preparations
     "Vacuum",
     "Mean",
@@ -154,6 +161,9 @@
     "Attenuator",
     "Loss",
     "LossyInterferometer",
+    # Batch
+    "BatchPrepare",
+    "BatchApply",
 ]
 
 __version__ = "3.0.0"
diff --git a/piquasso/_backends/fock/__init__.py b/piquasso/_backends/fock/__init__.py
index 6121380a..2b0a2aa4 100644
--- a/piquasso/_backends/fock/__init__.py
+++ b/piquasso/_backends/fock/__init__.py
@@ -17,4 +17,5 @@
 from .general.simulator import FockSimulator  # noqa: F401
 
 from .pure.state import PureFockState  # noqa: F401
+from .pure.batch_state import BatchPureFockState  # noqa: F401
 from .pure.simulator import PureFockSimulator  # noqa: F401
diff --git a/piquasso/_backends/fock/pure/batch_state.py b/piquasso/_backends/fock/pure/batch_state.py
new file mode 100644
index 00000000..8390e162
--- /dev/null
+++ b/piquasso/_backends/fock/pure/batch_state.py
@@ -0,0 +1,149 @@
+#
+# Copyright 2021-2023 Budapest Quantum Computing Group
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from typing import Optional + +import numpy as np + +from piquasso.api.config import Config +from piquasso.api.exceptions import InvalidState +from piquasso.api.calculator import BaseCalculator + +from piquasso._math.linalg import vector_absolute_square +from piquasso._math.indices import get_index_in_fock_space + +from .state import PureFockState + + +class BatchPureFockState(PureFockState): + r"""A simulated batch pure Fock state, containing multiple state vectors.""" + + def __init__( + self, *, d: int, calculator: BaseCalculator, config: Optional[Config] = None + ) -> None: + """ + Args: + d (int): The number of modes. + calculator (BaseCalculator): Instance containing calculation functions. + config (Config): Instance containing constants for the simulation. + """ + + super().__init__(d=d, calculator=calculator, config=config) + + def _apply_separate_state_vectors(self, state_vectors): + self._state_vector = self._np.array( + state_vectors, dtype=self._config.complex_dtype + ).T + + @property + def _batch_size(self): + return self._state_vector.shape[1] + + @property + def _batch_state_vectors(self): + for index in range(self._batch_size): + yield self._state_vector[:, index] + + @property + def nonzero_elements(self): + return [ + self._nonzero_elements_for_single_state_vector(state_vector) + for state_vector in self._batch_state_vectors + ] + + def __repr__(self) -> str: + partial_strings = [] + for partial_nonzero_elements in self.nonzero_elements: + partial_strings.append( + self._get_repr_for_single_state_vector(partial_nonzero_elements) + ) + + return "\n".join(partial_strings) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, BatchPureFockState): + return False + return self._np.allclose(self._state_vector, other._state_vector) + + @property + def fock_probabilities(self) -> np.ndarray: + return [ + vector_absolute_square(state_vector, self._calculator) + for state_vector in self._batch_state_vectors + ] + + @property + def norm(self): + return [ + self._calculator.np.sum(partial_fock_probabilities) + for partial_fock_probabilities in self.fock_probabilities + ] + + def normalize(self) -> None: + if not self._config.normalize: + return + + norms = self.norm + + if any(np.isclose(norm, 0) for norm in norms): + raise InvalidState("The norm of a state in the batch is 0.") + + self._state_vector = self._state_vector / self._np.sqrt(norms) + + def validate(self) -> None: + return True + + def _get_mean_position_indices(self, mode): + fallback_np = self._calculator.fallback_np + + left_indices = [] + multipliers = [] + right_indices = [] + + for index, basis in enumerate(self._space): + i = basis[mode] + basis_array = fallback_np.array(basis) + + if i > 0: + basis_array[mode] = i - 1 + lower_index = get_index_in_fock_space(tuple(basis_array)) + + left_indices.append(lower_index) + multipliers.append(fallback_np.sqrt(i)) + right_indices.append(index) + + if sum(basis) + 1 < self._config.cutoff: + basis_array[mode] = i + 1 + upper_index = get_index_in_fock_space(tuple(basis_array)) + + left_indices.append(upper_index) + multipliers.append(fallback_np.sqrt(i + 1)) + right_indices.append(index) + + multipliers = fallback_np.array(multipliers) + + return multipliers, left_indices, right_indices + + def mean_position(self, mode: int) -> np.ndarray: + np = self._calculator.np + fallback_np = self._calculator.fallback_np + multipliers, left_indices, right_indices = self._get_mean_position_indices(mode) + + lhs = (multipliers * self._state_vector[left_indices].T).T + 
rhs = self._state_vector[right_indices] + + return np.real( + np.einsum("ij,ij->j", lhs, rhs) * fallback_np.sqrt(self._config.hbar / 2) + ) diff --git a/piquasso/_backends/fock/pure/calculations.py b/piquasso/_backends/fock/pure/calculations.py index 18a340ab..4c12e3b5 100644 --- a/piquasso/_backends/fock/pure/calculations.py +++ b/piquasso/_backends/fock/pure/calculations.py @@ -30,6 +30,7 @@ from piquasso.api.calculator import BaseCalculator from .state import PureFockState +from .batch_state import BatchPureFockState from piquasso.instructions import gates @@ -492,8 +493,14 @@ def _calculate_state_vector_after_interferometer( ) -> np.ndarray: new_state_vector = np.empty_like(state_vector) + is_batch = len(state_vector.shape) == 2 + + einsum_string = "ij,jkl->ikl" if is_batch else "ij,jk->ik" + for n, indices in enumerate(index_list): - new_state_vector[indices] = subspace_transformations[n] @ state_vector[indices] + new_state_vector[indices] = np.einsum( + einsum_string, subspace_transformations[n], state_vector[indices] + ) return new_state_vector @@ -516,34 +523,40 @@ def applying_interferometer_gradient(upstream): else: np = calculator.np + is_batch = len(state_vector.shape) == 2 + + matrix_einsum_string = "ijl,kjl->ki" if is_batch else "ij,kj->ki" + initial_state_einsum_string = "ji,jkl->ikl" if is_batch else "ji,jk->ik" + + reshape_arg = (-1, state_vector.shape[1]) if is_batch else (-1,) + unordered_gradient_by_initial_state = [] order_by = [] gradient_by_matrix = [] + conjugated_state_vector = np.conj(state_vector) + for n, indices in enumerate(index_list): matrix = np.conj(subspace_transformations[n]) - sliced_upstream = np.take(upstream, indices) + sliced_upstream = upstream[indices] state_vector_slice = conjugated_state_vector[indices] order_by.append(indices.reshape(-1)) - product = np.einsum("ji,jk->ik", matrix, sliced_upstream) - unordered_gradient_by_initial_state.append(product.reshape(-1)) + product = np.einsum(initial_state_einsum_string, matrix, sliced_upstream) + unordered_gradient_by_initial_state.append(product.reshape(*reshape_arg)) gradient_by_matrix.append( - np.einsum("ij,kj->ki", state_vector_slice, sliced_upstream) + np.einsum(matrix_einsum_string, state_vector_slice, sliced_upstream) ) - gradient_by_initial_state = np.take( - np.concatenate(unordered_gradient_by_initial_state), - fallback_np.concatenate(order_by).argsort(), - ) + gradient_by_initial_state = np.concatenate(unordered_gradient_by_initial_state)[ + fallback_np.concatenate(order_by).argsort() + ] if static_valued: - return ( - tf.constant(gradient_by_initial_state), - [tf.constant(matrix) for matrix in gradient_by_matrix], - ) + gradient_by_initial_state = tf.constant(gradient_by_initial_state) + gradient_by_matrix = [tf.constant(matrix) for matrix in gradient_by_matrix] return gradient_by_initial_state, gradient_by_matrix @@ -617,10 +630,14 @@ def _calculate_state_vector_after_apply_active_gate( ): new_state_vector = np.empty_like(state_vector, dtype=state_vector.dtype) + is_batch = len(state_vector.shape) == 2 + + einsum_string = "ij,jkl->ikl" if is_batch else "ij,jk->ik" + for state_index_matrix in state_index_matrix_list: limit = state_index_matrix.shape[0] - new_state_vector[state_index_matrix] = ( - matrix[:limit, :limit] @ state_vector[state_index_matrix] + new_state_vector[state_index_matrix] = np.einsum( + einsum_string, matrix[:limit, :limit], state_vector[state_index_matrix] ) return new_state_vector @@ -646,6 +663,13 @@ def linear_active_gate_gradient(upstream): cutoff = len(matrix) + 
is_batch = len(state_vector.shape) == 2 + + matrix_einsum_string = "ijl,kjl->ki" if is_batch else "ij,kj->ki" + initial_state_einsum_string = "ji,jkl->ikl" if is_batch else "ji,jk->ik" + + reshape_arg = (-1, state_vector.shape[1]) if is_batch else (-1,) + unordered_gradient_by_initial_state = [] order_by = [] @@ -656,18 +680,19 @@ def linear_active_gate_gradient(upstream): for indices in state_index_matrix_list: limit = indices.shape[0] - upstream_slice = np.take(upstream, indices) + upstream_slice = upstream[indices] state_vector_slice = conjugated_state_vector[indices] matrix_slice = conjugated_matrix[:limit, :limit] order_by.append(indices.reshape(-1)) - unordered_gradient_by_initial_state.append( - (np.einsum("ji,jk->ik", matrix_slice, upstream_slice)).reshape(-1) + product = np.einsum( + initial_state_einsum_string, matrix_slice, upstream_slice ) + unordered_gradient_by_initial_state.append(product.reshape(*reshape_arg)) partial_gradient = np.einsum( - "ij,kj->ki", state_vector_slice, upstream_slice + matrix_einsum_string, state_vector_slice, upstream_slice ) if static_valued: @@ -679,10 +704,9 @@ def linear_active_gate_gradient(upstream): "constant", ) - gradient_by_initial_state = np.take( - np.concatenate(unordered_gradient_by_initial_state), - fallback_np.concatenate(order_by).argsort(), - ) + gradient_by_initial_state = np.concatenate(unordered_gradient_by_initial_state)[ + fallback_np.concatenate(order_by).argsort() + ] if static_valued: return ( @@ -725,7 +749,8 @@ def kerr(state: PureFockState, instruction: Instruction, shots: int) -> Result: 1j * xi * np.array([basis[mode] ** 2 for basis in state._space]) ) - state._state_vector = coefficients * state._state_vector + # NOTE: Transposition is done here in order to work with batch processing. 
+ state._state_vector = (coefficients * state._state_vector.T).T return Result(state=state) @@ -828,3 +853,44 @@ def _add_occupation_number_basis( # type: ignore state._state_vector = state._calculator.assign( state._state_vector, index, coefficient ) + + +def batch_prepare(state: PureFockState, instruction: Instruction, shots: int): + subprograms = instruction._all_params["subprograms"] + execute = instruction._all_params["execute"] + + state_vectors = [ + execute(subprogram, shots).state._state_vector for subprogram in subprograms + ] + + batch_state = BatchPureFockState( + d=state.d, calculator=state._calculator, config=state._config + ) + + batch_state._apply_separate_state_vectors(state_vectors) + + return Result(state=batch_state) + + +def batch_apply(state: BatchPureFockState, instruction: Instruction, shots: int): + subprograms = instruction._all_params["subprograms"] + execute = instruction._all_params["execute"] + + d = state.d + calculator = state._calculator + config = state._config + + resulting_state_vectors = [] + + for state_vector, subprogram in zip(state._batch_state_vectors, subprograms): + small_state = PureFockState(d=d, calculator=calculator, config=config) + + small_state._state_vector = state_vector + + resulting_state_vectors.append( + execute(subprogram, initial_state=small_state).state._state_vector + ) + + state._apply_separate_state_vectors(resulting_state_vectors) + + return Result(state=state) diff --git a/piquasso/_backends/fock/pure/simulator.py b/piquasso/_backends/fock/pure/simulator.py index 1fa77b9f..7eec6c80 100644 --- a/piquasso/_backends/fock/pure/simulator.py +++ b/piquasso/_backends/fock/pure/simulator.py @@ -30,13 +30,15 @@ vacuum, create, annihilate, + batch_prepare, + batch_apply, ) from ..calculations import attenuator from piquasso.api.simulator import Simulator from piquasso.api.calculator import BaseCalculator -from piquasso.instructions import preparations, gates, measurements, channels +from piquasso.instructions import preparations, gates, measurements, channels, batch from piquasso._backends.calculator import NumpyCalculator @@ -111,6 +113,8 @@ class PureFockSimulator(Simulator): gates.GaussianTransform: linear, measurements.ParticleNumberMeasurement: particle_number_measurement, channels.Attenuator: attenuator, + batch.BatchPrepare: batch_prepare, + batch.BatchApply: batch_apply, } _calculator_class: Type[BaseCalculator] = NumpyCalculator diff --git a/piquasso/_backends/fock/pure/state.py b/piquasso/_backends/fock/pure/state.py index 62f6befa..000f6336 100644 --- a/piquasso/_backends/fock/pure/state.py +++ b/piquasso/_backends/fock/pure/state.py @@ -72,21 +72,26 @@ def reset(self) -> None: state_vector_list, dtype=self._config.complex_dtype ) - @property - def nonzero_elements(self) -> Generator[Tuple[complex, FockBasis], Any, None]: + def _nonzero_elements_for_single_state_vector( + self, state_vector: np.ndarray + ) -> Generator[Tuple[complex, FockBasis], Any, None]: for index, basis in self._space.basis: - coefficient: complex = self._state_vector[index] + coefficient: complex = state_vector[index] if coefficient != 0: yield coefficient, basis - def __repr__(self) -> str: + @property + def nonzero_elements(self) -> Generator[Tuple[complex, FockBasis], Any, None]: + return self._nonzero_elements_for_single_state_vector(self._state_vector) + + def _get_repr_for_single_state_vector(self, nonzero_elements): return " + ".join( - [ - str(coefficient) + str(basis) - for coefficient, basis in self.nonzero_elements - ] + [str(coefficient) + 
str(basis) for coefficient, basis in nonzero_elements] ) + def __repr__(self) -> str: + return self._get_repr_for_single_state_vector(self.nonzero_elements) + def __eq__(self, other: object) -> bool: if not isinstance(other, PureFockState): return False @@ -161,7 +166,7 @@ def validate(self) -> None: f"fock_probabilities={self.fock_probabilities}" ) - def mean_position(self, mode: int) -> np.ndarray: + def _get_mean_position_indices(self, mode): fallback_np = self._calculator.fallback_np left_indices = [] @@ -188,11 +193,16 @@ def mean_position(self, mode: int) -> np.ndarray: multipliers.append(fallback_np.sqrt(i + 1)) right_indices.append(index) - state_vector = self._state_vector - multipliers = fallback_np.array(multipliers) + return multipliers, left_indices, right_indices + + def mean_position(self, mode: int) -> np.ndarray: np = self._calculator.np + fallback_np = self._calculator.fallback_np + multipliers, left_indices, right_indices = self._get_mean_position_indices(mode) + + state_vector = self._state_vector accumulator = np.dot( (multipliers * np.take(state_vector, left_indices)), diff --git a/piquasso/_math/fock.py b/piquasso/_math/fock.py index fea80ddf..c4369a78 100644 --- a/piquasso/_math/fock.py +++ b/piquasso/_math/fock.py @@ -52,6 +52,16 @@ def symmetric_subspace_cardinality(*, d: int, n: int) -> int: return comb(d + n - 1, n, exact=True) +@functools.lru_cache(maxsize=None) +def _create_all_fock_basis_elements(d: int, cutoff: int) -> List[Tuple[int, ...]]: + ret = [] + + for n in range(cutoff): + ret.extend(partitions(boxes=d, particles=n, class_=FockBasis)) + + return ret + + class FockBasis(tuple): def __str__(self) -> str: return self.display(template="|{}>") @@ -78,20 +88,9 @@ def d(self) -> int: def n(self) -> int: return sum(self) - @classmethod - def create_on_particle_subspace( - cls, *, boxes: int, particles: int - ) -> List[Tuple[int, ...]]: - return partitions(boxes=boxes, particles=particles, class_=cls) - @classmethod def create_all(cls, *, d: int, cutoff: int) -> List[Tuple[int, ...]]: - ret = [] - - for n in range(cutoff): - ret.extend(cls.create_on_particle_subspace(boxes=d, particles=n)) - - return ret + return _create_all_fock_basis_elements(d, cutoff) def on_modes(self, *, modes: Tuple[int, ...]) -> "FockBasis": return FockBasis(self[mode] for mode in modes) @@ -444,7 +443,7 @@ def get_subspace_basis( self, n: int, d: Optional[int] = None ) -> List[Tuple[int, ...]]: d = d or self.d - return FockBasis.create_on_particle_subspace(boxes=d, particles=n) + return partitions(boxes=d, particles=n, class_=FockBasis) def symmetric_tensorpower( self, diff --git a/piquasso/api/instruction.py b/piquasso/api/instruction.py index f0683399..b225a105 100644 --- a/piquasso/api/instruction.py +++ b/piquasso/api/instruction.py @@ -198,6 +198,10 @@ def _validate_modes(self, modes): ) +class BatchInstruction(Instruction): + """Base class to control batch processing.""" + + class Preparation(Instruction): """Base class for preparations.""" diff --git a/piquasso/api/simulator.py b/piquasso/api/simulator.py index effb3fe9..b847f64a 100644 --- a/piquasso/api/simulator.py +++ b/piquasso/api/simulator.py @@ -30,7 +30,13 @@ InvalidSimulation, InvalidState, ) -from piquasso.api.instruction import Gate, Instruction, Measurement, Preparation +from piquasso.api.instruction import ( + Gate, + Instruction, + Measurement, + Preparation, + BatchInstruction, +) from piquasso._math.lists import is_ordered_sublist, deduplicate_neighbours @@ -174,6 +180,14 @@ def validate(self, program: 
Program) -> None: self._validate_instructions(program.instructions) + def _maybe_postprocess_batch_instruction( + self, instruction: Instruction + ) -> Instruction: + if isinstance(instruction, BatchInstruction): + instruction._extra_params["execute"] = self.execute + + return instruction + def execute_instructions( self, instructions: List[Instruction], @@ -220,6 +234,8 @@ def execute_instructions( calculation = self._get_calculation(instruction) + instruction = self._maybe_postprocess_batch_instruction(instruction) + result = calculation(result.state, instruction, shots) return result diff --git a/piquasso/instructions/batch.py b/piquasso/instructions/batch.py new file mode 100644 index 00000000..c0acb11c --- /dev/null +++ b/piquasso/instructions/batch.py @@ -0,0 +1,36 @@ +# +# Copyright 2021-2023 Budapest Quantum Computing Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from piquasso.api.instruction import Preparation, Gate, BatchInstruction + + +class BatchPrepare(Preparation, BatchInstruction): + r"""Allows for batch processing of multiple states. + + NOTE: This feature is experimental. + """ + + def __init__(self, subprograms) -> None: + super().__init__(params=dict(subprograms=subprograms)) + + +class BatchApply(Gate, BatchInstruction): + r"""Applies subprograms to each element in the batch. + + NOTE: This feature is experimental. + """ + + def __init__(self, subprograms) -> None: + super().__init__(params=dict(subprograms=subprograms)) diff --git a/scripts/profile_cvnn_layer_batch.py b/scripts/profile_cvnn_layer_batch.py new file mode 100644 index 00000000..e23a5e6d --- /dev/null +++ b/scripts/profile_cvnn_layer_batch.py @@ -0,0 +1,161 @@ +# +# Copyright 2021-2023 Budapest Quantum Computing Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import piquasso as pq +from piquasso._math.fock import cutoff_cardinality +import tensorflow as tf +import time +import numpy as np + + +PROFILE = True +number_of_inputs = 2 +inputs = 0.01 * np.random.rand(number_of_inputs) +d = 2 +cutoff = 16 + + +def create_layer_parameters(d: int): + number_of_beamsplitters: int + if d % 2 == 0: + number_of_beamsplitters = (d // 2) ** 2 + number_of_beamsplitters += ((d - 1) // 2) * (d // 2) + else: + number_of_beamsplitters = ((d - 1) // 2) * d + + thetas_1 = [tf.Variable(0.1) for _ in range(number_of_beamsplitters)] + phis_1 = [tf.Variable(0.0) for _ in range(d - 1)] + + squeezings = [tf.Variable(0.1) for _ in range(d)] + + thetas_2 = [tf.Variable(0.1) for _ in range(number_of_beamsplitters)] + phis_2 = [tf.Variable(0.0) for _ in range(d - 1)] + + displacements = [tf.Variable(0.1) for _ in range(d)] + + kappas = [tf.Variable(0.1) for _ in range(d)] + + return { + "d": d, + "thetas_1": thetas_1, + "phis_1": phis_1, + "squeezings": squeezings, + "thetas_2": thetas_2, + "phis_2": phis_2, + "displacements": displacements, + "kappas": kappas, + } + + +expected_mean_positions = 0.01 * np.random.rand(number_of_inputs) + +target_state_vector = np.zeros(cutoff_cardinality(cutoff=cutoff, d=d), dtype=complex) +target_state_vector[1] = 1.0 +target_state = tf.constant(target_state_vector, dtype=tf.complex128) + +simulator = pq.TensorflowPureFockSimulator( + d=d, config=pq.Config(cutoff=cutoff, normalize=False) +) + +parameters = create_layer_parameters(d) + +if PROFILE: + profiler_options = tf.profiler.experimental.ProfilerOptions(python_tracer_level=1) + tf.profiler.experimental.start("logdir", options=profiler_options) + +with tf.GradientTape() as tape: + preparations = [] + + for i in range(number_of_inputs): + with pq.Program() as preparation: + pq.Q(all) | pq.Vacuum() + + for mode in range(d): + pq.Q(mode) | pq.Displacement(r=inputs[i]) + + preparations.append(preparation) + + with pq.Program() as program: + pq.Q() | pq.BatchPrepare(preparations) + + i = 0 + for col in range(d): + if col % 2 == 0: + for mode in range(0, d - 1, 2): + modes = (mode, mode + 1) + pq.Q(*modes) | pq.Beamsplitter(parameters["thetas_1"][i], phi=0.0) + i += 1 + + if col % 2 == 1: + for mode in range(1, d - 1, 2): + modes = (mode, mode + 1) + pq.Q(*modes) | pq.Beamsplitter(parameters["thetas_1"][i], phi=0.0) + i += 1 + + for i in range(d - 1): + pq.Q(i) | pq.Phaseshifter(parameters["phis_1"][i]) + + for mode, r in enumerate(parameters["squeezings"]): + pq.Q(mode) | pq.Squeezing(r) + + i = 0 + for col in range(d): + if col % 2 == 0: + for mode in range(0, d - 1, 2): + modes = (mode, mode + 1) + pq.Q(*modes) | pq.Beamsplitter(parameters["thetas_2"][i], phi=0.0) + i += 1 + + if col % 2 == 1: + for mode in range(1, d - 1, 2): + modes = (mode, mode + 1) + pq.Q(*modes) | pq.Beamsplitter(parameters["thetas_2"][i], phi=0.0) + i += 1 + + for i in range(d - 1): + pq.Q(i) | pq.Phaseshifter(parameters["phis_2"][i]) + + for mode, r in enumerate(parameters["displacements"]): + pq.Q(mode) | pq.Displacement(r=r) + + for mode, xi in enumerate(parameters["kappas"]): + pq.Q(mode) | pq.Kerr(xi=xi) + + start_time = time.time() + state = simulator.execute(program).state + print("EXECUTION TIME: ", time.time() - start_time) + + mean_position = state.mean_position(mode=0) + + cost = tf.reduce_sum(tf.abs(mean_position - expected_mean_positions)) + +flattened_parameters = ( + parameters["thetas_1"] + + parameters["phis_1"] + + parameters["squeezings"] + + parameters["thetas_2"] + + parameters["phis_2"] + + 
parameters["displacements"] + + parameters["kappas"] +) + +start_time = time.time() +gradient = tape.gradient(cost, flattened_parameters) + +print("GRADIENT CALCULATION TIME:", time.time() - start_time) +print("GRADIENT:", [g.numpy() for g in gradient]) + +if PROFILE: + tf.profiler.experimental.stop() diff --git a/tests/backends/fock/pure/test_batch.py b/tests/backends/fock/pure/test_batch.py new file mode 100644 index 00000000..7b2600a7 --- /dev/null +++ b/tests/backends/fock/pure/test_batch.py @@ -0,0 +1,284 @@ +# +# Copyright 2021-2023 Budapest Quantum Computing Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy as np + +import piquasso as pq + + +def test_batch_Beamsplitter_state_vector(): + with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with pq.Program() as common: + pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 6, phi=np.pi / 3) + + with pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q() | common + + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q() | common + + with pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q() | common + + simulator = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_state_vector = simulator.execute(batch_program).state._state_vector + + first_state_vector = simulator.execute(first_program).state._state_vector + second_state_vector = simulator.execute(second_program).state._state_vector + + assert np.allclose(batch_state_vector[:, 0], first_state_vector) + assert np.allclose(batch_state_vector[:, 1], second_state_vector) + + +def test_batch_Squeezing_and_Displacement_state_vector(): + with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with pq.Program() as common: + pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 6, phi=np.pi / 3) + + pq.Q(0) | pq.Displacement(1.0) + pq.Q(0) | pq.Squeezing(0.1) + + with pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q() | common + + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q() | common + + with pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q() | common + + simulator = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_state_vector = simulator.execute(batch_program).state._state_vector + + first_state_vector = simulator.execute(first_program).state._state_vector + second_state_vector = simulator.execute(second_program).state._state_vector + + assert np.allclose(batch_state_vector[:, 0], first_state_vector) + assert np.allclose(batch_state_vector[:, 1], second_state_vector) + + +def test_batch_Kerr_state_vector(): + 
with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with pq.Program() as common: + pq.Q(0) | pq.Kerr(0.1) + pq.Q(1) | pq.Kerr(0.2) + + with pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q() | common + + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q() | common + + with pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q() | common + + simulator = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_state_vector = simulator.execute(batch_program).state._state_vector + + first_state_vector = simulator.execute(first_program).state._state_vector + second_state_vector = simulator.execute(second_program).state._state_vector + + assert np.allclose(batch_state_vector[:, 0], first_state_vector) + assert np.allclose(batch_state_vector[:, 1], second_state_vector) + + +def test_batch_Phaseshifter_state_vector(): + with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with pq.Program() as common: + pq.Q(0) | pq.Phaseshifter(0.1) + pq.Q(1) | pq.Phaseshifter(0.2) + + with pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q() | common + + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q() | common + + with pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q() | common + + simulator = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_state_vector = simulator.execute(batch_program).state._state_vector + + first_state_vector = simulator.execute(first_program).state._state_vector + second_state_vector = simulator.execute(second_program).state._state_vector + + assert np.allclose(batch_state_vector[:, 0], first_state_vector) + assert np.allclose(batch_state_vector[:, 1], second_state_vector) + + +def test_batch_mean_position(): + with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with pq.Program() as common: + pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 6, phi=np.pi / 3) + + pq.Q(0) | pq.Displacement(1.0) + pq.Q(0) | pq.Squeezing(0.1) + + pq.Q(0) | pq.Kerr(0.1) + pq.Q(1) | pq.Kerr(0.2) + + with pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q() | common + + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q() | common + + with pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q() | common + + simulator = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_mean_positions = simulator.execute(batch_program).state.mean_position(0) + + first_mean_position = simulator.execute(first_program).state.mean_position(0) + second_mean_position = simulator.execute(second_program).state.mean_position(0) + + assert np.allclose(batch_mean_positions[0], first_mean_position) + assert np.allclose(batch_mean_positions[1], second_mean_position) + + +def test_Batch_with_OneByOne(): + with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | 
pq.Squeezing(r=0.1, phi=np.pi / 4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with pq.Program() as common1: + pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 6, phi=np.pi / 3) + + pq.Q(0) | pq.Displacement(1.0) + pq.Q(0) | pq.Squeezing(0.1) + + with pq.Program() as common2: + pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 5, phi=np.pi / 6) + + pq.Q(0) | pq.Displacement(-1.0, phi=np.pi/3) + pq.Q(0) | pq.Squeezing(0.2, phi=np.pi/5) + + with pq.Program() as first_intermediate: + pq.Q(0) | pq.Kerr(0.1) + + with pq.Program() as second_intermediate: + pq.Q(1) | pq.Kerr(0.2) + + with pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q() | common1 + + pq.Q() | pq.BatchApply([first_intermediate, second_intermediate]) + + pq.Q() | common2 + + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q() | common1 + + pq.Q() | first_intermediate + + pq.Q() | common2 + + with pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q() | common1 + + pq.Q() | second_intermediate + + pq.Q() | common2 + + simulator = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_state_vector = simulator.execute(batch_program).state._state_vector + + first_state_vector = simulator.execute(first_program).state._state_vector + second_state_vector = simulator.execute(second_program).state._state_vector + + assert np.allclose(batch_state_vector[:, 0], first_state_vector) + assert np.allclose(batch_state_vector[:, 1], second_state_vector) diff --git a/tests/backends/tensorflow/test_batch_gradient.py b/tests/backends/tensorflow/test_batch_gradient.py new file mode 100644 index 00000000..ac9b089e --- /dev/null +++ b/tests/backends/tensorflow/test_batch_gradient.py @@ -0,0 +1,526 @@ +# +# Copyright 2021-2023 Budapest Quantum Computing Group +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import tensorflow as tf + +import numpy as np + +import piquasso as pq + + +def test_batch_Beamsplitter_mean_position(): + theta = tf.Variable(np.pi / 3) + + with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with tf.GradientTape() as tape: + with pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q() | pq.Beamsplitter(theta=theta, phi=np.pi / 3) + + simulator = pq.TensorflowPureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_mean_positions = simulator.execute(batch_program).state.mean_position(0) + + batch_gradient = tape.jacobian(batch_mean_positions, theta) + + with tf.GradientTape() as tape: + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q() | pq.Beamsplitter(theta=theta, phi=np.pi / 3) + + first_mean_position = simulator.execute(first_program).state.mean_position(0) + + first_gradient = tape.gradient(first_mean_position, theta) + + with tf.GradientTape() as tape: + with pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q() | pq.Beamsplitter(theta=theta, phi=np.pi / 3) + + second_mean_position = simulator.execute(second_program).state.mean_position(0) + + second_gradient = tape.gradient(second_mean_position, theta) + + assert np.allclose(batch_mean_positions[0], first_mean_position) + assert np.allclose(batch_mean_positions[1], second_mean_position) + + assert np.allclose(batch_gradient[0], first_gradient) + assert np.allclose(batch_gradient[1], second_gradient) + + +def test_batch_Squeezing_mean_position(): + r = tf.Variable(0.1) + + with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with tf.GradientTape() as tape: + with pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q(0) | pq.Squeezing(r=r) + pq.Q() | pq.Beamsplitter(theta=np.pi / 3, phi=np.pi / 3) + + simulator = pq.TensorflowPureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_mean_positions = simulator.execute(batch_program).state.mean_position(0) + + batch_gradient = tape.jacobian(batch_mean_positions, r) + + with tf.GradientTape() as tape: + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q(0) | pq.Squeezing(r=r) + pq.Q() | pq.Beamsplitter(theta=np.pi / 3, phi=np.pi / 3) + + first_mean_position = simulator.execute(first_program).state.mean_position(0) + + first_gradient = tape.gradient(first_mean_position, r) + + with tf.GradientTape() as tape: + with pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q(0) | pq.Squeezing(r=r) + pq.Q() | pq.Beamsplitter(theta=np.pi / 3, phi=np.pi / 3) + + second_mean_position = simulator.execute(second_program).state.mean_position(0) + + second_gradient = tape.gradient(second_mean_position, r) + + assert np.allclose(batch_mean_positions[0], first_mean_position) + assert np.allclose(batch_mean_positions[1], second_mean_position) + + assert np.allclose(batch_gradient[0], first_gradient) + assert np.allclose(batch_gradient[1], second_gradient) + + +def test_batch_Displacement_mean_position(): + r = tf.Variable(0.1) + + with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 
4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with tf.GradientTape() as tape: + with pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q(0) | pq.Displacement(r=r) + pq.Q() | pq.Beamsplitter(theta=np.pi / 3, phi=np.pi / 3) + + simulator = pq.TensorflowPureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_mean_positions = simulator.execute(batch_program).state.mean_position(0) + + batch_gradient = tape.jacobian(batch_mean_positions, r) + + with tf.GradientTape() as tape: + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q(0) | pq.Displacement(r=r) + pq.Q() | pq.Beamsplitter(theta=np.pi / 3, phi=np.pi / 3) + + first_mean_position = simulator.execute(first_program).state.mean_position(0) + + first_gradient = tape.gradient(first_mean_position, r) + + with tf.GradientTape() as tape: + with pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q(0) | pq.Displacement(r=r) + pq.Q() | pq.Beamsplitter(theta=np.pi / 3, phi=np.pi / 3) + + second_mean_position = simulator.execute(second_program).state.mean_position(0) + + second_gradient = tape.gradient(second_mean_position, r) + + assert np.allclose(batch_mean_positions[0], first_mean_position) + assert np.allclose(batch_mean_positions[1], second_mean_position) + + assert np.allclose(batch_gradient[0], first_gradient) + assert np.allclose(batch_gradient[1], second_gradient) + + +def test_batch_Kerr_mean_position(): + xi = tf.Variable(0.1) + + with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with tf.GradientTape() as tape: + with pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q(0) | pq.Kerr(xi=xi) + pq.Q() | pq.Beamsplitter(theta=np.pi / 3, phi=np.pi / 3) + + simulator = pq.TensorflowPureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_mean_positions = simulator.execute(batch_program).state.mean_position(0) + + batch_gradient = tape.jacobian(batch_mean_positions, xi) + + with tf.GradientTape() as tape: + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q(0) | pq.Kerr(xi=xi) + pq.Q() | pq.Beamsplitter(theta=np.pi / 3, phi=np.pi / 3) + + first_mean_position = simulator.execute(first_program).state.mean_position(0) + + first_gradient = tape.gradient(first_mean_position, xi) + + with tf.GradientTape() as tape: + with pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q(0) | pq.Kerr(xi=xi) + pq.Q() | pq.Beamsplitter(theta=np.pi / 3, phi=np.pi / 3) + + second_mean_position = simulator.execute(second_program).state.mean_position(0) + + second_gradient = tape.gradient(second_mean_position, xi) + + assert np.allclose(batch_mean_positions[0], first_mean_position) + assert np.allclose(batch_mean_positions[1], second_mean_position) + + assert np.allclose(batch_gradient[0], first_gradient) + assert np.allclose(batch_gradient[1], second_gradient) + + +def test_batch_Phaseshifter_mean_position(): + phi = tf.Variable(np.pi / 5) + + with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with tf.GradientTape() as tape: + with 
pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q(0) | pq.Phaseshifter(phi=phi) + pq.Q() | pq.Beamsplitter(theta=np.pi / 3, phi=np.pi / 3) + + simulator = pq.TensorflowPureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_mean_positions = simulator.execute(batch_program).state.mean_position(0) + + batch_gradient = tape.jacobian(batch_mean_positions, phi) + + with tf.GradientTape() as tape: + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q(0) | pq.Phaseshifter(phi=phi) + pq.Q() | pq.Beamsplitter(theta=np.pi / 3, phi=np.pi / 3) + + first_mean_position = simulator.execute(first_program).state.mean_position(0) + + first_gradient = tape.gradient(first_mean_position, phi) + + with tf.GradientTape() as tape: + with pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q(0) | pq.Phaseshifter(phi=phi) + pq.Q() | pq.Beamsplitter(theta=np.pi / 3, phi=np.pi / 3) + + second_mean_position = simulator.execute(second_program).state.mean_position(0) + + second_gradient = tape.gradient(second_mean_position, phi) + + assert np.allclose(batch_mean_positions[0], first_mean_position) + assert np.allclose(batch_mean_positions[1], second_mean_position) + + assert np.allclose(batch_gradient[0], first_gradient) + assert np.allclose(batch_gradient[1], second_gradient) + + +def test_batch_complex_circuit_mean_position(): + theta1 = tf.Variable(np.pi / 3) + phi1 = tf.Variable(np.pi / 4) + + phi_phaseshift = tf.Variable(np.pi / 5) + + r1 = tf.Variable(0.1) + r2 = tf.Variable(0.2) + + alpha1 = tf.Variable(1.0) + alpha2 = tf.Variable(0.5) + + theta2 = tf.Variable(np.pi / 3) + phi2 = tf.Variable(np.pi / 4) + + theta2 = tf.Variable(np.pi / 3) + phi2 = tf.Variable(np.pi / 4) + + xi1 = tf.Variable(0.1) + xi2 = tf.Variable(0.15) + + with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with tf.GradientTape() as tape: + with pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q() | pq.Beamsplitter(theta=theta1, phi=phi1) + pq.Q(0) | pq.Phaseshifter(phi=phi_phaseshift) + + pq.Q(0) | pq.Squeezing(r=r1) + pq.Q(1) | pq.Squeezing(r=r2) + + pq.Q(0) | pq.Displacement(r=alpha1) + pq.Q(1) | pq.Displacement(r=alpha2) + + pq.Q() | pq.Beamsplitter(theta=theta2, phi=phi2) + + pq.Q(0) | pq.Kerr(xi=xi1) + pq.Q(1) | pq.Kerr(xi=xi2) + + simulator = pq.TensorflowPureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_mean_positions = simulator.execute(batch_program).state.mean_position(0) + + parameters = [theta1, phi1, phi_phaseshift, r1, r2, alpha1, alpha2, theta2, phi2] + + batch_gradient = np.array(tape.jacobian(batch_mean_positions, parameters)) + + with tf.GradientTape() as tape: + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q() | pq.Beamsplitter(theta=theta1, phi=phi1) + pq.Q(0) | pq.Phaseshifter(phi=phi_phaseshift) + + pq.Q(0) | pq.Squeezing(r=r1) + pq.Q(1) | pq.Squeezing(r=r2) + + pq.Q(0) | pq.Displacement(r=alpha1) + pq.Q(1) | pq.Displacement(r=alpha2) + + pq.Q() | pq.Beamsplitter(theta=theta2, phi=phi2) + + pq.Q(0) | pq.Kerr(xi=xi1) + pq.Q(1) | pq.Kerr(xi=xi2) + + first_mean_position = simulator.execute(first_program).state.mean_position(0) + + first_gradient = np.array(tape.gradient(first_mean_position, parameters)) + + with tf.GradientTape() as tape: + with 
pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q() | pq.Beamsplitter(theta=theta1, phi=phi1) + pq.Q(0) | pq.Phaseshifter(phi=phi_phaseshift) + + pq.Q(0) | pq.Squeezing(r=r1) + pq.Q(1) | pq.Squeezing(r=r2) + + pq.Q(0) | pq.Displacement(r=alpha1) + pq.Q(1) | pq.Displacement(r=alpha2) + + pq.Q() | pq.Beamsplitter(theta=theta2, phi=phi2) + + pq.Q(0) | pq.Kerr(xi=xi1) + pq.Q(1) | pq.Kerr(xi=xi2) + + second_mean_position = simulator.execute(second_program).state.mean_position(0) + + second_gradient = np.array(tape.gradient(second_mean_position, parameters)) + + assert np.allclose(batch_mean_positions[0], first_mean_position) + assert np.allclose(batch_mean_positions[1], second_mean_position) + + assert np.allclose(batch_gradient[:, 0], first_gradient) + assert np.allclose(batch_gradient[:, 1], second_gradient) + + +def test_batch_complex_circuit_mean_position_with_batch_apply(): + theta1 = tf.Variable(np.pi / 3) + phi1 = tf.Variable(np.pi / 4) + + phi_phaseshift = tf.Variable(np.pi / 5) + + r1 = tf.Variable(0.1) + r2 = tf.Variable(0.2) + + alpha1 = tf.Variable(1.0) + alpha2 = tf.Variable(0.5) + + theta2 = tf.Variable(np.pi / 3) + phi2 = tf.Variable(np.pi / 4) + + theta2 = tf.Variable(np.pi / 3) + phi2 = tf.Variable(np.pi / 4) + + xi1 = tf.Variable(0.1) + xi2 = tf.Variable(0.15) + + xi1_intermediate = tf.Variable(0.3) + xi2_intermediate = tf.Variable(0.4) + + with pq.Program() as first_preparation: + pq.Q() | pq.Vacuum() + pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 4) + + with pq.Program() as second_preparation: + pq.Q() | pq.Vacuum() + pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10) + + with tf.GradientTape() as tape: + with pq.Program() as first_intermediate: + pq.Q(0) | pq.Kerr(xi1_intermediate) + + with pq.Program() as second_intermediate: + pq.Q(1) | pq.Kerr(xi2_intermediate) + + with pq.Program() as batch_program: + pq.Q() | pq.BatchPrepare([first_preparation, second_preparation]) + + pq.Q() | pq.Beamsplitter(theta=theta1, phi=phi1) + pq.Q(0) | pq.Phaseshifter(phi=phi_phaseshift) + + pq.Q(0) | pq.Squeezing(r=r1) + pq.Q(1) | pq.Squeezing(r=r2) + + pq.Q() | pq.BatchApply([first_intermediate, second_intermediate]) + + pq.Q(0) | pq.Displacement(r=alpha1) + pq.Q(1) | pq.Displacement(r=alpha2) + + pq.Q() | pq.Beamsplitter(theta=theta2, phi=phi2) + + pq.Q(0) | pq.Kerr(xi=xi1) + pq.Q(1) | pq.Kerr(xi=xi2) + + simulator = pq.TensorflowPureFockSimulator(d=2, config=pq.Config(cutoff=5)) + + batch_mean_positions = simulator.execute(batch_program).state.mean_position(0) + + parameters = [theta1, phi1, phi_phaseshift, r1, r2, alpha1, alpha2, theta2, phi2] + + batch_gradient = np.array( + tape.jacobian( + batch_mean_positions, parameters + [xi1_intermediate, xi2_intermediate] + ) + ) + + with tf.GradientTape() as tape: + with pq.Program() as first_program: + pq.Q() | first_preparation + + pq.Q() | pq.Beamsplitter(theta=theta1, phi=phi1) + pq.Q(0) | pq.Phaseshifter(phi=phi_phaseshift) + + pq.Q(0) | pq.Squeezing(r=r1) + pq.Q(1) | pq.Squeezing(r=r2) + + pq.Q(0) | pq.Kerr(xi1_intermediate) + + pq.Q(0) | pq.Displacement(r=alpha1) + pq.Q(1) | pq.Displacement(r=alpha2) + + pq.Q() | pq.Beamsplitter(theta=theta2, phi=phi2) + + pq.Q(0) | pq.Kerr(xi=xi1) + pq.Q(1) | pq.Kerr(xi=xi2) + + first_mean_position = simulator.execute(first_program).state.mean_position(0) + + first_gradient = np.array( + tape.gradient(first_mean_position, parameters + [xi1_intermediate]) + ) + + with tf.GradientTape() as tape: + with pq.Program() as second_program: + pq.Q() | second_preparation + + pq.Q() | 
pq.Beamsplitter(theta=theta1, phi=phi1) + pq.Q(0) | pq.Phaseshifter(phi=phi_phaseshift) + + pq.Q(0) | pq.Squeezing(r=r1) + pq.Q(1) | pq.Squeezing(r=r2) + + pq.Q(1) | pq.Kerr(xi2_intermediate) + + pq.Q(0) | pq.Displacement(r=alpha1) + pq.Q(1) | pq.Displacement(r=alpha2) + + pq.Q() | pq.Beamsplitter(theta=theta2, phi=phi2) + + pq.Q(0) | pq.Kerr(xi=xi1) + pq.Q(1) | pq.Kerr(xi=xi2) + + second_mean_position = simulator.execute(second_program).state.mean_position(0) + + second_gradient = np.array( + tape.gradient(second_mean_position, parameters + [xi2_intermediate]) + ) + + assert np.allclose(batch_mean_positions[0], first_mean_position) + assert np.allclose(batch_mean_positions[1], second_mean_position) + + assert np.allclose( + batch_gradient[: len(parameters), 0], first_gradient[: len(parameters)] + ) + assert np.isclose(batch_gradient[-2, 0], first_gradient[-1]) + assert np.isclose(batch_gradient[-1, 0], 0.0) + + assert np.allclose( + batch_gradient[: len(parameters), 1], second_gradient[: len(parameters)] + ) + assert np.isclose(batch_gradient[-1, 1], second_gradient[-1]) + assert np.isclose(batch_gradient[-2, 1], 0.0)
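
Below is a minimal usage sketch of the new `BatchPrepare` and `BatchApply`
instructions, assembled from the tests in this patch. The two-mode simulator,
the cutoff of 5 and the gate parameters are illustrative choices only, and
`_state_vector` is the same private attribute that the tests read.

import numpy as np
import piquasso as pq

# Sub-programs preparing the individual states of the batch.
with pq.Program() as first_preparation:
    pq.Q() | pq.Vacuum()
    pq.Q(1) | pq.Squeezing(r=0.1, phi=np.pi / 4)

with pq.Program() as second_preparation:
    pq.Q() | pq.Vacuum()
    pq.Q(0) | pq.Displacement(r=1.0, phi=np.pi / 10)

# Sub-programs applied element-wise by BatchApply.
with pq.Program() as first_branch:
    pq.Q(0) | pq.Kerr(0.1)

with pq.Program() as second_branch:
    pq.Q(1) | pq.Kerr(0.2)

with pq.Program() as program:
    # Stacks the prepared state vectors into a single BatchPureFockState.
    pq.Q() | pq.BatchPrepare([first_preparation, second_preparation])

    # Ordinary gates act on every state vector in the batch at once.
    pq.Q(0, 1) | pq.Beamsplitter(theta=np.pi / 6, phi=np.pi / 3)

    # Applies the n-th sub-program to the n-th state vector of the batch.
    pq.Q() | pq.BatchApply([first_branch, second_branch])

simulator = pq.PureFockSimulator(d=2, config=pq.Config(cutoff=5))
state = simulator.execute(program).state

# The batch is stored as a tensor of shape (state_vector_size, number_of_batches).
print(state._state_vector.shape)

# mean_position returns one value per element of the batch.
print(state.mean_position(mode=0))

The same program structure is exercised end-to-end in test_Batch_with_OneByOne
above.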